Cinder model integration

This patch adds Cinder cluster data model integration to the Watcher decision engine: a CinderHelper client wrapper, storage model elements (StorageNode, Pool, Volume), a StorageModelRoot graph, a Cinder cluster data model collector, and Cinder notification endpoints.

Change-Id: I31d5bc5e2bbed885d074d66bf7999d42cec15f10
Implements: blueprint cinder-model-integration
Hidekazu Nakamura 2017-03-28 17:50:10 +09:00
parent 5b6768140f
commit 489356da3a
32 changed files with 2544 additions and 1 deletion

View File

@@ -0,0 +1,4 @@
---
features:
- |
Added the Cinder cluster data model, which exposes storage nodes, pools and volumes to the decision engine.

View File

@@ -88,6 +88,8 @@ watcher_planners =
watcher_cluster_data_model_collectors =
compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector
storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector
[pbr]
warnerrors = true

View File

@@ -0,0 +1,79 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log
from watcher.common import clients
from watcher.common import exception
LOG = log.getLogger(__name__)
class CinderHelper(object):
def __init__(self, osc=None):
""":param osc: an OpenStackClients instance"""
self.osc = osc if osc else clients.OpenStackClients()
self.cinder = self.osc.cinder()
def get_storage_node_list(self):
return list(self.cinder.services.list(binary='cinder-volume'))
def get_storage_node_by_name(self, name):
"""Get storage node by name(host@backendname)"""
try:
storages = list(filter(lambda storage:
storage.host == name,
self.get_storage_node_list()))
if len(storages) != 1:
raise exception.StorageNodeNotFound(name=name)
return storages[0]
except Exception as exc:
LOG.exception(exc)
raise exception.StorageNodeNotFound(name=name)
def get_storage_pool_list(self):
return self.cinder.pools.list(detailed=True)
def get_storage_pool_by_name(self, name):
"""Get pool by name(host@backend#poolname)"""
try:
pools = list(filter(lambda pool:
pool.name == name,
self.get_storage_pool_list()))
if len(pools) != 1:
raise exception.PoolNotFound(name=name)
return pools[0]
except Exception as exc:
LOG.exception(exc)
raise exception.PoolNotFound(name=name)
def get_volume_list(self):
return self.cinder.volumes.list(search_opts={'all_tenants': True})
def get_volume_type_list(self):
return self.cinder.volume_types.list()
def get_volume_type_by_backendname(self, backendname):
volume_type_list = self.get_volume_type_list()
volume_type = list(filter(
lambda volume_type:
volume_type.extra_specs.get(
'volume_backend_name') == backendname, volume_type_list))
if volume_type:
return volume_type[0].name
else:
return ""

View File

@@ -432,6 +432,22 @@ class ComputeNodeNotFound(ComputeResourceNotFound):
msg_fmt = _("The compute node %(name)s could not be found")
class StorageResourceNotFound(WatcherException):
msg_fmt = _("The storage resource '%(name)s' could not be found")
class StorageNodeNotFound(StorageResourceNotFound):
msg_fmt = _("The storage node %(name)s could not be found")
class PoolNotFound(StorageResourceNotFound):
msg_fmt = _("The pool %(name)s could not be found")
class VolumeNotFound(StorageResourceNotFound):
msg_fmt = _("The volume '%(name)s' could not be found")
class LoadingError(WatcherException):
msg_fmt = _("Error loading plugin '%(name)s'")

View File

@@ -0,0 +1,209 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log
from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model.notification import cinder
LOG = log.getLogger(__name__)
class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector):
"""Cinder cluster data model collector
The Cinder cluster data model collector creates an in-memory
representation of the resources exposed by the storage service.
"""
def __init__(self, config, osc=None):
super(CinderClusterDataModelCollector, self).__init__(config, osc)
@property
def notification_endpoints(self):
"""Associated notification endpoints
:return: Associated notification endpoints
:rtype: List of :py:class:`~.CinderNotification` subclass instances
"""
return [
cinder.CapacityNotificationEndpoint(self),
cinder.VolumeCreateEnd(self),
cinder.VolumeDeleteEnd(self),
cinder.VolumeUpdateEnd(self),
cinder.VolumeAttachEnd(self),
cinder.VolumeDetachEnd(self),
cinder.VolumeResizeEnd(self)
]
def execute(self):
"""Build the storage cluster data model"""
LOG.debug("Building latest Cinder cluster data model")
builder = ModelBuilder(self.osc)
return builder.execute()
class ModelBuilder(object):
"""Build the graph-based model
This model builder adds the following data:
- Storage-related knowledge (Cinder)
"""
def __init__(self, osc):
self.osc = osc
self.model = model_root.StorageModelRoot()
self.cinder = osc.cinder()
self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc)
def _add_physical_layer(self):
"""Add the physical layer of the graph.
This includes components which represent actual infrastructure
hardware.
"""
for snode in self.cinder_helper.get_storage_node_list():
self.add_storage_node(snode)
for pool in self.cinder_helper.get_storage_pool_list():
pool = self._build_storage_pool(pool)
self.model.add_pool(pool)
storage_name = getattr(pool, 'name')
try:
storage_node = self.model.get_node_by_name(
storage_name)
# Connect the pool to its storage node
self.model.map_pool(pool, storage_node)
except exception.StorageNodeNotFound:
continue
def add_storage_node(self, node):
# Build and add base node.
storage_node = self.build_storage_node(node)
self.model.add_node(storage_node)
def add_storage_pool(self, pool):
storage_pool = self._build_storage_pool(pool)
self.model.add_pool(storage_pool)
def build_storage_node(self, node):
"""Build a storage node from a Cinder storage node
:param node: A storage node
:type node: :py:class:`~cinderclient.v2.services.Service`
"""
# Since Ocata, node.host is formatted as host@backendname;
# on earlier releases it may be just the host name.
backend = ""
try:
backend = node.host.split('@')[1]
except IndexError:
pass
volume_type = self.cinder_helper.get_volume_type_by_backendname(
backend)
# build up the storage node.
node_attributes = {
"host": node.host,
"zone": node.zone,
"state": node.state,
"status": node.status,
"volume_type": volume_type}
storage_node = element.StorageNode(**node_attributes)
return storage_node
def _build_storage_pool(self, pool):
"""Build a storage pool from a Cinder storage pool
:param pool: A storage pool
:type pool: :py:class:`~cinderclient.v2.capabilities.Capabilities`
"""
# build up the storage pool.
node_attributes = {
"name": pool.name,
"total_volumes": pool.total_volumes,
"total_capacity_gb": pool.total_capacity_gb,
"free_capacity_gb": pool.free_capacity_gb,
"provisioned_capacity_gb": pool.provisioned_capacity_gb,
"allocated_capacity_gb": pool.allocated_capacity_gb}
storage_pool = element.Pool(**node_attributes)
return storage_pool
def _add_virtual_layer(self):
"""Add the virtual layer to the graph.
This layer is the virtual components of the infrastructure.
"""
self._add_virtual_storage()
def _add_virtual_storage(self):
volumes = self.cinder_helper.get_volume_list()
for vol in volumes:
volume = self._build_volume_node(vol)
self.model.add_volume(volume)
pool_name = getattr(vol, 'os-vol-host-attr:host')
if pool_name is None:
# The volume is not attached to any pool
continue
try:
pool = self.model.get_pool_by_pool_name(
pool_name)
self.model.map_volume(volume, pool)
except exception.PoolNotFound:
continue
def _build_volume_node(self, volume):
"""Build an volume node
Create an volume node for the graph using cinder and the
`volume` cinder object.
:param instance: Cinder Volume object.
:return: A volume node for the graph.
"""
attachments = [{k: v for k, v in six.iteritems(d) if k in (
'server_id', 'attachment_id')} for d in volume.attachments]
volume_attributes = {
"uuid": volume.id,
"size": volume.size,
"status": volume.status,
"attachments": attachments,
"name": volume.name or "",
"multiattach": volume.multiattach,
"snapshot_id": volume.snapshot_id or "",
"project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'),
"metadata": volume.metadata,
"bootable": volume.bootable}
return element.Volume(**volume_attributes)
def execute(self):
"""Instantiates the graph with the openstack cluster data.
The graph is populated along 2 layers: virtual and physical. As each
new layer is built connections are made back to previous layers.
"""
self._add_physical_layer()
self._add_virtual_layer()
return self.model
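
The builder and the model lookups lean on Cinder's naming convention: a pool is named host@backend#pool, the owning node name is the part before the '#', and the backend used to resolve the volume type is the part after the '@' (which may be absent on older setups). A standalone sketch of that parsing, not part of the patch, with an illustrative name:

pool_name = "host_0@backend_0#pool_0"   # illustrative pool name
node_name = pool_name.split("#")[0]     # -> "host_0@backend_0"
backend = node_name.split("@")[1] if "@" in node_name else ""   # -> "backend_0"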

View File

@@ -18,11 +18,23 @@
from watcher.decision_engine.model.element import instance
from watcher.decision_engine.model.element import node
from watcher.decision_engine.model.element import volume
ServiceState = node.ServiceState
ComputeNode = node.ComputeNode
StorageNode = node.StorageNode
Pool = node.Pool
InstanceState = instance.InstanceState
Instance = instance.Instance
VolumeState = volume.VolumeState
Volume = volume.Volume
__all__ = ['ServiceState', 'ComputeNode', 'InstanceState', 'Instance']
__all__ = ['ServiceState',
'ComputeNode',
'InstanceState',
'Instance',
'StorageNode',
'Pool',
'VolumeState',
'Volume']

View File

@@ -17,6 +17,7 @@
import enum
from watcher.decision_engine.model.element import compute_resource
from watcher.decision_engine.model.element import storage_resource
from watcher.objects import base
from watcher.objects import fields as wfields
@@ -45,3 +46,35 @@ class ComputeNode(compute_resource.ComputeResource):
def accept(self, visitor):
raise NotImplementedError()
@base.WatcherObjectRegistry.register_if(False)
class StorageNode(storage_resource.StorageResource):
fields = {
"host": wfields.StringField(),
"zone": wfields.StringField(),
"status": wfields.StringField(default=ServiceState.ENABLED.value),
"state": wfields.StringField(default=ServiceState.ONLINE.value),
"volume_type": wfields.StringField()
}
def accept(self, visitor):
raise NotImplementedError()
@base.WatcherObjectRegistry.register_if(False)
class Pool(storage_resource.StorageResource):
fields = {
"name": wfields.StringField(),
"total_volumes": wfields.NonNegativeIntegerField(),
"total_capacity_gb": wfields.NonNegativeIntegerField(),
"free_capacity_gb": wfields.NonNegativeIntegerField(),
"provisioned_capacity_gb": wfields.NonNegativeIntegerField(),
"allocated_capacity_gb": wfields.NonNegativeIntegerField(),
"virtual_free": wfields.NonNegativeIntegerField(),
}
def accept(self, visitor):
raise NotImplementedError()
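
As a rough illustration (not part of the patch) of how these element classes are instantiated elsewhere in the patch, for example by the collector and the test faker; the field values are made up:

from watcher.decision_engine.model import element

node = element.StorageNode(host="host_0@backend_0", zone="zone_0",
                           status="enabled", state="up",
                           volume_type="type_0")
pool = element.Pool(name="host_0@backend_0#pool_0", total_volumes=2,
                    total_capacity_gb=500, free_capacity_gb=420,
                    provisioned_capacity_gb=80, allocated_capacity_gb=80,
                    virtual_free=420)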

View File

@@ -0,0 +1,33 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from watcher.decision_engine.model.element import base
from watcher.objects import fields as wfields
@six.add_metaclass(abc.ABCMeta)
class StorageResource(base.Element):
VERSION = '1.0'
fields = {
"uuid": wfields.StringField(),
"human_id": wfields.StringField(default=""),
}

View File

@@ -0,0 +1,56 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from watcher.decision_engine.model.element import storage_resource
from watcher.objects import base
from watcher.objects import fields as wfields
class VolumeState(enum.Enum):
# https://developer.openstack.org/api-ref/block-storage/v3/#volumes-volumes
CREATING = 'creating'
AVAILABLE = 'available'
ATTACHING = 'attaching'
IN_USE = 'in-use'
DELETING = 'deleting'
ERROR = 'error'
ERROR_DELETING = 'error_deleting'
BACKING_UP = 'backing-up'
RESTORING_BACKUP = 'restoring-backup'
ERROR_RESTORING = 'error_restoring'
ERROR_EXTENDING = 'error_extending'
@base.WatcherObjectRegistry.register_if(False)
class Volume(storage_resource.StorageResource):
fields = {
"size": wfields.NonNegativeIntegerField(),
"status": wfields.StringField(default=VolumeState.AVAILABLE.value),
"attachments": wfields.FlexibleListOfDictField(),
"name": wfields.StringField(),
"multiattach": wfields.BooleanField(),
"snapshot_id": wfields.UUIDField(),
"project_id": wfields.UUIDField(),
"metadata": wfields.JsonField(),
"bootable": wfields.BooleanField()
}
def accept(self, visitor):
raise NotImplementedError()
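
The status field is stored as a plain string; the VolumeState enum above documents the values the Cinder API reports and can be used to normalize them. A small sketch (not part of the patch), using the element package exports added in this commit:

from watcher.decision_engine.model import element

assert element.VolumeState('in-use') is element.VolumeState.IN_USE
assert element.VolumeState.AVAILABLE.value == 'available'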

View File

@@ -242,3 +242,300 @@ class ModelRoot(nx.DiGraph, base.Model):
return node1.as_dict() == node2.as_dict()
return nx.algorithms.isomorphism.isomorph.is_isomorphic(
G1, G2, node_match=node_match)
class StorageModelRoot(nx.DiGraph, base.Model):
"""Cluster graph for an Openstack cluster."""
def __init__(self, stale=False):
super(StorageModelRoot, self).__init__()
self.stale = stale
def __nonzero__(self):
return not self.stale
__bool__ = __nonzero__
@staticmethod
def assert_node(obj):
if not isinstance(obj, element.StorageNode):
raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid: %s") % type(obj))
@staticmethod
def assert_pool(obj):
if not isinstance(obj, element.Pool):
raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid: %s") % type(obj))
@staticmethod
def assert_volume(obj):
if not isinstance(obj, element.Volume):
raise exception.IllegalArgumentException(
message=_("'obj' argument type is not valid: %s") % type(obj))
@lockutils.synchronized("storage_model")
def add_node(self, node):
self.assert_node(node)
super(StorageModelRoot, self).add_node(node.host, node)
@lockutils.synchronized("storage_model")
def add_pool(self, pool):
self.assert_pool(pool)
super(StorageModelRoot, self).add_node(pool.name, pool)
@lockutils.synchronized("storage_model")
def remove_node(self, node):
self.assert_node(node)
try:
super(StorageModelRoot, self).remove_node(node.host)
except nx.NetworkXError as exc:
LOG.exception(exc)
raise exception.StorageNodeNotFound(name=node.host)
@lockutils.synchronized("storage_model")
def remove_pool(self, pool):
self.assert_pool(pool)
try:
super(StorageModelRoot, self).remove_node(pool.name)
except nx.NetworkXError as exc:
LOG.exception(exc)
raise exception.PoolNotFound(name=pool.name)
@lockutils.synchronized("storage_model")
def map_pool(self, pool, node):
"""Map a newly created pool to a node
:param pool: :py:class:`~.Pool` object or pool name
:param node: :py:class:`~.StorageNode` object or node host
"""
if isinstance(pool, six.string_types):
pool = self.get_pool_by_pool_name(pool)
if isinstance(node, six.string_types):
node = self.get_node_by_name(node)
self.assert_node(node)
self.assert_pool(pool)
self.add_edge(pool.name, node.host)
@lockutils.synchronized("storage_model")
def unmap_pool(self, pool, node):
"""Unmap a pool from a node
:param pool: :py:class:`~.Pool` object or pool name
:param node: :py:class:`~.StorageNode` object or node name
"""
if isinstance(pool, six.string_types):
pool = self.get_pool_by_pool_name(pool)
if isinstance(node, six.string_types):
node = self.get_node_by_name(node)
self.remove_edge(pool.name, node.host)
@lockutils.synchronized("storage_model")
def add_volume(self, volume):
self.assert_volume(volume)
super(StorageModelRoot, self).add_node(volume.uuid, volume)
@lockutils.synchronized("storage_model")
def remove_volume(self, volume):
self.assert_volume(volume)
try:
super(StorageModelRoot, self).remove_node(volume.uuid)
except nx.NetworkXError as exc:
LOG.exception(exc)
raise exception.VolumeNotFound(name=volume.uuid)
@lockutils.synchronized("storage_model")
def map_volume(self, volume, pool):
"""Map a newly created volume to a pool
:param volume: :py:class:`~.Volume` object or volume UUID
:param pool: :py:class:`~.Pool` object or pool name
"""
if isinstance(volume, six.string_types):
volume = self.get_volume_by_uuid(volume)
if isinstance(pool, six.string_types):
pool = self.get_pool_by_pool_name(pool)
self.assert_pool(pool)
self.assert_volume(volume)
self.add_edge(volume.uuid, pool.name)
@lockutils.synchronized("storage_model")
def unmap_volume(self, volume, pool):
"""Unmap a volume from a pool
:param volume: :py:class:`~.Volume` object or volume UUID
:param pool: :py:class:`~.Pool` object or pool name
"""
if isinstance(volume, six.string_types):
volume = self.get_volume_by_uuid(volume)
if isinstance(pool, six.string_types):
pool = self.get_pool_by_pool_name(pool)
self.remove_edge(volume.uuid, pool.name)
def delete_volume(self, volume):
self.assert_volume(volume)
self.remove_volume(volume)
@lockutils.synchronized("storage_model")
def get_all_storage_nodes(self):
return {host: cn for host, cn in self.nodes(data=True)
if isinstance(cn, element.StorageNode)}
@lockutils.synchronized("storage_model")
def get_node_by_name(self, name):
"""Get a node by node name
:param node: :py:class:`~.StorageNode` object or node name
"""
try:
return self._get_by_name(name.split("#")[0])
except exception.StorageResourceNotFound:
raise exception.StorageNodeNotFound(name=name)
@lockutils.synchronized("storage_model")
def get_pool_by_pool_name(self, name):
try:
return self._get_by_name(name)
except exception.StorageResourceNotFound:
raise exception.PoolNotFound(name=name)
@lockutils.synchronized("storage_model")
def get_volume_by_uuid(self, uuid):
try:
return self._get_by_uuid(uuid)
except exception.StorageResourceNotFound:
raise exception.VolumeNotFound(name=uuid)
def _get_by_uuid(self, uuid):
try:
return self.node[uuid]
except Exception as exc:
LOG.exception(exc)
raise exception.StorageResourceNotFound(name=uuid)
def _get_by_name(self, name):
try:
return self.node[name]
except Exception as exc:
LOG.exception(exc)
raise exception.StorageResourceNotFound(name=name)
@lockutils.synchronized("storage_model")
def get_node_by_pool_name(self, pool_name):
pool = self._get_by_name(pool_name)
for node_name in self.neighbors(pool.name):
node = self._get_by_name(node_name)
if isinstance(node, element.StorageNode):
return node
raise exception.StorageNodeNotFound(name=pool_name)
@lockutils.synchronized("storage_model")
def get_node_pools(self, node):
self.assert_node(node)
node_pools = []
for pool_name in self.predecessors(node.host):
pool = self._get_by_name(pool_name)
if isinstance(pool, element.Pool):
node_pools.append(pool)
return node_pools
@lockutils.synchronized("storage_model")
def get_pool_by_volume(self, volume):
self.assert_volume(volume)
volume = self._get_by_uuid(volume.uuid)
for p in self.neighbors(volume.uuid):
pool = self._get_by_name(p)
if isinstance(pool, element.Pool):
return pool
raise exception.PoolNotFound(name=volume.uuid)
@lockutils.synchronized("storage_model")
def get_all_volumes(self):
return {name: vol for name, vol in self.nodes(data=True)
if isinstance(vol, element.Volume)}
@lockutils.synchronized("storage_model")
def get_pool_volumes(self, pool):
self.assert_pool(pool)
volumes = []
for vol in self.predecessors(pool.name):
volume = self._get_by_uuid(vol)
if isinstance(volume, element.Volume):
volumes.append(volume)
return volumes
def to_string(self):
return self.to_xml()
def to_xml(self):
root = etree.Element("ModelRoot")
# Build storage node tree
for cn in sorted(self.get_all_storage_nodes().values(),
key=lambda cn: cn.host):
storage_node_el = cn.as_xml_element()
# Build mapped pool tree
node_pools = self.get_node_pools(cn)
for pool in sorted(node_pools, key=lambda x: x.name):
pool_el = pool.as_xml_element()
storage_node_el.append(pool_el)
# Build mapped volume tree
pool_volumes = self.get_pool_volumes(pool)
for volume in sorted(pool_volumes, key=lambda x: x.uuid):
volume_el = volume.as_xml_element()
pool_el.append(volume_el)
root.append(storage_node_el)
# Build unmapped volume tree (i.e. not assigned to any pool)
for volume in sorted(self.get_all_volumes().values(),
key=lambda vol: vol.uuid):
try:
self.get_pool_by_volume(volume)
except (exception.VolumeNotFound, exception.PoolNotFound):
root.append(volume.as_xml_element())
return etree.tostring(root, pretty_print=True).decode('utf-8')
@classmethod
def from_xml(cls, data):
model = cls()
root = etree.fromstring(data)
for cn in root.findall('.//StorageNode'):
node = element.StorageNode(**cn.attrib)
model.add_node(node)
for p in root.findall('.//Pool'):
pool = element.Pool(**p.attrib)
model.add_pool(pool)
parent = p.getparent()
if parent.tag == 'StorageNode':
node = model.get_node_by_name(parent.get('host'))
model.map_pool(pool, node)
else:
model.add_pool(pool)
for vol in root.findall('.//Volume'):
volume = element.Volume(**vol.attrib)
model.add_volume(volume)
parent = vol.getparent()
if parent.tag == 'Pool':
pool = model.get_pool_by_pool_name(parent.get('name'))
model.map_volume(volume, pool)
else:
model.add_volume(volume)
return model
@classmethod
def is_isomorphic(cls, G1, G2):
return nx.algorithms.isomorphism.isomorph.is_isomorphic(
G1, G2)
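
A minimal sketch (not part of the patch) of wiring the storage graph by hand, the way the collector and the test faker do; the element values are illustrative:

from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root

model = model_root.StorageModelRoot()
node = element.StorageNode(host="host_0@backend_0", zone="zone_0",
                           status="enabled", state="up", volume_type="type_0")
pool = element.Pool(name="host_0@backend_0#pool_0", total_volumes=0,
                    total_capacity_gb=500, free_capacity_gb=500,
                    provisioned_capacity_gb=0, allocated_capacity_gb=0,
                    virtual_free=500)
model.add_node(node)
model.add_pool(pool)
model.map_pool(pool, node)      # adds the edge pool -> node
assert model.get_node_by_pool_name(pool.name).host == node.host
assert model.get_node_pools(node) == [pool]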

View File

@@ -0,0 +1,387 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log
from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.model.notification import base
from watcher.decision_engine.model.notification import filtering
LOG = log.getLogger(__name__)
class CinderNotification(base.NotificationEndpoint):
def __init__(self, collector):
super(CinderNotification, self).__init__(collector)
self._cinder = None
@property
def cinder(self):
if self._cinder is None:
self._cinder = cinder_helper.CinderHelper()
return self._cinder
def update_pool(self, pool, data):
"""Update the storage pool using the notification data."""
pool.update({
"total_capacity_gb": data['total'],
"free_capacity_gb": data['free'],
"provisioned_capacity_gb": data['provisioned'],
"allocated_capacity_gb": data['allocated'],
"virtual_free": data['virtual_free']
})
node_name = pool.name.split("#")[0]
node = self.get_or_create_node(node_name)
self.cluster_data_model.map_pool(pool, node)
LOG.debug("Mapped pool %s to %s", pool.name, node.host)
def update_pool_by_api(self, pool):
"""Update the storage pool using the API data."""
if not pool:
return
_pool = self.cinder.get_storage_pool_by_name(pool.name)
pool.update({
"total_volumes": _pool.total_volumes,
"total_capacity_gb": _pool.total_capacity_gb,
"free_capacity_gb": _pool.free_capacity_gb,
"provisioned_capacity_gb": _pool.provisioned_capacity_gb,
"allocated_capacity_gb": _pool.allocated_capacity_gb
})
node_name = pool.name.split("#")[0]
node = self.get_or_create_node(node_name)
self.cluster_data_model.map_pool(pool, node)
LOG.debug("Mapped pool %s to %s", pool.name, node.host)
def create_storage_node(self, name):
"""Create the storage node by querying the Cinder API."""
try:
_node = self.cinder.get_storage_node_by_name(name)
_volume_type = self.cinder.get_volume_type_by_backendname(
# name is formatted as host@backendname
name.split('@')[1])
storage_node = element.StorageNode(
host=_node.host,
zone=_node.zone,
state=_node.state,
status=_node.status,
volume_type=_volume_type)
return storage_node
except Exception as exc:
LOG.exception(exc)
LOG.debug("Could not create storage node %s.", name)
raise exception.StorageNodeNotFound(name=name)
def get_or_create_node(self, name):
"""Get storage node by name, otherwise create storage node"""
if name is None:
LOG.debug("Storage node name not provided: skipping")
return
try:
return self.cluster_data_model.get_node_by_name(name)
except exception.StorageNodeNotFound:
# The node didn't exist yet so we create a new node object
node = self.create_storage_node(name)
LOG.debug("New storage node created: %s", name)
self.cluster_data_model.add_node(node)
LOG.debug("New storage node added: %s", name)
return node
def create_pool(self, pool_name):
"""Create the storage pool by querying the Cinder API."""
try:
_pool = self.cinder.get_storage_pool_by_name(pool_name)
pool = element.Pool(
name=_pool.name,
total_volumes=_pool.total_volumes,
total_capacity_gb=_pool.total_capacity_gb,
free_capacity_gb=_pool.free_capacity_gb,
provisioned_capacity_gb=_pool.provisioned_capacity_gb,
allocated_capacity_gb=_pool.allocated_capacity_gb)
return pool
except Exception as exc:
LOG.exception(exc)
LOG.debug("Could not refresh the pool %s.", pool_name)
raise exception.PoolNotFound(name=pool_name)
def get_or_create_pool(self, name):
if not name:
LOG.debug("Pool name not provided: skipping")
return
try:
return self.cluster_data_model.get_pool_by_pool_name(name)
except exception.PoolNotFound:
# The pool didn't exist yet so we create a new pool object
pool = self.create_pool(name)
LOG.debug("New storage pool created: %s", name)
self.cluster_data_model.add_pool(pool)
LOG.debug("New storage pool added: %s", name)
return pool
def get_or_create_volume(self, volume_id, pool_name=None):
try:
if pool_name:
self.get_or_create_pool(pool_name)
except exception.PoolNotFound:
LOG.warning("Could not find storage pool %(pool)s for "
"volume %(volume)s",
dict(pool=pool_name, volume=volume_id))
try:
return self.cluster_data_model.get_volume_by_uuid(volume_id)
except exception.VolumeNotFound:
# The volume didn't exist yet so we create a new volume object
volume = element.Volume(uuid=volume_id)
self.cluster_data_model.add_volume(volume)
return volume
def update_volume(self, volume, data):
"""Update the volume using the notification data."""
def _keyReplace(key):
if key == 'instance_uuid':
return 'server_id'
if key == 'id':
return 'attachment_id'
attachments = [
{_keyReplace(k): v for k, v in six.iteritems(d)
if k in ('instance_uuid', 'id')}
for d in data['volume_attachment']
]
# glance_metadata is provided if volume is bootable
bootable = False
if 'glance_metadata' in data:
bootable = True
volume.update({
"name": data['display_name'] or "",
"size": data['size'],
"status": data['status'],
"attachments": attachments,
"snapshot_id": data['snapshot_id'] or "",
"project_id": data['tenant_id'],
"metadata": data['metadata'],
"bootable": bootable
})
try:
# If the volume belongs to a pool, update that pool element:
# get the existing pool or create it via the Cinder API.
pool = self.get_or_create_pool(data['host'])
self.update_pool_by_api(pool)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
self.update_volume_mapping(volume, pool)
def update_volume_mapping(self, volume, pool):
if pool is None:
self.cluster_data_model.add_volume(volume)
LOG.debug("Volume %s not yet attached to any pool: skipping",
volume.uuid)
return
try:
try:
current_pool = (
self.cluster_data_model.get_pool_by_volume(
volume) or self.get_or_create_pool(pool.name))
except exception.PoolNotFound as exc:
LOG.exception(exc)
# If we can't create the pool,
# we consider the volume as unmapped
current_pool = None
LOG.debug("Mapped pool %s found", pool.name)
if current_pool and pool != current_pool:
LOG.debug("Unmapping volume %s from %s",
volume.uuid, pool.name)
self.cluster_data_model.unmap_volume(volume, current_pool)
except exception.VolumeNotFound:
# The volume didn't exist yet so we map it for the first time
LOG.debug("New volume: mapping it to %s", pool.name)
finally:
if pool:
self.cluster_data_model.map_volume(volume, pool)
LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name)
def delete_volume(self, volume, pool):
try:
self.cluster_data_model.delete_volume(volume)
except Exception:
LOG.info("Volume %s already deleted", volume.uuid)
try:
if pool:
# If the volume belonged to a pool, refresh that pool element:
# get the existing pool or create it via the Cinder API.
pool = self.get_or_create_pool(pool.name)
self.update_pool_by_api(pool)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
class CapacityNotificationEndpoint(CinderNotification):
@property
def filter_rule(self):
"""Cinder capacity notification filter"""
return filtering.NotificationFilter(
publisher_id=r'capacity.*',
event_type='capacity.pool',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
name = payload['name_to_id']
try:
pool = self.get_or_create_pool(name)
self.update_pool(pool, payload)
except exception.PoolNotFound as exc:
LOG.exception(exc)
class VolumeNotificationEndpoint(CinderNotification):
publisher_id_regex = r'^volume.*'
class VolumeCreateEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.create.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
self.update_volume(volume, payload)
class VolumeUpdateEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.update.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
self.update_volume(volume, payload)
class VolumeAttachEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.attach.end',
)
class VolumeDetachEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.detach.end',
)
class VolumeResizeEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.resize.end',
)
class VolumeDeleteEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.delete.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s" %
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
try:
pool = self.get_or_create_pool(poolname)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
self.delete_volume(volume, pool)
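
For reference, update_volume() above rewrites attachment entries from the field names Cinder reports in the volume.*.end payload (instance_uuid, id) to the names stored on the Volume element (server_id, attachment_id). A standalone sketch of that remapping, not part of the patch, with an illustrative payload entry:

payload_attachment = {"instance_uuid": "server", "id": "attachment",
                      "volume_id": "VOLUME_0"}   # illustrative entry
remapped = {
    {"instance_uuid": "server_id", "id": "attachment_id"}[key]: value
    for key, value in payload_attachment.items()
    if key in ("instance_uuid", "id")
}
assert remapped == {"server_id": "server", "attachment_id": "attachment"}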

View File

@@ -82,6 +82,7 @@ class BaseStrategy(loadable.Loadable):
self._osc = osc
self._collector_manager = None
self._compute_model = None
self._storage_model = None
self._input_parameters = utils.Struct()
self._audit_scope = None
self._audit_scope_handler = None
@@ -192,6 +193,27 @@ class BaseStrategy(loadable.Loadable):
return self._compute_model
@property
def storage_model(self):
"""Cluster data model
:returns: Cluster data model the strategy is executed on
:rtype model: :py:class:`~.ModelRoot` instance
"""
if self._storage_model is None:
collector = self.collector_manager.get_cluster_model_collector(
'storage', osc=self.osc)
self._storage_model = self.audit_scope_handler.get_scoped_model(
collector.get_latest_cluster_data_model())
if not self._storage_model:
raise exception.ClusterStateNotDefined()
if self._storage_model.stale:
raise exception.ClusterStateStale()
return self._storage_model
@classmethod
def get_schema(cls):
"""Defines a Schema that the input parameters shall comply to

View File

@@ -0,0 +1,126 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import exception
from watcher.tests import base
@mock.patch.object(clients.OpenStackClients, 'cinder')
class TestCinderHelper(base.TestCase):
def setUp(self):
super(TestCinderHelper, self).setUp()
@staticmethod
def fake_storage_node(**kwargs):
node = mock.MagicMock()
node.binary = kwargs.get('binary', 'cinder-volume')
node.host = kwargs.get('name', 'host@backend')
return node
def test_get_storage_node_list(self, mock_cinder):
node1 = self.fake_storage_node()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.services.list.return_value = [node1]
cinder_util.get_storage_node_list()
cinder_util.cinder.services.list.assert_called_once_with(
binary='cinder-volume')
def test_get_storage_node_by_name_success(self, mock_cinder):
node1 = self.fake_storage_node()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.services.list.return_value = [node1]
node = cinder_util.get_storage_node_by_name('host@backend')
self.assertEqual(node, node1)
def test_get_storage_node_by_name_failure(self, mock_cinder):
node1 = self.fake_storage_node()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.services.list.return_value = [node1]
self.assertRaisesRegex(
exception.StorageNodeNotFound,
"The storage node failure could not be found",
cinder_util.get_storage_node_by_name, 'failure')
@staticmethod
def fake_pool(**kwargs):
pool = mock.MagicMock()
pool.name = kwargs.get('name', 'host@backend#pool')
return pool
def test_get_storage_pool_list(self, mock_cinder):
pool = self.fake_pool()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.pools.list.return_value = [pool]
cinder_util.get_storage_pool_list()
cinder_util.cinder.pools.list.assert_called_once_with(detailed=True)
def test_get_storage_pool_by_name_success(self, mock_cinder):
pool1 = self.fake_pool()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.pools.list.return_value = [pool1]
pool = cinder_util.get_storage_pool_by_name('host@backend#pool')
self.assertEqual(pool, pool1)
def test_get_storage_pool_by_name_failure(self, mock_cinder):
pool1 = self.fake_pool()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.pools.list.return_value = [pool1]
self.assertRaisesRegex(
exception.PoolNotFound,
"The pool failure could not be found",
cinder_util.get_storage_pool_by_name, 'failure')
@staticmethod
def fake_volume_type(**kwargs):
volume_type = mock.MagicMock()
volume_type.name = kwargs.get('name', 'fake_type')
extra_specs = {'volume_backend_name': 'backend'}
volume_type.extra_specs = kwargs.get('extra_specs', extra_specs)
return volume_type
def test_get_volume_type_list(self, mock_cinder):
volume_type1 = self.fake_volume_type()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.volume_types.list.return_value = [volume_type1]
cinder_util.get_volume_type_list()
cinder_util.cinder.volume_types.list.assert_called_once_with()
def test_get_volume_type_by_backendname_with_backend_exist(
self, mock_cinder):
volume_type1 = self.fake_volume_type()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.volume_types.list.return_value = [volume_type1]
volume_type_name = cinder_util.get_volume_type_by_backendname(
'backend')
self.assertEqual(volume_type_name, volume_type1.name)
def test_get_volume_type_by_backendname_with_no_backend_exist(
self, mock_cinder):
volume_type1 = self.fake_volume_type()
cinder_util = cinder_helper.CinderHelper()
cinder_util.cinder.volume_types.list.return_value = [volume_type1]
volume_type_name = cinder_util.get_volume_type_by_backendname(
'nobackend')
self.assertEqual("", volume_type_name)

View File

@@ -0,0 +1,23 @@
<ModelRoot>
<StorageNode status="enabled" zone="zone_0" state="up" volume_type="type_0" host="host_0@backend_0" human_id="">
<Pool total_capacity_gb="500" name="host_0@backend_0#pool_0" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_0" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_0" human_id="" name="name_0" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_1" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_1" human_id="" name="name_1" bootable="false"/>
</Pool>
<Pool total_capacity_gb="500" name="host_0@backend_0#pool_1" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_2" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_2" human_id="" name="name_2" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_3" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_3" human_id="" name="name_3" bootable="false"/>
</Pool>
</StorageNode>
<StorageNode status="enabled" zone="zone_1" state="up" volume_type="type_1" host="host_1@backend_1" human_id="">
<Pool total_capacity_gb="500" name="host_1@backend_1#pool_0" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_4" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_4" human_id="" name="name_4" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_5" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_5" human_id="" name="name_5" bootable="false"/>
</Pool>
<Pool total_capacity_gb="500" name="host_1@backend_1#pool_1" total_volumes="2" provisioned_capacity_gb="80" human_id="" allocated_capacity_gb="80" free_capacity_gb="420" virtual_free="420">
<Volume status="in-use" uuid="VOLUME_6" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_6" human_id="" name="name_6" bootable="false"/>
<Volume status="in-use" uuid="VOLUME_7" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_7" human_id="" name="name_7" bootable="false"/>
</Pool>
</StorageNode>
<Volume status="in-use" uuid="VOLUME_8" attachments='[{"server_id": "server", "attachment_id": "attachment"}]' multiattach="true" size="40" metadata='{"readonly": false, "attached_mode": "rw"}' snapshot_id="" project_id="project_8" human_id="" name="name_8" bootable="false"/>
</ModelRoot>

View File

@@ -135,3 +135,123 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self):
return self.load_model(
'scenario_9_with_3_active_plus_1_disabled_nodes.xml')
class FakerStorageModelCollector(base.BaseClusterDataModelCollector):
def __init__(self, config=None, osc=None):
if config is None:
config = mock.Mock(period=777)
super(FakerStorageModelCollector, self).__init__(config)
@property
def notification_endpoints(self):
return []
def load_data(self, filename):
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as xml_file:
xml_data = xml_file.read()
return xml_data
def load_model(self, filename):
return modelroot.StorageModelRoot.from_xml(self.load_data(filename))
def execute(self):
return self._cluster_data_model or self.build_scenario_1()
def build_scenario_1(self):
model = modelroot.StorageModelRoot()
# number of nodes
node_count = 2
# number of pools per node
pool_count = 2
# number of volumes
volume_count = 9
for i in range(0, node_count):
host = "host_{0}@backend_{0}".format(i)
zone = "zone_{0}".format(i)
volume_type = "type_{0}".format(i)
node_attributes = {
"host": host,
"zone": zone,
"status": 'enabled',
"state": 'up',
"volume_type": volume_type,
}
node = element.StorageNode(**node_attributes)
model.add_node(node)
for j in range(0, pool_count):
name = "host_{0}@backend_{0}#pool_{1}".format(i, j)
pool_attributes = {
"name": name,
"total_volumes": 2,
"total_capacity_gb": 500,
"free_capacity_gb": 420,
"provisioned_capacity_gb": 80,
"allocated_capacity_gb": 80,
"virtual_free": 420,
}
pool = element.Pool(**pool_attributes)
model.add_pool(pool)
mappings = [
("host_0@backend_0#pool_0", "host_0@backend_0"),
("host_0@backend_0#pool_1", "host_0@backend_0"),
("host_1@backend_1#pool_0", "host_1@backend_1"),
("host_1@backend_1#pool_1", "host_1@backend_1"),
]
for pool_name, node_name in mappings:
model.map_pool(
model.get_pool_by_pool_name(pool_name),
model.get_node_by_name(node_name),
)
for k in range(volume_count):
uuid = "VOLUME_{0}".format(k)
name = "name_{0}".format(k)
project_id = "project_{0}".format(k)
volume_attributes = {
"size": 40,
"status": "in-use",
"uuid": uuid,
"attachments":
'[{"server_id": "server","attachment_id": "attachment"}]',
"name": name,
"multiattach": 'True',
"snapshot_id": uuid,
"project_id": project_id,
"metadata": '{"readonly": false,"attached_mode": "rw"}',
"bootable": 'False'
}
volume = element.Volume(**volume_attributes)
model.add_volume(volume)
mappings = [
("VOLUME_0", "host_0@backend_0#pool_0"),
("VOLUME_1", "host_0@backend_0#pool_0"),
("VOLUME_2", "host_0@backend_0#pool_1"),
("VOLUME_3", "host_0@backend_0#pool_1"),
("VOLUME_4", "host_1@backend_1#pool_0"),
("VOLUME_5", "host_1@backend_1#pool_0"),
("VOLUME_6", "host_1@backend_1#pool_1"),
("VOLUME_7", "host_1@backend_1#pool_1"),
]
for volume_uuid, pool_name in mappings:
model.map_volume(
model.get_volume_by_uuid(volume_uuid),
model.get_pool_by_pool_name(pool_name),
)
return model
def generate_scenario_1(self):
return self.load_model('storage_scenario_1.xml')

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host1@backend1#pool1",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "capacity.host1@backend1#pool1",
"total": 3,
"free": 1,
"allocated": 2,
"provisioned": 2,
"virtual_free": 1,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,17 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"},
"glance_metadata": {}
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_0@backend_0#pool_0",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_0@backend_0#pool_0",
"total": 500,
"free": 460,
"allocated": 40,
"provisioned": 40,
"virtual_free": 460,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_2@backend_2#pool_0",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_2@backend_2#pool_0",
"total": 500,
"free": 460,
"allocated": 40,
"provisioned": 40,
"virtual_free": 460,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,14 @@
{
"priority": "INFO",
"publisher_id": "capacity.host_0@backend_0#pool_2",
"event_type": "capacity.pool",
"payload": {
"name_to_id": "host_0@backend_0#pool_2",
"total": 500,
"free": 380,
"allocated": 120,
"provisioned": 120,
"virtual_free": 380,
"reported_at": "2017-05-15T13:42:11Z"
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "error",
"volume_attachment": [],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.attach.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "in-use",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_2@backend_2#pool_0",
"event_type": "volume.create.end",
"payload": {
"host": "host_2@backend_2#pool_0",
"volume_id": "VOLUME_00",
"display_name": "name_00",
"size": "40",
"status": "available",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_00",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.delete.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "deleting",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.detach.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "40",
"status": "available",
"volume_attachment": [],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.resize.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_0",
"size": "20",
"status": "in-use",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -0,0 +1,16 @@
{
"priority": "INFO",
"publisher_id": "volume.host_0@backend_0#pool_0",
"event_type": "volume.update.end",
"payload": {
"host": "host_0@backend_0#pool_0",
"volume_id": "VOLUME_0",
"display_name": "name_01",
"size": "40",
"status": "enabled",
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}],
"snapshot_id": "",
"tenant_id": "project_0",
"metadata": {"readonly": false, "attached_mode": "rw"}
}
}

View File

@@ -17,6 +17,7 @@
# limitations under the License.
from watcher.common import service_manager
from watcher.decision_engine.model.notification import cinder as cnotification
from watcher.decision_engine.model.notification import nova as novanotification
from watcher.tests.decision_engine.model import faker_cluster_state
@@ -65,3 +66,20 @@ class FakeManager(service_manager.ServiceManager):
novanotification.LegacyLiveMigratedEnd(self.fake_cdmc),
novanotification.LegacyInstanceDeletedEnd(self.fake_cdmc),
]
class FakeStorageManager(FakeManager):
fake_cdmc = faker_cluster_state.FakerStorageModelCollector()
@property
def notification_endpoints(self):
return [
cnotification.CapacityNotificationEndpoint(self.fake_cdmc),
cnotification.VolumeCreateEnd(self.fake_cdmc),
cnotification.VolumeUpdateEnd(self.fake_cdmc),
cnotification.VolumeDeleteEnd(self.fake_cdmc),
cnotification.VolumeAttachEnd(self.fake_cdmc),
cnotification.VolumeDetachEnd(self.fake_cdmc),
cnotification.VolumeResizeEnd(self.fake_cdmc),
]

View File

@@ -0,0 +1,607 @@
# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import mock
from oslo_serialization import jsonutils
from watcher.common import cinder_helper
from watcher.common import context
from watcher.common import exception
from watcher.common import service as watcher_service
from watcher.db.sqlalchemy import api as db_api
from watcher.decision_engine.model.notification import cinder as cnotification
from watcher.tests import base as base_test
from watcher.tests.db import utils
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model.notification import fake_managers
class NotificationTestCase(base_test.TestCase):
@staticmethod
def load_message(filename):
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as json_file:
json_data = jsonutils.load(json_file)
return json_data
class TestReceiveCinderNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
def setUp(self):
super(TestReceiveCinderNotifications, self).setUp()
p_from_dict = mock.patch.object(context.RequestContext, 'from_dict')
m_from_dict = p_from_dict.start()
m_from_dict.return_value = self.context
self.addCleanup(p_from_dict.stop)
p_get_service_list = mock.patch.object(
db_api.Connection, 'get_service_list')
p_update_service = mock.patch.object(
db_api.Connection, 'update_service')
m_get_service_list = p_get_service_list.start()
m_update_service = p_update_service.start()
fake_service = utils.get_test_service(
created_at=datetime.datetime.utcnow())
m_get_service_list.return_value = [fake_service]
m_update_service.return_value = fake_service.copy()
self.addCleanup(p_get_service_list.stop)
self.addCleanup(p_update_service.stop)
@mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info')
def test_cinder_receive_capacity(self, m_info):
message = self.load_message('capacity.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'capacity.host1@backend1#pool1', 'capacity.pool',
expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeCreateEnd, 'info')
def test_cinder_receive_volume_create_end(self, m_info):
message = self.load_message('scenario_1_volume-create.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.create.end', expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeUpdateEnd, 'info')
def test_cinder_receive_volume_update_end(self, m_info):
message = self.load_message('scenario_1_volume-update.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.update.end', expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeAttachEnd, 'info')
def test_cinder_receive_volume_attach_end(self, m_info):
message = self.load_message('scenario_1_volume-attach.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.attach.end', expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeDetachEnd, 'info')
def test_cinder_receive_volume_detach_end(self, m_info):
message = self.load_message('scenario_1_volume-detach.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.detach.end', expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeResizeEnd, 'info')
def test_cinder_receive_volume_resize_end(self, m_info):
message = self.load_message('scenario_1_volume-resize.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.resize.end', expected_message, self.FAKE_METADATA)
@mock.patch.object(cnotification.VolumeDeleteEnd, 'info')
def test_cinder_receive_volume_delete_end(self, m_info):
message = self.load_message('scenario_1_volume-delete.json')
expected_message = message['payload']
de_service = watcher_service.Service(fake_managers.FakeStorageManager)
incoming = mock.Mock(ctxt=self.context.to_dict(), message=message)
de_service.notification_handler.dispatcher.dispatch(incoming)
m_info.assert_called_once_with(
self.context, 'volume.host_0@backend_0#pool_0',
'volume.delete.end', expected_message, self.FAKE_METADATA)


class TestCinderNotifications(NotificationTestCase):
FAKE_METADATA = {'message_id': None, 'timestamp': None}
def setUp(self):
super(TestCinderNotifications, self).setUp()
# fake cluster
self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector()
def test_cinder_capacity(self):
"""test consuming capacity"""
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
pool_0_name = 'host_0@backend_0#pool_0'
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
# before
self.assertEqual(pool_0_name, pool_0.name)
self.assertEqual(420, pool_0.free_capacity_gb)
self.assertEqual(420, pool_0.virtual_free)
self.assertEqual(80, pool_0.allocated_capacity_gb)
self.assertEqual(80, pool_0.provisioned_capacity_gb)
message = self.load_message('scenario_1_capacity.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# after
self.assertEqual(pool_0_name, pool_0.name)
self.assertEqual(460, pool_0.free_capacity_gb)
self.assertEqual(460, pool_0.virtual_free)
self.assertEqual(40, pool_0.allocated_capacity_gb)
self.assertEqual(40, pool_0.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_capacity_pool_notfound(self, m_cinder_helper):
"""test consuming capacity, new pool in existing node"""
# storage_pool_by_name mock
return_mock = mock.Mock()
return_mock.configure_mock(
name='host_0@backend_0#pool_2',
total_volumes='2',
total_capacity_gb='500',
free_capacity_gb='380',
provisioned_capacity_gb='120',
allocated_capacity_gb='120')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_mock)
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
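        # The endpoint is expected to fall back to CinderHelper (mocked here)
        # to fetch pool statistics when the pool named in the notification is
        # not yet present in the storage model.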
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
message = self.load_message('scenario_1_capacity_pool_notfound.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # after consuming the message, pool_0 still exists
pool_0_name = 'host_0@backend_0#pool_0'
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
self.assertEqual(pool_0_name, pool_0.name)
self.assertEqual(420, pool_0.free_capacity_gb)
self.assertEqual(420, pool_0.virtual_free)
self.assertEqual(80, pool_0.allocated_capacity_gb)
self.assertEqual(80, pool_0.provisioned_capacity_gb)
# new pool was added
pool_1_name = 'host_0@backend_0#pool_2'
m_get_storage_pool_by_name.assert_called_once_with(pool_1_name)
storage_node = storage_model.get_node_by_pool_name(pool_1_name)
self.assertEqual('host_0@backend_0', storage_node.host)
pool_1 = storage_model.get_pool_by_pool_name(pool_1_name)
self.assertEqual(pool_1_name, pool_1.name)
self.assertEqual(500, pool_1.total_capacity_gb)
self.assertEqual(380, pool_1.free_capacity_gb)
self.assertEqual(120, pool_1.allocated_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_capacity_node_notfound(self, m_cinder_helper):
"""test consuming capacity, new pool in new node"""
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_2@backend_2#pool_0',
total_volumes='2',
total_capacity_gb='500',
free_capacity_gb='460',
provisioned_capacity_gb='40',
allocated_capacity_gb='40')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
# storage_node_by_name mock
return_node_mock = mock.Mock()
return_node_mock.configure_mock(
host='host_2@backend_2',
zone='nova',
state='up',
status='enabled')
m_get_storage_node_by_name = mock.Mock(
side_effect=lambda name: return_node_mock)
m_get_volume_type_by_backendname = mock.Mock(
side_effect=lambda name: mock.Mock('backend_2'))
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name,
get_storage_node_by_name=m_get_storage_node_by_name,
get_volume_type_by_backendname=m_get_volume_type_by_backendname)
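        # When neither the pool nor its node is in the model, the endpoint is
        # expected to resolve both through CinderHelper and to look up the
        # volume type registered for the new backend.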
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc)
message = self.load_message('scenario_1_capacity_node_notfound.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # new pool and new node were added
node_1_name = 'host_2@backend_2'
pool_1_name = node_1_name + '#pool_0'
volume_type = 'backend_2'
m_get_storage_pool_by_name.assert_called_once_with(pool_1_name)
m_get_storage_node_by_name.assert_called_once_with(node_1_name)
m_get_volume_type_by_backendname.assert_called_once_with(volume_type)
# new node was added
storage_node = storage_model.get_node_by_pool_name(pool_1_name)
self.assertEqual('host_2@backend_2', storage_node.host)
# new pool was added
pool_1 = storage_model.get_pool_by_pool_name(pool_1_name)
self.assertEqual(pool_1_name, pool_1.name)
self.assertEqual(500, pool_1.total_capacity_gb)
self.assertEqual(460, pool_1.free_capacity_gb)
self.assertEqual(40, pool_1.allocated_capacity_gb)
self.assertEqual(40, pool_1.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_create(self, m_cinder_helper):
"""test creating volume in existing pool and node"""
# create storage_pool_by_name mock
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_0@backend_0#pool_0',
total_volumes='3',
total_capacity_gb='500',
free_capacity_gb='380',
provisioned_capacity_gb='120',
allocated_capacity_gb='120')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
message = self.load_message('scenario_1_volume-create.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # check that volume_00 was added to the model
volume_00_name = 'VOLUME_00'
volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
self.assertEqual(volume_00_name, volume_00.uuid)
self.assertFalse(volume_00.bootable)
# check that capacity was updated
pool_0_name = 'host_0@backend_0#pool_0'
m_get_storage_pool_by_name.assert_called_once_with(pool_0_name)
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
self.assertEqual(pool_0.name, pool_0_name)
self.assertEqual(3, pool_0.total_volumes)
self.assertEqual(380, pool_0.free_capacity_gb)
self.assertEqual(120, pool_0.allocated_capacity_gb)
self.assertEqual(120, pool_0.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_bootable_volume_create(self, m_cinder_helper):
"""test creating bootable volume in existing pool and node"""
# create storage_pool_by_name mock
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_0@backend_0#pool_0',
total_volumes='3',
total_capacity_gb='500',
free_capacity_gb='380',
provisioned_capacity_gb='120',
allocated_capacity_gb='120')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
message = self.load_message('scenario_1_bootable-volume-create.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # check that volume_00 was added to the model
volume_00_name = 'VOLUME_00'
volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
self.assertEqual(volume_00_name, volume_00.uuid)
self.assertTrue(volume_00.bootable)
# check that capacity was updated
pool_0_name = 'host_0@backend_0#pool_0'
m_get_storage_pool_by_name.assert_called_once_with(pool_0_name)
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
self.assertEqual(pool_0.name, pool_0_name)
self.assertEqual(3, pool_0.total_volumes)
self.assertEqual(380, pool_0.free_capacity_gb)
self.assertEqual(120, pool_0.allocated_capacity_gb)
self.assertEqual(120, pool_0.provisioned_capacity_gb)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_create_pool_notfound(self, m_cinder_helper):
"""check creating volume in not existing pool and node"""
# get_storage_pool_by_name mock
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_2@backend_2#pool_0',
total_volumes='1',
total_capacity_gb='500',
free_capacity_gb='460',
provisioned_capacity_gb='40',
allocated_capacity_gb='40')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
# create storage_node_by_name mock
return_node_mock = mock.Mock()
return_node_mock.configure_mock(
host='host_2@backend_2',
zone='nova',
state='up',
status='enabled')
m_get_storage_node_by_name = mock.Mock(
side_effect=lambda name: return_node_mock)
m_get_volume_type_by_backendname = mock.Mock(
side_effect=lambda name: mock.Mock('backend_2'))
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name,
get_storage_node_by_name=m_get_storage_node_by_name,
get_volume_type_by_backendname=m_get_volume_type_by_backendname)
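        # As in the capacity case, the endpoint is expected to query
        # CinderHelper for the unknown pool and node before adding the
        # newly created volume to the model.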
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
message = self.load_message(
'scenario_1_volume-create_pool_notfound.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # check that volume_00 was added to the model
volume_00_name = 'VOLUME_00'
volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
self.assertEqual(volume_00_name, volume_00.uuid)
# check that capacity was updated
node_2_name = 'host_2@backend_2'
pool_0_name = node_2_name + '#pool_0'
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
self.assertEqual(pool_0.name, pool_0_name)
self.assertEqual(1, pool_0.total_volumes)
self.assertEqual(460, pool_0.free_capacity_gb)
self.assertEqual(40, pool_0.allocated_capacity_gb)
self.assertEqual(40, pool_0.provisioned_capacity_gb)
# check that node was added
m_get_storage_node_by_name.assert_called_once_with(node_2_name)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_error_volume_unmapped(self, m_cinder_helper):
"""test creating error volume unmapped"""
m_get_storage_pool_by_name = mock.Mock(
side_effect=exception.PoolNotFound(name="TEST"))
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
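        # The pool lookup raises PoolNotFound, so the handler cannot map the
        # volume; the error volume should still be added to the model.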
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeCreateEnd(self.fake_cdmc)
message = self.load_message('scenario_1_error-volume-create.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # get_storage_pool_by_name must not be called for an error volume
m_get_storage_pool_by_name.assert_not_called()
        # check that volume_00 was added to the model
volume_00_name = 'VOLUME_00'
volume_00 = storage_model.get_volume_by_uuid(volume_00_name)
self.assertEqual(volume_00_name, volume_00.uuid)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_update(self, m_cinder_helper):
"""test updating volume in existing pool and node"""
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeUpdateEnd(self.fake_cdmc)
volume_0_name = 'VOLUME_0'
volume_0 = storage_model.get_volume_by_uuid(volume_0_name)
self.assertEqual('name_0', volume_0.name)
        # create get_storage_pool_by_name mock
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_0@backend_0#pool_0',
total_volumes='2',
total_capacity_gb='500',
free_capacity_gb='420',
provisioned_capacity_gb='80',
allocated_capacity_gb='80')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
message = self.load_message('scenario_1_volume-update.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
# check that name of volume_0 was updated in the model
volume_0 = storage_model.get_volume_by_uuid(volume_0_name)
self.assertEqual('name_01', volume_0.name)
@mock.patch.object(cinder_helper, 'CinderHelper')
def test_cinder_volume_delete(self, m_cinder_helper):
"""test deleting volume"""
        # create get_storage_pool_by_name mock
return_pool_mock = mock.Mock()
return_pool_mock.configure_mock(
name='host_0@backend_0#pool_0',
total_volumes='1',
total_capacity_gb='500',
free_capacity_gb='460',
provisioned_capacity_gb='40',
allocated_capacity_gb='40')
m_get_storage_pool_by_name = mock.Mock(
side_effect=lambda name: return_pool_mock)
m_cinder_helper.return_value = mock.Mock(
get_storage_pool_by_name=m_get_storage_pool_by_name)
storage_model = self.fake_cdmc.generate_scenario_1()
self.fake_cdmc.cluster_data_model = storage_model
handler = cnotification.VolumeDeleteEnd(self.fake_cdmc)
# volume exists before consuming
volume_0_uuid = 'VOLUME_0'
volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid)
self.assertEqual(volume_0_uuid, volume_0.uuid)
message = self.load_message('scenario_1_volume-delete.json')
handler.info(
ctxt=self.context,
publisher_id=message['publisher_id'],
event_type=message['event_type'],
payload=message['payload'],
metadata=self.FAKE_METADATA,
)
        # volume does not exist after consuming the message
self.assertRaises(
exception.VolumeNotFound,
storage_model.get_volume_by_uuid, volume_0_uuid)
# check that capacity was updated
pool_0_name = 'host_0@backend_0#pool_0'
m_get_storage_pool_by_name.assert_called_once_with(pool_0_name)
pool_0 = storage_model.get_pool_by_pool_name(pool_0_name)
self.assertEqual(pool_0.name, pool_0_name)
self.assertEqual(1, pool_0.total_volumes)
self.assertEqual(460, pool_0.free_capacity_gb)
self.assertEqual(40, pool_0.allocated_capacity_gb)
self.assertEqual(40, pool_0.provisioned_capacity_gb)

View File

@ -70,3 +70,85 @@ class TestElement(base.TestCase):
def test_as_xml_element(self):
el = self.cls(**self.data)
el.as_xml_element()


class TestStorageElement(base.TestCase):
scenarios = [
("StorageNode_with_all_fields", dict(
cls=element.StorageNode,
data={
'host': 'host@backend',
'zone': 'zone',
'status': 'enabled',
'state': 'up',
'volume_type': 'volume_type',
})),
("Pool_with_all_fields", dict(
cls=element.Pool,
data={
'name': 'host@backend#pool',
'total_volumes': 1,
'total_capacity_gb': 500,
'free_capacity_gb': 420,
'provisioned_capacity_gb': 80,
'allocated_capacity_gb': 80,
'virtual_free': 420,
})),
("Pool_without_virtual_free_fields", dict(
cls=element.Pool,
data={
'name': 'host@backend#pool',
'total_volumes': 1,
'total_capacity_gb': 500,
'free_capacity_gb': 420,
'provisioned_capacity_gb': 80,
'allocated_capacity_gb': 80,
})),
("Volume_with_all_fields", dict(
cls=element.Volume,
data={
'uuid': 'FAKE_UUID',
'size': 1,
'status': 'in-use',
'attachments': '[{"key": "value"}]',
'name': 'name',
'multiattach': 'false',
'snapshot_id': '',
'project_id': 'project_id',
'metadata': '{"key": "value"}',
'bootable': 'false',
'human_id': 'human_id',
})),
("Volume_without_bootable_fields", dict(
cls=element.Volume,
data={
'uuid': 'FAKE_UUID',
'size': 1,
'status': 'in-use',
'attachments': '[]',
'name': 'name',
'multiattach': 'false',
'snapshot_id': '',
'project_id': 'project_id',
'metadata': '{"key": "value"}',
'human_id': 'human_id',
})),
("Volume_without_human_id_fields", dict(
cls=element.Volume,
data={
'uuid': 'FAKE_UUID',
'size': 1,
'status': 'in-use',
'attachments': '[]',
'name': 'name',
'multiattach': 'false',
'snapshot_id': '',
'project_id': 'project_id',
'metadata': '{"key": "value"}',
})),
]
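
    # Smoke test: each scenario instantiates its element class with the given
    # data and verifies that as_xml_element() runs without raising.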
def test_as_xml_element(self):
el = self.cls(**self.data)
el.as_xml_element()

View File

@ -63,6 +63,21 @@ class TestModel(base.TestCase):
model = model_root.ModelRoot.from_xml(struct_str)
self.assertEqual(expected_model.to_string(), model.to_string())
def test_get_node_by_instance_uuid(self):
model = model_root.ModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
node = element.ComputeNode(id=1)
node.uuid = uuid_
model.add_node(node)
self.assertEqual(node, model.get_node_by_uuid(uuid_))
uuid_ = "{0}".format(uuidutils.generate_uuid())
instance = element.Instance(id=1)
instance.uuid = uuid_
model.add_instance(instance)
self.assertEqual(instance, model.get_instance_by_uuid(uuid_))
model.map_instance(instance, node)
self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid))
def test_add_node(self):
model = model_root.ModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
@ -151,3 +166,204 @@ class TestModel(base.TestCase):
model = model_root.ModelRoot()
self.assertRaises(exception.IllegalArgumentException,
model.assert_instance, "valeur_qcq")


class TestStorageModel(base.TestCase):
def load_data(self, filename):
cwd = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(cwd, "data")
with open(os.path.join(data_folder, filename), 'rb') as xml_file:
xml_data = xml_file.read()
return xml_data
def load_model(self, filename):
return model_root.StorageModelRoot.from_xml(self.load_data(filename))
def test_model_structure(self):
fake_cluster = faker_cluster_state.FakerStorageModelCollector()
model1 = fake_cluster.build_scenario_1()
self.assertEqual(2, len(model1.get_all_storage_nodes()))
self.assertEqual(9, len(model1.get_all_volumes()))
self.assertEqual(12, len(model1.edges()))
expected_struct_str = self.load_data('storage_scenario_1.xml')
model2 = model_root.StorageModelRoot.from_xml(expected_struct_str)
self.assertTrue(
model_root.StorageModelRoot.is_isomorphic(model2, model1))
def test_build_model_from_xml(self):
fake_cluster = faker_cluster_state.FakerStorageModelCollector()
expected_model = fake_cluster.generate_scenario_1()
struct_str = self.load_data('storage_scenario_1.xml')
model = model_root.StorageModelRoot.from_xml(struct_str)
self.assertEqual(expected_model.to_string(), model.to_string())
def test_assert_node_raise(self):
model = model_root.StorageModelRoot()
node = element.StorageNode(host="host@backend")
model.add_node(node)
self.assertRaises(exception.IllegalArgumentException,
model.assert_node, "obj")
def test_assert_pool_raise(self):
model = model_root.StorageModelRoot()
pool = element.Pool(name="host@backend#pool")
model.add_pool(pool)
self.assertRaises(exception.IllegalArgumentException,
model.assert_pool, "obj")
def test_assert_volume_raise(self):
model = model_root.StorageModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertRaises(exception.IllegalArgumentException,
model.assert_volume, "obj")
def test_add_node(self):
model = model_root.StorageModelRoot()
hostname = "host@backend"
node = element.StorageNode(host=hostname)
model.add_node(node)
self.assertEqual(node, model.get_node_by_name(hostname))
def test_add_pool(self):
model = model_root.StorageModelRoot()
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
def test_remove_node(self):
model = model_root.StorageModelRoot()
hostname = "host@backend"
node = element.StorageNode(host=hostname)
model.add_node(node)
self.assertEqual(node, model.get_node_by_name(hostname))
model.remove_node(node)
self.assertRaises(exception.StorageNodeNotFound,
model.get_node_by_name, hostname)
def test_remove_pool(self):
model = model_root.StorageModelRoot()
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
model.remove_pool(pool)
self.assertRaises(exception.PoolNotFound,
model.get_pool_by_pool_name, pool_name)
def test_map_unmap_pool(self):
model = model_root.StorageModelRoot()
hostname = "host@backend"
node = element.StorageNode(host=hostname)
model.add_node(node)
self.assertEqual(node, model.get_node_by_name(hostname))
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
model.map_pool(pool, node)
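        # map_pool adds an edge from the pool to its node, so the pool name
        # shows up among the node's predecessors in the underlying graph.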
self.assertTrue(pool.name in model.predecessors(node.host))
model.unmap_pool(pool, node)
self.assertFalse(pool.name in model.predecessors(node.host))
def test_add_volume(self):
model = model_root.StorageModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
def test_remove_volume(self):
model = model_root.StorageModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
model.remove_volume(volume)
self.assertRaises(exception.VolumeNotFound,
model.get_volume_by_uuid, uuid_)
def test_map_unmap_volume(self):
model = model_root.StorageModelRoot()
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
model.map_volume(volume, pool)
self.assertTrue(volume.uuid in model.predecessors(pool.name))
model.unmap_volume(volume, pool)
self.assertFalse(volume.uuid in model.predecessors(pool.name))
def test_get_all_storage_nodes(self):
model = model_root.StorageModelRoot()
for i in range(10):
hostname = "host_{0}".format(i)
node = element.StorageNode(host=hostname)
model.add_node(node)
all_nodes = model.get_all_storage_nodes()
for hostname in all_nodes:
node = model.get_node_by_name(hostname)
model.assert_node(node)
def test_get_all_volumes(self):
model = model_root.StorageModelRoot()
for id_ in range(10):
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
all_volumes = model.get_all_volumes()
for vol in all_volumes:
volume = model.get_volume_by_uuid(vol)
model.assert_volume(volume)
def test_get_node_pools(self):
model = model_root.StorageModelRoot()
hostname = "host@backend"
node = element.StorageNode(host=hostname)
model.add_node(node)
self.assertEqual(node, model.get_node_by_name(hostname))
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
model.map_pool(pool, node)
self.assertEqual([pool], model.get_node_pools(node))
def test_get_pool_by_volume(self):
model = model_root.StorageModelRoot()
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
model.map_volume(volume, pool)
self.assertEqual(pool, model.get_pool_by_volume(volume))
def test_get_pool_volumes(self):
model = model_root.StorageModelRoot()
pool_name = "host@backend#pool"
pool = element.Pool(name=pool_name)
model.add_pool(pool)
self.assertEqual(pool, model.get_pool_by_pool_name(pool_name))
uuid_ = "{0}".format(uuidutils.generate_uuid())
volume = element.Volume(uuid=uuid_)
model.add_volume(volume)
self.assertEqual(volume, model.get_volume_by_uuid(uuid_))
model.map_volume(volume, pool)
self.assertEqual([volume], model.get_pool_volumes(pool))