Extract volume_manager from nailgun core

* removed node_extension_call from the Nailgun
  core source code
* removed the volume_manager property from the Node model
* moved volume- and disk-related data manipulation into
  the volume manager extension pipeline
* removed tests that are no longer valid
* added a new extension callback for the pre-deployment check
* fixed some tests
* moved volume_manager-specific tests to the volume_manager module
* marked as skipped some tests which are no longer valid in
  their current location but are still valuable and should be
  moved to the volume_manager module in follow-up patches

implements: blueprint data-pipeline
Change-Id: I8edd25166e5eccf914eb92882b6b4a7b3fff6a89
Sylwester Brzeczkowski 2016-03-11 16:23:06 +01:00
parent 4684e0e3e1
commit 5d1d1fe960
20 changed files with 383 additions and 298 deletions
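
At a high level, this commit replaces ad-hoc node_extension_call lookups with extension data pipelines: the serializers build the raw provisioning/deployment data, then let every enabled extension rewrite it through a callback. A minimal sketch of the new flow, assuming the fire_callback_* helper takes and returns the serialized data (build_raw_provisioning_data is a hypothetical placeholder for the serializer internals):

    from nailgun.extensions import \
        fire_callback_on_provisioning_data_serialization

    def serialize_provisioning(cluster, nodes):
        data = build_raw_provisioning_data(cluster, nodes)  # hypothetical
        # Each extension pipeline may rewrite the serialized data; e.g. the
        # volume_manager extension injects 'ks_spaces' for every node.
        return fire_callback_on_provisioning_data_serialization(
            data, cluster, nodes)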

View File

@ -34,7 +34,6 @@ from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
from nailgun.db.sqlalchemy.models.mutable import MutableDict
from nailgun.db.sqlalchemy.models.mutable import MutableList
from nailgun.extensions.volume_manager.manager import VolumeManager
from nailgun.logger import logger
@ -144,14 +143,6 @@ class Node(Base):
from nailgun.extensions.network_manager.manager import NetworkManager
return NetworkManager.get_node_networks(self)
@property
def volume_manager(self):
# TODO(eli): will be moved into an extension.
# Should be done as a part of blueprint:
# https://blueprints.launchpad.net/fuel/+spec
# /volume-manager-refactoring
return VolumeManager(self)
@property
def needs_reprovision(self):
return self.status == 'error' and self.error_type == 'provision' and \

View File

@ -18,7 +18,7 @@ from nailgun.extensions.base import BaseExtension
from nailgun.extensions.base import BasePipeline
from nailgun.extensions.manager import get_extension
from nailgun.extensions.manager import get_all_extensions
from nailgun.extensions.manager import node_extension_call
from nailgun.extensions.manager import fire_callback_on_before_deployment_check
from nailgun.extensions.manager import fire_callback_on_node_delete
from nailgun.extensions.manager import fire_callback_on_node_collection_delete
from nailgun.extensions.manager import fire_callback_on_node_create
@ -29,3 +29,4 @@ from nailgun.extensions.manager import \
fire_callback_on_deployment_data_serialization
from nailgun.extensions.manager import \
fire_callback_on_provisioning_data_serialization
from nailgun.extensions.manager import node_extension_call

View File

@ -51,21 +51,26 @@ class BaseExtension(object):
"handler": HandlerClass
}
]
urls = []
Specify a list of calls which the extension provides.
This list is required for core and other extensions
to find an extension with specific functionality.
If an extension needs to manipulate provisioning or deployment
data, it should define a data_pipelines list of BasePipeline sub-classes:
data_pipelines = [
ExamplePipelineClass,
ExamplePipelineClass2,
]
Specify a list of calls which the extension provides (not required):
provides = [
'method_1',
'method_2',
]
"""
"""
urls = []
provides = []
data_pipelines = []
provides = []
@classmethod
def alembic_migrations_path(cls):
@ -123,3 +128,7 @@ class BaseExtension(object):
@classmethod
def on_cluster_delete(cls, cluster):
"""Callback which gets executed when cluster is deleted"""
@classmethod
def on_before_deployment_check(cls, cluster):
"""Callback which gets executed when "before deployment check" runs"""

View File

@ -72,8 +72,6 @@ def _get_extension_by_node(call_name, node):
def node_extension_call(call_name, node, *args, **kwargs):
# NOTE(sbrzeczkowski): should be removed once data-pipeline blueprint is
# done: https://blueprints.launchpad.net/fuel/+spec/data-pipeline
extension = _get_extension_by_node(call_name, node)
return getattr(extension, call_name)(node, *args, **kwargs)
@ -108,6 +106,11 @@ def fire_callback_on_cluster_delete(cluster):
extension.on_cluster_delete(cluster)
def fire_callback_on_before_deployment_check(cluster):
for extension in get_all_extensions():
extension.on_before_deployment_check(cluster)
def _collect_data_pipelines_for_cluster(cluster):
extensions = set(cluster.extensions)
return chain.from_iterable(e.data_pipelines for e in get_all_extensions()
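
The truncated _collect_data_pipelines_for_cluster above flattens the data_pipelines lists of the enabled extensions. The chaining itself is plain itertools; a standalone illustration with stand-in extension classes:

    from itertools import chain

    class ExtA(object):
        data_pipelines = ['pipeline_1', 'pipeline_2']

    class ExtB(object):
        data_pipelines = ['pipeline_3']

    # Flattens [['pipeline_1', 'pipeline_2'], ['pipeline_3']] into a
    # single iterator, preserving extension order.
    flat = chain.from_iterable(e.data_pipelines for e in (ExtA, ExtB))
    assert list(flat) == ['pipeline_1', 'pipeline_2', 'pipeline_3']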

View File

@ -14,43 +14,27 @@
# License for the specific language governing permissions and limitations
# under the License.
from distutils.version import StrictVersion
import os
import six
from nailgun import consts
from nailgun import errors
from nailgun.extensions import BaseExtension
from nailgun.extensions import BasePipeline
from nailgun.logger import logger
from nailgun.objects import Node
from nailgun.objects import Notification
from nailgun import objects
from nailgun.utils.ceph import get_pool_pg_count
from .handlers.disks import NodeDefaultsDisksHandler
from .handlers.disks import NodeDisksHandler
from .handlers.disks import NodeVolumesInformationHandler
from .manager import calc_glance_cache_size
from .manager import VolumeManager
class VolumeManagerExtension(BaseExtension):
name = 'volume_manager'
version = '1.0.0'
provides = [
'get_node_volumes',
'set_node_volumes',
'set_default_node_volumes']
description = "Volume Manager Extension"
@classmethod
def alembic_migrations_path(cls):
return os.path.join(os.path.dirname(__file__),
'alembic_migrations', 'migrations')
urls = [
{'uri': r'/nodes/(?P<node_id>\d+)/disks/?$',
'handler': NodeDisksHandler},
{'uri': r'/nodes/(?P<node_id>\d+)/disks/defaults/?$',
'handler': NodeDefaultsDisksHandler},
{'uri': r'/nodes/(?P<node_id>\d+)/volumes/?$',
'handler': NodeVolumesInformationHandler}]
class VolumeObjectMethodsMixin(object):
@classmethod
def get_node_volumes(cls, node):
@ -72,13 +56,147 @@ class VolumeManagerExtension(BaseExtension):
logger.exception(exc)
msg = "Failed to generate volumes for node '{0}': '{1}'".format(
node.human_readable_name, six.text_type(exc))
Notification.create({
objects.Notification.create({
'topic': 'error',
'message': msg,
'node_id': node.id})
if node.cluster_id:
Node.add_pending_change(node, 'disks')
objects.Node.add_pending_change(node, 'disks')
class NodeVolumesPipeline(VolumeObjectMethodsMixin, BasePipeline):
@classmethod
def process_provisioning(cls, provisioning_data, cluster, nodes, **kwargs):
nodes_db = {node.id: node for node in nodes}
for node in provisioning_data['nodes']:
volumes = cls.get_node_volumes(nodes_db[int(node['uid'])])
node['ks_meta']['pm_data']['ks_spaces'] = volumes
return provisioning_data
@classmethod
def process_deployment(cls, deployment_data, cluster, nodes, **kwargs):
if (StrictVersion(cluster.release.environment_version) >=
StrictVersion('8.0')):
nodes_wo_master = six.moves.filter(
lambda n: n['uid'] != consts.MASTER_NODE_UID,
deployment_data)
nodes_dict = {int(node['uid']): node for node in nodes_wo_master}
for node in nodes:
volumes = cls.get_node_volumes(node)
nodes_dict[node.id]['node_volumes'] = volumes
return deployment_data
class PgCountPipeline(VolumeObjectMethodsMixin, BasePipeline):
@classmethod
def process_deployment(cls, deployment_data, cluster, nodes, **kwargs):
cls._set_pg_count_storage_parameters(deployment_data, nodes)
return deployment_data
@classmethod
def _set_pg_count_storage_parameters(cls, data, nodes):
"""Generate pg_num
pg_num is generated as the number of OSDs across the cluster
multiplied by 100, divided by Ceph replication factor, and
rounded up to the nearest power of 2.
"""
osd_num = 0
osd_nodes = [node for node in nodes
if 'ceph-osd' in node.all_roles]
for node in osd_nodes:
for disk in cls.get_node_volumes(node):
for part in disk.get('volumes', []):
if part.get('name') == 'ceph' and part.get('size', 0) > 0:
osd_num += 1
for node in data:
storage_attrs = node['storage']
pg_counts = get_pool_pg_count(
osd_num=osd_num,
pool_sz=int(storage_attrs['osd_pool_size']),
ceph_version='firefly',
volumes_ceph=storage_attrs['volumes_ceph'],
objects_ceph=storage_attrs['objects_ceph'],
ephemeral_ceph=storage_attrs['ephemeral_ceph'],
images_ceph=storage_attrs['images_ceph'],
emulate_pre_7_0=False)
# Log {pool_name: pg_count} mapping
pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
logger.debug("Ceph: PG values {%s}", pg_str)
storage_attrs['pg_num'] = pg_counts['default_pg_num']
storage_attrs['per_pool_pg_nums'] = pg_counts
class SetImageCacheMaxSizePipeline(VolumeObjectMethodsMixin, BasePipeline):
@classmethod
def process_deployment(cls, deployment_data, cluster, nodes, **kwargs):
nodes_wo_master = six.moves.filter(
lambda n: n['uid'] != consts.MASTER_NODE_UID,
deployment_data)
cls._set_image_cache_max_size(nodes_wo_master, cluster, nodes)
return deployment_data
@classmethod
def _set_image_cache_max_size(cls, data, cluster, nodes):
nodes_db = {node.id: node for node in nodes}
editable_attrs = objects.Cluster.get_editable_attributes(cluster)
images_ceph = editable_attrs['storage']['images_ceph']['value']
for node in data:
if images_ceph:
image_cache_max_size = '0'
else:
volumes = cls.get_node_volumes(nodes_db[int(node['uid'])])
image_cache_max_size = calc_glance_cache_size(volumes)
node.setdefault(
'glance', {})['image_cache_max_size'] = image_cache_max_size
class VolumeManagerExtension(VolumeObjectMethodsMixin, BaseExtension):
name = 'volume_manager'
version = '1.0.0'
description = "Volume Manager Extension"
data_pipelines = [
NodeVolumesPipeline,
PgCountPipeline,
SetImageCacheMaxSizePipeline,
]
provides = [
'get_node_volumes',
'set_node_volumes',
'set_default_node_volumes'
]
@classmethod
def alembic_migrations_path(cls):
return os.path.join(os.path.dirname(__file__),
'alembic_migrations', 'migrations')
urls = [
{'uri': r'/nodes/(?P<node_id>\d+)/disks/?$',
'handler': NodeDisksHandler},
{'uri': r'/nodes/(?P<node_id>\d+)/disks/defaults/?$',
'handler': NodeDefaultsDisksHandler},
{'uri': r'/nodes/(?P<node_id>\d+)/volumes/?$',
'handler': NodeVolumesInformationHandler}]
@classmethod
def on_node_create(cls, node):
@ -101,3 +219,46 @@ class VolumeManagerExtension(BaseExtension):
def on_node_collection_delete(cls, node_ids):
from .objects.volumes import VolumeObject
VolumeObject.delete_by_node_ids(node_ids)
@classmethod
def on_before_deployment_check(cls, cluster):
cls._check_disks(cluster)
cls._check_volumes(cluster)
@classmethod
def _is_disk_checking_required(cls, node):
"""True if node disk requires checking
:param node: Node (model) instance
:returns: bool
"""
if (node.status in (consts.NODE_STATUSES.ready,
consts.NODE_STATUSES.deploying,
consts.NODE_STATUSES.provisioned) or
(node.status == consts.NODE_STATUSES.error and
node.error_type != consts.NODE_ERRORS.provision)):
return False
return True
@classmethod
def _check_disks(cls, cluster):
try:
for node in cluster.nodes:
if cls._is_disk_checking_required(node):
VolumeManager(node).check_disk_space_for_deployment()
except errors.NotEnoughFreeSpace:
raise errors.NotEnoughFreeSpace(
u"Node '{}' has insufficient disk space".format(
node.human_readable_name))
@classmethod
def _check_volumes(cls, cluster):
try:
for node in cluster.nodes:
if cls._is_disk_checking_required(node):
VolumeManager(node).check_volume_sizes_for_deployment()
except errors.NotEnoughFreeSpace as e:
raise errors.NotEnoughFreeSpace(
u"Node '{}' has insufficient disk space\n{}".format(
node.human_readable_name, e.message))
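
The pg_num rule quoted in PgCountPipeline's docstring is easy to sanity-check by hand. The helper below only illustrates that rule; it is not the real get_pool_pg_count implementation:

    import math

    def approx_pg_num(osd_count, replication_factor):
        # osds * 100 / replicas, rounded up to the nearest power of two.
        raw = osd_count * 100.0 / replication_factor
        return 2 ** int(math.ceil(math.log(raw, 2)))

    # 10 OSDs, 3 replicas: 1000 / 3 = 333.3; the next power of two is 512.
    assert approx_pg_num(10, 3) == 512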

View File

@ -19,6 +19,7 @@ Handlers dealing with disks
"""
from ..manager import DisksFormatConvertor
from ..manager import VolumeManager
from ..validators.disks import NodeDisksValidator
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import content
@ -84,7 +85,7 @@ class NodeDefaultsDisksHandler(BaseHandler):
node = self.get_object_or_404(objects.Node, node_id)
volumes = DisksFormatConvertor.format_disks_to_simple(
node.volume_manager.gen_volumes_info())
VolumeManager(node).gen_volumes_info())
return volumes

View File

@ -224,7 +224,7 @@ class DisksFormatConvertor(object):
@classmethod
def format_disks_to_full(cls, node, disks):
"""Convert disks from simple format to full format."""
volume_manager = node.volume_manager
volume_manager = VolumeManager(node)
for disk in disks:
for volume in disk['volumes']:
volume_manager.set_volume_size(disk['id'],
@ -327,7 +327,7 @@ class DisksFormatConvertor(object):
volumes_info = []
for space in get_node_spaces(node):
# Here we calculate min_size of nodes
min_size = node.volume_manager.expand_generators(
min_size = VolumeManager(node).expand_generators(
space)['min_size']
volumes_info.append({
@ -593,9 +593,10 @@ class VolumeManager(object):
self.node_name = node.name
# Make sure that we don't change volumes directly from manager
from .extension import VolumeManagerExtension
from .objects.volumes import VolumeObject
self.volumes = deepcopy(
VolumeManagerExtension.get_node_volumes(node)) or []
VolumeObject.get_volumes(node)) or []
# For swap calculation
self.ram = node.ram
self.allowed_volumes = node.get_node_spaces()
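
The deepcopy above is what keeps VolumeManager from mutating the volumes persisted by the extension; a toy illustration of the aliasing hazard it avoids:

    from copy import deepcopy

    stored = [{'id': 'sda', 'volumes': []}]
    aliased = stored          # same underlying dicts as 'stored'
    safe = deepcopy(stored)   # independent copy, as VolumeManager makes

    safe[0]['volumes'].append({'name': 'os'})
    assert stored[0]['volumes'] == []  # deepcopy protected the original
    assert aliased is stored           # plain assignment would not have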

View File

@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nailgun.extensions import fire_callback_on_before_deployment_check
from nailgun.extensions.volume_manager.extension import VolumeManagerExtension
from nailgun.extensions.volume_manager.manager import VolumeManager
from nailgun.test.base import BaseTestCase
class TestCheckBeforeDeploymentCallback(BaseTestCase):
def setUp(self):
super(TestCheckBeforeDeploymentCallback, self).setUp()
self.env.create(
release_kwargs={'version': '1111-8.0'},
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'
},
nodes_kwargs=[{'roles': ['controller']}])
self.env.create_node()
self.cluster = self.env.clusters[0]
self.node = self.cluster.nodes[0]
def is_checking_required(self):
return VolumeManagerExtension._is_disk_checking_required(self.node)
def test_is_disk_checking_required(self):
self.node.status = 'ready'
self.assertFalse(self.is_checking_required())
self.node.status = 'deploying'
self.assertFalse(self.is_checking_required())
self.node.status = 'discover'
self.assertTrue(self.is_checking_required())
self.node.status = 'provisioned'
self.assertFalse(self.is_checking_required())
def test_is_disk_checking_required_in_case_of_error(self):
self.node.status = 'error'
self.node.error_type = 'provision'
self.assertTrue(self.is_checking_required())
self.node.error_type = 'deploy'
self.assertFalse(self.is_checking_required())
def test_check_volumes_and_disks_do_not_run_if_node_ready(self):
self.node.status = 'ready'
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
fire_callback_on_before_deployment_check(self.cluster)
self.assertFalse(check_mock.called)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
fire_callback_on_before_deployment_check(self.cluster)
self.assertFalse(check_mock.called)
def test_check_volumes_and_disks_run_if_node_not_ready(self):
self.node.status = 'discover'
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
fire_callback_on_before_deployment_check(self.cluster)
self.assertEqual(check_mock.call_count, 1)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
fire_callback_on_before_deployment_check(self.cluster)
self.assertEqual(check_mock.call_count, 1)

View File

@ -12,11 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
# TODO(eli): All extension specific tests will be moved
# into extension directory.
# Will be done as a part of blueprint:
# https://blueprints.launchpad.net/fuel/+spec/volume-manager-refactoring
import alembic
from oslo_serialization import jsonutils
import sqlalchemy as sa

View File

@ -462,7 +462,7 @@ class TestNodeDefaultsDisksHandler(BaseIntegrationTest):
node_db = self.env.nodes[0]
volumes_from_api = self.get(node_db.id)
default_volumes = node_db.volume_manager.gen_volumes_info()
default_volumes = VolumeManager(node_db).gen_volumes_info()
disks = only_disks(default_volumes)
self.assertEqual(len(disks), len(volumes_from_api))
@ -747,7 +747,7 @@ class TestVolumeManager(BaseIntegrationTest):
def test_allocates_all_free_space_for_os_for_controller_role(self):
node = self.create_node('controller')
disks = only_disks(node.volume_manager.volumes)
disks = only_disks(VolumeManager(node).volumes)
disks_size_sum = sum([disk['size'] for disk in disks])
os_sum_size = self.os_size(disks)
mysql_sum_size = self.mysql_size(disks)
@ -767,9 +767,9 @@ class TestVolumeManager(BaseIntegrationTest):
def test_allocates_all_free_space_for_vm_for_compute_role(self):
node = self.create_node('compute')
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.all_free_space_except_os_for_volume(
node.volume_manager.volumes, 'vm')
VolumeManager(node).volumes, 'vm')
self.logical_volume_sizes_should_equal_all_phisical_volumes(
VolumeManagerExtension.get_node_volumes(node))
self.check_disk_size_equal_sum_of_all_volumes(
@ -777,26 +777,26 @@ class TestVolumeManager(BaseIntegrationTest):
def test_allocates_all_free_space_for_vm_for_cinder_role(self):
node = self.create_node('cinder')
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.all_free_space_except_os_for_volume(
node.volume_manager.volumes, 'cinder')
VolumeManager(node).volumes, 'cinder')
self.check_disk_size_equal_sum_of_all_volumes(
VolumeManagerExtension.get_node_volumes(node))
def test_allocates_space_single_disk_for_ceph_for_ceph_role(self):
node = self.create_node('ceph-osd')
self.update_node_with_single_disk(node, 30000)
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.all_free_space_except_os_for_volume(
node.volume_manager.volumes, 'ceph')
VolumeManager(node).volumes, 'ceph')
self.check_disk_size_equal_sum_of_all_volumes(
VolumeManagerExtension.get_node_volumes(node))
def test_allocates_full_disks_for_ceph_for_ceph_role(self):
node = self.create_node('ceph-osd')
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.all_free_space_except_os_disks_for_volume(
node.volume_manager, 'ceph')
VolumeManager(node), 'ceph')
def should_allocates_same_size(self, volumes, same_size_volume_names):
disks = only_disks(volumes)
@ -829,9 +829,9 @@ class TestVolumeManager(BaseIntegrationTest):
def test_multirole_controller_ceph(self):
node = self.create_node('controller', 'ceph-osd')
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.should_allocates_same_size(
node.volume_manager.volumes, ['image', 'ceph'])
VolumeManager(node).volumes, ['image', 'ceph'])
self.logical_volume_sizes_should_equal_all_phisical_volumes(
VolumeManagerExtension.get_node_volumes(node))
self.check_disk_size_equal_sum_of_all_volumes(
@ -839,9 +839,9 @@ class TestVolumeManager(BaseIntegrationTest):
def test_multirole_controller_cinder_ceph(self):
node = self.create_node('controller', 'cinder', 'ceph-osd')
self.should_contain_os_with_minimal_size(node.volume_manager)
self.should_contain_os_with_minimal_size(VolumeManager(node))
self.should_allocates_same_size(
node.volume_manager.volumes, ['image', 'cinder', 'ceph'])
VolumeManager(node).volumes, ['image', 'cinder', 'ceph'])
self.logical_volume_sizes_should_equal_all_phisical_volumes(
VolumeManagerExtension.get_node_volumes(node))
self.check_disk_size_equal_sum_of_all_volumes(
@ -850,7 +850,7 @@ class TestVolumeManager(BaseIntegrationTest):
def create_node_and_calculate_min_size(
self, role, space_info, volumes_metadata):
node = self.create_node(role)
volume_manager = node.volume_manager
volume_manager = VolumeManager(node)
min_installation_size = self.__calc_minimal_installation_size(
volume_manager
)
@ -903,7 +903,7 @@ class TestVolumeManager(BaseIntegrationTest):
role, space_info, volumes_metadata)
self.update_node_with_single_disk(node, min_size)
vm = node.volume_manager
vm = VolumeManager(node)
with patch.object(vm,
'_VolumeManager'
'__calc_minimal_installation_size',
@ -911,7 +911,7 @@ class TestVolumeManager(BaseIntegrationTest):
vm.check_disk_space_for_deployment()
self.update_node_with_single_disk(node, min_size - 1)
vm = node.volume_manager
vm = VolumeManager(node)
with patch.object(vm,
'_VolumeManager'
'__calc_minimal_installation_size',
@ -927,7 +927,7 @@ class TestVolumeManager(BaseIntegrationTest):
for role, space_info in six.iteritems(volumes_roles_mapping):
node = self.create_node(role)
vm = node.volume_manager
vm = VolumeManager(node)
self.assertEqual(
vm._VolumeManager__calc_minimal_installation_size(),
self.__calc_minimal_installation_size(vm)
@ -951,7 +951,7 @@ class TestVolumeManager(BaseIntegrationTest):
self.update_node_with_single_disk(node, 116384)
# Second is taken entirely by ceph
self.add_disk_to_node(node, 65536)
node.volume_manager.check_volume_sizes_for_deployment()
VolumeManager(node).check_volume_sizes_for_deployment()
# First disk contains less than minimum size of all VGs
self.update_node_with_single_disk(node, 16384)
@ -959,14 +959,14 @@ class TestVolumeManager(BaseIntegrationTest):
self.add_disk_to_node(node, 65536)
self.assertRaises(
errors.NotEnoughFreeSpace,
node.volume_manager.check_volume_sizes_for_deployment)
VolumeManager(node).check_volume_sizes_for_deployment)
def update_ram_and_assert_swap_size(self, node, size, swap_size):
new_meta = deepcopy(node.meta)
new_meta['memory']['total'] = (1024 ** 2) * size
node.meta = new_meta
self.env.db.commit()
self.assertEqual(node.volume_manager._calc_swap_size(), swap_size)
self.assertEqual(VolumeManager(node)._calc_swap_size(), swap_size)
def test_root_size_calculation(self):
node = self.create_node('controller')

View File

@ -79,7 +79,7 @@ class TestVolumeManagerGlancePartition(base.BaseIntegrationTest):
params=jsonutils.dumps({
'editable': {'storage': {'images_ceph': {'value': True}}}}),
headers=self.default_headers)
volumes = self.env.nodes[0].volume_manager.gen_volumes_info()
volumes = manager.VolumeManager(self.env.nodes[0]).gen_volumes_info()
image_volume = next((v for v in volumes if v['id'] == 'image'), None)
self.assertIsNone(image_volume)
@ -93,7 +93,7 @@ class TestVolumeManagerGlancePartition(base.BaseIntegrationTest):
consts.CLUSTER_MODES.multinode]},
nodes_kwargs=[
{'roles': ['controller']}])
volumes = self.env.nodes[0].volume_manager.gen_volumes_info()
volumes = manager.VolumeManager(self.env.nodes[0]).gen_volumes_info()
image_volume = next((v for v in volumes if v['id'] == 'image'), None)
self.assertIsNotNone(image_volume)

View File

@ -17,19 +17,17 @@
"""Deployment serializers for orchestrator"""
from copy import deepcopy
from distutils.version import StrictVersion
import six
from nailgun import consts
from nailgun.extensions import fire_callback_on_deployment_data_serialization
from nailgun.extensions import node_extension_call
from nailgun.extensions.volume_manager import manager as volume_manager
from nailgun.logger import logger
from nailgun import objects
from nailgun.plugins import adapters
from nailgun.settings import settings
from nailgun import utils
from nailgun.utils.ceph import get_pool_pg_count
from nailgun.utils.role_resolver import NameMatchingPolicy
from nailgun.utils.role_resolver import RoleResolver
@ -164,8 +162,6 @@ class DeploymentMultinodeSerializer(object):
if self.role_resolver.resolve(['cinder']):
attrs['use_cinder'] = True
self.set_storage_parameters(cluster, attrs)
net_serializer = self.get_net_provider_serializer(cluster)
net_common_attrs = net_serializer.get_common_attrs(cluster, attrs)
attrs = utils.dict_merge(attrs, net_common_attrs)
@ -174,43 +170,6 @@ class DeploymentMultinodeSerializer(object):
return attrs
def set_storage_parameters(self, cluster, attrs):
"""Generate pg_num
pg_num is generated as the number of OSDs across the cluster
multiplied by 100, divided by Ceph replication factor, and
rounded up to the nearest power of 2.
"""
osd_num = 0
ceph_nodes_uids = self.role_resolver.resolve(['ceph-osd'])
ceph_nodes = objects.NodeCollection.filter_by_id_list(
self.all_nodes, ceph_nodes_uids
)
for node in ceph_nodes:
for disk in node_extension_call('get_node_volumes', node):
for part in disk.get('volumes', []):
if part.get('name') == 'ceph' and part.get('size', 0) > 0:
osd_num += 1
storage_attrs = attrs['storage']
pg_counts = get_pool_pg_count(
osd_num=osd_num,
pool_sz=int(storage_attrs['osd_pool_size']),
ceph_version='firefly',
volumes_ceph=storage_attrs['volumes_ceph'],
objects_ceph=storage_attrs['objects_ceph'],
ephemeral_ceph=storage_attrs['ephemeral_ceph'],
images_ceph=storage_attrs['images_ceph'],
emulate_pre_7_0=False)
# Log {pool_name: pg_count} mapping
pg_str = ", ".join(map("{0[0]}={0[1]}".format, pg_counts.items()))
logger.debug("Ceph: PG values {%s}", pg_str)
storage_attrs['pg_num'] = pg_counts['default_pg_num']
storage_attrs['per_pool_pg_nums'] = pg_counts
@classmethod
def node_list(cls, nodes):
"""Generate nodes list. Represents as "nodes" parameter in facts."""
@ -267,21 +226,10 @@ class DeploymentMultinodeSerializer(object):
net_serializer = self.get_net_provider_serializer(node.cluster)
node_attrs.update(net_serializer.get_node_attrs(node))
node_attrs.update(net_serializer.network_ranges(node.group_id))
node_attrs.update(self.get_image_cache_max_size(node))
node_attrs.update(self.generate_test_vm_image_data(node))
return node_attrs
def get_image_cache_max_size(self, node):
images_ceph = (node.cluster.attributes['editable']['storage']
['images_ceph']['value'])
if images_ceph:
image_cache_max_size = '0'
else:
image_cache_max_size = volume_manager.calc_glance_cache_size(
node_extension_call('get_node_volumes', node))
return {'glance': {'image_cache_max_size': image_cache_max_size}}
def generate_test_vm_image_data(self, node):
# Instantiate all default values in dict.
image_data = {
@ -555,13 +503,6 @@ class DeploymentHASerializer70(DeploymentHASerializer61):
class DeploymentHASerializer80(DeploymentHASerializer70):
def serialize_node(self, node, role):
serialized_node = super(
DeploymentHASerializer80, self).serialize_node(node, role)
serialized_node.update(self.generate_node_volumes_data(node))
return serialized_node
@classmethod
def get_net_provider_serializer(cls, cluster):
if cluster.network_config.configuration_template:
@ -569,15 +510,6 @@ class DeploymentHASerializer80(DeploymentHASerializer70):
else:
return NeutronNetworkDeploymentSerializer80
def generate_node_volumes_data(self, node):
"""Serialize information about disks.
This function returns information about disks and
volume groups for each node in cluster.
Will be passed to Astute.
"""
return {'node_volumes': node_extension_call('get_node_volumes', node)}
class DeploymentHASerializer90(DeploymentHASerializer80):
@ -840,7 +772,8 @@ def get_serializer_for_cluster(cluster):
return serializers[env_mode]
# return latest serializer by default
latest_version = sorted(six.iterkeys(serializers_map))[-1]
latest_version = max(serializers_map, key=lambda v: StrictVersion(v))
return serializers_map[latest_version][env_mode]
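
The switch from a lexical sort to max(..., key=StrictVersion) matters as soon as a two-digit version appears, since string ordering puts '9.0' after '10.0'. A quick check:

    from distutils.version import StrictVersion

    versions = ['9.0', '10.0']
    assert sorted(versions)[-1] == '9.0'               # lexical: wrong
    assert max(versions, key=StrictVersion) == '10.0'  # numeric: right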

View File

@ -23,7 +23,6 @@ import six
from nailgun import consts
from nailgun.extensions import fire_callback_on_provisioning_data_serialization
from nailgun.extensions import node_extension_call
from nailgun.logger import logger
from nailgun import objects
from nailgun.orchestrator.base_serializers import MellanoxMixin
@ -133,7 +132,6 @@ class ProvisioningSerializer(MellanoxMixin):
'udevrules': cls.interfaces_mapping_for_udev(node)},
'ks_meta': {
'pm_data': {
'ks_spaces': node_extension_call('get_node_volumes', node),
'kernel_params': objects.Node.get_kernel_params(node)},
'fuel_version': node.cluster.fuel_version,
'cloud_init_templates':

View File

@ -36,6 +36,7 @@ from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import Task
from nailgun import errors
from nailgun.extensions import fire_callback_on_before_deployment_check
from nailgun.extensions.network_manager.manager import NetworkManager
from nailgun import lcm
from nailgun.logger import logger
@ -656,8 +657,8 @@ class DeletionTask(object):
# check if there's a Zabbix server in an environment
# and if there is, remove hosts
if (task.name != consts.TASK_NAMES.cluster_deletion
and ZabbixManager.get_zabbix_node(task.cluster)):
if (task.name != consts.TASK_NAMES.cluster_deletion and
ZabbixManager.get_zabbix_node(task.cluster)):
zabbix_credentials = ZabbixManager.get_zabbix_credentials(
task.cluster
)
@ -1309,10 +1310,10 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def execute(cls, task):
fire_callback_on_before_deployment_check(task.cluster)
cls._check_nodes_are_online(task)
cls._check_disks(task)
cls._check_ceph(task)
cls._check_volumes(task)
cls._check_public_network(task)
cls._check_vmware_consistency(task)
cls._validate_network_template(task)
@ -1345,30 +1346,6 @@ class CheckBeforeDeploymentTask(object):
' Remove them from environment '
'and try again.'.format(node_names))
@classmethod
def _check_disks(cls, task):
try:
for node in task.cluster.nodes:
if cls._is_disk_checking_required(node):
node.volume_manager.check_disk_space_for_deployment()
except errors.NotEnoughFreeSpace:
raise errors.NotEnoughFreeSpace(
u"Node '{0}' has insufficient disk space".format(
node.human_readable_name
)
)
@classmethod
def _check_volumes(cls, task):
try:
for node in task.cluster.nodes:
if cls._is_disk_checking_required(node):
node.volume_manager.check_volume_sizes_for_deployment()
except errors.NotEnoughFreeSpace as e:
raise errors.NotEnoughFreeSpace(
u"Node '%s' has insufficient disk space\n%s" % (
node.human_readable_name, e.message))
@classmethod
def _check_ceph(cls, task):
storage = objects.Attributes.merged_attrs(
@ -1381,15 +1358,6 @@ class CheckBeforeDeploymentTask(object):
cls._check_ceph_osds(task)
return
@classmethod
def _is_disk_checking_required(cls, node):
"""Disk checking required in case if node is not provisioned."""
if node.status in ('ready', 'deploying', 'provisioned') or \
(node.status == 'error' and node.error_type != 'provision'):
return False
return True
@classmethod
def _check_ceph_osds(cls, task):
osd_count = len(filter(
@ -1450,15 +1418,19 @@ class CheckBeforeDeploymentTask(object):
"""Check for mongo nodes presence in env with external mongo."""
components = objects.Attributes.merged_attrs(
task.cluster.attributes).get("additional_components", None)
if (components and components["ceilometer"]["value"]
and components["mongo"]["value"]
and len(objects.Cluster.get_nodes_by_role(
task.cluster, 'mongo')) > 0):
if (components and components["ceilometer"]["value"] and
components["mongo"]["value"] and
len(objects.Cluster.get_nodes_by_role(
task.cluster, 'mongo')) > 0):
raise errors.ExtMongoCheckerError
if (components and components["ceilometer"]["value"]
and not components["mongo"]["value"]
and len(objects.Cluster.get_nodes_by_role(
task.cluster, 'mongo')) == 0):
if (components and components["ceilometer"]["value"] and not
components["mongo"]["value"] and
len(objects.Cluster.get_nodes_by_role(
task.cluster, 'mongo')) == 0):
raise errors.MongoNodesCheckError
@classmethod

View File

@ -27,8 +27,6 @@ from nailgun import objects
from nailgun.db.sqlalchemy import models
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.extensions.network_manager.manager import NetworkManager
from nailgun.extensions.volume_manager.extension import VolumeManagerExtension
from nailgun.extensions.volume_manager import manager
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
@ -213,9 +211,6 @@ class TestHandlers(BaseIntegrationTest):
}}
individual_atts.update(common_attrs)
individual_atts['glance']['image_cache_max_size'] = str(
manager.calc_glance_cache_size(
VolumeManagerExtension.get_node_volumes(node)))
deployment_info.append(deepcopy(individual_atts))
controller_nodes = filter(
@ -285,9 +280,8 @@ class TestHandlers(BaseIntegrationTest):
'mco_enable': 1,
'mco_identity': n.id,
'pm_data': {
'ks_spaces': VolumeManagerExtension.get_node_volumes(
n),
'kernel_params': objects.Node.get_kernel_params(n),
'ks_spaces': None,
},
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'authorized_keys':
@ -370,7 +364,9 @@ class TestHandlers(BaseIntegrationTest):
'tasks',
'uids',
'percentage',
'vms_conf'])
'vms_conf',
'ks_spaces',
])
self.check_pg_count(args[1][1]['args']['deployment_info'])
@ -384,9 +380,10 @@ class TestHandlers(BaseIntegrationTest):
'IP',
'workloads_collector',
'vms_conf',
'storage',
'tasks_directory',
'tasks_graph'])
'tasks_graph',
'storage',
'glance'])
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
@ -676,9 +673,6 @@ class TestHandlers(BaseIntegrationTest):
}
individual_atts.update(common_attrs)
individual_atts['glance']['image_cache_max_size'] = str(
manager.calc_glance_cache_size(
VolumeManagerExtension.get_node_volumes(node)))
deployment_info.append(deepcopy(individual_atts))
controller_nodes = filter(
@ -749,9 +743,8 @@ class TestHandlers(BaseIntegrationTest):
'mco_enable': 1,
'mco_identity': n.id,
'pm_data': {
'ks_spaces': VolumeManagerExtension.get_node_volumes(
n),
'kernel_params': objects.Node.get_kernel_params(n),
'ks_spaces': None,
},
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'authorized_keys':
@ -832,7 +825,8 @@ class TestHandlers(BaseIntegrationTest):
'IP',
'tasks',
'uids',
'percentage'])
'percentage',
'ks_spaces'])
self.check_pg_count(args[1][1]['args']['deployment_info'])
@ -847,7 +841,8 @@ class TestHandlers(BaseIntegrationTest):
'tasks',
'priority',
'workloads_collector',
'storage'])
'storage',
'glance'])
def check_pg_count(self, deployment_info):
pools = ['volumes', 'compute', 'backups', '.rgw',
@ -1180,9 +1175,6 @@ class TestHandlers(BaseIntegrationTest):
}
individual_atts.update(common_attrs)
individual_atts['glance']['image_cache_max_size'] = str(
manager.calc_glance_cache_size(
VolumeManagerExtension.get_node_volumes(node)))
deployment_info.append(deepcopy(individual_atts))
controller_nodes = filter(
@ -1253,9 +1245,8 @@ class TestHandlers(BaseIntegrationTest):
'mco_enable': 1,
'mco_identity': n.id,
'pm_data': {
'ks_spaces': VolumeManagerExtension.get_node_volumes(
n),
'kernel_params': objects.Node.get_kernel_params(n),
'ks_spaces': None,
},
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'authorized_keys':
@ -1336,7 +1327,8 @@ class TestHandlers(BaseIntegrationTest):
'IP',
'tasks',
'uids',
'percentage'])
'percentage',
'ks_spaces'])
self.check_pg_count(args[1][1]['args']['deployment_info'])
@ -1352,7 +1344,8 @@ class TestHandlers(BaseIntegrationTest):
'priority',
'workloads_collector',
'vms_conf',
'storage'])
'storage',
'glance'])
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')

View File

@ -26,6 +26,7 @@ from netaddr import IPAddress
from netaddr import IPNetwork
from netaddr import IPRange
from oslo_serialization import jsonutils
import unittest2
import yaml
from nailgun import consts
@ -56,8 +57,6 @@ from nailgun.orchestrator.orchestrator_graph import AstuteGraph
from nailgun.db.sqlalchemy import models
from nailgun import objects
from nailgun.extensions.volume_manager.extension import VolumeManagerExtension
from nailgun.extensions.volume_manager import manager
from nailgun.settings import settings
from nailgun.test import base
from nailgun.utils import reverse
@ -243,10 +242,6 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
self.assertEqual(serialized_data['online'], node_db.online)
self.assertEqual(serialized_data['fqdn'],
'%s.%s' % (node_db.hostname, settings.DNS_DOMAIN))
self.assertEqual(
serialized_data['glance'],
{'image_cache_max_size': manager.calc_glance_cache_size(
VolumeManagerExtension.get_node_volumes(node_db))})
def test_serialize_node_vms_conf(self):
node = self.env.create_node(
@ -2285,15 +2280,8 @@ class TestCephOsdImageOrchestratorSerialize(OrchestratorSerializerTestBase):
headers=self.default_headers)
self.cluster = self.db.query(Cluster).get(cluster['id'])
def test_glance_image_cache_max_size(self):
data = self.serialize(self.cluster)
self.assertEqual(len(data), 2)
# one node - 2 roles
self.assertEqual(data[0]['uid'], data[1]['uid'])
self.assertEqual(data[0]['glance']['image_cache_max_size'], '0')
self.assertEqual(data[1]['glance']['image_cache_max_size'], '0')
@unittest2.skip("Should be moved to volume_manager tests")
class TestCephPgNumOrchestratorSerialize(OrchestratorSerializerTestBase):
env_version = '1111-6.0'
@ -2574,7 +2562,6 @@ class BaseDeploymentSerializer(BaseSerializerTest):
result['cinder']['instances'][0]['vc_password'],
"secret")
# check glance parameters
self.assertEqual(result['glance']['vc_host'], "1.2.3.4")
self.assertEqual(result['glance']['vc_user'], "admin")
self.assertEqual(result['glance']['vc_password'], "secret")

View File

@ -15,11 +15,12 @@
# under the License.
from copy import deepcopy
import mock
import six
import unittest2
import nailgun
from nailgun import consts
from nailgun.db.sqlalchemy import models
from nailgun import objects
@ -357,6 +358,7 @@ class TestDeploymentAttributesSerialization80(
consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
self.assertIn(expected_patch, transformations)
@unittest2.skip("Should be moved to volume_manager tests")
def test_disks_attrs(self):
disks = [
{
@ -611,6 +613,7 @@ class TestBlockDeviceDevicesSerialization80(
self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
self.serializer = self.create_serializer(self.cluster_db)
@unittest2.skip("Should be moved to volume_manager tests")
def test_block_device_disks(self):
self.env.create_node(
cluster_id=self.cluster_db.id,

View File

@ -45,6 +45,7 @@ class TestSerializer90Mixin(object):
return serializer_type(None)
# NOTE(sbrzeczkowski): this one is skipped in test_orch*_80.py
class TestBlockDeviceDevicesSerialization90(
TestSerializer90Mixin,
test_orchestrator_serializer_80.TestBlockDeviceDevicesSerialization80

View File

@ -18,7 +18,6 @@ from nailgun import consts
from nailgun.db.sqlalchemy.models import Node
from nailgun import objects
from nailgun.extensions.volume_manager.extension import VolumeManagerExtension
from nailgun.orchestrator import provisioning_serializers as ps
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
@ -173,10 +172,8 @@ class TestProvisioningSerializer(BaseIntegrationTest):
'udevrules': '{0}_{1}'.format(intr_mac, intr_name)
})
self.assertDictEqual(node['ks_meta']['pm_data'], {
'ks_spaces': VolumeManagerExtension.get_node_volumes(node_db),
'kernel_params': kernal_params
})
self.assertEqual(
node['ks_meta']['pm_data']['kernel_params'], kernal_params)
# Check node interfaces section
self.assertEqual(
node['interfaces'][intr_name]['mac_address'], intr_mac)

View File

@ -21,7 +21,7 @@ from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db.sqlalchemy.models import Task
from nailgun import errors
from nailgun.extensions.volume_manager.manager import VolumeManager
from nailgun import objects
from nailgun.task import task
from nailgun.test.base import BaseTestCase
@ -320,63 +320,6 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
self.env.db.commit()
self.assertEqual(self.node.error_type, error_type)
def is_checking_required(self):
return task.CheckBeforeDeploymentTask._is_disk_checking_required(
self.node)
def test_is_disk_checking_required(self):
self.set_node_status('ready')
self.assertFalse(self.is_checking_required())
self.set_node_status('deploying')
self.assertFalse(self.is_checking_required())
self.set_node_status('discover')
self.assertTrue(self.is_checking_required())
self.set_node_status('provisioned')
self.assertFalse(self.is_checking_required())
def test_is_disk_checking_required_in_case_of_error(self):
self.set_node_status('error')
self.set_node_error_type('provision')
self.assertTrue(self.is_checking_required())
self.set_node_error_type('deploy')
self.assertFalse(self.is_checking_required())
def test_check_volumes_and_disks_do_not_run_if_node_ready(self):
self.set_node_status('ready')
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_disks(self.task)
self.assertFalse(check_mock.called)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_volumes(self.task)
self.assertFalse(check_mock.called)
def test_check_volumes_and_disks_run_if_node_not_ready(self):
self.set_node_status('discover')
with mock.patch.object(
VolumeManager,
'check_disk_space_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_disks(self.task)
self.assertEqual(check_mock.call_count, 1)
with mock.patch.object(
VolumeManager,
'check_volume_sizes_for_deployment') as check_mock:
task.CheckBeforeDeploymentTask._check_volumes(self.task)
self.assertEqual(check_mock.call_count, 1)
def test_check_nodes_online_raises_exception(self):
self.node.online = False
self.env.db.commit()