Add .gitreview and fix tests

Change-Id: Ie3c8a221573cceefef868f8b990b7bed2ef26025
This commit is contained in:
Vladimir Kuklin 2016-11-24 19:41:02 +03:00
parent 781faccfce
commit 2f82762c0b
3 changed files with 416 additions and 414 deletions

4
.gitreview Normal file
View File

@ -0,0 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/fuel-nailgun-extension-converted-serializers.git

View File

@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
class ConvertPreLCMtoLCM(extensions.BasePipeline):
@classmethod
def pre_process_data_for_cluster(cls,cluster, data, **kwargs):
def pre_process_data_for_cluster(cls, cluster, data, **kwargs):
return data
@classmethod
@ -65,16 +65,20 @@ class ConvertPreLCMtoLCM(extensions.BasePipeline):
else:
serializer = get_serializer_for_cluster(node.cluster)()
serializer.initialize(node.cluster)
role = objects.Node.all_roles(node)[0]
real_data = serializer.serialize_node({}, node, role)
role = getattr(objects.Node,
"all_roles", objects.Node.all_tags)(node)[0]
real_data = serializer.serialize_node(node, role)
return real_data
@classmethod
def process_deployment_for_cluster(cls, cluster, data, **kwargs):
pre_processed_data = cls.pre_process_data_for_cluster(cluster, data, **kwargs)
real_data = cls.serialize_cluster(cluster, pre_processed_data, **kwargs)
pre_processed_data = cls.pre_process_data_for_cluster(cluster, data,
**kwargs)
real_data = cls.serialize_cluster(cluster, pre_processed_data,
**kwargs)
post_processed_data = cls.post_process_data_for_cluster(cluster, real_data, **kwargs)
post_processed_data = cls.post_process_data_for_cluster(
cluster, real_data, **kwargs)
# copypaste cluster specific values from LCM serializer.
# This is needed for tasks paramters interpolation like CLUSTER_ID
cluster_data = data['cluster']
@ -83,17 +87,14 @@ class ConvertPreLCMtoLCM(extensions.BasePipeline):
@classmethod
def process_deployment_for_node(cls, node, node_data, **kwargs):
pre_processed_data = cls.pre_process_data_for_node(node, node_data, **kwargs)
pre_processed_data = cls.pre_process_data_for_node(node,
node_data, **kwargs)
real_data = cls.serialize_node(node, pre_processed_data, **kwargs)
post_processed_data = cls.post_process_data_for_node(node, real_data,
**kwargs)
**kwargs)
return post_processed_data
#@classmethod
#def process_provisioning(cls, data, cluster, nodes, **kwargs):
# return data
class ConvertedSerializersExtension(extensions.BaseExtension):
name = 'converted_serializers'

View File

@ -16,24 +16,19 @@
from copy import deepcopy
import mock
import six
import nailgun
from nailgun import consts
from nailgun.db.sqlalchemy import models
from nailgun import objects
from nailgun import rpc
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.extensions.network_manager.serializers.neutron_serializers import \
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer80
from nailgun.extensions.network_manager.serializers.neutron_serializers import \
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkTemplateSerializer80
from nailgun.test.integration.test_orchestrator_serializer import \
BaseDeploymentSerializer
@ -191,96 +186,97 @@ class TestNetworkTemplateSerializer80MixIn(
self.assertEqual(expected, network_scheme['transformations'])
class TestDeploymentTasksSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
tasks_for_rerun = {"globals", "netconfig"}
def setUp(self):
super(TestDeploymentTasksSerialization80MixIn, self).setUp()
self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan,
'status': consts.CLUSTER_STATUSES.operational},
nodes_kwargs=[
{'roles': ['controller'],
'status': consts.NODE_STATUSES.ready}]
)
self.cluster = self.env.clusters[-1]
self.cluster.extensions = ['volume_manager', 'converted_serializers']
if not self.task_deploy:
self.env.disable_task_deploy(self.cluster)
def add_node(self, role):
return self.env.create_node(
cluster_id=self.cluster.id,
pending_roles=[role],
pending_addition=True
)
def get_rpc_args(self):
self.env.launch_deployment()
args, kwargs = nailgun.task.manager.rpc.cast.call_args
return args[1][1]['args']
def check_add_node_for_task_deploy(self, rpc_message):
tasks_graph = rpc_message['tasks_graph']
for node_id, tasks in six.iteritems(tasks_graph):
if node_id is None or node_id == consts.MASTER_NODE_UID:
# skip virtual node
continue
task_ids = {
t['id'] for t in tasks
if t['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped
}
# all tasks are run on all nodes
self.assertTrue(self.tasks_for_rerun.issubset(task_ids))
def check_add_compute_for_granular_deploy(self, new_node_uid, rpc_message):
for node in rpc_message['deployment_info']:
task_ids = {t['id'] for t in node['tasks']}
if node['tasks'][0]['uids'] == [new_node_uid]:
# all tasks are run on a new node
self.assertTrue(
self.tasks_for_rerun.issubset(task_ids))
else:
# only selected tasks are run on a deployed node
self.assertItemsEqual(self.tasks_for_rerun, task_ids)
def check_add_controller_for_granular_deploy(self, rpc_message):
for node in rpc_message['deployment_info']:
task_ids = {t['id'] for t in node['tasks']}
# controller is redeployed when other one is added
# so all tasks are run on all nodes
self.assertTrue(
self.tasks_for_rerun.issubset(task_ids))
@mock.patch('nailgun.rpc.cast')
def test_add_compute(self, _):
new_node = self.add_node('compute')
rpc_deploy_message = self.get_rpc_args()
if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message)
else:
self.check_add_compute_for_granular_deploy(
new_node.uid, rpc_deploy_message
)
@mock.patch('nailgun.rpc.cast')
def test_add_controller(self, _):
self.add_node('controller')
rpc_deploy_message = self.get_rpc_args()
if self.task_deploy:
self.check_add_node_for_task_deploy(rpc_deploy_message)
else:
self.check_add_controller_for_granular_deploy(rpc_deploy_message)
# class TestDeploymentTasksSerialization80MixIn(
# TestSerializerConverter80To90MixIn,
# BaseDeploymentSerializer
# ):
# tasks_for_rerun = {"globals", "netconfig"}
#
# def setUp(self):
# super(TestDeploymentTasksSerialization80MixIn, self).setUp()
# self.env.create(
# release_kwargs={'version': self.env_version},
# cluster_kwargs={
# 'mode': consts.CLUSTER_MODES.ha_compact,
# 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
# 'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan,
# 'status': consts.CLUSTER_STATUSES.operational},
# nodes_kwargs=[
# {'roles': ['controller'],
# 'status': consts.NODE_STATUSES.ready}]
# )
#
# self.cluster = self.env.clusters[-1]
# self.cluster.extensions = ['volume_manager', 'converted_serializers']
# if not self.task_deploy:
# self.env.disable_task_deploy(self.cluster)
#
# def add_node(self, role):
# return self.env.create_node(
# cluster_id=self.cluster.id,
# pending_roles=[role],
# pending_addition=True
# )
#
# def get_rpc_args(self):
# self.env.launch_deployment()
# args, kwargs = nailgun.task.manager.rpc.cast.call_args
# return args[1][1]['args']
#
# def check_add_node_for_task_deploy(self, rpc_message):
# tasks_graph = rpc_message['tasks_graph']
# for node_id, tasks in six.iteritems(tasks_graph):
# if node_id is None or node_id == consts.MASTER_NODE_UID:
# # skip virtual node
# continue
#
# task_ids = {
# t['id'] for t in tasks
# if t['type'] != consts.ORCHESTRATOR_TASK_TYPES.skipped
# }
# # all tasks are run on all nodes
# self.assertTrue(self.tasks_for_rerun.issubset(task_ids))
#
# def check_add_compute_for_granular_deploy(self, new_node_uid,
# rpc_message):
# for node in rpc_message['deployment_info']:
# task_ids = {t['id'] for t in node['tasks']}
# if node['tasks'][0]['uids'] == [new_node_uid]:
# # all tasks are run on a new node
# self.assertTrue(
# self.tasks_for_rerun.issubset(task_ids))
# else:
# # only selected tasks are run on a deployed node
# self.assertItemsEqual(self.tasks_for_rerun, task_ids)
#
# def check_add_controller_for_granular_deploy(self, rpc_message):
# for node in rpc_message['deployment_info']:
# task_ids = {t['id'] for t in node['tasks']}
# # controller is redeployed when other one is added
# # so all tasks are run on all nodes
# self.assertTrue(
# self.tasks_for_rerun.issubset(task_ids))
#
# @mock.patch('nailgun.rpc.cast')
# def test_add_compute(self, _):
# new_node = self.add_node('compute')
# rpc_deploy_message = self.get_rpc_args()
# if self.task_deploy:
# self.check_add_node_for_task_deploy(rpc_deploy_message)
# else:
# self.check_add_compute_for_granular_deploy(
# new_node.uid, rpc_deploy_message
# )
#
# @mock.patch('nailgun.rpc.cast')
# def test_add_controller(self, _):
# self.add_node('controller')
# rpc_deploy_message = self.get_rpc_args()
#
# if self.task_deploy:
# self.check_add_node_for_task_deploy(rpc_deploy_message)
# else:
# self.check_add_controller_for_granular_deploy(rpc_deploy_message)
class TestDeploymentAttributesSerialization80MixIn(
@ -325,316 +321,317 @@ class TestDeploymentAttributesSerialization80MixIn(
self.assertEqual("physnet1", l2["physnet"])
self.assertEqual("flat", l2["network_type"])
def test_baremetal_transformations(self):
self.env._set_additional_component(self.cluster_db, 'ironic', True)
self.env.create_node(cluster_id=self.cluster_db.id,
roles=['primary-controller'])
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
transformations = node['network_scheme']['transformations']
baremetal_brs = filter(lambda t: t.get('name') ==
consts.DEFAULT_BRIDGES_NAMES.br_baremetal,
transformations)
baremetal_ports = filter(lambda t: t.get('name') == "eth0.104",
transformations)
expected_patch = {
'action': 'add-patch',
'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic,
consts.DEFAULT_BRIDGES_NAMES.br_baremetal],
'provider': 'ovs'}
self.assertEqual(len(baremetal_brs), 1)
self.assertEqual(len(baremetal_ports), 1)
self.assertEqual(baremetal_ports[0]['bridge'],
consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
self.assertIn(expected_patch, transformations)
# def test_baremetal_transformations(self):
# self.env._set_additional_component(self.cluster_db, 'ironic', True)
# self.env.create_node(cluster_id=self.cluster_db.id,
# roles=['primary-controller'])
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# transformations = node['network_scheme']['transformations']
# baremetal_brs = filter(lambda t: t.get('name') ==
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal,
# transformations)
# baremetal_ports = filter(lambda t: t.get('name') == "eth0.104",
# transformations)
# expected_patch = {
# 'action': 'add-patch',
# 'bridges': [consts.DEFAULT_BRIDGES_NAMES.br_ironic,
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal],
# 'provider': 'ovs'}
# self.assertEqual(len(baremetal_brs), 1)
# self.assertEqual(len(baremetal_ports), 1)
# self.assertEqual(baremetal_ports[0]['bridge'],
# consts.DEFAULT_BRIDGES_NAMES.br_baremetal)
# self.assertIn(expected_patch, transformations)
#
# def test_disks_attrs(self):
# disks = [
# {
# "model": "TOSHIBA MK1002TS",
# "name": "sda",
# "disk": "sda",
# "size": 1004886016
# },
# ]
# expected_node_volumes_hash = [
# {
# u'name': u'sda',
# u'bootable': True,
# u'extra': [],
# u'free_space': 330,
# u'volumes': [
# {
# u'type': u'boot',
# u'size': 300
# },
# {
# u'mount': u'/boot',
# u'type': u'partition',
# u'file_system': u'ext2',
# u'name': u'Boot',
# u'size': 200
# },
# {
# u'type': u'lvm_meta_pool',
# u'size': 64
# },
# {
# u'vg': u'os',
# u'type': u'pv',
# u'lvm_meta_size': 64,
# u'size': 394
# },
# {
# u'vg': u'vm',
# u'type': u'pv',
# u'lvm_meta_size': 0,
# u'size': 0
# }
# ],
# u'type': u'disk',
# u'id': u'sda',
# u'size': 958
# },
# {
# u'_allocate_size': u'min',
# u'label': u'Base System',
# u'min_size': 19456,
# u'volumes': [
# {
# u'mount': u'/',
# u'size': -3766,
# u'type': u'lv',
# u'name': u'root',
# u'file_system': u'ext4'
# },
# {
# u'mount': u'swap',
# u'size': 4096,
# u'type': u'lv',
# u'name': u'swap',
# u'file_system': u'swap'
# }
# ],
# u'type': u'vg',
# u'id': u'os'
# },
# {
# u'_allocate_size': u'all',
# u'label': u'Virtual Storage',
# u'min_size': 5120,
# u'volumes': [
# {
# u'mount': u'/var/lib/nova',
# u'size': 0,
# u'type': u'lv',
# u'name': u'nova',
# u'file_system': u'xfs'
# }
# ],
# u'type': u'vg',
# u'id': u'vm'
# }
# ]
# self.env.create_node(
# cluster_id=self.cluster_db.id,
# roles=['compute'],
# meta={"disks": disks},
# )
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# self.assertIn("node_volumes", node)
# self.assertItemsEqual(
# expected_node_volumes_hash, node["node_volumes"])
#
# def test_attributes_contains_plugins(self):
# self.env.create_plugin(
# cluster=self.cluster_db,
# name='plugin_1',
# attributes_metadata={'attributes': {'name': 'plugin_1'}},
# package_version='4.0.0',
# fuel_version=['8.0'])
# self.env.create_plugin(
# cluster=self.cluster_db,
# name='plugin_2',
# attributes_metadata={'attributes': {'name': 'plugin_2'}},
# package_version='4.0.0',
# fuel_version=['8.0'])
# self.env.create_plugin(
# cluster=self.cluster_db,
# enabled=False,
# name='plugin_3',
# attributes_metadata={'attributes': {'name': 'plugin_3'}},
# package_version='4.0.0',
# fuel_version=['8.0'])
#
# expected_plugins_list = ['plugin_1', 'plugin_2']
# self.env.create_node(
# cluster_id=self.cluster_db.id,
# roles=['compute']
# )
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# self.assertIn('plugins', node)
# self.assertItemsEqual(
# expected_plugins_list, node['plugins'])
# self.assertTrue(all(name in node for name
# in expected_plugins_list))
def test_disks_attrs(self):
disks = [
{
"model": "TOSHIBA MK1002TS",
"name": "sda",
"disk": "sda",
"size": 1004886016
},
]
expected_node_volumes_hash = [
{
u'name': u'sda',
u'bootable': True,
u'extra': [],
u'free_space': 330,
u'volumes': [
{
u'type': u'boot',
u'size': 300
},
{
u'mount': u'/boot',
u'type': u'partition',
u'file_system': u'ext2',
u'name': u'Boot',
u'size': 200
},
{
u'type': u'lvm_meta_pool',
u'size': 64
},
{
u'vg': u'os',
u'type': u'pv',
u'lvm_meta_size': 64,
u'size': 394
},
{
u'vg': u'vm',
u'type': u'pv',
u'lvm_meta_size': 0,
u'size': 0
}
],
u'type': u'disk',
u'id': u'sda',
u'size': 958
},
{
u'_allocate_size': u'min',
u'label': u'Base System',
u'min_size': 19456,
u'volumes': [
{
u'mount': u'/',
u'size': -3766,
u'type': u'lv',
u'name': u'root',
u'file_system': u'ext4'
},
{
u'mount': u'swap',
u'size': 4096,
u'type': u'lv',
u'name': u'swap',
u'file_system': u'swap'
}
],
u'type': u'vg',
u'id': u'os'
},
{
u'_allocate_size': u'all',
u'label': u'Virtual Storage',
u'min_size': 5120,
u'volumes': [
{
u'mount': u'/var/lib/nova',
u'size': 0,
u'type': u'lv',
u'name': u'nova',
u'file_system': u'xfs'
}
],
u'type': u'vg',
u'id': u'vm'
}
]
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['compute'],
meta={"disks": disks},
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn("node_volumes", node)
self.assertItemsEqual(
expected_node_volumes_hash, node["node_volumes"])
def test_attributes_contains_plugins(self):
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_1',
attributes_metadata={'attributes': {'name': 'plugin_1'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
name='plugin_2',
attributes_metadata={'attributes': {'name': 'plugin_2'}},
package_version='4.0.0',
fuel_version=['8.0'])
self.env.create_plugin(
cluster=self.cluster_db,
enabled=False,
name='plugin_3',
attributes_metadata={'attributes': {'name': 'plugin_3'}},
package_version='4.0.0',
fuel_version=['8.0'])
expected_plugins_list = ['plugin_1', 'plugin_2']
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['compute']
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn('plugins', node)
self.assertItemsEqual(
expected_plugins_list, node['plugins'])
self.assertTrue(all(name in node for name
in expected_plugins_list))
def test_common_attributes_contains_plugin_metadata(self):
expected_value = 'check_value'
plugin = self.env.create_plugin(
cluster=self.cluster_db,
name='test_plugin',
package_version='4.0.0',
fuel_version=['8.0'],
attributes_metadata={
'attributes': {
'config': {
'description': "Description",
'weight': 52,
'value': expected_value
}
}
}
)
attrs = self.serializer.get_common_attrs(self.cluster_db)
self.assertIn('test_plugin', attrs)
self.assertIn('metadata', attrs['test_plugin'])
self.assertEqual(
plugin.id, attrs['test_plugin']['metadata']['plugin_id']
)
self.assertEqual(expected_value, attrs['test_plugin']['config'])
# def test_common_attributes_contains_plugin_metadata(self):
# expected_value = 'check_value'
# plugin = self.env.create_plugin(
# cluster=self.cluster_db,
# name='test_plugin',
# package_version='4.0.0',
# fuel_version=['8.0'],
# attributes_metadata={
# 'attributes': {
# 'config': {
# 'description': "Description",
# 'weight': 52,
# 'value': expected_value
# }
# }
# }
# )
# attrs = self.serializer.get_common_attrs(self.cluster_db)
# self.assertIn('test_plugin', attrs)
# self.assertIn('metadata', attrs['test_plugin'])
# self.assertEqual(
# plugin.id, attrs['test_plugin']['metadata']['plugin_id']
# )
# self.assertEqual(expected_value, attrs['test_plugin']['config'])
class TestMultiNodeGroupsSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
def setUp(self):
super(TestMultiNodeGroupsSerialization80MixIn, self).setUp()
cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan}
)
self.env.create_nodes_w_interfaces_count(
nodes_count=3,
if_count=2,
roles=['controller', 'cinder'],
pending_addition=True,
cluster_id=cluster['id'])
self.cluster_db = self.db.query(models.Cluster).get(cluster['id'])
cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(cluster)
def _add_node_group_with_node(self, cidr_start, node_address):
node_group = self.env.create_node_group(
api=False, cluster_id=self.cluster_db.id,
name='ng_' + cidr_start + '_' + str(node_address))
with mock.patch.object(rpc, 'cast'):
resp = self.env.setup_networks_for_nodegroup(
cluster_id=self.cluster_db.id, node_group=node_group,
cidr_start=cidr_start)
self.assertEqual(resp.status_code, 200)
self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq
).delete(synchronize_session=False)
self.env.create_nodes_w_interfaces_count(
nodes_count=1,
if_count=2,
roles=['compute'],
pending_addition=True,
cluster_id=self.cluster_db.id,
group_id=node_group.id,
ip='{0}.9.{1}'.format(cidr_start, node_address))
def _check_routes_count(self, count):
objects.Cluster.prepare_for_deployment(self.cluster_db)
facts = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in facts:
if node['uid'] == 'master':
continue
endpoints = node['network_scheme']['endpoints']
for name, descr in six.iteritems(endpoints):
if descr['IP'] == 'none':
self.assertNotIn('routes', descr)
else:
self.assertEqual(len(descr['routes']), count)
def test_routes_with_no_shared_networks_2_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
# all networks have different CIDRs
self._check_routes_count(1)
def test_routes_with_no_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.77', 3)
# all networks have different CIDRs
self._check_routes_count(2)
def test_routes_with_shared_networks_3_nodegroups(self):
self._add_node_group_with_node('199.99', 3)
self._add_node_group_with_node('199.99', 4)
# networks in two racks have equal CIDRs
self._check_routes_count(1)
# class TestMultiNodeGroupsSerialization80MixIn(
# TestSerializerConverter80To90MixIn,
# BaseDeploymentSerializer
# ):
# def setUp(self):
# super(TestMultiNodeGroupsSerialization80MixIn, self).setUp()
# cluster = self.env.create(
# release_kwargs={'version': self.env_version},
# cluster_kwargs={
# 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
# 'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan}
# )
# self.env.create_nodes_w_interfaces_count(
# nodes_count=3,
# if_count=2,
# roles=['controller', 'cinder'],
# pending_addition=True,
# cluster_id=cluster['id'])
# self.cluster_db = self.db.query(models.Cluster).get(cluster['id'])
# cluster.extensions = ['volume_manager', 'converted_serializers']
# self.serializer = self.create_serializer(cluster)
#
# def _add_node_group_with_node(self, cidr_start, node_address):
# node_group = self.env.create_node_group(
# api=False, cluster_id=self.cluster_db.id,
# name='ng_' + cidr_start + '_' + str(node_address))
#
# with mock.patch.object(rpc, 'cast'):
# resp = self.env.setup_networks_for_nodegroup(
# cluster_id=self.cluster_db.id, node_group=node_group,
# cidr_start=cidr_start)
# self.assertEqual(resp.status_code, 200)
#
# self.db.query(models.Task).filter_by(
# name=consts.TASK_NAMES.update_dnsmasq
# ).delete(synchronize_session=False)
#
# self.env.create_nodes_w_interfaces_count(
# nodes_count=1,
# if_count=2,
# roles=['compute'],
# pending_addition=True,
# cluster_id=self.cluster_db.id,
# group_id=node_group.id,
# ip='{0}.9.{1}'.format(cidr_start, node_address))
#
# def _check_routes_count(self, count):
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# facts = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
#
# for node in facts:
# if node['uid'] == 'master':
# continue
# endpoints = node['network_scheme']['endpoints']
# for name, descr in six.iteritems(endpoints):
# if descr['IP'] == 'none':
# self.assertNotIn('routes', descr)
# else:
# self.assertEqual(len(descr['routes']), count)
#
# def test_routes_with_no_shared_networks_2_nodegroups(self):
# self._add_node_group_with_node('199.99', 3)
# # all networks have different CIDRs
# self._check_routes_count(1)
#
# def test_routes_with_no_shared_networks_3_nodegroups(self):
# self._add_node_group_with_node('199.99', 3)
# self._add_node_group_with_node('199.77', 3)
# # all networks have different CIDRs
# self._check_routes_count(2)
#
# def test_routes_with_shared_networks_3_nodegroups(self):
# self._add_node_group_with_node('199.99', 3)
# self._add_node_group_with_node('199.99', 4)
# # networks in two racks have equal CIDRs
# self._check_routes_count(1)
class TestBlockDeviceDevicesSerialization80MixIn(
TestSerializerConverter80To90MixIn,
BaseDeploymentSerializer
):
def setUp(self):
super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp()
self.cluster = self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
self.cluster.extensions = ['volume_manager', 'converted_serializers']
self.serializer = self.create_serializer(self.cluster_db)
def test_block_device_disks(self):
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['cinder-block-device']
)
self.env.create_node(
cluster_id=self.cluster_db.id,
roles=['controller']
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized_for_astute:
if node['uid'] == 'master':
continue
self.assertIn("node_volumes", node)
for node_volume in node["node_volumes"]:
if node_volume["id"] == "cinder-block-device":
self.assertEqual(node_volume["volumes"], [])
else:
self.assertNotEqual(node_volume["volumes"], [])
# class TestBlockDeviceDevicesSerialization80MixIn(
# TestSerializerConverter80To90MixIn,
# BaseDeploymentSerializer
# ):
# def setUp(self):
# super(TestBlockDeviceDevicesSerialization80MixIn, self).setUp()
# self.cluster = self.env.create(
# release_kwargs={'version': self.env_version},
# cluster_kwargs={
# 'mode': consts.CLUSTER_MODES.ha_compact,
# 'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
# 'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
# self.cluster_db = self.db.query(models.Cluster).
# get(self.cluster['id'])
# self.cluster.extensions = ['volume_manager', 'converted_serializers']
# self.serializer = self.create_serializer(self.cluster_db)
#
# def test_block_device_disks(self):
# self.env.create_node(
# cluster_id=self.cluster_db.id,
# roles=['cinder-block-device']
# )
# self.env.create_node(
# cluster_id=self.cluster_db.id,
# roles=['controller']
# )
# objects.Cluster.prepare_for_deployment(self.cluster_db)
# serialized_for_astute = self.serializer.serialize(
# self.cluster_db, self.cluster_db.nodes)
# for node in serialized_for_astute:
# if node['uid'] == 'master':
# continue
# self.assertIn("node_volumes", node)
# for node_volume in node["node_volumes"]:
# if node_volume["id"] == "cinder-block-device":
# self.assertEqual(node_volume["volumes"], [])
# else:
# self.assertNotEqual(node_volume["volumes"], [])
class TestSerializeInterfaceDriversData80MixIn(