don't merge common_attrs with node data in deployment_info

The size of deployment_info grows as n^2 with the
number of nodes. That's because common_attrs, which is
merged into each node's data, contains info about all nodes.

For example, for 600 nodes we store about 1 GB of data in
the database. So as a first step let's store common_attrs
separately in the deployment_info structure, both in the
Python code and in the database.
Also removed old tests for migrations, which are not related
to the actual database state.

Change-Id: I431062b3f9c8dedd407570729166072b780dc59a
Partial-Bug: #1596987
This commit is contained in:
Dmitry Guryanov 2016-08-25 17:31:50 +03:00 committed by Bulat Gaifullin
parent 0643311964
commit 7a83ee0cac
26 changed files with 617 additions and 711 deletions

View File

@ -170,12 +170,15 @@ class DefaultDeploymentInfo(DefaultOrchestratorInfo):
def _serialize(self, cluster, nodes):
if objects.Release.is_lcm_supported(cluster.release):
return deployment_serializers.serialize_for_lcm(
serialized = deployment_serializers.serialize_for_lcm(
cluster, nodes, ignore_customized=True
)
graph = orchestrator_graph.AstuteGraph(cluster)
return deployment_serializers.serialize(
graph, cluster, nodes, ignore_customized=True)
else:
graph = orchestrator_graph.AstuteGraph(cluster)
serialized = deployment_serializers.serialize(
graph, cluster, nodes, ignore_customized=True)
return _deployment_info_in_compatible_format(serialized)
class DefaultPrePluginsHooksInfo(DefaultOrchestratorInfo):
@ -212,10 +215,25 @@ class ProvisioningInfo(OrchestratorInfo):
class DeploymentInfo(OrchestratorInfo):
def get_orchestrator_info(self, cluster):
return objects.Cluster.get_deployment_info(cluster)
return _deployment_info_in_compatible_format(
objects.Cluster.get_deployment_info(cluster)
)
def update_orchestrator_info(self, cluster, data):
return objects.Cluster.replace_deployment_info(cluster, data)
if isinstance(data, list):
# FIXME(bgaifullin) need to update fuelclient
# use uid common to determine cluster attributes
nodes = {n['uid']: n for n in data if 'uid' in n}
custom_info = {
'common': nodes.pop('common', {}),
'nodes': nodes
}
else:
custom_info = data
return _deployment_info_in_compatible_format(
objects.Cluster.replace_deployment_info(cluster, custom_info)
)
class RunMixin(object):
@ -474,3 +492,14 @@ class SerializedTasksHandler(NodesFilterMixin, BaseHandler):
except errors.TaskBaseDeploymentNotAllowed as exc:
raise self.http(400, msg=six.text_type(exc))
def _deployment_info_in_compatible_format(depoyment_info):
# FIXME(bgaifullin) need to update fuelclient
# uid 'common' because fuelclient expects list of dicts, where
# each dict contains field 'uid', which will be used as name of file
data = depoyment_info.get('nodes', [])
common = depoyment_info.get('common')
if common:
data.append(dict(common, uid='common'))
return data

View File

@ -51,6 +51,7 @@ rule_to_pick_bootdisk = [
def upgrade():
upgrade_cluster_attributes()
upgrade_release_with_rules_to_pick_bootable_disk()
upgrade_task_model()
upgrade_deployment_graphs_attributes()
@ -60,6 +61,7 @@ def upgrade():
def downgrade():
downgrade_cluster_attributes()
downgrade_deployment_history_summary()
downgrade_node_error_type()
downgrade_orchestrator_task_types()
@ -247,3 +249,56 @@ def downgrade_node_error_type():
def downgrade_deployment_history_summary():
op.drop_column('deployment_history', 'summary')
def upgrade_cluster_attributes():
    """Migrate clusters.replaced_deployment_info to the new dict format.

    Rows already stored as dicts are left untouched; legacy list values
    are reset to an empty dict.
    """
    connection = op.get_bind()
    select_query = sa.sql.text(
        "SELECT id, replaced_deployment_info FROM clusters"
        " WHERE replaced_deployment_info IS NOT NULL"
    )
    update_query = sa.sql.text(
        "UPDATE clusters SET replaced_deployment_info = :info "
        "WHERE id = :id"
    )
    for cluster_id, info in connection.execute(select_query):
        if isinstance(jsonutils.loads(info), dict):
            # already in the new format
            continue
        # replaced_deployment_info does not contain value since 5.1
        # replaced_deployment_info was moved from cluster to nodes table
        connection.execute(update_query,
                           id=cluster_id,
                           info=jsonutils.dumps({}))
def downgrade_cluster_attributes():
    """Revert clusters.replaced_deployment_info to the legacy list format.

    Rows already stored as lists are left untouched; dict values are reset
    to an empty list.
    """
    connection = op.get_bind()
    select_query = sa.sql.text(
        "SELECT id, replaced_deployment_info FROM clusters"
        " WHERE replaced_deployment_info IS NOT NULL"
    )
    update_query = sa.sql.text(
        "UPDATE clusters SET replaced_deployment_info = :info "
        "WHERE id = :id"
    )
    for cluster_id, info in connection.execute(select_query):
        if isinstance(jsonutils.loads(info), list):
            # already in the legacy format
            continue
        connection.execute(update_query,
                           id=cluster_id,
                           info=jsonutils.dumps([]))

View File

@ -108,7 +108,8 @@ class Cluster(Base):
cascade="delete"
)
replaced_deployment_info = Column(
MutableList.as_mutable(JSON), default=[])
MutableDict.as_mutable(JSON), default={}
)
replaced_provisioning_info = Column(
MutableDict.as_mutable(JSON), default={})
is_customized = Column(Boolean, default=False)

View File

@ -54,7 +54,7 @@ class TestBlockDeviceDevicesSerialization80(BaseDeploymentSerializer):
AstuteGraph(self.cluster_db),
self.cluster_db,
self.cluster_db.nodes)
for node in serialized_for_astute:
for node in serialized_for_astute['nodes']:
self.assertIn("node_volumes", node)
for node_volume in node["node_volumes"]:
if node_volume["id"] == "cinder-block-device":
@ -184,7 +184,7 @@ class TestDeploymentAttributesSerialization80(
self.cluster_db,
self.cluster_db.nodes)
for node in serialized_for_astute:
for node in serialized_for_astute['nodes']:
self.assertIn("node_volumes", node)
self.assertItemsEqual(
expected_node_volumes_hash, node["node_volumes"])
@ -219,26 +219,26 @@ class TestCephPgNumOrchestratorSerialize(OrchestratorSerializerTestBase):
return deployment_serializers.serialize(
AstuteGraph(cluster),
cluster,
cluster.nodes)
cluster.nodes)['common']
def test_pg_num_no_osd_nodes(self):
cluster = self.create_env([
{'roles': ['controller']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 128)
self.assertEqual(data['storage']['pg_num'], 128)
def test_pg_num_1_osd_node(self):
cluster = self.create_env([
{'roles': ['controller', 'ceph-osd']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 256)
self.assertEqual(data['storage']['pg_num'], 256)
def test_pg_num_1_osd_node_repl_4(self):
cluster = self.create_env(
[{'roles': ['controller', 'ceph-osd']}],
'4')
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 128)
self.assertEqual(data['storage']['pg_num'], 128)
def test_pg_num_3_osd_nodes(self):
cluster = self.create_env([
@ -246,4 +246,4 @@ class TestCephPgNumOrchestratorSerialize(OrchestratorSerializerTestBase):
{'roles': ['compute', 'ceph-osd']},
{'roles': ['compute', 'ceph-osd']}])
data = self.serialize(cluster)
self.assertEqual(data[0]['storage']['pg_num'], 512)
self.assertEqual(data['storage']['pg_num'], 512)

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import utils
class TransactionContext(object):
def __init__(self, new_state, old_state=None, **kwargs):
@ -27,7 +29,11 @@ class TransactionContext(object):
self.options = kwargs
def get_new_data(self, node_id):
return self.new[node_id]
return utils.dict_merge(self.new['common'],
self.new['nodes'][node_id])
def get_old_data(self, node_id, task_id):
return self.old.get(task_id, {}).get(node_id, {})
node_info = utils.get_in(self.old, task_id, 'nodes', node_id)
if not node_info:
return {}
return utils.dict_merge(self.old[task_id]['common'], node_info)

View File

@ -638,8 +638,8 @@ class Cluster(NailgunObject):
net_manager.clear_assigned_networks(node)
net_manager.clear_bond_configuration(node)
cls.replace_provisioning_info_on_nodes(instance, [], nodes_to_remove)
cls.replace_deployment_info_on_nodes(instance, [], nodes_to_remove)
cls.replace_provisioning_info_on_nodes(instance, {}, nodes_to_remove)
cls.replace_deployment_info_on_nodes(instance, {}, nodes_to_remove)
objects.NodeCollection.reset_network_template(nodes_to_remove)
objects.NodeCollection.reset_attributes(nodes_to_remove)
@ -688,14 +688,24 @@ class Cluster(NailgunObject):
@classmethod
def replace_provisioning_info_on_nodes(cls, instance, data, nodes):
if isinstance(data, list):
data = {n.get('uid'): n for n in data}
for node in nodes:
node_data = next((n for n in data if node.uid == n.get('uid')), {})
node.replaced_provisioning_info = node_data
node.replaced_provisioning_info = data.get(node.uid, {})
@classmethod
def replace_deployment_info_on_nodes(cls, instance, data, nodes):
for node in instance.nodes:
node_data = [n for n in data if node.uid == n.get('uid')]
if isinstance(data, list):
data = {n.get('uid'): n for n in data}
for node in nodes:
node_data = data.get(node.uid, [])
# replaced deployment info for node should be list
# because before in previous versions of nailgun
# node info will be per role, not per node
if isinstance(node_data, dict):
node_data = [node_data]
node.replaced_deployment_info = node_data
@classmethod
@ -710,7 +720,10 @@ class Cluster(NailgunObject):
@classmethod
def replace_deployment_info(cls, instance, data):
instance.is_customized = True
cls.replace_deployment_info_on_nodes(instance, data, instance.nodes)
instance.replaced_deployment_info = data.get('common', {})
cls.replace_deployment_info_on_nodes(
instance, data.get('nodes', {}), instance.nodes
)
return cls.get_deployment_info(instance)
@classmethod
@ -728,10 +741,16 @@ class Cluster(NailgunObject):
@classmethod
def get_deployment_info(cls, instance):
data = []
nodes = []
for node in instance.nodes:
if node.replaced_deployment_info:
data.extend(node.replaced_deployment_info)
nodes.extend(node.replaced_deployment_info)
data = {}
if nodes:
data['nodes'] = nodes
if instance.replaced_deployment_info:
data['common'] = instance.replaced_deployment_info
return data
@classmethod

View File

@ -47,10 +47,12 @@ class Transaction(NailgunObject):
@classmethod
def attach_deployment_info(cls, instance, deployment_info):
for node_uid, node_di in deployment_info.items():
for uid, dinfo in deployment_info['nodes'].items():
NodeDeploymentInfo.create({'task_id': instance.id,
'node_uid': node_uid,
'deployment_info': node_di})
'node_uid': uid,
'deployment_info': dinfo})
if 'common' in deployment_info:
instance.deployment_info = deployment_info['common']
@classmethod
def get_deployment_info(cls, instance, node_uids=None):
@ -63,9 +65,12 @@ class Transaction(NailgunObject):
node_di_list = NodeDeploymentInfoCollection.filter_by_list(
node_di_list, "node_uid", node_uids)
deployment_info = {node_di.node_uid: node_di.deployment_info
for node_di in node_di_list}
return deployment_info
nodes_info = {node_di.node_uid: node_di.deployment_info
for node_di in node_di_list}
if nodes_info or instance.deployment_info:
return {'common': instance.deployment_info or {},
'nodes': nodes_info}
return {}
@classmethod
def attach_network_settings(cls, instance, settings):

View File

@ -74,6 +74,12 @@ class DeploymentMultinodeSerializer(object):
try:
self.initialize(cluster)
common_attrs = self.get_common_attrs(cluster)
if not ignore_customized and cluster.replaced_deployment_info:
# patch common attributes with custom deployment info
utils.dict_update(
common_attrs, cluster.replaced_deployment_info
)
extensions.fire_callback_on_cluster_serialization_for_deployment(
cluster, common_attrs
)
@ -103,10 +109,13 @@ class DeploymentMultinodeSerializer(object):
# changes in tasks introduced during granular deployment,
# and that mech should be used
self.set_tasks(serialized_nodes)
deployment_info = {'common': common_attrs,
'nodes': serialized_nodes}
finally:
self.finalize()
return serialized_nodes
return deployment_info
def serialize_generated(self, common_attrs, nodes):
serialized_nodes = self.serialize_nodes(common_attrs, nodes)
@ -120,7 +129,7 @@ class DeploymentMultinodeSerializer(object):
extensions.fire_callback_on_node_serialization_for_deployment(
nodes_map[node_data['uid']], node_data
)
yield utils.dict_merge(common_attrs, node_data)
yield node_data
def serialize_customized(self, common_attrs, nodes):
for node in nodes:
@ -155,8 +164,7 @@ class DeploymentMultinodeSerializer(object):
net_serializer = self.get_net_provider_serializer(cluster)
net_common_attrs = net_serializer.get_common_attrs(cluster, attrs)
attrs = utils.dict_merge(attrs, net_common_attrs)
utils.dict_update(attrs, net_common_attrs)
self.inject_list_of_plugins(attrs, cluster)
return attrs
@ -864,13 +872,21 @@ def _invoke_serializer(serializer, cluster, nodes, ignore_customized):
def serialize(orchestrator_graph, cluster, nodes, ignore_customized=False):
"""Serialization depends on deployment mode."""
return _invoke_serializer(
serialized = _invoke_serializer(
get_serializer_for_cluster(cluster)(orchestrator_graph),
cluster, nodes, ignore_customized
)
return serialized
def serialize_for_lcm(cluster, nodes, ignore_customized=False):
return _invoke_serializer(
DeploymentLCMSerializer(), cluster, nodes, ignore_customized
)
def deployment_info_to_legacy(deployment_info):
common_attrs = deployment_info['common']
nodes = [utils.dict_merge(common_attrs, n)
for n in deployment_info['nodes']]
return nodes

View File

@ -42,6 +42,8 @@ from nailgun import lcm
from nailgun.logger import logger
from nailgun import objects
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator import orchestrator_graph
from nailgun.orchestrator import provisioning_serializers
from nailgun.orchestrator import stages
@ -161,10 +163,13 @@ class BaseDeploymentTask(object):
@classmethod
def _save_deployment_info(cls, transaction, deployment_info):
# TODO(bgaifullin) need to rework serializers, it should return dict
# instead of list
normalized = {node['uid']: node for node in deployment_info}
objects.Transaction.attach_deployment_info(transaction, normalized)
normalized = {
'common': deployment_info['common'],
'nodes': {n['uid']: n for n in deployment_info['nodes']}
}
objects.Transaction.attach_deployment_info(
transaction, normalized)
return normalized
@ -326,12 +331,17 @@ class DeploymentTask(BaseDeploymentTask):
graph, transaction.cluster, nodes)
cls._save_deployment_info(transaction, serialized_cluster)
serialized_cluster = deployment_info_to_legacy(serialized_cluster)
if affected_nodes:
graph.reexecutable_tasks(events)
serialized_cluster.extend(deployment_serializers.serialize(
serialized_affected_nodes = deployment_serializers.serialize(
graph, transaction.cluster, affected_nodes
))
)
serialized_affected_nodes = deployment_info_to_legacy(
serialized_affected_nodes)
serialized_cluster.extend(serialized_affected_nodes)
nodes = nodes + affected_nodes
pre_deployment = stages.pre_deployment_serialize(
graph, transaction.cluster, nodes,
@ -377,6 +387,8 @@ class DeploymentTask(BaseDeploymentTask):
None, transaction.cluster, nodes
)
cls._save_deployment_info(transaction, serialized_cluster)
serialized_cluster = deployment_info_to_legacy(serialized_cluster)
logger.info("cluster serialization is finished.")
tasks_events = events and \
task_based_deployment.TaskEvents('reexecute_on', events)
@ -477,12 +489,14 @@ class ClusterTransaction(DeploymentTask):
for _, node_uid, task_name in data:
task_state = state.setdefault(task_name, {})
task_state.setdefault(node_uid, {})
task_state.setdefault('nodes', {})
if cls.is_node_for_redeploy(nodes.get(node_uid)):
task_state[node_uid] = {}
task_state['nodes'][node_uid] = {}
else:
task_state[node_uid] = deployment_info.get(node_uid, {})
node_info = deployment_info['nodes'].get(node_uid, {})
task_state['nodes'][node_uid] = node_info
task_state['common'] = deployment_info['common']
return state
@ -504,7 +518,7 @@ class ClusterTransaction(DeploymentTask):
tasks = list(cls.mark_skipped(tasks, selected_task_ids))
if force:
current_state = {}
current_state = {'common': {}, 'nodes': {}}
else:
current_state = cls.get_current_state(
transaction.cluster, nodes, tasks)
@ -512,8 +526,9 @@ class ClusterTransaction(DeploymentTask):
expected_state = cls._save_deployment_info(
transaction, deployment_info
)
# Added cluster state
expected_state[None] = {}
expected_state['nodes'][None] = {}
context = lcm.TransactionContext(expected_state, current_state)
logger.debug("tasks serialization is started.")
@ -536,6 +551,7 @@ class ClusterTransaction(DeploymentTask):
tasks,
role_resolver,
)
logger.info("tasks serialization is finished.")
return {
"tasks_directory": directory,

View File

@ -89,7 +89,9 @@ class TestMellanox(OrchestratorSerializerTestBase):
mellanox=True)
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
for data in serialized_data:
common_attrs = serialized_data['common']
for data in serialized_data['nodes']:
# Check plugin parameters
self.assertIn('physical_port', data['neutron_mellanox'])
self.assertIn('ml2_eswitch', data['neutron_mellanox'])
@ -101,21 +103,21 @@ class TestMellanox(OrchestratorSerializerTestBase):
self.assertIn('apply_profile_patch', eswitch_dict)
self.assertEqual(True, eswitch_dict['apply_profile_patch'])
# Check L2 settings
quantum_settings_l2 = data['quantum_settings']['L2']
self.assertIn('mechanism_drivers', quantum_settings_l2)
self.assertIn('mlnx', quantum_settings_l2['mechanism_drivers'])
self.assertIn('type_drivers', quantum_settings_l2)
seg_type = self.cluster.network_config.segmentation_type
self.assertEquals(
quantum_settings_l2['type_drivers'],
'{0},flat,local'.format(seg_type)
)
self.assertIn(self.segment_type,
quantum_settings_l2['type_drivers'])
self.assertIn('tenant_network_types', quantum_settings_l2)
self.assertIn(self.segment_type,
quantum_settings_l2['tenant_network_types'])
# Check L2 settings
quantum_settings_l2 = common_attrs['quantum_settings']['L2']
self.assertIn('mechanism_drivers', quantum_settings_l2)
self.assertIn('mlnx', quantum_settings_l2['mechanism_drivers'])
self.assertIn('type_drivers', quantum_settings_l2)
seg_type = self.cluster.network_config.segmentation_type
self.assertEquals(
quantum_settings_l2['type_drivers'],
'{0},flat,local'.format(seg_type)
)
self.assertIn(self.segment_type,
quantum_settings_l2['type_drivers'])
self.assertIn('tenant_network_types', quantum_settings_l2)
self.assertIn(self.segment_type,
quantum_settings_l2['tenant_network_types'])
def test_serialize_mellanox_iser_enabled_untagged(self):
# Serialize cluster
@ -125,7 +127,7 @@ class TestMellanox(OrchestratorSerializerTestBase):
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
for data in serialized_data:
for data in serialized_data['nodes']:
# Check Mellanox iSER values
self.assertIn('storage_parent', data['neutron_mellanox'])
self.assertIn('iser_interface_name', data['neutron_mellanox'])
@ -155,7 +157,7 @@ class TestMellanox(OrchestratorSerializerTestBase):
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)
for data in serialized_data:
for data in serialized_data['nodes']:
# Check Mellanox iSER values
self.assertIn('storage_parent', data['neutron_mellanox'])
self.assertIn('iser_interface_name', data['neutron_mellanox'])

View File

@ -24,7 +24,6 @@ from nailgun import objects
from nailgun.objects import DeploymentGraph
from nailgun.orchestrator.task_based_deployment import TaskProcessor
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import mock_rpc
from nailgun.utils import reverse
@ -48,13 +47,12 @@ class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):
def setUp(self):
super(TestDefaultOrchestratorInfoHandlers, self).setUp()
cluster = self.env.create(
self.cluster = self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}])
self.cluster = self.db.query(Cluster).get(cluster['id'])
{'roles': ['cinder'], 'pending_addition': True}]
)
def customization_handler_helper(self, handler_name, get_info, facts):
resp = self.app.put(
@ -88,6 +86,7 @@ class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):
# and we check only that nodes are included to result
expected_node_uids = {n.uid for n in cluster.nodes}
actual_node_uids = {n['uid'] for n in resp.json_body}
self.assertIn('common', actual_node_uids)
self.assertTrue(expected_node_uids.issubset(actual_node_uids))
def test_default_deployment_handler(self):
@ -143,8 +142,8 @@ class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):
resp = self.app.get(url, headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(2, len(resp.json_body))
actual_uids = [node['uid'] for node in resp.json_body]
node_ids.append('common')
self.assertItemsEqual(actual_uids, node_ids)
def test_cluster_provisioning_customization(self):
@ -160,12 +159,13 @@ class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):
)
def test_cluster_deployment_customization(self):
cluster = self.cluster
facts = []
for node in self.env.nodes:
facts.append({"key": "value", "uid": node.uid})
self.customization_handler_helper(
'DeploymentInfo',
lambda: objects.Cluster.get_deployment_info(self.cluster),
lambda: objects.Cluster.get_deployment_info(cluster)['nodes'],
facts
)

View File

@ -34,6 +34,8 @@ from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import\
DeploymentHASerializer
from nailgun.orchestrator.deployment_serializers import\
@ -169,9 +171,9 @@ class TestReplacedDeploymentInfoSerialization(OrchestratorSerializerTestBase):
)
serialized_data = self.serializer.serialize(self.cluster, [node])
# verify that task list is not empty
self.assertTrue(serialized_data[0]['tasks'])
self.assertTrue(serialized_data['nodes'][0]['tasks'])
# verify that priority is preserved
self.assertEqual(serialized_data[0]['priority'], 'XXX')
self.assertEqual(serialized_data['nodes'][0]['priority'], 'XXX')
# TODO(awoodward): multinode deprecation: probably has duplicates
@ -327,14 +329,14 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
def test_flatdhcp_manager(self):
facts = self.serializer.serialize(self.cluster, self.cluster.nodes)
for fact in facts:
self.assertEqual(
fact['novanetwork_parameters']['network_manager'],
'FlatDHCPManager')
self.assertEqual(
fact['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
fact['novanetwork_parameters']['network_size'], 65536)
common = facts['common']
self.assertEqual(
common['novanetwork_parameters']['network_manager'],
'FlatDHCPManager')
self.assertEqual(
common['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
common['novanetwork_parameters']['network_size'], 65536)
def test_vlan_manager(self):
data = {'networking_parameters': {'net_manager': 'VlanManager'}}
@ -344,19 +346,22 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
headers=self.default_headers,
expect_errors=False)
facts = self.serializer.serialize(self.cluster, self.cluster.nodes)
common = facts['common']
facts = facts['nodes']
for fact in facts:
self.assertEqual(fact['vlan_interface'], 'eth0')
self.assertEqual(fact['fixed_interface'], 'eth0')
self.assertEqual(
fact['novanetwork_parameters']['network_manager'],
'VlanManager')
self.assertEqual(
fact['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
fact['novanetwork_parameters']['vlan_start'], 103)
self.assertEqual(
fact['novanetwork_parameters']['network_size'], 256)
self.assertEqual(
common['novanetwork_parameters']['network_manager'],
'VlanManager')
self.assertEqual(
common['novanetwork_parameters']['num_networks'], 1)
self.assertEqual(
common['novanetwork_parameters']['vlan_start'], 103)
self.assertEqual(
common['novanetwork_parameters']['network_size'], 256)
def test_floating_ranges_generation(self):
# Set ip ranges for floating ips
@ -368,6 +373,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
self.db.commit()
facts = self.serializer.serialize(self.cluster, self.cluster.nodes)
facts = deployment_info_to_legacy(facts)
for fact in facts:
self.assertEqual(
fact['floating_network_range'],
@ -496,7 +502,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
scheme = node['network_scheme']
self.assertEqual(
@ -557,7 +563,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
manager=consts.NOVA_NET_MANAGERS.VlanManager
)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
scheme = node['network_scheme']
self.assertEqual(
@ -627,7 +633,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
'mode': consts.BOND_MODES.balance_rr
})
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
self.assertEqual(
node['network_scheme']['transformations'],
@ -677,7 +683,7 @@ class TestNovaNetworkOrchestratorSerializer61(OrchestratorSerializerTestBase):
'mode': consts.BOND_MODES.balance_rr
})
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
self.assertEqual(
node['network_scheme']['roles'],
@ -844,7 +850,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
cluster = self.create_env(segment_type='vlan')
self.add_nics_properties(cluster)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
self.check_vlan_schema(facts, [
{'action': 'add-br',
@ -901,7 +907,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
self.db.flush()
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
self.check_vlan_schema(facts, [
{'action': 'add-br',
'name': 'br-fw-admin'},
@ -960,7 +966,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
'mtu': 9000
})
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
transformations = [
{'action': 'add-br',
@ -1011,7 +1017,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
cluster = self.create_env(segment_type='gre')
self.add_nics_properties(cluster)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
node_db = objects.Node.get_by_uid(node['uid'])
is_public = objects.Node.should_have_public(node_db)
@ -1111,7 +1117,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
'mtu': 9000
})
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
transformations = [
{'action': 'add-br',
@ -1204,7 +1210,7 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
objects.Cluster.prepare_for_deployment(cluster)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for node in facts:
node_db = objects.Node.get_by_uid(node['uid'])
is_public = objects.Node.should_have_public(node_db)
@ -1347,7 +1353,7 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
controllers, key=attrgetter('id'), reverse=True)
result_nodes = self.serializer.serialize(
self.cluster, reverse_sorted_controllers)
self.cluster, reverse_sorted_controllers)['nodes']
high_priority = sorted(result_nodes, key=itemgetter('priority'))[0]
self.assertEqual(high_priority['role'], 'primary-controller')
@ -1536,28 +1542,27 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.assertEqual(serialized_node, expected_node)
def test_neutron_vlan_ids_tag_present_on_6_0_env(self):
serialized_nodes = self.serialize_env_w_version('2014.2-6.0')
for node in serialized_nodes:
serialized = self.serialize_env_w_version('2014.2-6.0')
for node in serialized['nodes']:
for item in node['network_scheme']['transformations']:
if 'tags' in item:
self.assertEqual(item['tags'], item['vlan_ids'])
def check_5x_60_neutron_attrs(self, version):
serialized_nodes = self.serialize_env_w_version(version)
for node in serialized_nodes:
self.assertEqual(
{
"network_type": "local",
"segment_id": None,
"router_ext": True,
"physnet": None
},
node['quantum_settings']['predefined_networks'][
'admin_floating_net']['L2']
)
self.assertFalse(
'physnet1' in node['quantum_settings']['L2']['phys_nets']
)
common_attrs = self.serialize_env_w_version(version)['common']
self.assertEqual(
{
"network_type": "local",
"segment_id": None,
"router_ext": True,
"physnet": None
},
common_attrs['quantum_settings']['predefined_networks'][
'admin_floating_net']['L2']
)
self.assertFalse(
'physnet1' in common_attrs['quantum_settings']['L2']['phys_nets']
)
def test_serialize_neutron_attrs_on_6_0_env(self):
self.check_5x_60_neutron_attrs("2014.2-6.0")
@ -1566,25 +1571,24 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.check_5x_60_neutron_attrs("2014.1.1-5.1")
def check_50x_neutron_attrs(self, version):
serialized_nodes = self.serialize_env_w_version(version)
for node in serialized_nodes:
self.assertEqual(
{
"network_type": "flat",
"segment_id": None,
"router_ext": True,
"physnet": "physnet1"
},
node['quantum_settings']['predefined_networks'][
'admin_floating_net']['L2']
)
self.assertEqual(
{
"bridge": "br-ex",
"vlan_range": None
},
node['quantum_settings']['L2']['phys_nets']['physnet1']
)
common_attrs = self.serialize_env_w_version(version)['common']
self.assertEqual(
{
"network_type": "flat",
"segment_id": None,
"router_ext": True,
"physnet": "physnet1"
},
common_attrs['quantum_settings']['predefined_networks'][
'admin_floating_net']['L2']
)
self.assertEqual(
{
"bridge": "br-ex",
"vlan_range": None
},
common_attrs['quantum_settings']['L2']['phys_nets']['physnet1']
)
def test_serialize_neutron_attrs_on_5_0_2_env(self):
self.check_50x_neutron_attrs("2014.1.1-5.0.2")
@ -1709,10 +1713,20 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.set_assign_public_to_all_nodes(self.cluster, assign)
objects.Cluster.prepare_for_deployment(self.cluster)
serialized_nodes = self.serializer.serialize(self.cluster,
self.cluster.nodes)
serialized = self.serializer.serialize(self.cluster,
self.cluster.nodes)
for node_attrs in serialized['common']['nodes']:
is_public_for_role = objects.Node.should_have_public(
objects.Node.get_by_mac_or_uid(
node_uid=int(node_attrs['uid'])))
self.assertEqual('public_address' in node_attrs,
is_public_for_role)
self.assertEqual('public_netmask' in node_attrs,
is_public_for_role)
need_public_nodes_count = set()
for node in serialized_nodes:
for node in serialized['nodes']:
node_db = self.db.query(Node).get(int(node['uid']))
is_public = objects.Node.should_have_public(node_db)
if is_public:
@ -1725,15 +1739,6 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
is_public
)
for node_attrs in node['nodes']:
is_public_for_role = objects.Node.should_have_public(
objects.Node.get_by_mac_or_uid(
node_uid=int(node_attrs['uid'])))
self.assertEqual('public_address' in node_attrs,
is_public_for_role)
self.assertEqual('public_netmask' in node_attrs,
is_public_for_role)
self.assertEqual(
{
'action': 'add-br',
@ -1773,9 +1778,10 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.db.add(public_ng)
self.db.commit()
facts = self.serializer.serialize(cluster, cluster.nodes)
serialized = self.serializer.serialize(cluster, cluster.nodes)
common_attrs = serialized['common']
pd_nets = facts[0]["quantum_settings"]["predefined_networks"]
pd_nets = common_attrs["quantum_settings"]["predefined_networks"]
self.assertEqual(
pd_nets["admin_floating_net"]["L3"]["gateway"],
test_gateway
@ -1819,9 +1825,10 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
self.assertEqual(resp.status_code, 200)
objects.Cluster.prepare_for_deployment(cluster)
facts = self.serializer.serialize(cluster, cluster.nodes)
serialized = self.serializer.serialize(cluster, cluster.nodes)
common_attrs = serialized['common']
pd_nets = facts[0]["quantum_settings"]["predefined_networks"]
pd_nets = common_attrs["quantum_settings"]["predefined_networks"]
self.assertEqual(
pd_nets["admin_floating_net"]["L3"]["subnet"],
ng2_networks['public']['cidr']
@ -1837,11 +1844,13 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def test_gre_segmentation(self):
cluster = self.create_env(consts.CLUSTER_MODES.ha_compact, 'gre')
facts = self.serializer.serialize(cluster, cluster.nodes)
serialized = self.serializer.serialize(cluster, cluster.nodes)
common_attrs = serialized['common']
for fact in facts:
self.assertEqual(
fact['quantum_settings']['L2']['segmentation_type'], 'gre')
self.assertEqual(
common_attrs['quantum_settings']['L2']['segmentation_type'], 'gre')
for fact in serialized['nodes']:
self.assertEqual(
'br-prv' in fact['network_scheme']['endpoints'], False)
self.assertEqual(
@ -1850,11 +1859,13 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def test_tun_segmentation(self):
self.new_env_release_version = 'liberty-8.0'
cluster = self.create_env(consts.CLUSTER_MODES.ha_compact, 'tun')
facts = self.serializer.serialize(cluster, cluster.nodes)
serialized = self.serializer.serialize(cluster, cluster.nodes)
common_attrs = serialized['common']
facts = serialized['nodes']
self.assertEqual(
common_attrs['quantum_settings']['L2']['segmentation_type'], 'tun')
for fact in facts:
self.assertEqual(
fact['quantum_settings']['L2']['segmentation_type'], 'tun')
self.assertNotIn(
'br-prv', fact['network_scheme']['endpoints'])
self.assertNotIn(
@ -1870,7 +1881,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
objects.Cluster.prepare_for_deployment(cluster)
serializer = self.create_serializer(cluster)
facts = serializer.serialize(cluster, cluster.nodes)
facts = serializer.serialize(cluster, cluster.nodes)['nodes']
for fact in facts:
ep = fact['network_scheme']['endpoints']
@ -1973,7 +1984,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
cluster = self.db.query(Cluster).get(cluster_id)
self.assertNotIn('vlan_splinters', editable_attrs)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -1995,7 +2006,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
'some_text')
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2016,7 +2027,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
False
)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2041,7 +2052,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
self.assertEqual(editable_attrs['vlan_splinters']['vswitch']['value'],
'kernel_lt')
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2062,7 +2073,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
vlan_set = set(
[ng.vlan_start for ng in cluster.network_groups if ng.vlan_start]
)
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2092,7 +2103,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
vlan_set.update(range(*private_vlan_range))
vlan_set.add(private_vlan_range[1])
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2115,7 +2126,7 @@ class TestVlanSplinters(OrchestratorSerializerTestBase):
cluster.attributes.editable = editable_attrs
self.db.commit()
node = self.serializer.serialize(cluster, cluster.nodes)[0]
node = self.serializer.serialize(cluster, cluster.nodes)['nodes'][0]
interfaces = node['network_scheme']['interfaces']
for iface_attrs in interfaces.itervalues():
self.assertIn('L2', iface_attrs)
@ -2247,7 +2258,7 @@ class TestNeutronOrchestratorSerializerBonds(OrchestratorSerializerTestBase):
['eth1', 'eth2'],
node.id)
facts = self.serialize(cluster)
for node in facts:
for node in facts['nodes']:
transforms = node['network_scheme']['transformations']
bonds = filter(lambda t: t['action'] == 'add-bond',
transforms)
@ -2365,9 +2376,9 @@ class TestNSXOrchestratorSerializer(OrchestratorSerializerTestBase):
def test_serialize_node(self):
serialized_data = self.serializer.serialize(self.cluster,
self.cluster.nodes)[0]
self.cluster.nodes)
q_settings = serialized_data['quantum_settings']
q_settings = serialized_data['common']['quantum_settings']
self.assertIn('L2', q_settings)
self.assertIn('provider', q_settings['L2'])
self.assertEqual(q_settings['L2']['provider'], 'nsx')
@ -2865,5 +2876,5 @@ class TestDeploymentGraphlessSerializers(OrchestratorSerializerTestBase):
def test_serialize_cluster(self):
serialized_data = self.serialize(self.cluster)
self.assertGreater(len(serialized_data), 0)
self.assertNotIn('tasks', serialized_data[0])
self.assertGreater(len(serialized_data[0]['nodes']), 0)
self.assertNotIn('tasks', serialized_data['nodes'][0])
self.assertGreater(len(serialized_data['common']['nodes']), 0)

View File

@ -33,6 +33,8 @@ from nailgun.test import base
from nailgun.test.base import DeploymentTasksTestMixin
from nailgun.utils import reverse
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.orchestrator.orchestrator_graph import AstuteGraph
@ -263,6 +265,8 @@ class BaseTestDeploymentAttributesSerialization70(BaseDeploymentSerializer,
self.serializer = serializer_type(AstuteGraph(self.cluster_db))
self.serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
self.serialized_for_astute = deployment_info_to_legacy(
self.serialized_for_astute)
self.vm_data = self.env.read_fixtures(['vmware_attributes'])
def create_env(self, mode):
@ -401,6 +405,8 @@ class TestDeploymentAttributesSerialization70(
serialized_for_astute = serializer.serialize(
self.cluster_db, self.cluster_db.nodes
)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
vips = node['network_metadata']['vips']
roles = node['network_scheme']['roles']
@ -825,6 +831,7 @@ class TestPluginDeploymentTasksInjection70(base.BaseIntegrationTest):
serializer = \
get_serializer_for_cluster(self.cluster)(graph)
serialized = serializer.serialize(self.cluster, self.cluster.nodes)
serialized = deployment_info_to_legacy(serialized)
serialized_tasks = serialized[0]['tasks']
@ -869,6 +876,7 @@ class TestPluginDeploymentTasksInjection70(base.BaseIntegrationTest):
serializer = \
get_serializer_for_cluster(self.cluster)(graph)
serialized = serializer.serialize(self.cluster, self.cluster.nodes)
serialized = deployment_info_to_legacy(serialized)
serialized_tasks = serialized[0]['tasks']
@ -1021,6 +1029,7 @@ class TestPluginDeploymentTasksInjection70(base.BaseIntegrationTest):
serializer = \
get_serializer_for_cluster(self.cluster)(graph)
serialized = serializer.serialize(self.cluster, self.cluster.nodes)
serialized = deployment_info_to_legacy(serialized)
tasks = serialized[0]['tasks']
release_depl_tasks_ids = ('first-fake-depl-task',
@ -1121,6 +1130,7 @@ class TestRolesSerializationWithPlugins(BaseDeploymentSerializer,
serializer = self._get_serializer(self.cluster)
serialized_data = serializer.serialize(
self.cluster, self.cluster.nodes)
serialized_data = deployment_info_to_legacy(serialized_data)
self.assertItemsEqual(serialized_data[0]['tasks'], [{
'parameters': {
'cwd': '/etc/fuel/plugins/testing_plugin-0.1/',
@ -1155,6 +1165,7 @@ class TestRolesSerializationWithPlugins(BaseDeploymentSerializer,
serializer = self._get_serializer(self.cluster)
serialized_data = serializer.serialize(
self.cluster, self.cluster.nodes)
serialized_data = deployment_info_to_legacy(serialized_data)
self.maxDiff = None
self._compare_tasks([
@ -1218,6 +1229,8 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(self.cluster)
self.serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
self.serialized_for_astute = deployment_info_to_legacy(
self.serialized_for_astute)
def create_env(self, segment_type):
release = self.patch_net_roles_for_release()
@ -1299,6 +1312,8 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(self.cluster)
serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
# 7 node roles on 5 nodes
self.assertEqual(len(serialized_for_astute), 7)
@ -1408,6 +1423,8 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(self.cluster)
serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node_data in serialized_for_astute:
node = objects.Node.get_by_uid(node_data['uid'])
@ -1495,6 +1512,7 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(self.cluster)
facts = serializer(AstuteGraph(self.cluster)).serialize(
self.cluster, self.cluster.nodes)
facts = deployment_info_to_legacy(facts)
for node in facts:
node_db = objects.Node.get_by_uid(node['uid'])
@ -1697,6 +1715,8 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(cluster_db)
self.serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(cluster_db, cluster_db.nodes)
self.serialized_for_astute = deployment_info_to_legacy(
self.serialized_for_astute)
network_roles = [
'management',
@ -1759,6 +1779,8 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer,
serializer = get_serializer_for_cluster(self.cluster)
self.serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
self.serialized_for_astute = deployment_info_to_legacy(
self.serialized_for_astute)
for node_data in self.serialized_for_astute:
node = objects.Node.get_by_uid(node_data['uid'])
# check nodes with assigned public ip

View File

@ -25,6 +25,8 @@ from nailgun.db.sqlalchemy import models
from nailgun import objects
from nailgun import rpc
from nailgun.orchestrator.deployment_serializers import \
deployment_info_to_legacy
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.orchestrator.orchestrator_graph import AstuteGraph
@ -54,6 +56,8 @@ class TestSerializer80Mixin(object):
objects.Cluster.prepare_for_deployment(cluster)
serialized_for_astute = self.serializer.serialize(
cluster, cluster.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
expected_network = {
"network_type": "flat",
@ -317,6 +321,8 @@ class TestDeploymentAttributesSerialization80(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
self.assertEqual(
{
@ -338,6 +344,8 @@ class TestDeploymentAttributesSerialization80(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
transformations = node['network_scheme']['transformations']
baremetal_brs = filter(lambda t: t.get('name') ==
@ -385,6 +393,8 @@ class TestDeploymentAttributesSerialization80(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_for_astute = deployment_info_to_legacy(
serialized_for_astute)
for node in serialized_for_astute:
self.assertIn('plugins', node)
self.assertItemsEqual(
@ -468,6 +478,7 @@ class TestMultiNodeGroupsSerialization80(
objects.Cluster.prepare_for_deployment(self.cluster_db)
facts = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
facts = deployment_info_to_legacy(facts)
for node in facts:
endpoints = node['network_scheme']['endpoints']

View File

@ -91,8 +91,8 @@ class TestDeploymentAttributesSerialization90(
serialised_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
self.assertEqual(len(serialised_for_astute), 1)
node = serialised_for_astute[0]
self.assertEqual(len(serialised_for_astute['nodes']), 1)
node = serialised_for_astute['nodes'][0]
dpdk = node.get('dpdk')
self.assertIsNotNone(dpdk)
self.assertTrue(dpdk.get('enabled'))
@ -170,8 +170,8 @@ class TestDeploymentAttributesSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialised_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
self.assertEqual(len(serialised_for_astute), 1)
node = serialised_for_astute[0]
self.assertEqual(len(serialised_for_astute['nodes']), 1)
node = serialised_for_astute['nodes'][0]
dpdk = node.get('dpdk')
self.assertIsNotNone(dpdk)
self.assertTrue(dpdk.get('enabled'))
@ -240,14 +240,14 @@ class TestDeploymentAttributesSerialization90(
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_node = serialized_for_astute[0]
serialized_node = serialized_for_astute['nodes'][0]
self.assertEqual(serialized_node['dpdk']['ovs_core_mask'], '0x2')
self.assertEqual(serialized_node['dpdk']['ovs_pmd_core_mask'], '0x4')
self.assertEqual(serialized_node['nova']['cpu_pinning'], [5, 6])
node_name = objects.Node.get_slave_name(node)
node_common_attrs = \
serialized_node['network_metadata']['nodes'][node_name]
network_data = serialized_for_astute['common']['network_metadata']
node_common_attrs = network_data['nodes'][node_name]
self.assertTrue(node_common_attrs['nova_cpu_pinning_enabled'])
def test_pinning_cpu_for_dpdk(self):
@ -275,15 +275,15 @@ class TestDeploymentAttributesSerialization90(
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_node = serialized_for_astute[0]
serialized_node = serialized_for_astute['nodes'][0]
self.assertEqual(serialized_node['dpdk']['ovs_core_mask'], '0x2')
self.assertEqual(serialized_node['dpdk']['ovs_pmd_core_mask'], '0x4')
self.assertNotIn('cpu_pinning', serialized_node['nova'])
node_name = objects.Node.get_slave_name(node)
node_common_attrs = \
serialized_node['network_metadata']['nodes'][node_name]
network_data = serialized_for_astute['common']['network_metadata']
node_common_attrs = network_data['nodes'][node_name]
self.assertFalse(node_common_attrs['nova_cpu_pinning_enabled'])
def test_dpdk_hugepages(self):
@ -317,7 +317,7 @@ class TestDeploymentAttributesSerialization90(
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_node = serialized_for_astute[0]
serialized_node = serialized_for_astute['nodes'][0]
self.assertEquals(
[128, 128, 128],
@ -356,7 +356,8 @@ class TestDeploymentAttributesSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_node = serialized_for_astute[0]
serialized_node = serialized_for_astute['nodes'][0]
self.assertNotIn('hugepages', serialized_node)
@ -391,7 +392,8 @@ class TestDeploymentAttributesSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
serialized_node = serialized_for_astute[0]
serialized_node = serialized_for_astute['nodes'][0]
expected = [
{'numa_id': 0, 'size': 2048, 'count': 512},
@ -411,7 +413,7 @@ class TestDeploymentAttributesSerialization90(
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for serialized_node in serialized_for_astute:
for serialized_node in serialized_for_astute['nodes']:
nova = serialized_node.get('nova', {})
self.assertNotIn('cpu_pinning', nova)
@ -419,9 +421,9 @@ class TestDeploymentAttributesSerialization90(
self.assertNotIn('ovs_core_mask', dpdk)
self.assertNotIn('ovs_pmd_core_mask', dpdk)
nodes_attrs = serialized_node['network_metadata']['nodes']
for node_attrs in six.itervalues(nodes_attrs):
self.assertFalse(node_attrs['nova_cpu_pinning_enabled'])
network_data = serialized_for_astute['common']['network_metadata']
for node_attrs in six.itervalues(network_data['nodes']):
self.assertFalse(node_attrs['nova_cpu_pinning_enabled'])
def test_hugepages_disabled(self):
nodes_roles = [['compute'], ['controller']]
@ -434,7 +436,7 @@ class TestDeploymentAttributesSerialization90(
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for serialized_node in serialized_for_astute:
for serialized_node in serialized_for_astute['nodes']:
nova = serialized_node.get('nova', {})
self.assertFalse(nova.get('enable_hugepages', False))
@ -443,9 +445,9 @@ class TestDeploymentAttributesSerialization90(
self.assertNotIn('hugepages', serialized_node)
nodes_attrs = serialized_node['network_metadata']['nodes']
for node_attrs in six.itervalues(nodes_attrs):
self.assertFalse(node_attrs['nova_hugepages_enabled'])
network_data = serialized_for_astute['common']['network_metadata']
for node_attrs in six.itervalues(network_data['nodes']):
self.assertFalse(node_attrs['nova_hugepages_enabled'])
def test_immutable_metadata_key(self):
node = self.env.create_node(
@ -462,10 +464,11 @@ class TestDeploymentAttributesSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized_for_astute = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node_data in serialized_for_astute:
for k, v in six.iteritems(node_data['network_metadata']['nodes']):
node = objects.Node.get_by_uid(v['uid'])
self.assertEqual(objects.Node.permanent_id(node), k)
network_data = serialized_for_astute['common']['network_metadata']
for k, v in six.iteritems(network_data['nodes']):
node = objects.Node.get_by_uid(v['uid'])
self.assertEqual(objects.Node.permanent_id(node), k)
class TestDeploymentLCMSerialization90(
@ -513,11 +516,12 @@ class TestDeploymentLCMSerialization90(
)
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized = self.serializer.serialize(self.cluster_db, [self.node])
self.assertEqual(
{'glance_config': 'value1',
'nova_config': 'value3',
'ceph_config': 'value2'},
serialized[0]['configuration']
serialized['nodes'][0]['configuration']
)
def test_cluster_attributes_in_serialized(self):
@ -536,12 +540,13 @@ class TestDeploymentLCMSerialization90(
"status": self.cluster_db.status,
"mode": self.cluster_db.mode
}
for node_info in serialized:
self.assertEqual(cluster_info, node_info['cluster'])
self.assertEqual(release_info, node_info['release'])
self.assertEqual(cluster_info, serialized['common']['cluster'])
self.assertEqual(release_info, serialized['common']['release'])
self.assertEqual(['compute'], serialized[0]['roles'])
self.assertEqual([consts.TASK_ROLES.master], serialized[1]['roles'])
self.assertEqual(['compute'], serialized['nodes'][0]['roles'])
self.assertEqual(
[consts.TASK_ROLES.master], serialized['nodes'][1]['roles']
)
@mock.patch.object(
plugins.adapters.PluginAdapterBase, 'repo_files',
@ -602,9 +607,10 @@ class TestDeploymentLCMSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized = self.serializer.serialize(
self.cluster_db, self.cluster_db.nodes)
for node in serialized:
self.assertIn('plugins', node)
self.datadiff(plugins_data, node['plugins'], compare_sorted=True)
self.assertIn('plugins', serialized['common'])
self.datadiff(plugins_data, serialized['common']['plugins'],
compare_sorted=True)
def test_serialize_with_customized(self):
objects.Cluster.prepare_for_deployment(self.cluster_db)
@ -614,32 +620,31 @@ class TestDeploymentLCMSerialization90(
objects.Cluster.prepare_for_deployment(self.cluster_db)
cust_serialized = self.serializer.serialize(
self.cluster_db, [self.node])
for item in serialized:
if item['uid'] != consts.MASTER_NODE_UID:
self.assertIn(item, cust_serialized)
else:
self.assertIn(item, cust_serialized)
self.assertEqual(serialized['common'], cust_serialized['common'])
self.assertItemsEqual(serialized['nodes'], cust_serialized['nodes'])
def test_provision_info_serialized(self):
objects.Cluster.prepare_for_deployment(self.cluster_db)
serialized = self.serializer.serialize(self.cluster_db, [self.node])
node_info = next(x for x in serialized if x['uid'] == self.node.uid)
node_info = next(x for x in serialized['nodes']
if x['uid'] == self.node.uid)
self.assertIn('provision', node_info)
provision_info = node_info['provision']
# check that key options present in provision section
self.assertIn('ks_meta', provision_info)
self.assertIn('engine', provision_info)
self.assertIn('engine', serialized['common']['provision'])
def test_deleted_field_present_only_for_deleted_nodes(self):
objects.Cluster.prepare_for_deployment(self.cluster_db)
self.node.pending_deletion = True
serialized = self.serializer.serialize(self.cluster_db, [self.node])
node_info = next(x for x in serialized if x['uid'] == self.node.uid)
node_info = next(x for x in serialized['nodes']
if x['uid'] == self.node.uid)
self.assertTrue(node_info['deleted'])
self.node.pending_deletion = False
serialized = self.serializer.serialize(self.cluster_db, [self.node])
node_info = next(x for x in serialized if x['uid'] == self.node.uid)
node_info = next(x for x in serialized['nodes']
if x['uid'] == self.node.uid)
self.assertNotIn('deleted', node_info)
@ -666,14 +671,12 @@ class TestDeploymentHASerializer90(
objects.Cluster.prepare_for_deployment(cluster)
serialized = serializer.serialize(cluster, cluster.nodes)
objects.Cluster.replace_deployment_info(cluster, serialized)
objects.Cluster.prepare_for_deployment(cluster)
cust_serialized = serializer.serialize(
cluster, cluster.nodes)
cust_serialized = serializer.serialize(cluster, cluster.nodes)
for item in serialized:
self.assertIn(item, cust_serialized)
self.assertEqual(serialized['common'], cust_serialized['common'])
self.assertItemsEqual(serialized['nodes'], cust_serialized['nodes'])
class TestDeploymentTasksSerialization90(
@ -849,9 +852,11 @@ class TestSriovSerialization90(
else:
self.fail('NIC without assigned networks was not found')
node0 = self.serialize()[0]
serialized = self.serialize()
node0 = serialized['nodes'][0]
common_attrs = serialized['common']
self.assertEqual(
node0['quantum_settings']['supported_pci_vendor_devs'],
common_attrs['quantum_settings']['supported_pci_vendor_devs'],
['1234:5678']
)
for trans in node0['network_scheme']['transformations']:

View File

@ -172,7 +172,7 @@ class TestTaskManagers(BaseIntegrationTest):
nodes_ids.append(consts.MASTER_NODE_UID)
# check that deployment info contains information about all nodes
# that are not deleted
self.assertItemsEqual(nodes_ids, info)
self.assertItemsEqual(nodes_ids, info['nodes'])
@mock.patch('nailgun.task.task.rpc.cast')
@mock.patch('objects.Cluster.get_deployment_tasks')

View File

@ -1,364 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nailgun.test.base import BaseTestCase
from nailgun.utils.migration import negate_condition
from nailgun.utils.migration import remove_question_operator
from nailgun.utils.migration import upgrade_attributes_metadata_6_0_to_6_1
from nailgun.utils.migration import upgrade_release_attributes_50_to_51
from nailgun.utils.migration import upgrade_release_attributes_51_to_60
from nailgun.utils.migration import upgrade_release_roles_50_to_51
from nailgun.utils.migration import upgrade_release_roles_51_to_60
from nailgun.utils.migration import upgrade_role_limits_6_0_to_6_1
from nailgun.utils.migration import upgrade_role_restrictions_6_0_to_6_1
class TestDataMigration(BaseTestCase):
def test_release_attributes_metadata_upgrade_50_to_51(self):
attributes_metadata_50 = {
'editable': {
'storage': {
'volumes_ceph': {
'value': False,
'label': "Ceph RBD for volumes (Cinder)",
'description': "Configures Cinder to store "
"volumes in Ceph RBD images.",
'weight': 20,
'type': "checkbox",
'conflicts': [
{"settings:common.libvirt_type.value": "vcenter"},
{"settings:storage.volumes_lvm.value": True}
]
},
'objects_ceph': {
'value': False,
'label': "Ceph RadosGW for objects(Swift API)",
'description': "Configures RadosGW front end "
"for Ceph RBD.",
'weight': 40,
'type': "checkbox",
'depends': [
{"settings:storage.images_ceph.value": True}
],
'conflicts': [
{"settings:common.libvirt_type.value": "vcenter"}
]
}
}
}
}
attributes_metadata_51 = upgrade_release_attributes_50_to_51(
attributes_metadata_50
)
storage_attrs = attributes_metadata_51["editable"]["storage"]
self.assertEqual(
storage_attrs['volumes_ceph'].get("restrictions"),
[
"settings:common.libvirt_type.value == 'vcenter'",
"settings:storage.volumes_lvm.value == true"
]
)
self.assertEqual(
storage_attrs['objects_ceph'].get("restrictions"),
[
"settings:storage.images_ceph.value != true",
"settings:common.libvirt_type.value == 'vcenter'"
]
)
def test_release_roles_metadata_upgrade_50_to_51(self):
ceilometer_depends = {
'condition': {
"settings:additional_components.ceilometer.value": True
},
'warning': "Ceilometer should be enabled"
}
new_ceilometer_depends = {
'condition': "settings:additional_components.ceilometer.value == "
"true",
'warning': "Ceilometer should be enabled"
}
roles_metadata_50 = {
'mongo': {
'name': "Telemetry - MongoDB",
'description': "A feature-complete and recommended "
"database for storage of metering data "
"from OpenStack Telemetry (Ceilometer)",
'conflicts': ['compute',
'ceph-osd'],
'depends': [ceilometer_depends]
}
}
roles_metadata_51 = upgrade_release_roles_50_to_51(
roles_metadata_50
)
self.assertEqual(
roles_metadata_51["mongo"]["depends"],
[new_ceilometer_depends]
)
def test_negate_condition(self):
self.assertEqual(
negate_condition('a == b'),
'not (a == b)'
)
self.assertEqual(
negate_condition('a != b'),
'not (a != b)'
)
self.assertEqual(
negate_condition('a in b'),
'not (a in b)'
)
def test_release_attributes_metadata_upgrade_51_to_60(self):
sample_group = {
"field1": {
"type": "text",
"restrictions": [{
"action": "hide",
"condition": "cluster:net_provider != 'neutron' or "
"networking_parameters:net_l23_provider? != 'nsx'"
}],
"description": "Description",
"label": "Label"
},
"field2": {
"type": "radio",
"values": [{
"restrictions": [
"settings:common.libvirt_type.value != 'kvm' or "
"not (cluster:net_provider == 'neutron' and "
"networking_parameters:segmentation_type? == 'vlan')"
],
"data": "value1",
"description": "Description1",
"label": "Label1"
}, {
"restrictions": [
"settings:common.libvirt_type.value == 'kvm?'"
],
"data": "value2",
"description": "Description2",
"label": "Label2"
}]
}
}
attributes_metadata = {
"editable": {
"group": sample_group
}
}
upgrade_release_attributes_51_to_60(attributes_metadata)
self.assertEqual(
sample_group["field1"]["restrictions"][0]["condition"],
"cluster:net_provider != 'neutron' or "
"networking_parameters:net_l23_provider != 'nsx'"
)
self.assertEqual(
sample_group["field2"]["values"][0]["restrictions"][0],
"settings:common.libvirt_type.value != 'kvm' or "
"not (cluster:net_provider == 'neutron' and "
"networking_parameters:segmentation_type == 'vlan')"
)
self.assertEqual(
sample_group["field2"]["values"][1]["restrictions"][0],
"settings:common.libvirt_type.value == 'kvm?'"
)
def test_release_roles_metadata_upgrade_51_to_60(self):
operational_restriction = {
'condition': "cluster:status != 'operational'",
'warning': "MongoDB node can not be added to an "
"operational environment."
}
ceilometer_restriction = {
'condition': 'settings:additional_components.ceilometer.value? == '
'true',
'warning': "Ceilometer should be enabled."
}
new_operational_restriction = {
'condition': remove_question_operator(negate_condition(
operational_restriction['condition'])),
'message': operational_restriction['warning'],
}
new_ceilometer_restriction = {
'condition': remove_question_operator(negate_condition(
ceilometer_restriction['condition'])),
'message': ceilometer_restriction['warning']
}
false_restriction = {
'condition': "1 == 2",
'message': "This is always false"
}
roles_metadata_51 = {
'mongo': {
'name': "Telemetry - MongoDB",
'description': "A feature-complete and recommended "
"database for storage of metering data "
"from OpenStack Telemetry (Ceilometer)",
'conflicts': ['compute',
'ceph-osd'],
'depends': [
operational_restriction,
ceilometer_restriction
],
},
'test': {
'name': "Test restrictions extend",
'description': "Testing restrictions list extend",
'conflicts': [],
'depends': [
operational_restriction,
ceilometer_restriction
],
'restrictions': [
false_restriction
]
}
}
roles_metadata_60 = upgrade_release_roles_51_to_60(
roles_metadata_51
)
self.assertTrue('depends' not in roles_metadata_60["mongo"])
self.assertTrue('depends' not in roles_metadata_60["test"])
self.assertEqual(
roles_metadata_60['mongo']['restrictions'],
[
new_operational_restriction,
new_ceilometer_restriction
]
)
self.assertEqual(
roles_metadata_60['test']['restrictions'],
[
false_restriction,
new_operational_restriction,
new_ceilometer_restriction
]
)
def test_upgrade_attributes_metadata_from_6_0_to_6_1(self):
attributes_original = {
'editable': {
'storage': {
'volumes_lvm': {
'description': 'xx',
}
},
'common': {}
}
}
attributes_after = {
'editable': {
'storage': {
'volumes_lvm': {
'description': ('It is recommended to have at least '
'one Cinder node.')
}
},
'common': {
'use_vcenter': {
'value': False,
'weight': 30,
'type': "hidden"
}
}
}
}
self.assertEqual(
upgrade_attributes_metadata_6_0_to_6_1(attributes_original),
attributes_after
)
def test_role_limits_metadata_upgrade_from_6_0_to_6_1(self):
roles_meta_original = {
'controller': {
'name': 'Controller',
},
'no-add': {
'name': 'No limits will be added here',
}
}
limits_definitions = {
'controller': {
'max': 1,
'overrides': [
{
'max': 5,
'condition': '1 == 2',
'message': 'Test',
}
]
}
}
roles_meta_after = copy.deepcopy(roles_meta_original)
roles_meta_after['controller']['limits'] = copy.deepcopy(
limits_definitions['controller'])
roles_meta_test = upgrade_role_limits_6_0_to_6_1(
roles_meta_original,
limits_definitions
)
self.assertEqual(roles_meta_test, roles_meta_after)
def test_role_restrictions_upgrade_from_6_0_to_6_1(self):
roles_meta_original = {
'controller': {
'name': 'Controller',
'restrictions': [
{
'condition': '1 == 2',
'message': 'Always false'
}
]
},
'no-edit': {
'name': 'No restrictions will be changed here',
}
}
restriction_definitions = {
'controller': [
{
'condition': '1 == 1',
'message': 'Always true'
}
]
}
roles_meta_after = copy.deepcopy(roles_meta_original)
roles_meta_after['controller']['restrictions'] = \
restriction_definitions['controller']
self.assertEqual(
upgrade_role_restrictions_6_0_to_6_1(
roles_meta_original,
restriction_definitions
),
roles_meta_after
)

View File

@ -39,7 +39,7 @@ def setup_module():
def prepare():
meta = base.reflect_db_metadata()
db.execute(
result = db.execute(
meta.tables['releases'].insert(),
[{
'name': 'test_name',
@ -57,6 +57,24 @@ def prepare():
{'type': 'very_important_rule'}
]})
}])
release_id = result.inserted_primary_key[0]
db.execute(
meta.tables['clusters'].insert(),
[{
'name': 'test_cluster',
'release_id': release_id,
'mode': 'ha_compact',
'status': 'new',
'net_provider': 'neutron',
'grouping': 'roles',
'fuel_version': '10.0',
'deployment_tasks': '[]',
'replaced_deployment_info': '[]'
}]
)
db.commit()
@ -147,3 +165,12 @@ class TestDeploymentHistorySummaryField(base.BaseAlembicMigrationTest):
def test_downgrade_tasks_noop(self):
deployment_history = self.meta.tables['deployment_history']
self.assertNotIn('summary', deployment_history.c)
class TestClusterAttributesDowngrade(base.BaseAlembicMigrationTest):
def test_deployment_info_downgrade(self):
clusters_table = self.meta.tables['clusters']
deployment_info = db.execute(
sa.select([clusters_table.c.replaced_deployment_info])
).fetchone()[0]
self.assertEqual('[]', deployment_info)

View File

@ -26,12 +26,16 @@ class TestTaskSerializerContext(BaseUnitTest):
def setUpClass(cls):
cls.transaction = TransactionContext(
{
'1': {
'common': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attribute': '1'
},
'nodes': {
'1': {
'attribute': '1'
}
}
}
)
@ -100,24 +104,26 @@ class TestTaskSerializer(BaseUnitTest):
@classmethod
def setUpClass(cls):
cls.context = task_serializer.Context(TransactionContext({
'1': {
'common': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text1',
'a_int': 1
}
},
'2': {
'cluster': {'id': 2},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text2',
'a_int': 2
'nodes': {
'1': {
'cluster': {'id': 1},
'attributes': {
'a_str': 'text1',
'a_int': 1
}
},
'2': {
'cluster': {'id': 2},
'attributes': {
'a_str': 'text2',
'a_int': 2
}
}
}
}))

View File

@ -74,44 +74,36 @@ class TestTransactionSerializer(BaseUnitTest):
]
cls.context = lcm.TransactionContext({
'1': {
'common': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text1',
'a_int': 1
}
},
'2': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text2',
'a_int': 2
}
},
'3': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text3',
'a_int': 3
}
},
'4': {
'cluster': {'id': 1},
'release': {'version': 'liberty-9.0'},
'openstack_version': 'liberty-9.0',
'public_ssl': {'hostname': 'localhost'},
'attributes': {
'a_str': 'text3',
'a_int': 3
'nodes': {
'1': {
'attributes': {
'a_str': 'text1',
'a_int': 1
}
},
'2': {
'attributes': {
'a_str': 'text2',
'a_int': 2
}
},
'3': {
'attributes': {
'a_str': 'text3',
'a_int': 3
}
},
'4': {
'attributes': {
'a_str': 'text3',
'a_int': 3
}
}
}
})

View File

@ -1,57 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils.migration import upgrade_clusters_replaced_info
class TestReplacedDataMigration(BaseIntegrationTest):
def setUp(self):
super(TestReplacedDataMigration, self).setUp()
self.cluster = self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
]
)
self.nodes = self.env.nodes
self.deployment_info = []
self.provisioning_info = {'nodes': [], 'engine': {'custom': 'type'}}
for node in self.env.nodes:
self.deployment_info.append({'uid': node.uid, 'type': 'deploy'})
self.provisioning_info['nodes'].append(
{'uid': node.uid, 'type': 'provision'})
self.cluster.replaced_deployment_info = self.deployment_info
self.cluster.replaced_provisioning_info = self.provisioning_info
self.db.commit()
self.provisioning_nodes = self.provisioning_info.pop('nodes')
def test_migration_passed_successfully(self):
connection = self.db.connection()
upgrade_clusters_replaced_info(connection)
self.assertEqual(self.provisioning_info,
self.cluster.replaced_provisioning_info)
self.assertEqual(self.cluster.replaced_deployment_info, [])
for node in self.nodes:
self.assertEqual(
node.replaced_deployment_info,
[n for n in self.deployment_info if n['uid'] == node.uid]
)
self.assertEqual(
node.replaced_provisioning_info,
next(n for n in self.provisioning_nodes
if n['uid'] == node.uid)
)

View File

@ -52,7 +52,7 @@ def setup_module():
def prepare():
meta = base.reflect_db_metadata()
db.execute(
result = db.execute(
meta.tables['releases'].insert(),
[{
'name': 'test_name',
@ -68,6 +68,23 @@ def prepare():
'volumes_metadata': jsonutils.dumps({})
}])
release_id = result.inserted_primary_key[0]
db.execute(
meta.tables['clusters'].insert(),
[{
'name': 'test_cluster',
'release_id': release_id,
'mode': 'ha_compact',
'status': 'new',
'net_provider': 'neutron',
'grouping': 'roles',
'fuel_version': '10.0',
'deployment_tasks': '[]',
'replaced_deployment_info': '[]'
}]
)
db.execute(
meta.tables['nodes'].insert(),
[{
@ -227,3 +244,12 @@ class TestDeploymentHistoryMigration(base.BaseAlembicMigrationTest):
result = db.execute(sa.select([
self.meta.tables['deployment_history']])).first()
self.assertIn('summary', result)
class TestClusterAttributesMigration(base.BaseAlembicMigrationTest):
def test_deployment_info_migration(self):
clusters_table = self.meta.tables['clusters']
deployment_info = db.execute(
sa.select([clusters_table.c.replaced_deployment_info])
).fetchone()[0]
self.assertEqual('{}', deployment_info)

View File

@ -169,10 +169,12 @@ class TestTransactionHandlers(BaseTestCase):
def test_get_transaction_deployment_info(self):
cluster = self.cluster_db
nodes = objects.Cluster.get_nodes_not_for_deletion(cluster)
deployment_node_info = deployment_serializers.serialize_for_lcm(
deployment_info = deployment_serializers.serialize_for_lcm(
cluster, nodes
)
deployment_info = {node['uid']: node for node in deployment_node_info}
deployment_info['nodes'] = {
n['uid']: n for n in deployment_info['nodes']
}
transaction = objects.Transaction.create({
'cluster_id': cluster.id,
'status': consts.TASK_STATUSES.ready,

View File

@ -69,7 +69,8 @@ class TestTransactionObject(BaseTestCase):
objects.Transaction.get_deployment_info(transaction),
{}
)
info = {'test': {'test': 'test'}}
info = {'common': {'a': 'b'},
'nodes': {'7': {'test': {'test': 'test'}}}}
objects.Transaction.attach_deployment_info(transaction, info)
self.assertEqual(
info, objects.Transaction.get_deployment_info(transaction)
@ -165,7 +166,7 @@ class TestTransactionObject(BaseTestCase):
# filter out node 2
transactions = get_succeed(self.cluster.id,
['dns-client', 'test'], [uid1]).all()
['dns-client', 'test'], {uid1: {}}).all()
self.assertEqual(
transactions,
[(task3, uid1, 'dns-client'),

View File

@ -337,3 +337,72 @@ class TestGetNodesToRun(BaseUnitTest):
manager._get_nodes_to_run(cluster, node_filter, node_ids)
self.assertEqual(0, nodes_obj_mock.filter_by_list.call_count)
self.assertEqual(0, yaql_mock.create_context.call_count)
class TestGetCurrentState(BaseUnitTest):
def setUp(self):
super(TestGetCurrentState, self).setUp()
self.cluster = mock.MagicMock()
self.nodes = [
mock.MagicMock(uid='1', pending_addition=False, status='ready'),
mock.MagicMock(uid='2', pending_addition=False, status='ready')
]
self.tasks = [
{'id': 'task1', 'type': consts.ORCHESTRATOR_TASK_TYPES.puppet},
{'id': 'task2', 'type': consts.ORCHESTRATOR_TASK_TYPES.shell},
{'id': 'task3', 'type': consts.ORCHESTRATOR_TASK_TYPES.group}
]
def test_get_current_state_with_force(self):
current_state = manager._get_current_state(
self.cluster, self.nodes, self.tasks, force=True
)
self.assertEqual({}, current_state)
@mock.patch('nailgun.transactions.manager.objects')
def test_get_current_state_if_there_is_no_deployment(self, objects_mock):
txs_mock = objects_mock.TransactionCollection
txs_mock.get_successful_transactions_per_task.return_value = []
nodes = {'1': self.nodes[0], '2': self.nodes[1], 'master': None}
current_state = manager._get_current_state(
self.cluster, self.nodes, self.tasks
)
self.assertEqual({}, current_state)
txs_mock.get_successful_transactions_per_task.assert_called_once_with(
self.cluster.id, ['task1', 'task2'], nodes
)
@mock.patch('nailgun.transactions.manager.objects')
def test_assemble_current_state(self, objects_mock):
txs_mock = objects_mock.TransactionCollection
transactions = [
(1, '1', 'task1'), (2, '1', 'task2'), (2, '2', 'task2')
]
txs_mock.get_successful_transactions_per_task.return_value = \
transactions
objects_mock.Transaction.get_deployment_info.side_effect = [
{'common': {'key1': 'value1'},
'nodes': {'1': {'key11': 'value11'}}},
{'common': {'key2': 'value2'},
'nodes': {'1': {'key21': 'value21'}, '2': {'key22': 'value22'}}},
]
current_state = manager._get_current_state(
self.cluster, self.nodes, self.tasks
)
expected_state = {
'task1': {
'common': {'key1': 'value1'},
'nodes': {'1': {'key11': 'value11'}}
},
'task2': {
'common': {'key2': 'value2'},
'nodes': {
'1': {'key21': 'value21'},
'2': {'key22': 'value22'}
},
}
}
self.assertEqual(expected_state, current_state)

View File

@ -508,28 +508,34 @@ def _get_current_state(cluster, nodes, tasks, force=False):
state = {}
for tx, data in itertools.groupby(txs, lambda x: x[0]):
node_ids = []
common_attrs = {}
deferred_state = {}
for _, node_id, task_name in data:
t_state = state.setdefault(task_name, {})
t_state = state.setdefault(task_name, {
'nodes': {}, 'common': common_attrs
})
if _is_node_for_redeploy(nodes.get(node_id)):
t_state[node_id] = {}
t_state['nodes'][node_id] = {}
else:
t_state[node_id] = deferred_state.setdefault(node_id, {})
t_state['nodes'][node_id] = deferred_state.setdefault(
node_id, {}
)
node_ids.append(node_id)
dict_update(
deferred_state,
objects.Transaction.get_deployment_info(tx, node_uids=node_ids),
level=2
)
deployment_info = objects.Transaction.get_deployment_info(
tx, node_uids=node_ids)
common_attrs.update(deployment_info['common'])
dict_update(deferred_state, deployment_info['nodes'], level=2)
return state
def _get_expected_state(cluster, nodes):
info = deployment_serializers.serialize_for_lcm(cluster, nodes)
info = {n['uid']: n for n in info}
info['nodes'] = {n['uid']: n for n in info['nodes']}
# Added cluster state
info[None] = {}
info['nodes'][None] = {}
return info