Serialize default_gateway/other_nets for 6.0 only

Both the default_gateway and other_nets fields were introduced in Fuel 6.0,
but were accidentally serialized for old envs too. This led to puppet
errors when we tried to re-deploy a node in one of the old envs.

Change-Id: I7e4ba8f3a6a0ba822c47b9c14fe4d02b3523d658
Closes-Bug: #1403560
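
The fix applies the usual version-gating pattern: fields introduced in a
given Fuel release are serialized only by that release's serializer
subclass, so older environments never receive them. A minimal sketch of
the idea with invented, simplified names (the real override is
NeutronNetworkDeploymentSerializer60.generate_network_scheme in the diff
below):

    class BaseNetworkSerializer(object):
        """Serializes only fields every supported release understands."""

        @classmethod
        def generate_network_scheme(cls, node):
            return {'endpoints': {'br-mgmt': {'IP': ['192.168.0.2/24']}}}


    class NetworkSerializer60(BaseNetworkSerializer):
        """Adds the 6.0-only fields on top of the base scheme."""

        @classmethod
        def generate_network_scheme(cls, node):
            attrs = super(NetworkSerializer60, cls).\
                generate_network_scheme(node)
            # 6.0-only fields live here and nowhere else, so re-deploying
            # a 5.x node can no longer hit puppet errors on unknown fields.
            for endpoint in attrs['endpoints'].values():
                endpoint.setdefault('other_nets', [])  # 6.0-only field
            # default_gateway is flagged the same way in the real override.
            return attrs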
Author:    Igor Kalnitsky
Date:      2014-12-17 19:43:16 +02:00
Committer: Igor Kalnitsky
Parent:    dc621c96e3
Commit:    5f91157daa

2 changed files with 489 additions and 20 deletions


@@ -535,11 +535,6 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
if objects.Node.should_have_public(node):
netgroup_mapping.append(('public', 'br-ex'))
# Include information about all subnets that don't belong to this node.
# This is used during deployment to configure routes to all other
# networks in the environment.
other_nets = nm.get_networks_not_on_node(node)
netgroups = {}
for ngname, brname in netgroup_mapping:
# Here we get a dict with network description for this particular
@@ -547,25 +542,13 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
netgroup = nm.get_node_network_by_netname(node, ngname)
if netgroup.get('ip'):
attrs['endpoints'][brname]['IP'] = [netgroup['ip']]
if netgroup.get('gateway'):
attrs['endpoints'][brname]['gateway'] = netgroup['gateway']
attrs['endpoints'][brname]['other_nets'] = \
other_nets.get(ngname, [])
netgroups[ngname] = netgroup
if objects.Node.should_have_public(node):
attrs['endpoints']['br-ex']['gateway'] = \
netgroups['public']['gateway']
else:
gw = nm.get_default_gateway(node.id)
attrs['endpoints']['br-fw-admin']['gateway'] = gw
for brname in brnames:
if attrs['endpoints'][brname].get('gateway'):
attrs['endpoints'][brname]['default_gateway'] = True
break
attrs['endpoints']['br-fw-admin']['gateway'] = settings.MASTER_IP
# Connect interface bridges to network bridges.
for ngname, brname in netgroup_mapping:
@@ -773,6 +756,7 @@ class NeutronNetworkDeploymentSerializer51(NeutronNetworkDeploymentSerializer):
class NeutronNetworkDeploymentSerializer60(
NeutronNetworkDeploymentSerializer51
):
@classmethod
def generate_network_scheme(cls, node):
attrs = super(NeutronNetworkDeploymentSerializer60, cls). \
@@ -782,6 +766,36 @@ class NeutronNetworkDeploymentSerializer60(
if 'tags' in item:
item['vlan_ids'] = item['tags']
# Include information about all subnets that don't belong to this node.
# This is used during deployment to configure routes to all other
# networks in the environment.
nm = objects.Node.get_network_manager(node)
other_nets = nm.get_networks_not_on_node(node)
netgroup_mapping = [
('storage', 'br-storage'),
('management', 'br-mgmt'),
('fuelweb_admin', 'br-fw-admin'),
]
if objects.Node.should_have_public(node):
netgroup_mapping.append(('public', 'br-ex'))
for ngname, brname in netgroup_mapping:
netgroup = nm.get_node_network_by_netname(node, ngname)
if netgroup.get('gateway'):
attrs['endpoints'][brname]['gateway'] = netgroup['gateway']
attrs['endpoints'][brname]['other_nets'] = \
other_nets.get(ngname, [])
if not objects.Node.should_have_public(node):
gw = nm.get_default_gateway(node.id)
attrs['endpoints']['br-fw-admin']['gateway'] = gw
for brname in attrs['endpoints'].keys():
if attrs['endpoints'][brname].get('gateway'):
attrs['endpoints'][brname]['default_gateway'] = True
break
return attrs
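
For a 6.0 node without a public network, the override above produces
endpoint entries along these lines (addresses invented for illustration;
only the fields relevant to this change are shown):

    attrs = {
        'endpoints': {
            'br-mgmt': {
                'IP': ['192.168.0.2/24'],
                # CIDRs of this network group on other nodes, used by
                # puppet to configure routes to them.
                'other_nets': ['192.168.2.0/24'],
            },
            'br-fw-admin': {
                'IP': ['10.20.0.129/24'],
                # From nm.get_default_gateway(node.id), because the node
                # has no public network.
                'gateway': '10.20.0.1',
                # Set on the first endpoint found with a gateway; the loop
                # breaks immediately, so exactly one endpoint is flagged.
                'default_gateway': True,
                'other_nets': [],
            },
        },
    }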


@@ -383,10 +383,465 @@ class TestHandlers(BaseIntegrationTest):
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
def test_neutron_deploy_cast_with_right_args_for_5_1_1(self, mocked_rpc):
self.env.create(
release_kwargs={
'version': "2014.1.1-5.1"
'version': "2014.1.3-5.1.1"
},
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}
]
)
cluster_db = self.env.clusters[0]
attrs = cluster_db.attributes.editable
attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
True
attrs['provision']['method'] = consts.PROVISION_METHODS.image
resp = self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster_db.id}),
params=jsonutils.dumps({'editable': attrs}),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
common_attrs = {
'deployment_mode': 'ha_compact',
'management_vip': '192.168.0.1',
'public_vip': '172.16.0.2',
'management_network_range': '192.168.0.0/24',
'storage_network_range': '192.168.1.0/24',
'mp': [{'weight': '1', 'point': '1'},
{'weight': '2', 'point': '2'}],
'quantum': True,
'quantum_settings': {},
'master_ip': '127.0.0.1',
'use_cinder': True,
'deployment_id': cluster_db.id,
'openstack_version_prev': None,
'openstack_version': cluster_db.release.version,
'fuel_version': cluster_db.fuel_version
}
common_attrs.update(
objects.Release.get_orchestrator_data_dict(cluster_db.release)
)
cluster_attrs = objects.Attributes.merged_attrs_values(
cluster_db.attributes
)
common_attrs.update(cluster_attrs)
L2 = {
"base_mac": "fa:16:3e:00:00:00",
"segmentation_type": "gre",
"phys_nets": {},
"tunnel_id_ranges": "2:65535"
}
L3 = {
"use_namespaces": True
}
predefined_networks = {
"net04_ext": {
'shared': False,
'L2': {
'router_ext': True,
'network_type': 'local',
'physnet': None,
'segment_id': None},
'L3': {
'subnet': u'172.16.0.0/24',
'enable_dhcp': False,
'nameservers': [],
'floating': '172.16.0.130:172.16.0.254',
'gateway': '172.16.0.1'},
'tenant': 'admin'
},
"net04": {
'shared': False,
'L2': {
'router_ext': False,
'network_type': 'gre',
'physnet': None,
'segment_id': None},
'L3': {
'subnet': u'192.168.111.0/24',
'enable_dhcp': True,
'nameservers': [
'8.8.4.4',
'8.8.8.8'],
'floating': None,
'gateway': '192.168.111.1'},
'tenant': 'admin'
}
}
common_attrs['quantum_settings'].update(
L2=L2,
L3=L3,
predefined_networks=predefined_networks)
# Common attrs calculation
nodes_list = []
nodes_db = sorted(cluster_db.nodes, key=lambda n: n.id)
assigned_ips = {}
i = 0
admin_ips = [
'10.20.0.134/24',
'10.20.0.133/24',
'10.20.0.132/24',
'10.20.0.131/24',
'10.20.0.130/24',
'10.20.0.129/24']
for node in nodes_db:
node_id = node.id
admin_ip = admin_ips.pop()
for role in sorted(node.roles + node.pending_roles):
assigned_ips[node_id] = {}
assigned_ips[node_id]['management'] = '192.168.0.%d' % (i + 2)
assigned_ips[node_id]['public'] = '172.16.0.%d' % (i + 3)
assigned_ips[node_id]['storage'] = '192.168.1.%d' % (i + 1)
assigned_ips[node_id]['admin'] = admin_ip
nodes_list.append({
'role': role,
'internal_address': '',
'public_address': '',
'storage_address': '',
'internal_netmask': '255.255.255.0',
'public_netmask': '255.255.255.0',
'storage_netmask': '255.255.255.0',
'uid': str(node_id),
'swift_zone': str(node_id),
'name': 'node-%d' % node_id,
'fqdn': 'node-%d.%s' % (node_id, settings.DNS_DOMAIN)})
i += 1
controller_nodes = filter(
lambda node: node['role'] == 'controller',
deepcopy(nodes_list))
common_attrs['nodes'] = nodes_list
common_attrs['nodes'][0]['role'] = 'primary-controller'
common_attrs['last_controller'] = controller_nodes[-1]['name']
common_attrs['storage']['pg_num'] = 128
common_attrs['test_vm_image'] = {
'container_format': 'bare',
'public': 'true',
'disk_format': 'qcow2',
'img_name': 'TestVM',
'img_path': '/opt/vm/cirros-x86_64-disk.img',
'os_name': 'cirros',
'min_ram': 64,
'glance_properties': (
"""--property murano_image_info="""
"""'{"title": "Murano Demo", "type": "cirros.demo"}'"""
),
}
# Individual attrs calculation and
# merging with common attrs
priority_mapping = {
'controller': [600, 600, 500],
'cinder': 700,
'compute': 700
}
critical_mapping = {
'primary-controller': True,
'controller': False,
'cinder': False,
'compute': False
}
deployment_info = []
for node in nodes_db:
ips = assigned_ips[node.id]
for role in sorted(node.roles):
priority = priority_mapping[role]
is_critical = critical_mapping[role]
if isinstance(priority, list):
priority = priority.pop()
individual_atts = {
'uid': str(node.id),
'status': node.status,
'role': role,
'online': node.online,
'fail_if_error': is_critical,
'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
'priority': priority,
'network_scheme': {
"version": "1.0",
"provider": "ovs",
"interfaces": {
"eth0": {
"L2": {"vlan_splinters": "off"},
"mtu": 1500
},
"eth1": {
"L2": {"vlan_splinters": "off"},
"mtu": 1500
},
"eth2": {
"L2": {"vlan_splinters": "off"},
"mtu": 1500
},
},
"endpoints": {
"br-mgmt": {"IP": [ips['management'] + "/24"]},
"br-ex": {
"IP": [ips['public'] + "/24"],
"gateway": "172.16.0.1",
},
"br-storage": {"IP": [ips['storage'] + "/24"]},
"br-fw-admin": {"IP": [ips['admin']]},
},
"roles": {
"management": "br-mgmt",
"mesh": "br-mgmt",
"ex": "br-ex",
"storage": "br-storage",
"fw-admin": "br-fw-admin",
},
"transformations": [
{
"action": "add-br",
"name": u"br-eth0"},
{
"action": "add-port",
"bridge": u"br-eth0",
"name": u"eth0"},
{
"action": "add-br",
"name": u"br-eth1"},
{
"action": "add-port",
"bridge": u"br-eth1",
"name": u"eth1"},
{
"action": "add-br",
"name": "br-ex"},
{
"action": "add-br",
"name": "br-mgmt"},
{
"action": "add-br",
"name": "br-storage"},
{
"action": "add-br",
"name": "br-fw-admin"},
{
"action": "add-patch",
"bridges": [u"br-eth0", "br-storage"],
"tags": [102, 0],
"vlan_ids": [102, 0]},
{
"action": "add-patch",
"bridges": [u"br-eth0", "br-mgmt"],
"tags": [101, 0],
"vlan_ids": [101, 0]},
{
"action": "add-patch",
"bridges": [u"br-eth1", "br-fw-admin"],
"trunks": [0]},
{
"action": "add-patch",
"bridges": [u"br-eth0", "br-ex"],
"trunks": [0]},
]
}
}
individual_atts.update(common_attrs)
individual_atts['glance']['image_cache_max_size'] = str(
manager.calc_glance_cache_size(node.attributes.volumes)
)
deployment_info.append(deepcopy(individual_atts))
controller_nodes = filter(
lambda node: node['role'] == 'controller',
deployment_info)
controller_nodes[0]['role'] = 'primary-controller'
controller_nodes[0]['fail_if_error'] = True
supertask = self.env.launch_deployment()
deploy_task_uuid = [x.uuid for x in supertask.subtasks
if x.name == 'deployment'][0]
deployment_msg = {
'api_version': '1',
'method': 'deploy',
'respond_to': 'deploy_resp',
'args': {}
}
deployment_msg['args']['task_uuid'] = deploy_task_uuid
deployment_msg['args']['deployment_info'] = deployment_info
deployment_msg['args']['pre_deployment'] = []
deployment_msg['args']['post_deployment'] = []
provision_nodes = []
admin_net = self.env.network_manager.get_admin_network_group()
for n in sorted(self.env.nodes, key=lambda n: n.id):
udev_interfaces_mapping = ','.join([
'{0}_{1}'.format(i.mac, i.name) for i in n.interfaces])
eth1_mac = [i.mac for i in n.interfaces if i.name == 'eth1'][0]
pnd = {
'profile': cluster_attrs['cobbler']['profile'],
'power_type': 'ssh',
'power_user': 'root',
'kernel_options': {
'netcfg/choose_interface': eth1_mac,
'udevrules': udev_interfaces_mapping},
'power_address': n.ip,
'power_pass': settings.PATH_TO_BOOTSTRAP_SSH_KEY,
'name': objects.Node.make_slave_name(n),
'hostname': n.fqdn,
'name_servers': '\"%s\"' % settings.DNS_SERVERS,
'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
'netboot_enabled': '1',
'ks_meta': {
'fuel_version': cluster_db.fuel_version,
'puppet_auto_setup': 1,
'puppet_master': settings.PUPPET_MASTER_HOST,
'puppet_enable': 0,
'mco_auto_setup': 1,
'install_log_2_syslog': 1,
'mco_pskey': settings.MCO_PSKEY,
'mco_vhost': settings.MCO_VHOST,
'mco_host': settings.MCO_HOST,
'mco_user': settings.MCO_USER,
'mco_password': settings.MCO_PASSWORD,
'mco_connector': settings.MCO_CONNECTOR,
'mco_enable': 1,
'pm_data': {
'ks_spaces': n.attributes.volumes,
'kernel_params': objects.Node.get_kernel_params(n),
},
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'authorized_keys':
["\"%s\"" % key for key in settings.AUTHORIZED_KEYS],
'timezone': settings.TIMEZONE,
'master_ip': settings.MASTER_IP,
'mlnx_vf_num': "16",
'mlnx_plugin_mode': "disabled",
'mlnx_iser_enabled': False,
'image_data': cluster_attrs['provision']['image_data'],
'gw':
self.env.network_manager.get_default_gateway(n.id),
'admin_net':
self.env.network_manager.get_admin_network_group(n.id).cidr
}
}
orchestrator_data = objects.Release.get_orchestrator_data_dict(
cluster_db.release)
if orchestrator_data:
pnd['ks_meta']['repo_metadata'] = \
orchestrator_data['repo_metadata']
vlan_splinters = cluster_attrs.get('vlan_splinters', None)
if vlan_splinters == 'kernel_lt':
pnd['ks_meta']['kernel_lt'] = 1
NetworkManager.assign_admin_ips([n])
admin_ip = self.env.network_manager.get_admin_ip_for_node(n.id)
for i in n.meta.get('interfaces', []):
if 'interfaces' not in pnd:
pnd['interfaces'] = {}
pnd['interfaces'][i['name']] = {
'mac_address': i['mac'],
'static': '0',
}
if 'interfaces_extra' not in pnd:
pnd['interfaces_extra'] = {}
pnd['interfaces_extra'][i['name']] = {
'peerdns': 'no',
'onboot': 'no'
}
if i['mac'] == n.mac:
pnd['interfaces'][i['name']]['dns_name'] = n.fqdn
pnd['interfaces_extra'][i['name']]['onboot'] = 'yes'
pnd['interfaces'][i['name']]['ip_address'] = admin_ip
pnd['interfaces'][i['name']]['netmask'] = str(
netaddr.IPNetwork(admin_net.cidr).netmask)
provision_nodes.append(pnd)
provision_task_uuid = filter(
lambda t: t.name == 'provision',
supertask.subtasks)[0].uuid
provision_msg = {
'api_version': '1',
'method': 'provision',
'respond_to': 'provision_resp',
'args': {
'task_uuid': provision_task_uuid,
'provisioning_info': {
'engine': {
'url': settings.COBBLER_URL,
'username': settings.COBBLER_USER,
'password': settings.COBBLER_PASSWORD,
'master_ip': settings.MASTER_IP,
'provision_method': consts.PROVISION_METHODS.image
},
'nodes': provision_nodes}}}
args, kwargs = nailgun.task.manager.rpc.cast.call_args
self.assertEqual(len(args), 2)
self.assertEqual(len(args[1]), 2)
self.datadiff(
args[1][0],
provision_msg,
ignore_keys=['internal_address',
'public_address',
'storage_address',
'ipaddr',
'IP'])
self.datadiff(
args[1][1],
deployment_msg,
ignore_keys=['internal_address',
'public_address',
'storage_address',
'ipaddr',
'IP'])
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_neutron_deploy_cast_with_right_args_for_6_0(self, mocked_rpc):
self.env.create(
release_kwargs={
'version': "2014.2-6.0"
},
cluster_kwargs={
'net_provider': 'neutron',