DHCP functionality with network verification

This commit is contained in:
Dmitry Shulyak 2013-09-17 17:13:55 +03:00 committed by default
parent 00e6775b72
commit e7a55c9c97
15 changed files with 458 additions and 109 deletions

View File

@ -627,6 +627,8 @@ class Task(Base):
# network
'check_networks',
'verify_networks',
'check_dhcp',
'verify_network_connectivity',
# plugin
'install_plugin',

View File

@ -14,10 +14,13 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
import json
import netifaces
import traceback
from sqlalchemy import or_
from nailgun.api.models import IPAddr
@ -565,6 +568,57 @@ class NailgunReceiver(object):
TaskHelper.update_task_status(task_uuid, status,
progress, error_msg, result)
@classmethod
def _master_networks_gen(cls, ifaces):
for iface in ifaces:
iface_data = netifaces.ifaddresses(iface)
if netifaces.AF_LINK in iface_data:
yield netifaces.ifaddresses(iface)[netifaces.AF_LINK]
@classmethod
def _get_master_macs(cls):
return itertools.chain(*cls._master_networks_gen(
netifaces.interfaces()))
@classmethod
def check_dhcp_resp(cls, **kwargs):
"""Receiver method for check_dhcp task
For example of kwargs check FakeCheckingDhcpThread
"""
logger.info(
"RPC method check_dhcp_resp received: %s" %
json.dumps(kwargs)
)
messages = []
result = collections.defaultdict(list)
message_template = ("Dhcp server on {server_id} - {mac}."
"Discovered from node {yiaddr} on {iface}.")
task_uuid = kwargs.get('task_uuid')
nodes = kwargs.get('nodes')
error_msg = kwargs.get('error')
status = kwargs.get('status')
progress = kwargs.get('progress')
macs = [item['addr'] for item in cls._get_master_macs()]
logger.debug('Mac addr on master node %s', macs)
if nodes:
for node in nodes:
if node['status'] == 'ready':
for row in node.get('data', []):
if row['mac'] not in macs:
messages.append(message_template.format(**row))
result[node['uid']].append(row)
elif node['status'] == 'error':
messages.append(node.get('error_msg',
('Dhcp check method failed.'
' Check logs for details.')))
status = status if not messages else "error"
error_msg = '\n'.join(messages) if messages else error_msg
TaskHelper.update_task_status(task_uuid, status,
progress, error_msg, result)
# Red Hat related callbacks
@classmethod

View File

@ -339,6 +339,41 @@ class FakeVerificationThread(FakeThread):
self.sleep(tick_interval)
class FakeCheckingDhcpThread(FakeAmpqThread):
    """Fake 'check_dhcp' task thread; used by test_task_managers.py."""

    # Canned per-node reports of discovered DHCP servers.
    NODES = [{'uid': '90',
              'status': 'ready',
              'data': [{'mac': 'ee:ae:c5:e0:f5:17',
                        'server_id': '10.20.0.157',
                        'yiaddr': '10.20.0.133',
                        'iface': 'eth0'}]},
             {'uid': '91',
              'status': 'ready',
              'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                        'server_id': '10.20.0.20',
                        'yiaddr': '10.20.0.131',
                        'iface': 'eth0'}]}]

    @property
    def _message(self):
        """Example of a successful message with discovered dhcp servers."""
        message = {'task_uuid': self.task_uuid}
        message['error'] = ''
        message['status'] = 'ready'
        message['progress'] = 100
        message['nodes'] = self.NODES
        return message

    def message_gen(self):
        self.sleep(self.tick_interval)
        # Error simulation is driven by the 'dhcp_error' test parameter.
        if self.params.get("dhcp_error"):
            return self.error_message_gen()
        return (self._message,)
class FakeRedHatCredentials(FakeAmpqThread):
def message_gen(self):
self.sleep(self.tick_interval)
@ -444,6 +479,7 @@ FAKE_THREADS = {
'deploy': FakeDeploymentThread,
'remove_nodes': FakeDeletionThread,
'verify_networks': FakeVerificationThread,
'check_dhcp': FakeCheckingDhcpThread,
'download_release': DownloadReleaseThread,
'check_redhat_credentials': FakeRedHatCredentials,
'check_redhat_licenses': FakeRedHatLicenses,

View File

@ -111,8 +111,9 @@ class TaskHelper(object):
os.system("/usr/bin/pkill -HUP rsyslog")
@classmethod
def update_task_status(cls, uuid, status, progress, msg="",
result=None):
def update_task_status(cls, uuid, status, progress, msg="", result=None):
# verify_networks - task is expecting to receive result with
# some data if connectivity_verification fails
logger.debug("Updating task: %s", uuid)
task = db().query(Task).filter_by(uuid=uuid).first()
if not task:
@ -142,7 +143,7 @@ class TaskHelper(object):
task.cluster_id, status)
cls.update_cluster_status(uuid)
if task.parent:
logger.debug("Updating parent task: %s", task.parent.uuid)
logger.debug("Updating parent task: %s.", task.parent.uuid)
cls.update_parent_task(task.parent.uuid)
@classmethod
@ -153,7 +154,7 @@ class TaskHelper(object):
if all(map(lambda s: s.status == 'ready', subtasks)):
task.status = 'ready'
task.progress = 100
task.message = '; '.join(map(
task.message = '\n'.join(map(
lambda s: s.message, filter(
lambda s: s.message is not None, subtasks)))
db().add(task)
@ -162,7 +163,7 @@ class TaskHelper(object):
elif all(map(lambda s: s.status in ('ready', 'error'), subtasks)):
task.status = 'error'
task.progress = 100
task.message = '; '.join(list(set(map(
task.message = '\n'.join(list(set(map(
lambda s: (s.message or ""), filter(
lambda s: (
s.status == 'error' and not

View File

@ -331,8 +331,14 @@ class VerifyNetworksTaskManager(TaskManager):
name="check_networks",
cluster=self.cluster
)
if not task.cluster.nodes:
task.status = 'error'
task.message = ('There should be atleast 1 node for dhcp check.'
'And 2 nodes for connectivity check')
db().add(task)
db().commit()
self._call_silently(
task,
tasks.CheckNetworksTask,
@ -343,14 +349,23 @@ class VerifyNetworksTaskManager(TaskManager):
# this one is connected with UI issues - we need to
# separate if error happened inside nailgun or somewhere
# in the orchestrator, and UI does it by task name.
task.name = "verify_networks"
db().add(task)
dhcp_subtask = Task(
name='check_dhcp',
cluster=self.cluster,
parent_id=task.id)
db().add(dhcp_subtask)
db().commit()
db().refresh(task)
task.name = 'verify_networks'
self._call_silently(
task,
tasks.VerifyNetworksTask,
vlan_ids
)
return task

View File

@ -444,7 +444,13 @@ class ClusterDeletionTask(object):
class VerifyNetworksTask(object):
@classmethod
def execute(self, task, data):
def _subtask_message(cls, task):
for subtask in task.subtasks:
yield subtask.name, {'respond_to': '{0}_resp'.format(subtask.name),
'task_uuid': subtask.uuid}
@classmethod
def _message(cls, task, data):
nodes = []
for n in task.cluster.nodes:
node_json = {'uid': n.id, 'networks': []}
@ -466,12 +472,18 @@ class VerifyNetworksTask(object):
{'iface': nic.name, 'vlans': vlans}
)
nodes.append(node_json)
return {
'method': task.name,
'respond_to': '{0}_resp'.format(task.name),
'args': {'task_uuid': task.uuid,
'nodes': nodes},
'subtasks': dict(cls._subtask_message(task))}
message = {'method': 'verify_networks',
'respond_to': 'verify_networks_resp',
'args': {'task_uuid': task.uuid,
'nodes': nodes}}
logger.debug("Network verification is called with: %s", message)
@classmethod
def execute(cls, task, data):
message = cls._message(task, data)
logger.debug("%s method is called with: %s",
task.name, message)
task.cache = message
db().add(task)

View File

@ -15,6 +15,7 @@
# under the License.
import json
from mock import patch
import uuid
from nailgun.api.models import Attributes
@ -342,6 +343,196 @@ class TestVerifyNetworks(BaseIntegrationTest):
self.assertEqual(task.result, error_nodes)
class TestDhcpCheckTask(BaseIntegrationTest):
    """Integration tests for NailgunReceiver.check_dhcp_resp."""

    def setUp(self):
        # Build a cluster with two non-API nodes and create a pending
        # 'check_dhcp' task whose uuid the fake RPC responses refer to.
        super(TestDhcpCheckTask, self).setUp()
        self.receiver = rcvr.NailgunReceiver()
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        self.node1, self.node2 = self.env.nodes
        self.task = Task(
            name="check_dhcp",
            cluster_id=cluster_db.id
        )
        self.db.add(self.task)
        self.db.commit()

    def test_check_dhcp_resp_master_mac(self):
        # Every reported DHCP server matches the master node's MAC,
        # so the task must stay 'ready' with an empty result.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready',
            'nodes': [{'uid': '90',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.157',
                                 'yiaddr': '10.20.0.133',
                                 'iface': 'eth0'}]},
                      {'uid': '91',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.20',
                                 'yiaddr': '10.20.0.131',
                                 'iface': 'eth0'}]}]
        }
        with patch('nailgun.rpc.receiver.NailgunReceiver._get_master_macs') \
                as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "ready")
        self.assertEqual(self.task.result, {})

    def test_check_dhcp_resp_roque_dhcp_mac(self):
        # Node 90 reports a MAC that is NOT the master node's, i.e. a
        # rogue DHCP server -- the task must become erred.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready',
            'nodes': [{'uid': '90',
                       'status': 'ready',
                       'data': [{'mac': 'ee:ae:c5:e0:f5:17',
                                 'server_id': '10.20.0.157',
                                 'yiaddr': '10.20.0.133',
                                 'iface': 'eth0'}]},
                      {'uid': '91',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.20',
                                 'yiaddr': '10.20.0.131',
                                 'iface': 'eth0'}]}]
        }
        with patch.object(self.receiver, '_get_master_macs') as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "error")

    def test_check_dhcp_resp_empty_nodes(self):
        # No 'nodes' key at all: nothing to inspect, status unchanged.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready'
        }
        with patch.object(self.receiver, '_get_master_macs') as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "ready")
        self.assertEqual(self.task.result, {})

    def test_check_dhcp_resp_empty_nodes_erred(self):
        # An incoming 'error' status without nodes is passed through.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'error'
        }
        with patch.object(self.receiver, '_get_master_macs'):
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, 'error')
        self.assertEqual(self.task.result, {})
# NOTE(review): this class is an exact duplicate of TestDhcpCheckTask
# defined earlier in this file; at import time this second definition
# shadows the first. Looks like a diff/merge artifact -- confirm and
# remove one copy.
class TestDhcpCheckTask(BaseIntegrationTest):
    """Integration tests for NailgunReceiver.check_dhcp_resp."""

    def setUp(self):
        # Build a cluster with two non-API nodes and create a pending
        # 'check_dhcp' task whose uuid the fake RPC responses refer to.
        super(TestDhcpCheckTask, self).setUp()
        self.receiver = rcvr.NailgunReceiver()
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": False},
                {"api": False}
            ]
        )
        cluster_db = self.env.clusters[0]
        self.node1, self.node2 = self.env.nodes
        self.task = Task(
            name="check_dhcp",
            cluster_id=cluster_db.id
        )
        self.db.add(self.task)
        self.db.commit()

    def test_check_dhcp_resp_master_mac(self):
        # Every reported DHCP server matches the master node's MAC,
        # so the task must stay 'ready' with an empty result.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready',
            'nodes': [{'uid': '90',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.157',
                                 'yiaddr': '10.20.0.133',
                                 'iface': 'eth0'}]},
                      {'uid': '91',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.20',
                                 'yiaddr': '10.20.0.131',
                                 'iface': 'eth0'}]}]
        }
        with patch('nailgun.rpc.receiver.NailgunReceiver._get_master_macs') \
                as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "ready")
        self.assertEqual(self.task.result, {})

    def test_check_dhcp_resp_roque_dhcp_mac(self):
        # Node 90 reports a MAC that is NOT the master node's, i.e. a
        # rogue DHCP server -- the task must become erred.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready',
            'nodes': [{'uid': '90',
                       'status': 'ready',
                       'data': [{'mac': 'ee:ae:c5:e0:f5:17',
                                 'server_id': '10.20.0.157',
                                 'yiaddr': '10.20.0.133',
                                 'iface': 'eth0'}]},
                      {'uid': '91',
                       'status': 'ready',
                       'data': [{'mac': 'bc:ae:c5:e0:f5:85',
                                 'server_id': '10.20.0.20',
                                 'yiaddr': '10.20.0.131',
                                 'iface': 'eth0'}]}]
        }
        with patch.object(self.receiver, '_get_master_macs') as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "error")

    def test_check_dhcp_resp_empty_nodes(self):
        # No 'nodes' key at all: nothing to inspect, status unchanged.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'ready'
        }
        with patch.object(self.receiver, '_get_master_macs') as master_macs:
            master_macs.return_value = [{'addr': 'bc:ae:c5:e0:f5:85'}]
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, "ready")
        self.assertEqual(self.task.result, {})

    def test_check_dhcp_resp_empty_nodes_erred(self):
        # An incoming 'error' status without nodes is passed through.
        kwargs = {
            'task_uuid': self.task.uuid,
            'status': 'error'
        }
        with patch.object(self.receiver, '_get_master_macs'):
            self.receiver.check_dhcp_resp(**kwargs)
        self.db.refresh(self.task)
        self.assertEqual(self.task.status, 'error')
        self.assertEqual(self.task.result, {})
class TestConsumer(BaseIntegrationTest):
def setUp(self):

View File

@ -193,86 +193,6 @@ class TestTaskManagers(BaseIntegrationTest):
supertask = self.env.launch_deployment()
self.env.wait_error(supertask, 60)
    @fake_tasks()
    def test_network_verify_task_managers(self):
        # Two single-interface nodes: enough for the connectivity check
        # (needs >= 2 nodes); verification is expected to succeed.
        meta1 = self.env.generate_interfaces_in_meta(1)
        mac1 = meta1['interfaces'][0]['mac']
        meta2 = self.env.generate_interfaces_in_meta(1)
        mac2 = meta2['interfaces'][0]['mac']
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": True, "meta": meta1, "mac": mac1},
                {"api": True, "meta": meta2, "mac": mac2},
            ]
        )

        task = self.env.launch_verify_networks()
        self.env.wait_ready(task, 30)
    @fake_tasks()
    def test_network_verify_compares_received_with_cached(self):
        # Changing vlan_start in the submitted config must not break
        # verification: received data is compared against the cache.
        meta1 = self.env.generate_interfaces_in_meta(1)
        mac1 = meta1['interfaces'][0]['mac']
        meta2 = self.env.generate_interfaces_in_meta(1)
        mac2 = meta2['interfaces'][0]['mac']
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": True, "meta": meta1, "mac": mac1},
                {"api": True, "meta": meta2, "mac": mac2},
            ]
        )
        resp = self.app.get(
            reverse(
                'NetworkConfigurationHandler',
                kwargs={'cluster_id': self.env.clusters[0].id}
            ),
            headers=self.default_headers
        )
        self.assertEquals(200, resp.status)
        nets = json.loads(resp.body)

        nets['networks'][-1]["vlan_start"] = 500
        task = self.env.launch_verify_networks(nets)
        self.env.wait_ready(task, 30)
@fake_tasks(fake_rpc=False)
def test_network_verify_fails_if_admin_intersection(self, mocked_rpc):
meta1 = self.env.generate_interfaces_in_meta(1)
mac1 = meta1['interfaces'][0]['mac']
meta2 = self.env.generate_interfaces_in_meta(1)
mac2 = meta2['interfaces'][0]['mac']
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"api": True, "meta": meta1, "mac": mac1},
{"api": True, "meta": meta2, "mac": mac2},
]
)
resp = self.app.get(
reverse(
'NetworkConfigurationHandler',
kwargs={'cluster_id': self.env.clusters[0].id}
),
headers=self.default_headers
)
self.assertEquals(200, resp.status)
nets = json.loads(resp.body)
nets['networks'][-1]['cidr'] = settings.ADMIN_NETWORK['cidr']
task = self.env.launch_verify_networks(nets)
self.env.wait_error(task, 30)
self.assertIn(
task.message,
"Intersection with admin "
"network(s) '{0}' found".format(
settings.ADMIN_NETWORK['cidr']
)
)
self.assertEquals(mocked_rpc.called, False)
def test_deletion_empty_cluster_task_manager(self):
cluster = self.env.create_cluster(api=True)
resp = self.app.delete(

View File

@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from mock import patch
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.test.base import reverse
@patch('nailgun.rpc.receiver.NailgunReceiver._get_master_macs')
class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
    """Task-manager level tests for network verification.

    The class-level patch replaces master-node MAC discovery, so every
    test method receives a macs_mock argument (last positional mock).
    """

    def setUp(self):
        # MACs owned by the master node vs. a foreign (rogue) one.
        self.master_macs = [{'addr': 'bc:ae:c5:e0:f5:85'},
                            {'addr': 'ee:ae:c5:e0:f5:17'}]
        self.not_master_macs = [{'addr': 'ee:ae:ee:e0:f5:85'}]
        super(TestVerifyNetworkTaskManagers, self).setUp()
        meta1 = self.env.generate_interfaces_in_meta(1)
        mac1 = meta1['interfaces'][0]['mac']
        meta2 = self.env.generate_interfaces_in_meta(1)
        mac2 = meta2['interfaces'][0]['mac']
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": True, "meta": meta1, "mac": mac1},
                {"api": True, "meta": meta2, "mac": mac2},
            ]
        )

    def tearDown(self):
        # Let fake task threads finish before tearing the env down.
        self._wait_for_threads()
        super(TestVerifyNetworkTaskManagers, self).tearDown()

    @fake_tasks()
    def test_network_verify_task_managers_dhcp_on_master(self, macs_mock):
        # Only the master's own MACs are reported: no rogue dhcp
        # servers, so verification succeeds.
        macs_mock.return_value = self.master_macs
        task = self.env.launch_verify_networks()
        self.env.wait_ready(task, 30)

    @fake_tasks()
    def test_network_verify_compares_received_with_cached(self, macs_mock):
        # Changing vlan_start in the submitted config must not break
        # verification against the cached network configuration.
        macs_mock.return_value = self.master_macs
        resp = self.app.get(
            reverse(
                'NetworkConfigurationHandler',
                kwargs={'cluster_id': self.env.clusters[0].id}
            ),
            headers=self.default_headers
        )
        self.assertEquals(200, resp.status)
        nets = json.loads(resp.body)

        nets['networks'][-1]["vlan_start"] = 500
        task = self.env.launch_verify_networks(nets)
        self.env.wait_ready(task, 30)

    @fake_tasks(fake_rpc=False)
    def test_network_verify_fails_if_admin_intersection(self,
                                                        mocked_rpc, macs_mock):
        """Overlap with the admin network fails before any RPC is sent."""
        macs_mock.return_value = self.master_macs
        resp = self.app.get(
            reverse(
                'NetworkConfigurationHandler',
                kwargs={'cluster_id': self.env.clusters[0].id}
            ),
            headers=self.default_headers
        )
        self.assertEquals(200, resp.status)
        nets = json.loads(resp.body)

        nets['networks'][-1]['cidr'] = settings.ADMIN_NETWORK['cidr']

        task = self.env.launch_verify_networks(nets)
        self.env.wait_error(task, 30)
        # assertIn(member, container): the expected substring goes first
        # and the task message second -- the original call had the
        # arguments swapped, testing the opposite containment.
        self.assertIn(
            "Intersection with admin "
            "network(s) '{0}' found".format(
                settings.ADMIN_NETWORK['cidr']
            ),
            task.message
        )
        self.assertEquals(mocked_rpc.called, False)

View File

@ -40,6 +40,8 @@ requires = [
'wsgiref==0.1.2',
'fysom==1.0.11',
'jsonschema==2.0.0',
'netifaces==0.8',
'psycopg2==2.4.6'
]
major_version = '0.1'

View File

@ -30,10 +30,11 @@
</div>
<div class="verification-text-placeholder">
<li><strong>Network Verification is done in 3 steps:</strong></li>
<li><strong>Network Verification is done in 4 steps:</strong></li>
<li>1. Every node starts listening for test frames</li>
<li>2. Every node sends out 802.1Q tagged UDP frames</li>
<li>3. Nodes listeners register test frames from other nodes</li>
<li>4. Send DHCP discover messages on all available ports.</li>
</div>
<% if (cluster.task('verify_networks', 'ready')) { %>

View File

@ -333,17 +333,9 @@ casper.then(function() {
casper.then(function() {
this.test.comment('Testing cluster networks: verification');
this.click('.verify-networks-btn:not(:disabled)');
this.test.assertSelectorAppears('.connect-3-success', 'Verification result is rendered', 10000);
this.then(function() {
this.test.assertExists('input:disabled', 'Form fields are disabled while verification');
this.test.assertExists('.connect-3-success', 'Verification is in progress');
this.test.info('Waiting for verification readiness...');
});
this.test.assertSelectorDisappears('.connect-3-success', 'Verification result is rendered', 10000);
this.then(function() {
this.test.assertExists('.connect-3-error', 'Verification was failed without nodes in cluster');
this.test.assertExists('input:not(:disabled)', 'Form fields are enabled again after verification');
});
this.test.assertSelectorAppears('.connect-3-error',
'There should be atleast 1 node for dhcp check. And 2 nodes for connectivity check', 10000);
});
casper.then(function() {

View File

@ -25,6 +25,7 @@ module Naily
autoload 'Producer', 'naily/producer'
autoload 'Dispatcher', 'naily/dispatcher'
autoload 'Reporter', 'naily/reporter'
autoload 'SubtaskReporter', 'naily/reporter'
def self.logger
@logger

View File

@ -96,9 +96,8 @@ module Naily
end
def verify_networks(data)
reporter = Naily::Reporter.new(@producer, data['respond_to'], data['args']['task_uuid'])
args = data['args']
result = @orchestrator.verify_networks(reporter, data['args']['task_uuid'], args['nodes'])
reporter = Naily::SubtaskReporter.new(@producer, data['respond_to'], data['args']['task_uuid'], data['subtasks'])
result = @orchestrator.verify_networks(reporter, data['args']['task_uuid'], data['args']['nodes'])
report_result(result, reporter)
end
@ -126,7 +125,7 @@ module Naily
end
private
def report_result(result, reporter)
result = {} unless result.instance_of?(Hash)
status = {'status' => 'ready', 'progress' => 100}.merge(result)

View File

@ -27,4 +27,23 @@ module Naily
@producer.publish(message)
end
end
class SubtaskReporter < Reporter
def initialize(producer, method, task_uuid, subtasks)
super(producer, method, task_uuid)
@subtasks = subtasks
end
def report_to_subtask(subtask_name, msg)
if @subtasks[subtask_name] and @subtasks[subtask_name].any?
subtask_msg = {'task_uuid'=>@subtasks[subtask_name]['task_uuid']}.merge(msg)
message = {'method' => @subtasks[subtask_name]['respond_to'],
'args' => subtask_msg}
Naily.logger.info "Casting message to fuel: #{message.inspect}"
@producer.publish(message)
else
Naily.logger.info "No subtask #{subtask_name} for : #{@method}"
end
end
end
end