Update dnsmasq to set up DHCP when Admin networks are changed

When the parameters of any Admin network are changed, or an Admin network is
deleted, a new UpdateDnsmasq task is run to set up dnsmasq with the new
DHCP configuration.
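
For reference, a minimal sketch of the handler-side trigger, condensed from the
diff below (only the Admin-network comparison and the 409/400 error paths are
kept; the surrounding PUT handler code is trimmed):

    # Compare Admin networks before and after applying the new settings;
    # if they differ, schedule the 'update_dnsmasq' task for the master node.
    nm = objects.Cluster.get_network_manager(cluster)
    admin_nets = nm.get_admin_networks()
    nm.update(cluster, data)
    if admin_nets != nm.get_admin_networks():
        try:
            task = UpdateDnsmasqTaskManager().execute()
        except errors.TaskAlreadyRunning as exc:
            raise self.http(409, six.text_type(exc))
        if task.status == consts.TASK_STATUSES.error:
            raise self.http(400, task.message)

The scheduled task uploads /etc/hiera/networks.yaml with the current admin
network data, re-applies the dhcp-ranges.pp puppet manifest on the master node
and triggers a cobbler sync (see UpdateDnsmasqTask in task.py below).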

Co-Authored-By: Aleksei Kasatkin <akasatkin@mirantis.com>

Partial-Bug: #1495593

Change-Id: I532f21508c78bd142fbd0aa1754bcb7b617b121b
Nikita Koshikov 2015-09-15 14:14:04 -05:00 committed by Aleksey Kasatkin
parent 915a59d82c
commit 4978a95bb7
24 changed files with 688 additions and 157 deletions

View File

@ -45,6 +45,7 @@ from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.task.manager import CheckNetworksTaskManager
from nailgun.task.manager import UpdateDnsmasqTaskManager
from nailgun.task.manager import VerifyNetworksTaskManager
@ -66,10 +67,10 @@ class ProviderHandler(BaseHandler):
raise self.http(403, "Network configuration cannot be changed "
"during deployment and after upgrade.")
def _raise_error_task(self, cluster, exc):
def _raise_error_task(self, cluster, task_name, exc):
# set task status to error and update its corresponding data
task = Task(
name=consts.TASK_NAMES.check_networks,
name=task_name,
cluster=cluster,
status=consts.TASK_STATUSES.error,
progress=100,
@ -110,10 +111,10 @@ class ProviderHandler(BaseHandler):
def PUT(self, cluster_id):
""":returns: JSONized network configuration for cluster.
:http: * 200 (task successfully executed)
* 202 (network checking task scheduled for execution)
* 400 (data validation failed)
:http: * 200 (OK)
* 400 (data validation or some of the tasks failed)
* 404 (cluster not found in db)
* 409 (previous dnsmasq setup is not finished yet)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
self.check_net_provider(cluster)
@ -130,9 +131,17 @@ class ProviderHandler(BaseHandler):
if task.status == consts.TASK_STATUSES.error:
raise self.http(400, task.message, err_list=task.result)
objects.Cluster.get_network_manager(
cluster
).update(cluster, data)
nm = objects.Cluster.get_network_manager(cluster)
admin_nets = nm.get_admin_networks()
nm.update(cluster, data)
if admin_nets != nm.get_admin_networks():
try:
task = UpdateDnsmasqTaskManager().execute()
except errors.TaskAlreadyRunning as exc:
raise self.http(409, six.text_type(exc))
if task.status == consts.TASK_STATUSES.error:
raise self.http(400, task.message)
return self.serializer.serialize_for_cluster(cluster)
@ -221,7 +230,8 @@ class NetworkConfigurationVerifyHandler(ProviderHandler):
try:
data = self.validator.validate_networks_data(web.data(), cluster)
except Exception as exc:
self._raise_error_task(cluster, exc)
self._raise_error_task(
cluster, consts.TASK_NAMES.verify_networks, exc)
vlan_ids = [{
'name': n['name'],

View File

@ -14,6 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with node groups
"""
import six
import web
from nailgun.api.v1.handlers.base import CollectionHandler
@ -22,13 +27,12 @@ from nailgun.api.v1.handlers.base import SingleHandler
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators.node_group import NodeGroupValidator
from nailgun import consts
from nailgun.db import db
from nailgun.errors import errors
from nailgun import objects
"""
Handlers dealing with node groups
"""
from nailgun.task.manager import UpdateDnsmasqTaskManager
class NodeGroupHandler(SingleHandler):
@ -37,9 +41,22 @@ class NodeGroupHandler(SingleHandler):
validator = NodeGroupValidator
def DELETE(self, group_id):
""":returns: {}
:http: * 204 (object successfully deleted)
* 400 (data validation or some of the tasks failed)
* 404 (nodegroup not found in db)
* 409 (previous dnsmasq setup is not finished yet)
"""
node_group = self.get_object_or_404(objects.NodeGroup, group_id)
db().delete(node_group)
db().commit()
db().flush()
try:
task = UpdateDnsmasqTaskManager().execute()
except errors.TaskAlreadyRunning as exc:
raise self.http(409, six.text_type(exc))
if task.status == consts.TASK_STATUSES.error:
raise self.http(400, task.message)
raise web.webapi.HTTPError(
status="204 No Content",
data=""

View File

@ -27,6 +27,7 @@ from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeGroup
from nailgun.errors import errors
from nailgun import objects
@ -85,13 +86,20 @@ class NetworkConfigurationValidator(BasicValidator):
raise errors.InvalidData(
"No CIDR was specified for network "
"{0}".format(ng_db.id))
if cluster.is_locked and cls._check_for_ip_conflicts(
ng_data, cluster, notation, use_gateway):
nm = objects.Cluster.get_network_manager(cluster)
if ng_db.name == consts.NETWORKS.fuelweb_admin and \
cls._check_for_ip_conflicts(
ng_data, ng_db, nm, notation, True):
raise errors.InvalidData(
"New IP ranges for network '{0}' conflict "
"with already allocated IPs.".format(ng_data['name'])
)
"New IP ranges for network '{0}'({1}) conflict "
"with nodes' IPs.".format(
ng_data['name'], ng_data['id']))
elif cluster.is_locked and cls._check_for_ip_conflicts(
ng_data, ng_db, nm, notation, use_gateway):
raise errors.InvalidData(
"New IP ranges for network '{0}'({1}) conflict "
"with already allocated IPs.".format(
ng_data['name'], ng_data['id']))
return ng_data
@ -121,20 +129,13 @@ class NetworkConfigurationValidator(BasicValidator):
return data
@classmethod
def _check_for_ip_conflicts(cls, network, cluster, notation, use_gateway):
"""Will there be ip confclicts after networks update?
def _check_for_ip_conflicts(cls, network, ng_db, nm, notation,
use_gateway):
"""This method checks if any of already allocated IPs
This method checks if any of already allocated IPs will be
out of all ip-ranges after networks update.
will be out of all ip-ranges after networks update.
"""
# skip admin network
manager = objects.Cluster.get_network_manager(cluster)
if network['id'] == manager.get_admin_network_group_id():
return False
ng_db = db().query(NetworkGroup).get(network['id'])
ips = manager.get_assigned_ips_by_network_id(network['id'])
ips = nm.get_assigned_ips_by_network_id(network['id'])
ranges = []
if notation == consts.NETWORK_NOTATION.ip_ranges:
ranges = network.get('ip_ranges',
@ -143,29 +144,26 @@ class NetworkConfigurationValidator(BasicValidator):
cidr = network.get('cidr', ng_db.cidr)
ip_network = IPNetwork(cidr)
first_index = 2 if use_gateway else 1
ranges = [(ip_network[first_index], ip_network[-1])]
return not manager.check_ips_belong_to_ranges(ips, ranges)
ranges = [(ip_network[first_index], ip_network[-2])]
# check IPs of bootstrap nodes in Admin network
if ng_db.name == consts.NETWORKS.fuelweb_admin:
nodes = db().query(Node.ip).filter_by(group_id=ng_db.group_id)
node_ips = [x[0] for x in nodes]
if ng_db.group_id is None:
# shared admin network. get nodes from all default groups
nodes = db().query(Node.ip).join(NodeGroup).filter(
NodeGroup.name == consts.NODE_GROUPS.default
)
node_ips.extend(x[0] for x in nodes)
if not nm.check_ips_belong_to_ranges(node_ips, ranges):
return True
return not nm.check_ips_belong_to_ranges(ips, ranges)
@classmethod
def prepare_data(cls, data):
"""Prepares input data.
Filter input data based on the fact that
updating parameters of the fuel admin network
is forbidden for default node group.
Admin network cannot be updated because of:
- sharing itself between environments;
- having no mechanism to change its parameters on deployed Master node.
"""
if data.get("networks"):
default_admin = db.query(
NetworkGroup).filter_by(group_id=None).first()
data["networks"] = [
n for n in data["networks"]
if n.get("id") != default_admin.id
]
"""Prepares input data. Noop filter for now."""
return data
@classmethod

View File

@ -265,7 +265,10 @@ TASK_NAMES = Enum(
# statistics
'create_stats_user',
'remove_stats_user'
'remove_stats_user',
# set up DHCP via dnsmasq for multiple node groups
'update_dnsmasq'
)
NOTIFICATION_STATUSES = Enum(

View File

@ -32,6 +32,7 @@ from nailgun.utils.migration import drop_enum
from nailgun.utils.migration import upgrade_enum
release_states_old = (
'available',
'unavailable',
@ -53,15 +54,47 @@ task_statuses_new = task_statuses_old + (
)
task_names_old = (
'super',
'deploy',
'deployment',
'provision',
'stop_deployment',
'reset_environment',
'update',
'spawn_vms',
'node_deletion',
'cluster_deletion',
'remove_images',
'check_before_deployment',
'check_networks',
'verify_networks',
'check_dhcp',
'verify_network_connectivity',
'multicast_verification',
'check_repo_availability',
'check_repo_availability_with_setup',
'dump',
'capacity_log',
'create_stats_user',
'remove_stats_user',
)
task_names_new = task_names_old + (
'update_dnsmasq',
)
def upgrade():
create_components_table()
create_release_components_table()
upgrade_nodegroups_name_cluster_constraint()
upgrade_release_state()
task_statuses_upgrade()
task_names_upgrade()
def downgrade():
task_names_downgrade()
task_statuses_downgrade()
downgrade_release_state()
op.drop_constraint('_name_cluster_uc', 'nodegroups',)
@ -180,3 +213,23 @@ def task_statuses_upgrade():
def task_statuses_downgrade():
upgrade_enum('tasks', 'status', 'task_status',
task_statuses_new, task_statuses_old)
def task_names_upgrade():
upgrade_enum(
"tasks",
"name",
"task_name",
task_names_old,
task_names_new
)
def task_names_downgrade():
upgrade_enum(
"tasks",
"name",
"task_name",
task_names_new,
task_names_old
)

View File

@ -20,7 +20,7 @@ default_messages = {
# common errors
"InvalidData": "Invalid data received",
"AlreadyExists": "Object already exists",
"DumpRunning": "Dump already running",
"TaskAlreadyRunning": "Task is already running",
# REST errors
"CannotDelete": "Can't delete object",

View File

@ -6,12 +6,12 @@
"status": "discover",
"name": "Supermicro X9DRW",
"hostname": "node-1",
"ip": "172.18.67.168",
"ip": "10.20.0.168",
"online": true,
"labels": {},
"pending_addition": false,
"platform_name": "X9DRW",
"mac": "00:25:90:6a:b1:10",
"mac": "00:25:90:6a:b1:11",
"meta": {
"cpu": {
"real": 2,
@ -158,7 +158,7 @@
"max_speed": 1000,
"name": "eth1",
"current_speed": null,
"ip": "10.20.0.3",
"ip": "10.20.0.168",
"driver": "igb",
"bus_info": "0000:02:00.0",
"offloading_modes":[
@ -344,7 +344,7 @@
"status": "discover",
"name": "Dell Inspiron",
"hostname": "node-2",
"ip": "10.20.0.1",
"ip": "10.20.0.133",
"online": true,
"labels": {},
"pending_addition": false,
@ -370,7 +370,7 @@
},
"interfaces": [
{
"ip": "10.20.0.4",
"ip": "10.20.0.133",
"mac": "58:91:cF:2a:c4:1b",
"netmask": "255.255.255.0",
"driver": "igb",
@ -556,12 +556,12 @@
"status": "discover",
"name": "Supermicro X9SCD",
"hostname": "node-3",
"ip": "172.18.67.167",
"ip": "10.20.0.167",
"online": true,
"labels": {},
"pending_addition": false,
"platform_name": "X9SCD",
"mac": "00:25:90:23:9f:b7",
"mac": "00:25:90:23:9f:b6",
"meta": {
"memory": {
"slots": 4,
@ -625,7 +625,7 @@
"pxe": false
},
{
"ip": "10.20.0.5",
"ip": "10.20.0.167",
"mac": "00:25:90:67:9f:b6",
"max_speed": 1000,
"name": "eth0",
@ -675,7 +675,7 @@
"current_speed": 1000,
"driver": "igb",
"bus_info": "0000:09:00.0",
"pxe": true
"pxe": false
}
],
"disks": [
@ -753,7 +753,7 @@
"status": "discover",
"name": "Supermicro X9SCD (offline)",
"hostname": "node-4",
"ip": "172.18.67.173",
"ip": "10.20.0.173",
"online": false,
"labels": {},
"pending_addition": false,
@ -799,7 +799,7 @@
},
{
"name": "eth0",
"ip": "10.20.0.6",
"ip": "10.20.0.173",
"netmask": "255.255.255.240",
"driver": "igb",
"bus_info": "0000:11:00.0",
@ -885,12 +885,12 @@
"error_msg": "You need more money",
"name": "Supermicro X9SCD (error)",
"hostname": "node-5",
"ip": "172.18.82.135",
"ip": "10.20.0.135",
"online": true,
"labels": {},
"pending_addition": false,
"platform_name": "X9SCD",
"mac": "00:25:90:67:9c:9c",
"mac": "00:25:90:67:9c:9d",
"meta": {
"memory": {
"slots": 4,
@ -925,7 +925,7 @@
"max_speed": 1000,
"name": "eth1",
"current_speed": null,
"ip": "10.20.0.7",
"ip": "10.20.0.135",
"driver": "igb",
"bus_info": "0000:12:00.0",
"pxe": true
@ -1016,7 +1016,7 @@
"status": "discover",
"name": "VirtualBox",
"hostname": "node-6",
"ip": "10.20.0.63",
"ip": "10.20.0.163",
"online": true,
"labels": {},
"pending_addition": false,
@ -1028,7 +1028,7 @@
"interfaces": [
{
"name": "eth0",
"ip": "10.20.0.63",
"ip": "10.20.0.163",
"netmask": "255.255.255.0",
"mac": "08:00:27:22:ab:aa",
"driver": "e1000",
@ -1217,12 +1217,12 @@
"status": "discover",
"name": "Supermicro X9DRW (srv07)",
"hostname": "node-8",
"ip": "172.18.67.170",
"ip": "10.20.0.170",
"online": true,
"labels": {},
"pending_addition": false,
"platform_name": "X9DRW",
"mac": "00:25:90:6a:b1:84",
"mac": "00:25:90:6a:b1:b5",
"meta": {
"cpu": {
"real": 2,
@ -1339,7 +1339,7 @@
"pxe": false
},
{
"ip": "10.20.0.7",
"ip": "10.20.0.170",
"mac": "00:25:90:6a:b1:b5",
"max_speed": 1000,
"name": "p2p2",

View File

@ -339,6 +339,11 @@ class NetworkManager(object):
# IP address has not been assigned, let's do it
vip = cls.get_free_ips(network, ips_in_use=ips_in_use)[0]
ne_db = IPAddr(network=network.id, ip_addr=vip, vip_type=vip_type)
# delete the stale VIP address after a new one has been allocated.
if cluster_vip:
db().delete(cluster_vip)
db().add(ne_db)
db().flush()
@ -1258,6 +1263,8 @@ class NetworkManager(object):
)
db().add(new_admin)
db().flush()
objects.NetworkGroup._update_range_from_cidr(
new_admin, new_admin.cidr, use_gateway=True)
@classmethod
def check_network_restrictions(cls, cluster, restrictions):
@ -1356,8 +1363,6 @@ class NetworkManager(object):
def update_networks(cls, network_configuration):
if 'networks' in network_configuration:
for ng in network_configuration['networks']:
if ng['id'] == cls.get_admin_network_group_id():
continue
ng_db = db().query(NetworkGroup).get(ng['id'])
@ -1534,13 +1539,48 @@ class NetworkManager(object):
"""Returns IPs related to network with provided ID.
"""
return [x[0] for x in
db().query(IPAddr.ip_addr).filter_by(
network=network_id)]
db().query(
IPAddr.ip_addr
).filter(
IPAddr.network == network_id,
or_(
IPAddr.node.isnot(None),
IPAddr.vip_type.isnot(None)
)
)]
@classmethod
def get_admin_networks(cls, cluster_nodegroup_info=False):
admin_db = db().query(
NetworkGroup
).filter_by(
name=consts.NETWORKS.fuelweb_admin
)
result = []
for net in admin_db:
net_info = {
'id': net.id,
'cidr': net.cidr,
'gateway': net.gateway,
'ip_ranges': [[ir.first, ir.last]
for ir in net.ip_ranges]
}
if cluster_nodegroup_info:
net_info.update({
'node_group_id': net.group_id,
'node_group_name':
net.nodegroup.name if net.group_id else None,
'cluster_id':
net.nodegroup.cluster_id if net.group_id else None,
'cluster_name':
net.nodegroup.cluster.name if net.group_id else None,
})
result.append(net_info)
return result
class AllocateVIPs70Mixin(object):
@classmethod
def _build_advanced_vip_info(cls, vip_info, role, address):
return {'network_role': role['id'],

View File

@ -169,8 +169,7 @@ class Cluster(NailgunObject):
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR,
errors.InvalidNetworkPool
errors.NoSuitableCIDR
) as exc:
db().delete(new_cluster)
raise errors.CannotCreate(exc.message)

View File

@ -141,10 +141,6 @@ class NetworkGroup(NailgunObject):
"""
notation = instance.meta['notation']
data_meta = data.get('meta', {})
# if notation data is present change ip ranges and remove
# stalled ip addresses for the network group
if notation and not instance.nodegroup.cluster.is_locked:
cls._delete_ips(instance)
notation = data_meta.get('notation', notation)
if notation == consts.NETWORK_NOTATION.ip_ranges:

View File

@ -37,14 +37,12 @@ class NodeGroup(NailgunObject):
cluster = Cluster.get_by_uid(new_group.cluster_id)
nm = Cluster.get_network_manager(cluster)
nst = cluster.network_config.segmentation_type
nm.create_network_groups(cluster, nst,
gid=new_group.id)
nm.create_network_groups(cluster, nst, gid=new_group.id)
nm.create_admin_network_group(new_group.cluster_id, new_group.id)
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR,
errors.InvalidNetworkPool
errors.NoSuitableCIDR
) as exc:
db().delete(new_group)
raise errors.CannotCreate(exc.message)

View File

@ -175,8 +175,13 @@ class NailgunReceiver(object):
map(db().delete, ips)
db().flush()
nm = objects.Cluster.get_network_manager(cluster)
admin_nets = nm.get_admin_networks()
objects.Cluster.delete(cluster)
db().flush()
if admin_nets != nm.get_admin_networks():
# import it here due to a cyclic dependency problem
from nailgun.task.manager import UpdateDnsmasqTaskManager
UpdateDnsmasqTaskManager().execute()
notifier.notify(
"done",
@ -1175,3 +1180,25 @@ class NailgunReceiver(object):
except nailgun_errors.ObjectNotFound:
logger.warning("Task '%s' acknowledgement as running failed "
"due to task doesn't exist in DB", task_uuid)
@classmethod
def update_dnsmasq_resp(cls, **kwargs):
logger.info("RPC method update_dnsmasq_resp received: %s",
jsonutils.dumps(kwargs))
task_uuid = kwargs.get('task_uuid')
status = kwargs.get('status')
error = kwargs.get('error', '')
message = kwargs.get('msg', '')
task = objects.Task.get_by_uuid(
task_uuid, fail_if_not_found=True, lock_for_update=True)
data = {'status': status, 'progress': 100, 'message': message}
if status == consts.TASK_STATUSES.error:
logger.error("Task %s, id: %s failed: %s",
task.name, task.id, error)
data['message'] = error
objects.Task.update(task, data)
cls._update_action_log_entry(status, task.name, task_uuid, [])

View File

@ -92,7 +92,7 @@ class TaskManager(object):
)
for task in current_tasks:
if task.status == "running":
raise errors.DumpRunning()
raise errors.TaskAlreadyRunning()
elif task.status in ("ready", "error"):
db().delete(task)
db().commit()
@ -1274,3 +1274,19 @@ class RemoveStatsUserTaskManager(BaseStatsUserTaskManager):
task_name = consts.TASK_NAMES.remove_stats_user
task_cls = tasks.RemoveStatsUserTask
class UpdateDnsmasqTaskManager(TaskManager):
def execute(self):
logger.info("Starting update_dnsmasq task")
self.check_running_task(consts.TASK_NAMES.update_dnsmasq)
task = Task(name=consts.TASK_NAMES.update_dnsmasq)
db().add(task)
db().commit()
self._call_silently(
task,
tasks.UpdateDnsmasqTask
)
return task

View File

@ -20,6 +20,7 @@ import os
import netaddr
import six
import yaml
from sqlalchemy import func
from sqlalchemy import not_
@ -1639,6 +1640,56 @@ class RemoveStatsUserTask(object):
)
class UpdateDnsmasqTask(object):
@classmethod
def get_admin_networks_data(cls):
nm = objects.Cluster.get_network_manager()
return {'admin_networks': nm.get_admin_networks(True)}
@classmethod
def message(cls, task):
rpc_message = make_astute_message(
task,
'execute_tasks',
'update_dnsmasq_resp',
{
'tasks': [{
'type': consts.ORCHESTRATOR_TASK_TYPES.upload_file,
'uids': ['master'],
'parameters': {
'path': '/etc/hiera/networks.yaml',
'data': yaml.safe_dump(cls.get_admin_networks_data())}
}, {
'type': consts.ORCHESTRATOR_TASK_TYPES.puppet,
'uids': ['master'],
'parameters': {
'puppet_modules': '/etc/puppet/modules',
'puppet_manifest': '/etc/puppet/modules/nailgun/'
'examples/dhcp-ranges.pp',
'timeout': 300,
'cwd': '/'}
}, {
'type': 'cobbler_sync',
'uids': ['master'],
'parameters': {
'provisioning_info':
provisioning_serializers.ProvisioningSerializer.
serialize_cluster_info(None, None)
}
}]
}
)
return rpc_message
@classmethod
def execute(cls, task):
rpc.cast(
'naily',
cls.message(task)
)
if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
rpc.cast = fake_cast
CheckRepositoryConnectionFromMasterNodeTask\

View File

@ -404,10 +404,25 @@ class EnvironmentManager(object):
ng = resp
else:
ng = NodeGroup.create(ng_data)
db().commit()
db().flush()
return ng
def delete_node_group(self, ng_id, api=True):
if api:
return self.app.delete(
reverse(
'NodeGroupHandler',
kwargs={'obj_id': ng_id}
),
headers=self.default_headers,
expect_errors=False
)
else:
ng = db().query(NodeGroup).get(ng_id)
db().delete(ng)
db().flush()
def create_plugin(self, api=False, cluster=None, **kwargs):
plugin_data = self.get_default_plugin_metadata(**kwargs)

View File

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from mock import patch
from oslo_serialization import jsonutils
@ -437,6 +439,25 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
self.cluster.network_groups)[0]
self.assertEqual(publ_ng.cidr, '199.61.0.0/24')
@patch('nailgun.task.task.rpc.cast')
def test_admin_range_update(self, _):
data = self.env.neutron_networks_get(self.cluster.id).json_body
admin = filter(lambda ng: ng['name'] == 'fuelweb_admin',
data['networks'])[0]
orig_range = netaddr.IPRange(admin['ip_ranges'][0][0],
admin['ip_ranges'][0][1])
admin['ip_ranges'][0] = [str(orig_range[1]), str(orig_range[-2])]
new_range = admin['ip_ranges'][0]
resp = self.env.neutron_networks_put(self.cluster.id, data)
self.assertEqual(resp.status_code, 200)
data = self.env.neutron_networks_get(self.cluster.id).json_body
admin = filter(lambda ng: ng['name'] == 'fuelweb_admin',
data['networks'])[0]
self.assertEqual(new_range, admin['ip_ranges'][0])
def test_set_ip_range(self):
ng_names = (consts.NETWORKS.management,
consts.NETWORKS.storage,
@ -673,10 +694,7 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
cluster_kwargs={
"api": True,
"net_provider": consts.CLUSTER_NET_PROVIDERS.nova_network,
},
nodes_kwargs=[
{"pending_addition": True, "api": True}
]
}
)
def test_netconfig_error_when_admin_cidr_match_other_network_cidr(self):
@ -691,6 +709,8 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
task['message'])
def test_deploy_error_when_admin_cidr_match_other_network_cidr(self):
self.env.create_node(cluster_id=self.cluster['id'],
pending_addition=True)
resp = self.env.cluster_changes_put(self.cluster['id'],
expect_errors=True)
self.assertEqual(resp.status_code, 200)

View File

@ -916,6 +916,54 @@ class TestNeutronManager(BaseIntegrationTest):
self.check_networks_assignment(self.env.nodes[0])
def test_admin_networks_serialization(self):
cluster = self.env.create(
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre}
)
ng = self.env.create_node_group().json_body
admin_nets = self.db.query(NetworkGroup).filter_by(
name='fuelweb_admin'
)
admin_def = admin_nets.filter_by(group_id=None).first()
admin_ng = admin_nets.filter_by(group_id=ng['id']).first()
expected = [
{
'id': admin_def.id,
'cidr': '10.20.0.0/24',
'gateway': '10.20.0.1',
'ip_ranges': [['10.20.0.129', '10.20.0.254']]
},
{
'id': admin_ng.id,
'cidr': '9.9.9.0/24',
'gateway': '9.9.9.1',
'ip_ranges': [['9.9.9.2', '9.9.9.254']]
}
]
nm = objects.Cluster.get_network_manager()
admin_nets = nm.get_admin_networks()
self.assertItemsEqual(admin_nets, expected)
expected[0].update({
'node_group_id': None,
'node_group_name': None,
'cluster_id': None,
'cluster_name': None
})
expected[1].update({
'node_group_id': ng['id'],
'node_group_name': ng['name'],
'cluster_id': cluster['id'],
'cluster_name': cluster['name']
})
admin_nets = nm.get_admin_networks(True)
self.assertItemsEqual(admin_nets, expected)
def test_check_admin_network_mapping(self):
self.env.create(
cluster_kwargs={

View File

@ -15,6 +15,7 @@
# under the License.
import copy
from mock import patch
from oslo_serialization import jsonutils
@ -52,16 +53,21 @@ class TestNetworkModels(BaseIntegrationTest):
self._wait_for_threads()
super(TestNetworkModels, self).tearDown()
def test_cluster_locking_during_deployment(self):
def create_env_using_statuses(self, cluster_status, node_status):
self.env.create(
cluster_kwargs={'status': consts.CLUSTER_STATUSES.deployment},
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
'status': cluster_status
},
nodes_kwargs=[
{'pending_addition': False,
'status': consts.NODE_STATUSES.deploying},
{'pending_addition': False,
'status': consts.NODE_STATUSES.deploying},
{'pending_addition': False,
'status': consts.NODE_STATUSES.deploying}])
{'pending_addition': False, 'status': node_status},
{'pending_addition': False, 'status': node_status},
{'pending_deletion': False, 'status': node_status}])
def test_cluster_locking_during_deployment(self):
self.create_env_using_statuses(consts.CLUSTER_STATUSES.deployment,
consts.NODE_STATUSES.deploying)
test_nets = self.env.neutron_networks_get(
self.env.clusters[0].id).json_body
@ -93,19 +99,8 @@ class TestNetworkModels(BaseIntegrationTest):
self.assertEqual(resp_cluster.status_code, 403)
def test_networks_update_after_deployment(self):
self.env.create(
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
'status': consts.CLUSTER_STATUSES.operational
},
nodes_kwargs=[
{'pending_addition': False,
'status': consts.NODE_STATUSES.ready},
{'pending_addition': False,
'status': consts.NODE_STATUSES.ready},
{'pending_deletion': False,
'status': consts.NODE_STATUSES.ready}])
self.create_env_using_statuses(consts.CLUSTER_STATUSES.operational,
consts.NODE_STATUSES.ready)
test_nets = self.env.neutron_networks_get(
self.env.clusters[0].id).json_body
@ -127,8 +122,9 @@ class TestNetworkModels(BaseIntegrationTest):
self.assertEqual(400, resp_neutron_net.status_code)
self.assertEqual(
"New IP ranges for network '{0}' conflict "
"with already allocated IPs.".format(test_network_name),
"New IP ranges for network '{0}'({1}) conflict "
"with already allocated IPs.".format(test_network_name,
mgmt_net['id']),
resp_neutron_net.json_body['message'])
mgmt_net['cidr'] = u'192.168.0.0/30'
@ -150,6 +146,36 @@ class TestNetworkModels(BaseIntegrationTest):
self.assertDictEqual(test_network_params,
new_nets['networking_parameters'])
def test_admin_network_update_after_deployment(self):
self.create_env_using_statuses(consts.CLUSTER_STATUSES.operational,
consts.NODE_STATUSES.ready)
test_nets = self.env.neutron_networks_get(
self.env.clusters[0].id).json_body
admin_net = filter(
lambda x: x['name'] == consts.NETWORKS.fuelweb_admin,
test_nets['networks'])[0]
admin_net['cidr'] = u'191.111.0.0/26'
admin_net['ip_ranges'] = [[u'191.111.0.5', u'191.111.0.62']]
resp_neutron_net = self.env.neutron_networks_put(
self.env.clusters[0].id, test_nets, expect_errors=True)
self.assertEqual(400, resp_neutron_net.status_code)
self.assertEqual(
"New IP ranges for network '{0}'({1}) conflict "
"with nodes' IPs.".format(admin_net['name'], admin_net['id']),
resp_neutron_net.json_body['message'])
for node in self.env.nodes:
self.db.delete(node)
self.db.commit()
with patch('task.task.rpc.cast'):
resp_neutron_net = self.env.neutron_networks_put(
self.env.clusters[0].id, test_nets)
self.assertEqual(200, resp_neutron_net.status_code)
def test_nova_net_networking_parameters(self):
cluster = self.env.create_cluster(api=False)
self.db.delete(cluster.network_config)

View File

@ -1155,20 +1155,13 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
transformations
)
def test_gre_with_multi_groups(self):
@mock.patch('nailgun.task.task.rpc.cast')
def test_gre_with_multi_groups(self, mocked_rpc):
cluster = self.create_env(segment_type='gre', ctrl_count=3)
resp = self.env.create_node_group()
group_id = resp.json_body['id']
self.env.create_nodes_w_interfaces_count(
nodes_count=3,
if_count=2,
roles=['compute'],
pending_addition=True,
cluster_id=cluster.id,
group_id=group_id)
nets = self.env.neutron_networks_get(cluster.id).json_body
nets_w_gw = {'management': '199.99.20.0/24',
'storage': '199.98.20.0/24',
@ -1179,18 +1172,34 @@ class TestNeutronOrchestratorSerializer61(OrchestratorSerializerTestBase):
if net['name'] in nets_w_gw.keys():
if net['group_id'] == group_id:
net['cidr'] = nets_w_gw[net['name']]
# IP ranges for networks in default nodegroup must
# be updated as well to exclude gateway address.
# Use first 126 addresses to avoid clashing
# with floating range.
net['ip_ranges'] = [[
str(IPAddress(IPNetwork(net['cidr']).first + 2)),
str(IPAddress(IPNetwork(net['cidr']).first + 127)),
]]
if net['meta']['notation'] == 'ip_ranges':
net['ip_ranges'] = [[
str(IPAddress(IPNetwork(net['cidr']).first + 2)),
str(IPAddress(IPNetwork(net['cidr']).first + 126)),
]]
if not net['meta']['use_gateway']:
# IP ranges for networks in default nodegroup must
# be updated as well to exclude gateway address.
# Do not use first address to avoid clashing
# with floating range.
net['ip_ranges'] = [[
str(IPAddress(IPNetwork(net['cidr']).first + 2)),
str(IPAddress(IPNetwork(net['cidr']).first + 254)),
]]
net['meta']['use_gateway'] = True
net['gateway'] = str(
IPAddress(IPNetwork(net['cidr']).first + 1))
resp = self.env.neutron_networks_put(cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.assertEqual(mocked_rpc.call_count, 1)
self.env.create_nodes_w_interfaces_count(
nodes_count=3,
if_count=2,
roles=['compute'],
pending_addition=True,
cluster_id=cluster.id,
group_id=group_id)
self.prepare_for_deployment(cluster.nodes, 'gre')
serializer = get_serializer_for_cluster(cluster)
@ -1846,7 +1855,8 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
test_gateway
)
def test_neutron_l3_floating_w_multiple_node_groups(self):
@mock.patch('nailgun.rpc.cast')
def test_neutron_l3_floating_w_multiple_node_groups(self, _):
self.new_env_release_version = '1111-7.0'
self.prepare_for_deployment = \

View File

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import yaml
import nailgun
import nailgun.rpc as rpc
@ -35,7 +37,7 @@ from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.task.helpers import TaskHelper
from nailgun.task import manager
from nailgun.task.task import DeletionTask
from nailgun.task import task
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.utils import reverse
@ -595,7 +597,7 @@ class TestTaskManagers(BaseIntegrationTest):
self.assertRaises(errors.WrongNodeStatus, manager_.execute)
@fake_tasks()
@mock.patch.object(DeletionTask, 'execute')
@mock.patch.object(task.DeletionTask, 'execute')
def test_deletion_task_called(self, mdeletion_execute):
cluster = self.env.create_cluster()
cluster_id = cluster['id']
@ -611,19 +613,21 @@ class TestTaskManagers(BaseIntegrationTest):
manager_.execute()
self.assertEqual(mdeletion_execute.call_count, 1)
task, nodes = mdeletion_execute.call_args[0]
nodes = mdeletion_execute.call_args[0][1]
# unfortunately assertItemsEqual does not recurse into dicts
self.assertItemsEqual(
nodes['nodes_to_delete'],
DeletionTask.prepare_nodes_for_task([node_db])['nodes_to_delete']
task.DeletionTask.prepare_nodes_for_task(
[node_db])['nodes_to_delete']
)
self.assertItemsEqual(
nodes['nodes_to_restore'],
DeletionTask.prepare_nodes_for_task([node_db])['nodes_to_restore']
task.DeletionTask.prepare_nodes_for_task(
[node_db])['nodes_to_restore']
)
@fake_tasks()
@mock.patch.object(DeletionTask, 'execute')
@mock.patch.object(task.DeletionTask, 'execute')
def test_deletion_task_w_check_ceph(self, mdeletion_execute):
cluster = self.env.create_cluster()
cluster_id = cluster['id']
@ -984,3 +988,179 @@ class TestTaskManagers(BaseIntegrationTest):
self.env.nodes)[0]
self.assertNotEqual(primary_node.id, new_primary.id)
class TestUpdateDnsmasqTaskManagers(BaseIntegrationTest):
def tearDown(self):
self._wait_for_threads()
super(TestUpdateDnsmasqTaskManagers, self).tearDown()
def setUp(self):
super(TestUpdateDnsmasqTaskManagers, self).setUp()
cluster = self.env.create(
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre},
nodes_kwargs=[
{'api': True,
'pending_addition': True}
]
)
self.cluster = self.db.query(models.Cluster).get(cluster['id'])
def change_ip_range(self, net_name='fuelweb_admin', status_code=200):
data = self.env.neutron_networks_get(self.cluster['id']).json_body
admin = filter(lambda ng: ng['name'] == net_name,
data['networks'])[0]
orig_range = netaddr.IPRange(admin['ip_ranges'][0][0],
admin['ip_ranges'][0][1])
admin['ip_ranges'][0] = [str(orig_range[0]), str(orig_range[-2])]
resp = self.env.neutron_networks_put(
self.cluster['id'], data, expect_errors=(status_code != 200))
self.assertEqual(resp.status_code, status_code)
def test_update_dnsmasq_is_started_with_correct_message(self):
message = {
'api_version': '1',
'method': 'execute_tasks',
'respond_to': 'update_dnsmasq_resp',
'args': {
'task_uuid': '',
'tasks': [{
'type': consts.ORCHESTRATOR_TASK_TYPES.upload_file,
'uids': ['master'],
'parameters': {
'path': '/etc/hiera/networks.yaml',
'data': ''}
}, {
'type': consts.ORCHESTRATOR_TASK_TYPES.puppet,
'uids': ['master'],
'parameters': {
'puppet_modules': '/etc/puppet/modules',
'puppet_manifest': '/etc/puppet/modules/nailgun/'
'examples/dhcp-ranges.pp',
'timeout': 300,
'cwd': '/'}
}, {
'type': 'cobbler_sync',
'uids': ['master'],
'parameters': {
'provisioning_info': {
'engine': {
'url': 'http://localhost/cobbler_api',
'username': 'cobbler',
'password': 'cobbler',
'master_ip': '127.0.0.1'
}
}
}
}]
}
}
with mock.patch('nailgun.task.task.rpc.cast') as \
mocked_task:
self.change_ip_range()
message['args']['tasks'][0]['parameters']['data'] = yaml.safe_dump(
task.UpdateDnsmasqTask.get_admin_networks_data())
update_task = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq).first()
message['args']['task_uuid'] = update_task.uuid
self.assertEqual(mocked_task.call_count, 1)
self.assertEqual(mocked_task.call_args[0][1], message)
@mock.patch('nailgun.task.task.rpc.cast')
def test_update_dnsmasq_started_and_completed(self, mocked_rpc):
self.change_ip_range()
self.assertEqual(mocked_rpc.call_count, 1)
update_task = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq).first()
self.assertEqual(update_task.status, consts.TASK_STATUSES.running)
update_dnsmasq_msg = {
"status": "ready",
"task_uuid": update_task.uuid,
"error": "",
"msg": "Everything went fine."}
rpc.receiver.NailgunReceiver.update_dnsmasq_resp(**update_dnsmasq_msg)
self.db.refresh(update_task)
self.assertEqual(update_task.status, consts.TASK_STATUSES.ready)
self.assertEqual(update_task.message, update_dnsmasq_msg['msg'])
# run it one more time
self.change_ip_range()
# rpc.cast was called one more time
self.assertEqual(mocked_rpc.call_count, 2)
update_tasks = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq)
new_tasks = update_tasks.filter_by(status=consts.TASK_STATUSES.running)
self.assertEqual(new_tasks.count(), 1)
# old task was deleted
self.assertEqual(update_tasks.count(), 1)
@mock.patch('nailgun.task.task.rpc.cast')
def test_update_dnsmasq_started_and_failed(self, mocked_rpc):
self.change_ip_range()
self.assertEqual(mocked_rpc.call_count, 1)
update_task = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq).first()
self.assertEqual(update_task.status, consts.TASK_STATUSES.running)
update_dnsmasq_msg = {
"status": consts.TASK_STATUSES.error,
"task_uuid": update_task.uuid,
"error": "Something went wrong.",
"msg": ""}
rpc.receiver.NailgunReceiver.update_dnsmasq_resp(**update_dnsmasq_msg)
self.db.refresh(update_task)
self.assertEqual(update_task.status, consts.TASK_STATUSES.error)
self.assertEqual(update_task.message, update_dnsmasq_msg['error'])
# run it one more time
self.change_ip_range()
# rpc.cast was called one more time
self.assertEqual(mocked_rpc.call_count, 2)
update_tasks = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq)
new_tasks = update_tasks.filter_by(status=consts.TASK_STATUSES.running)
self.assertEqual(new_tasks.count(), 1)
# old task was deleted
self.assertEqual(update_tasks.count(), 1)
@mock.patch('nailgun.task.task.rpc.cast')
def test_update_admin_failed_while_previous_in_progress(self, mocked_rpc):
self.change_ip_range()
self.assertEqual(mocked_rpc.call_count, 1)
update_task = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq).first()
self.assertEqual(update_task.status, consts.TASK_STATUSES.running)
# changing another network works, as it does not require running
# update_dnsmasq
self.change_ip_range(net_name='public')
# no more calls were made
self.assertEqual(mocked_rpc.call_count, 1)
# request was rejected as previous update_dnsmasq task is still
# in progress
self.change_ip_range(status_code=409)
# no more calls were made
self.assertEqual(mocked_rpc.call_count, 1)
@mock.patch('nailgun.task.task.rpc.cast')
def test_update_dnsmasq_started_on_node_group_deletion(self, mocked_rpc):
ng = self.env.create_node_group().json_body
self.assertEqual(mocked_rpc.call_count, 0)
self.env.delete_node_group(ng['id'])
self.assertEqual(mocked_rpc.call_count, 1)
update_task = self.db.query(models.Task).filter_by(
name=consts.TASK_NAMES.update_dnsmasq).first()
self.assertEqual(update_task.status, consts.TASK_STATUSES.running)

View File

@ -472,7 +472,7 @@ class TestLogs(BaseIntegrationTest):
def test_snapshot_task_manager_already_running(self):
self.env.create_task(name="dump")
tm = DumpTaskManager()
self.assertRaises(errors.DumpRunning, tm.execute)
self.assertRaises(errors.TaskAlreadyRunning, tm.execute)
def test_log_package_handler_ok(self):
task = {
@ -521,7 +521,7 @@ class TestLogs(BaseIntegrationTest):
"""400 status when errors with uncompleted models in session occur"""
def dump_task_with_bad_model(*args, **kwargs):
raise errors.DumpRunning()
raise errors.TaskAlreadyRunning()
dump_manager().execute.side_effect = dump_task_with_bad_model

View File

@ -306,3 +306,22 @@ class TestTaskStatus(base.BaseAlembicMigrationTest):
for row in result.fetchall():
status = row[0]
self.assertEqual(status, consts.TASK_STATUSES.pending)
class TestTaskNameMigration(base.BaseAlembicMigrationTest):
def test_task_name_enum(self):
added_task_names = ('update_dnsmasq',)
tasks_table = self.meta.tables['tasks']
for name in added_task_names:
insert_table_row(tasks_table,
{'name': name,
'uuid': str(uuid.uuid4()),
'status': 'running'})
with self.assertRaisesRegexp(DataError, 'invalid input value for '
'enum task_name'):
insert_table_row(tasks_table,
{'name': 'wrong_task_name',
'uuid': str(uuid.uuid4()),
'status': 'running'})

View File

@ -22,7 +22,7 @@ from nailgun.api.v1.validators.network import NovaNetworkConfigurationValidator
from nailgun import consts
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.errors import errors
from nailgun.network.neutron import NeutronManager
from nailgun import objects
from nailgun.test import base
@ -333,31 +333,33 @@ class TestNetworkConfigurationValidator(base.BaseIntegrationTest):
"'use_gateway' cannot be provided without gateway")
def test_check_ip_conflicts(self):
nm = objects.Cluster.get_network_manager(self.cluster)
mgmt = self.find_net_by_name(consts.NETWORKS.management)
mgmt_db = self.db.query(NetworkGroup).get(mgmt['id'])
# firstly check default IPs from management net assigned to nodes
ips = NeutronManager.get_assigned_ips_by_network_id(mgmt['id'])
ips = nm.get_assigned_ips_by_network_id(mgmt['id'])
self.assertListEqual(['192.168.0.1', '192.168.0.2'], sorted(ips),
"Default IPs were changed for some reason.")
mgmt['cidr'] = '10.101.0.0/24'
result = NetworkConfigurationValidator._check_for_ip_conflicts(
mgmt, self.cluster, consts.NETWORK_NOTATION.cidr, False)
mgmt, mgmt_db, nm, 'cidr', False)
self.assertTrue(result)
mgmt['cidr'] = '192.168.0.0/28'
result = NetworkConfigurationValidator._check_for_ip_conflicts(
mgmt, self.cluster, consts.NETWORK_NOTATION.cidr, False)
mgmt, mgmt_db, nm, 'cidr', False)
self.assertFalse(result)
mgmt['ip_ranges'] = [['192.168.0.1', '192.168.0.15']]
result = NetworkConfigurationValidator._check_for_ip_conflicts(
mgmt, self.cluster, consts.NETWORK_NOTATION.ip_ranges, False)
mgmt, mgmt_db, nm, 'ip_ranges', False)
self.assertFalse(result)
mgmt['ip_ranges'] = [['10.101.0.1', '10.101.0.255']]
result = NetworkConfigurationValidator._check_for_ip_conflicts(
mgmt, self.cluster, consts.NETWORK_NOTATION.ip_ranges, False)
mgmt, mgmt_db, nm, 'ip_ranges', False)
self.assertTrue(result)
@ -625,18 +627,18 @@ class TestNeutronNetworkConfigurationValidator(base.BaseIntegrationTest):
roles=["compute"],
group_id=node_group.id)
def check_no_admin_network_in_validated_data(self):
def check_admin_network_in_validated_data(self):
default_admin = self.db.query(
NetworkGroup).filter_by(group_id=None).first()
validated_data = self.validator.prepare_data(self.config)
self.assertNotIn(
self.assertIn(
default_admin.id,
[ng['id'] for ng in validated_data['networks']]
)
def test_fuelweb_admin_removed(self):
self.check_no_admin_network_in_validated_data()
def test_fuelweb_admin_present(self):
self.check_admin_network_in_validated_data()
def test_fuelweb_admin_removed_w_additional_node_group(self):
def test_fuelweb_admin_present_w_additional_node_group(self):
self.create_additional_node_group()
self.check_no_admin_network_in_validated_data()
self.check_admin_network_in_validated_data()

View File

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import json
from nailgun import consts
@ -100,7 +102,8 @@ class TestNodeGroups(BaseIntegrationTest):
nets = db().query(NetworkGroup).filter_by(group_id=response['id'])
self.assertEquals(nets.count(), 5)
def test_nodegroup_deletion(self):
@patch('nailgun.task.task.rpc.cast')
def test_nodegroup_deletion(self, _):
resp = self.env.create_node_group()
response = resp.json_body
group_id = response['id']