Speed up ClusterChangesHandler

* added performance test
* improved assign_vip
* improved getting controllers in CheckBeforeDeployment
* improved checking untagged networks
* changed name of property default_group to default_group_id
* corrected existing tests and added new ones

Change-Id: Iba10fdae455c3f19a71cbcdc88f8cd370ffd1993
Closes-Bug: #1384623
This commit is contained in:
Kamil Sambor 2014-11-13 12:40:09 +01:00
parent 58e5f47457
commit a08434e5fb
20 changed files with 221 additions and 87 deletions

View File

@ -110,6 +110,10 @@ NODE_ERRORS = Enum(
'deletion'
)
NODE_GROUPS = Enum(
'default'
)
NETWORK_INTERFACE_TYPES = Enum(
'ether',
'bond'

View File

@ -31,7 +31,6 @@ from nailgun.db import db
from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
from nailgun.db.sqlalchemy.models.node import Node
from nailgun.db.sqlalchemy.models.node import NodeGroup
class ClusterChanges(Base):
@ -99,11 +98,6 @@ class Cluster(Base):
is_customized = Column(Boolean, default=False)
fuel_version = Column(Text, nullable=False)
def create_default_group(self):
# Create and immediately persist the cluster's "default" node group.
# NOTE(review): commits the whole session, not just this object.
ng = NodeGroup(cluster_id=self.id, name="default")
db().add(ng)
db().commit()
@property
def changes(self):
return [
@ -133,15 +127,6 @@ class Cluster(Base):
return False
return True
@property
def default_group(self):
# Lazily create the "default" node group on first access, then
# return its *id* (despite the name, this is an int, not the group).
if not self.node_groups:
self.create_default_group()
return [g.id for g in self.node_groups if g.name == "default"][0]
def get_default_group(self):
# Return the "default" NodeGroup instance itself.
# Raises IndexError if the cluster has no default group.
return [g for g in self.node_groups if g.name == "default"][0]
@property
def network_groups(self):
net_list = []

View File

@ -89,37 +89,42 @@ class NetworkCheck(object):
self.err_msgs)
def check_untagged_intersection(self):
"""check if there are 2 or more untagged networks on the same interface
except public and floating networks
"""check if there are untagged networks on the same interface
(both nova-net and neutron)
"""
netw_untagged = lambda n: (n['vlan_start'] is None) \
and (not n['meta'].get('ext_net_data')) \
and (not n['meta'].get('neutron_vlan_range'))
untagged_nets = set([n['name'] for n in self.networks
if netw_untagged(n)])
# check only if we have 2 or more untagged networks
untagged_nets = dict([(n['id'], n['name']) for n in self.networks
if netw_untagged(n)])
# check if nic have assign only one untagged network
if len(untagged_nets) >= 2:
logger.info(
"Untagged networks found, "
"checking intersection between them...")
interfaces = []
for node in self.cluster.nodes:
for iface in node.interfaces:
interfaces.append(iface)
found_intersection = []
for iface in interfaces:
bond_interfaces = (
objects.Cluster.get_bond_interfaces_for_all_nodes(
self.cluster,
untagged_nets.keys()))
nic_interfaces = (
objects.Cluster.get_nic_interfaces_for_all_nodes(
self.cluster,
untagged_nets.keys()))
found_intersection = []
all_interfaces = bond_interfaces + nic_interfaces
for iface in all_interfaces:
# network name is changed for Admin on UI
nets = [[ng['name'] for ng in self.networks
if n.id == ng['id']][0]
if n.id == ng['id']][0]
for n in iface.assigned_networks_list]
crossed_nets = set(nets) & untagged_nets
crossed_nets = set(nets) & set(untagged_nets.values())
if len(crossed_nets) > 1:
err_net_names = ['"{0}"'.format(i)
for i in crossed_nets]
found_intersection.append(
[iface.node.name, err_net_names])
found_intersection.append((objects.Node.get_by_mac_or_uid(
node_uid=iface.node_id).name,
err_net_names))
if found_intersection:
nodes_with_errors = [

View File

@ -213,7 +213,8 @@ class NetworkManager(object):
if network_name == 'public' and \
not objects.Node.should_have_public(node):
continue
group_id = node.group_id or node.cluster.default_group
group_id = (node.group_id or
objects.Cluster.get_default_group(node.cluster).id)
network = network_groups.filter(
or_(
@ -289,15 +290,7 @@ class NetworkManager(object):
if not cluster:
raise Exception(u"Cluster id='%s' not found" % cluster_id)
group_id = None
for node in cluster.nodes:
if 'controller' in node.all_roles or \
'primary-controller' in node.all_roles:
group_id = node.group_id
break
if not group_id:
group_id = cluster.default_group
group_id = objects.Cluster.get_controllers_group_id(cluster)
network = db().query(NetworkGroup).\
filter_by(name=network_name, group_id=group_id).first()
@ -326,7 +319,7 @@ class NetworkManager(object):
vip = cls.get_free_ips(network)[0]
ne_db = IPAddr(network=network.id, ip_addr=vip)
db().add(ne_db)
db().commit()
db().flush()
return vip
@ -415,11 +408,10 @@ class NetworkManager(object):
networks metadata
"""
nics = []
group_id = node.group_id
if not group_id:
group_id = node.cluster.default_group
group_id = (node.group_id or
objects.Cluster.get_default_group(node.cluster).id)
node_group = db().query(NodeGroup).get(group_id)
ngs = node_group.networks + [cls.get_admin_network_group(node.id)]
ngs_by_id = dict((ng.id, ng) for ng in ngs)
# sort Network Groups ids by map_priority
@ -1001,17 +993,16 @@ class NetworkManager(object):
db().flush()
@classmethod
def create_network_groups(cls, cluster_id, neutron_segment_type, gid=None):
def create_network_groups(cls, cluster, neutron_segment_type, gid=None):
"""Method for creation of network groups for cluster.
:param cluster_id: Cluster database ID.
:type cluster_id: int
:param cluster: Cluster instance.
:type cluster: instance
:returns: None
"""
cluster_db = objects.Cluster.get_by_uid(cluster_id)
group_id = gid or cluster_db.default_group
networks_metadata = cluster_db.release.networks_metadata
networks_list = networks_metadata[cluster_db.net_provider]["networks"]
group_id = gid or objects.Cluster.get_default_group(cluster).id
networks_metadata = cluster.release.networks_metadata
networks_list = networks_metadata[cluster.net_provider]["networks"]
used_nets = [IPNetwork(cls.get_admin_network_group().cidr)]
def check_range_in_use_already(cidr_range):
@ -1053,7 +1044,7 @@ class NetworkManager(object):
new_ip_range.last))
nw_group = NetworkGroup(
release=cluster_db.release.id,
release=cluster.release.id,
name=net['name'],
cidr=str(cidr) if cidr else None,
gateway=gw,
@ -1111,7 +1102,7 @@ class NetworkManager(object):
@classmethod
def create_network_groups_and_config(cls, cluster, data):
cls.create_network_groups(cluster.id,
cls.create_network_groups(cluster,
data.get('net_segment_type'))
if cluster.net_provider == 'neutron':
cls.create_neutron_config(cluster,
@ -1132,7 +1123,7 @@ class NetworkManager(object):
all_nets = [(n.name, n.cidr)
for n in node.cluster.network_groups if n.cidr]
if node.group_id != node.cluster.default_group:
if node.group_id != objects.Cluster.get_default_group(node.cluster).id:
admin_net = cls.get_admin_network_group()
all_nets.append((admin_net.name, admin_net.cidr))

View File

@ -174,14 +174,13 @@ class Cluster(NailgunObject):
data["fuel_version"] = settings.VERSION["release"]
new_cluster = super(Cluster, cls).create(data)
new_cluster.create_default_group()
cls.create_default_group(new_cluster)
cls.create_attributes(new_cluster)
try:
cls.get_network_manager(new_cluster).\
create_network_groups_and_config(new_cluster, data)
cls.add_pending_changes(new_cluster, "attributes")
cls.add_pending_changes(new_cluster, "networks")
@ -606,6 +605,73 @@ class Cluster(NailgunObject):
if meta.get('has_primary'):
cls.set_primary_role(instance, nodes, role)
@classmethod
def get_all_controllers(cls, instance):
    """Return every node of the cluster that carries the controller role.

    Both already-deployed controllers (role_list) and nodes with the
    role still pending (pending_role_list) are included.

    :param instance: Cluster model instance
    :returns: list of Node model instances
    """
    # Resolve the id of the 'controller' role for this cluster's release.
    controller_role_id = db().query(models.Role).\
        filter_by(release_id=instance.release_id).\
        filter_by(name='controller').first().id
    deployed = db().query(models.Node).filter_by(
        cluster_id=instance.id).join(
            models.Node.role_list, aliased=True).filter(
                models.Role.id == controller_role_id).all()
    pending = db().query(models.Node).filter_by(
        cluster_id=instance.id).join(
            models.Node.pending_role_list, aliased=True).filter(
                models.Role.id == controller_role_id).all()
    return deployed + pending
@classmethod
def get_controllers_group_id(cls, instance):
    """Return the node-group id where the cluster's controllers live.

    Prefers a deployed controller, falls back to a node with a pending
    controller role, and finally to the cluster's default node group.

    :param instance: Cluster model instance
    :returns: node group id (int)
    """
    controller_role_id = db().query(models.Role).filter_by(
        release_id=instance.release_id).\
        filter_by(name='controller').first().id
    # First look for an already-deployed controller that is not
    # scheduled for deletion.
    node = db().query(models.Node).\
        filter_by(cluster_id=instance.id).\
        filter(False == models.Node.pending_deletion).\
        join(models.Node.role_list, aliased=True).\
        filter(models.Role.id == controller_role_id).first()
    if node is None or not node.group_id:
        # Fall back to a node whose controller role is still pending.
        node = db().query(models.Node).\
            filter(False == models.Node.pending_deletion).\
            filter_by(cluster_id=instance.id).\
            join(models.Node.pending_role_list, aliased=True).\
            filter(models.Role.id == controller_role_id).first()
    if node is not None and node.group_id:
        return node.group_id
    return cls.get_default_group(instance).id
@classmethod
def get_bond_interfaces_for_all_nodes(cls, instance, networks=None):
    """Return all bond interfaces of the cluster's nodes.

    :param instance: Cluster model instance
    :param networks: optional iterable of NetworkGroup ids; when given,
        only bonds that carry one of these networks are returned
    :returns: list of NodeBondInterface model instances
    """
    query = db().query(models.NodeBondInterface).\
        join(models.Node).filter(models.Node.cluster_id == instance.id)
    if networks:
        # Narrow to bonds with at least one of the requested networks.
        query = query.join(
            models.NodeBondInterface.assigned_networks_list,
            aliased=True).filter(models.NetworkGroup.id.in_(networks))
    return query.all()
@classmethod
def get_nic_interfaces_for_all_nodes(cls, instance, networks=None):
    """Return all NIC interfaces of the cluster's nodes.

    :param instance: Cluster model instance
    :param networks: optional iterable of NetworkGroup ids; when given,
        only NICs that carry one of these networks are returned
    :returns: list of NodeNICInterface model instances
    """
    query = db().query(models.NodeNICInterface).\
        join(models.Node).filter(models.Node.cluster_id == instance.id)
    if networks:
        # Narrow to NICs with at least one of the requested networks.
        query = query.join(
            models.NodeNICInterface.assigned_networks_list,
            aliased=True).filter(models.NetworkGroup.id.in_(networks))
    return query.all()
@classmethod
def get_default_group(cls, instance):
    """Return the cluster's "default" node group.

    :param instance: Cluster model instance
    :returns: NodeGroup model instance
    :raises IndexError: if the cluster has no default group
    """
    default_groups = [group for group in instance.node_groups
                      if group.name == consts.NODE_GROUPS.default]
    return default_groups[0]
@classmethod
def create_default_group(cls, instance):
    """Create and persist the "default" node group for the cluster.

    :param instance: Cluster model instance
    :returns: the newly created NodeGroup model instance
    """
    node_group = models.NodeGroup(name=consts.NODE_GROUPS.default)
    instance.node_groups.append(node_group)
    # `db` is a session factory and must be *called* to get the session
    # (the original `db.add(...)` would raise AttributeError); every
    # other call site in this module uses db().add / db().flush.
    db().add(node_group)
    db().flush()
    return node_group
class ClusterCollection(NailgunCollection):
"""Cluster collection

View File

@ -233,7 +233,7 @@ class Node(NailgunObject):
break
if not instance.group_id:
instance.group_id = instance.cluster.default_group
instance.group_id = Cluster.get_default_group(instance.cluster).id
db().add(instance)
db().flush()

View File

@ -49,7 +49,7 @@ class NodeGroup(NailgunObject):
cluster = Cluster.get_by_uid(new_group.cluster_id)
nm = Cluster.get_network_manager(cluster)
nst = cluster.network_config.segmentation_type
nm.create_network_groups(new_group.cluster_id, nst,
nm.create_network_groups(cluster, nst,
gid=new_group.id)
nm.create_admin_network_group(new_group.cluster_id, new_group.id)
except (

View File

@ -56,7 +56,8 @@ class NetworkDeploymentSerializer(object):
def get_common_attrs(cls, cluster, attrs):
"""Cluster network attributes."""
common = cls.network_provider_cluster_attrs(cluster)
common.update(cls.network_ranges(cluster.default_group))
common.update(
cls.network_ranges(Cluster.get_default_group(cluster).id))
common.update({'master_ip': settings.MASTER_IP})
common['nodes'] = deepcopy(attrs['nodes'])
@ -648,7 +649,7 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
NetworkGroup.cidr,
NetworkGroup.gateway
).filter_by(
group_id=cluster.default_group,
group_id=Cluster.get_default_group(cluster).id,
name='public'
).first()
join_range = lambda r: (":".join(map(str, r)) if r else None)

View File

@ -595,10 +595,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_controllers_count(cls, task):
cluster = task.cluster
controllers = filter(
lambda node: 'controller' in node.all_roles,
task.cluster.nodes)
controllers = objects.Cluster.get_all_controllers(task.cluster)
# we should make sure that cluster has at least one controller
if len(controllers) < 1:
raise errors.NotEnoughControllers(

View File

@ -17,6 +17,8 @@
from mock import patch
from sqlalchemy.sql import not_
from nailgun import objects
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Release
@ -70,7 +72,7 @@ class TestHandlers(BaseIntegrationTest):
for clstr in (cluster_db, cluster2_db):
management_net = self.db.query(NetworkGroup).filter_by(
name="management",
group_id=clstr.default_group
group_id=objects.Cluster.get_default_group(clstr).id
).first()
NovaNetworkManager.update(
clstr,

View File

@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import objects
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import NetworkGroup
@ -42,7 +43,8 @@ class TestHorizonURL(BaseIntegrationTest):
network = self.db.query(NetworkGroup).\
filter(NetworkGroup.group_id ==
self.env.clusters[0].default_group).\
objects.Cluster.get_default_group(
self.env.clusters[0]).id).\
filter_by(name="public").first()
lost_ips = self.db.query(IPAddr).filter_by(
network=network.id,

View File

@ -24,6 +24,7 @@ from netaddr import IPRange
from sqlalchemy import not_
import nailgun
from nailgun import objects
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import IPAddrRange
@ -58,7 +59,8 @@ class TestNetworkManager(BaseIntegrationTest):
management_net = self.db.query(NetworkGroup).\
filter(
NetworkGroup.group_id == self.env.clusters[0].default_group
NetworkGroup.group_id ==
objects.Cluster.get_default_group(self.env.clusters[0]).id
).filter_by(
name='management'
).first()

View File

@ -988,7 +988,8 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
public_ng = self.db.query(NetworkGroup).filter(
NetworkGroup.name == 'public'
).filter(
NetworkGroup.group_id == cluster.default_group
NetworkGroup.group_id ==
objects.Cluster.get_default_group(cluster).id
).first()
public_ng.gateway = test_gateway
self.db.add(public_ng)

View File

@ -1134,26 +1134,21 @@ class TestConsumer(BaseIntegrationTest):
self.assertEqual(error_node.status, "error")
def test_remove_cluster_resp(self):
self.env.create(
cluster = self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"api": False},
{"api": False}
]
)
cluster_id = self.env.clusters[0].id
cluster_db = self.db.query(Cluster).get(cluster["id"])
cluster_id = cluster_db.id
node1, node2 = self.env.nodes
node1_id, node2_id = [n.id for n in self.env.nodes]
self.env.create_notification(
cluster_id=cluster_id
)
networks = self.db.query(NetworkGroup).\
filter(NetworkGroup.group_id ==
self.env.clusters[0].default_group).all()
vlans = []
for net in networks:
vlans.append(net.vlan_start)
group_id = objects.Cluster.get_default_group(cluster_db).id
task = Task(
uuid=str(uuid.uuid4()),
@ -1192,7 +1187,7 @@ class TestConsumer(BaseIntegrationTest):
nets_db = self.db.query(NetworkGroup).\
filter(NetworkGroup.group_id ==
self.env.clusters[0].default_group).all()
group_id).all()
self.assertEquals(len(nets_db), 0)
task_db = self.db.query(Task)\
@ -1250,5 +1245,7 @@ class TestConsumer(BaseIntegrationTest):
nets_db = self.db.query(NetworkGroup).\
filter(NetworkGroup.group_id ==
self.env.clusters[0].default_group).all()
objects.Cluster.get_default_group(
self.env.clusters[0]).id).\
all()
self.assertNotEqual(len(nets_db), 0)

View File

@ -241,3 +241,4 @@ class BaseIntegrationLoadTestCase(BaseLoadTestCase):
"than expected: {max_exec_time}".format(
exec_time=exec_time,
max_exec_time=self.total_time))
self.db.remove()

View File

@ -13,6 +13,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from mock import patch
from nailgun.test.base import fake_tasks
from nailgun.test.performance.base import BaseIntegrationLoadTestCase
@ -21,6 +23,7 @@ from nailgun.test.performance.base import BaseIntegrationLoadTestCase
class IntegrationClusterTests(BaseIntegrationLoadTestCase):
MAX_EXEC_TIME = 60
MAX_TOTAL_EXEC_TIME = 350
def setUp(self):
super(IntegrationClusterTests, self).setUp()
@ -50,3 +53,30 @@ class IntegrationClusterTests(BaseIntegrationLoadTestCase):
def test_deploy(self, mock_rpc):
self.provision(self.cluster['id'], self.nodes_ids)
self.deployment(self.cluster['id'], self.nodes_ids)
@fake_tasks()
def test_put_cluster_changes(self):
    """PUT to ClusterChangesHandler must complete within 4 seconds."""
    handler_call = functools.partial(
        self.put_handler,
        'ClusterChangesHandler',
        [],
        handler_kwargs={'cluster_id': self.cluster['id']})
    self.check_time_exec(handler_call, 4)
@fake_tasks()
def test_put_cluster_changes_after_reset(self):
    """Deploy, reset, then redeploy; each handler has a time budget."""
    self.deployment(self.cluster['id'], self.nodes_ids)
    reset_call = functools.partial(
        self.put_handler,
        'ClusterResetHandler',
        [],
        handler_kwargs={'cluster_id': self.cluster['id']})
    self.check_time_exec(reset_call, 10)
    changes_call = functools.partial(
        self.put_handler,
        'ClusterChangesHandler',
        [],
        handler_kwargs={'cluster_id': self.cluster['id']})
    self.check_time_exec(changes_call, 10)

View File

@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import consts
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import node
@ -53,25 +54,25 @@ class TestNetworkCheck(BaseIntegrationTest):
checker = NetworkCheck(FakeTask(cluster_db), {})
checker.networks = [{'id': 1,
'cidr': '192.168.0.0/24',
'name': 'fake1',
'name': consts.NETWORKS.storage,
'vlan_start': None,
'meta': {'notation': 'cidr'}},
{'id': 2,
'cidr': '192.168.0.0/26',
'name': 'fake2',
'name': consts.NETWORKS.management,
'vlan_start': None,
'meta': {'notation': 'cidr'}}]
ng1 = NetworkGroup()
ng1.name = 'fake1'
ng1.name = consts.NETWORKS.storage
ng1.id = 1
ng2 = NetworkGroup()
ng2.name = 'fake2'
ng2.name = consts.NETWORKS.management
ng2.id = 2
checker.cluster.nodes[0].interfaces[0].assigned_networks_list = \
[ng1, ng2]
checker.cluster.nodes[0].interfaces[1].assigned_networks_list = \
[ng1, ng2]
self.env.db.flush()
self.assertRaises(errors.NetworkCheckError,
checker.check_untagged_intersection)

View File

@ -18,6 +18,8 @@ import logging
from mock import patch
from nailgun import objects
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
@ -58,8 +60,9 @@ class TestNodeDeletion(BaseIntegrationTest):
self.assertEqual(node_try, None)
management_net = self.db.query(NetworkGroup).\
filter(NetworkGroup.group_id == cluster.default_group).filter_by(
name='management').first()
filter(NetworkGroup.group_id ==
objects.Cluster.get_default_group(cluster).id).\
filter_by(name='management').first()
ipaddrs = self.db.query(IPAddr).\
filter_by(node=node.id).all()

View File

@ -27,6 +27,7 @@ from itertools import ifilter
import mock
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import BaseTestCase
from nailgun.test.base import reverse
from nailgun.errors import errors
@ -34,6 +35,7 @@ from nailgun.errors import errors
from nailgun import consts
from nailgun.db import NoCacheQuery
from nailgun.db.sqlalchemy.models import NodeBondInterface
from nailgun.db.sqlalchemy.models import Task
from nailgun.openstack.common import jsonutils
@ -797,3 +799,46 @@ class TestActionLogObject(BaseIntegrationTest):
self.assertIn('already_present_data', six.iterkeys(al.additional_info))
self.db.rollback()
class TestClusterObject(BaseTestCase):
# Unit tests for the new Cluster object helpers introduced in this
# change: get_all_controllers, get_controllers_group_id and the
# interface-listing classmethods.
def setUp(self):
super(TestClusterObject, self).setUp()
# Fixture: one cluster with 2 controllers, 1 compute, 1 cinder node.
self.env.create(
nodes_kwargs=[
{'roles': ['controller']},
{'roles': ['controller']},
{'roles': ['compute']},
{'roles': ['cinder']}])
def test_all_controllers(self):
# Both controller nodes from the fixture must be returned.
self.assertEqual(len(objects.Cluster.get_all_controllers(
self.env.clusters[0])), 2)
def test_get_group_id(self):
# The controllers' own group id must win over the default group.
controllers = objects.Cluster.get_all_controllers(
self.env.clusters[0])
group_id = objects.Cluster.get_controllers_group_id(
self.env.clusters[0])
self.assertEqual(controllers[0].group_id, group_id)
def test_get_nic_interfaces_for_all_nodes(self):
# Without a networks filter, every NIC of every node is returned.
nodes = self.env.nodes
interfaces = []
for node in nodes:
for inf in node.nic_interfaces:
interfaces.append(inf)
nic_interfaces = objects.Cluster.get_nic_interfaces_for_all_nodes(
self.env.clusters[0])
self.assertEqual(len(nic_interfaces), len(interfaces))
def test_get_bond_interfaces_for_all_nodes(self):
# A single bond created on one node must be found cluster-wide.
node = self.env.nodes[0]
node.bond_interfaces.append(
NodeBondInterface(name='ovs-bond0',
slaves=node.nic_interfaces))
self.db.flush()
bond_interfaces = objects.Cluster.get_bond_interfaces_for_all_nodes(
self.env.clusters[0])
self.assertEqual(len(bond_interfaces), 1)

View File

@ -284,6 +284,7 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
# check there's exception with one non-controller node
self.node.roles = ['compute']
self.env.db.flush()
self.assertRaises(
errors.NotEnoughControllers,
CheckBeforeDeploymentTask._check_controllers_count,