diff --git a/etc/neutron.conf b/etc/neutron.conf
index eb25c750350..1d0281e11af 100644
--- a/etc/neutron.conf
+++ b/etc/neutron.conf
@@ -167,6 +167,23 @@ lock_path = $state_path/lock
 # Driver to use for scheduling a loadbalancer pool to an lbaas agent
 # loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
 
+# (StrOpt) The resource type whose load is reported by the DHCP agent.
+# This can be 'networks', 'subnets' or 'ports'. When specified (default is
+# 'networks'), the server extracts the corresponding load from the agent's
+# configuration object, which the agent sends as part of its report state
+# at every report_interval. The load is the number of resources of that
+# type currently consumed by the agent.
+# dhcp_load_type can be used in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
+# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
+# be configured to represent the choice for the resource being balanced.
+# Example: dhcp_load_type = networks
+# Values:
+#   networks - number of networks hosted on the agent
+#   subnets - number of subnets associated with the networks hosted on the agent
+#   ports - number of ports associated with the networks hosted on the agent
+# dhcp_load_type = networks
+
 # Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
 # networks to first DHCP agent which sends get_active_networks message to
 # neutron server
diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py
index 7596fef9d83..9b01b746376 100644
--- a/neutron/db/agents_db.py
+++ b/neutron/db/agents_db.py
@@ -33,11 +33,31 @@ from neutron.i18n import _LW
 from neutron import manager
 
 LOG = logging.getLogger(__name__)
-cfg.CONF.register_opt(
+
+AGENT_OPTS = [
     cfg.IntOpt('agent_down_time', default=75,
                help=_("Seconds to regard the agent is down; should be at "
                       "least twice report_interval, to be sure the "
-                      "agent is down for good.")))
+                      "agent is down for good.")),
+    cfg.StrOpt('dhcp_load_type', default='networks',
+               choices=['networks', 'subnets', 'ports'],
+               help=_('Representing the resource type whose load is being '
+                      'reported by the agent. This can be "networks", '
+                      '"subnets" or "ports". '
+                      'When specified (Default is networks), the server will '
+                      'extract particular load sent as part of its agent '
+                      'configuration object from the agent report state, '
+                      'which is the number of resources being consumed, at '
+                      'every report_interval. '
+                      'dhcp_load_type can be used in combination with '
+                      'network_scheduler_driver = '
+                      'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. '
+                      'When the network_scheduler_driver is WeightScheduler, '
+                      'dhcp_load_type can be configured to represent the '
+                      'choice for the resource being balanced. '
+                      'Example: dhcp_load_type=networks')),
+]
+cfg.CONF.register_opts(AGENT_OPTS)
 
 
 class Agent(model_base.BASEV2, models_v2.HasId):
@@ -68,6 +88,8 @@ class Agent(model_base.BASEV2, models_v2.HasId):
     description = sa.Column(sa.String(255))
     # configurations: a json dict string, I think 4095 is enough
     configurations = sa.Column(sa.String(4095), nullable=False)
+    # load - number of resources hosted by the agent
+    load = sa.Column(sa.Integer, default=0, nullable=False)
 
     @property
     def is_active(self):
@@ -117,6 +139,16 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
             conf = {}
         return conf
 
+    def _get_agent_load(self, agent):
+        configs = agent.get('configurations', {})
+        load_type = None
+        load = 0
+        if agent['agent_type'] == constants.AGENT_TYPE_DHCP:
+            load_type = cfg.CONF.dhcp_load_type
+        if load_type:
+            load = int(configs.get(load_type, 0))
+        return load
+
     def _make_agent_dict(self, agent, fields=None):
         attr = ext_agent.RESOURCE_ATTRIBUTE_MAP.get(
             ext_agent.RESOURCE_NAME + 's')
@@ -178,6 +210,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase):
 
         configurations_dict = agent.get('configurations', {})
         res['configurations'] = jsonutils.dumps(configurations_dict)
+        res['load'] = self._get_agent_load(agent)
         current_time = timeutils.utcnow()
         try:
             agent_db = self._get_agent_by_type_and_host(
diff --git a/neutron/db/migration/alembic_migrations/versions/1955efc66455_weight_scheduler.py b/neutron/db/migration/alembic_migrations/versions/1955efc66455_weight_scheduler.py
new file mode 100644
index 00000000000..69a0f334541
--- /dev/null
+++ b/neutron/db/migration/alembic_migrations/versions/1955efc66455_weight_scheduler.py
@@ -0,0 +1,39 @@
+# Copyright 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""weight_scheduler
+
+Revision ID: 1955efc66455
+Revises: 35a0f3365720
+Create Date: 2015-03-12 22:11:37.607390
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1955efc66455'
+down_revision = '35a0f3365720'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('agents',
+                  sa.Column('load', sa.Integer(),
+                            default=0, nullable=False))
+
+
+def downgrade():
+    op.drop_column('agents', 'load')
diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD
index c535b02c38e..750594a58cd 100644
--- a/neutron/db/migration/alembic_migrations/versions/HEAD
+++ b/neutron/db/migration/alembic_migrations/versions/HEAD
@@ -1 +1 @@
-35a0f3365720
+1955efc66455
diff --git a/neutron/scheduler/base_resource_filter.py b/neutron/scheduler/base_resource_filter.py
new file mode 100644
index 00000000000..a2b1729123c
--- /dev/null
+++ b/neutron/scheduler/base_resource_filter.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+
+class BaseResourceFilter(object):
+    """Encapsulate logic that is specific to the resource type."""
+    @abc.abstractmethod
+    def filter_agents(self, plugin, context, resource):
+        """Return the agents that can host the resource."""
+
+    def bind(self, context, agents, resource_id):
+        """Bind the resource to the agents."""
+        with context.session.begin(subtransactions=True):
+            res = {}
+            for agent in agents:
+                # The load is incremented here so that it reflects the
+                # latest agent load even within the agent report interval.
+                # This is necessary when several resources are scheduled
+                # within a single agent report interval.
+                # NOTE: The resource being bound might or might not be of
+                # the same type that is accounted for in the load. That is
+                # not a problem because the "+ 1" here is not meant to
+                # predict precisely what the load of the agent will be. The
+                # value is corrected by the agent on the next report.
+                res['load'] = agent.load + 1
+                agent.update(res)
diff --git a/neutron/scheduler/base_scheduler.py b/neutron/scheduler/base_scheduler.py
new file mode 100644
index 00000000000..9ed9b806ff1
--- /dev/null
+++ b/neutron/scheduler/base_scheduler.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2015 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+from operator import attrgetter
+import random
+
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseScheduler(object):
+    """The base scheduler (agnostic to resource type).
+    Child classes of BaseScheduler must define the
+    self.resource_filter to filter agents of
+    particular type.
+    """
+    resource_filter = None
+
+    @abc.abstractmethod
+    def select(self, plugin, context, resource_hostable_agents,
+               num_agents_needed):
+        """Return a subset of agents based on the specific scheduling logic."""
+
+    def schedule(self, plugin, context, resource):
+        """Select and bind agents to a given resource."""
+        if not self.resource_filter:
+            return
+        # filter the agents that can host the resource
+        filtered_agents_dict = self.resource_filter.filter_agents(
+            plugin, context, resource)
+        num_agents = filtered_agents_dict['n_agents']
+        hostable_agents = filtered_agents_dict['hostable_agents']
+        chosen_agents = self.select(plugin, context, hostable_agents,
+                                    num_agents)
+        # bind the resource to the agents
+        self.resource_filter.bind(context, chosen_agents, resource['id'])
+        return chosen_agents
+
+
+class BaseChanceScheduler(BaseScheduler):
+    """Choose agents randomly."""
+
+    def __init__(self, resource_filter):
+        self.resource_filter = resource_filter
+
+    def select(self, plugin, context, resource_hostable_agents,
+               num_agents_needed):
+        chosen_agents = random.sample(resource_hostable_agents,
+                                      num_agents_needed)
+        return chosen_agents
+
+
+class BaseWeightScheduler(BaseScheduler):
+    """Choose agents based on load."""
+
+    def __init__(self, resource_filter):
+        self.resource_filter = resource_filter
+
+    def select(self, plugin, context, resource_hostable_agents,
+               num_agents_needed):
+        chosen_agents = sorted(resource_hostable_agents,
+                               key=attrgetter('load'))[0:num_agents_needed]
+        return chosen_agents
diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py
index d950864d1b7..73840505d13 100644
--- a/neutron/scheduler/dhcp_agent_scheduler.py
+++ b/neutron/scheduler/dhcp_agent_scheduler.py
@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import random
 
 from oslo_config import cfg
 from oslo_db import exception as db_exc
@@ -24,80 +23,17 @@ from neutron.common import constants
 from neutron.db import agents_db
 from neutron.db import agentschedulers_db
 from neutron.i18n import _LI, _LW
-
+from neutron.scheduler import base_resource_filter
+from neutron.scheduler import base_scheduler
 
 LOG = logging.getLogger(__name__)
 
 
-class ChanceScheduler(object):
-    """Allocate a DHCP agent for a network in a random way.
-    More sophisticated scheduler (similar to filter scheduler in nova?)
-    can be introduced later.
-    """
-
-    def _schedule_bind_network(self, context, agents, network_id):
-        for agent in agents:
-            context.session.begin(subtransactions=True)
-            # saving agent_id to use it after rollback to avoid
-            # DetachedInstanceError
-            agent_id = agent.id
-            binding = agentschedulers_db.NetworkDhcpAgentBinding()
-            binding.dhcp_agent_id = agent_id
-            binding.network_id = network_id
-            try:
-                context.session.add(binding)
-                # try to actually write the changes and catch integrity
-                # DBDuplicateEntry
-                context.session.commit()
-            except db_exc.DBDuplicateEntry:
-                # it's totally ok, someone just did our job!
-                context.session.rollback()
-                LOG.info(_LI('Agent %s already present'), agent_id)
-            LOG.debug('Network %(network_id)s is scheduled to be '
-                      'hosted by DHCP agent %(agent_id)s',
-                      {'network_id': network_id,
-                       'agent_id': agent_id})
-
-    def schedule(self, plugin, context, network):
-        """Schedule the network to active DHCP agent(s).
-
-        A list of scheduled agents is returned.
-        """
-        agents_per_network = cfg.CONF.dhcp_agents_per_network
-
-        #TODO(gongysh) don't schedule the networks with only
-        # subnets whose enable_dhcp is false
-        with context.session.begin(subtransactions=True):
-            dhcp_agents = plugin.get_dhcp_agents_hosting_networks(
-                context, [network['id']], active=True)
-            if len(dhcp_agents) >= agents_per_network:
-                LOG.debug('Network %s is hosted already',
-                          network['id'])
-                return
-            n_agents = agents_per_network - len(dhcp_agents)
-            enabled_dhcp_agents = plugin.get_agents_db(
-                context, filters={
-                    'agent_type': [constants.AGENT_TYPE_DHCP],
-                    'admin_state_up': [True]})
-            if not enabled_dhcp_agents:
-                LOG.warn(_LW('No more DHCP agents'))
-                return
-            active_dhcp_agents = [
-                agent for agent in set(enabled_dhcp_agents)
-                if agent not in dhcp_agents and plugin.is_eligible_agent(
-                    context, True, agent)
-            ]
-            if not active_dhcp_agents:
-                LOG.warn(_LW('No more DHCP agents'))
-                return
-            n_agents = min(len(active_dhcp_agents), n_agents)
-            chosen_agents = random.sample(active_dhcp_agents, n_agents)
-        self._schedule_bind_network(context, chosen_agents, network['id'])
-        return chosen_agents
+class AutoScheduler(object):
 
     def auto_schedule_networks(self, plugin, context, host):
-        """Schedule non-hosted networks to the DHCP agent on
-        the specified host.
+        """Schedule non-hosted networks to the DHCP agent on the specified
+        host.
         """
         agents_per_network = cfg.CONF.dhcp_agents_per_network
         # a list of (agent, net_ids) tuples
@@ -132,5 +68,108 @@ class ChanceScheduler(object):
         # do it outside transaction so particular scheduling results don't
         # make other to fail
         for agent, net_id in bindings_to_add:
-            self._schedule_bind_network(context, [agent], net_id)
+            self.resource_filter.bind(context, [agent], net_id)
         return True
+
+
+class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler):
+
+    def __init__(self):
+        super(ChanceScheduler, self).__init__(DhcpFilter())
+
+
+class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler):
+
+    def __init__(self):
+        super(WeightScheduler, self).__init__(DhcpFilter())
+
+
+class DhcpFilter(base_resource_filter.BaseResourceFilter):
+
+    def bind(self, context, agents, network_id):
+        """Bind the network to the agents."""
+        # customize the bind logic
+        bound_agents = agents[:]
+        for agent in agents:
+            context.session.begin(subtransactions=True)
+            # saving agent_id to use it after rollback to avoid
+            # DetachedInstanceError
+            agent_id = agent.id
+            binding = agentschedulers_db.NetworkDhcpAgentBinding()
+            binding.dhcp_agent_id = agent_id
+            binding.network_id = network_id
+            try:
+                context.session.add(binding)
+                # try to actually write the changes and catch integrity
+                # DBDuplicateEntry
+                context.session.commit()
+            except db_exc.DBDuplicateEntry:
+                # it's totally ok, someone just did our job!
+                context.session.rollback()
+                bound_agents.remove(agent)
+                LOG.info(_LI('Agent %s already present'), agent_id)
+            LOG.debug('Network %(network_id)s is scheduled to be '
+                      'hosted by DHCP agent %(agent_id)s',
+                      {'network_id': network_id,
+                       'agent_id': agent_id})
+        super(DhcpFilter, self).bind(context, bound_agents, network_id)
+
+    def filter_agents(self, plugin, context, network):
+        """Return the agents that can host the network."""
+        agents_dict = self._get_network_hostable_dhcp_agents(
+            plugin, context, network)
+        if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0:
+            return {'n_agents': 0, 'hostable_agents': []}
+        return agents_dict
+
+    def _get_dhcp_agents_hosting_network(self, plugin, context, network):
+        """Return dhcp agents hosting the given network, or None if the
+        network is already hosted by a sufficient number of agents.
+        """
+        agents_per_network = cfg.CONF.dhcp_agents_per_network
+        #TODO(gongysh) don't schedule the networks with only
+        # subnets whose enable_dhcp is false
+        with context.session.begin(subtransactions=True):
+            network_hosted_agents = plugin.get_dhcp_agents_hosting_networks(
+                context, [network['id']], active=True)
+            if len(network_hosted_agents) >= agents_per_network:
+                LOG.debug('Network %s is already hosted by enough agents.',
+                          network['id'])
+                return
+        return network_hosted_agents
+
+    def _get_active_agents(self, plugin, context):
+        """Return a list of active dhcp agents."""
+        with context.session.begin(subtransactions=True):
+            active_dhcp_agents = plugin.get_agents_db(
+                context, filters={
+                    'agent_type': [constants.AGENT_TYPE_DHCP],
+                    'admin_state_up': [True]})
+            if not active_dhcp_agents:
+                LOG.warn(_LW('No more DHCP agents'))
+                return []
+        return active_dhcp_agents
+
+    def _get_network_hostable_dhcp_agents(self, plugin, context, network):
+        """Return the number of agents which will actually host the given
+        network and a list of dhcp agents which can host the given network.
+        """
+        hosted_agents = self._get_dhcp_agents_hosting_network(plugin,
+                                                              context, network)
+        if hosted_agents is None:
+            return {'n_agents': 0, 'hostable_agents': []}
+        n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents)
+        active_dhcp_agents = self._get_active_agents(plugin, context)
+        if not active_dhcp_agents:
+            return {'n_agents': 0, 'hostable_agents': []}
+        hostable_dhcp_agents = [
+            agent for agent in set(active_dhcp_agents)
+            if agent not in hosted_agents and plugin.is_eligible_agent(
+                context, True, agent)
+        ]
+
+        if not hostable_dhcp_agents:
+            return {'n_agents': 0, 'hostable_agents': []}
+        n_agents = min(len(hostable_dhcp_agents), n_agents)
+        return {'n_agents': n_agents, 'hostable_agents':
+                hostable_dhcp_agents}
diff --git a/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py
index 7322aff02cc..9646542043e 100644
--- a/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py
+++ b/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py
@@ -22,16 +22,14 @@ from neutron.db import agentschedulers_db
 from neutron.db import common_db_mixin
 from neutron.scheduler import dhcp_agent_scheduler
 from neutron.tests.unit import test_dhcp_scheduler as test_dhcp_sch
+from operator import attrgetter
 
 # Required to generate tests from scenarios. Not compatible with nose.
 load_tests = testscenarios.load_tests_apply_scenarios
 
 
-class TestScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
-                          agentschedulers_db.DhcpAgentSchedulerDbMixin,
-                          agents_db.AgentDbMixin,
-                          common_db_mixin.CommonDbMixin):
-    """Test various scenarios for ChanceScheduler.schedule.
+class BaseTestScheduleNetwork(object):
+    """Base class which defines scenarios for schedulers.
 
     agent_count
         Number of dhcp agents (also number of hosts).
@@ -96,6 +94,14 @@ class TestScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
             expected_scheduled_agent_count=1)),
     ]
 
+
+class TestChanceScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
+                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
+                                agents_db.AgentDbMixin,
+                                common_db_mixin.CommonDbMixin,
+                                BaseTestScheduleNetwork):
+    """Test various scenarios for ChanceScheduler.schedule."""
+
     def test_schedule_network(self):
         self.config(dhcp_agents_per_network=self.max_agents_per_network)
         scheduler = dhcp_agent_scheduler.ChanceScheduler()
@@ -111,9 +117,8 @@ class TestScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
         if self.scheduled_agent_count:
             # schedule the network
             schedule_agents = active_agents[:self.scheduled_agent_count]
-            scheduler._schedule_bind_network(self.ctx, schedule_agents,
-                                             self.network_id)
-
+            scheduler.resource_filter.bind(self.ctx,
+                                           schedule_agents, self.network_id)
         actual_scheduled_agents = scheduler.schedule(self, self.ctx,
                                                      self.network)
         if self.expected_scheduled_agent_count:
@@ -125,7 +130,53 @@ class TestScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
                              len(actual_scheduled_agents),
                              len(hosted_agents['agents']))
         else:
-            self.assertIsNone(actual_scheduled_agents)
+            self.assertEqual([], actual_scheduled_agents)
+
+
+class TestWeightScheduleNetwork(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
+                                agentschedulers_db.DhcpAgentSchedulerDbMixin,
+                                agents_db.AgentDbMixin,
+                                common_db_mixin.CommonDbMixin,
+                                BaseTestScheduleNetwork):
+    """Test various scenarios for WeightScheduler.schedule."""
+
+    def test_weight_schedule_network(self):
+        self.config(dhcp_agents_per_network=self.max_agents_per_network)
+        scheduler = dhcp_agent_scheduler.WeightScheduler()
+
+        # create dhcp agents
+        hosts = ['host-%s' % i for i in range(self.agent_count)]
+        dhcp_agents = self._create_and_set_agents_down(
+            hosts, down_agent_count=self.down_agent_count)
+
+        active_agents = dhcp_agents[self.down_agent_count:]
+
+        unscheduled_active_agents = list(active_agents)
+        # schedule some agents before calling schedule
+        if self.scheduled_agent_count:
+            # schedule the network
+            schedule_agents = active_agents[:self.scheduled_agent_count]
+            scheduler.resource_filter.bind(self.ctx,
+                                           schedule_agents, self.network_id)
+            for agent in schedule_agents:
+                unscheduled_active_agents.remove(agent)
+        actual_scheduled_agents = scheduler.schedule(self, self.ctx,
+                                                     self.network)
+        if self.expected_scheduled_agent_count:
+            sorted_unscheduled_active_agents = sorted(
+                unscheduled_active_agents,
+                key=attrgetter('load'))[0:self.expected_scheduled_agent_count]
+            self.assertItemsEqual(actual_scheduled_agents,
+                                  sorted_unscheduled_active_agents)
+            self.assertEqual(self.expected_scheduled_agent_count,
+                             len(actual_scheduled_agents))
+            hosted_agents = self.list_dhcp_agents_hosting_network(
+                self.ctx, self.network_id)
+            self.assertEqual(self.scheduled_agent_count +
+                             len(actual_scheduled_agents),
+                             len(hosted_agents['agents']))
+        else:
+            self.assertEqual([], actual_scheduled_agents)
 
 
 class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
@@ -323,9 +374,9 @@ class TestAutoSchedule(test_dhcp_sch.TestDhcpSchedulerBaseTestCase,
             agent_index = self._extract_index(agent)
             for net in networks:
                 net_index = self._extract_index(net)
-                scheduler._schedule_bind_network(self.ctx,
-                                                 [dhcp_agents[agent_index]],
-                                                 self._networks[net_index])
+                scheduler.resource_filter.bind(self.ctx,
+                                               [dhcp_agents[agent_index]],
+                                               self._networks[net_index])
 
         retval = scheduler.auto_schedule_networks(self, self.ctx,
                                                   hosts[host_index])
diff --git a/neutron/tests/unit/test_dhcp_scheduler.py b/neutron/tests/unit/test_dhcp_scheduler.py
index 3865ee257b9..a50173356bc 100644
--- a/neutron/tests/unit/test_dhcp_scheduler.py
+++ b/neutron/tests/unit/test_dhcp_scheduler.py
@@ -17,6 +17,8 @@ import contextlib
 import datetime
 
 import mock
+from oslo_config import cfg
+from oslo_utils import importutils
 from oslo_utils import timeutils
 import testscenarios
 
@@ -80,7 +82,7 @@ class TestDhcpSchedulerBaseTestCase(testlib_api.SqlTestCase):
 
     def _test_schedule_bind_network(self, agents, network_id):
         scheduler = dhcp_agent_scheduler.ChanceScheduler()
-        scheduler._schedule_bind_network(self.ctx, agents, network_id)
+        scheduler.resource_filter.bind(self.ctx, agents, network_id)
         results = self.ctx.session.query(
             sched_db.NetworkDhcpAgentBinding).filter_by(
             network_id=network_id).all()
@@ -265,14 +267,106 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase,
         self.assertIn('foo3', res_ids)
         self.assertIn('foo4', res_ids)
 
-    def test_remove_networks_from_down_agents_catches_all(self):
-        with contextlib.nested(
-            mock.patch.object(
-                self, 'remove_network_from_dhcp_agent',
-                side_effect=Exception("Unexpected exception!")),
-            mock.patch.object(
-                self, '_filter_bindings',
-                return_value=[sched_db.NetworkDhcpAgentBinding(
-                    network_id='foo', dhcp_agent_id='bar')])
-        ):
-            self.remove_networks_from_down_agents()
+
+class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase):
+    """Unit test scenarios for WeightScheduler.schedule."""
+
+    hostc = {
+        'binary': 'neutron-dhcp-agent',
+        'host': 'host-c',
+        'topic': 'DHCP_AGENT',
+        'configurations': {'dhcp_driver': 'dhcp_driver',
+                           'networks': 0,
+                           'use_namespaces': True,
+                           },
+        'agent_type': constants.AGENT_TYPE_DHCP}
+
+    hostd = {
+        'binary': 'neutron-dhcp-agent',
+        'host': 'host-d',
+        'topic': 'DHCP_AGENT',
+        'configurations': {'dhcp_driver': 'dhcp_driver',
+                           'networks': 1,
+                           'use_namespaces': True,
+                           },
+        'agent_type': constants.AGENT_TYPE_DHCP}
+
+    def setUp(self):
+        super(DHCPAgentWeightSchedulerTestCase, self).setUp()
+        DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
+        cfg.CONF.set_override("network_scheduler_driver",
+            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler')
+        self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.'
+                                                'Ml2Plugin')
+        self.plugin.network_scheduler = importutils.import_object(
+            'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler'
+        )
+        cfg.CONF.set_override('dhcp_agents_per_network', 1)
+        cfg.CONF.set_override("dhcp_load_type", "networks")
+
+    def test_scheduler_one_agents_per_network(self):
+        cfg.CONF.set_override('dhcp_agents_per_network', 1)
+        self._save_networks(['1111'])
+        agents = self._get_agents(['host-c', 'host-d'])
+        self._save_agents(agents)
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '1111'})
+        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['1111'])
+        self.assertEqual(1, len(agents))
+
+    def test_scheduler_two_agents_per_network(self):
+        cfg.CONF.set_override('dhcp_agents_per_network', 2)
+        self._save_networks(['1111'])
+        agents = self._get_agents(['host-c', 'host-d'])
+        self._save_agents(agents)
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '1111'})
+        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['1111'])
+        self.assertEqual(2, len(agents))
+
+    def test_scheduler_no_active_agents(self):
+        self._save_networks(['1111'])
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '1111'})
+        agents = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['1111'])
+        self.assertEqual(0, len(agents))
+
+    def test_scheduler_equal_distribution(self):
+        cfg.CONF.set_override('dhcp_agents_per_network', 1)
+        self._save_networks(['1111', '2222', '3333'])
+        agents = self._get_agents(['host-c', 'host-d'])
+        self._save_agents(agents)
+        callback = agents_db.AgentExtRpcCallback()
+        callback.report_state(self.ctx,
+                              agent_state={'agent_state': self.hostc},
+                              time=timeutils.strtime())
+        callback.report_state(self.ctx,
+                              agent_state={'agent_state': self.hostd},
+                              time=timeutils.strtime())
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '1111'})
+        agent1 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['1111'])
+        self.hostd['configurations']['networks'] = 2
+        callback.report_state(self.ctx,
+                              agent_state={'agent_state': self.hostd},
+                              time=timeutils.strtime())
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '2222'})
+        agent2 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['2222'])
+        self.hostc['configurations']['networks'] = 4
+        callback.report_state(self.ctx,
+                              agent_state={'agent_state': self.hostc},
+                              time=timeutils.strtime())
+        self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
+                                               {'id': '3333'})
+        agent3 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
+                                                              ['3333'])
+        self.assertEqual('host-c', agent1[0]['host'])
+        self.assertEqual('host-c', agent2[0]['host'])
+        self.assertEqual('host-d', agent3[0]['host'])
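
The snippet below is an illustrative sketch only and is not part of the patch. It mimics, under stated assumptions, how the weight-based path behaves: an agent's load is derived from the counter it reports in its 'configurations' dict (as AgentDbMixin._get_agent_load does for the configured dhcp_load_type), and the least-loaded agents are selected (as BaseWeightScheduler.select does). The agent data and helper names here are made up for the example.

# sketch_weight_selection.py -- standalone illustration, not Neutron code
from operator import itemgetter

# assumed to mirror the dhcp_load_type option; 'networks' is the default
DHCP_LOAD_TYPE = 'networks'


def get_agent_load(agent, load_type=DHCP_LOAD_TYPE):
    # Read the counter the agent reported in its 'configurations' dict,
    # defaulting to 0 when the key is absent (same idea as _get_agent_load).
    return int(agent.get('configurations', {}).get(load_type, 0))


def select_least_loaded(agents, num_agents_needed):
    # Sort candidates by their 'load' value and take the first N
    # (same idea as BaseWeightScheduler.select with attrgetter('load')).
    return sorted(agents, key=itemgetter('load'))[:num_agents_needed]


# hypothetical agents standing in for rows of the 'agents' table
fake_agents = [
    {'host': 'host-a', 'configurations': {'networks': 4}},
    {'host': 'host-b', 'configurations': {'networks': 1}},
]
for agent in fake_agents:
    agent['load'] = get_agent_load(agent)

print([a['host'] for a in select_least_loaded(fake_agents, 1)])
# prints: ['host-b'], the agent hosting the fewest networks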