Check for ha port to become ACTIVE

After a reboot of the node (i.e. a restart of the l3 and l2 agents),
routers can be processed by the l3 agent before the openvswitch agent
has set up the appropriate HA ports. This change adds a notification to
the l3 agent when an HA port becomes ACTIVE, so that keepalived can
then be enabled.
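In outline: the server publishes a callback when an HA router interface
port is updated to ACTIVE, turns it into an RPC notification to the l3
agent on the port's host, and the agent defers starting keepalived
until it sees the ACTIVE status. A condensed, self-contained sketch of
that flow (simplified, hypothetical stand-ins; the real wiring is in
the diffs below):

    # Simplified stand-ins for the server- and agent-side checks added
    # by this change; names and dict layout are illustrative only.
    PORT_STATUS_ACTIVE = 'ACTIVE'
    DEVICE_OWNER_ROUTER_HA_INTF = 'network:router_ha_interface'

    def on_port_after_update(port, notify_l3_agent):
        # Server side: once an HA interface port goes ACTIVE on a bound
        # host, tell the l3 agent on that host to reprocess the router.
        if (port.get('device_owner') == DEVICE_OWNER_ROUTER_HA_INTF and
                port.get('status') == PORT_STATUS_ACTIVE and
                port.get('host')):
            notify_l3_agent(router_ids=[port['device_id']],
                            host=port['host'])

    def process_router(router):
        # Agent side: only start keepalived once the HA port is ACTIVE;
        # otherwise wait for the server-side notification above.
        ha_port = router.get('ha_interface')  # hypothetical key
        if ha_port and ha_port['status'] == PORT_STATUS_ACTIVE:
            return 'enable keepalived'
        return 'wait'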

Note: a release note is added to document the l3 agent's dependency on
the neutron server.

Closes-bug: #1597461

Co-Authored-By: venkata anil <anilvenkata@redhat.com>

(cherry picked from commit 25f5912cf8)

Conflicts:
        neutron/db/l3_hascheduler_db.py
        neutron/services/l3_router/l3_router_plugin.py
        neutron/tests/unit/plugins/ml2/test_plugin.py
        neutron/tests/functional/agent/l3/test_ha_router.py
        releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml

Change-Id: Iedad1ccae45005efaaa74d5571df04197757d07a
(cherry picked from commit 4ad841c4cf)

split out l3-ha specific test from TestMl2PortsV2

split out test_update_port_status_notify_port_event_after_update
from ml2.test_plugin.TestMl2PortsV2 into TestMl2PortsV2WithL3

Commit 25f5912cf8 (change id Iedad1ccae45005efaaa74d5571df04197757d07a)
introduced a test, test_update_port_status_notify_port_event_after_update,
that is valid only when the l3 plugin supports l3-ha. That assumption
does not always hold, depending on the actual ml2 driver. Since the
test cases in ml2.test_plugin are used as a common base for multiple
drivers, test_update_port_status_notify_port_event_after_update may or
may not pass. So split tests with such specific assumptions out into a
new dedicated test case, so that each driver can safely reuse the tests
in tests/unit/plugins/ml2 according to its characteristics.
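For example (a hypothetical driver test module, names invented for
illustration), an out-of-tree mechanism driver can now reuse the common
port tests and opt into the L3-dependent ones only if its l3 plugin
supports l3-ha:

    # Hypothetical driver test module; 'mydriver' is an invented name.
    from neutron.tests.unit.plugins.ml2 import test_plugin as ml2_test

    class TestMyDriverPortsV2(ml2_test.TestMl2PortsV2):
        # Safe for any driver: carries no l3-ha assumption.
        _mechanism_drivers = ['logger', 'mydriver']

    class TestMyDriverPortsV2WithL3(ml2_test.TestMl2PortsV2WithL3):
        # Inherited only by drivers whose l3 plugin supports l3-ha.
        _mechanism_drivers = ['logger', 'mydriver']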

Conflicts:
        neutron/tests/unit/plugins/ml2/test_plugin.py

Closes-Bug: #1618601
Change-Id: Ie81dde976649111d029a7d107c99960aded64915
(cherry picked from commit 03c412ff01)

Change-Id: Iedad1ccae45005efaaa74d5571df04197757d07a
(cherry picked from commit 4ad841c4cf)
Author: venkata anil <anilvenkata@redhat.com>
Date:   2016-10-12 10:57:46 +00:00
Commit: 5860fb21e9 (parent 369faa2873)
6 changed files with 83 additions and 1 deletions

neutron/agent/l3/ha_router.py

@@ -385,7 +385,9 @@ class HaRouter(router.RouterInfo):
     def process(self, agent):
         super(HaRouter, self).process(agent)
 
-        if self.ha_port:
+        self.ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
+        if (self.ha_port and
+                self.ha_port['status'] == n_consts.PORT_STATUS_ACTIVE):
             self.enable_keepalived()
 
     @common_utils.synchronized('enable_radvd')
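(Note: the agent now re-reads the HA interface from the router dict on
every process() pass, so a status flip delivered via
routers_updated_on_host is picked up on the next update rather than
requiring a full resync, and keepalived is never started while the HA
port is still DOWN.)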

neutron/db/l3_hascheduler_db.py

@@ -12,13 +12,20 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from neutron_lib import constants
 from sqlalchemy import func
 from sqlalchemy import sql
 
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
 from neutron.db import agents_db
 from neutron.db import l3_agentschedulers_db as l3_sch_db
 from neutron.db import l3_attrs_db
 from neutron.db import l3_db
+from neutron.extensions import portbindings
+from neutron import manager
+from neutron.plugins.common import constants as service_constants
 
 
 class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):

@@ -81,3 +88,25 @@ class L3_HA_scheduler_db_mixin(l3_sch_db.AZL3AgentSchedulerDbMixin):
         bindings = [(binding.l3_agent, None) for binding in bindings]
 
         return self._get_agents_dict_for_router(bindings)
+
+
+def _notify_l3_agent_ha_port_update(resource, event, trigger, **kwargs):
+    port_db = kwargs.get('port')
+    context = kwargs.get('context')
+    core_plugin = manager.NeutronManager.get_plugin()
+    new_port = core_plugin._make_port_dict(port_db)
+    host = new_port.get(portbindings.HOST_ID)
+    if new_port and host:
+        new_device_owner = new_port.get('device_owner', '')
+        if (new_device_owner == constants.DEVICE_OWNER_ROUTER_HA_INTF and
+                new_port['status'] == constants.PORT_STATUS_ACTIVE):
+            l3plugin = manager.NeutronManager.get_service_plugins().get(
+                service_constants.L3_ROUTER_NAT)
+            l3plugin.l3_rpc_notifier.routers_updated_on_host(
+                context, [new_port['device_id']], host)
+
+
+def subscribe():
+    registry.subscribe(
+        _notify_l3_agent_ha_port_update, resources.PORT, events.AFTER_UPDATE)
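The hook above is driven by neutron's callback registry: ML2 publishes
a PORT/AFTER_UPDATE event after each port update, and every subscribed
callback runs with the port and context in kwargs. A toy,
dependency-free illustration of that subscribe/notify pattern (not
Neutron code; the real registry lives in neutron.callbacks):

    # Minimal stand-in for neutron.callbacks registry semantics.
    _callbacks = {}

    def subscribe(callback, resource, event):
        _callbacks.setdefault((resource, event), []).append(callback)

    def notify(resource, event, trigger, **kwargs):
        for cb in _callbacks.get((resource, event), []):
            cb(resource, event, trigger, **kwargs)

    def on_port_update(resource, event, trigger, **kwargs):
        print('port updated:', kwargs['port']['id'])

    subscribe(on_port_update, 'port', 'after_update')
    notify('port', 'after_update', None, port={'id': 'p1'})  # callback runs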

neutron/services/l3_router/l3_router_plugin.py

@@ -30,6 +30,7 @@ from neutron.db import l3_dvr_ha_scheduler_db
 from neutron.db import l3_dvrscheduler_db
 from neutron.db import l3_gwmode_db
 from neutron.db import l3_hamode_db
+from neutron.db import l3_hascheduler_db
 from neutron.plugins.common import constants
 from neutron.quota import resource_registry
 from neutron.services import service_base

@@ -65,6 +66,8 @@ class L3RouterPlugin(service_base.ServicePluginBase,
         super(L3RouterPlugin, self).__init__()
         if 'dvr' in self.supported_extension_aliases:
             l3_dvrscheduler_db.subscribe()
+        if 'l3-ha' in self.supported_extension_aliases:
+            l3_hascheduler_db.subscribe()
         l3_db.subscribe()
         self.start_rpc_listeners()

neutron/tests/functional/agent/l3/test_ha_router.py

@@ -256,6 +256,18 @@ class L3HATestCase(framework.L3AgentTestFramework):
         # call the configure_fip_addresses directly here
         router.configure_fip_addresses(interface_name)
 
+    def test_ha_port_status_update(self):
+        router_info = self.generate_router_info(enable_ha=True)
+        router_info[l3_constants.HA_INTERFACE_KEY]['status'] = (
+            l3_constants.PORT_STATUS_DOWN)
+        router1 = self.manage_router(self.agent, router_info)
+        utils.wait_until_true(lambda: router1.ha_state == 'backup')
+
+        router1.router[l3_constants.HA_INTERFACE_KEY]['status'] = (
+            l3_constants.PORT_STATUS_ACTIVE)
+        self.agent._process_updated_router(router1.router)
+        utils.wait_until_true(lambda: router1.ha_state == 'master')
+
 
 class L3HATestFailover(framework.L3AgentTestFramework):

neutron/tests/unit/plugins/ml2/test_plugin.py

@@ -32,6 +32,7 @@ from neutron.callbacks import registry
 from neutron.callbacks import resources
 from neutron.common import constants
 from neutron.common import exceptions as exc
+from neutron.common import topics
 from neutron.common import utils
 from neutron import context
 from neutron.db import agents_db

@@ -54,6 +55,8 @@ from neutron.plugins.ml2 import driver_context
 from neutron.plugins.ml2.drivers import type_vlan
 from neutron.plugins.ml2 import models
 from neutron.plugins.ml2 import plugin as ml2_plugin
+from neutron.plugins.ml2 import rpc
+from neutron.services.l3_router import l3_router_plugin
 from neutron.services.qos import qos_consts
 from neutron.tests import base
 from neutron.tests.unit import _test_extension_portbindings as test_bindings

@@ -76,6 +79,7 @@ PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
 DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
 HOST = 'fake_host'
+TEST_ROUTER_ID = 'router_id'
 
 
 # TODO(marun) - Move to somewhere common for reuse

@@ -873,6 +877,32 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
         self.assertTrue(listener.except_raised)
 
 
+class TestMl2PortsV2WithL3(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
+    """For testing methods that require the L3 service plugin."""
+
+    def test_update_port_status_notify_port_event_after_update(self):
+        ctx = context.get_admin_context()
+        plugin = manager.NeutronManager.get_plugin()
+        notifier = rpc.AgentNotifierApi(topics.AGENT)
+        self.plugin_rpc = rpc.RpcCallbacks(notifier, plugin.type_manager)
+        # enable subscription for events
+        l3_router_plugin.L3RouterPlugin()
+        l3plugin = manager.NeutronManager.get_service_plugins().get(
+            p_const.L3_ROUTER_NAT)
+        host_arg = {portbindings.HOST_ID: HOST}
+        with mock.patch.object(l3plugin.l3_rpc_notifier,
+                               'routers_updated_on_host') as mock_updated:
+            with self.port(device_owner=constants.DEVICE_OWNER_ROUTER_HA_INTF,
+                           device_id=TEST_ROUTER_ID,
+                           arg_list=(portbindings.HOST_ID,),
+                           **host_arg) as port:
+                self.plugin_rpc.update_device_up(
+                    ctx, agent_id="theAgentId", device=port['port']['id'],
+                    host=HOST)
+        mock_updated.assert_called_once_with(
+            mock.ANY, [TEST_ROUTER_ID], HOST)
+
+
 class TestMl2PluginOnly(Ml2PluginV2TestCase):
     """For testing methods that don't call drivers"""

releasenotes/notes/l3ha-agent-server-dependency-1fcb775328ac4502.yaml (new file)

@@ -0,0 +1,6 @@
+---
+upgrade:
+  - Server notifies L3 HA agents when HA router interface
+    port status becomes active. Then L3 HA agents spawn
+    keepalived process. So, server has to be restarted
+    before the L3 agents during upgrade.