Move host agents

The APIC host agent and the metadata namespace proxy agent
were moved to the python-opflex-agent repository to improve
packaging.

Change-Id: Ib4efe99e060ad15a39829cea239aba8af212850e
(cherry picked from commit f6076d8492)
Authored by Thomas Bachman on 2017-09-07 15:11:14 +00:00; committed by Thomas Bachman
parent 58542a0a9d
commit a29a3725af
13 changed files with 1 addition and 728 deletions


@@ -1,29 +0,0 @@
# Copyright (c) 2017 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron.agent.linux import dhcp
class ApicDnsmasq(dhcp.Dnsmasq):
@classmethod
def get_isolated_subnets(cls, network):
"""Returns a dict indicating whether or not a subnet is isolated
A subnet is always considered isolated for APIC.
"""
isolated_subnets = collections.defaultdict(lambda: True)
return isolated_subnets
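
For context: the override above marks every subnet as isolated, which the Neutron DHCP agent typically uses to decide whether to serve metadata from the DHCP namespace. A minimal sketch of the resulting lookup behaviour, using only the standard library:

# Not part of this commit: minimal sketch of the always-isolated lookup.
import collections

isolated_subnets = collections.defaultdict(lambda: True)
print(isolated_subnets['subnet-a'])           # True
print(isolated_subnets['never-seen-before'])  # True, for any key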


@@ -1,229 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_config import cfg
from oslo_log import log as logging
import six.moves.urllib.parse as urlparse
import webob
from neutron._i18n import _LE
from neutron.agent.linux import daemon
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.common import utils
from neutron import wsgi
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
class NetworkMetadataProxyHandler(object):
"""Proxy AF_INET metadata request through Unix Domain socket.
The Unix domain socket allows the proxy access resource that are not
accessible within the isolated tenant context.
"""
def __init__(self, network_id=None, router_id=None, domain_id=None):
self.network_id = network_id
self.router_id = router_id
self.domain_id = domain_id
if network_id is None and router_id is None and domain_id is None:
msg = _('network_id, router_id, and domain_id are None. '
'One of them must be provided.')
raise ValueError(msg)
@webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req):
LOG.debug("Request: %s", req)
try:
return self._proxy_request(req.remote_addr,
req.method,
req.path_info,
req.query_string,
req.body)
except Exception:
LOG.exception(_LE("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
def get_network_id(self, domain_id, remote_address):
filedir = '/var/lib/neutron/opflex_agent'
filename = 'instance_networks.state'
fqfn = '%s/%s' % (filedir, filename)
nets = None
try:
with open(fqfn, "r") as f:
nets = jsonutils.load(f)
except Exception as e:
LOG.warning("Exception in reading file: %s" % str(e))
if nets:
if domain_id in nets:
if remote_address in nets[domain_id]:
return nets[domain_id][remote_address]
LOG.warning("IP address not found: domain=%s, addr=%s" % (
domain_id, remote_address))
return None
def _proxy_request(self, remote_address, method, path_info,
query_string, body):
headers = {
'X-Forwarded-For': remote_address,
}
if self.domain_id:
network_id = self.get_network_id(self.domain_id, remote_address)
if network_id:
headers['X-Neutron-Network-ID'] = network_id
else:
return webob.exc.HTTPNotFound()
elif self.router_id:
headers['X-Neutron-Router-ID'] = self.router_id
else:
headers['X-Neutron-Network-ID'] = self.network_id
url = urlparse.urlunsplit((
'http',
'169.254.169.254', # a dummy value to make the request proper
path_info,
query_string,
''))
h = httplib2.Http()
resp, content = h.request(
url,
method=method,
headers=headers,
body=body,
connection_type=agent_utils.UnixDomainHTTPConnection)
if resp.status == 200:
LOG.debug(resp)
LOG.debug(content)
response = webob.Response()
response.status = resp.status
response.headers['Content-Type'] = resp['content-type']
response.body = content
return response
elif resp.status == 400:
return webob.exc.HTTPBadRequest()
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.debug(msg)
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
class ProxyDaemon(daemon.Daemon):
def __init__(self, pidfile, port, network_id=None, router_id=None,
domain_id=None,
user=None, group=None, watch_log=True, host="0.0.0.0"):
uuid = domain_id or network_id or router_id
super(ProxyDaemon, self).__init__(pidfile, uuid=uuid, user=user,
group=group, watch_log=watch_log)
self.network_id = network_id
self.router_id = router_id
self.domain_id = domain_id
self.port = port
self.host = host
def run(self):
handler = NetworkMetadataProxyHandler(
self.network_id,
self.router_id,
self.domain_id)
proxy = wsgi.Server('opflex-network-metadata-proxy')
proxy.start(handler, self.port, host=self.host)
# Drop privileges after port bind
super(ProxyDaemon, self).run()
proxy.wait()
def main():
opts = [
cfg.StrOpt('network_id',
help=_('Network that will have instance metadata '
'proxied.')),
cfg.StrOpt('router_id',
help=_('Router that will have connected instances\' '
'metadata proxied.')),
cfg.StrOpt('domain_id',
help=_('L3 domain that will have connected instances\' '
'metadata proxied.')),
cfg.StrOpt('pid_file',
help=_('Location of pid file of this process.')),
cfg.BoolOpt('daemonize',
default=False,
help=_('Run as daemon.')),
cfg.StrOpt('metadata_host',
default="0.0.0.0",
help=_("IP address to listen for metadata server "
"requests.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port to listen for metadata server "
"requests.")),
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket')),
cfg.StrOpt('metadata_proxy_user',
default=None,
help=_("User (uid or name) running metadata proxy after "
"its initialization")),
cfg.StrOpt('metadata_proxy_group',
default=None,
help=_("Group (gid or name) running metadata proxy after "
"its initialization")),
cfg.BoolOpt('metadata_proxy_watch_log',
default=True,
help=_("Watch file log. Log watch should be disabled when "
"metadata_proxy_user/group has no read/write "
"permissions on metadata proxy log file.")),
]
cfg.CONF.register_cli_opts(opts)
# Don't get the default configuration file
cfg.CONF(project='neutron', default_config_files=[])
config.setup_logging()
utils.log_opt_values(LOG)
proxy = ProxyDaemon(cfg.CONF.pid_file,
cfg.CONF.metadata_port,
network_id=cfg.CONF.network_id,
router_id=cfg.CONF.router_id,
domain_id=cfg.CONF.domain_id,
user=cfg.CONF.metadata_proxy_user,
group=cfg.CONF.metadata_proxy_group,
watch_log=cfg.CONF.metadata_proxy_watch_log,
host=cfg.CONF.metadata_host)
if cfg.CONF.daemonize:
proxy.start()
else:
proxy.run()
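
For context: get_network_id() above resolves the requesting instance's network from a JSON state file maintained by the opflex agent. A small standalone sketch of that lookup, with a hypothetical file body inferred from the handler code (the real schema is not shown in this diff):

# Not part of this commit: illustrative lookup; the schema is inferred
# from get_network_id() and the sample contents are hypothetical.
from oslo_serialization import jsonutils

sample_state = '{"some-domain-id": {"10.0.0.5": "some-network-id"}}'
nets = jsonutils.loads(sample_state)
print(nets['some-domain-id']['10.0.0.5'])   # -> some-network-id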


@@ -1,290 +0,0 @@
# Copyright (c) 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import periodic_task
from oslo_service import service as svc
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import config as common_cfg
from neutron.common import utils as neutron_utils
from neutron import manager
from neutron import service
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice.neutron.agent.topology import rpc as arpc
ACI_CHASSIS_DESCR_FORMAT = 'topology/pod-1/node-(\d+)'
ACI_PORT_DESCR_FORMATS = [
'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+(\/\d+)*)\]',
'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+(\/\d+)*)\]',
]
ACI_PORT_LOCAL_FORMAT = 'Eth(\d+)/(\d+(\/\d+)*)'
ACI_VPCPORT_DESCR_FORMAT = ('topology/pod-1/protpaths-(\d+)-(\d+)/pathep-'
'\[(.*)\]')
AGENT_FORCE_UPDATE_COUNT = 5
BINARY_APIC_HOST_AGENT = 'neutron-cisco-apic-host-agent'
TYPE_APIC_HOST_AGENT = 'cisco-apic-host-agent'
VPCMODULE_NAME = 'vpc-%s-%s'
LOG = logging.getLogger(__name__)
apic_opts = [
cfg.ListOpt('apic_host_uplink_ports',
default=[],
help=_('The uplink ports to check for ACI connectivity')),
cfg.FloatOpt('apic_agent_poll_interval',
default=60,
help=_('Interval between agent poll for topology (in sec)')),
cfg.FloatOpt('apic_agent_report_interval',
default=60,
help=_('Interval between agent status updates (in sec)')),
]
cfg.CONF.register_opts(apic_opts, "ml2_cisco_apic")
class ApicTopologyAgent(manager.Manager):
def __init__(self, host=None):
if host is None:
host = neutron_utils.get_hostname()
super(ApicTopologyAgent, self).__init__(host=host)
self.conf = cfg.CONF.ml2_cisco_apic
self.count_current = 0
self.count_force_send = AGENT_FORCE_UPDATE_COUNT
self.interfaces = {}
self.lldpcmd = None
self.peers = {}
self.port_desc_re = map(re.compile, ACI_PORT_DESCR_FORMATS)
self.port_local_re = re.compile(ACI_PORT_LOCAL_FORMAT)
self.vpcport_desc_re = re.compile(ACI_VPCPORT_DESCR_FORMAT)
self.chassis_desc_re = re.compile(ACI_CHASSIS_DESCR_FORMAT)
self.service_agent = arpc.ApicTopologyServiceNotifierApi()
self.state = None
self.state_agent = None
self.topic = arpc.TOPIC_APIC_SERVICE
self.uplink_ports = []
self.invalid_peers = []
def init_host(self):
LOG.info(_LI("APIC host agent: agent starting on %s"), self.host)
self.state = {
'binary': BINARY_APIC_HOST_AGENT,
'host': self.host,
'topic': self.topic,
'configurations': {},
'start_flag': True,
'agent_type': TYPE_APIC_HOST_AGENT,
}
self.uplink_ports = []
for inf in self.conf.apic_host_uplink_ports:
if ip_lib.device_exists(inf):
self.uplink_ports.append(inf)
else:
# ignore unknown interfaces
LOG.error(_LE("No such interface (ignored): %s"), inf)
self.lldpcmd = ['lldpctl', '-f', 'keyvalue'] + self.uplink_ports
def after_start(self):
LOG.info(_LI("APIC host agent: started on %s"), self.host)
@periodic_task.periodic_task(
spacing=cfg.CONF.ml2_cisco_apic.apic_agent_poll_interval,
run_immediately=True)
def _check_for_new_peers(self, context):
LOG.debug("APIC host agent: _check_for_new_peers")
if not self.lldpcmd:
return
try:
# Check if we must send update even if there is no change
force_send = False
self.count_current += 1
if self.count_current >= self.count_force_send:
force_send = True
self.count_current = 0
# Check for new peers
new_peers = self._get_peers()
new_peers = self._valid_peers(new_peers)
# Make a copy of current interfaces
curr_peers = {}
for interface in self.peers:
curr_peers[interface] = self.peers[interface]
# Based on curr -> new updates, add the new interfaces
self.peers = {}
for interface in new_peers:
peer = new_peers[interface]
self.peers[interface] = peer
if (interface in curr_peers and
curr_peers[interface] != peer):
LOG.debug('reporting peer removal: %s', peer)
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0, '')
if (interface not in curr_peers or
curr_peers[interface] != peer or
force_send):
LOG.debug('reporting new peer: %s', peer)
self.service_agent.update_link(context, *peer)
if interface in curr_peers:
curr_peers.pop(interface)
# Any interface still in curr_peers need to be deleted
for peer in curr_peers.values():
LOG.debug('reporting peer removal: %s', peer)
self.service_agent.update_link(
context, peer[0], peer[1], None, 0, 0, 0, '')
except Exception:
LOG.exception(_LE("APIC service agent: exception in LLDP parsing"))
def _get_peers(self):
interfaces = {}
peers = {}
lldpkeys = utils.execute(self.lldpcmd, run_as_root=True)
for line in lldpkeys.splitlines():
if '=' not in line:
continue
fqkey, value = line.split('=', 1)
lldp, interface, key = fqkey.split('.', 2)
if lldp == 'lldp':
if interface not in interfaces:
interfaces[interface] = {}
interfaces[interface][key] = value
for interface in interfaces:
if 'port.descr' in interfaces[interface]:
value = interfaces[interface]['port.descr']
port_desc = value
for regexp in self.port_desc_re:
match = regexp.match(value)
if match:
mac = self._get_mac(interface)
switch, module, port = match.group(1, 2, 3)
peer = (self.host, interface, mac,
switch, module, port, port_desc)
if interface not in peers:
peers[interface] = []
peers[interface].append(peer)
match = self.vpcport_desc_re.match(value)
if match:
mac = self._get_mac(interface)
switch1, switch2, bundle = match.group(1, 2, 3)
switch, module, port = None, None, None
if (bundle is not None and
'chassis.descr' in interfaces[interface]):
value = interfaces[interface]['chassis.descr']
match = self.chassis_desc_re.match(value)
if match:
switch = match.group(1)
if (switch is not None and
'port.local' in interfaces[interface]):
value = interfaces[interface]['port.local']
match = self.port_local_re.match(value)
if match:
module, port = match.group(1, 2)
if module is not None and port is not None:
vpcmodule = VPCMODULE_NAME % (module, port)
peer = (self.host, interface, mac,
switch, vpcmodule, bundle, port_desc)
if interface not in peers:
peers[interface] = []
peers[interface].append(peer)
return peers
def _valid_peers(self, peers):
# Reduce the peers array to one valid peer per interface.
# NOTE: the lldpd daemon has a bug where it keeps reporting old
# peers even after their updates have stopped; we track those stale
# reports and remove them from the peers.
valid_peers = {}
invalid_peers = []
for interface in peers:
curr_peer = None
for peer in peers[interface]:
if peer in self.invalid_peers or curr_peer:
invalid_peers.append(peer)
else:
curr_peer = peer
if curr_peer is not None:
valid_peers[interface] = curr_peer
self.invalid_peers = invalid_peers
return valid_peers
def _get_mac(self, interface):
if interface in self.interfaces:
return self.interfaces[interface]
try:
mac = ip_lib.IPDevice(interface).link.address
self.interfaces[interface] = mac
return mac
except Exception:
# we can safely ignore it, it is only needed for debugging
LOG.exception(
_LE("APIC service agent: can not get MACaddr for %s"),
interface)
def report_send(self, context):
if not self.state_agent:
return
LOG.debug("APIC host agent: sending report state")
try:
self.state_agent.report_state(context, self.state)
self.state.pop('start_flag', None)
except AttributeError:
# This means the server does not support report_state
# ignore it
return
except Exception:
LOG.exception(_LE("APIC host agent: failed in reporting state"))
def launch(binary, manager, topic=None):
cfg.CONF(project='neutron')
common_cfg.init(sys.argv[1:])
config.setup_logging()
report_period = cfg.CONF.ml2_cisco_apic.apic_agent_report_interval
poll_period = cfg.CONF.ml2_cisco_apic.apic_agent_poll_interval
server = service.Service.create(
binary=binary, manager=manager, topic=topic,
report_interval=report_period, periodic_interval=poll_period)
svc.launch(cfg.CONF, server).wait()
def agent_main():
launch(
BINARY_APIC_HOST_AGENT,
'apic_ml2.neutron.plugins.ml2.drivers.' +
'cisco.apic.apic_topology.ApicTopologyAgent')
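
For context: the heart of _get_peers() above is matching the lldpctl port description against ACI_PORT_DESCR_FORMATS. A standalone sketch of that match, using a sample string that mirrors the test data later in this commit:

# Not part of this commit: standalone sketch of the port.descr parsing.
import re

ACI_PORT_DESCR_FORMATS = [
    r'topology/pod-1/node-(\d+)/sys/conng/path-\[eth(\d+)/(\d+(\/\d+)*)\]',
    r'topology/pod-1/paths-(\d+)/pathep-\[eth(\d+)/(\d+(\/\d+)*)\]',
]

descr = 'topology/pod-1/paths-203/pathep-[eth1/34]'
for regexp in map(re.compile, ACI_PORT_DESCR_FORMATS):
    match = regexp.match(descr)
    if match:
        switch, module, port = match.group(1, 2, 3)
        print(switch, module, port)   # -> 203 1 34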


@@ -1,39 +0,0 @@
# Copyright (c) 2017 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from neutron.common import rpc
TOPIC_APIC_SERVICE = 'apic-service'
class ApicTopologyServiceNotifierApi(object):
def __init__(self):
target = oslo_messaging.Target(topic=TOPIC_APIC_SERVICE, version='1.2')
self.client = rpc.get_client(target)
def update_link(self, context, host, interface, mac, switch, module, port,
port_description=''):
cctxt = self.client.prepare(version='1.2', fanout=True)
cctxt.cast(context, 'update_link', host=host, interface=interface,
mac=mac, switch=switch, module=module, port=port,
port_description=port_description)
def delete_link(self, context, host, interface):
cctxt = self.client.prepare(version='1.2', fanout=True)
cctxt.cast(context, 'delete_link', host=host, interface=interface,
mac=None, switch=0, module=0, port=0)


@@ -42,6 +42,7 @@ from neutron_lib import constants as n_constants
from neutron_lib import exceptions as n_exceptions
from neutron_lib.plugins import directory
from opflexagent import constants as ofcst
from opflexagent import host_agent_rpc as arpc
from opflexagent import rpc as ofrpc
from oslo_config import cfg
from oslo_db import exception as db_exc
@@ -52,7 +53,6 @@ from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice._i18n import _LW
from gbpservice.network.neutronv2 import local_api
from gbpservice.neutron.agent.topology import rpc as arpc
from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
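
The only change to the mechanism driver is the source of the arpc alias: the topology RPC notifier now comes from the opflexagent package rather than the removed gbpservice.neutron.agent.topology.rpc module. Since the alias is unchanged, call sites in the driver stay as they are; a hedged sketch (assuming opflexagent.host_agent_rpc exposes the same notifier class as the removed rpc.py):

# Not part of this commit: before/after of the import swap in this hunk.
# Before: from gbpservice.neutron.agent.topology import rpc as arpc
from opflexagent import host_agent_rpc as arpc

# Call sites keep working unchanged, e.g. (assumed, mirroring rpc.py above):
# notifier = arpc.ApicTopologyServiceNotifierApi()
# notifier.update_link(context, host, interface, mac, switch, module, port)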


@@ -1,138 +0,0 @@
# Copyright (c) 2017 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.tests import base
from gbpservice.neutron.agent.topology import apic_topology
PERIODIC_TASK = 'oslo_service.periodic_task'
DEV_EXISTS = 'neutron.agent.linux.ip_lib.device_exists'
IP_DEVICE = 'neutron.agent.linux.ip_lib.IPDevice'
EXECUTE = 'neutron.agent.linux.utils.execute'
LLDP_CMD = ['lldpctl', '-f', 'keyvalue']
APIC_EXT_SWITCH = '203'
APIC_EXT_MODULE = '1'
APIC_EXT_PORT = '34'
APIC_UPLINK_PORTS = ['uplink_port']
SERVICE_HOST = 'host1'
SERVICE_HOST_IFACE = 'eth0'
SERVICE_HOST_MAC = 'aa:ee:ii:oo:uu:yy'
SERVICE_PEER_CHASSIS_NAME = 'leaf4'
SERVICE_PEER_CHASSIS = 'topology/pod-1/node-' + APIC_EXT_SWITCH
SERVICE_PEER_PORT_LOCAL = 'Eth%s/%s' % (APIC_EXT_MODULE, APIC_EXT_PORT)
SERVICE_PEER_PORT_DESC = ('topology/pod-1/paths-%s/pathep-[%s]' %
(APIC_EXT_SWITCH, SERVICE_PEER_PORT_LOCAL.lower()))
ETH0 = SERVICE_HOST_IFACE
LLDPCTL_RES = (
'lldp.' + ETH0 + '.via=LLDP\n'
'lldp.' + ETH0 + '.rid=1\n'
'lldp.' + ETH0 + '.age=0 day, 20:55:54\n'
'lldp.' + ETH0 + '.chassis.mac=' + SERVICE_HOST_MAC + '\n'
'lldp.' + ETH0 + '.chassis.name=' + SERVICE_PEER_CHASSIS_NAME + '\n'
'lldp.' + ETH0 + '.chassis.descr=' + SERVICE_PEER_CHASSIS + '\n'
'lldp.' + ETH0 + '.chassis.Bridge.enabled=on\n'
'lldp.' + ETH0 + '.chassis.Router.enabled=on\n'
'lldp.' + ETH0 + '.port.local=' + SERVICE_PEER_PORT_LOCAL + '\n'
'lldp.' + ETH0 + '.port.descr=' + SERVICE_PEER_PORT_DESC)
class TestCiscoApicTopologyAgent(base.BaseTestCase):
def setUp(self):
super(TestCiscoApicTopologyAgent, self).setUp()
# Configure the Cisco APIC mechanism driver
cfg.CONF.set_override('apic_host_uplink_ports',
APIC_UPLINK_PORTS, 'ml2_cisco_apic')
# Patch device_exists
self.dev_exists = mock.patch(DEV_EXISTS).start()
# Patch IPDevice
ipdev_c = mock.patch(IP_DEVICE).start()
self.ipdev = mock.Mock()
ipdev_c.return_value = self.ipdev
self.ipdev.link.address = SERVICE_HOST_MAC
# Patch execute
self.execute = mock.patch(EXECUTE).start()
self.execute.return_value = LLDPCTL_RES
# Patch tasks
self.periodic_task = mock.patch(PERIODIC_TASK).start()
self.agent = apic_topology.ApicTopologyAgent()
self.agent.host = SERVICE_HOST
self.agent.service_agent = mock.Mock()
self.agent.lldpcmd = LLDP_CMD
def test_init_host_device_exists(self):
self.agent.lldpcmd = None
self.dev_exists.return_value = True
self.agent.init_host()
self.assertEqual(LLDP_CMD + APIC_UPLINK_PORTS,
self.agent.lldpcmd)
def test_init_host_device_not_exist(self):
self.agent.lldpcmd = None
self.dev_exists.return_value = False
self.agent.init_host()
self.assertEqual(LLDP_CMD, self.agent.lldpcmd)
def test_get_peers(self):
self.agent.peers = {}
peers = self.agent._get_peers()
expected = [(SERVICE_HOST, SERVICE_HOST_IFACE,
SERVICE_HOST_MAC, APIC_EXT_SWITCH,
APIC_EXT_MODULE, APIC_EXT_PORT,
SERVICE_PEER_PORT_DESC)]
self.assertEqual(expected,
peers[SERVICE_HOST_IFACE])
def test_check_for_new_peers_no_peers(self):
self.agent.peers = {}
expected = (SERVICE_HOST, SERVICE_HOST_IFACE,
SERVICE_HOST_MAC, APIC_EXT_SWITCH,
APIC_EXT_MODULE, APIC_EXT_PORT,
SERVICE_PEER_PORT_DESC)
peers = {SERVICE_HOST_IFACE: [expected]}
context = mock.Mock()
with mock.patch.object(self.agent, '_get_peers',
return_value=peers):
self.agent._check_for_new_peers(context)
self.assertEqual(expected,
self.agent.peers[SERVICE_HOST_IFACE])
self.agent.service_agent.update_link.assert_called_once_with(
context, *expected)
def test_check_for_new_peers_with_peers(self):
expected = (SERVICE_HOST, SERVICE_HOST_IFACE,
SERVICE_HOST_MAC, APIC_EXT_SWITCH,
APIC_EXT_MODULE, APIC_EXT_PORT,
SERVICE_PEER_PORT_DESC)
peers = {SERVICE_HOST_IFACE: [expected]}
self.agent.peers = {SERVICE_HOST_IFACE:
[tuple(x + '1' for x in expected)]}
context = mock.Mock()
with mock.patch.object(self.agent, '_get_peers',
return_value=peers):
self.agent._check_for_new_peers(context)
self.agent.service_agent.update_link.assert_called_with(
context, *expected)


@@ -40,8 +40,6 @@ scripts =
[entry_points]
console_scripts=
gbp-db-manage = gbpservice.neutron.db.migration.cli:main
neutron-cisco-apic-host-agent = gbpservice.neutron.agent.topology.apic_topology:agent_main
opflex-ns-proxy = apic_ml2.neutron.agent.metadata.namespace_proxy:main
neutron.core_plugins =
ml2plus = gbpservice.neutron.plugins.ml2plus.plugin:Ml2PlusPlugin
neutron.service_plugins =