Support for ovs-vhostuser and vpp datapaths

DPDK provides a high-performance userspace data path.
ovs-dpdk: https://docs.openstack.org/newton/networking-guide/config-ovs-dpdk.html
VPP is a data path built on top of DPDK for better performance.
VPP: https://wiki.fd.io/view/VPP/What_is_VPP%3F
Both of these expose ports to guests through vhost-user socket interfaces.
This change adds OpFlex network binding support for these data paths.
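
For illustration, a minimal sketch (plain Python; the port id below is an
example value, and the socket dir and 'vhu' device prefix are the usual
Neutron defaults) of how a vhost-user binding derives its socket path and
the key vif_details it reports:

    import os

    NIC_NAME_LEN = 14  # cap on the generated socket/interface name length
    port_id = '8f9c1d2e-1111-2222-3333-444444444444'  # example port UUID

    sock_dir = '/var/run/openvswitch'  # agent-reported vhostuser_socket_dir
    sock_name = ('vhu' + port_id)[:NIC_NAME_LEN]  # 'vhu8f9c1d2e-11'
    sock_path = os.path.join(sock_dir, sock_name)

    vif_details = {'vhostuser_mode': 'server',
                   'vhostuser_socket': sock_path}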

Change-Id: Id18be10e73b4bcf84a826da2f3d483fe55bf5620
Kiran Shastri authored 2018-04-06 00:26:40 -07:00, committed by kshastri
parent 624630e6cc
commit acfe922888
2 changed files with 117 additions and 8 deletions

@@ -15,6 +15,7 @@
import copy
import netaddr
import os
import re
import sqlalchemy as sa
@@ -46,6 +47,8 @@ from neutron.plugins.common import constants as pconst
from neutron.plugins.ml2 import db as n_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context as ml2_context
from neutron.plugins.ml2.drivers.openvswitch.agent.common import (
    constants as a_const)
from neutron.plugins.ml2 import models
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_constants
@@ -172,6 +175,7 @@ class KeystoneNotificationEndpoint(object):
class ApicMechanismDriver(api_plus.MechanismDriver,
                          db.DbMixin):
    NIC_NAME_LEN = 14

    class TopologyRpcEndpoint(object):

        target = oslo_messaging.Target(version='3.0')
@@ -1746,6 +1750,11 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                                 self._opflex_bind_port):
            return

        # Try to bind OpFlex VPP agent.
        if self._agent_bind_port(context, ofcst.AGENT_TYPE_OPFLEX_VPP,
                                 self._opflex_bind_port):
            return

        # If we reached here, it means that either there is no active opflex
        # agent running on the host, or the agent on the host is not
        # configured for this physical network. Treat the host as a physical
@@ -2142,8 +2151,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
                return False
        elif network_type != 'local':
            return False
        self._complete_binding(context, segment)
        context.set_binding(
            segment[api.ID], self._opflex_get_vif_type(agent),
            self._opflex_get_vif_details(context, agent))
        return True

    def _dvs_bind_port(self, context, segment, agent):
@@ -2214,17 +2224,66 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
            elif segment.get('aim_ml2_created'):
                # Complete binding if another driver did not bind the
                # dynamic segment that we created.
                self._complete_binding(context, segment)
                context.set_binding(segment[api.ID], portbindings.VIF_TYPE_OVS,
                                    self._update_binding_sg())
                return True

    def _complete_binding(self, context, segment):
    def _opflex_get_vif_type(self, agent):
        if agent['agent_type'] == ofcst.AGENT_TYPE_OPFLEX_VPP:
            return portbindings.VIF_TYPE_VHOST_USER
        else:
            if (agent['configurations'].get('datapath_type') ==
                    a_const.OVS_DATAPATH_NETDEV):
                return portbindings.VIF_TYPE_VHOST_USER
            else:
                return portbindings.VIF_TYPE_OVS

    @staticmethod
    def _agent_vhu_sockpath(agent, port_id):
        """Return the agent's vhost-user socket path for a given port"""
        sockdir = agent['configurations'].get('vhostuser_socket_dir',
                                              a_const.VHOST_USER_SOCKET_DIR)
        sock_name = (n_constants.VHOST_USER_DEVICE_PREFIX +
                     port_id)[:ApicMechanismDriver.NIC_NAME_LEN]
        return os.path.join(sockdir, sock_name)
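
    # Illustrative example (hypothetical values): with the default
    # '/var/run/openvswitch' socket dir, a port id of
    # '8f9c1d2e-1111-2222-3333-444444444444' yields the socket name
    # 'vhu8f9c1d2e-11' ('vhu' prefix plus the port id, truncated to
    # NIC_NAME_LEN) and hence '/var/run/openvswitch/vhu8f9c1d2e-11'.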

    def _get_vhost_mode(self):
        # REVISIT(kshastri): this function converts the ovs vhost user
        # driver mode into the qemu vhost user mode. If OVS is the server,
        # qemu is the client and vice-versa. For ACI MD, we will need to
        # support agent capabilities field to choose client-mode. As of
        # now only support server mode for nova.
        return portbindings.VHOST_USER_MODE_SERVER
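
    # For example, with both the OVS-DPDK and VPP agent configurations
    # exercised in the unit tests of this change, the resulting vif_details
    # carry vhostuser_mode='server'.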

    def _opflex_get_vif_details(self, context, agent):
        vif_type = self._opflex_get_vif_type(agent)
        details = {}
        if vif_type == portbindings.VIF_TYPE_VHOST_USER:
            sock_path = self._agent_vhu_sockpath(agent,
                                                 context.current['id'])
            mode = self._get_vhost_mode()
            details = {portbindings.VHOST_USER_MODE: mode,
                       portbindings.VHOST_USER_SOCKET: sock_path}
            if agent['agent_type'] == ofcst.AGENT_TYPE_OPFLEX_VPP:
                details.update({portbindings.CAP_PORT_FILTER: False,
                                portbindings.OVS_HYBRID_PLUG: False,
                                portbindings.VHOST_USER_OVS_PLUG: False,
                                ofcst.VHOST_USER_VPP_PLUG: True})
            else:
                details.update({portbindings.OVS_DATAPATH_TYPE:
                                a_const.OVS_DATAPATH_NETDEV,
                                portbindings.VHOST_USER_OVS_PLUG: True})
        if agent['agent_type'] == ofcst.AGENT_TYPE_OPFLEX_OVS:
            details.update(self._update_binding_sg())
        return details

    def _update_binding_sg(self):
        enable_firewall = False
        if self.enable_iptables_firewall:
            enable_firewall = self.sg_enabled
        context.set_binding(
            segment[api.ID], portbindings.VIF_TYPE_OVS,
            {portbindings.CAP_PORT_FILTER: enable_firewall,
             portbindings.OVS_HYBRID_PLUG: enable_firewall})
        return {portbindings.CAP_PORT_FILTER: enable_firewall,
                portbindings.OVS_HYBRID_PLUG: enable_firewall}

    @property
    def plugin(self):

@@ -85,6 +85,23 @@ AGENT_TYPE_DVS = md.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'anotherbinary',
                  'topic': 'anothertopic', 'agent_type': AGENT_TYPE_DVS,
                  'configurations': {'opflex_networks': None}}

AGENT_TYPE_VPP = ofcst.AGENT_TYPE_OPFLEX_VPP
AGENT_CONF_VPP = {'alive': True, 'binary': 'somebinary',
                  'topic': 'sometopic', 'agent_type': AGENT_TYPE_VPP,
                  'configurations': {
                      'opflex_networks': None,
                      'vhostuser_socket_dir': '/var/run/vpp-sockets'}}

AGENT_CONF_OPFLEX_OVS_DPDK = {'alive': True, 'binary': 'somebinary',
                              'topic': 'sometopic',
                              'agent_type': ofcst.AGENT_TYPE_OPFLEX_OVS,
                              'configurations': {
                                  'opflex_networks': None,
                                  'bridge_mappings': {'physnet1': 'br-eth1'},
                                  'vhostuser_socket_dir':
                                      '/var/run/openvswitch',
                                  'datapath_type': 'netdev'}}

BOOKED_PORT_VALUE = 'myBookedPort'
DN = 'apic:distinguished_names'
@@ -4029,6 +4046,39 @@ class TestPortBinding(ApicAimTestCase):
                         port['binding:vif_details'])
        self.assertEqual(n_constants.PORT_STATUS_ACTIVE, port['status'])

    def test_bind_port_ovs_dpdk(self):
        self._register_agent('host1', AGENT_CONF_OPFLEX_OVS_DPDK)
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
        port = self._make_port(self.fmt, net['network']['id'])['port']
        port_id = port['id']
        port = self._bind_port_to_host(port_id, 'host1')['port']
        self.assertEqual('vhostuser', port['binding:vif_type'])
        self.assertEqual(
            {'datapath_type': 'netdev', 'port_filter': False,
             'ovs_hybrid_plug': False, 'vhostuser_ovs_plug': True,
             'vhostuser_mode': 'server', 'vhostuser_socket':
                 AGENT_CONF_OPFLEX_OVS_DPDK['configurations'][
                     'vhostuser_socket_dir'] + '/' + ('vhu' + port_id)[:14]},
            port['binding:vif_details'])

    def test_bind_port_vpp(self):
        self._register_agent('host1', AGENT_CONF_VPP)
        net = self._make_network(self.fmt, 'net1', True)
        self._make_subnet(self.fmt, net, '10.0.1.1', '10.0.1.0/24')
        port = self._make_port(self.fmt, net['network']['id'])['port']
        port_id = port['id']
        port = self._bind_port_to_host(port_id, 'host1')['port']
        self.assertEqual('vhostuser', port['binding:vif_type'])
        self.assertEqual({'port_filter': False, 'ovs_hybrid_plug': False,
                          'vhostuser_ovs_plug': False,
                          'vhostuser_vpp_plug': True,
                          'vhostuser_mode': 'server',
                          'vhostuser_socket': AGENT_CONF_VPP['configurations'][
                              'vhostuser_socket_dir'] + '/' + (
                                  ('vhu' + port_id)[:14])},
                         port['binding:vif_details'])

    # TODO(rkukura): Add tests for opflex, local and unsupported
    # network_type values.