port 'up' and 'down' status notification
This adds the ability to sync DF DB port status to the Neutron DB by monitoring ovsdb at each compute node. This patch handles the logical port status up/down updates. At startup, it catches the initial 'create' events from each compute node and updates the port status if it is not in sync. If Logical_Port.up changes from False to True, it sets the port status to 'ACTIVE' so that the neutron server can send the 'network-vif-plugged' event to nova. If Logical_Port.up changes from True to False, it sets the port status to 'DOWN' so that the neutron server can send the 'network-vif-unplugged' event to nova. Change-Id: I9ab9375686e98f3d90778322d024122abf4ff8c8
This commit is contained in:
parent
018a1d2e25
commit
d1162da526
|
@ -56,6 +56,9 @@ OVS_DB_PID=$OVS_DIR"/"$OVS_DB_SERVICE".pid"
|
|||
OVS_VSWITCHD_PID=$OVS_DIR"/"$OVS_VSWITCHD_SERVICE".pid"
|
||||
OVS_VSWITCH_OCSSCHEMA_FILE=${OVS_VSWITCH_OCSSCHEMA_FILE:-"/usr/share/openvswitch/vswitch.ovsschema"}
|
||||
|
||||
# Port status notifier
|
||||
ENABLE_PORT_STATUS_NOTIFIER=${ENABLE_PORT_STATUS_NOTIFIER:-"True"}
|
||||
|
||||
ACTION=$1
|
||||
STAGE=$2
|
||||
|
||||
|
@ -114,6 +117,7 @@ fi
|
|||
if [[ "$DF_REDIS_PUBSUB" == "True" ]]; then
|
||||
DF_PUB_SUB="True"
|
||||
DF_PUB_SUB_USE_MULTIPROC="False"
|
||||
PORT_STATUS_NOTIFIER="redis_port_status_notifier_driver"
|
||||
source $DEST/dragonflow/devstack/redis_pubsub_driver
|
||||
fi
|
||||
|
||||
|
@ -164,10 +168,13 @@ function configure_df_plugin {
|
|||
cd $_pwd
|
||||
cp $DRAGONFLOW_DIR/etc/dragonflow.ini.sample $DRAGONFLOW_CONF
|
||||
|
||||
|
||||
iniset $DRAGONFLOW_CONF df remote_db_ip "$REMOTE_DB_IP"
|
||||
iniset $DRAGONFLOW_CONF df remote_db_port $REMOTE_DB_PORT
|
||||
iniset $DRAGONFLOW_CONF df remote_db_hosts "$REMOTE_DB_HOSTS"
|
||||
iniset $DRAGONFLOW_CONF df nb_db_class "$NB_DRIVER_CLASS"
|
||||
iniset $DRAGONFLOW_CONF df port_status_notifier "$PORT_STATUS_NOTIFIER"
|
||||
iniset $DRAGONFLOW_CONF df enable_port_status_notifier "$ENABLE_PORT_STATUS_NOTIFIER"
|
||||
iniset $DRAGONFLOW_CONF df local_ip "$HOST_IP"
|
||||
iniset $DRAGONFLOW_CONF df tunnel_type "$TUNNEL_TYPE"
|
||||
iniset $DRAGONFLOW_CONF df integration_bridge "$INTEGRATION_BRIDGE"
|
||||
|
@ -185,6 +192,7 @@ function configure_df_plugin {
|
|||
iniset $NEUTRON_CONF DEFAULT core_plugin "$Q_PLUGIN_CLASS"
|
||||
iniset $NEUTRON_CONF DEFAULT service_plugins "$Q_SERVICE_PLUGIN_CLASSES"
|
||||
|
||||
|
||||
if is_service_enabled q-dhcp ; then
|
||||
iniset $DRAGONFLOW_CONF df use_centralized_ipv6_DHCP "True"
|
||||
else
|
||||
|
@ -223,10 +231,13 @@ function configure_df_plugin {
|
|||
cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
|
||||
cp $DRAGONFLOW_DIR/etc/dragonflow.ini.sample $DRAGONFLOW_CONF
|
||||
|
||||
|
||||
iniset $DRAGONFLOW_CONF df remote_db_ip "$REMOTE_DB_IP"
|
||||
iniset $DRAGONFLOW_CONF df remote_db_port $REMOTE_DB_PORT
|
||||
iniset $DRAGONFLOW_CONF df remote_db_hosts "$REMOTE_DB_HOSTS"
|
||||
iniset $DRAGONFLOW_CONF df nb_db_class "$NB_DRIVER_CLASS"
|
||||
iniset $DRAGONFLOW_CONF df port_status_notifier "$PORT_STATUS_NOTIFIER"
|
||||
iniset $DRAGONFLOW_CONF df enable_port_status_notifier "$ENABLE_PORT_STATUS_NOTIFIER"
|
||||
iniset $DRAGONFLOW_CONF df local_ip "$HOST_IP"
|
||||
iniset $DRAGONFLOW_CONF df tunnel_type "$TUNNEL_TYPE"
|
||||
iniset $DRAGONFLOW_CONF df integration_bridge "$INTEGRATION_BRIDGE"
|
||||
|
@ -237,6 +248,7 @@ function configure_df_plugin {
|
|||
iniset $DRAGONFLOW_CONF df_dnat_app int_peer_patch_port "$INTEGRATION_PEER_PORT"
|
||||
iniset $DRAGONFLOW_CONF df_dnat_app ex_peer_patch_port "$PUBLIC_PEER_PORT"
|
||||
|
||||
|
||||
if [[ "$DF_PUB_SUB" == "True" ]]; then
|
||||
DF_SELECTIVE_TOPO_DIST=${DF_SELECTIVE_TOPO_DIST:-"True"}
|
||||
else
|
||||
|
|
|
@ -38,3 +38,4 @@ OVS_MANAGER=${OVS_MANAGER:-"ptcp:6640:0.0.0.0"}
|
|||
OVS_INTEGRATION_BRIDGE_PROTOCOLS=${OVS_INTEGRATION_BRIDGE_PROTOCOLS:-"OpenFlow10,OpenFlow13"}
|
||||
|
||||
DF_REDIS_INSTALL_FROM_RUBY=${DF_REDIS_INSTALL_FROM_RUBY:-"True"}
|
||||
PORT_STATUS_NOTIFIER=${PORT_STATUS_NOTIFIER:-""}
|
||||
|
|
|
@ -59,6 +59,12 @@ DF_OPTS = [
|
|||
cfg.StrOpt('pub_sub_multiproc_driver',
|
||||
default='zmq_pubsub_multiproc_driver',
|
||||
help=_('Drivers to use for the Dragonflow pub/sub')),
|
||||
# NOTE: this is a boolean flag (read as `if cfg.CONF.df.enable_port_status_notifier:`),
# so it must be a BoolOpt; the previous StrOpt with default=True mis-declared the type.
cfg.BoolOpt('enable_port_status_notifier',
            default=True,
            help=_('Enable notifier for the Dragonflow port status')),
|
||||
cfg.StrOpt('port_status_notifier',
|
||||
default='redis_port_status_notifier_driver',
|
||||
help=_('Notifier for the Dragonflow port status')),
|
||||
cfg.ListOpt('publishers_ips',
|
||||
default=['$local_ip'],
|
||||
help=_('List of the Neutron Server Publisher IPs.')),
|
||||
|
|
|
@ -18,3 +18,6 @@ OVS_BRIDGE_INTERFACE = "bridge"
|
|||
OVS_PATCH_INTERFACE = "patch"
|
||||
OVS_TUNNEL_INTERFACE = "tunnel"
|
||||
OVS_UNKNOWN_INTERFACE = "unknown"
|
||||
|
||||
PORT_STATUS_UP = "up"
|
||||
PORT_STATUS_DOWN = "down"
|
||||
|
|
|
@ -30,6 +30,7 @@ from dragonflow._i18n import _, _LE
|
|||
|
||||
DF_PUBSUB_DRIVER_NAMESPACE = 'dragonflow.pubsub_driver'
|
||||
DF_NB_DB_DRIVER_NAMESPACE = 'dragonflow.nb_db_driver'
|
||||
DF_PORT_STATUS_DRIVER_NAMESPACE = 'dragonflow.port_status_driver'
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
|
|
|
@ -60,6 +60,7 @@ class DfLocalController(object):
|
|||
self.ip = cfg.CONF.df.local_ip
|
||||
self.tunnel_type = cfg.CONF.df.tunnel_type
|
||||
self.sync_finished = False
|
||||
self.port_status_notifier = None
|
||||
nb_driver = df_utils.load_driver(
|
||||
cfg.CONF.df.nb_db_class,
|
||||
df_utils.DF_NB_DB_DRIVER_NAMESPACE)
|
||||
|
@ -67,6 +68,10 @@ class DfLocalController(object):
|
|||
nb_driver,
|
||||
use_pubsub=cfg.CONF.df.enable_df_pub_sub)
|
||||
self.vswitch_api = vswitch_impl.OvsApi(self.ip)
|
||||
if cfg.CONF.df.enable_port_status_notifier:
|
||||
self.port_status_notifier = df_utils.load_driver(
|
||||
cfg.CONF.df.port_status_notifier,
|
||||
df_utils.DF_PORT_STATUS_DRIVER_NAMESPACE)
|
||||
kwargs = dict(
|
||||
nb_api=self.nb_api,
|
||||
vswitch_api=self.vswitch_api,
|
||||
|
@ -86,6 +91,12 @@ class DfLocalController(object):
|
|||
self.nb_api.initialize(db_ip=cfg.CONF.df.remote_db_ip,
|
||||
db_port=cfg.CONF.df.remote_db_port)
|
||||
self.vswitch_api.initialize(self.nb_api)
|
||||
if cfg.CONF.df.enable_port_status_notifier:
|
||||
self.port_status_notifier.initialize(mech_driver=None,
|
||||
nb_api=self.nb_api,
|
||||
pub=self.nb_api.publisher,
|
||||
sub=None,
|
||||
is_neutron_server=False)
|
||||
self.topology = topology.Topology(self,
|
||||
self.enable_selective_topo_dist)
|
||||
|
||||
|
@ -634,6 +645,9 @@ class DfLocalController(object):
|
|||
def get_nb_api(self):
|
||||
return self.nb_api
|
||||
|
||||
def get_portstatus_notifier(self):
|
||||
return self.port_status_notifier
|
||||
|
||||
def get_db_store(self):
|
||||
return self.db_store
|
||||
|
||||
|
|
|
@ -10,9 +10,11 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from dragonflow._i18n import _LI, _LE, _LW
|
||||
from dragonflow.common import constants
|
||||
from dragonflow.db import api_nb
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
@ -36,6 +38,7 @@ class Topology(object):
|
|||
|
||||
self.controller = controller
|
||||
self.nb_api = controller.get_nb_api()
|
||||
self.port_status_reporter = controller.get_portstatus_notifier()
|
||||
self.db_store = controller.get_db_store()
|
||||
self.openflow_app = controller.get_openflow_app()
|
||||
self.chassis_name = controller.get_chassis_name()
|
||||
|
@ -132,6 +135,10 @@ class Topology(object):
|
|||
|
||||
def _vm_port_added(self, ovs_port):
    """Handle a newly detected VM port and publish its 'up' event."""
    self._vm_port_updated(ovs_port)
    # Publish the VM-port-up event so the Neutron server can flip the
    # port status (only when the notifier feature is enabled).
    if cfg.CONF.df.enable_port_status_notifier:
        self.port_status_reporter.notify_port_status(
            ovs_port, constants.PORT_STATUS_UP)
|
||||
|
||||
def _vm_port_updated(self, ovs_port):
|
||||
lport_id = ovs_port.get_iface_id()
|
||||
|
@ -187,12 +194,11 @@ class Topology(object):
|
|||
LOG.exception(_LE(
|
||||
'Failed to process logical port offline event %s') % lport_id)
|
||||
finally:
|
||||
# TODO(duankebo) publish vm port offline later
|
||||
# currently we will not publish vm port offline event.
|
||||
# lport = self.nb_api.get_logical_port(lport_id)
|
||||
# if lport.get_chassis() == self.chassis_name:
|
||||
# self.nb_api.update_lport(lport.get_id(), chassis=None,
|
||||
# status='DOWN')
|
||||
# publish vm port down event.
|
||||
if cfg.CONF.df.enable_port_status_notifier:
|
||||
self.port_status_reporter.notify_port_status(
|
||||
ovs_port, constants.PORT_STATUS_DOWN)
|
||||
|
||||
del self.ovs_to_lport_mapping[ovs_port_id]
|
||||
self._del_from_topic_subscribed(topic, lport_id)
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
# under the License.
|
||||
|
||||
import abc
|
||||
import random
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
|
@ -144,6 +145,15 @@ class NbApi(object):
|
|||
def allocate_tunnel_key(self):
|
||||
return self.driver.allocate_unique_key()
|
||||
|
||||
def get_all_port_status_keys(self):
    """Pick one Neutron-server topic at random from the 'portstats' table.

    Each entry in 'portstats' identifies a server subscribed to port
    status events; a random one is chosen to spread the notifications.
    """
    servers = self.driver.get_all_entries('portstats')
    return random.choice(servers)
|
||||
|
||||
def create_port_status(self, server_ip):
    """Register this server in the 'portstats' table (key and value are its IP)."""
    self.driver.create_key('portstats', server_ip, server_ip, None)
|
||||
|
||||
def register_notification_callback(self, controller):
|
||||
self.controller = controller
|
||||
LOG.info(_LI("DB configuration sync finished, waiting for changes"))
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import six
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class PortStatusDriver(object):
    """Abstract driver for southbound port-status update notification.

    Implementations relay logical-port up/down events from compute nodes
    to the Neutron server so it can update the port status in its DB.
    """

    @abc.abstractmethod
    def initialize(self, mech_driver, nb_api,
                   pub, sub, is_neutron_server):
        """Initialize the port status driver on the server or compute node.

        :param mech_driver: neutron ml2 mechanism driver
        :param nb_api: NB API driver
        :param pub: publisher
        :param sub: subscriber
        :param is_neutron_server: True when running on the Neutron server,
                                  False when running on a compute node
        :return: None
        """

    @abc.abstractmethod
    def notify_port_status(self, ovs_port, status):
        """Notify the server that a port's status changed.

        :param ovs_port: the port whose status changed
        :param status: new port status ('up' or 'down')
        :return: None
        """
|
|
@ -0,0 +1,142 @@
|
|||
# Copyright (c) 2015 OpenStack Foundation.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from dragonflow._i18n import _LI
|
||||
from dragonflow.common import constants
|
||||
from dragonflow.db import db_common
|
||||
from dragonflow.db import port_status_api
|
||||
from dragonflow.db import pub_sub_api
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class RedisPortStatusNotifier(port_status_api.PortStatusDriver):
    """Port status update southbound notification based on the redis
    pub/sub driver.

    On compute nodes it publishes port up/down events; on the Neutron
    server it subscribes to those events and keeps a liveness timestamp
    in the DB so a consistency tool can tell the server is alive.
    """

    def __init__(self):
        self.mech_driver = None
        self.nb_api = None
        self.status_callback = None
        self.db_table_monitor = None
        self.pub = None
        self.sub = None

    def initialize(self, mech_driver, nb_api, pub, sub,
                   is_neutron_server=False):
        """Wire up pub/sub depending on which node we run on.

        :param mech_driver: neutron ml2 mechanism driver (server side only)
        :param nb_api: NB API driver
        :param pub: publisher (compute side)
        :param sub: subscriber (server side)
        :param is_neutron_server: True on the Neutron server,
                                  False on a compute node
        """
        self.mech_driver = mech_driver
        self.nb_api = nb_api
        self.pub = pub
        self.sub = sub

        if is_neutron_server:
            # Server node: subscribe to port status events and keep
            # the liveness topic alive.
            self.start_subsciber()
            self.server_status_monitors()
        else:
            # Compute node: the local controller publishes an event
            # whenever a port status update occurs.
            self.start_publisher()

    def server_status_monitors(self):
        # Mark a new timestamp periodically so the consistency tool
        # can check whether this server is still alive.
        self.db_table_monitor = self._start_db_table_monitor('pubsub')

    def notify_port_status(self, ovs_port, status):
        """Publish a port status change ('up'/'down') to the server."""
        port_id = ovs_port.get_iface_id()
        self._send_port_status_event('lport', port_id, 'update', status)

    # server code
    def _start_db_table_monitor(self, table_name):
        # Spawn the daemon that refreshes this server's timestamp.
        table_monitor = PortStatusMonitor(
            table_name,
            self.nb_api.driver,
            self.pub,
            cfg.CONF.df.publisher_timeout,
            cfg.CONF.df.monitor_table_poll_time,
            cfg.CONF.df.local_ip
        )
        table_monitor.daemonize()
        return table_monitor

    def _stop_db_table_monitor(self):
        # BUG FIX: this previously tested the non-existent attribute
        # 'db_table_monitors' (plural), which raised AttributeError.
        if not self.db_table_monitor:
            return
        self.db_table_monitor.stop()
        self.db_table_monitor = None

    # server code
    def port_status_callback(self, table, key, action, value, topic=None):
        # Translate a pub/sub 'lport update' event into a Neutron port
        # status change via the mech driver.
        if 'lport' == table and 'update' == action:
            LOG.info(_LI("Process port %s status update event"), str(key))
            if constants.PORT_STATUS_UP == value:
                self.mech_driver.set_port_status_up(key)
            if constants.PORT_STATUS_DOWN == value:
                self.mech_driver.set_port_status_down(key)

    # local controller code
    def _send_port_status_event(self, table, key, action, value):
        # Pick a (random) server topic and publish the update to it.
        topic = self.nb_api.get_all_port_status_keys()
        update = db_common.DbUpdate(table, key, action, value, topic=topic)
        self.pub.send_event(update)

    # server code
    def start_subsciber(self):
        self.sub.initialize(self.port_status_callback)
        server_ip = cfg.CONF.df.local_ip
        self.sub.register_topic(server_ip)
        # In portstats table, there are key value pairs like this:
        # port_status_192.168.1.10 : 192.168.1.10
        self.nb_api.create_port_status(server_ip)
        self.sub.daemonize()

    def start_publisher(self):
        self.pub.initialize()
|
||||
|
||||
|
||||
class PortStatusMonitor(pub_sub_api.TableMonitor):
    """Periodically refreshes this server's liveness timestamp in the DB.

    A key for this server's IP is kept in *table_name* with the current
    time as its value; an external DB sync tool decides from it whether
    the server is still alive (timeout handling is not done here).
    """

    def __init__(self, table_name, driver, publisher=None,
                 timeout=1, polling_time=10, local_ip='127.0.0.1'):
        super(PortStatusMonitor, self).__init__(
            table_name,
            driver,
            publisher,
            polling_time
        )
        self._timeout = timeout
        self._server_ip = local_ip
        self.table_name = table_name
        # Seed the liveness entry so it exists before the first poll,
        # e.g. port_status_192.168.1.10 : <timestamp>
        self._driver.create_key(self.table_name, self._server_ip,
                                time.time(), None)

    def _poll_once(self, old_cache):
        # Refresh this server's timestamp; timeout detection is left
        # to the DB sync tool.
        self._driver.update_key(self.table_name, self._server_ip,
                                time.time(), None)
|
|
@ -13,6 +13,7 @@
|
|||
from neutron.callbacks import events
|
||||
from neutron.callbacks import registry
|
||||
from neutron.callbacks import resources
|
||||
from neutron import context as n_context
|
||||
from neutron.extensions import allowedaddresspairs as addr_pair
|
||||
from neutron.extensions import portbindings
|
||||
from neutron.extensions import portsecurity as psec
|
||||
|
@ -61,8 +62,19 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
|
||||
self.vif_type = portbindings.VIF_TYPE_OVS
|
||||
self._set_base_port_binding()
|
||||
self.port_status = n_const.PORT_STATUS_ACTIVE
|
||||
|
||||
self.nb_api = api_nb.NbApi.get_instance(True)
|
||||
if cfg.CONF.df.enable_port_status_notifier:
|
||||
port_status_notifier = df_utils.load_driver(
|
||||
cfg.CONF.df.port_status_notifier,
|
||||
df_utils.DF_PORT_STATUS_DRIVER_NAMESPACE)
|
||||
self.port_status_notifier = port_status_notifier
|
||||
self.port_status_notifier.initialize(self, self.nb_api,
|
||||
pub=None,
|
||||
sub=self.nb_api.subscriber,
|
||||
is_neutron_server=True)
|
||||
self.port_status = None
|
||||
|
||||
registry.subscribe(self.update_security_group,
|
||||
resources.SECURITY_GROUP,
|
||||
|
@ -247,7 +259,6 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
def _get_dhcp_port_for_subnet(self, context, subnet_id):
|
||||
filters = {'fixed_ips': {'subnet_id': [subnet_id]},
|
||||
'device_owner': [n_const.DEVICE_OWNER_DHCP]}
|
||||
|
||||
core_plugin = manager.NeutronManager.get_plugin()
|
||||
ports = core_plugin.get_ports(context, filters=filters)
|
||||
if 0 != len(ports):
|
||||
|
@ -301,7 +312,6 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
'device_owner': n_const.DEVICE_OWNER_DHCP,
|
||||
'mac_address': n_const.ATTR_NOT_SPECIFIED,
|
||||
'fixed_ips': [{'subnet_id': subnet['id']}]}}
|
||||
|
||||
core_plugin = manager.NeutronManager.get_plugin()
|
||||
port = core_plugin.create_port(context, port)
|
||||
|
||||
|
@ -547,6 +557,16 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
"by concurrent operation.", updated_port['id'])
|
||||
return
|
||||
|
||||
# Here we do not want port status update to trigger
|
||||
# sending event to other compute node.
|
||||
if (cfg.CONF.df.enable_port_status_notifier and
|
||||
n_const.DEVICE_OWNER_COMPUTE_PREFIX
|
||||
in updated_port['device_owner'] and
|
||||
context.status != context.original_status and
|
||||
(context.status == n_const.PORT_STATUS_DOWN or
|
||||
context.status == n_const.PORT_STATUS_ACTIVE)):
|
||||
return None
|
||||
|
||||
# If a subnet enabled dhcp, the DFMechDriver will create a dhcp server
|
||||
# port. When delete this subnet, the port should be deleted.
|
||||
# In ml2/plugin.py, when delete subnet, it will call
|
||||
|
@ -631,7 +651,7 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
context.set_binding(segment[driver_api.ID],
|
||||
self.vif_type,
|
||||
self.vif_details,
|
||||
status=n_const.PORT_STATUS_ACTIVE)
|
||||
status=self.port_status)
|
||||
LOG.debug("Bound using segment: %s", segment)
|
||||
return
|
||||
else:
|
||||
|
@ -651,3 +671,17 @@ class DFMechDriver(driver_api.MechanismDriver):
|
|||
constants.TYPE_GENEVE,
|
||||
constants.TYPE_GRE,
|
||||
constants.TYPE_LOCAL]
|
||||
|
||||
def set_port_status_up(self, port_id):
    """Mark *port_id* ACTIVE in the Neutron DB (reported up by DF)."""
    LOG.debug("DF reports status up for port: %s", port_id)
    plugin = manager.NeutronManager.get_plugin()
    plugin.update_port_status(n_context.get_admin_context(),
                              port_id,
                              n_const.PORT_STATUS_ACTIVE)
|
||||
|
||||
def set_port_status_down(self, port_id):
    """Mark *port_id* DOWN in the Neutron DB (reported down by DF)."""
    LOG.debug("DF reports status down for port: %s", port_id)
    plugin = manager.NeutronManager.get_plugin()
    plugin.update_port_status(n_context.get_admin_context(),
                              port_id,
                              n_const.PORT_STATUS_DOWN)
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from dragonflow.controller.common import constants as const
|
||||
from dragonflow.tests.common import utils
|
||||
from dragonflow.tests.fullstack import test_base
|
||||
|
@ -58,10 +60,28 @@ class TestTopology(test_base.DFTestBase):
|
|||
self.assertTrue(vm_mac is not None)
|
||||
vm_flows = self._get_vm_flows(vm_mac)
|
||||
self.assertTrue(any(vm_flows))
|
||||
core_plugin = cfg.CONF.core_plugin
|
||||
if core_plugin == 'ml2':
|
||||
# test port status update
|
||||
utils.wait_until_true(
|
||||
lambda: self._is_VM_port_status(vm, 'ACTIVE'),
|
||||
timeout=60,
|
||||
exception=Exception('Port status not change to ACTIVE')
|
||||
)
|
||||
return vm
|
||||
|
||||
def _remove_vm(self, vm):
|
||||
vm_mac = vm.get_first_mac()
|
||||
core_plugin = cfg.CONF.core_plugin
|
||||
if core_plugin == 'ml2':
|
||||
# test port status update
|
||||
vm.server.stop()
|
||||
utils.wait_until_true(
|
||||
lambda: self._is_VM_port_status(vm, 'DOWN'),
|
||||
timeout=60,
|
||||
exception=Exception('Port status not change to DOWN')
|
||||
)
|
||||
# delete vm
|
||||
vm.close()
|
||||
utils.wait_until_none(
|
||||
lambda: 1 if any(self._get_vm_flows(vm_mac)) else None, timeout=60,
|
||||
|
@ -75,3 +95,11 @@ class TestTopology(test_base.DFTestBase):
|
|||
flow['table'] == str(const.ARP_TABLE) and
|
||||
vm_mac in flow['actions']]
|
||||
return flows
|
||||
|
||||
def _is_VM_port_status(self, vm, status):
|
||||
port = vm._get_VM_port()
|
||||
if port:
|
||||
port_status = port['status']
|
||||
return port_status == status
|
||||
else:
|
||||
return False
|
||||
|
|
|
@ -66,6 +66,8 @@ dragonflow.nb_db_driver =
|
|||
ramcloud_nb_db_driver = dragonflow.db.drivers.ramcloud_db_driver:RamCloudDbDriver
|
||||
zookeeper_nb_db_driver = dragonflow.db.drivers.zookeeper_db_driver:ZookeeperDbDriver
|
||||
redis_nb_db_driver = dragonflow.db.drivers.redis_db_driver:RedisDbDriver
|
||||
dragonflow.port_status_driver =
|
||||
redis_port_status_notifier_driver = dragonflow.db.pubsub_drivers.redis_port_status_notifier:RedisPortStatusNotifier
|
||||
neutron.service_plugins =
|
||||
df-l3 = dragonflow.neutron.services.l3_router_plugin:DFL3RouterPlugin
|
||||
oslo.config.opts =
|
||||
|
|
Loading…
Reference in New Issue