Bulk up port status updating in ML2 RPC

This eliminates the last of the bottlenecks in
get_devices_details_list_and_failed_devices by making the status
updates use bulk data retrieval as well.
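
For illustration, a minimal sketch of the bulk-retrieval pattern this
refers to; this is not the commit's code (the real helper is
db.get_port_db_objects in the first hunk below), and the query body is
an assumption about what such a helper amounts to:

    # Hedged sketch: one IN-clause SELECT replaces one query per port.
    # models_v2.Port and context.session follow Neutron's layout of the
    # era; the query itself is assumed, not the actual implementation.
    from neutron.db import models_v2

    def get_port_db_objects_sketch(context, port_ids):
        ports = (context.session.query(models_v2.Port)
                 .filter(models_v2.Port.id.in_(list(port_ids)))
                 .all())
        # Map every requested id; None marks ports that no longer exist.
        result = {port_id: None for port_id in port_ids}
        for port in ports:
            result[port.id] = port
        return result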

The last remaining drag on performance is the status update back to
ACTIVE when the provisioning blocks are removed. However, that will
require a much larger refactor since it is callback-driven at the
individual port level.

Eliminating the L2pop driver will ultimately solve this completely,
since we will no longer need to cycle the port status on every single
agent restart.

Closes-Bug: #1665215
Change-Id: I99c2b77b35e6eabb6e4f633c4e8e2533594c6b55
Author: Kevin Benton
Date:   2017-02-15 23:05:42 -08:00
Commit: 1be00e8239 (parent a04332ca4a)

3 changed files with 42 additions and 14 deletions


@@ -1597,23 +1597,53 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         return {d: self._bind_port_if_needed(pctx) if pctx else None
                 for d, pctx in result.items()}
 
     @utils.transaction_guard
     @db_api.retry_if_session_inactive()
     def update_port_status(self, context, port_id, status, host=None,
                            network=None):
         """
         Returns port_id (non-truncated uuid) if the port exists.
         Otherwise returns None.
-        network can be passed in to avoid another get_network call if
-        one was already performed by the caller.
+        'network' is deprecated and has no effect
         """
-        updated = False
-        with db_api.context_manager.writer.using(context):
-            port = db.get_port(context, port_id)
-            if not port:
+        full = db.partial_port_ids_to_full_ids(context, [port_id])
+        if port_id not in full:
+            return None
+        port_id = full[port_id]
+        return self.update_port_statuses(
+            context, {port_id: status}, host)[port_id]
+
+    @utils.transaction_guard
+    @db_api.retry_if_session_inactive()
+    def update_port_statuses(self, context, port_id_to_status, host=None):
+        result = {}
+        port_ids = port_id_to_status.keys()
+        port_dbs_by_id = db.get_port_db_objects(context, port_ids)
+        for port_id, status in port_id_to_status.items():
+            if not port_dbs_by_id.get(port_id):
                 LOG.debug("Port %(port)s update to %(val)s by agent not found",
                           {'port': port_id, 'val': status})
-                return None
+                result[port_id] = None
+                continue
+            result[port_id] = self._safe_update_individual_port_db_status(
+                context, port_dbs_by_id[port_id], status, host)
+        return result
+
+    def _safe_update_individual_port_db_status(self, context, port,
+                                               status, host):
+        port_id = port.id
+        try:
+            return self._update_individual_port_db_status(
+                context, port, status, host)
+        except Exception:
+            with excutils.save_and_reraise_exception() as ectx:
+                # don't reraise if port doesn't exist anymore
+                ectx.reraise = bool(db.get_port(context, port_id))
+
+    def _update_individual_port_db_status(self, context, port, status, host):
+        updated = False
+        network = None
+        port_id = port.id
+        with db_api.context_manager.writer.using(context):
+            context.session.add(port)  # bring port into writer session
             if (port.status != status and
                     port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
                 original_port = self._make_port_dict(port)

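To make the new contract concrete, a usage sketch (the port ids and
host name are made up; the per-port result value — full uuid on
success, None for a vanished port — follows the docstring that
update_port_status still carries):

    # Illustrative only; these ids and the host name are hypothetical.
    statuses = {'4a7d2c6e-59d0-4c0a-b9c3-1f5d2a6b8e11': 'ACTIVE',
                'no-such-port': 'DOWN'}
    results = plugin.update_port_statuses(context, statuses, host='compute-1')
    vanished = [pid for pid, res in results.items() if res is None]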

@@ -195,9 +195,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
         # filter out any without status changes
         new_status_map = {p: s for p, s in new_status_map.items() if s}
         try:
-            for port_id, new_status in new_status_map.items():
-                plugin.update_port_status(rpc_context, port_id,
-                                          new_status, host)
+            plugin.update_port_statuses(rpc_context, new_status_map, host)
         except Exception:
             LOG.exception("Failure updating statuses, retrying all")
             failed_devices = devices_to_fetch

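For context, a hedged sketch of how the handler drives this (the filter
line and the except branch appear in the hunk above; compute_new_status
and the aggregation step are hypothetical):

    # Hypothetical aggregation: one desired status per device.
    new_status_map = {p: compute_new_status(p) for p in devices_to_fetch}
    # Drop the ports whose status did not change (from the hunk above).
    new_status_map = {p: s for p, s in new_status_map.items() if s}
    try:
        # One bulk call replaces the old per-port update loop.
        plugin.update_port_statuses(rpc_context, new_status_map, host)
    except Exception:
        # Coarse failure mode: retry the whole batch rather than lose updates.
        LOG.exception("Failure updating statuses, retrying all")
        failed_devices = devices_to_fetch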

@@ -890,10 +890,10 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase):
         plugin = directory.get_plugin()
         with self.port() as port:
             net = plugin.get_network(ctx, port['port']['network_id'])
-            with mock.patch.object(plugin, 'get_network') as get_net:
+            with mock.patch.object(plugin, 'get_networks') as get_nets:
                 plugin.update_port_status(ctx, port['port']['id'], 'UP',
                                           network=net)
-            self.assertFalse(get_net.called)
+            self.assertFalse(get_nets.called)
 
     def test_update_port_mac(self):
         self.check_update_port_mac(