Octavia driver: agent implementation

The driver is loaded and then terminated whenever a request is issued.
This terminates the Octavia RPC listener, which is responsible for
processing driver status updates and statistics.
The following change implements a provider agent which keeps the
listener running.

Change-Id: I566aaa65df4ba7455577a539aa9eebb6cc36a099
(cherry picked from commit 24e93461d0)
Kobi Samoray 2019-09-26 19:30:18 +03:00 committed by Adit Sarfaty
parent ec56cf9765
commit e7840c8d7c
3 changed files with 30 additions and 15 deletions
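
For context on how the new agent gets invoked: Octavia loads callables registered under the octavia.driver_agent.provider_agents entry-point namespace and hands each one an exit event, expecting the callable to block until that event is set (which is what the new function at the bottom of this change does). A minimal sketch of that contract, with an illustrative function that is not part of this change:

    import time

    def minimal_provider_agent(exit_event):
        # Illustrative only: set up long-lived resources here (for this
        # driver, the RPC listener for status/statistics updates) ...
        while not exit_event.is_set():
            time.sleep(1)
        # ... and tear them down here before returning.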

devstack/README.rst

@@ -155,6 +155,9 @@ Add octavia and python-octaviaclient repos as external repositories and configur
[controller_worker]
network_driver = allowed_address_pairs_driver

[driver_agent]
enabled_provider_agents=vmwareagent

NSX-T
-----
@@ -279,6 +282,8 @@ Add octavia and python-octaviaclient repos as external repositories and configur
[controller_worker]
network_driver = allowed_address_pairs_driver

[driver_agent]
enabled_provider_agents=vmwareagent

NSX-P
-----
@@ -360,6 +365,9 @@ Add octavia and python-octaviaclient repos as external repositories and configur
[controller_worker]
network_driver = allowed_address_pairs_driver

[driver_agent]
enabled_provider_agents=vmwareagent

Trunk Driver
~~~~~~~~~~~~

setup.cfg

@@ -95,6 +95,8 @@ vmware_nsx.neutron.nsxv3.housekeeper.jobs =
    mismatch_logical_port = vmware_nsx.plugins.nsx_v3.housekeeper.mismatch_logical_port:MismatchLogicalportJob
octavia.api.drivers =
    vmwareedge = vmware_nsx.services.lbaas.octavia.octavia_driver:NSXOctaviaDriver
octavia.driver_agent.provider_agents =
    vmwareagent = vmware_nsx.services.lbaas.octavia.octavia_driver:vmware_nsx_provider_agent
[build_sphinx]
source-dir = doc/source
build-dir = doc/build
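
The entry-point name registered above ('vmwareagent') is what the enabled_provider_agents=vmwareagent setting in the documentation hunks refers to. A quick, illustrative way to confirm the entry point is visible once the package is installed (using pkg_resources; not part of this change):

    import pkg_resources

    # Lists every provider agent registered in this environment; the new
    # 'vmwareagent' entry should appear once vmware-nsx is installed.
    for ep in pkg_resources.iter_entry_points(
            'octavia.driver_agent.provider_agents'):
        print(ep.name, '->', ep.module_name)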

vmware_nsx/services/lbaas/octavia/octavia_driver.py

@@ -15,6 +15,7 @@
import copy
import socket
import time
from oslo_config import cfg
from oslo_log import helpers as log_helpers
@@ -75,7 +76,6 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
    def __init__(self):
        super(NSXOctaviaDriver, self).__init__()
        self._init_rpc_messaging()
        self._init_rpc_listener()
        self._init_cert_manager()
        self.repositories = repositories.Repositories()
@@ -88,20 +88,6 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
            version='1.0')
        self.client = messaging.RPCClient(transport, target)

    @log_helpers.log_method_call
    def _init_rpc_listener(self):
        # Initialize RPC listener
        topic = d_const.DRIVER_TO_OCTAVIA_TOPIC
        server = socket.gethostname()
        target = messaging.Target(topic=topic, server=server,
                                  exchange="common", fanout=False)
        endpoints = [NSXOctaviaDriverEndpoint()]
        access_policy = dispatcher.DefaultRPCAccessPolicy
        self.octavia_server = get_rpc_server(target, endpoints,
                                             access_policy)
        self.octavia_server.start()

    @log_helpers.log_method_call
    def _init_cert_manager(self):
        self.cert_manager = stevedore_driver.DriverManager(
@@ -589,3 +575,22 @@ class NSXOctaviaDriverEndpoint(driver_lib.DriverLibrary):
        except exceptions.UpdateStatisticsError as e:
            LOG.error("Failed to update Octavia listener statistics. "
                      "Stats %s, Error %s", statistics, e.fault_string)


@log_helpers.log_method_call
def vmware_nsx_provider_agent(exit_event):
    # Initialize RPC listener
    topic = d_const.DRIVER_TO_OCTAVIA_TOPIC
    server = socket.gethostname()
    target = messaging.Target(topic=topic, server=server,
                              exchange="common", fanout=False)
    endpoints = [NSXOctaviaDriverEndpoint()]
    access_policy = dispatcher.DefaultRPCAccessPolicy
    get_transport()
    octavia_server = get_rpc_server(target, endpoints, access_policy)
    octavia_server.start()
    LOG.info('VMware NSX Octavia provider agent has started.')
    while not exit_event.is_set():
        time.sleep(1)
    LOG.info('VMware NSX Octavia provider agent is exiting.')
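
A rough sketch of how the new function could be exercised outside of Octavia, assuming octavia.conf (including the oslo.messaging transport settings) has already been loaded; in a real deployment the octavia-driver-agent service does the equivalent of this, spawning the agent in its own process:

    import multiprocessing
    import signal

    from vmware_nsx.services.lbaas.octavia import octavia_driver

    exit_event = multiprocessing.Event()
    # Stop the agent loop cleanly on SIGINT/SIGTERM.
    signal.signal(signal.SIGINT, lambda *args: exit_event.set())
    signal.signal(signal.SIGTERM, lambda *args: exit_event.set())

    # Blocks until the event is set; the RPC listener keeps handling
    # driver status and statistics updates in the meantime.
    octavia_driver.vmware_nsx_provider_agent(exit_event)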