New option for num_threads for state change server

Currently the maximum number of client connections (i.e. greenlets spawned
at a time) opened at any time by the WSGI server is set to 100 with
wsgi_default_pool_size[1].

This configuration may be fine for the neutron API server. But with
wsgi_default_pool_size(=100) requests, the state change server
creates a heavy CPU load on the agent.
So this server (which runs on the agents) needs a smaller value, i.e.
it can be configured to half the number of CPUs on the agent.

We use the "ha_keepalived_state_change_server_threads" config option
to configure the number of threads in the state change server instead of
wsgi_default_pool_size.

[1] https://review.openstack.org/#/c/278007/

DocImpact: Add new config option -
ha_keepalived_state_change_server_threads, to configure the number
of threads in the state change server.

Closes-Bug: #1581580
Change-Id: I822ea3844792a7731fd24419b7e90e5aef141993
(cherry picked from commit 70ea188f5d)
This commit is contained in:
venkata anil 2016-05-17 16:30:13 +00:00
parent ed12d71a55
commit 4387d4aedf
4 changed files with 38 additions and 4 deletions

View File

@ -45,6 +45,13 @@ OPTS = [
cfg.IntOpt('ha_vrrp_advert_int',
default=2,
help=_('The advertisement interval in seconds')),
cfg.IntOpt('ha_keepalived_state_change_server_threads',
default=(1 + common_utils.cpu_count()) // 2,
min=1,
help=_('Number of concurrent threads for '
'keepalived server connection requests.'
'More threads create a higher CPU load '
'on the agent node.')),
]
@ -79,7 +86,8 @@ class L3AgentKeepalivedStateChangeServer(object):
def run(self):
server = agent_utils.UnixDomainWSGIServer(
'neutron-keepalived-state-change')
'neutron-keepalived-state-change',
num_threads=self.conf.ha_keepalived_state_change_server_threads)
server.start(KeepalivedStateChangeHandler(self.agent),
self.get_keepalived_state_change_socket_path(self.conf),
workers=0,

View File

@ -402,11 +402,12 @@ class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
class UnixDomainWSGIServer(wsgi.Server):
def __init__(self, name):
def __init__(self, name, num_threads=None):
self._socket = None
self._launcher = None
self._server = None
super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True)
super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True,
num_threads=num_threads)
def start(self, application, file_socket, workers, backlog, mode=None):
self._socket = eventlet.listen(file_socket,

View File

@ -411,9 +411,9 @@ class TestUnixDomainWSGIServer(base.BaseTestCase):
super(TestUnixDomainWSGIServer, self).setUp()
self.eventlet_p = mock.patch.object(utils, 'eventlet')
self.eventlet = self.eventlet_p.start()
self.server = utils.UnixDomainWSGIServer('test')
def test_start(self):
self.server = utils.UnixDomainWSGIServer('test')
mock_app = mock.Mock()
with mock.patch.object(self.server, '_launch') as launcher:
self.server.start(mock_app, '/the/path', workers=5, backlog=128)
@ -427,6 +427,7 @@ class TestUnixDomainWSGIServer(base.BaseTestCase):
launcher.assert_called_once_with(mock_app, workers=5)
def test_run(self):
self.server = utils.UnixDomainWSGIServer('test')
self.server._run('app', 'sock')
self.eventlet.wsgi.server.assert_called_once_with(
@ -436,3 +437,17 @@ class TestUnixDomainWSGIServer(base.BaseTestCase):
log=mock.ANY,
max_size=self.server.num_threads
)
def test_num_threads(self):
num_threads = 8
self.server = utils.UnixDomainWSGIServer('test',
num_threads=num_threads)
self.server._run('app', 'sock')
self.eventlet.wsgi.server.assert_called_once_with(
'sock',
'app',
protocol=utils.UnixDomainHttpProtocol,
log=mock.ANY,
max_size=num_threads
)

View File

@ -0,0 +1,10 @@
---
upgrade:
- A new option ``ha_keepalived_state_change_server_threads`` has been
added to configure the number of concurrent threads spawned for
keepalived server connection requests. Higher values increase the
CPU load on the agent nodes. The default value is half of the number
of CPUs present on the node. This allows operators to tune the
number of threads to suit their environment. With more threads,
simultaneous requests for multiple HA routers state change can be
handled faster.