Run rootwrap with lower fd ulimit by default
On Python 2.x, a subprocess.Popen() with close_fds=True will fork and then close filedescriptors range(3..os.sysconf("SC_OPEN_MAX")), which thanks to Kernel PTI (Kaiser patches) is significantly slower in 2018 when the range is very large. With a soft limit of 1048576, benchmark.py reports an overhead of ~ 400ms without this patch and 2ms with the patch applied. This patch adds a configuration option and sets a more sensible default file descriptor limit of 1024. Closes-Bug: 1796267 Change-Id: Idd98c183eca3e2df8648fc0f37d27fe9cc6d0563
This commit is contained in:
parent
2ebda956b4
commit
c0a8699820
|
@ -113,6 +113,14 @@ syslog_log_level
|
|||
unsuccessful attempts. Example:
|
||||
``syslog_log_level=ERROR``
|
||||
|
||||
rlimit_nofile
|
||||
Which rlimit for number of open file descriptors should be set for rootwrap
|
||||
and its child processes by default. This is useful in case there is an
|
||||
excessively large ulimit configured for the calling process that shouldn't
|
||||
be inherited by oslo.rootwrap and its called processes. Will not attempt to raise
|
||||
the limit. Defaults to 1024.
|
||||
|
||||
|
||||
.filters files
|
||||
==============
|
||||
|
||||
|
|
|
@ -28,3 +28,6 @@ syslog_log_level=ERROR
|
|||
|
||||
# Rootwrap daemon exits after this many seconds of inactivity
|
||||
daemon_timeout=600
|
||||
|
||||
# Rootwrap daemon limits itself to that many file descriptors
|
||||
rlimit_nofile=1024
|
||||
|
|
|
@ -33,12 +33,14 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import logging
|
||||
import resource
|
||||
import sys
|
||||
|
||||
from six import moves
|
||||
|
||||
from oslo_rootwrap import subprocess
|
||||
from oslo_rootwrap import wrapper
|
||||
|
||||
from six import moves
|
||||
|
||||
RC_UNAUTHORIZED = 99
|
||||
RC_NOCOMMAND = 98
|
||||
RC_BADCONFIG = 97
|
||||
|
@ -83,6 +85,22 @@ def main(run_daemon=False):
|
|||
_exit_error(execname, "Incorrect configuration file: %s" % configfile,
|
||||
RC_BADCONFIG, log=False)
|
||||
|
||||
# When using close_fds=True on Python 2.x, we spend significant time
|
||||
# in closing fds up to current soft ulimit, which could be large.
|
||||
# Lower our ulimit to a reasonable value to regain performance.
|
||||
fd_limits = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
sensible_fd_limit = min(config.rlimit_nofile, fd_limits[0])
|
||||
if (fd_limits[0] > sensible_fd_limit):
|
||||
# Unfortunately this inherits to our children, so allow them to
|
||||
# re-raise by passing through the hard limit unmodified
|
||||
resource.setrlimit(
|
||||
resource.RLIMIT_NOFILE, (sensible_fd_limit, fd_limits[1]))
|
||||
# This is set on import to the hard ulimit. If it's defined we
|
||||
# already have imported it, so we need to update it to the new limit
|
||||
if (hasattr(subprocess, 'MAXFD') and
|
||||
subprocess.MAXFD > sensible_fd_limit):
|
||||
subprocess.MAXFD = sensible_fd_limit
|
||||
|
||||
if config.use_syslog:
|
||||
wrapper.setup_syslog(execname,
|
||||
config.syslog_log_facility,
|
||||
|
|
|
@ -97,6 +97,12 @@ class RootwrapConfig(object):
|
|||
else:
|
||||
self.daemon_timeout = 600
|
||||
|
||||
# fd ulimit
|
||||
if config.has_option("DEFAULT", "rlimit_nofile"):
|
||||
self.rlimit_nofile = int(config.get("DEFAULT", "rlimit_nofile"))
|
||||
else:
|
||||
self.rlimit_nofile = 1024
|
||||
|
||||
|
||||
def setup_syslog(execname, facility, level):
|
||||
try:
|
||||
|
|
Loading…
Reference in New Issue