Raise the default max header to accommodate large tokens

PKI tokens hit the default limit if there are enough
services defined in the keystone catalog.

Also, the v3 catalog is larger than the v2 catalog, which would explain
why this bug is only being hit now.

This change adds the configuration option max_header_line to each of the
API configurations, with a default of 16384.

Closes-Bug: #1190149
Change-Id: I5da09aa08a1242c5e356bd8bf532baa9347ce075
This commit is contained in:
Steve Baker 2014-02-03 08:45:14 +13:00
parent 1993b17c9a
commit 0b02feb20d
2 changed files with 34 additions and 0 deletions

View File

@ -870,6 +870,12 @@
# Number of workers for Heat service (integer value)
#workers=0
# Maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large
# tokens (typically those generated by the Keystone v3 API
# with big service catalogs (integer value)
#max_header_line=16384
[heat_api_cfn]
@ -899,6 +905,12 @@
# Number of workers for Heat service (integer value)
#workers=0
# Maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large
# tokens (typically those generated by the Keystone v3 API
# with big service catalogs (integer value)
#max_header_line=16384
[heat_api_cloudwatch]
@ -928,6 +940,12 @@
# Number of workers for Heat service (integer value)
#workers=0
# Maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large
# tokens (typically those generated by the Keystone v3 API
# with big service catalogs (integer value)
#max_header_line=16384
[keystone_authtoken]

View File

@ -73,6 +73,11 @@ api_opts = [
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Heat service"),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs')),
]
api_group = cfg.OptGroup('heat_api')
cfg.CONF.register_group(api_group)
@ -102,6 +107,11 @@ api_cfn_opts = [
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Heat service"),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs')),
]
api_cfn_group = cfg.OptGroup('heat_api_cfn')
cfg.CONF.register_group(api_cfn_group)
@ -131,6 +141,11 @@ api_cw_opts = [
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Heat service"),
deprecated_group='DEFAULT'),
cfg.IntOpt('max_header_line', default=16384,
help=_('Maximum line size of message headers to be accepted. '
'max_header_line may need to be increased when using '
'large tokens (typically those generated by the '
'Keystone v3 API with big service catalogs')),
]
api_cw_group = cfg.OptGroup('heat_api_cloudwatch')
cfg.CONF.register_group(api_cw_group)
@ -250,6 +265,7 @@ class Server(object):
signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.running = False
eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
self.application = application
self.sock = get_socket(conf, default_port)