Update neutron conf files for Juno

* sync comments with base
* add new section comments
* Update spec as needed

Change-Id: I3908613a9045331203e4bd9d371907fd0031cd5a
Closes-Bug: #1353000
This commit is contained in:
Mark Vanderwiel 2014-08-05 12:37:38 -05:00 committed by Wei Hu
parent 3442ec6376
commit df420eec3d
10 changed files with 371 additions and 88 deletions

View File

@ -3,6 +3,7 @@ This file is used to list changes made in each version of cookbook-openstack-net
## 10.0.0
* Upgrading to Juno
* Sync conf files with Juno
## 9.1.1
* Allow dhcp_delete_namespaces and router_delete_namespaces to be overridden.

View File

@ -39,7 +39,7 @@ platform_options['neutron_server_packages'].each do |pkg|
end
# Migrate network database
# If the database has never migrated, make the current version of alembic_version to Icehouse,
# If the database has never migrated, make the current version of alembic_version to match release,
# else migrate the database to latest version.
# The node['openstack']['network']['plugin_config_file'] attribute is set in the common.rb recipe

View File

@ -19,6 +19,10 @@ paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErr
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
delay_auth_decision = true
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
@ -27,7 +31,3 @@ paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
delay_auth_decision = true

View File

@ -9,17 +9,19 @@ debug = <%= node["openstack"]["network"]["debug"] %>
# seconds between attempts.
resync_interval = <%= node["openstack"]["network"]["dhcp"]["resync_interval"] %>
# The DHCP requires that an inteface driver be set. Choose the one that best
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# OVS based plugins (OVS, Ryu, NEC, NVP, BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# LinuxBridge
#interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = <%= node["openstack"]["network"]["interface_driver"] %>
# OVS based plugins(Ryu, NEC, NVP, BigSwitch/Floodlight) that use OVS
# as OpenFlow switch and check port status
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
ovs_use_veth = <%= node["openstack"]["network"]["dhcp"]["ovs_use_veth"] %>
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
@ -32,9 +34,9 @@ use_namespaces = <%= node["openstack"]["network"]["use_namespaces"] %>
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet gateway_ip is None. The guest instance must
# be configured to request host routes via DHCP (Option 121).
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
enable_isolated_metadata = <%= node["openstack"]["network"]["dhcp"]["enable_isolated_metadata"] %>
# Allows for serving metadata requests coming from a dedicated metadata
@ -45,17 +47,32 @@ enable_isolated_metadata = <%= node["openstack"]["network"]["dhcp"]["enable_isol
# This option requires enable_isolated_metadata = True
enable_metadata_network = <%= node["openstack"]["network"]["dhcp"]["enable_metadata_network"] %>
# Domain to use for building the host names of instances.
# If not set, it will default to "openstacklocal"
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
dhcp_domain = <%= node["openstack"]["network"]["dhcp"]["default_domain"] %>
# Pass a config file to dnsmasq so we can override settings
# like the mtu passed to the virtual machine
# Override the default dnsmasq settings with this file
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
dnsmasq_lease_max = <%= node["openstack"]["network"]["dhcp"]["dnsmasq_lease_max"] %>
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
@ -63,3 +80,7 @@ dnsmasq_lease_max = <%= node["openstack"]["network"]["dhcp"]["dnsmasq_lease_max"
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
dhcp_delete_namespaces = <%= node['openstack']['network']['dhcp']['dhcp_delete_namespaces'] %>
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -4,15 +4,19 @@
# Show debugging output in log (sets DEBUG log level output)
debug = <%= node["openstack"]["network"]["debug"] %>
# L3 requires that an interface driver be set. Choose the one that best
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# OVS based plugins (OVS, Ryu, NEC, NVP, BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
interface_driver = <%= node["openstack"]["network"]["interface_driver"] %>
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = <%= node["openstack"]["network"]["use_namespaces"] %>
@ -24,11 +28,11 @@ use_namespaces = <%= node["openstack"]["network"]["use_namespaces"] %>
router_id = <%= node["openstack"]["network"]["l3"]["router_id"] %>
<% end -%>
# Each L3 agent can be associated with at most one external network. This
# value should be set to the UUID of that external network. If empty,
# the agent will enforce that only a single external networks exists and
# use that external network id
# Default: gateway_external_network_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
<% if node["openstack"]["network"]["l3"]["gateway_external_network_id"] -%>
gateway_external_network_id = <%= node["openstack"]["network"]["l3"]["gateway_external_network_id"] %>
<% end -%>
@ -40,7 +44,8 @@ gateway_external_network_id = <%= node["openstack"]["network"]["l3"]["gateway_ex
handle_internal_only_routers = <%= node["openstack"]["network"]["l3"]["handle_internal_only_routers"] %>
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
external_network_bridge = <%= node["openstack"]["network"]["l3"]["external_network_bridge"] %>
# TCP Port used by Neutron metadata server
@ -57,6 +62,13 @@ periodic_interval = <%= node["openstack"]["network"]["l3"]["periodic_interval"]
# starting agent
periodic_fuzzy_delay = <%= node["openstack"]["network"]["l3"]["periodic_fuzzy_delay"] %>
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
@ -64,3 +76,18 @@ periodic_fuzzy_delay = <%= node["openstack"]["network"]["l3"]["periodic_fuzzy_de
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
router_delete_namespaces = <%= node['openstack']['network']['l3']['router_delete_namespaces'] %>
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
# The working mode for the agent. Allowed values are:
# - legacy: this preserves the existing behavior where the L3 agent is
# deployed on a centralized networking node to provide L3 services
# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
# - dvr: this mode enables DVR functionality, and must be used for an L3
# agent that runs on a compute host.
# - dvr_snat: this enables centralized SNAT support in conjunction with
# DVR. This mode must be used for an L3 agent running on a centralized
# node (or in single-host deployments, e.g. devstack).
# agent_mode = legacy

View File

@ -9,12 +9,15 @@ debug = <%= node["openstack"]["network"]["debug"] %>
# seconds between attempts.
periodic_interval = <%= node["openstack"]["network"]["lbaas"]["periodic_interval"] %>
# LBaas requires an interface driver be set. Choose the one that best
# matches your plugin.
<% case node["openstack"]["network"]["lbaas_plugin"]
when "ovs" %>
# OVS based plugins(OVS, Ryu, NEC, NVP, BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# OVS based plugins(Ryu, NEC, NVP, BigSwitch/Floodlight) that use OVS
# as OpenFlow switch and check port status
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
#ovs_use_veth = True
<% when "linuxbridge" %>
# LinuxBridge
@ -25,8 +28,8 @@ interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
interface_driver =
<% end %>
# The agent requires a driver to manage the loadbalancer. HAProxy is the
# opensource version.
# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version.
# Multiple device drivers reflecting different service providers could be specified:
device_driver = <%= node['openstack']['network']['lbaas']['device_driver'] %>
[haproxy]
@ -35,3 +38,7 @@ device_driver = <%= node['openstack']['network']['lbaas']['device_driver'] %>
# The user group
# user_group = nogroup
# When delete and re-add the same vip, send this many gratuitous ARPs to flush
# the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
# send_gratuitous_arp = 3

View File

@ -23,8 +23,38 @@ nova_metadata_ip = <%= node["openstack"]["network"]["metadata"]["nova_metadata_i
nova_metadata_port = <%= node["openstack"]["network"]["metadata"]["nova_metadata_port"] %>
<% end -%>
# Which protocol to use for requests to Nova metadata server, http or https
# nova_metadata_protocol = http
# Whether insecure SSL connection should be accepted for Nova metadata server
# requests
# nova_metadata_insecure = False
# Client certificate for nova api, needed when nova api requires client
# certificates
# nova_client_cert =
# Private key for nova client certificate
# nova_client_priv_key =
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
metadata_proxy_shared_secret = <%= @metadata_secret %>
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 4096
# URL to connect to the cache backend.
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5

View File

@ -1,11 +1,21 @@
<%= node["openstack"]["network"]["custom_template_banner"] %>
[DEFAULT]
# Default log level is INFO
# verbose and debug has the same result.
# One of them will set DEBUG log level output
debug = <%= node["openstack"]["network"]["debug"] %>
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = <%= node["openstack"]["network"]["verbose"] %>
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = <%= node["openstack"]["network"]["debug"] %>
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = <%= node["openstack"]["network"]["state_path"] %>
@ -48,12 +58,19 @@ bind_port = <%= @bind_port %>
# extensions are in there you don't need to specify them here
# api_extensions_path =
# Neutron plugin provider module
# core_plugin =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = <%= @core_plugin %>
# Advanced service modules
# service_plugins =
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
<% if node['openstack']['network']['service_plugins'].any? %>
service_plugins = <%= node['openstack']['network']['service_plugins'].join(',') %>
<% end %>
@ -67,17 +84,26 @@ api_paste_config = api-paste.ini
auth_strategy = <%= node["openstack"]["network"]['auth_strategy'] %>
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4h octet is not 00, it will also used. The others will be
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
dhcp_lease_duration = <%= node["openstack"]["network"]["dhcp_lease_duration"] %>
# Allow sending resource operation notification to DHCP agent
@ -91,7 +117,7 @@ dhcp_lease_duration = <%= node["openstack"]["network"]["dhcp_lease_duration"] %>
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups and/or metadata service.
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
@ -210,7 +236,7 @@ notification_topics = <%= node["openstack"]["mq"]["network"]["notification_topic
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# greater than 0. If the number of items requested is greater than
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
@ -224,6 +250,9 @@ notification_topics = <%= node["openstack"]["mq"]["network"]["notification_topic
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
@ -235,6 +264,8 @@ agent_down_time = <%= node["openstack"]["network"]["api"]["agent"]["agent_down_t
network_scheduler_driver = <%= node["openstack"]["network"]["dhcp"]["scheduler"] %>
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = <%= node["openstack"]["network"]["l3"]["scheduler"] %>
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
@ -244,33 +275,53 @@ router_scheduler_driver = <%= node["openstack"]["network"]["l3"]["scheduler"] %>
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
#tcp_keepidle = 600
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
#retry_until_window = 30
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
#backlog = 4096
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
#use_ssl = False
# use_ssl = False
# Certificate file to use when starting API server securely
#ssl_cert_file = /path/to/certfile
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#ssl_key_file = /path/to/keyfile
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
#ssl_ca_file = /path/to/cafile
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
@ -299,6 +350,12 @@ nova_admin_password = <%= @nova_admin_pass %>
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = <%= @identity_admin_endpoint.to_s %>
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = <%= node["openstack"]["network"]["nova"]["send_events_interval"] %>
@ -311,40 +368,97 @@ send_events_interval = <%= node["openstack"]["network"]["nova"]["send_events_int
<% end %>
<% end %>
[QUOTAS]
# resource name(s) that are supported in quota features
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
#host=127.0.0.1
# Use this port to connect to redis host. (integer value)
#port=6379
# Password for Redis server (optional). (string value)
#password=<None>
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = <%= node["openstack"]["network"]["quota"]["items"] %>
# default number of resource allowed per tenant, minus for unlimited
# Default number of resource allowed per tenant. A negative value means
# unlimited.
default_quota = <%= node["openstack"]["network"]["quota"]["default"] %>
# number of networks allowed per tenant, and minus means unlimited
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = <%= node["openstack"]["network"]["quota"]["network"] %>
# number of subnets allowed per tenant, and minus means unlimited
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = <%= node["openstack"]["network"]["quota"]["subnet"] %>
# number of ports allowed per tenant, and minus means unlimited
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = <%= node["openstack"]["network"]["quota"]["port"] %>
# number of security groups allowed per tenant, and minus means unlimited
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = <%= node["openstack"]["network"]["quota"]["security_group"] %>
# number of security group rules allowed per tenant, and minus means unlimited
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = <%= node["openstack"]["network"]["quota"]["security_group_rule"] %>
# default driver to use for quota checks
# Number of vips allowed per tenant. A negative value means unlimited.
quota_driver = <%= node["openstack"]["network"]["quota"]["driver"] %>
[DEFAULT_SERVICETYPE]
# Description of the default service type (optional)
# description = "default service type"
# Enter a service definition line for each advanced service provided
# by the default service type.
# Each service definition should be in the following format:
# <service>:<plugin>[:driver]
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
[AGENT]
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
@ -354,10 +468,12 @@ root_helper = "sudo neutron-rootwrap /etc/neutron/rootwrap.conf"
<% end %>
# =========== items for agent management extension =============
# seconds between nodes reporting state to server, should be less than
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = <%= node["openstack"]["network"]["api"]["agent"]["agent_report_interval"] %>
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = <%= @auth_uri %>
auth_host = <%= @identity_admin_endpoint.host %>
@ -374,10 +490,18 @@ signing_dir = <%= node["openstack"]["network"]["api"]["agent"]["signing_dir"] %>
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:nova@127.0.0.1:3306/neutron_linux_bridge
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
connection = <%= @sql_connection %>
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
slave_connection = <%= node['openstack']['db']['network']['slave_connection'] %>
@ -415,9 +539,9 @@ pool_timeout = <%= node['openstack']['db']['network']['pool_timeout'] %>
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service type include LOADBALANCER, FIREWALL, VPN
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# this is multiline option, example for default provider:
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path

View File

@ -18,8 +18,11 @@ tenant_network_types = <%= node['openstack']['network']['ml2']['tenant_network_t
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = <%= node['openstack']['network']['ml2']['mechanism_drivers'] %>
[ml2_type_flat]

View File

@ -4,9 +4,9 @@
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or change this to
# 'gre' and configure tunnel_id_ranges below in order for tenant
# networks to provide connectivity between hosts. Set to 'none' to
# disable creation of tenant networks.
# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
# tenant networks to provide connectivity between hosts. Set to 'none'
# to disable creation of tenant networks.
#
# Default: tenant_network_type = local
# Example: tenant_network_type = gre
@ -18,7 +18,7 @@ tenant_network_type = <%= node["openstack"]["network"]["openvswitch"]["tenant_ne
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre and local networks may be created.
# 'vlan'. If empty, only gre, vxlan and local networks may be created.
#
# Default: network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
@ -30,6 +30,10 @@ network_vlan_ranges = <%= node["openstack"]["network"]["openvswitch"]["network_v
# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
# GRE or VXLAN tunneling.
#
# WARNING: This option will be deprecated in the Icehouse release, at which
# point setting tunnel_type below will be required to enable
# tunneling.
#
# Default: enable_tunneling = False
enable_tunneling = <%= node["openstack"]["network"]["openvswitch"]["enable_tunneling"] %>
@ -40,8 +44,8 @@ enable_tunneling = <%= node["openstack"]["network"]["openvswitch"]["enable_tunne
tunnel_type = <%= node["openstack"]["network"]["openvswitch"]["tunnel_type"] %>
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
# enumerating ranges of GRE tunnel IDs that are available for tenant
# network allocation if tenant_network_type is 'gre'.
# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
#
# Default: tunnel_id_ranges =
# Example: tunnel_id_ranges = 1:1000
@ -99,18 +103,74 @@ local_ip = <%= @local_ip %>
bridge_mappings = <%= node["openstack"]["network"]["openvswitch"]["bridge_mappings"] %>
<% end -%>
[AGENT]
# (BoolOpt) Use veths instead of patch ports to interconnect the integration
# bridge to physical networks. Support kernel without ovs patch port support
# so long as it is set to True.
# use_veth_interconnection = False
[agent]
# Agent's polling interval in seconds
polling_interval = <%= node['openstack']['network']['openvswitch']['polling_interval'] %>
# Minimize polling by monitoring ovsdb for interface changes
# minimize_polling = True
# When minimize_polling = True, the number of seconds to wait before
# respawning the ovsdb monitor after losing communication with it
# ovsdb_monitor_respawn_interval = 30
# (ListOpt) The types of tenant network tunnels supported by the agent.
# Setting this will enable tunneling support in the agent. This can be set to
# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
# disable tunneling support in the agent. When running the agent with the OVS
# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
# When running the agent with ML2, you can specify as many values here as
# your compute hosts support.
#
# tunnel_types =
# Example: tunnel_types = gre
# Example: tunnel_types = vxlan
# Example: tunnel_types = vxlan, gre
# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
# default, this will make use of the Open vSwitch default value of '4789' if
# not specified.
#
# vxlan_udp_port =
# Example: vxlan_udp_port = 8472
# (IntOpt) This is the MTU size of veth interfaces.
# Do not change unless you have a good reason to.
# The default MTU size of veth interfaces is 1500.
# This option has no effect if use_veth_interconnection is False
# Example: veth_mtu = 1504
veth_mtu = <%= node["openstack"]["network"]["openvswitch"]["veth_mtu"] %>
[SECURITYGROUP]
# Firewall driver for realizing neutron security group function
# (BoolOpt) Flag to enable l2-population extension. This option should only be
# used in conjunction with ml2 plugin and l2population mechanism driver. It'll
# enable plugin to populate remote ports macs and IPs (using fdb_add/remove
# RPC callbacks instead of tunnel_sync/update) on OVS agents in order to
# optimize tunnel management.
#
# l2_population = False
# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
# population ML2 MechanismDriver.
#
# arp_responder = False
# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packet
# carrying GRE/VXLAN tunnel. The default value is True.
#
# dont_fragment = True
# (BoolOpt) Set to True on L2 agents to enable support
# for distributed virtual routing.
#
# enable_distributed_routing = False
[securitygroup]
# Firewall driver for realizing neutron security group function.
# Default: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
firewall_driver = <%= node["openstack"]["network"]["openvswitch"]["fw_driver"] %>
@ -123,18 +183,28 @@ enable_security_group = <%= node['openstack']['network']['openvswitch']['enable_
#-----------------------------------------------------------------------------
#
# 1. With VLANs on eth1.
# [OVS]
# [ovs]
# network_vlan_ranges = default:2000:3999
# tunnel_id_ranges =
# integration_bridge = br-int
# bridge_mappings = default:br-eth1
# [AGENT]
# Add the following setting, if you want to log to a file
#
# 2. With tunneling.
# [OVS]
# 2. With GRE tunneling.
# [ovs]
# network_vlan_ranges =
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
#
# 3. With VXLAN tunneling.
# [ovs]
# network_vlan_ranges =
# tenant_network_type = vxlan
# tunnel_type = vxlan
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
# [agent]
# tunnel_types = vxlan