tacker.conf updated for Ocata/Pike and SFC usage

Tacker.conf was outdated and missing some options required for the integration
with networking-sfc.

- Several sections that are no longer needed, such as tacker_heat and tacker_nova, are removed
- The new VIM concept is added, with openstack defined as its driver (see the sketch below)
- Unused parts of tacker.conf are cleaned up for simplicity

The new tacker.conf is aligned with the official Tacker documentation:
https://docs.openstack.org/tacker/latest/install/manual_installation.html
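
For quick reference, the new VIM wiring in the rendered template boils down
to the [nfvo] entry visible in the diff below; a minimal sketch (section and
option names taken from the diff, comment paraphrased from the removed
nfvo_vim section):

[nfvo]
# Supported VIM drivers (resource orchestration controllers); only openstack is enabled here
vim_drivers = openstack

Per the documentation linked above, the OpenStack credentials for a VIM are
supplied when the VIM is registered, rather than through the removed
tacker_nova/tacker_heat sections.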

Change-Id: I922618e2a27dde422dfe01e9ca4b688e5755abbc
Signed-off-by: Manuel Buil <mbuil@suse.com>
Manuel Buil 2017-08-21 12:14:33 +02:00
parent 9a9f5971c3
commit a31be97fe9
1 changed file with 2 additions and 325 deletions

@@ -39,22 +39,6 @@ bind_host = {{ tacker_bind_address }}
# Port to bind the API server to
bind_port = {{ tacker_service_port }}
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of tacker.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Tacker core plugin entrypoint to be loaded from the
# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the tacker source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
# core_plugin =
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the tacker source distribution. For compatibility
@@ -65,117 +49,10 @@ bind_port = {{ tacker_service_port }}
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = vnfm,nfvo
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Tacker is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = tacker.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = tacker
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=tacker.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=tacker.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = tacker.openstack.common.notifier.no_op_notifier
@@ -184,134 +61,6 @@ auth_strategy = keystone
# RPC driver.
notification_driver = tacker.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# tacker server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to tacker server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== tacker nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of tacker nova interactions ==========
[agent]
# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
# root filter facility.
@@ -319,16 +68,11 @@ notification_driver = tacker.openstack.common.notifier.rpc_notifier
# root_helper = sudo
root_helper = sudo {{ tacker_bin }}/tacker-rootwrap {{ tacker_etc_dir }}/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[nfvo]
vim_drivers = openstack
[keystone_authtoken]
signing_dir = /var/cache/tacker
# cafile = /opt/stack/data/ca-bundle.pem
project_domain_name = {{ tacker_service_project_domain_id }}
project_name = {{ tacker_service_project_name }}
user_domain_name = {{ tacker_service_user_domain_id }}
@@ -337,8 +81,6 @@ password = {{ tacker_service_password }}
auth_url = {{ keystone_service_adminuri }}
auth_uri = {{ keystone_service_internaluri }}
auth_type = {{ tacker_keystone_auth_plugin }}
#memcached_servers = memcache:11211
[database]
# This line MUST be changed to actually run the plugin.
@@ -353,73 +95,8 @@ auth_type = {{ tacker_keystone_auth_plugin }}
# configuration file.
connection = mysql://{{ tacker_galera_user }}:{{ tacker_container_mysql_password }}@{{ tacker_galera_address }}/{{ tacker_galera_database }}?charset=utf8
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[tacker]
# Specify drivers for hosting device
infra_driver = heat,nova,noop
# Specify drivers for mgmt
mgmt_driver = noop,openwrt
# Specify drivers for monitoring
monitor_driver = ping, http_ping
[nfvo_vim]
# Supported VIM drivers, resource orchestration controllers such as OpenStack, kvm
#Default VIM driver is OpenStack
#vim_drivers = openstack
#Default VIM placement if vim id is not provided
default_vim = VIM0
[vim_keys]
#openstack = /etc/tacker/vim/fernet_keys
[tacker_nova]
# parameters for novaclient to talk to nova
region_name = {{ tacker_service_region }}
project_domain_id = {{ nova_service_project_domain_id }}
project_name = {{ nova_service_project_name }}
user_domain_id = {{ nova_service_user_domain_id }}
password = {{ nova_service_password }}
username = {{ nova_service_user_name }}
auth_url = {{ keystone_service_adminuri }}
auth_plugin = {{ nova_keystone_auth_plugin }}
[tacker_heat]
heat_uri = {{ heat_service_adminurl }}
stack_retries = {{ tacker_heat_stack_retires }}
stack_retry_wait = {{ tacker_heat_stack_retry_wait }}