# sahara/etc/savanna/savanna.conf.sample-full

[DEFAULT]
#
# Options defined in savanna.config
#
# Hostname or IP address that will be used to listen on
# (string value)
#host=
# Port that will be used to listen on (integer value)
#port=8386
# Log request/response exchange details: environ, headers and
# bodies (boolean value)
#log_exchange=false
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)
#job_binary_max_KB=5120
# If set to True, Savanna will use floating IPs to communicate
# with instances. To make sure that all instances have
# floating IPs assigned in Nova Network, set
# "auto_assign_floating_ip=True" in nova.conf. If Neutron is
# used for networking, make sure that all Node Groups have the
# "floating_ip_pool" parameter defined. (boolean value)
#use_floating_ips=true
# The suffix of the node's FQDN. In nova-network this is the
# dhcp_domain config parameter. (string value)
#node_domain=novalocal
# Use Neutron Networking (False indicates the use of Nova
# networking) (boolean value)
#use_neutron=false
# Use network namespaces for communication (only valid to use
# in conjunction with use_neutron=True) (boolean value)
#use_namespaces=false
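# An illustrative combination of the networking options above for a
# Neutron-based deployment that reaches instances via floating IPs
# (example values, not defaults; each Node Group must also have its
# "floating_ip_pool" parameter defined):
#   use_floating_ips=true
#   use_neutron=true
#   use_namespaces=true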
#
# Options defined in savanna.main
#
# Protocol used to access OpenStack Identity service (string
# value)
#os_auth_protocol=http
# IP or hostname of machine on which OpenStack Identity
# service is located (string value)
#os_auth_host=127.0.0.1
# Port of OpenStack Identity service (string value)
#os_auth_port=5000
# This OpenStack user is used to verify provided tokens. The
# user must have the admin role in the <os_admin_tenant_name>
# tenant (string value)
#os_admin_username=admin
# Password of the admin user (string value)
#os_admin_password=nova
# Name of tenant where the user is admin (string value)
#os_admin_tenant_name=admin
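# An illustrative Identity configuration; the address and credentials
# below are placeholders, not defaults:
#   os_auth_protocol=http
#   os_auth_host=192.168.0.5
#   os_auth_port=5000
#   os_admin_username=savanna
#   os_admin_password=<admin-password>
#   os_admin_tenant_name=services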
# An engine which will be used to provision infrastructure for
# a Hadoop cluster. (string value)
#infrastructure_engine=savanna
# A method for Savanna to execute commands on VMs. (string
# value)
#remote=ssh
#
# Options defined in savanna.db.base
#
# Driver to use for database access (string value)
#db_driver=savanna.db
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# The filename to use with SQLite (string value)
#sqlite_db=savanna.sqlite
# If true, use synchronous mode for SQLite (boolean value)
#sqlite_synchronous=true
#
# Options defined in savanna.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error (boolean value)
#use_stderr=true
# Format string to use for log messages with context (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,qpid.messaging=INFO,stevedore=INFO,eventlet.wsgi.server=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,paramiko=WARN,requests=WARN,iso8601=WARN
# Publish error events (boolean value)
#publish_errors=false
# Make deprecations fatal (boolean value)
#fatal_deprecations=false
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
#log_config_append=<None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
#log_file=<None>
# (Optional) The base directory used for relative --log-file
# paths (string value)
#log_dir=<None>
# Use syslog for logging. (boolean value)
#use_syslog=false
# Syslog facility to receive log lines (string value)
#syslog_log_facility=LOG_USER
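# An illustrative logging setup that writes DEBUG-level output to
# /var/log/savanna/savanna.log; the directory and file name are
# examples, not defaults:
#   debug=true
#   log_dir=/var/log/savanna
#   log_file=savanna.log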
#
# Options defined in savanna.plugins.base
#
# List of plugins to be loaded. Savanna preserves the order of
# the list when returning it. (list value)
#plugins=vanilla,hdp
#
# Options defined in savanna.service.edp.job_manager
#
# Postfix for storing jobs in HDFS. It will be appended to
# /user/hadoop/ (string value)
#job_workflow_postfix=
#
# Options defined in savanna.service.periodic
#
# Enable periodic tasks (boolean value)
#periodic_enable=true
# Range of seconds to randomly delay when starting the
# periodic task scheduler to reduce stampeding. (Disable by
# setting to 0) (integer value)
#periodic_fuzzy_delay=60
# Max interval size between periodic task executions, in
# seconds (integer value)
#periodic_interval_max=60
#
# Options defined in savanna.topology.topology_helper
#
# Enables data locality for the Hadoop cluster.
# Also enables data locality for Swift used by Hadoop.
# If enabled, the 'compute_topology_file' and 'swift_topology_file'
# configuration parameters should point to the OpenStack and Swift
# topology files respectively. (boolean value)
#enable_data_locality=false
# Enables four-level topology for data locality.
# Works only if the corresponding plugin supports such a mode.
# (boolean value)
#enable_hypervisor_awareness=true
# File with the nova compute topology. It should contain a
# mapping between nova compute nodes and racks.
# File format:
#   compute1 /rack1
#   compute2 /rack2
#   compute3 /rack2
# (string value)
#compute_topology_file=etc/savanna/compute.topology
# File with the Swift topology. It should contain a mapping
# between Swift nodes and racks.
# File format:
#   node1 /rack1
#   node2 /rack2
#   node3 /rack2
# (string value)
#swift_topology_file=etc/savanna/swift.topology
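# An illustrative data-locality setup; the topology file paths are the
# defaults shown above, repeated here only to show the related options
# together:
#   enable_data_locality=true
#   compute_topology_file=etc/savanna/compute.topology
#   swift_topology_file=etc/savanna/swift.topology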
#
# Options defined in savanna.utils.openstack.keystone
#
# Enables Savanna to use Keystone API v3. If that flag is
# disabled, per-job clusters will not be terminated
# automatically. (boolean value)
#use_identity_api_v3=false
#
# Options defined in savanna.utils.ssh_remote
#
# Maximum number of remote operations that will be running at
# the same time. Note that each remote operation requires its
# own process to run. (integer value)
#global_remote_threshold=100
# The same as global_remote_threshold, but for a single
# cluster. (integer value)
#cluster_remote_threshold=70
[conductor]
#
# Options defined in savanna.conductor.api
#
# Perform savanna-conductor operations locally (boolean value)
#use_local=true
[database]
#
# Options defined in savanna.db.migration.cli
#
# URL to database (string value)
#connection=
#
# Options defined in savanna.openstack.common.db.api
#
# The backend to use for the database (string value)
#backend=sqlalchemy
#
# Options defined in savanna.openstack.common.db.sqlalchemy.session
#
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=sqlite:////savanna/openstack/common/db/$sqlite_db
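# An illustrative MySQL connection string; the credentials, host and
# database name are placeholders, not defaults:
#   connection=mysql://savanna:savanna@localhost/savanna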
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# Timeout before idle SQL connections are reaped (integer
# value)
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
#max_pool_size=<None>
# Maximum DB connection retries during startup (setting -1
# implies an infinite retry count) (integer value)
#max_retries=10
# Interval between retries of opening a SQL connection
# (integer value)
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
#pool_timeout=<None>
# Total option count: 62