# sahara/sahara/tests/integration/configs/itest.conf.sample-full

[COMMON]
# Username for OpenStack (string value)
#OS_USERNAME = 'admin'
# Password for OpenStack (string value)
#OS_PASSWORD = 'admin'
# Tenant name for OpenStack (string value)
#OS_TENANT_NAME = 'admin'
# Tenant ID for OpenStack (string value)
#OS_TENANT_ID = <None>
# URL for OpenStack (string value)
#OS_AUTH_URL = 'http://127.0.0.1:5000/v2.0'
# OpenStack auth version for Swift (string value)
#SWIFT_AUTH_VERSION = 2
# Host for Sahara API (string value)
#SAHARA_HOST = '127.0.0.1'
# Port for Sahara API (integer value)
#SAHARA_PORT = '8386'
# API version for Sahara (string value)
#SAHARA_API_VERSION = '1.1'
# OpenStack flavor ID for virtual machines. If you leave the default value of
# this parameter, a flavor will be created automatically using the nova
# client. The created flavor will have the following parameters:
# name=i-test-flavor-<id>, ram=1024, vcpus=1, disk=10, ephemeral=10. <id> is
# an 8-character ID (letters and/or digits) appended to the flavor name for
# uniqueness (string value)
#FLAVOR_ID = <None>
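# For example, to use an existing flavor instead (the ID below is
# illustrative; list real ones with `nova flavor-list`):
# FLAVOR_ID = '2'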
# Cluster creation timeout (in minutes); minimal value is 1 (integer value)
#CLUSTER_CREATION_TIMEOUT = 30
# Timeout for node process deployment on cluster nodes (in minutes);
# minimal value is 1 (integer value)
#TELNET_TIMEOUT = 5
# Timeout for HDFS initialization (in minutes); minimal value is 1
# (integer value)
#HDFS_INITIALIZATION_TIMEOUT = 5
# Timeout for job creation (in minutes); minimal value is 1 (integer value)
#JOB_LAUNCH_TIMEOUT = 5
# Timeout for polling the state of a transient cluster (in minutes);
# minimal value is 1 (integer value)
#TRANSIENT_CLUSTER_TIMEOUT = 3
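# For example, on a slow or heavily loaded environment you might raise the
# cluster creation timeout (the value below is illustrative):
# CLUSTER_CREATION_TIMEOUT = 60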
# Name for cluster (string value)
#CLUSTER_NAME = 'test-cluster'
# OpenStack key pair ID of your SSH public key. Sahara transfers this key to
# cluster nodes so that users can access the virtual machines of the cluster
# via SSH. You can export your id_rsa.pub public key to OpenStack and specify
# its key pair ID in the test configuration file. If you already have a key
# pair in OpenStack, just specify its key pair ID here. If you have no key
# pair in OpenStack, or do not want to export (create) one, specify any key
# pair ID you like (for example, "king-kong"), but then you must leave the
# default value of the PATH_TO_SSH_KEY parameter. In that case the key pair
# will be created automatically, and a short 8-character ID (letters and/or
# digits) will be appended to the key pair ID for uniqueness. The key pair
# will be deleted at the end of the tests (string value)
#USER_KEYPAIR_ID = 'sahara-i-test-key-pair'
# Path to the id_rsa key which the tests use for remote command execution.
# If you specify a wrong path to the key, you will get the error "Private key
# file is encrypted". Please make sure you specify the right path. If this
# parameter is not specified, a key pair (private and public SSH keys) will
# be generated automatically using the nova client (string value)
#PATH_TO_SSH_KEY = <None>
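# For example, to reuse a key pair already registered in OpenStack (the name
# and path below are illustrative):
# USER_KEYPAIR_ID = 'my-keypair'
# PATH_TO_SSH_KEY = '/home/user/.ssh/id_rsa'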
# Pool name for floating IPs. If Sahara uses the Nova management network and
# auto assignment of IPs is enabled, leave the default value of this
# parameter. If auto assignment is not enabled, specify a floating IP pool
# name. If Sahara uses the Neutron management network, you must always
# specify a floating IP pool name (string value)
#FLOATING_IP_POOL = <None>
# If Sahara uses the Nova management network, leave the default value of this
# flag. If Sahara uses the Neutron management network, set this flag to True
# and specify values for the following parameters: FLOATING_IP_POOL and
# INTERNAL_NEUTRON_NETWORK (boolean value)
#NEUTRON_ENABLED = False
# Name for internal Neutron network (string value)
#INTERNAL_NEUTRON_NETWORK = 'private'
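# For example, a typical Neutron setup (the network names below are
# illustrative):
# NEUTRON_ENABLED = True
# FLOATING_IP_POOL = 'public'
# INTERNAL_NEUTRON_NETWORK = 'private'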
# If this flag is True, the cluster is not deleted after the test. This is a
# debugging aid for cases where errors are logged on the cluster nodes but
# the cause of the failure is not evident from the integration test logs,
# e.g. an Oozie exception. It is intended for use on local hosts, not the
# official CI host.
#RETAIN_CLUSTER_AFTER_TEST = False
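# For example, when debugging a failing test on a local host:
# RETAIN_CLUSTER_AFTER_TEST = True
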
[VANILLA]
# Name of plugin (string value)
#PLUGIN_NAME = 'vanilla'
# ID of the image used for cluster creation. You can specify the image name
# or image tag instead of the image ID. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_ID = <None>
# Name of the image used for cluster creation. You can specify the image ID
# or image tag instead of the image name. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_NAME = <None>
# Tag of the image used for cluster creation. You can specify the image ID
# or image name instead of the image tag. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_TAG = <None>
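# For example, to select the image by name and leave the other two
# image-related parameters unset (the name below is illustrative):
# IMAGE_NAME = 'ubuntu-vanilla-1.2.1'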
# Username used to access cluster nodes via SSH (string value)
#SSH_USERNAME = <None>
# Version of Hadoop (string value)
#HADOOP_VERSION = '1.2.1'
# Username which is used for access to Hadoop services (string value)
#HADOOP_USER = 'hadoop'
# Directory where Hadoop jar files are located (string value)
#HADOOP_DIRECTORY = '/usr/share/hadoop'
# Directory where logs of completed jobs are located (string value)
#HADOOP_LOG_DIRECTORY = '/mnt/log/hadoop/hadoop/userlogs'
# Directory where logs of completed jobs on volume mounted to node are located
# (string value)
#HADOOP_LOG_DIRECTORY_ON_VOLUME = '/volumes/disk1/log/hadoop/hadoop/userlogs'
# (dictionary value)
#HADOOP_PROCESSES_WITH_PORTS = jobtracker: 50030, namenode: 50070, tasktracker: 50060, datanode: 50075, secondarynamenode: 50090, oozie: 11000
# (dictionary value)
#PROCESS_NAMES = nn: namenode, tt: tasktracker, dn: datanode
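# Flags for skipping individual test suites for this plugin (boolean values)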
#SKIP_ALL_TESTS_FOR_PLUGIN = False
#SKIP_CINDER_TEST = False
#SKIP_CLUSTER_CONFIG_TEST = False
#SKIP_EDP_TEST = False
#SKIP_MAP_REDUCE_TEST = False
#SKIP_SWIFT_TEST = False
#SKIP_SCALING_TEST = False
#SKIP_TRANSIENT_CLUSTER_TEST = False
#ONLY_TRANSIENT_CLUSTER_TEST = False
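# For example, to run only the transient cluster test for this plugin:
# ONLY_TRANSIENT_CLUSTER_TEST = True
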
[HDP]
# Name of plugin (string value)
#PLUGIN_NAME = 'hdp'
# ID of the image used for cluster creation. You can specify the image name
# or image tag instead of the image ID. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_ID = <None>
# Name of the image used for cluster creation. You can specify the image ID
# or image tag instead of the image name. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_NAME = <None>
# Tag of the image used for cluster creation. You can specify the image ID
# or image name instead of the image tag. If you do not specify any
# image-related parameters, the image for cluster creation will be chosen by
# the tag "sahara_i_tests" (string value)
#IMAGE_TAG = <None>
# Username used to access cluster nodes via SSH (string value)
#SSH_USERNAME = <None>
# A list of processes that will be launched on the master node (list value)
#MASTER_NODE_PROCESSES = JOBTRACKER, NAMENODE, SECONDARY_NAMENODE, GANGLIA_SERVER, NAGIOS_SERVER, AMBARI_SERVER, OOZIE_SERVER
# A list of processes that will be launched on worker nodes (list value)
#WORKER_NODE_PROCESSES = TASKTRACKER, DATANODE, HDFS_CLIENT, MAPREDUCE_CLIENT, OOZIE_CLIENT, PIG
# Version of Hadoop (string value)
#HADOOP_VERSION = '1.3.2'
# Username which is used for access to Hadoop services (string value)
#HADOOP_USER = 'hdfs'
# Directory where Hadoop jar files are located (string value)
#HADOOP_DIRECTORY = '/usr/lib/hadoop'
# Directory where logs of completed jobs are located (string value)
#HADOOP_LOG_DIRECTORY = '/mnt/hadoop/mapred/userlogs'
# Directory where logs of completed jobs on volume mounted to node are located
# (string value)
#HADOOP_LOG_DIRECTORY_ON_VOLUME = '/volumes/disk1/hadoop/mapred/userlogs'
# The number of hosts to add while scaling an existing node group (integer
# value)
#SCALE_EXISTING_NG_COUNT = 1
# The number of hosts to add while scaling a new node group (integer value)
#SCALE_NEW_NG_COUNT = 1
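# For example, to add two hosts to the existing node group during the
# scaling test (the value below is illustrative):
# SCALE_EXISTING_NG_COUNT = 2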
# (dictionary value)
#HADOOP_PROCESSES_WITH_PORTS = JOBTRACKER: 50030, NAMENODE: 50070, TASKTRACKER: 50060, DATANODE: 50075, SECONDARY_NAMENODE: 50090
# (dictionary value)
#PROCESS_NAMES = nn: NAMENODE, tt: TASKTRACKER, dn: DATANODE
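# Flags for skipping individual test suites for this plugin (boolean values)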
#SKIP_ALL_TESTS_FOR_PLUGIN = False
#SKIP_CINDER_TEST = False
#SKIP_MAP_REDUCE_TEST = False
#SKIP_SWIFT_TEST = False
#SKIP_SCALING_TEST = False
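# For example, if the target cloud has no Cinder service:
# SKIP_CINDER_TEST = True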