Remove all non-incubated oslo code

Rally repository includes openstack.common module with modules from
oslo-incubator(modules are listed in openstack-common.conf file).

All those modules can be split into 4 categories:
 1. logging modules
 2. aas related modules
 3. cliutils
 4. config

Modules from first category were graduated to separate lib, so we can remove
them and use `oslo.log`.

Modules from the second category are not really used and will not be used,
because Rally-as-a-Service will use the Flask microframework instead of
pecan (the modules from oslo-incubator are designed for pecan).

Since the oslo core team does not plan to graduate the `cliutils` module
from oslo-incubator, there is no reason to sync this module with oslo; we
can copy the needed functions and maintain them in our repo.

In addition to the rally.openstack dir, modules from oslo-incubator are
located in the tools dir.
The tools/config modules work pretty badly; opts from rally.osclients are
always lost during automated config generation.
`oslo.config` lib provides a better way to autogenerate config sample of
Rally. It requires new entry point, which returns a list with all opts(look
at 'rally.common.opts', 'setup.cfg' for more details). Since all opts are
used in one place, their names were unified.
Also config was updated(via `tox -egenconfig`).

The tools/rally.bash_completion file is moved to the etc dir, because the
etc dir is a better fit, and this gave us the ability to remove the whole
tools dir.

Closes-Bug: #1378960

Change-Id: Ic127269c367275d3adcfc9f40d9144fce8295391
This commit is contained in:
Andrey Kurilin 2015-02-04 12:23:08 +02:00
parent 5738707983
commit 387dc356e3
62 changed files with 962 additions and 3771 deletions

View File

@ -1,542 +1,477 @@
[DEFAULT]
#
# Options defined in rally.exceptions
# From oslo.log
#
# make exception message format errors fatal (boolean value)
#fatal_exception_format_errors=false
# Print debugging output (set logging level to DEBUG instead of
# default WARNING level). (boolean value)
#debug = false
# Print more verbose output (set logging level to INFO instead of
# default WARNING level). (boolean value)
#verbose = false
#
# Options defined in rally.log
#
# Print debugging output only for Rally. Off-site components
# stay quiet. (boolean value)
#rally_debug=false
#
# Options defined in rally.openstack.common.eventlet_backdoor
#
# Enable eventlet backdoor. Acceptable values are 0, <port>,
# and <start>:<end>, where 0 results in listening on a random
# tcp port number; <port> results in listening on the
# specified port number (and not enabling backdoor if that
# port is in use); and <start>:<end> results in listening on
# the smallest unused port number within the specified range
# of port numbers. The chosen port is displayed in the
# service's log file. (string value)
#backdoor_port=<None>
#
# Options defined in rally.openstack.common.lockutils
#
# Whether to disable inter-process locks (boolean value)
#disable_process_locking=false
# Directory to use for lock files. (string value)
#lock_path=<None>
#
# Options defined in rally.openstack.common.log
#
# Print debugging output (set logging level to DEBUG instead
# of default WARNING level). (boolean value)
#debug=false
# Print more verbose output (set logging level to INFO instead
# of default WARNING level). (boolean value)
#verbose=false
# Log output to standard error. (boolean value)
#use_stderr=true
# Format string to use for log messages with context. (string
# value)
#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context.
# The name of a logging configuration file. This file is appended to
# any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation.
# (string value)
#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG. (string
# value)
#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format.
# (string value)
#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN
# Enables or disables publication of error events. (boolean
# value)
#publish_errors=false
# Enables or disables fatal status of deprecations. (boolean
# value)
#fatal_deprecations=false
# The format for an instance that is passed with the log
# message. (string value)
#instance_format="[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log
# message. (string value)
#instance_uuid_format="[instance: %(uuid)s] "
# The name of a logging configuration file. This file is
# appended to any existing logging configuration files. For
# details about logging configuration files, see the Python
# logging module documentation. (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
#log_config_append = <None>
# DEPRECATED. A logging.Formatter log message format string
# which may use any of the available logging.LogRecord
# attributes. This option is deprecated. Please use
# logging_context_format_string and
# DEPRECATED. A logging.Formatter log message format string which may
# use any of the available logging.LogRecord attributes. This option
# is deprecated. Please use logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format=<None>
#log_format = <None>
# Format string for %%(asctime)s in log records. Default:
# %(default)s . (string value)
#log_date_format=%Y-%m-%d %H:%M:%S
# Format string for %%(asctime)s in log records. Default: %(default)s
# . (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is
# set, logging will go to stdout. (string value)
# (Optional) Name of log file to output to. If no default is set,
# logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file=<None>
#log_file = <None>
# (Optional) The base directory used for relative --log-file
# paths. (string value)
# (Optional) The base directory used for relative --log-file paths.
# (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir=<None>
#log_dir = <None>
# Use syslog for logging. Existing syslog format is DEPRECATED
# during I, and will change in J to honor RFC5424. (boolean
# value)
#use_syslog=false
# Use syslog for logging. Existing syslog format is DEPRECATED during
# I, and will change in J to honor RFC5424. (boolean value)
#use_syslog = false
# (Optional) Enables or disables syslog rfc5424 format for
# logging. If enabled, prefixes the MSG part of the syslog
# message with APP-NAME (RFC5424). The format without the APP-
# NAME is deprecated in I, and will be removed in J. (boolean
# value)
#use_syslog_rfc_format=false
# (Optional) Enables or disables syslog rfc5424 format for logging. If
# enabled, prefixes the MSG part of the syslog message with APP-NAME
# (RFC5424). The format without the APP-NAME is deprecated in I, and
# will be removed in J. (boolean value)
#use_syslog_rfc_format = false
# Syslog facility to receive log lines. (string value)
#syslog_log_facility=LOG_USER
#syslog_log_facility = LOG_USER
# Log output to standard error. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string
# value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
# The format for an instance that is passed with the log message.
# (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message.
# (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
#
# Options defined in rally.openstack.common.periodic_task
# From rally
#
# Some periodic tasks can be run in a separate process. Should
# we run them here? (boolean value)
#run_external_periodic_tasks=true
# Print debugging output only for Rally. Off-site components stay
# quiet. (boolean value)
#rally_debug = false
#
# Options defined in rally.osclients
#
# make exception message format errors fatal (boolean value)
#fatal_exception_format_errors = false
# HTTP timeout for any of OpenStack service in seconds
#openstack_client_http_timeout=180.0
# HTTP timeout for any of OpenStack service in seconds (floating point
# value)
#openstack_client_http_timeout = 180.0
# Use SSL for all OpenStack API interfaces
#https_insecure=False
# Use SSL for all OpenStack API interfaces (boolean value)
#https_insecure = false
# Path to CA server certificate for SSL (string value)
#https_cacert = <None>
# Path to CA server certificate for SSL
#https_cacert=<None>
[benchmark]
#
# Options defined in rally.benchmark.scenarios.cinder.utils
# From rally
#
# Time to sleep after creating a resource before polling for
# it status (floating point value)
#cinder_volume_create_prepoll_delay=2.0
# Time to sleep after creating a resource before polling for it status
# (floating point value)
#cinder_volume_create_prepoll_delay = 2.0
# Time to wait for cinder volume to be created. (floating
# Time to wait for cinder volume to be created. (floating point value)
#cinder_volume_create_timeout = 600.0
# Interval between checks when waiting for volume creation. (floating
# point value)
#cinder_volume_create_timeout=600.0
#cinder_volume_create_poll_interval = 2.0
# Interval between checks when waiting for volume creation.
# (floating point value)
#cinder_volume_create_poll_interval=2.0
# Time to wait for cinder volume to be deleted. (floating point value)
#cinder_volume_delete_timeout = 600.0
# Time to wait for cinder volume to be deleted. (floating
# Interval between checks when waiting for volume deletion. (floating
# point value)
#cinder_volume_delete_timeout=600.0
#cinder_volume_delete_poll_interval = 2.0
# Interval between checks when waiting for volume deletion.
# Time to sleep after creating a resource before polling for it status
# (floating point value)
#cinder_volume_delete_poll_interval=2.0
#glance_image_create_prepoll_delay = 2.0
# Time to wait for glance image to be created. (floating point value)
#glance_image_create_timeout = 120.0
#
# Options defined in rally.benchmark.scenarios.glance.utils
#
# Interval between checks when waiting for image creation. (floating
# point value)
#glance_image_create_poll_interval = 1.0
# Time to sleep after creating a resource before polling for
# it status (floating point value)
#glance_image_create_prepoll_delay=2.0
# Time to wait for glance image to be deleted. (floating point value)
#glance_image_delete_timeout = 120.0
# Time to wait for glance image to be created. (floating point
# Interval between checks when waiting for image deletion. (floating
# point value)
#glance_image_delete_poll_interval = 1.0
# Time to sleep after creating a resource before polling for it status
# (floating point value)
#heat_stack_create_prepoll_delay = 2.0
# Time to wait for heat stack to be created. (floating point value)
#heat_stack_create_timeout = 3600.0
# Interval between checks when waiting for stack creation. (floating
# point value)
#heat_stack_create_poll_interval = 1.0
# Time to wait for heat stack to be deleted. (floating point value)
#heat_stack_delete_timeout = 3600.0
# Interval between checks when waiting for stack deletion. (floating
# point value)
#heat_stack_delete_poll_interval = 1.0
# Time to sleep after updating a resource before polling for it status
# (floating point value)
#heat_stack_update_prepoll_delay = 2.0
# Time to wait for stack to be updated (floating point value)
#heat_stack_update_timeout = 3600.0
# Interval between checks when waiting for stack update. (floating
# point value)
#heat_stack_update_poll_interval = 1.0
# Time to sleep after start before polling for status (floating point
# value)
#glance_image_create_timeout=120.0
# Interval between checks when waiting for image creation.
# (floating point value)
#glance_image_create_poll_interval=1.0
# Time to wait for glance image to be deleted. (floating point
# value)
#glance_image_delete_timeout=120.0
# Interval between checks when waiting for image deletion.
# (floating point value)
#glance_image_delete_poll_interval=1.0
#
# Options defined in rally.benchmark.scenarios.heat.utils
#
# Time to sleep after creating a resource before polling for
# it status (floating point value)
#heat_stack_create_prepoll_delay=2.0
# Time to wait for heat stack to be created. (floating point
# value)
#heat_stack_create_timeout=3600.0
# Interval between checks when waiting for stack creation.
# (floating point value)
#heat_stack_create_poll_interval=1.0
# Time to wait for heat stack to be deleted. (floating point
# value)
#heat_stack_delete_timeout=3600.0
# Interval between checks when waiting for stack deletion.
# (floating point value)
#heat_stack_delete_poll_interval=1.0
#
# Options defined in rally.benchmark.scenarios.nova.utils
#
# Time to sleep after start before polling for status
# (floating point value)
#nova_server_start_prepoll_delay=0.0
#nova_server_start_prepoll_delay = 0.0
# Server start timeout (floating point value)
#nova_server_start_timeout=300.0
#nova_server_start_timeout = 300.0
# Server start poll interval (floating point value)
#nova_server_start_poll_interval=1.0
#nova_server_start_poll_interval = 1.0
# Time to sleep after stop before polling for status (floating
# point value)
#nova_server_stop_prepoll_delay=0.0
# Time to sleep after stop before polling for status (floating point
# value)
#nova_server_stop_prepoll_delay = 0.0
# Server stop timeout (floating point value)
#nova_server_stop_timeout=300.0
#nova_server_stop_timeout = 300.0
# Server stop poll interval (floating point value)
#nova_server_stop_poll_interval=2.0
#nova_server_stop_poll_interval = 2.0
# Time to sleep after boot before polling for status (floating
# point value)
#nova_server_boot_prepoll_delay=1.0
# Time to sleep after boot before polling for status (floating point
# value)
#nova_server_boot_prepoll_delay = 1.0
# Server boot timeout (floating point value)
#nova_server_boot_timeout=300.0
#nova_server_boot_timeout = 300.0
# Server boot poll interval (floating point value)
#nova_server_boot_poll_interval=1.0
#nova_server_boot_poll_interval = 1.0
# Time to sleep after delete before polling for status
# (floating point value)
#nova_server_delete_prepoll_delay=2.0
# Time to sleep after delete before polling for status (floating point
# value)
#nova_server_delete_prepoll_delay = 2.0
# Server delete timeout (floating point value)
#nova_server_delete_timeout=300.0
#nova_server_delete_timeout = 300.0
# Server delete poll interval (floating point value)
#nova_server_delete_poll_interval=2.0
#nova_server_delete_poll_interval = 2.0
# Time to sleep after reboot before polling for status
# (floating point value)
#nova_server_reboot_prepoll_delay=2.0
# Time to sleep after reboot before polling for status (floating point
# value)
#nova_server_reboot_prepoll_delay = 2.0
# Server reboot timeout (floating point value)
#nova_server_reboot_timeout=300.0
#nova_server_reboot_timeout = 300.0
# Server reboot poll interval (floating point value)
#nova_server_reboot_poll_interval=2.0
#nova_server_reboot_poll_interval = 2.0
# Time to sleep after rescue before polling for status
# (floating point value)
#nova_server_rescue_prepoll_delay=2.0
# Time to sleep after rescue before polling for status (floating point
# value)
#nova_server_rescue_prepoll_delay = 2.0
# Server rescue timeout (floating point value)
#nova_server_rescue_timeout=300.0
#nova_server_rescue_timeout = 300.0
# Server rescue poll interval (floating point value)
#nova_server_rescue_poll_interval=2.0
#nova_server_rescue_poll_interval = 2.0
# Time to sleep after unrescue before polling for status
# (floating point value)
#nova_server_unrescue_prepoll_delay=2.0
# Time to sleep after unrescue before polling for status (floating
# point value)
#nova_server_unrescue_prepoll_delay = 2.0
# Server unrescue timeout (floating point value)
#nova_server_unrescue_timeout=300.0
#nova_server_unrescue_timeout = 300.0
# Server unrescue poll interval (floating point value)
#nova_server_unrescue_poll_interval=2.0
#nova_server_unrescue_poll_interval = 2.0
# Time to sleep after suspend before polling for status
# (floating point value)
#nova_server_suspend_prepoll_delay=2.0
# Time to sleep after suspend before polling for status (floating
# point value)
#nova_server_suspend_prepoll_delay = 2.0
# Server suspend timeout (floating point value)
#nova_server_suspend_timeout=300.0
#nova_server_suspend_timeout = 300.0
# Server suspend poll interval (floating point value)
#nova_server_suspend_poll_interval=2.0
#nova_server_suspend_poll_interval = 2.0
# Time to sleep after image_create before polling for status
# (floating point value)
#nova_server_image_create_prepoll_delay=0.0
# Time to sleep after image_create before polling for status (floating
# point value)
#nova_server_image_create_prepoll_delay = 0.0
# Server image_create timeout (floating point value)
#nova_server_image_create_timeout=300.0
#nova_server_image_create_timeout = 300.0
# Server image_create poll interval (floating point value)
#nova_server_image_create_poll_interval=2.0
#nova_server_image_create_poll_interval = 2.0
# Time to sleep after image_delete before polling for status
# (floating point value)
#nova_server_image_delete_prepoll_delay=0.0
# Time to sleep after image_delete before polling for status (floating
# point value)
#nova_server_image_delete_prepoll_delay = 0.0
# Server image_delete timeout (floating point value)
#nova_server_image_delete_timeout=300.0
#nova_server_image_delete_timeout = 300.0
# Server image_delete poll interval (floating point value)
#nova_server_image_delete_poll_interval=2.0
#nova_server_image_delete_poll_interval = 2.0
# Time to sleep after resize before polling for status
# (floating point value)
#nova_server_resize_prepoll_delay=2.0
# Time to sleep after resize before polling for status (floating point
# value)
#nova_server_resize_prepoll_delay = 2.0
# Server resize timeout (floating point value)
#nova_server_resize_timeout=400.0
#nova_server_resize_timeout = 400.0
# Server resize poll interval (floating point value)
#nova_server_resize_poll_interval=5.0
#nova_server_resize_poll_interval = 5.0
# Time to sleep after resize_confirm before polling for status
# (floating point value)
#nova_server_resize_confirm_prepoll_delay=0.0
#nova_server_resize_confirm_prepoll_delay = 0.0
# Server resize_confirm timeout (floating point value)
#nova_server_resize_confirm_timeout=200.0
#nova_server_resize_confirm_timeout = 200.0
# Server resize_confirm poll interval (floating point value)
#nova_server_resize_confirm_poll_interval=2.0
#nova_server_resize_confirm_poll_interval = 2.0
# Time to sleep after resize_revert before polling for status
# (floating point value)
#nova_server_resize_revert_prepoll_delay=0.0
#nova_server_resize_revert_prepoll_delay = 0.0
# Server resize_revert timeout (floating point value)
#nova_server_resize_revert_timeout=200.0
#nova_server_resize_revert_timeout = 200.0
# Server resize_revert poll interval (floating point value)
#nova_server_resize_revert_poll_interval=2.0
#nova_server_resize_revert_poll_interval = 2.0
# Time to sleep after live_migrate before polling for status (floating
# point value)
#nova_server_live_migrate_prepoll_delay = 1.0
#
# Options defined in rally.benchmark.scenarios.sahara.utils
#
# Server live_migrate timeout (floating point value)
#nova_server_live_migrate_timeout = 400.0
# A timeout in seconds for a cluster create operation (integer
# value)
#cluster_create_timeout=600
# Server live_migrate poll interval (floating point value)
#nova_server_live_migrate_poll_interval = 2.0
# Time to sleep after migrate before polling for status (floating
# point value)
#nova_server_migrate_prepoll_delay = 1.0
# Server migrate timeout (floating point value)
#nova_server_migrate_timeout = 400.0
# Server migrate poll interval (floating point value)
#nova_server_migrate_poll_interval = 2.0
# A timeout in seconds for a cluster create operation (integer value)
#cluster_create_timeout = 600
# Cluster status polling interval in seconds (integer value)
#cluster_check_interval=5
#cluster_check_interval = 5
# A timeout in seconds for a cluster create operation (integer
# value)
#job_execution_timeout=600
# A timeout in seconds for a cluster create operation (integer value)
#job_execution_timeout = 600
# Cluster status polling interval in seconds (integer value)
#job_check_interval=5
#job_check_interval = 5
[database]
#
# Options defined in oslo.db
# From oslo.db
#
# The file name to use with SQLite. (string value)
#sqlite_db=oslo.sqlite
# Deprecated group/name - [DEFAULT]/sqlite_db
#sqlite_db = oslo.sqlite
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous=true
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend=sqlalchemy
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the
# database. (string value)
# The SQLAlchemy connection string to use to connect to the database.
# (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection=<None>
#connection = <None>
# The SQLAlchemy connection string to use to connect to the
# slave database. (string value)
#slave_connection=<None>
# The SQLAlchemy connection string to use to connect to the slave
# database. (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option,
# including the default, overrides any server-set SQL mode. To
# use whatever SQL mode is set by the server configuration,
# set this to no value. Example: mysql_sql_mode= (string
# value)
#mysql_sql_mode=TRADITIONAL
# The SQL mode to be used for MySQL sessions. This option, including
# the default, overrides any server-set SQL mode. To use whatever SQL
# mode is set by the server configuration, set this to no value.
# Example: mysql_sql_mode= (string value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle SQL connections are reaped. (integer
# value)
# Timeout before idle SQL connections are reaped. (integer value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout=3600
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool.
# (integer value)
# Minimum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size=1
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool.
# (integer value)
# Maximum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size=<None>
#max_pool_size = <None>
# Maximum db connection retries during startup. Set to -1 to
# specify an infinite retry count. (integer value)
# Maximum number of database connection retries during startup. Set to
# -1 to specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries=10
#max_retries = 10
# Interval between retries of opening a SQL connection.
# (integer value)
# Interval between retries of opening a SQL connection. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval=10
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy.
# (integer value)
# If set, use this value for max_overflow with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow=<None>
#max_overflow = <None>
# Verbosity of SQL debugging information: 0=None,
# 100=Everything. (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug=0
# Add Python stack traces to SQL as comment strings. (boolean
# value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace=false
# If set, use this value for pool_timeout with SQLAlchemy.
# Verbosity of SQL debugging information: 0=None, 100=Everything.
# (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout=<None>
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Enable the experimental use of database reconnect on
# connection lost. (boolean value)
#use_db_reconnect=false
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection
# lost. (boolean value)
#use_db_reconnect = false
# Seconds between database connection retries. (integer value)
#db_retry_interval=1
#db_retry_interval = 1
# If True, increases the interval between database connection
# retries up to db_max_retry_interval. (boolean value)
#db_inc_retry_interval=true
# If True, increases the interval between database connection retries
# up to db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between
# database connection retries. (integer value)
#db_max_retry_interval=10
#db_max_retry_interval = 10
# Maximum database connection retries before error is raised.
# Set to -1 to specify an infinite retry count. (integer
# value)
#db_max_retries=20
# Maximum database connection retries before error is raised. Set to
# -1 to specify an infinite retry count. (integer value)
#db_max_retries = 20
[image]
#
# Options defined in rally.verification.verifiers.tempest.config
# From rally
#
# Version of cirros image (string value)
#cirros_version=0.3.2
#cirros_version = 0.3.2
# Cirros image name (string value)
#cirros_image=cirros-0.3.2-x86_64-disk.img
[rest]
#
# Options defined in rally.aas.rest
#
# The port for the Rally API server (integer value)
#port=8877
# The listen IP for the Rally API server (string value)
#host=0.0.0.0
#cirros_image = cirros-0.3.2-x86_64-disk.img
[users_context]
#
# Options defined in rally.benchmark.context.users
# From rally
#
# How many concurrent threads use for serving users context
# (integer value)
#resource_management_workers=30
# ID of domain in which projects will be created. (string
# How many concurrent threads use for serving users context (integer
# value)
#project_domain=default
#resource_management_workers = 30
# ID of domain in which projects will be created. (string value)
#project_domain = default
# ID of domain in which users will be created. (string value)
#user_domain=default
#user_domain = default

View File

@ -1,10 +1,7 @@
[DEFAULT]
# The list of modules to copy from oslo-incubator.git
module=cliutils
module=log
module=service
module=periodic_task
# The base module to hold the copy of openstack.common
base=rally

View File

@ -0,0 +1,2 @@
# FIXME(andreykurilin): implement Rally-as-a-Service
pass

View File

@ -1,35 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg

# Configuration options for the standalone Rally REST API service
# ("Rally-as-a-Service"). Registered under the [rest] group, so they
# appear in the sample config as [rest] port / host.
REST_SERVICE_OPTS = [
    cfg.IntOpt("port",
               default=8877,
               help="The port for the Rally API server",
               ),
    cfg.StrOpt("host",
               # 0.0.0.0 binds to all available network interfaces.
               default="0.0.0.0",
               help="The listen IP for the Rally API server",
               ),
]
# Option group that namespaces the options above in the config file.
REST_OPT_GROUP = cfg.OptGroup(name="rest",
                              title="Options for the openstack-rally-api "
                                    "service")
# Register group and options on the global config object at import time,
# so importing this module is enough to make CONF.rest.* available.
CONF = cfg.CONF
CONF.register_group(REST_OPT_GROUP)
CONF.register_opts(REST_SERVICE_OPTS, REST_OPT_GROUP)

View File

@ -1,47 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from rally.common import log as logging
def setup_app(config):
    """Build the Pecan WSGI application for a given configuration.

    This is a generic application-factory entry point.

    :param config: An instance of :class:`pecan.Config`.
    :returns: A normal WSGI application, an instance of
              :class:`pecan.Pecan`.
    """
    return pecan.Pecan(config.app.root, debug=logging.is_debug())
def make_app():
    """Load Pecan application."""
    # Compute the debug flag once; both the app and wsme sections use it.
    debug_enabled = logging.is_debug()
    pecan_config = {
        "app": {
            "root": "rally.aas.rest.controllers.root.RootController",
            "modules": ["rally.aas.rest"],
            "debug": debug_enabled,
        },
        "wsme": {
            "debug": debug_enabled,
        },
    }
    return pecan.load_app(pecan_config)

View File

@ -1,47 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from rally.aas.rest.controllers import v1
from rally.aas.rest import types
class Root(wtypes.Base):
    """Root API resource: service name, description and API versions."""

    # NOTE(review): these are WSME type declarations, not default values.
    name = wtypes.text
    description = wtypes.text
    versions = [types.Version]

    @classmethod
    def convert(self, name, description, versions):
        # NOTE(review): first parameter is the class despite being named
        # "self" (this is a classmethod).
        root = Root(name=name, description=description)
        # Each element of "versions" is a version controller; presumably
        # its get() returns a mapping with a "result" key -- TODO confirm
        # against the wsme_pecan.wsexpose wrapper.
        root.versions = [v.get()["result"] for v in versions]
        return root
class RootController(rest.RestController):
    """Top-level REST controller routing "/" and mounting "/v1"."""

    # Sub-controller served under the /v1 path.
    v1 = v1.Controller()

    @wsme_pecan.wsexpose(Root)
    def get(self):
        # GET / -- returns service metadata plus the known API versions.
        name = "OpenStack Rally API"
        description = ("Rally is a Benchmark-as-a-Service project for "
                       "OpenStack.")
        root = Root.convert(name, description, [self.v1])
        return root

View File

@ -1,16 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.aas.rest.controllers.v1.root import Controller # noqa

View File

@ -1,35 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import rest
import wsmeext.pecan as wsme_pecan
from rally.aas.rest import types
class Version(types.Version):
    """Descriptor for the v1 API version."""

    @classmethod
    def convert(cls):
        # "CURRENT" marks v1 as the latest supported API; the updated_at
        # stamp is a fixed constant.
        v = super(Version, cls).convert("v1", "CURRENT",
                                        updated_at="2014-01-07T00:00:00Z")
        return v
class Controller(rest.RestController):
    """Version 1 API Controller Root."""

    @wsme_pecan.wsexpose(Version)
    def get(self):
        # GET /v1 -- returns the v1 version descriptor.
        return Version.convert()

View File

@ -1,64 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from wsme import types as wtypes
class Link(wtypes.Base):
    """A link representation."""

    # Target URL of the link.
    href = wtypes.text
    # Link relation, e.g. "self".
    rel = wtypes.text
    # Optional media type of the target.
    type = wtypes.text

    @classmethod
    def make_link(cls, rel, url, resource, type=wtypes.Unset):
        """Build a Link whose href is "<url>/<resource>"."""
        href = "{url}/{resource}".format(url=url, resource=resource)
        return cls(href=href, rel=rel, type=type)
class MediaType(wtypes.Base):
    """A media type representation."""

    # Base MIME type, e.g. "application/json".
    base = wtypes.text
    # Vendor-specific MIME type string.
    type = wtypes.text

    def __init__(self, base, type):
        self.base = base
        self.type = type
class Version(wtypes.Base):
    """A version type representations."""

    id = wtypes.text
    status = wtypes.text
    updated_at = wtypes.text
    media_types = [MediaType]
    links = [Link]

    @classmethod
    def convert(cls, id, status, updated_at=None, media_types=None,
                links=None):
        """Build a Version, filling default media types and links.

        :param id: version identifier, e.g. "v1"
        :param status: lifecycle status, e.g. "CURRENT"
        :param updated_at: optional timestamp string
        :param media_types: optional list of MediaType; defaults to one
            vendor JSON type derived from ``id``
        :param links: optional list of Link; defaults to a "self" link
            built from the current request's host URL
        """
        v = Version(id=id, status=status, updated_at=updated_at)
        if media_types is None:
            mime_type = "application/vnd.openstack.rally.%s+json" % id
            media_types = [MediaType("application/json", mime_type)]
        v.media_types = media_types
        if links is None:
            # NOTE(review): requires an active pecan request context.
            links = [Link.make_link("self", pecan.request.host_url, id)]
        v.links = links
        return v

View File

@ -33,7 +33,7 @@ from rally import osclients
LOG = logging.getLogger(__name__)
context_opts = [
USER_CONTEXT_OPTS = [
cfg.IntOpt("resource_management_workers",
default=30,
help="How many concurrent threads use for serving users "
@ -47,7 +47,7 @@ context_opts = [
]
CONF = cfg.CONF
CONF.register_opts(context_opts,
CONF.register_opts(USER_CONTEXT_OPTS,
group=cfg.OptGroup(name="users_context",
title="benchmark context options"))

View File

@ -22,7 +22,7 @@ from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
cinder_benchmark_opts = [
CINDER_BENCHMARK_OPTS = [
cfg.FloatOpt("cinder_volume_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before"
@ -45,7 +45,7 @@ cinder_benchmark_opts = [
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(cinder_benchmark_opts, group=benchmark_group)
CONF.register_opts(CINDER_BENCHMARK_OPTS, group=benchmark_group)
class CinderScenario(base.Scenario):

View File

@ -22,7 +22,7 @@ from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
glance_benchmark_opts = [
GLANCE_BENCHMARK_OPTS = [
cfg.FloatOpt("glance_image_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before "
@ -46,7 +46,7 @@ glance_benchmark_opts = [
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(glance_benchmark_opts, group=benchmark_group)
CONF.register_opts(GLANCE_BENCHMARK_OPTS, group=benchmark_group)
class GlanceScenario(base.Scenario):

View File

@ -21,7 +21,7 @@ from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
heat_benchmark_opts = [
HEAT_BENCHMARK_OPTS = [
cfg.FloatOpt("heat_stack_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before "
@ -56,7 +56,7 @@ heat_benchmark_opts = [
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(heat_benchmark_opts, group=benchmark_group)
CONF.register_opts(HEAT_BENCHMARK_OPTS, group=benchmark_group)
class HeatScenario(base.Scenario):
@ -148,4 +148,4 @@ class HeatScenario(base.Scenario):
stack,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_delete_timeout,
check_interval=CONF.benchmark.heat_stack_delete_poll_interval)
check_interval=CONF.benchmark.heat_stack_delete_poll_interval)

View File

@ -24,7 +24,7 @@ from rally.benchmark import utils as bench_utils
from rally import exceptions
nova_benchmark_opts = []
NOVA_BENCHMARK_OPTS = []
option_names_and_defaults = [
# action, prepoll delay, timeout, poll interval
("start", 0, 300, 1),
@ -45,7 +45,7 @@ option_names_and_defaults = [
]
for action, prepoll, timeout, poll in option_names_and_defaults:
nova_benchmark_opts.extend([
NOVA_BENCHMARK_OPTS.extend([
cfg.FloatOpt(
"nova_server_%s_prepoll_delay" % action,
default=float(prepoll),
@ -67,7 +67,7 @@ CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark",
title="benchmark options")
CONF.register_group(benchmark_group)
CONF.register_opts(nova_benchmark_opts, group=benchmark_group)
CONF.register_opts(NOVA_BENCHMARK_OPTS, group=benchmark_group)
class NovaScenario(base.Scenario):

View File

@ -28,7 +28,7 @@ from rally import exceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
TIMEOUT_OPTS = [
SAHARA_TIMEOUT_OPTS = [
cfg.IntOpt("cluster_create_timeout", default=600,
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("cluster_check_interval", default=5,
@ -40,7 +40,7 @@ TIMEOUT_OPTS = [
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(TIMEOUT_OPTS, group=benchmark_group)
CONF.register_opts(SAHARA_TIMEOUT_OPTS, group=benchmark_group)
class SaharaScenario(base.Scenario):

View File

@ -1,53 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The Rally Service API. """
import os
import sys
from wsgiref import simple_server
from oslo_config import cfg
from rally.aas.rest import app as rally_app
from rally.common.i18n import _
from rally.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def main():
    """Start the Rally REST API service on wsgiref's simple WSGI server."""
    # Initialize configuration and logging.
    CONF(sys.argv[1:], project="rally")
    logging.setup("rally")
    # Prepare application and bind to the service socket.
    host = CONF.rest.host
    port = CONF.rest.port
    app = rally_app.make_app()
    server = simple_server.make_server(host, port, app)
    # Start application.
    LOG.info(_("Starting server in PID %s") % os.getpid())
    LOG.info(_("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C stops the service without a traceback.
        pass
if __name__ == "__main__":
main()

View File

@ -23,6 +23,8 @@ import warnings
import jsonschema
from oslo_config import cfg
from oslo_utils import encodeutils
import prettytable
import six
from rally.common.i18n import _
@ -30,7 +32,6 @@ from rally.common import log as logging
from rally.common import utils
from rally.common import version
from rally import exceptions
from rally.openstack.common import cliutils
CONF = cfg.CONF
@ -41,6 +42,95 @@ LOG = logging.getLogger(__name__)
MARGIN = 3
class MissingArgs(Exception):
    """Raised when a callable is invoked without all required arguments."""

    def __init__(self, missing):
        # Keep the raw list of missing names for programmatic access.
        self.missing = missing
        joined = ", ".join(missing)
        super(MissingArgs, self).__init__(_("Missing arguments: %s") % joined)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing argument(s): a
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing argument(s): b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required argument is not supplied
    """
    # NOTE: inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() when it exists and fall back for Python 2.
    getspec = getattr(inspect, "getfullargspec", inspect.getargspec)
    argspec = getspec(fn)
    num_defaults = len(argspec.defaults or [])
    # Arguments without default values must be supplied by the caller.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        # Bound methods receive their first argument implicitly.
        return getattr(method, "__self__", None) is not None

    if isbound(fn):
        required_args.pop(0)
    # Anything not covered by a keyword argument...
    missing = [arg for arg in required_args if arg not in kwargs]
    # ...or by a positional argument is genuinely missing.
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list or objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting; each
        callable receives the whole object, not the field value
    :param sortby_index: index of the field for sorting table rows, or
        None to keep the incoming order
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if field_labels and fields differ in length
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # NOTE: interpolate before raising; previously the mapping was
        # passed as a second ValueError argument and the %(...)s
        # placeholders were never substituted into the message.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {"labels": field_labels, "fields": fields})
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {"sortby": field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = "l"
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Column names map to attribute names: spaces become
                # underscores; lowercased unless listed as mixed-case.
                if field in mixed_case_fields:
                    field_name = field.replace(" ", "_")
                else:
                    field_name = field.lower().replace(" ", "_")
                data = getattr(o, field_name, "")
                row.append(data)
        pt.add_row(row)
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def make_header(text, size=80, symbol="-"):
"""Unified way to make header message to CLI.
@ -302,8 +392,8 @@ def run(argv, categories):
# call the action with the remaining arguments
# check arguments
try:
cliutils.validate_args(fn, *fn_args, **fn_kwargs)
except cliutils.MissingArgs as e:
validate_args(fn, *fn_args, **fn_kwargs)
except MissingArgs as e:
# NOTE(mikal): this isn't the most helpful error message ever. It is
# long, and tells you a lot of things you probably don't want to know
# if you just got a single arg wrong.

View File

@ -33,7 +33,6 @@ from rally.common import utils
from rally import db
from rally import exceptions
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
from rally import osclients
@ -171,9 +170,8 @@ class DeploymentCommands(object):
r = [str(t[column]) for column in headers[:-1]]
r.append("" if t["uuid"] != current_deployment else "*")
table_rows.append(utils.Struct(**dict(zip(headers, r))))
common_cliutils.print_list(table_rows, headers,
sortby_index=headers.index(
"created_at"))
cliutils.print_list(table_rows, headers,
sortby_index=headers.index("created_at"))
else:
print(_("There are no deployments. "
"To create a new deployment, use:"
@ -222,7 +220,7 @@ class DeploymentCommands(object):
for ep in endpoints:
data = [ep.get(m, "") for m in headers]
table_rows.append(utils.Struct(**dict(zip(headers, data))))
common_cliutils.print_list(table_rows, headers)
cliutils.print_list(table_rows, headers)
@cliutils.deprecated_args(
"--uuid", dest="deployment", type=str,
@ -255,7 +253,7 @@ class DeploymentCommands(object):
print(_("Authentication Issues: %s.")
% sys.exc_info()[1])
return(1)
common_cliutils.print_list(table_rows, headers)
cliutils.print_list(table_rows, headers)
@cliutils.args("--deployment", type=str, dest="deployment",
help="UUID or name of the deployment")

View File

@ -24,7 +24,6 @@ from rally.common import utils
from rally import db
from rally import exceptions
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
from rally import osclients
@ -70,10 +69,10 @@ class ShowCommands(object):
data = [image.id, image.name, image.size]
table_rows.append(utils.Struct(**dict(zip(headers, data))))
common_cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
mixed_case_fields=mixed_case_fields)
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
mixed_case_fields=mixed_case_fields)
except exceptions.InvalidArgumentsException as e:
print(_("Authentication Issues: %s") % e)
@ -107,10 +106,10 @@ class ShowCommands(object):
flavor.ram, flavor.swap, flavor.disk]
table_rows.append(utils.Struct(**dict(zip(headers, data))))
common_cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
mixed_case_fields=mixed_case_fields)
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
mixed_case_fields=mixed_case_fields)
except exceptions.InvalidArgumentsException as e:
print(_("Authentication Issues: %s") % e)
@ -136,9 +135,9 @@ class ShowCommands(object):
data = [network.id, network.label, network.cidr]
table_rows.append(utils.Struct(**dict(zip(headers, data))))
common_cliutils.print_list(table_rows,
fields=headers,
mixed_case_fields=mixed_case_fields)
cliutils.print_list(table_rows,
fields=headers,
mixed_case_fields=mixed_case_fields)
except exceptions.InvalidArgumentsException as e:
print(_("Authentication Issues: %s") % e)
return(1)
@ -164,7 +163,7 @@ class ShowCommands(object):
secgroup.description]
table_rows.append(utils.Struct(**dict(zip(headers,
data))))
common_cliutils.print_list(
cliutils.print_list(
table_rows,
fields=headers,
mixed_case_fields=mixed_case_fields)
@ -192,9 +191,9 @@ class ShowCommands(object):
for keypair in nova_client.keypairs.list():
data = [keypair.name, keypair.fingerprint]
table_rows.append(utils.Struct(**dict(zip(headers, data))))
common_cliutils.print_list(table_rows,
fields=headers,
mixed_case_fields=mixed_case_fields)
cliutils.print_list(table_rows,
fields=headers,
mixed_case_fields=mixed_case_fields)
except exceptions.InvalidArgumentsException as e:
print(_("Authentication Issues: %s") % e)

View File

@ -39,7 +39,6 @@ from rally import consts
from rally import db
from rally import exceptions
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
class FailedToLoadTask(exceptions.RallyException):
@ -287,10 +286,9 @@ class TaskCommands(object):
if r["atomic_actions"]:
for action in atomic_actions:
dlist.append(r["atomic_actions"].get(action) or 0)
table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
common_cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
print()
task = db.task_get_detailed(task_id)
@ -355,8 +353,8 @@ class TaskCommands(object):
"0.0%", len(raw)]
table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))
common_cliutils.print_list(table_rows, fields=table_cols,
formatters=formatters)
cliutils.print_list(table_rows, fields=table_cols,
formatters=formatters)
if iterations_data:
_print_iterations_data(raw)
@ -396,9 +394,9 @@ class TaskCommands(object):
row = [str(key)] + ["n/a"] * 5
table_rows.append(rutils.Struct(**dict(zip(headers, row))))
print("\nScenario Specific Results\n")
common_cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
for result in raw:
errors = result["scenario_output"].get("errors")
@ -480,7 +478,7 @@ class TaskCommands(object):
x["duration"] = x["updated_at"] - x["created_at"]
if task_list:
common_cliutils.print_list(
cliutils.print_list(
task_list,
headers, sortby_index=headers.index("created_at"))
else:
@ -620,8 +618,8 @@ class TaskCommands(object):
if tojson:
print(json.dumps(data))
else:
common_cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
return failed_criteria
@cliutils.args("--task", type=str, dest="task", required=False,

View File

@ -30,7 +30,6 @@ from rally import consts
from rally import db
from rally import exceptions
from rally import objects
from rally.openstack.common import cliutils as common_cliutils
from rally.verification.tempest import diff
from rally.verification.tempest import json2html
@ -96,8 +95,8 @@ class VerifyCommands(object):
el["duration"] = el["updated_at"] - el["created_at"]
if verifications:
common_cliutils.print_list(verifications, fields,
sortby_index=fields.index("Created at"))
cliutils.print_list(verifications, fields,
sortby_index=fields.index("Created at"))
else:
print(_("There are no results from verifier. To run a verifier, "
"use:\nrally verify start"))
@ -172,14 +171,14 @@ class VerifyCommands(object):
print ("Total results of verification:\n")
total_fields = ["UUID", "Deployment UUID", "Set name", "Tests",
"Failures", "Created at", "Status"]
common_cliutils.print_list([verification], fields=total_fields)
cliutils.print_list([verification], fields=total_fields)
print ("\nTests:\n")
fields = ["name", "time", "status"]
values = [objects.Verification(test)
for test in six.itervalues(tests.data["test_cases"])]
common_cliutils.print_list(values, fields, sortby_index=sortby_index)
cliutils.print_list(values, fields, sortby_index=sortby_index)
if detailed:
for test in six.itervalues(tests.data["test_cases"]):

View File

@ -16,17 +16,19 @@
import logging
from oslo_config import cfg
from rally.openstack.common import log as oslogging
from oslo_log import handlers
from oslo_log import log as oslogging
common_cli_opts = [cfg.BoolOpt("rally-debug",
default=False,
help="Print debugging output only for Rally. "
"Off-site components stay quiet.")]
DEBUG_OPTS = [cfg.BoolOpt(
"rally-debug",
default=False,
help="Print debugging output only for Rally. "
"Off-site components stay quiet.")]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(DEBUG_OPTS)
oslogging.register_options(CONF)
logging.RDEBUG = logging.DEBUG + 1
logging.addLevelName(logging.RDEBUG, "RALLYDEBUG")
@ -43,25 +45,26 @@ WARNING = logging.WARNING
def setup(product_name, version="unknown"):
dbg_color = oslogging.ColorHandler.LEVEL_COLORS[logging.DEBUG]
oslogging.ColorHandler.LEVEL_COLORS[logging.RDEBUG] = dbg_color
dbg_color = handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG]
handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG] = dbg_color
oslogging.setup(product_name, version)
oslogging.setup(CONF, product_name, version)
if CONF.rally_debug:
oslogging.getLogger(None).logger.setLevel(logging.RDEBUG)
oslogging.getLogger(
project=product_name).logger.setLevel(logging.RDEBUG)
def getLogger(name="unknown", version="unknown"):
if name not in oslogging._loggers:
oslogging._loggers[name] = RallyContextAdapter(logging.getLogger(name),
name,
version)
{"project": "rally",
"version": version})
return oslogging._loggers[name]
class RallyContextAdapter(oslogging.ContextAdapter):
class RallyContextAdapter(oslogging.KeywordArgumentAdapter):
def debug(self, msg, *args, **kwargs):
self.log(logging.RDEBUG, msg, *args, **kwargs)

44
rally/common/opts.py Normal file
View File

@ -0,0 +1,44 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from rally.benchmark.context import users
from rally.benchmark.scenarios.cinder import utils as cinder_utils
from rally.benchmark.scenarios.glance import utils as glance_utils
from rally.benchmark.scenarios.heat import utils as heat_utils
from rally.benchmark.scenarios.nova import utils as nova_utils
from rally.benchmark.scenarios.sahara import utils as sahara_utils
from rally.common import log
from rally import exceptions
from rally import osclients
from rally.verification.tempest import config as tempest_conf
def list_opts():
return [
("DEFAULT",
itertools.chain(log.DEBUG_OPTS,
exceptions.EXC_LOG_OPTS,
osclients.OSCLIENTS_OPTS)),
("benchmark",
itertools.chain(cinder_utils.CINDER_BENCHMARK_OPTS,
glance_utils.GLANCE_BENCHMARK_OPTS,
heat_utils.HEAT_BENCHMARK_OPTS,
nova_utils.NOVA_BENCHMARK_OPTS,
sahara_utils.SAHARA_TIMEOUT_OPTS)),
("image",
itertools.chain(tempest_conf.IMAGE_OPTS)),
("users_context", itertools.chain(users.USER_CONTEXT_OPTS))
]

View File

@ -23,14 +23,14 @@ from rally.common import log as logging
LOG = logging.getLogger(__name__)
exc_log_opts = [
EXC_LOG_OPTS = [
cfg.BoolOpt("fatal_exception_format_errors",
default=False,
help="make exception message format errors fatal"),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
CONF.register_opts(EXC_LOG_OPTS)
class RallyException(Exception):

View File

@ -1,40 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo.i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
_translators = oslo.i18n.TranslatorFactory(domain='rally')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

View File

@ -1,271 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo.utils import encodeutils
from oslo.utils import strutils
import prettytable
import six
from six import moves
from rally.openstack.common._i18n import _
class MissingArgs(Exception):
    """Raised when a callable is invoked without all required arguments."""

    def __init__(self, missing):
        # Keep the raw list of missing names for programmatic access.
        self.missing = missing
        joined = ", ".join(missing)
        super(MissingArgs, self).__init__(_("Missing arguments: %s") % joined)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing argument(s): a
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing argument(s): b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required argument is not supplied
    """
    # NOTE: inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() when it exists and fall back for Python 2.
    getspec = getattr(inspect, "getfullargspec", inspect.getargspec)
    argspec = getspec(fn)
    num_defaults = len(argspec.defaults or [])
    # Arguments without default values must be supplied by the caller.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        # Bound methods receive their first argument implicitly.
        return getattr(method, "__self__", None) is not None

    if isbound(fn):
        required_args.pop(0)
    # Anything not covered by a keyword argument...
    missing = [arg for arg in required_args if arg not in kwargs]
    # ...or by a positional argument is genuinely missing.
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def arg(*args, **kwargs):
    """Build a decorator attaching one CLI argument spec to a command.

    The arguments mirror those of ``ArgumentParser.add_argument``.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def _apply(func):
        add_arg(func, *args, **kwargs)
        return func
    return _apply
def env(*args, **kwargs):
    """Return the value of the first non-empty environment variable.

    :param args: environment variable names, probed in order
    :param kwargs: optional ``default`` returned when none of the
        variables is set or non-empty (defaults to the empty string)
    """
    fallback = kwargs.get('default', '')
    for name in args:
        value = os.environ.get(name)
        if value:
            return value
    return fallback
def add_arg(func, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function.

    Argument specs accumulate on ``func.arguments``; new entries are
    prepended so stacked decorators keep their source order.
    """
    existing = getattr(func, 'arguments', None)
    if existing is None:
        existing = []
        func.arguments = existing
    entry = (args, kwargs)
    # Skip duplicates that can occur when the module is shared across tests.
    if entry not in existing:
        existing.insert(0, entry)
def unauthenticated(func):
    """Adds 'unauthenticated' attribute to decorated function.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Checks if the function does not require authentication.

    Mark such functions with the `@unauthenticated` decorator.

    :returns: bool
    """
    flag = getattr(func, 'unauthenticated', False)
    return flag
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list or objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting; each
        callable receives the whole object, not the field value
    :param sortby_index: index of the field for sorting table rows, or
        None to keep the incoming order
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if field_labels and fields differ in length
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # NOTE: interpolate before raising; previously the mapping was
        # passed as a second ValueError argument and the %(...)s
        # placeholders were never substituted into the message.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Column names map to attribute names: spaces become
                # underscores; lowercased unless listed as mixed-case.
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column; 0 disables wrapping
    """
    pt = prettytable.PrettyTable([dict_property, 'Value'])
    pt.align = 'l'
    for k, v in six.iteritems(dct):
        # convert dict to str to check length
        if isinstance(v, dict):
            v = six.text_type(v)
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        # NOTE(review): r'\n' is the two-character sequence backslash-n,
        # not a real newline -- this targets escaped tracebacks as stored
        # in API fault strings; confirm before "fixing" to '\n'.
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                # Only the first row of a multi-line value shows the key.
                col1 = ''
        else:
            pt.add_row([k, v])
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string()).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
    """Read password from TTY.

    Prompts up to ``max_password_prompts`` times on an interactive
    terminal; when $OS_VERIFY_PASSWORD is truthy the password must be
    entered twice and both entries must match.

    :param max_password_prompts: maximum number of prompt attempts
    :returns: the password string, or None when stdin is not a TTY,
        attempts are exhausted, or input ends (Ctrl-D)
    """
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    pw = None
    if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
        # Check for Ctrl-D
        try:
            for __ in moves.range(max_password_prompts):
                pw1 = getpass.getpass("OS Password: ")
                if verify:
                    pw2 = getpass.getpass("Please verify: ")
                else:
                    pw2 = pw1
                # Accept only a non-empty (and, if requested, verified)
                # password.
                if pw1 == pw2 and pw1:
                    pw = pw1
                    break
        except EOFError:
            pass
    return pw
def service_type(stype):
    """Return a decorator tagging a function with a 'service_type' attribute.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def _tag(func):
        setattr(func, 'service_type', stype)
        return func
    return _tag
def get_service_type(f):
    """Return the 'service_type' tag set by @service_type, or None."""
    stype = getattr(f, 'service_type', None)
    return stype
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of quoted items."""
    quoted = ["'%s'" % item for item in l]
    return ', '.join(quoted)
def exit(msg=''):
    """Write ``msg`` (if any) to stderr, then terminate with exit code 1."""
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)

View File

@ -1,145 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from rally.openstack.common._i18n import _LI
from rally.openstack.common import log as logging
# Shared help text for the backdoor_port option; also reused when
# _parse_port_range() raises a configuration error.
help_for_backdoor_port = (
    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
    "in listening on a random tcp port number; <port> results in listening "
    "on the specified port number (and not enabling backdoor if that port "
    "is in use); and <start>:<end> results in listening on the smallest "
    "unused port number within the specified range of port numbers. The "
    "chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
    cfg.StrOpt('backdoor_port',
               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]

# Register the option eagerly at import time so it is always available.
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
    """Raised when the backdoor_port option cannot be parsed."""

    def __init__(self, port_range, help_msg, ex):
        detail = {'range': port_range, 'ex': ex, 'help': help_msg}
        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
               '%(help)s' % detail)
        super(EventletBackdoorConfigValueError, self).__init__(msg)
        # Preserved so callers can inspect the offending value.
        self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
    """Dump every live greenthread and its stack to stdout."""
    for index, gthread in enumerate(_find_objects(greenlet.greenlet)):
        print(index, gthread)
        traceback.print_stack(gthread.gr_frame)
        print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
    """Parse a "<port>" or "<start>:<end>" backdoor port specification.

    :param port_range: string form of the backdoor_port option
    :returns: (start, end) tuple of ints (start == end for a single port)
    :raises EventletBackdoorConfigValueError: when either bound is not an
        integer or end < start
    """
    if ':' not in port_range:
        start, end = port_range, port_range
    else:
        start, end = port_range.split(':', 1)
    try:
        start, end = int(start), int(end)
        if end < start:
            raise ValueError
        return start, end
    except ValueError as ex:
        # NOTE: EventletBackdoorConfigValueError's signature is
        # (port_range, help_msg, ex); the original call passed the
        # exception and the help text in swapped order, producing a
        # garbled error message ("...: <help text>. <exception>").
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
    """Start the eventlet backdoor server when backdoor_port is configured.

    Side effects: replaces sys.displayhook and spawns a greenthread
    serving the backdoor REPL.

    :returns: the TCP port actually bound, or None when the backdoor is
        disabled (CONF.backdoor_port is None).
    """
    # Names injected into the backdoor REPL's namespace.
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()}
    )
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port

View File

@ -1,146 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import logging
import os
import tempfile
from oslo.utils import excutils
LOG = logging.getLogger(__name__)

# Module-level cache: filename -> {'data': <contents>, 'mtime': <float>}.
_FILE_CACHE = {}
def ensure_tree(path):
    """Create ``path`` and any missing ancestor directories.

    A no-op when the directory already exists.

    :param path: Directory to create
    :raises OSError: when creation fails, or when ``path`` exists but is
        not a directory
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Tolerate an already-existing directory (possibly created
        # concurrently), but re-raise when the existing path is a file.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def read_cached_file(filename, force_reload=False):
    """Return the contents of ``filename``, re-reading only on change.

    :param filename: path of the file to read
    :param force_reload: Whether to reload the file.
    :returns: tuple ``(reloaded, data)`` where ``reloaded`` tells whether
        the file was (re-)read from disk on this call
    """
    global _FILE_CACHE

    if force_reload:
        delete_cached_file(filename)

    mtime = os.path.getmtime(filename)
    entry = _FILE_CACHE.setdefault(filename, {})
    # A missing or stale cache entry triggers a fresh read.
    stale = not entry or mtime > entry.get('mtime', 0)
    if stale:
        LOG.debug("Reloading cached file %s" % filename)
        with open(filename) as handle:
            entry['data'] = handle.read()
        entry['mtime'] = mtime
    return (stale, entry['data'])
def delete_cached_file(filename):
    """Drop ``filename`` from the module file cache, if present.

    :param filename: cache key to remove
    """
    global _FILE_CACHE
    # pop() with a default is a no-op for unknown keys.
    _FILE_CACHE.pop(filename, None)
def delete_if_exists(path, remove=os.unlink):
    """Delete ``path``, silently ignoring "file not found".

    :param path: File to delete
    :param remove: Optional function to remove passed path
    :raises OSError: for any failure other than ENOENT
    """
    try:
        remove(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
    """Run a block, removing ``path`` when the block raises.

    The exception always propagates after ``remove(path)`` has run.

    :param path: File to work with
    :param remove: Optional function to remove passed path
    """
    try:
        yield
    except Exception:
        # save_and_reraise_exception re-raises with the original
        # traceback even when remove() itself misbehaves.
        with excutils.save_and_reraise_exception():
            remove(path)
def file_open(*args, **kwargs):
    """Thin wrapper around the built-in open().

    Kept as an indirection seam so a stub module can replace filesystem
    access in unit tests without altering system state; see the built-in
    open() documentation for parameters.
    """
    return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
    """Write ``content`` to a freshly created temporary file.

    :param content: bytes to write
    :param path: directory to create the file in (created when missing);
        defaults to the system temporary directory
    :param suffix: same as parameter 'suffix' for mkstemp
    :param prefix: same as parameter 'prefix' for mkstemp
    :returns: path of the file written
    """
    if path:
        ensure_tree(path)

    fd, filename = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
    try:
        os.write(fd, content)
    finally:
        # Always release the descriptor, even on a failed write.
        os.close(fd)
    return filename

View File

@ -1,45 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage that holds only weak references.

    Attribute reads dereference the stored weakref, yielding None once
    the referent has been garbage collected.
    """

    def __getattribute__(self, attr):
        ref = super(WeakLocal, self).__getattribute__(attr)
        # NOTE(mikal): what is stored is a weak reference, not the value
        # itself; call it to obtain the live object (None if collected).
        return ref() if ref else ref

    def __setattr__(self, attr, value):
        # Store only a weak reference so the value can fall out of scope.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()

View File

@ -1,326 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from rally.openstack.common import fileutils
from rally.openstack.common._i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)

# Inter-process locking configuration.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Enables or disables inter-process locks.'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("RALLY_LOCK_PATH"),
               help='Directory to use for lock files.')
]

CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    # Override the compiled-in default for the lock_path option.
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _FileLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # File object holding the lock; opened lazily in acquire().
        self.lockfile = None
        self.fname = name

    def acquire(self):
        """Spin (with a short sleep) until the file lock is obtained."""
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                 'exception': e})

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        """Unlock and close the lock file; failures are logged, not raised."""
        try:
            self.unlock()
            self.lockfile.close()
            LOG.debug('Released file lock "%s"', self.fname)
        except IOError:
            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                          self.fname)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        # True when the lock file is present on disk.
        return os.path.exists(self.fname)

    def trylock(self):
        # Platform-specific; implemented by _WindowsLock / _FcntlLock.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific; implemented by _WindowsLock / _FcntlLock.
        raise NotImplementedError()
class _WindowsLock(_FileLock):
    """File lock backed by msvcrt.locking() (Windows only)."""

    def trylock(self):
        # LK_NBLCK: non-blocking exclusive lock on the first byte.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _FcntlLock(_FileLock):
    """File lock backed by fcntl.lockf() (POSIX)."""

    def trylock(self):
        # LOCK_NB makes the attempt non-blocking; raises IOError if held.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Pick the platform-appropriate inter-process lock implementation; the
# locking module is imported lazily so the other platform's module is
# never required.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _FcntlLock

# Per-name semaphores for in-process locking; a WeakValueDictionary lets
# unused semaphores be reclaimed once no lock holder references them.
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
    """Build an (unacquired) inter-process lock for ``name``.

    :returns: an InterProcessLock instance; callers acquire it via the
        context-manager protocol or acquire()/release().
    """
    LOG.debug('Attempting to grab external lock "%(lock)s"',
              {'lock': name})
    lockfile = _get_lock_path(name, lock_file_prefix, lock_path)
    return InterProcessLock(lockfile)
def remove_external_lock_file(name, lock_file_prefix=None):
    """Delete the on-disk file backing an external lock.

    Useful housekeeping when many lock files accumulate; removal
    failures are logged and swallowed.
    """
    with internal_lock(name):
        target = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(target)
        except OSError:
            # Best effort only: the file may already be gone or busy.
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': target})
def internal_lock(name):
    """Return the per-name in-process semaphore, creating it on demand."""
    with _semaphores_lock:
        sem = _semaphores.get(name)
        if sem is None:
            sem = threading.Semaphore()
            # Keep the semaphore alive while someone references it; the
            # WeakValueDictionary drops it afterwards.
            _semaphores[name] = sem
            LOG.debug('Created new semaphore "%s"', name)
        else:
            LOG.debug('Using existing semaphore "%s"', name)
    return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
      lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
      should work across multiple processes. This means that if two different
      workers both run a method decorated with @synchronized('mylock',
      external=True), only one of them will execute at a time.
    """
    # The in-process semaphore is always taken first so threads of one
    # process serialize before competing for the file lock.
    int_lock = internal_lock(name)
    with int_lock:
        LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name})
        try:
            if external and not CONF.disable_process_locking:
                ext_lock = external_lock(name, lock_file_prefix, lock_path)
                with ext_lock:
                    yield ext_lock
            else:
                yield int_lock
        finally:
            LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
            ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
            ...

        @synchronized('mylock')
        def bar(self, *args):
            ...

    This way only one of either foo or bar can be executing at a time.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': func.__name__})
                    return func(*args, **kwargs)
            finally:
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': func.__name__})
        return wrapper
    return decorator
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils
        synchronized = lockutils.synchronized_with_prefix('nova-')

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
            ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """
    # A partial keeps the full @synchronized signature available.
    return functools.partial(synchronized,
                             lock_file_prefix=lock_file_prefix)
def main(argv):
    """Run a command with a throw-away lock directory.

    Creates a temporary directory, exports it via the RALLY_LOCK_PATH
    environment variable for the child process, runs ``argv[1:]`` and
    finally deletes the directory.

    Example::

        python -m openstack.common.lockutils python setup.py testr <etc>

    :param argv: full argument vector; argv[0] is ignored
    :returns: the child process return code
    """
    scratch_dir = tempfile.mkdtemp()
    os.environ["RALLY_LOCK_PATH"] = scratch_dir
    try:
        status = subprocess.call(argv[1:])
    finally:
        # Always clean up, even when the child command failed.
        shutil.rmtree(scratch_dir, ignore_errors=True)
    return status


if __name__ == '__main__':
    sys.exit(main(sys.argv))

View File

@ -1,705 +0,0 @@
# Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import socket
import sys
import traceback
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import importutils
import six
from six import moves
# True when running on Python 2.6; used for stdlib compatibility shims.
_PY26 = sys.version_info[0:2] == (2, 6)

from rally.openstack.common._i18n import _
from rally.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"


# CLI-only options controlling overall verbosity.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# CLI options selecting log destinations and basic formats.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of a logging configuration file. This file '
                    'is appended to any existing logging configuration '
                    'files. For details about logging configuration files, '
                    'see the Python logging module documentation.'),
    cfg.StrOpt('log-format',
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated. Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s .'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths.'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging. '
                     'Existing syslog format is DEPRECATED during I, '
                     'and will change in J to honor RFC5424.'),
    cfg.BoolOpt('use-syslog-rfc-format',
                # TODO(bogdando) remove or use True after existing
                # syslog format deprecation in J
                default=False,
                help='(Optional) Enables or disables syslog rfc5424 format '
                     'for logging. If enabled, prefixes the MSG part of the '
                     'syslog message with APP-NAME (RFC5424). The '
                     'format without the APP-NAME is deprecated in I, '
                     'and will be removed in J.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='Syslog facility to receive log lines.')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error.')
]

# Default per-logger levels for noisy third-party libraries.
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
                      'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
                      'oslo.messaging=INFO', 'iso8601=WARN',
                      'requests.packages.urllib3.connectionpool=WARN',
                      'urllib3.connectionpool=WARN', 'websocket=WARN',
                      "keystonemiddleware=WARN", "routes.middleware=WARN",
                      "stevedore=WARN"]

# Non-CLI options controlling record formatting.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='Format string to use for log messages with context.'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='Format string to use for log messages without context.'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='Data to append to log format when level is DEBUG.'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='Prefix each line of exception output with this format.'),
    cfg.ListOpt('default_log_levels',
                default=DEFAULT_LOG_LEVELS,
                help='List of logger=LEVEL pairs.'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='Enables or disables publication of error events.'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='Enables or disables fatal status of deprecations.'),

    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance that is passed with the log '
                    'message.'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='The format for an instance UUID that is passed with the '
                    'log message.'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# logging.NullHandler appeared in Python 2.7; provide a fallback for 2.6.
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        """No-op handler used to silence "no handlers" warnings."""

        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # No I/O is performed, so no lock is needed.
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file path from CONF.log_file / CONF.log_dir.

    :param binary: program name used when only log_dir is set; defaults
        to the running binary's name
    :returns: a path string, or None when file logging is not configured
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir

    # log_file alone: use it verbatim; with log_dir: treat as relative.
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    return None
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter with an extra AUDIT level convenience method."""

    def audit(self, msg, *args, **kwargs):
        # AUDIT is a synthesized level registered at module import time.
        self.log(logging.AUDIT, msg, *args, **kwargs)

    def isEnabledFor(self, level):
        # LoggerAdapter.isEnabledFor() was added in Python 2.7; on 2.6
        # replicate it by delegating to the wrapped logger (the exact
        # same logic).
        if _PY26:
            return self.logger.isEnabledFor(level)
        return super(BaseLoggerAdapter, self).isEnabledFor(level)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creation of the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # The wrapped logger; built lazily by the `logger` property.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
            if six.PY3:
                # In Python 3, the code fails because the 'manager' attribute
                # cannot be found when using a LoggerAdapter as the
                # underlying logger. Work around this issue.
                self._logger.manager = self._logger.logger.manager
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter injecting request-context fields into every log record."""

    # Alias so existing callers of .warn() keep working.
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # msg -> list of args tuples already emitted by deprecated().
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.

        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.

        Otherwise, the message will be logged (once) at the 'warn' level.

        :raises: :class:`DeprecatedConfig` if the system is configured for
                 fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)

        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())

        if args in sent_args:
            # Already logged this message, so don't log it again.
            return

        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Enrich `kwargs['extra']` with context/instance/project fields."""
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(msg, six.text_type):
            msg = six.text_type(msg)

        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']

        # Prefer an explicit context kwarg, then the thread-local store.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))

        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra

        extra.setdefault('user_identity', kwargs.pop('user_identity', None))

        extra['project'] = self.project
        extra['version'] = self.version
        # Keep a snapshot of everything for formatters that want it whole.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes each LogRecord as a JSON document."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Render exc_info as a list of traceback lines."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Split multi-line entries and drop the empty strings so the
            # result is one flat list of non-empty lines.
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Return the record serialized as a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}

        if hasattr(record, 'extra'):
            message['extra'] = record.extra

        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)

        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL.

    :param product_name: logger name to emit the record on
    """
    def excepthook(exc_type, value, tb):
        summary = "".join(traceback.format_exception_only(exc_type, value))
        # Hand the full traceback through exc_info so formatters can
        # render it.
        getLogger(product_name).critical(
            summary, **{'exc_info': (exc_type, value, tb)})
    return excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        # Stored individually so __str__ can interpolate lazily.
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        details = {'log_config': self.log_config, 'err_msg': self.err_msg}
        return self.message % details
def _load_log_config(log_config_append):
    """Feed a logging configuration file to logging.config.fileConfig.

    :param log_config_append: path of the config file
    :raises LogConfigError: when the file cannot be parsed
    """
    try:
        # Keep already-configured loggers alive; only extend the config.
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except (moves.configparser.Error, KeyError) as exc:
        raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
    """Setup logging for ``product_name``.

    Uses the log-config-append file when configured, otherwise builds
    handlers from the individual CONF options, and installs an
    excepthook that logs uncaught exceptions.
    """
    config_file = CONF.log_config_append
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf(product_name, version)
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
                 default_log_levels=None):
    """Override compiled-in defaults of the logging options.

    Either argument may be None, in which case that default is left
    untouched.
    """
    # NOTE: default_log_levels was introduced after
    # logging_context_format_string in a backwards-incompatible change,
    # hence the explicit None checks as insurance for old callers.
    if default_log_levels is not None:
        cfg.set_defaults(log_opts, default_log_levels=default_log_levels)
    if logging_context_format_string is not None:
        cfg.set_defaults(
            log_opts,
            logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility code.

    :returns: the numeric syslog facility
    :raises TypeError: when the configured facility name is unknown
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    # First try the LOG_* attribute form, then the short-name mapping.
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)

    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)

    if facility is None:
        # list() is required: dict.keys() returns a view on Python 3,
        # which has no extend(), so the original bare keys() call crashed
        # with AttributeError while building the error message.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))

    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prefixes each message with APP-NAME (RFC5424)."""

    def __init__(self, *args, **kwargs):
        # Captured once; prepended to every formatted message.
        self.binary_name = _get_binary_name()
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)

    def format(self, record):
        # Do not use super() unless type(logging.handlers.SysLogHandler)
        #  is 'type' (Python 2.7).
        # Use old style calls, if the type is 'classobj' (Python 2.6)
        msg = logging.handlers.SysLogHandler.format(self, record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """(Re)build the root logger's handlers and levels from CONF values.

    Installs file, stream, publish-errors and syslog handlers as
    configured, attaches formatters, and applies per-module log levels.

    :param project: project name passed to ContextFormatter
    :param version: project version passed to ContextFormatter
    """
    log_root = getLogger(None).logger
    # Start from a clean slate so repeated setup calls do not stack
    # duplicate handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)

    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)

    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)

    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)

    if CONF.publish_errors:
        # Forward ERROR records onto the message bus as notifications.
        handler = importutils.import_object(
            "oslo.messaging.notify.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)

    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))

    # Root level precedence: debug > verbose > warning.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)

    for pair in CONF.default_log_levels:
        # Each entry looks like "module=LEVEL".
        mod, _sep, level_name = pair.partition('=')
        logger = logging.getLogger(mod)
        # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
        # to integer code.
        if sys.version_info < (2, 7):
            level = logging.getLevelName(level_name)
            logger.setLevel(level)
        else:
            logger.setLevel(level_name)

    if CONF.use_syslog:
        try:
            facility = _find_facility_from_conf()
            # TODO(bogdando) use the format provided by RFCSysLogHandler
            #   after existing syslog format deprecation in J
            if CONF.use_syslog_rfc_format:
                syslog = RFCSysLogHandler(facility=facility)
            else:
                syslog = logging.handlers.SysLogHandler(facility=facility)
            log_root.addHandler(syslog)
        except socket.error:
            log_root.error('Unable to add syslog handler. Verify that syslog '
                           'is running.')
# Cache of ContextAdapter instances keyed by logger name, so repeated
# getLogger() calls return the same adapter object.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return (and memoize) a ContextAdapter wrapping the named logger.

    :param name: standard logging logger name
    :param version: version string reported by the adapter
    """
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.

    :param name: logger name used once the real logger is materialized
    :param version: version string forwarded to the real logger
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """File-like adapter that forwards write() calls to a logger.

    Lets code expecting a writable stream (e.g. WSGI server output)
    emit into the logging subsystem instead.
    """

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Drop the trailing newline the stream protocol appends; the
        # logging handler supplies its own line termination.
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context
    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')

        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""

        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)

        # store project info
        record.project = self.project
        record.version = self.version

        # store request info pulled from thread-local storage, if present
        context = getattr(local.store, 'context', None)
        if context:
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)

        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''

        # Records carrying a request_id use the context-aware format.
        if record.__dict__.get('request_id'):
            fmt = CONF.logging_context_format_string
        else:
            fmt = CONF.logging_default_format_string

        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            fmt += " " + CONF.logging_debug_format_suffix

        # Python 3.2+ stores the format in a style object rather than
        # directly on the formatter.
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)

        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()

        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)

        # Prefix every traceback line so multi-line exceptions remain
        # attributable in interleaved logs.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """Stream handler that exposes an ANSI color escape per log level.

    The escape code is stored on the record as ``record.color`` so format
    strings containing ``%(color)s`` can render colorized output.
    NOTE(review): relies on the non-standard ``logging.AUDIT`` level that
    is registered elsewhere in this module.
    """

    # Map of log level -> ANSI SGR escape sequence.
    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Attach the color before delegating to the standard formatter.
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised when a fatally-deprecated config option is still in use."""

    # Translated template; %(msg)s is filled with the caller's detail text.
    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        super(Exception, self).__init__(self.message % dict(msg=msg))

View File

@ -1,147 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
from eventlet import event
from eventlet import greenthread
from rally.openstack.common._i18n import _LE, _LW
from rally.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
# with time.time() called in the standard logging module
# during unittests.
_ts = lambda: time.time()
class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCallBase.wait()
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        # Stored (not passed to Exception.__init__) and later delivered to
        # the waiter via done.send(e.retvalue).
        self.retvalue = retvalue
class LoopingCallBase(object):
    """Shared state and plumbing for the looping-call variants.

    Holds the target callable and its arguments plus the running flag;
    subclasses implement start() and are responsible for setting
    ``self.done`` to an event that fires when the loop terminates.
    """

    def __init__(self, f=None, *args, **kw):
        self.f = f
        self.args = args
        self.kw = kw
        self._running = False
        self.done = None

    def stop(self):
        """Request the loop to exit after the current iteration."""
        self._running = False

    def wait(self):
        """Block until the loop has finished and return its final value."""
        return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        """Spawn a greenthread invoking ``self.f`` every ``interval`` seconds.

        :param interval: seconds between the *starts* of successive calls
        :param initial_delay: optional sleep before the first call
        :returns: an eventlet event that fires when the loop terminates
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    # Negative delay: time left until the next slot.
                    # Positive delay: the call overran the interval.
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warn(_LW('task %(func_name)s run outlasted '
                                     'interval by %(delay).2f sec'),
                                 {'func_name': repr(self.f), 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                # Normal termination requested by the task itself.
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done
class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        """Spawn the loop; the sleep time comes from the task's return value.

        :param initial_delay: optional sleep before the first call
        :param periodic_interval_max: upper bound applied to the sleep
            requested by the task, or None for no cap
        :returns: an eventlet event that fires when the loop terminates
        """
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    # The task itself decides how long to idle.
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)s sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': repr(self.f), 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done

View File

@ -1,206 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
from oslo.config import cfg
import six
from rally.openstack.common._i18n import _, _LE, _LI
from rally.openstack.common import log as logging
# Config option deciding whether tasks flagged external_process_ok are
# still run inside this process.
periodic_opts = [
    cfg.BoolOpt('run_external_periodic_tasks',
                default=True,
                help='Some periodic tasks can be run in a separate process. '
                     'Should we run them here?'),
]

CONF = cfg.CONF
CONF.register_opts(periodic_opts)

LOG = logging.getLogger(__name__)

# Spacing (seconds) applied to tasks that do not declare their own.
DEFAULT_INTERVAL = 60.0
class InvalidPeriodicTaskArg(Exception):
    """Raised when @periodic_task is invoked with an unsupported argument."""

    message = _("Unexpected argument for periodic task creation: %(arg)s.")

    def __init__(self, arg=None):
        # Plain Exception does not accept keyword arguments, so the
        # existing `raise InvalidPeriodicTaskArg(arg=...)` call sites
        # would fail with TypeError without this constructor; format the
        # message template here instead.
        super(InvalidPeriodicTaskArg, self).__init__(
            self.message % {'arg': arg})
def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

        1. Without arguments '@periodic_task', this will be run on the default
           interval of 60 seconds.

        2. With arguments:
           @periodic_task(spacing=N [, run_immediately=[True|False]])
           this will be run on approximately every N seconds. If this number is
           negative the periodic task will be disabled. If the run_immediately
           argument is provided and has a value of 'True', the first run of the
           task will be shortly after task scheduler starts.  If
           run_immediately is omitted or set to 'False', the first time the
           task runs will be approximately N seconds after the task scheduler
           starts.
    """
    def decorator(f):
        # Test for old style invocation
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            # The operator delegated these tasks to an external process.
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)

        # Control frequency
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        if f._periodic_immediate:
            # A last-run of None makes run_periodic_tasks treat the task
            # as immediately due.
            f._periodic_last_run = None
        else:
            f._periodic_last_run = time.time()
        return f

    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parenthesis.
    #
    # In the 'with-parenthesis' case (with kwargs present), this function needs
    # to return a decorator function since the interpreter will invoke it like:
    #
    #   periodic_task(*args, **kwargs)(f)
    #
    # In the 'without-parenthesis' case, the original function will be passed
    # in as the first argument, like:
    #
    #   periodic_task(f)
    if kwargs:
        return decorator
    else:
        return decorator(args[0])
class _PeriodicTasksMeta(type):

    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks.

        Scans the class body for methods marked by @periodic_task and
        records (name, task) pairs plus each task's spacing on the class.
        """
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                task = value
                name = task.__name__

                # Negative spacing means the task is explicitly disabled.
                if task._periodic_spacing < 0:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'its interval is negative'),
                             {'task': name})
                    continue
                if not task._periodic_enabled:
                    LOG.info(_LI('Skipping periodic task %(task)s because '
                                 'it is disabled'),
                             {'task': name})
                    continue

                # A periodic spacing of zero indicates that this task should
                # be run on the default interval to avoid running too
                # frequently.
                if task._periodic_spacing == 0:
                    task._periodic_spacing = DEFAULT_INTERVAL

                cls._periodic_tasks.append((name, task))
                cls._periodic_spacing[name] = task._periodic_spacing
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...
0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded to the nearest second, this
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
    """Base class whose subclasses may declare @periodic_task methods.

    The metaclass collects the decorated methods; run_periodic_tasks()
    executes the ones that are due and returns how long the caller may
    sleep before another one becomes due.
    """

    def __init__(self):
        super(PeriodicTasks, self).__init__()
        # Per-instance copy of each task's last-run timestamp.
        self._periodic_last_run = {}
        for name, task in self._periodic_tasks:
            self._periodic_last_run[name] = task._periodic_last_run

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            # Record the run against the boundary grid so tasks stay on
            # schedule even when individual runs are late.
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            # Cooperative yield between tasks.
            time.sleep(0)

        return idle_for

View File

@ -1,504 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from rally.openstack.common import eventlet_backdoor
from rally.openstack.common._i18n import _LE, _LI, _LW
from rally.openstack.common import log as logging
from rally.openstack.common import systemd
from rally.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
def _is_sighup_and_daemon(signo):
    """Return True only when ``signo`` is SIGHUP and we run as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        return _is_daemon()
    # Not a SIGHUP (or the platform has none): skip the daemon check.
    return False
def _signo_to_signame(signo):
    """Map a handled signal number to its symbolic name.

    :raises KeyError: for signals this launcher does not handle
    """
    names = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        names[signal.SIGHUP] = 'SIGHUP'
    return names[signo]
def _set_signals_handler(handler):
    """Install ``handler`` for every termination signal we care about."""
    handled = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        handled.append(signal.SIGHUP)
    for signo in handled:
        signal.signal(signo, handler)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None
        """
        self.services = Services()
        # Eventlet backdoor port for live debugging; None when disabled.
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None
        """
        # Expose the backdoor port to the service before starting it.
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None
        """
        self.services.stop()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None
        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None
        """
        cfg.CONF.reload_config_files()
        self.services.restart()
class SignalExit(SystemExit):
    """SystemExit subclass that remembers which signal triggered it."""

    def __init__(self, signo, exccode=1):
        # Exit code defaults to 1, mirroring an abnormal termination.
        SystemExit.__init__(self, exccode)
        self.signo = signo
class ServiceLauncher(Launcher):
    """Launcher that runs services in the current process and translates
    termination signals into a clean shutdown (or a SIGHUP restart for
    daemonized processes).
    """

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        raise SignalExit(signo)

    def handle_signal(self):
        """Install this launcher's signal handlers."""
        _set_signals_handler(self._handle_signal)

    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Wait until services finish or a signal arrives.

        :param ready_callback: optional callable invoked once waiting starts
        :returns: tuple of (exit status, signal number or 0)
        """
        status = None
        signo = 0

        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            self.stop()
        return status, signo

    def wait(self, ready_callback=None):
        """Run until a non-restart exit and return the exit status.

        A SIGHUP received while daemonized triggers a config reload and a
        restart instead of terminating.
        """
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()
class ServiceWrapper(object):
    """Bookkeeping record pairing a service with its worker processes.

    Tracks the desired worker count, the live child PIDs, and recent
    fork timestamps (used by ProcessLauncher to throttle respawn storms).
    """

    def __init__(self, service, workers):
        self.children = set()
        self.forktimes = []
        self.service = service
        self.workers = workers
class ProcessLauncher(object):
    """Forking launcher: runs services in child worker processes and
    respawns workers when they die.
    """

    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        # The write end stays open only in the parent; children watch the
        # read end to detect an unexpected parent death.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()

    def handle_signal(self):
        """Install the parent process's signal handlers."""
        _set_signals_handler(self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Remember which signal arrived and stop the main loop.
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_LI('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)

        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run the child's launcher until exit; return (status, signo)."""
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo

    def _child_process(self, service):
        """Post-fork setup; returns a Launcher running ``service``."""
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one worker for ``wrap``; in the parent, return its PID."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # Child: run the service until a non-restart exit, then leave
            # via os._exit so no parent cleanup code runs here.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()

            os._exit(status)

        LOG.info(_LI('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Fork ``workers`` children all running ``service``."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_LI('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child, if any; return its ServiceWrapper."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        """Reap dead children and fork replacements while running."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called.  Don't clean up here.
                if not self.sigcaught:
                    return

                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # SIGHUP while daemonized: forward it to the children and
                # keep supervising.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH means the child is already gone; anything else is
                # unexpected.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Green-thread pool all of this service's work runs in.
        self.tg = threadgroup.ThreadGroup(threads)

        # signal that the service is done shutting itself down:
        self._done = event.Event()

    def reset(self):
        """Re-arm the shutdown event so the service can be restarted."""
        # NOTE(Fengqian): docs for Event.reset() recommend against using it
        self._done = event.Event()

    def start(self):
        """Hook for subclasses; invoked when the service starts."""
        pass

    def stop(self):
        """Stop the thread group and signal that shutdown completed."""
        self.tg.stop()
        self.tg.wait()
        # Signal that service cleanup is done:
        if not self._done.ready():
            self._done.send()

    def wait(self):
        """Block until the service has finished shutting down."""
        self._done.wait()
class Services(object):
    """Container running a set of Service objects, one thread each."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Fired once at shutdown to release all run_service wrapper threads.
        self.done = event.Event()

    def add(self, service):
        """Register ``service`` and start it in its own thread."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Stop every service gracefully, then reap the wrapper threads."""
        # wait for graceful shutdown of services:
        for service in self.services:
            service.stop()
            service.wait()

        # Each service has performed cleanup, now signal that the run_service
        # wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        """Block until all service threads have finished."""
        self.tg.wait()

    def restart(self):
        """Stop everything, reset service state and start them again."""
        self.stop()
        self.done = event.Event()
        for restart_service in self.services:
            restart_service.reset()
            self.tg.add_thread(self.run_service, restart_service, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None
        """
        service.start()
        done.wait()
def launch(service, workers=1):
    """Run ``service`` under the appropriate launcher.

    :param service: the Service instance to run
    :param workers: number of worker processes; None or 1 keeps the
        service in-process, any other count forks child workers
    :returns: the launcher that owns the running service
    """
    if workers in (None, 1):
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)

    return launcher

View File

@ -1,106 +0,0 @@
# Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from rally.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
    """Send notification to Systemd that service is ready.

    For details see
    http://www.freedesktop.org/software/systemd/man/sd_notify.html
    """
    # Keep NOTIFY_SOCKET in the environment so notify() may be called again.
    _sd_notify(False, 'READY=1')
def notify_once():
    """Send notification once to Systemd that service is ready.

    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    _sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
    """Wait for systemd style notification on the socket.

    :param notify_socket: local socket address
    :type notify_socket:  string
    :param timeout:       socket timeout
    :type timeout:        float
    :returns:             0 service ready
                          1 service not ready
                          2 timeout occurred
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.bind(_abstractify(notify_socket))
    try:
        msg = sock.recv(512)
    except socket.timeout:
        return 2
    finally:
        sock.close()
    # recv() returns bytes on Python 3 (str on Python 2); compare against
    # a byte string so the membership test works on both.
    if b'READY=1' in msg:
        return 0
    else:
        return 1
if __name__ == '__main__':
    # simple CLI for testing
    if len(sys.argv) == 1:
        # No arguments: just emit a readiness notification.
        notify()
    elif len(sys.argv) >= 2:
        # First argument is the wait timeout in seconds; block until a
        # readiness message arrives on $NOTIFY_SOCKET, then exit with
        # onready()'s status code (0 ready / 1 not ready / 2 timeout).
        timeout = float(sys.argv[1])
        notify_socket = os.getenv('NOTIFY_SOCKET')
        if notify_socket:
            retval = onready(notify_socket, timeout)
            sys.exit(retval)

View File

@ -1,147 +0,0 @@
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from eventlet import greenpool
from rally.openstack.common import log as logging
from rally.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify if.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
    """Wrapper around a greenthread, that holds a reference to the
    :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
    it has done so it can be removed from the threads list.
    """

    def __init__(self, thread, group):
        self.thread = thread
        # Arrange for _thread_done() to run when the greenthread finishes,
        # removing this wrapper from the group's bookkeeping.
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        # Kill the underlying greenthread immediately.
        self.thread.kill()

    def wait(self):
        # Block until the greenthread finishes; return its result.
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        # Attach an additional completion callback to the greenthread.
        self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
"""The point of the ThreadGroup class is to:
* keep track of timers and greenthreads (making it easier to stop them
when need be).
* provide an easy API to add timers.
"""
def __init__(self, thread_pool_size=10):
self.pool = greenpool.GreenPool(thread_pool_size)
self.threads = []
self.timers = []
def add_dynamic_timer(self, callback, initial_delay=None,
periodic_interval_max=None, *args, **kwargs):
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
timer.start(initial_delay=initial_delay,
periodic_interval_max=periodic_interval_max)
self.timers.append(timer)
def add_timer(self, interval, callback, initial_delay=None,
*args, **kwargs):
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
pulse.start(interval=interval,
initial_delay=initial_delay)
self.timers.append(pulse)
def add_thread(self, callback, *args, **kwargs):
gt = self.pool.spawn(callback, *args, **kwargs)
th = Thread(gt, self)
self.threads.append(th)
return th
def thread_done(self, thread):
self.threads.remove(thread)
def _stop_threads(self):
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
# don't kill the current thread.
continue
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
self.timers = []
def stop(self, graceful=False):
"""stop function has the option of graceful=True/False.
* In case of graceful=True, wait for all threads to be finished.
Never kill threads.
* In case of graceful=False, kill threads immediately.
"""
self.stop_timers()
if graceful:
# In case of graceful=True, wait for all threads to be
# finished, never kill threads
self.wait()
else:
# In case of graceful=False(Default), kill threads
# immediately
self._stop_threads()
def wait(self):
for x in self.timers:
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't
# modify the list while we're iterating
for x in self.threads[:]:
if x is current:
continue
try:
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)

View File

@ -36,14 +36,16 @@ from rally import exceptions
CONF = cfg.CONF
CONF.register_opts([
OSCLIENTS_OPTS = [
cfg.FloatOpt("openstack_client_http_timeout", default=180.0,
help="HTTP timeout for any of OpenStack service in seconds"),
cfg.BoolOpt("https_insecure", default=False,
help="Use SSL for all OpenStack API interfaces"),
cfg.StrOpt("https_cacert", default=None,
help="Path to CA server cetrificate for SSL")
])
]
CONF.register_opts(OSCLIENTS_OPTS)
# NOTE(boris-42): super dirty hack to fix nova python client 2.17 thread safe

View File

@ -34,7 +34,7 @@ from rally import osclients
LOG = logging.getLogger(__name__)
image_opts = [
IMAGE_OPTS = [
cfg.StrOpt("cirros_version",
default="0.3.2",
help="Version of cirros image"),
@ -43,7 +43,7 @@ image_opts = [
help="Cirros image name"),
]
CONF = cfg.CONF
CONF.register_opts(image_opts, "image")
CONF.register_opts(IMAGE_OPTS, "image")
class TempestConfigCreationFailure(exceptions.RallyException):

View File

@ -8,14 +8,14 @@ iso8601>=0.1.9
Jinja2>=2.6 # BSD License (3 clause)
jsonschema>=2.0.0,<3.0.0
netaddr>=0.7.12
oslo.config>=1.6.0 # Apache-2.0
oslo.db>=1.4.1 # Apache-2.0
oslo.i18n>=1.3.0 # Apache-2.0
oslo.config>=1.6.0 # Apache-2.0
oslo.db>=1.4.1 # Apache-2.0
oslo.i18n>=1.3.0 # Apache-2.0
oslo.log>=0.1.0 # Apache-2.0
oslo.serialization>=1.2.0 # Apache-2.0
oslo.utils>=1.2.0 # Apache-2.0
paramiko>=1.13.0
pbr>=0.6,!=0.7,<1.0
pecan>=0.8.0
PrettyTable>=0.7,<0.8
PyYAML>=3.1.0
psycopg2
@ -36,4 +36,3 @@ requests>=2.2.0,!=2.4.0
SQLAlchemy>=0.9.7,<=0.9.99
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
six>=1.9.0
WSME>=0.6

View File

@ -24,13 +24,14 @@ packages =
data_files =
/etc/bash_completion.d =
tools/rally.bash_completion
etc/rally.bash_completion
[entry_points]
console_scripts =
rally = rally.cmd.main:main
rally-api = rally.cmd.api:main
rally-manage = rally.cmd.manage:main
oslo.config.opts =
rally = rally.common.opts:list_opts
[global]
setup-hooks =

View File

@ -12,7 +12,7 @@ Rally Specific Commandments
* [N302] - Ensure that nonexistent "assert_called" is not used
* [N303] - Ensure that nonexistent "assert_called_once" is not used
* [N310-N314] - Reserved for rules related to logging
* [N310] - Ensure that ``rally.common.log`` is used instead of ``rally.openstack.common.log``
* [N310] - Ensure that ``rally.common.log`` is used as logging module
* [N311] - Validate that debug level logs are not translated
* [N312] - Validate correctness of debug on check.
* [N32x] - Reserved for rules related to assert* methods

View File

@ -120,8 +120,8 @@ def check_import_of_logging(logical_line, filename):
excluded_files = ["./rally/common/log.py", "./tests/unit/test_log.py"]
forbidden_imports = ["from rally.openstack.common import log",
"import rally.openstack.common.log",
forbidden_imports = ["from oslo_log",
"import oslo_log",
"import logging"]
if filename not in excluded_files:

View File

@ -1,59 +0,0 @@
# Copyright 2014 Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test for rest types."""
from __future__ import print_function
from rally.aas.rest import types
from tests.unit import test
class TestLink(test.TestCase):
def test_make_link(self):
url = "http://localhost:8877"
rel = "version"
link = types.Link.make_link(rel, url, "fake")
self.assertEqual("http://localhost:8877/fake", link.href)
self.assertEqual(rel, link.rel)
class TestMediaType(test.TestCase):
def test_init(self):
base = "application/json"
_type = "application/vnd.openstack.rally.v1+json"
mt = types.MediaType(base, _type)
self.assertEqual(base, mt.base)
self.assertEqual(_type, mt.type)
class TestVersion(test.TestCase):
def test_convert(self):
id = "v1"
status = "active"
updated_at = "2014-01-07T00:00:00Z"
link = types.Link.make_link("version", "http://localhost:8877", "fake")
version = types.Version.convert(id, status, updated_at=updated_at,
links=[link])
self.assertEqual(id, version.id)
self.assertEqual(status, version.status)
self.assertEqual(updated_at, version.updated_at)
self.assertEqual("application/json", version.media_types[0].base)
self.assertEqual("application/vnd.openstack.rally.v1+json",
version.media_types[0].type)
self.assertEqual([link], version.links)

View File

@ -100,7 +100,7 @@ class DeploymentCommandsTestCase(test.TestCase):
self.assertRaises(exceptions.InvalidArgumentsException,
self.deployment.destroy, None)
@mock.patch("rally.cmd.commands.deployment.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.utils.Struct")
@mock.patch("rally.cmd.commands.deployment.envutils.get_global")
@mock.patch("rally.cmd.commands.deployment.db.deployment_list")
@ -128,7 +128,7 @@ class DeploymentCommandsTestCase(test.TestCase):
sortby_index=headers.index(
"created_at"))
@mock.patch("rally.cmd.commands.deployment.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.utils.Struct")
@mock.patch("rally.cmd.commands.deployment.envutils.get_global")
@mock.patch("rally.cmd.commands.deployment.db.deployment_list")
@ -171,7 +171,7 @@ class DeploymentCommandsTestCase(test.TestCase):
self.assertRaises(exceptions.InvalidArgumentsException,
self.deployment.config, None)
@mock.patch("rally.cmd.commands.deployment.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.cliutils.print_list")
@mock.patch("rally.cmd.commands.deployment.utils.Struct")
@mock.patch("rally.cmd.commands.deployment.db.deployment_get")
def test_show(self, mock_deployment, mock_struct, mock_print_list):
@ -206,4 +206,4 @@ class DeploymentCommandsTestCase(test.TestCase):
@mock.patch("rally.cmd.commands.use.UseCommands.deployment")
def test_use(self, mock_use_deployment):
self.deployment.use("fake_id")
mock_use_deployment.assert_called_once_with("fake_id")
mock_use_deployment.assert_called_once_with("fake_id")

View File

@ -34,7 +34,7 @@ class ShowCommandsTestCase(test.TestCase):
self.fake_glance_client = fakes.FakeGlanceClient()
self.fake_nova_client = fakes.FakeNovaClient()
@mock.patch("rally.cmd.commands.show.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.pretty_float_formatter")
@mock.patch("rally.cmd.commands.show.utils.Struct")
@mock.patch("rally.cmd.commands.show.osclients.Clients.glance")
@ -62,7 +62,7 @@ class ShowCommandsTestCase(test.TestCase):
formatters=fake_formatters,
mixed_case_fields=mixed_case_fields)
@mock.patch("rally.cmd.commands.show.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.pretty_float_formatter")
@mock.patch("rally.cmd.commands.show.utils.Struct")
@mock.patch("rally.cmd.commands.show.osclients.Clients.nova")
@ -94,7 +94,7 @@ class ShowCommandsTestCase(test.TestCase):
formatters=fake_formatters,
mixed_case_fields=mixed_case_fields)
@mock.patch("rally.cmd.commands.show.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.print_list")
@mock.patch("rally.cmd.commands.show.utils.Struct")
@mock.patch("rally.cmd.commands.show.osclients.Clients.nova")
@mock.patch("rally.cmd.commands.show.db.deployment_get")
@ -120,7 +120,7 @@ class ShowCommandsTestCase(test.TestCase):
fields=headers,
mixed_case_fields=mixed_case_fields)
@mock.patch("rally.cmd.commands.show.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.print_list")
@mock.patch("rally.cmd.commands.show.utils.Struct")
@mock.patch("rally.cmd.commands.show.osclients.Clients.nova")
@mock.patch("rally.cmd.commands.show.db.deployment_get")
@ -145,7 +145,7 @@ class ShowCommandsTestCase(test.TestCase):
fields=headers,
mixed_case_fields=mixed_case_fields)
@mock.patch("rally.cmd.commands.show.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.show.cliutils.print_list")
@mock.patch("rally.cmd.commands.show.utils.Struct")
@mock.patch("rally.cmd.commands.show.osclients.Clients.nova")
@mock.patch("rally.cmd.commands.show.db.deployment_get")

View File

@ -479,7 +479,7 @@ class TaskCommandsTestCase(test.TestCase):
out="/tmp/tmp.hsml")
self.assertEqual(ret, 1)
@mock.patch("rally.cmd.commands.task.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.task.cliutils.print_list")
@mock.patch("rally.cmd.commands.task.envutils.get_global",
return_value="123456789")
@mock.patch("rally.cmd.commands.task.objects.Task.list",
@ -542,7 +542,7 @@ class TaskCommandsTestCase(test.TestCase):
in task_uuids]
self.assertTrue(mock_api.Task.delete.mock_calls == expected_calls)
@mock.patch("rally.cmd.commands.task.common_cliutils.print_list")
@mock.patch("rally.cmd.commands.task.cliutils.print_list")
@mock.patch("rally.cmd.commands.task.objects.Task.get")
def test_sla_check(self, mock_task_get, mock_print_list):
data = [{"key": {"name": "fake_name",

View File

@ -94,7 +94,7 @@ class VerifyCommandsTestCase(test.TestCase):
consts.TempestTestsAPI)
self.assertFalse(mock_verify.called)
@mock.patch("rally.openstack.common.cliutils.print_list")
@mock.patch("rally.cmd.cliutils.print_list")
@mock.patch("rally.db.verification_list")
def test_list(self, mock_db_verification_list, mock_print_list):
fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures",
@ -113,7 +113,7 @@ class VerifyCommandsTestCase(test.TestCase):
sortby_index=fields.index(
"Created at"))
@mock.patch("rally.openstack.common.cliutils.print_list")
@mock.patch("rally.cmd.cliutils.print_list")
@mock.patch("rally.db.verification_get")
@mock.patch("rally.db.verification_result_get")
@mock.patch("rally.objects.Verification")
@ -304,4 +304,4 @@ class VerifyCommandsTestCase(test.TestCase):
@mock.patch("rally.cmd.commands.use.UseCommands.verification")
def test_use(self, mock_use_verification):
self.verify.use("fake_id")
mock_use_verification.assert_called_once_with("fake_id")
mock_use_verification.assert_called_once_with("fake_id")

View File

@ -25,7 +25,6 @@ from rally.cmd.commands import task
from rally.cmd.commands import use
from rally.cmd.commands import verify
from rally import exceptions
from rally.openstack.common import cliutils as common_cliutils
from tests.unit import test
CONF = cfg.CONF
@ -158,8 +157,8 @@ class CliUtilsTestCase(test.TestCase):
self.assertTrue(mock_task_get.called)
self.assertEqual(ret, 1)
@mock.patch("rally.openstack.common.cliutils.validate_args",
side_effect=common_cliutils.MissingArgs("missing"))
@mock.patch("rally.common.cliutils.validate_args",
side_effect=cliutils.MissingArgs("missing"))
def test_run_show_fails(self, mock_validate_args):
ret = cliutils.run(["rally", "show", "keypairs"], self.categories)
self.assertTrue(mock_validate_args.called)
@ -175,3 +174,411 @@ class CliUtilsTestCase(test.TestCase):
ret = cliutils.run(["rally", "failure", "failed_to_open_file"],
{"failure": FailuresCommands})
self.assertEqual(1, ret)
class ValidateArgsTest(test.TestCase):
def test_lambda_no_args(self):
cliutils.validate_args(lambda: None)
def _test_lambda_with_args(self, *args, **kwargs):
cliutils.validate_args(lambda x, y: None, *args, **kwargs)
def test_lambda_positional_args(self):
self._test_lambda_with_args(1, 2)
def test_lambda_kwargs(self):
self._test_lambda_with_args(x=1, y=2)
def test_lambda_mixed_kwargs(self):
self._test_lambda_with_args(1, y=2)
def test_lambda_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_args)
def test_lambda_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_args, 1)
def test_lambda_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_args, y=2)
def _test_lambda_with_default(self, *args, **kwargs):
cliutils.validate_args(lambda x, y, z=3: None, *args, **kwargs)
def test_lambda_positional_args_with_default(self):
self._test_lambda_with_default(1, 2)
def test_lambda_kwargs_with_default(self):
self._test_lambda_with_default(x=1, y=2)
def test_lambda_mixed_kwargs_with_default(self):
self._test_lambda_with_default(1, y=2)
def test_lambda_positional_args_all_with_default(self):
self._test_lambda_with_default(1, 2, 3)
def test_lambda_kwargs_all_with_default(self):
self._test_lambda_with_default(x=1, y=2, z=3)
def test_lambda_mixed_kwargs_all_with_default(self):
self._test_lambda_with_default(1, y=2, z=3)
def test_lambda_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_default)
def test_lambda_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_default, 1)
def test_lambda_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_default, y=2)
def test_lambda_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_lambda_with_default, y=2, z=3)
def test_function_no_args(self):
def func():
pass
cliutils.validate_args(func)
def _test_function_with_args(self, *args, **kwargs):
def func(x, y):
pass
cliutils.validate_args(func, *args, **kwargs)
def test_function_positional_args(self):
self._test_function_with_args(1, 2)
def test_function_kwargs(self):
self._test_function_with_args(x=1, y=2)
def test_function_mixed_kwargs(self):
self._test_function_with_args(1, y=2)
def test_function_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_args)
def test_function_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_args, 1)
def test_function_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_args, y=2)
def _test_function_with_default(self, *args, **kwargs):
def func(x, y, z=3):
pass
cliutils.validate_args(func, *args, **kwargs)
def test_function_positional_args_with_default(self):
self._test_function_with_default(1, 2)
def test_function_kwargs_with_default(self):
self._test_function_with_default(x=1, y=2)
def test_function_mixed_kwargs_with_default(self):
self._test_function_with_default(1, y=2)
def test_function_positional_args_all_with_default(self):
self._test_function_with_default(1, 2, 3)
def test_function_kwargs_all_with_default(self):
self._test_function_with_default(x=1, y=2, z=3)
def test_function_mixed_kwargs_all_with_default(self):
self._test_function_with_default(1, y=2, z=3)
def test_function_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_default)
def test_function_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_default, 1)
def test_function_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_default, y=2)
def test_function_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_function_with_default, y=2, z=3)
def test_bound_method_no_args(self):
class Foo(object):
def bar(self):
pass
cliutils.validate_args(Foo().bar)
def _test_bound_method_with_args(self, *args, **kwargs):
class Foo(object):
def bar(self, x, y):
pass
cliutils.validate_args(Foo().bar, *args, **kwargs)
def test_bound_method_positional_args(self):
self._test_bound_method_with_args(1, 2)
def test_bound_method_kwargs(self):
self._test_bound_method_with_args(x=1, y=2)
def test_bound_method_mixed_kwargs(self):
self._test_bound_method_with_args(1, y=2)
def test_bound_method_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_args)
def test_bound_method_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_args, 1)
def test_bound_method_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_args, y=2)
def _test_bound_method_with_default(self, *args, **kwargs):
class Foo(object):
def bar(self, x, y, z=3):
pass
cliutils.validate_args(Foo().bar, *args, **kwargs)
def test_bound_method_positional_args_with_default(self):
self._test_bound_method_with_default(1, 2)
def test_bound_method_kwargs_with_default(self):
self._test_bound_method_with_default(x=1, y=2)
def test_bound_method_mixed_kwargs_with_default(self):
self._test_bound_method_with_default(1, y=2)
def test_bound_method_positional_args_all_with_default(self):
self._test_bound_method_with_default(1, 2, 3)
def test_bound_method_kwargs_all_with_default(self):
self._test_bound_method_with_default(x=1, y=2, z=3)
def test_bound_method_mixed_kwargs_all_with_default(self):
self._test_bound_method_with_default(1, y=2, z=3)
def test_bound_method_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_default)
def test_bound_method_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_default, 1)
def test_bound_method_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_default, y=2)
def test_bound_method_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_bound_method_with_default, y=2, z=3)
def test_unbound_method_no_args(self):
class Foo(object):
def bar(self):
pass
cliutils.validate_args(Foo.bar, Foo())
def _test_unbound_method_with_args(self, *args, **kwargs):
class Foo(object):
def bar(self, x, y):
pass
cliutils.validate_args(Foo.bar, Foo(), *args, **kwargs)
def test_unbound_method_positional_args(self):
self._test_unbound_method_with_args(1, 2)
def test_unbound_method_kwargs(self):
self._test_unbound_method_with_args(x=1, y=2)
def test_unbound_method_mixed_kwargs(self):
self._test_unbound_method_with_args(1, y=2)
def test_unbound_method_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_args)
def test_unbound_method_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_args, 1)
def test_unbound_method_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_args, y=2)
def _test_unbound_method_with_default(self, *args, **kwargs):
class Foo(object):
def bar(self, x, y, z=3):
pass
cliutils.validate_args(Foo.bar, Foo(), *args, **kwargs)
def test_unbound_method_positional_args_with_default(self):
self._test_unbound_method_with_default(1, 2)
def test_unbound_method_kwargs_with_default(self):
self._test_unbound_method_with_default(x=1, y=2)
def test_unbound_method_mixed_kwargs_with_default(self):
self._test_unbound_method_with_default(1, y=2)
def test_unbound_method_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_default)
def test_unbound_method_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_default, 1)
def test_unbound_method_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_default, y=2)
def test_unbound_method_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_unbound_method_with_default, y=2, z=3)
def test_class_method_no_args(self):
class Foo(object):
@classmethod
def bar(cls):
pass
cliutils.validate_args(Foo.bar)
def _test_class_method_with_args(self, *args, **kwargs):
class Foo(object):
@classmethod
def bar(cls, x, y):
pass
cliutils.validate_args(Foo.bar, *args, **kwargs)
def test_class_method_positional_args(self):
self._test_class_method_with_args(1, 2)
def test_class_method_kwargs(self):
self._test_class_method_with_args(x=1, y=2)
def test_class_method_mixed_kwargs(self):
self._test_class_method_with_args(1, y=2)
def test_class_method_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_args)
def test_class_method_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_args, 1)
def test_class_method_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_args, y=2)
def _test_class_method_with_default(self, *args, **kwargs):
class Foo(object):
@classmethod
def bar(cls, x, y, z=3):
pass
cliutils.validate_args(Foo.bar, *args, **kwargs)
def test_class_method_positional_args_with_default(self):
self._test_class_method_with_default(1, 2)
def test_class_method_kwargs_with_default(self):
self._test_class_method_with_default(x=1, y=2)
def test_class_method_mixed_kwargs_with_default(self):
self._test_class_method_with_default(1, y=2)
def test_class_method_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_default)
def test_class_method_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_default, 1)
def test_class_method_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_default, y=2)
def test_class_method_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_class_method_with_default, y=2, z=3)
def test_static_method_no_args(self):
class Foo(object):
@staticmethod
def bar():
pass
cliutils.validate_args(Foo.bar)
def _test_static_method_with_args(self, *args, **kwargs):
class Foo(object):
@staticmethod
def bar(x, y):
pass
cliutils.validate_args(Foo.bar, *args, **kwargs)
def test_static_method_positional_args(self):
self._test_static_method_with_args(1, 2)
def test_static_method_kwargs(self):
self._test_static_method_with_args(x=1, y=2)
def test_static_method_mixed_kwargs(self):
self._test_static_method_with_args(1, y=2)
def test_static_method_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_args)
def test_static_method_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_args, 1)
def test_static_method_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_args, y=2)
def _test_static_method_with_default(self, *args, **kwargs):
class Foo(object):
@staticmethod
def bar(x, y, z=3):
pass
cliutils.validate_args(Foo.bar, *args, **kwargs)
def test_static_method_positional_args_with_default(self):
self._test_static_method_with_default(1, 2)
def test_static_method_kwargs_with_default(self):
self._test_static_method_with_default(x=1, y=2)
def test_static_method_mixed_kwargs_with_default(self):
self._test_static_method_with_default(1, y=2)
def test_static_method_with_default_missing_args1(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_default)
def test_static_method_with_default_missing_args2(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_default, 1)
def test_static_method_with_default_missing_args3(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_default, y=2)
def test_static_method_with_default_missing_args4(self):
self.assertRaises(cliutils.MissingArgs,
self._test_static_method_with_default, y=2, z=3)

View File

@ -62,8 +62,8 @@ class HackingTestCase(test.TestCase):
self.assertTrue(actual_msg.startswith("N303"))
def test_check_wrong_logging_import(self):
bad_imports = ["from rally.openstack.common import log",
"import rally.openstack.common.log",
bad_imports = ["from oslo_log import log",
"import oslo_log",
"import logging"]
good_imports = ["from rally.common import log",
"from rally.common.log",

View File

@ -24,23 +24,24 @@ from tests.unit import test
class LogTestCase(test.TestCase):
@mock.patch("rally.common.log.CONF")
@mock.patch("rally.common.log.handlers")
@mock.patch("rally.common.log.oslogging")
def test_setup(self, mock_oslogger, mock_conf):
def test_setup(self, mock_oslogger, mock_handlers, mock_conf):
proj = "fakep"
version = "fakev"
mock_oslogger.ColorHandler.LEVEL_COLORS = {
mock_handlers.ColorHandler.LEVEL_COLORS = {
logging.DEBUG: "debug_color"}
mock_conf.rally_debug = True
log.setup(proj, version)
self.assertIn(logging.RDEBUG, mock_oslogger.ColorHandler.LEVEL_COLORS)
self.assertIn(logging.RDEBUG, mock_handlers.ColorHandler.LEVEL_COLORS)
self.assertEqual(
mock_oslogger.ColorHandler.LEVEL_COLORS[logging.DEBUG],
mock_oslogger.ColorHandler.LEVEL_COLORS[logging.RDEBUG])
mock_handlers.ColorHandler.LEVEL_COLORS[logging.DEBUG],
mock_handlers.ColorHandler.LEVEL_COLORS[logging.RDEBUG])
mock_oslogger.setup.assert_called_once_with(proj, version)
mock_oslogger.setup.assert_called_once_with(mock_conf, proj, version)
mock_oslogger.getLogger(None).logger.setLevel.assert_called_once_with(
logging.RDEBUG)
@ -49,27 +50,28 @@ class LogTestCase(test.TestCase):
@mock.patch("rally.common.log.oslogging")
def test_getLogger(self, mock_oslogger, mock_radapter, mock_pylogging):
proj = "fake"
name = "fake"
vers = "fake"
mock_oslogger._loggers = dict()
returned_logger = log.getLogger(proj, vers)
returned_logger = log.getLogger(name, vers)
self.assertIn(proj, mock_oslogger._loggers)
mock_radapter.assert_called_once_with(mock_pylogging.getLogger(proj),
proj, vers)
self.assertEqual(mock_oslogger._loggers[proj], returned_logger)
self.assertIn(name, mock_oslogger._loggers)
mock_radapter.assert_called_once_with(
mock_pylogging.getLogger(name),
{"project": "rally", "version": vers})
self.assertEqual(mock_oslogger._loggers[name], returned_logger)
class LogRallyContaxtAdapter(test.TestCase):
@mock.patch("rally.common.log.logging")
@mock.patch("rally.common.log.oslogging.ContextAdapter")
@mock.patch("rally.common.log.oslogging.KeywordArgumentAdapter")
def test_debug(self, mock_oslo_adapter, mock_logging):
mock_logging.RDEBUG = 123
fake_msg = "fake message"
radapter = log.RallyContextAdapter(mock.MagicMock(), "fakep", "fakev")
radapter = log.RallyContextAdapter(mock.MagicMock(), "fakep")
radapter.log = mock.MagicMock()
radapter.debug(fake_msg)

View File

@ -22,7 +22,7 @@ import rally
from rally.cmd import cliutils
from tests.unit import test
RES_PATH = os.path.join(os.path.dirname(rally.__file__), os.pardir, "tools")
RES_PATH = os.path.join(os.path.dirname(rally.__file__), os.pardir, "etc")
class BashCompletionTestCase(test.TestCase):
@ -39,5 +39,5 @@ class BashCompletionTestCase(test.TestCase):
self.fail("bash completion script is outdated. "
"New script is located at %s "
"You may fix this by executing "
"`mv %s tools/rally.bash_completion`" % (new_filename,
new_filename))
"`mv %s etc/rally.bash_completion`" % (new_filename,
new_filename))

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
PROJECT_NAME=${PROJECT_NAME:-rally}
CFGFILE_NAME=${PROJECT_NAME}.conf.sample
if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
elif [ -e etc/${CFGFILE_NAME} ]; then
CFGFILE=etc/${CFGFILE_NAME}
else
echo "${0##*/}: can not find config file"
exit 1
fi
TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
trap "rm -rf $TEMPDIR" EXIT
tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
then
echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
echo "${0##*/}: Please run ${0%%${0##*/}}generate_sample.sh."
exit 1
fi

View File

@ -1,119 +0,0 @@
#!/usr/bin/env bash
print_hint() {
echo "Try \`${0##*/} --help' for more information." >&2
}
PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
--long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
eval set -- "$PARSED_OPTIONS"
while true; do
case "$1" in
-h|--help)
echo "${0##*/} [options]"
echo ""
echo "options:"
echo "-h, --help show brief help"
echo "-b, --base-dir=DIR project base directory"
echo "-p, --package-name=NAME project package name"
echo "-o, --output-dir=DIR file output directory"
echo "-m, --module=MOD extra python module to interrogate for options"
echo "-l, --library=LIB extra library that registers options for discovery"
exit 0
;;
-b|--base-dir)
shift
BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-p|--package-name)
shift
PACKAGENAME=`echo $1`
shift
;;
-o|--output-dir)
shift
OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
shift
;;
-m|--module)
shift
MODULES="$MODULES -m $1"
shift
;;
-l|--library)
shift
LIBRARIES="$LIBRARIES -l $1"
shift
;;
--)
break
;;
esac
done
BASEDIR=${BASEDIR:-`pwd`}
if ! [ -d $BASEDIR ]
then
echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
elif [[ $BASEDIR != /* ]]
then
BASEDIR=$(cd "$BASEDIR" && pwd)
fi
PACKAGENAME=${PACKAGENAME:-$(python setup.py --name)}
TARGETDIR=$BASEDIR/$PACKAGENAME
if ! [ -d $TARGETDIR ]
then
echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
fi
OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
# NOTE(bnemec): Some projects put their sample config in etc/,
# some in etc/$PACKAGENAME/
if [ -d $OUTPUTDIR/$PACKAGENAME ]
then
OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
elif ! [ -d $OUTPUTDIR ]
then
echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
exit 1
fi
BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
find $TARGETDIR -type f -name "*.pyc" -delete
FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
-exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
RC_FILE="`dirname $0`/oslo.config.generator.rc"
if test -r "$RC_FILE"
then
source "$RC_FILE"
fi
for mod in ${RALLY_CONFIG_GENERATOR_EXTRA_MODULES}; do
MODULES="$MODULES -m $mod"
done
for lib in ${RALLY_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
LIBRARIES="$LIBRARIES -l $lib"
done
export EVENTLET_NO_GREENDNS=yes
OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
[ "$OS_VARS" ] && eval "unset \$OS_VARS"
DEFAULT_MODULEPATH=rally.openstack.common.config.generator
MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
# Hook to allow projects to append custom config file snippets
CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
for CONCAT_FILE in $CONCAT_FILES; do
cat $CONCAT_FILE >> $OUTPUTFILE
done

View File

@ -1,4 +0,0 @@
# Environmental Variables that affect the automatic sample config generation.
# Additions to any of these variables are space delimited. See the "generate_sample.sh"
# script for the variables that can be used.
RALLY_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.db'

View File

@ -34,6 +34,13 @@ commands = python setup.py testr --coverage --testr-args='{posargs}'
changedir = doc/source
commands = make html
[testenv:genconfig]
commands =
oslo-config-generator --output-file etc/rally/rally.conf.sample \
--namespace rally \
--namespace oslo.db \
--namespace oslo.log
[tox:jenkins]
downloadcache = ~/cache/pip
@ -41,7 +48,7 @@ downloadcache = ~/cache/pip
ignore = E126,H703
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build,*rally/openstack*,setup.py
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build,setup.py
[hacking]
import_exceptions = rally.common.i18n