Modify run_tests.sh to support PostgreSQL

Modify run_tests.sh to run unit tests with PostgreSQL. Fix a number of
connection pool and deadlock related issues in the unit tests. Fix DB
clean up routine to remove records in the correct context and order.

Change-Id: If88c00a326c025af885d061ca95588939195b7c5
Implements: blueprint run-unit-tests-on-postgresql
This commit is contained in:
Winson Chan 2015-07-01 23:03:51 +00:00
parent 071700a76b
commit 71f74a93cb
15 changed files with 959 additions and 88 deletions

1
.gitignore vendored
View File

@ -30,6 +30,7 @@ nosetests.xml
cover/*
.testrepository/
subunit.log
.mistral.conf
# Translations
*.mo

View File

@ -0,0 +1,778 @@
[DEFAULT]
#
# From mistral.config
#
# Enables debugger. Note that using this option changes how the
# eventlet library is used to support async IO. This could result in
# failures that do not occur under normal operation. Use at your own
# risk. (boolean value)
#use_debugger = false
# Specifies which mistral server to start by the launch script. Valid
# options are all or any combination of api, engine, and executor.
# (list value)
#server = all
# Logger name for pretty workflow trace output. (string value)
#workflow_trace_log_name = workflow_trace
#
# From oslo.log
#
# Print debugging output (set logging level to DEBUG instead of
# default WARNING level). (boolean value)
#debug = false
# Print more verbose output (set logging level to INFO instead of
# default WARNING level). (boolean value)
#verbose = false
# The name of a logging configuration file. This file is appended to
# any existing logging configuration files. For details about logging
# configuration files, see the Python logging module documentation.
# (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# DEPRECATED. A logging.Formatter log message format string which may
# use any of the available logging.LogRecord attributes. This option
# is deprecated. Please use logging_context_format_string and
# logging_default_format_string instead. (string value)
#log_format = <None>
# Format string for %%(asctime)s in log records. Default: %(default)s
# . (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to output to. If no default is set,
# logging will go to stdout. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative --log-file paths.
# (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Use syslog for logging. Existing syslog format is DEPRECATED during
# I, and will change in J to honor RFC5424. (boolean value)
#use_syslog = false
# (Optional) Enables or disables syslog rfc5424 format for logging. If
# enabled, prefixes the MSG part of the syslog message with APP-NAME
# (RFC5424). The format without the APP-NAME is deprecated in I, and
# will be removed in J. (boolean value)
#use_syslog_rfc_format = false
# Syslog facility to receive log lines. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages without context. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Data to append to log format when level is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string
# value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# List of logger=LEVEL pairs. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
# The format for an instance that is passed with the log message.
# (string value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message.
# (string value)
#instance_uuid_format = "[instance: %(uuid)s] "
#
# From oslo.messaging
#
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve to this
# address. (string value)
#rpc_zmq_bind_address = *
# MatchMaker driver. (string value)
#rpc_zmq_matchmaker = local
# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port = 9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts = 1
# Maximum number of ingress messages to locally buffer per topic.
# Default is unlimited. (integer value)
#rpc_zmq_topic_backlog = <None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir = /var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP address.
# Must match "host" option, if running Nova. (string value)
#rpc_zmq_host = localhost
# Seconds to wait before a cast expires (TTL). Only supported by
# impl_zmq. (integer value)
#rpc_cast_timeout = 30
# Heartbeat frequency. (integer value)
#matchmaker_heartbeat_freq = 300
# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl = 600
# Size of RPC thread pool. (integer value)
#rpc_thread_pool_size = 64
# The Drivers(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing,log, test, noop (multi valued)
#notification_driver =
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
#notification_topics = notifications
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout = 60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend option
# and driver specific configuration. (string value)
#transport_url = <None>
# The messaging driver to use, defaults to rabbit. Other drivers
# include qpid and zmq. (string value)
#rpc_backend = rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the transport_url
# option. (string value)
#control_exchange = openstack
[api]
#
# From mistral.config
#
# Mistral API server host (string value)
#host = 0.0.0.0
# Mistral API server port (integer value)
#port = 8989
[database]
#
# From oslo.db
#
# The file name to use with SQLite. (string value)
# Deprecated group/name - [DEFAULT]/sqlite_db
#sqlite_db = oslo.sqlite
# If True, SQLite uses synchronous mode. (boolean value)
# Deprecated group/name - [DEFAULT]/sqlite_synchronous
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database.
# (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
connection = postgresql://mistral:m1stral@localhost/mistral
# The SQLAlchemy connection string to use to connect to the slave
# database. (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including
# the default, overrides any server-set SQL mode. To use whatever SQL
# mode is set by the server configuration, set this to no value.
# Example: mysql_sql_mode= (string value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle SQL connections are reaped. (integer value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
# Maximum number of database connection retries during startup. Set to
# -1 to specify an infinite retry count. (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = <None>
# Verbosity of SQL debugging information: 0=None, 100=Everything.
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer
# value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection
# lost. (boolean value)
#use_db_reconnect = false
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database
# operation up to db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries
# of a database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before
# error is raised. Set to -1 to specify an infinite retry count.
# (integer value)
#db_max_retries = 20
[engine]
#
# From mistral.config
#
# Mistral engine plugin (string value)
#engine = default
# Name of the engine node. This can be an opaque identifier. It is not
# necessarily a hostname, FQDN, or IP address. (string value)
#host = 0.0.0.0
# The message topic that the engine listens on. (string value)
#topic = mistral_engine
# The version of the engine. (string value)
#version = 1.0
[executor]
#
# From mistral.config
#
# Name of the executor node. This can be an opaque identifier. It is
# not necessarily a hostname, FQDN, or IP address. (string value)
#host = 0.0.0.0
# The message topic that the executor listens on. (string value)
#topic = mistral_executor
# The version of the executor. (string value)
#version = 1.0
[keystone_authtoken]
#
# From keystonemiddleware.auth_token
#
# Complete public Identity API endpoint. (string value)
#auth_uri = <None>
# API version of the admin Identity API endpoint. (string value)
#auth_version = <None>
# Do not handle authorization requests within the middleware, but
# delegate the authorization decision to downstream WSGI components.
# (boolean value)
#delay_auth_decision = false
# Request timeout value for communicating with Identity API server.
# (integer value)
#http_connect_timeout = <None>
# How many times are we trying to reconnect when communicating with
# Identity API Server. (integer value)
#http_request_max_retries = 3
# Env key for the swift cache. (string value)
#cache = <None>
# Required if identity server requires client certificate (string
# value)
#certfile = <None>
# Required if identity server requires client certificate (string
# value)
#keyfile = <None>
# A PEM encoded Certificate Authority to use when verifying HTTPs
# connections. Defaults to system CAs. (string value)
#cafile = <None>
# Verify HTTPS connections. (boolean value)
#insecure = false
# Directory used to cache files related to PKI tokens. (string value)
#signing_dir = <None>
# Optionally specify a list of memcached server(s) to use for caching.
# If left undefined, tokens will instead be cached in-process. (list
# value)
# Deprecated group/name - [DEFAULT]/memcache_servers
#memcached_servers = <None>
# In order to prevent excessive effort spent validating tokens, the
# middleware caches previously-seen tokens for a configurable duration
# (in seconds). Set to -1 to disable caching completely. (integer
# value)
#token_cache_time = 300
# Determines the frequency at which the list of revoked tokens is
# retrieved from the Identity service (in seconds). A high number of
# revocation events combined with a low cache duration may
# significantly reduce performance. (integer value)
#revocation_cache_time = 10
# (Optional) If defined, indicate whether token data should be
# authenticated or authenticated and encrypted. Acceptable values are
# MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in
# the cache. If ENCRYPT, token data is encrypted and authenticated in
# the cache. If the value is not one of these options or empty,
# auth_token will raise an exception on initialization. (string value)
#memcache_security_strategy = <None>
# (Optional, mandatory if memcache_security_strategy is defined) This
# string is used for key derivation. (string value)
#memcache_secret_key = <None>
# (Optional) Number of seconds memcached server is considered dead
# before it is tried again. (integer value)
#memcache_pool_dead_retry = 300
# (Optional) Maximum total number of open connections to every
# memcached server. (integer value)
#memcache_pool_maxsize = 10
# (Optional) Socket timeout in seconds for communicating with a
# memcached server. (integer value)
#memcache_pool_socket_timeout = 3
# (Optional) Number of seconds a connection to memcached is held
# unused in the pool before it is closed. (integer value)
#memcache_pool_unused_timeout = 60
# (Optional) Number of seconds that an operation will wait to get a
# memcached client connection from the pool. (integer value)
#memcache_pool_conn_get_timeout = 10
# (Optional) Use the advanced (eventlet safe) memcached client pool.
# The advanced pool will only work under python 2.x. (boolean value)
#memcache_use_advanced_pool = false
# (Optional) Indicate whether to set the X-Service-Catalog header. If
# False, middleware will not ask for service catalog on token
# validation and will not set the X-Service-Catalog header. (boolean
# value)
#include_service_catalog = true
# Used to control the use and type of token binding. Can be set to:
# "disabled" to not check token binding. "permissive" (default) to
# validate binding information if the bind type is of a form known to
# the server and ignore it if not. "strict" like "permissive" but if
# the bind type is unknown the token will be rejected. "required" any
# form of token binding is needed to be allowed. Finally the name of a
# binding method that must be present in tokens. (string value)
#enforce_token_bind = permissive
# If true, the revocation list will be checked for cached tokens. This
# requires that PKI tokens are configured on the identity server.
# (boolean value)
#check_revocations_for_cached = false
# Hash algorithms to use for hashing PKI tokens. This may be a single
# algorithm or multiple. The algorithms are those supported by Python
# standard hashlib.new(). The hashes will be tried in the order given,
# so put the preferred one first for performance. The result of the
# first hash will be stored in the cache. This will typically be set
# to multiple values only while migrating from a less secure algorithm
# to a more secure one. Once all the old tokens are expired this
# option should be set to a single value for better performance. (list
# value)
#hash_algorithms = md5
# Prefix to prepend at the beginning of the path. Deprecated, use
# identity_uri. (string value)
#auth_admin_prefix =
# Host providing the admin Identity API endpoint. Deprecated, use
# identity_uri. (string value)
#auth_host = 127.0.0.1
# Port of the admin Identity API endpoint. Deprecated, use
# identity_uri. (integer value)
#auth_port = 35357
# Protocol of the admin Identity API endpoint (http or https).
# Deprecated, use identity_uri. (string value)
#auth_protocol = https
# Complete admin Identity API endpoint. This should specify the
# unversioned root endpoint e.g. https://localhost:35357/ (string
# value)
#identity_uri = <None>
# This option is deprecated and may be removed in a future release.
# Single shared secret with the Keystone configuration used for
# bootstrapping a Keystone installation, or otherwise bypassing the
# normal authentication process. This option should not be used, use
# `admin_user` and `admin_password` instead. (string value)
#admin_token = <None>
# Service username. (string value)
#admin_user = <None>
# Service user password. (string value)
#admin_password = <None>
# Service tenant name. (string value)
#admin_tenant_name = admin
[matchmaker_redis]
#
# From oslo.messaging
#
# Host to locate redis. (string value)
#host = 127.0.0.1
# Use this port to connect to redis host. (integer value)
#port = 6379
# Password for Redis server (optional). (string value)
#password = <None>
[matchmaker_ring]
#
# From oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile = /etc/oslo/matchmaker_ring.json
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# address prefix used when sending to a specific server (string value)
# Deprecated group/name - [amqp1]/server_request_prefix
#server_request_prefix = exclusive
# address prefix used when broadcasting to all servers (string value)
# Deprecated group/name - [amqp1]/broadcast_prefix
#broadcast_prefix = broadcast
# address prefix when sending to any server in group (string value)
# Deprecated group/name - [amqp1]/group_request_prefix
#group_request_prefix = unicast
# Name for the AMQP container (string value)
# Deprecated group/name - [amqp1]/container_name
#container_name = <None>
# Timeout for inactive connections (in seconds) (integer value)
# Deprecated group/name - [amqp1]/idle_timeout
#idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
# Deprecated group/name - [amqp1]/trace
#trace = false
# CA certificate PEM file to verify server certificate (string value)
# Deprecated group/name - [amqp1]/ssl_ca_file
#ssl_ca_file =
# Identifying certificate PEM file to present to clients (string
# value)
# Deprecated group/name - [amqp1]/ssl_cert_file
#ssl_cert_file =
# Private key PEM file used to sign cert_file certificate (string
# value)
# Deprecated group/name - [amqp1]/ssl_key_file
#ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
# Deprecated group/name - [amqp1]/ssl_key_password
#ssl_key_password = <None>
# Accept clients using either SSL or plain TCP (boolean value)
# Deprecated group/name - [amqp1]/allow_insecure_clients
#allow_insecure_clients = false
[oslo_messaging_qpid]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_durable_queues
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
#amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
#rpc_conn_pool_size = 30
# Qpid broker hostname. (string value)
# Deprecated group/name - [DEFAULT]/qpid_hostname
#qpid_hostname = localhost
# Qpid broker port. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_port
#qpid_port = 5672
# Qpid HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/qpid_hosts
#qpid_hosts = $qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_username
#qpid_username =
# Password for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_password
#qpid_password =
# Space separated list of SASL mechanisms to use for auth. (string
# value)
# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
#qpid_sasl_mechanisms =
# Seconds between connection keepalive heartbeats. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_heartbeat
#qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# Deprecated group/name - [DEFAULT]/qpid_protocol
#qpid_protocol = tcp
# Whether to disable the Nagle algorithm. (boolean value)
# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
#qpid_tcp_nodelay = true
# The number of prefetched messages held by receiver. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
#qpid_receiver_capacity = 1
# The qpid topology version to use. Version 1 is what was originally
# used by impl_qpid. Version 2 includes some backwards-incompatible
# changes that allow broker federation to work. Users should update
# to version 2 when they are able to take everything down, as it
# requires a clean break. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_topology_version
#qpid_topology_version = 1
[oslo_messaging_rabbit]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_durable_queues
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
#amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
#rpc_conn_pool_size = 30
# SSL version to use (valid only if SSL enabled). Valid values are
# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
# available on some distributions. (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
#kombu_ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
#kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
#kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled).
# (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
#kombu_ssl_ca_certs =
# How long to wait before reconnecting in response to an AMQP consumer
# cancel notification. (floating point value)
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
#kombu_reconnect_delay = 1.0
# The RabbitMQ broker address where a single node is used. (string
# value)
# Deprecated group/name - [DEFAULT]/rabbit_host
#rabbit_host = localhost
# The RabbitMQ broker port where a single node is used. (integer
# value)
# Deprecated group/name - [DEFAULT]/rabbit_port
#rabbit_port = 5672
# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
#rabbit_hosts = $rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
#rabbit_use_ssl = false
# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
#rabbit_userid = guest
# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
#rabbit_password = guest
# The RabbitMQ login method. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_login_method
#rabbit_login_method = AMQPLAIN
# The RabbitMQ virtual host. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
#rabbit_virtual_host = /
# How frequently to retry connecting with RabbitMQ. (integer value)
#rabbit_retry_interval = 1
# How long to backoff for between retries when connecting to RabbitMQ.
# (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
#rabbit_retry_backoff = 2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
#rabbit_max_retries = 0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this
# option, you must wipe the RabbitMQ database. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
#rabbit_ha_queues = false
# Number of seconds after which the Rabbit broker is considered down
# if heartbeat's keep-alive fails (0 disable the heartbeat). (integer
# value)
#heartbeat_timeout_threshold = 60
# How often times during the heartbeat_timeout_threshold we check the
# heartbeat. (integer value)
#heartbeat_rate = 2
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
# (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
#fake_rabbit = false
[pecan]
#
# From mistral.config
#
# Pecan root controller (string value)
#root = mistral.api.controllers.root.RootController
# A list of modules where pecan will search for applications. (list
# value)
#modules = mistral.api
# Enables the ability to display tracebacks in the browser and
# interactively debug during development. (boolean value)
#debug = false
# Enables user authentication in pecan. (boolean value)
#auth_enable = true

View File

@ -94,7 +94,7 @@ def acquire_lock(model, id, session=None):
query.update(
{'updated_at': timeutils.utcnow()},
synchronize_session=False
synchronize_session='fetch',
)
else:
sqlite_lock.acquire_lock(id, session)

View File

@ -196,7 +196,8 @@ class DefaultEngine(base.Engine):
wf_ctrl.evaluate_workflow_final_context()
)
else:
result_str = str(action_ex.output.get('result', "Unknown"))
result_str = (str(action_ex.output.get('result', 'Unknown'))
if action_ex.output else 'Unknown')
state_info = (
"Failure caused by error in task '%s': %s" %

View File

@ -1,4 +1,5 @@
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -101,10 +102,10 @@ class CallScheduler(periodic_task.PeriodicTasks):
#
# 'REPEATABLE-READ' is by default in MySQL and
# 'READ-COMMITTED is by default in PostgreSQL.
with db_api.transaction():
delayed_calls = db_api.get_delayed_calls_to_start(time_filter)
delayed_calls = []
for call in delayed_calls:
with db_api.transaction():
for call in db_api.get_delayed_calls_to_start(time_filter):
# Delete this delayed call from DB before the making call in
# order to prevent calling from parallel transaction.
db_api.delete_delayed_call(call.id)
@ -136,6 +137,16 @@ class CallScheduler(periodic_task.PeriodicTasks):
)
method_args[arg_name] = deserialized
delayed_calls.append((target_method, method_args))
# TODO(m4dcoder): Troubleshoot deadlocks with PostgreSQL and MySQL.
# The queries in the target method such as
# mistral.engine.task_handler.run_action can deadlock
# with delete_delayed_call. Please keep the scope of the
# transaction short.
for (target_method, method_args) in delayed_calls:
with db_api.transaction():
try:
# Call the method.
target_method(**method_args)

View File

@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -18,6 +17,7 @@ import pkg_resources as pkg
import sys
import time
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslotest import base
@ -28,6 +28,8 @@ from mistral.db.sqlalchemy import base as db_sa_base
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2 import api as db_api_v2
from mistral.services import action_manager
from mistral.services import security
from mistral.tests import config as test_config
from mistral import version
@ -35,12 +37,34 @@ RESOURCES_PATH = 'tests/resources/'
LOG = logging.getLogger(__name__)
test_config.parse_args()
def get_resource(resource_name):
return open(pkg.resource_filename(
version.version_info.package,
RESOURCES_PATH + resource_name)).read()
def get_context(default=True, admin=False):
if default:
return auth_context.MistralContext(
user_id='1-2-3-4',
project_id=security.DEFAULT_PROJECT_ID,
user_name='test-user',
project_name='test-project',
is_admin=admin
)
else:
return auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=admin
)
class BaseTest(base.BaseTestCase):
def assertListEqual(self, l1, l2):
if tuple(sys.version_info)[0:2] < (2, 7):
@ -160,7 +184,10 @@ class DbTestCase(BaseTest):
"""Runs a long initialization (runs once by class)
and can be extended by child classes.
"""
cfg.CONF.set_default('connection', 'sqlite://', group='database')
# If using sqlite, change to memory. The default is file based.
if cfg.CONF.database.connection.startswith('sqlite'):
cfg.CONF.set_default('connection', 'sqlite://', group='database')
cfg.CONF.set_default('max_overflow', -1, group='database')
cfg.CONF.set_default('max_pool_size', 1000, group='database')
@ -169,27 +196,34 @@ class DbTestCase(BaseTest):
action_manager.sync_db()
def _clean_db(self):
with db_api_v2.transaction():
db_api_v2.delete_workbooks()
db_api_v2.delete_executions()
db_api_v2.delete_cron_triggers()
db_api_v2.delete_workflow_definitions()
contexts = [
get_context(default=False),
get_context(default=True)
]
for ctx in contexts:
auth_context.set_ctx(ctx)
with mock.patch('mistral.services.security.get_project_id',
new=mock.MagicMock(return_value=ctx.project_id)):
with db_api_v2.transaction():
db_api_v2.delete_executions()
db_api_v2.delete_workbooks()
db_api_v2.delete_cron_triggers()
db_api_v2.delete_workflow_definitions()
db_api_v2.delete_environments()
sqlite_lock.cleanup()
if not cfg.CONF.database.connection.startswith('sqlite'):
db_sa_base.get_engine().dispose()
def setUp(self):
super(DbTestCase, self).setUp()
self.__heavy_init()
self.ctx = auth_context.MistralContext(
user_id='1-2-3-4',
project_id='<default-project>',
user_name='test-user',
project_name='test-project',
is_admin=False
)
self.ctx = get_context()
auth_context.set_ctx(self.ctx)
self.addCleanup(auth_context.set_ctx, None)

25
mistral/tests/config.py Normal file
View File

@ -0,0 +1,25 @@
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo.config import cfg
def parse_args():
# Look for .mistral.conf in the project directory by default.
project_dir = '%s/../..' % os.path.dirname(__file__)
config_file = '%s/.mistral.conf' % os.path.realpath(project_dir)
config_files = [config_file] if os.path.isfile(config_file) else None
cfg.CONF(args=[], default_config_files=config_files)

View File

@ -17,6 +17,7 @@
import eventlet
from oslo_config import cfg
import random
import testtools
from mistral.db.sqlalchemy import sqlite_lock
from mistral.db.v2.sqlalchemy import api as db_api
@ -38,6 +39,9 @@ WF_EXEC = {
}
@testtools.skipIf(
'sqlite' not in cfg.CONF.database.connection,
'Not using SQLite for DB backend.')
class SQLiteLocksTest(test_base.DbTestCase):
def setUp(self):
super(SQLiteLocksTest, self).setUp()

View File

@ -164,15 +164,7 @@ class WorkbookTest(SQLAlchemyTest):
self.assertEqual(created, fetched[0])
# Create a new user.
ctx = auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=False
)
auth_context.set_ctx(ctx)
auth_context.set_ctx(test_base.get_context(default=False))
created = db_api.create_workbook(WORKBOOKS[1])
fetched = db_api.get_workbooks()
@ -191,15 +183,7 @@ class WorkbookTest(SQLAlchemyTest):
self.assertEqual(created1, fetched[0])
# Create a new user.
ctx = auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=False
)
auth_context.set_ctx(ctx)
auth_context.set_ctx(test_base.get_context(default=False))
fetched = db_api.get_workbooks()
@ -222,15 +206,7 @@ class WorkbookTest(SQLAlchemyTest):
auth_context.ctx().project_id)
# Create a new user.
ctx = auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=False
)
auth_context.set_ctx(ctx)
auth_context.set_ctx(test_base.get_context(default=False))
fetched = db_api.get_workbooks()
@ -378,15 +354,7 @@ class WorkflowDefinitionTest(SQLAlchemyTest):
self.assertEqual(created1, fetched[0])
# Create a new user.
ctx = auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=False
)
auth_context.set_ctx(ctx)
auth_context.set_ctx(test_base.get_context(default=False))
fetched = db_api.get_workflow_definitions()
@ -411,15 +379,7 @@ class WorkflowDefinitionTest(SQLAlchemyTest):
)
# Create a new user.
ctx = auth_context.MistralContext(
user_id='9-0-44-5',
project_id='99-88-33',
user_name='test-user',
project_name='test-another',
is_admin=False
)
auth_context.set_ctx(ctx)
auth_context.set_ctx(test_base.get_context(default=False))
fetched = db_api.get_workflow_definitions()

View File

@ -16,7 +16,8 @@
import eventlet
from eventlet import semaphore
from oslo_config import cfg
from oslo.config import cfg
import testtools
from mistral.db.v2.sqlalchemy import api as db_api
from mistral.tests import base as test_base
@ -36,6 +37,9 @@ WF_EXEC = {
}
@testtools.skipIf(
'sqlite' not in cfg.CONF.database.connection,
'SQLite is not used for the database backend.')
class SQLiteTransactionsTest(test_base.DbTestCase):
"""The purpose of this test is to research transactions of SQLite."""

View File

@ -1,4 +1,5 @@
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -39,7 +40,7 @@ def launch_engine_server(transport, engine):
transport,
target,
[rpc.EngineServer(engine)],
executor='eventlet',
executor='blocking',
serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
)
@ -57,7 +58,7 @@ def launch_executor_server(transport, executor):
transport,
target,
[rpc.ExecutorServer(executor)],
executor='eventlet',
executor='blocking',
serializer=ctx.RpcContextSerializer(ctx.JsonPayloadSerializer())
)

View File

@ -186,9 +186,8 @@ workflows:
tasks:
task1:
action: std.echo output="Hi!"
wait-after: 4
timeout: 3
action: std.async_noop
timeout: 1
"""
@ -760,7 +759,7 @@ class PoliciesTest(base.EngineTestCase):
self._await(lambda: self.is_execution_error(wf_ex.id))
# Wait until timeout exceeds.
self._sleep(2)
self._sleep(1)
wf_ex = db_api.get_workflow_execution(wf_ex.id)
tasks_db = wf_ex.task_executions

View File

@ -16,6 +16,7 @@ from eventlet import corolocal
from eventlet import semaphore
from oslo_config import cfg
from oslo_log import log as logging
import testtools
from mistral.actions import base as action_base
from mistral.db.v2 import api as db_api
@ -172,6 +173,7 @@ class LongActionTest(base.EngineTestCase):
self.assertDictEqual({'result': 'test'}, wf_ex.output)
# TODO(rakhmerov): Should periodically fail now. Fix race condition.
@testtools.skip('Skip until the race condition is fixed.')
def test_short_action(self):
wf_service.create_workflows(WF_SHORT_ACTION)

View File

@ -1,4 +1,5 @@
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -26,6 +27,9 @@ FACTORY_METHOD_NAME = ('mistral.tests.unit.services.test_scheduler.'
'factory_method')
TARGET_METHOD_NAME = FACTORY_METHOD_NAME
DELAY = 1.5
WAIT = DELAY * 3
def factory_method():
return type(
@ -47,12 +51,11 @@ class SchedulerServiceTest(base.DbTestCase):
def test_scheduler_with_factory(self, factory):
target_method = 'run_something'
method_args = {'name': 'task', 'id': '123'}
delay = 1.5
scheduler.schedule_call(
FACTORY_METHOD_NAME,
target_method,
delay,
DELAY,
**method_args
)
@ -67,7 +70,7 @@ class SchedulerServiceTest(base.DbTestCase):
self.assertIn('name', call['method_arguments'])
eventlet.sleep(delay)
eventlet.sleep(WAIT)
factory().run_something.assert_called_once_with(name='task', id='123')
@ -79,12 +82,11 @@ class SchedulerServiceTest(base.DbTestCase):
@mock.patch(TARGET_METHOD_NAME)
def test_scheduler_without_factory(self, method):
method_args = {'name': 'task', 'id': '321'}
delay = 1.5
scheduler.schedule_call(
None,
TARGET_METHOD_NAME,
delay,
DELAY,
**method_args
)
@ -98,7 +100,7 @@ class SchedulerServiceTest(base.DbTestCase):
self.assertIn('name', call['method_arguments'])
eventlet.sleep(delay)
eventlet.sleep(WAIT)
method.assert_called_once_with(name='task', id='321')
@ -123,18 +125,16 @@ class SchedulerServiceTest(base.DbTestCase):
'result': 'mistral.workflow.utils.ResultSerializer'
}
delay = 1.5
scheduler.schedule_call(
FACTORY_METHOD_NAME,
target_method,
delay,
DELAY,
serializers=serializers,
**method_args
)
calls = db_api.get_delayed_calls_to_start(
datetime.datetime.now() + datetime.timedelta(seconds=2)
datetime.datetime.now() + datetime.timedelta(seconds=WAIT)
)
call = self._assert_single_item(
@ -144,7 +144,7 @@ class SchedulerServiceTest(base.DbTestCase):
self.assertIn('name', call['method_arguments'])
eventlet.sleep(delay)
eventlet.sleep(WAIT)
result = factory().run_something.call_args[1].get('result')
@ -167,12 +167,11 @@ class SchedulerServiceTest(base.DbTestCase):
self.addCleanup(stop_thread_groups)
method_args = {'name': 'task', 'id': '321'}
delay = 1.5
scheduler.schedule_call(
None,
TARGET_METHOD_NAME,
delay,
DELAY,
**method_args
)
@ -181,7 +180,7 @@ class SchedulerServiceTest(base.DbTestCase):
self._assert_single_item(calls, target_method_name=TARGET_METHOD_NAME)
eventlet.sleep(delay)
eventlet.sleep(WAIT)
method.assert_called_once_with(name='task', id='321')

View File

@ -24,6 +24,8 @@ function usage {
echo " Default: .venv"
echo " --tools-path <dir> Location of the tools directory"
echo " Default: \$(pwd)"
echo " --db-type <name> Database type"
echo " Default: sqlite"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
@ -59,6 +61,10 @@ function process_options {
(( i++ ))
tools_path=${!i}
;;
--db-type)
(( i++ ))
db_type=${!i}
;;
-*) testropts="$testropts ${!i}";;
*) testrargs="$testrargs ${!i}"
esac
@ -66,6 +72,7 @@ function process_options {
done
}
db_type=${db_type:-sqlite}
tool_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
venv_dir=${venv_name:-.venv}
@ -102,6 +109,48 @@ if [ $no_site_packages -eq 1 ]; then
fi
function setup_db {
case ${db_type} in
sqlite )
rm -f tests.sqlite
;;
postgresql )
echo "Setting up Mistral DB in PostgreSQL"
# Create the user and database.
# Assume trust is setup on localhost in the postgresql config file.
sudo -u postgres psql -c "DROP DATABASE IF EXISTS mistral;"
sudo -u postgres psql -c "DROP USER IF EXISTS mistral;"
sudo -u postgres psql -c "CREATE USER mistral WITH ENCRYPTED PASSWORD 'm1stral';"
sudo -u postgres psql -c "CREATE DATABASE mistral OWNER mistral;"
;;
esac
}
function setup_db_pylib {
case ${db_type} in
postgresql )
echo "Installing python library for PostgreSQL."
${wrapper} pip install psycopg2
;;
esac
}
function setup_db_cfg {
case ${db_type} in
sqlite )
rm -f .mistral.conf
;;
postgresql )
cp ./etc/mistral.conf.sample.postgresql .mistral.conf
;;
esac
}
function cleanup {
rm -f .mistral.conf
}
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
@ -142,6 +191,7 @@ function run_tests {
set -e
copy_subunit_log
cleanup
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
@ -210,9 +260,11 @@ if [ $just_pep8 -eq 1 ]; then
fi
if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
setup_db
fi
setup_db_pylib
setup_db_cfg
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,