Complete session handling refactoring

All parts of the system have been refactored. Session creation has been
simplified: a session is now created only when it is actually needed, and
its use is maximized, instead of one being produced uncontrollably for
each and every block of code that needs to communicate with the db.

This refactoring simplifies db data transfer management and makes it
more explicit for further development.

Unit and functional tests have been updated.

Change-Id: I15a980432f9da5cac5f04b1f4f4af64d1a002b60
This commit is contained in:
Artem Roma 2014-02-24 15:49:38 +02:00
parent fbf9be7c2f
commit b296839ccc
13 changed files with 282 additions and 305 deletions

View File

@ -56,18 +56,18 @@ def main():
if getattr(cli_args, 'after_init_hook'): if getattr(cli_args, 'after_init_hook'):
return nailgun_hooks.after_initialization_environment_hook() return nailgun_hooks.after_initialization_environment_hook()
#performing cleaning of expired data (if any) in db with engine.contexted_session(pecan.conf.dbpath) as session:
mixins.clean_db(engine.get_engine()) #performing cleaning of expired data (if any) in db
mixins.clean_db(session)
#discover testsets and their tests #discover testsets and their tests
CORE_PATH = pecan.conf.debug_tests if \ CORE_PATH = pecan.conf.debug_tests if \
pecan.conf.get('debug_tests') else 'fuel_health' pecan.conf.get('debug_tests') else 'fuel_health'
session = engine.get_session() nose_discovery.discovery(path=CORE_PATH, session=session)
nose_discovery.discovery(path=CORE_PATH, session=session)
#cache needed data from test repository #cache needed data from test repository
mixins.cache_test_repository(session) mixins.cache_test_repository(session)
host, port = pecan.conf.server.host, pecan.conf.server.port host, port = pecan.conf.server.host, pecan.conf.server.port
srv = pywsgi.WSGIServer((host, int(port)), root) srv = pywsgi.WSGIServer((host, int(port)), root)

View File

@ -16,10 +16,13 @@
import requests import requests
from pecan import conf from pecan import conf
from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload
import logging
from fuel_plugin.ostf_adapter.storage import models, engine from fuel_plugin.ostf_adapter.storage import models
from fuel_plugin.ostf_adapter.nose_plugin import nose_utils from fuel_plugin.ostf_adapter.nose_plugin import nose_utils
LOG = logging.getLogger(__name__)
REQ_SES = requests.Session() REQ_SES = requests.Session()
REQ_SES.trust_env = False REQ_SES.trust_env = False
@ -29,36 +32,33 @@ NAILGUN_API_URL = 'api/clusters/{0}'
TEST_REPOSITORY = [] TEST_REPOSITORY = []
def clean_db(eng): def clean_db(session):
conn = eng.connect() session.query(models.ClusterTestingPattern).delete()
session.query(models.ClusterState).delete()
session.query(models.TestSet).delete()
conn.execute('delete from cluster_testing_pattern;') session.commit()
conn.execute('delete from cluster_state;')
conn.execute('delete from test_sets;')
conn.close()
def cache_test_repository(session): def cache_test_repository(session):
with session.begin(subtransactions=True): test_repository = session.query(models.TestSet)\
test_repository = session.query(models.TestSet)\ .options(joinedload('tests'))\
.options(joinedload('tests'))\ .all()
.all()
crucial_tests_attrs = ['name', 'deployment_tags'] crucial_tests_attrs = ['name', 'deployment_tags']
for test_set in test_repository: for test_set in test_repository:
data_elem = dict() data_elem = dict()
data_elem['test_set_id'] = test_set.id data_elem['test_set_id'] = test_set.id
data_elem['deployment_tags'] = test_set.deployment_tags data_elem['deployment_tags'] = test_set.deployment_tags
data_elem['tests'] = [] data_elem['tests'] = []
for test in test_set.tests: for test in test_set.tests:
test_dict = dict([(attr_name, getattr(test, attr_name)) test_dict = dict([(attr_name, getattr(test, attr_name))
for attr_name in crucial_tests_attrs]) for attr_name in crucial_tests_attrs])
data_elem['tests'].append(test_dict) data_elem['tests'].append(test_dict)
TEST_REPOSITORY.append(data_elem) TEST_REPOSITORY.append(data_elem)
def discovery_check(session, cluster): def discovery_check(session, cluster):
@ -69,44 +69,38 @@ def discovery_check(session, cluster):
'deployment_tags': cluster_deployment_args 'deployment_tags': cluster_deployment_args
} }
with session.begin(subtransactions=True): cluster_state = session.query(models.ClusterState)\
cluster_state = session.query(models.ClusterState)\ .filter_by(id=cluster_data['cluster_id'])\
.filter_by(id=cluster_data['cluster_id'])\ .first()
.first()
if not cluster_state: if not cluster_state:
with session.begin(subtransactions=True): session.add(
session.add( models.ClusterState(
models.ClusterState( id=cluster_data['cluster_id'],
id=cluster_data['cluster_id'], deployment_tags=list(cluster_data['deployment_tags'])
deployment_tags=list(cluster_data['deployment_tags'])
)
) )
)
with session.begin(subtransactions=True): #flush data to db, cuz _add_cluster_testing_pattern
_add_cluster_testing_pattern(session, cluster_data) #is dependent on it
session.flush()
_add_cluster_testing_pattern(session, cluster_data)
return return
old_deployment_tags = cluster_state.deployment_tags old_deployment_tags = cluster_state.deployment_tags
if set(old_deployment_tags) != cluster_data['deployment_tags']: if set(old_deployment_tags) != cluster_data['deployment_tags']:
with session.begin(subtransactions=True): session.query(models.ClusterTestingPattern)\
#delete testruns and their tests if cluster was redeployed .filter_by(cluster_id=cluster_state.id)\
session.query(models.ClusterTestingPattern)\ .delete()
.filter_by(cluster_id=cluster_state.id)\
.delete()
#separate block "with" is need here to resolve _add_cluster_testing_pattern(session, cluster_data)
#situation where previous deletion blocks table
#that is using in following update
with session.begin(subtransactions=True):
#make "rediscovering" of testsets for redeployed cluster
_add_cluster_testing_pattern(session, cluster_data)
cluster_state.deployment_tags = \ cluster_state.deployment_tags = \
list(cluster_data['deployment_tags']) list(cluster_data['deployment_tags'])
session.merge(cluster_state) session.merge(cluster_state)
def _get_cluster_depl_tags(cluster_id): def _get_cluster_depl_tags(cluster_id):
@ -166,29 +160,28 @@ def _get_cluster_depl_tags(cluster_id):
def _add_cluster_testing_pattern(session, cluster_data): def _add_cluster_testing_pattern(session, cluster_data):
with session.begin(subtransactions=True): to_database = []
to_database = [] for test_set in TEST_REPOSITORY:
for test_set in TEST_REPOSITORY: if nose_utils.process_deployment_tags(
if nose_utils.process_deployment_tags( cluster_data['deployment_tags'],
cluster_data['deployment_tags'], test_set['deployment_tags']
test_set['deployment_tags'] ):
):
testing_pattern = dict() testing_pattern = dict()
testing_pattern['cluster_id'] = cluster_data['cluster_id'] testing_pattern['cluster_id'] = cluster_data['cluster_id']
testing_pattern['test_set_id'] = test_set['test_set_id'] testing_pattern['test_set_id'] = test_set['test_set_id']
testing_pattern['tests'] = [] testing_pattern['tests'] = []
for test in test_set['tests']: for test in test_set['tests']:
if nose_utils.process_deployment_tags( if nose_utils.process_deployment_tags(
cluster_data['deployment_tags'], cluster_data['deployment_tags'],
test['deployment_tags'] test['deployment_tags']
): ):
testing_pattern['tests'].append(test['name']) testing_pattern['tests'].append(test['name'])
to_database.append( to_database.append(
models.ClusterTestingPattern(**testing_pattern) models.ClusterTestingPattern(**testing_pattern)
) )
session.add_all(to_database) session.add_all(to_database)

View File

@ -19,7 +19,7 @@ from pecan import conf
from fuel_plugin.ostf_adapter.nose_plugin import nose_storage_plugin from fuel_plugin.ostf_adapter.nose_plugin import nose_storage_plugin
from fuel_plugin.ostf_adapter.nose_plugin import nose_test_runner from fuel_plugin.ostf_adapter.nose_plugin import nose_test_runner
from fuel_plugin.ostf_adapter.nose_plugin import nose_utils from fuel_plugin.ostf_adapter.nose_plugin import nose_utils
from fuel_plugin.ostf_adapter.storage import storage_utils, engine, models from fuel_plugin.ostf_adapter.storage import engine, models
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -29,9 +29,6 @@ class NoseDriver(object):
def __init__(self): def __init__(self):
LOG.warning('Initializing Nose Driver') LOG.warning('Initializing Nose Driver')
self._named_threads = {} self._named_threads = {}
session = engine.get_session()
with session.begin(subtransactions=True):
storage_utils.update_all_running_test_runs(session)
def check_current_running(self, unique_id): def check_current_running(self, unique_id):
return unique_id in self._named_threads return unique_id in self._named_threads
@ -46,25 +43,29 @@ class NoseDriver(object):
argv_add = [test_set.test_path] + test_set.additional_arguments argv_add = [test_set.test_path] + test_set.additional_arguments
self._named_threads[test_run.id] = nose_utils.run_proc( self._named_threads[test_run.id] = nose_utils.run_proc(
self._run_tests, test_run.id, test_run.cluster_id, argv_add) self._run_tests,
conf.dbpath,
test_run.id,
test_run.cluster_id,
argv_add
)
def _run_tests(self, test_run_id, cluster_id, argv_add): def _run_tests(self, dbpath, test_run_id, cluster_id, argv_add):
session = engine.get_session() with engine.contexted_session(dbpath) as session:
try: try:
nose_test_runner.SilentTestProgram( nose_test_runner.SilentTestProgram(
addplugins=[nose_storage_plugin.StoragePlugin( addplugins=[nose_storage_plugin.StoragePlugin(
test_run_id, str(cluster_id))], session, test_run_id, str(cluster_id))],
exit=False, exit=False,
argv=['ostf_tests'] + argv_add) argv=['ostf_tests'] + argv_add)
self._named_threads.pop(int(test_run_id), None) self._named_threads.pop(int(test_run_id), None)
except Exception: except Exception:
LOG.exception('Test run ID: %s', test_run_id) LOG.exception('Test run ID: %s', test_run_id)
finally: finally:
models.TestRun.update_test_run( models.TestRun.update_test_run(
session, test_run_id, status='finished') session, test_run_id, status='finished')
def kill(self, test_run_id, cluster_id, cleanup=None): def kill(self, session, test_run_id, cluster_id, cleanup=None):
session = engine.get_session()
if test_run_id in self._named_threads: if test_run_id in self._named_threads:
try: try:
@ -83,6 +84,7 @@ class NoseDriver(object):
if cleanup: if cleanup:
nose_utils.run_proc( nose_utils.run_proc(
self._clean_up, self._clean_up,
conf.dbpath,
test_run_id, test_run_id,
cluster_id, cluster_id,
cleanup) cleanup)
@ -93,31 +95,30 @@ class NoseDriver(object):
return True return True
return False return False
def _clean_up(self, test_run_id, cluster_id, cleanup): def _clean_up(self, dbpath, test_run_id, cluster_id, cleanup):
session = engine.get_session() with engine.contexted_session(dbpath) as session:
#need for performing proper cleaning up for current cluster
cluster_deployment_info = \
session.query(models.ClusterState.deployment_tags)\
.filter_by(id=cluster_id)\
.scalar()
#need for performing proper cleaning up for current cluster try:
cluster_deployment_info = \ module_obj = __import__(cleanup, -1)
session.query(models.ClusterState.deployment_tags)\
.filter_by(id=cluster_id)\
.scalar()
try: os.environ['NAILGUN_HOST'] = str(conf.nailgun.host)
module_obj = __import__(cleanup, -1) os.environ['NAILGUN_PORT'] = str(conf.nailgun.port)
os.environ['CLUSTER_ID'] = str(cluster_id)
os.environ['NAILGUN_HOST'] = str(conf.nailgun.host) module_obj.cleanup.cleanup(cluster_deployment_info)
os.environ['NAILGUN_PORT'] = str(conf.nailgun.port)
os.environ['CLUSTER_ID'] = str(cluster_id)
module_obj.cleanup.cleanup(cluster_deployment_info) except Exception:
LOG.exception(
'Cleanup error. Test Run ID %s. Cluster ID %s',
test_run_id,
cluster_id
)
except Exception: finally:
LOG.exception( models.TestRun.update_test_run(
'Cleanup error. Test Run ID %s. Cluster ID %s', session, test_run_id, status='finished')
test_run_id,
cluster_id
)
finally:
models.TestRun.update_test_run(
session, test_run_id, status='finished')

View File

@ -52,51 +52,54 @@ class DiscoveryPlugin(plugins.Plugin):
tag.lower() for tag in profile.get('deployment_tags', []) tag.lower() for tag in profile.get('deployment_tags', [])
] ]
with self.session.begin(subtransactions=True): try:
try: test_set = models.TestSet(**profile)
test_set = models.TestSet(**profile) self.session.merge(test_set)
self.session.merge(test_set) self.test_sets[test_set.id] = test_set
self.test_sets[test_set.id] = test_set
except Exception as e: #flush test_sets data into db
LOG.error( self.session.commit()
('An error has occured while processing' except Exception as e:
' data entity for %s. Error message: %s'), LOG.error(
module.__name__, ('An error has occured while processing'
e.message ' data entity for %s. Error message: %s'),
) module.__name__,
LOG.info('%s discovered.', module.__name__) e.message
)
LOG.info('%s discovered.', module.__name__)
def addSuccess(self, test): def addSuccess(self, test):
test_id = test.id() test_id = test.id()
for test_set_id in self.test_sets.keys(): for test_set_id in self.test_sets.keys():
if test_set_id in test_id: if test_set_id in test_id:
with self.session.begin(subtransactions=True): data = dict()
data = dict() (data['title'], data['description'],
data['duration'], data['deployment_tags']) = \
nose_utils.get_description(test)
(data['title'], data['description'], data.update(
data['duration'], data['deployment_tags']) = \ {
nose_utils.get_description(test) 'test_set_id': test_set_id,
'name': test_id
}
)
data.update( try:
{ test_obj = models.Test(**data)
'test_set_id': test_set_id, self.session.merge(test_obj)
'name': test_id
} #flush tests data into db
self.session.commit()
except Exception as e:
LOG.error(
('An error has occured while '
'processing data entity for '
'test with name %s. Error message: %s'),
test_id,
e.message
) )
LOG.info('%s added for %s', test_id, test_set_id)
try:
test_obj = models.Test(**data)
self.session.merge(test_obj)
except Exception as e:
LOG.error(
('An error has occured while '
'processing data entity for '
'test with name %s. Error message: %s'),
test_id,
e.message
)
LOG.info('%s added for %s', test_id, test_set_id)
def discovery(path, session): def discovery(path, session):

View File

@ -20,7 +20,7 @@ from pecan import conf
import unittest2 import unittest2
from fuel_plugin.ostf_adapter.nose_plugin import nose_utils from fuel_plugin.ostf_adapter.nose_plugin import nose_utils
from fuel_plugin.ostf_adapter.storage import models, engine from fuel_plugin.ostf_adapter.storage import models
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -31,15 +31,13 @@ class StoragePlugin(plugins.Plugin):
name = 'storage' name = 'storage'
score = 15000 score = 15000
def __init__( def __init__(self, session, test_run_id, cluster_id):
self, test_run_id, cluster_id): self.session = session
self.test_run_id = test_run_id self.test_run_id = test_run_id
self.cluster_id = cluster_id self.cluster_id = cluster_id
super(StoragePlugin, self).__init__() super(StoragePlugin, self).__init__()
self._start_time = None self._start_time = None
self.session = engine.get_session()
def options(self, parser, env=os.environ): def options(self, parser, env=os.environ):
env['NAILGUN_HOST'] = str(conf.nailgun.host) env['NAILGUN_HOST'] = str(conf.nailgun.host)
env['NAILGUN_PORT'] = str(conf.nailgun.port) env['NAILGUN_PORT'] = str(conf.nailgun.port)
@ -64,17 +62,16 @@ class StoragePlugin(plugins.Plugin):
data['step'], data['message'] = \ data['step'], data['message'] = \
nose_utils.format_failure_message(exc_value) nose_utils.format_failure_message(exc_value)
with self.session.begin(subtransactions=True): tests_to_update = nose_utils.get_tests_ids_to_update(test)
tests_to_update = nose_utils.get_tests_ids_to_update(test) for test_id in tests_to_update:
models.Test.add_result(
for test_id in tests_to_update: self.session,
models.Test.add_result( self.test_run_id,
self.session, test_id,
self.test_run_id, data
test_id, )
data self.session.commit()
)
def addSuccess(self, test, capt=None): def addSuccess(self, test, capt=None):
self._add_message(test, status='success') self._add_message(test, status='success')

View File

@ -12,48 +12,22 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from pecan import conf from contextlib import contextmanager
from sqlalchemy import create_engine, orm, pool from sqlalchemy import create_engine, orm
_ENGINE = None @contextmanager
_MAKER = None def contexted_session(dbpath):
_REDISINST = None '''Allows to handle session via context manager
'''
engine = create_engine(dbpath)
session = orm.Session(bind=engine)
try:
def get_session(autocommit=True, expire_on_commit=False): yield session
"""Return a SQLAlchemy session.""" session.commit()
global _MAKER except Exception:
global _SLAVE_MAKER session.rollback()
maker = _MAKER raise
finally:
if maker is None: session.close()
engine = get_engine()
maker = get_maker(engine, autocommit, expire_on_commit)
else:
_MAKER = maker
session = maker()
return session
def get_engine(dbpath=None, pool_type=None):
"""Return a SQLAlchemy engine."""
global _ENGINE
engine = _ENGINE
if engine is None:
dbpath = dbpath if dbpath is not None else conf.dbpath
engine = create_engine(dbpath,
poolclass=pool_type or pool.NullPool)
_ENGINE = engine
return engine
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return orm.sessionmaker(
bind=engine,
autocommit=autocommit,
expire_on_commit=expire_on_commit)

View File

@ -250,13 +250,8 @@ class TestRun(BASE):
@property @property
def enabled_tests(self): def enabled_tests(self):
session = engine.get_session()
tests = session.query(Test)\
.filter_by(test_run_id=self.id)\
.order_by(Test.name)
return [test.name for test return [test.name for test
in tests if test.status != 'disabled'] in self.tests if test.status != 'disabled']
def is_finished(self): def is_finished(self):
return self.status == 'finished' return self.status == 'finished'
@ -372,17 +367,16 @@ class TestRun(BASE):
if cls.is_last_running(session, test_set.id, if cls.is_last_running(session, test_set.id,
metadata['cluster_id']): metadata['cluster_id']):
with session.begin(subtransactions=True): test_run = cls.add_test_run(
test_run = cls.add_test_run( session, test_set.id,
session, test_set.id, metadata['cluster_id'], tests=tests)
metadata['cluster_id'], tests=tests)
retvalue = test_run.frontend #flush test_run data to db
session.close() session.flush()
plugin.run(test_run, test_set) plugin.run(test_run, test_set)
return retvalue return test_run.frontend
return {} return {}
def restart(self, session, tests=None): def restart(self, session, tests=None):
@ -406,7 +400,7 @@ class TestRun(BASE):
""" """
plugin = nose_plugin.get_plugin(self.test_set.driver) plugin = nose_plugin.get_plugin(self.test_set.driver)
killed = plugin.kill( killed = plugin.kill(
self.id, self.cluster_id, session, self.id, self.cluster_id,
cleanup=self.test_set.cleanup_path) cleanup=self.test_set.cleanup_path)
if killed: if killed:
Test.update_running_tests( Test.update_running_tests(

View File

@ -48,7 +48,8 @@ def setup_config(custom_pecan_config):
def setup_app(config=None): def setup_app(config=None):
setup_config(config or {}) setup_config(config or {})
app_hooks = [hooks.SessionHook(), hooks.ExceptionHandling()] app_hooks = [hooks.SessionHook(dbpath=pecan.conf.dbpath),
hooks.ExceptionHandling()]
app = pecan.make_app( app = pecan.make_app(
pecan.conf.app.root, pecan.conf.app.root,
debug=pecan.conf.debug, debug=pecan.conf.debug,

View File

@ -45,19 +45,19 @@ class TestsetsController(BaseRestController):
@expose('json') @expose('json')
def get(self, cluster): def get(self, cluster):
mixins.discovery_check(request.session, cluster) mixins.discovery_check(request.session, cluster)
with request.session.begin(subtransactions=True):
needed_testsets = request.session\
.query(models.ClusterTestingPattern.test_set_id)\
.filter_by(cluster_id=cluster)
test_sets = request.session.query(models.TestSet)\ needed_testsets = request.session\
.filter(models.TestSet.id.in_(needed_testsets))\ .query(models.ClusterTestingPattern.test_set_id)\
.order_by(models.TestSet.test_runs_ordering_priority)\ .filter_by(cluster_id=cluster)
.all()
if test_sets: test_sets = request.session.query(models.TestSet)\
return [item.frontend for item in test_sets] .filter(models.TestSet.id.in_(needed_testsets))\
return {} .order_by(models.TestSet.test_runs_ordering_priority)\
.all()
if test_sets:
return [item.frontend for item in test_sets]
return {}
class TestsController(BaseRestController): class TestsController(BaseRestController):
@ -65,25 +65,24 @@ class TestsController(BaseRestController):
@expose('json') @expose('json')
def get(self, cluster): def get(self, cluster):
mixins.discovery_check(request.session, cluster) mixins.discovery_check(request.session, cluster)
with request.session.begin(subtransactions=True): needed_tests_list = request.session\
needed_tests_list = request.session\ .query(models.ClusterTestingPattern.tests)\
.query(models.ClusterTestingPattern.tests)\ .filter_by(cluster_id=cluster)
.filter_by(cluster_id=cluster)
result = [] result = []
for tests in needed_tests_list: for tests in needed_tests_list:
tests_to_return = request.session.query(models.Test)\ tests_to_return = request.session.query(models.Test)\
.filter(models.Test.name.in_(tests[0]))\ .filter(models.Test.name.in_(tests[0]))\
.all() .all()
result.extend(tests_to_return) result.extend(tests_to_return)
result.sort(key=lambda test: test.name) result.sort(key=lambda test: test.name)
if result: if result:
return [item.frontend for item in result] return [item.frontend for item in result]
return {} return {}
class TestrunsController(BaseRestController): class TestrunsController(BaseRestController):
@ -94,32 +93,29 @@ class TestrunsController(BaseRestController):
@expose('json') @expose('json')
def get_all(self): def get_all(self):
with request.session.begin(subtransactions=True): test_runs = request.session.query(models.TestRun).all()
test_runs = request.session.query(models.TestRun).all()
return [item.frontend for item in test_runs] return [item.frontend for item in test_runs]
@expose('json') @expose('json')
def get_one(self, test_run_id): def get_one(self, test_run_id):
with request.session.begin(subtransactions=True): test_run = request.session.query(models.TestRun)\
test_run = request.session.query(models.TestRun)\ .filter_by(id=test_run_id).first()
.filter_by(id=test_run_id).first() if test_run and isinstance(test_run, models.TestRun):
if test_run and isinstance(test_run, models.TestRun): return test_run.frontend
return test_run.frontend return {}
return {}
@expose('json') @expose('json')
def get_last(self, cluster_id): def get_last(self, cluster_id):
with request.session.begin(subtransactions=True): test_run_ids = request.session.query(func.max(models.TestRun.id)) \
test_run_ids = request.session.query(func.max(models.TestRun.id)) \ .group_by(models.TestRun.test_set_id)\
.group_by(models.TestRun.test_set_id)\ .filter_by(cluster_id=cluster_id)
.filter_by(cluster_id=cluster_id)
test_runs = request.session.query(models.TestRun)\ test_runs = request.session.query(models.TestRun)\
.options(joinedload('tests'))\ .options(joinedload('tests'))\
.filter(models.TestRun.id.in_(test_run_ids)) .filter(models.TestRun.id.in_(test_run_ids))
return [item.frontend for item in test_runs] return [item.frontend for item in test_runs]
@expose('json') @expose('json')
def post(self): def post(self):
@ -131,11 +127,10 @@ class TestrunsController(BaseRestController):
metadata = test_run['metadata'] metadata = test_run['metadata']
tests = test_run.get('tests', []) tests = test_run.get('tests', [])
with request.session.begin(subtransactions=True): test_set = models.TestSet.get_test_set(
test_set = models.TestSet.get_test_set( request.session,
request.session, test_set
test_set )
)
test_run = models.TestRun.start( test_run = models.TestRun.start(
request.session, request.session,

View File

@ -13,9 +13,9 @@
# under the License. # under the License.
import logging import logging
from sqlalchemy import create_engine, orm, pool
from pecan import hooks from pecan import hooks
from fuel_plugin.ostf_adapter.storage import engine
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -28,5 +28,23 @@ class ExceptionHandling(hooks.PecanHook):
class SessionHook(hooks.PecanHook): class SessionHook(hooks.PecanHook):
def __init__(self, dbpath):
self.engine = create_engine(dbpath, poolclass=pool.NullPool)
def before(self, state): def before(self, state):
state.request.session = engine.get_session() state.request.session = orm.Session(bind=self.engine)
def after(self, state):
try:
state.request.session.commit()
except Exception:
state.request.session.rollback()
raise
finally:
state.request.session.close()
def on_error(self, state, e):
LOG.exception('Pecan state %r', state)
state.session.rollback()
state.session.close()

View File

@ -385,7 +385,7 @@ class AdapterTests(BaseAdapterTest):
]) ])
self.compare(resp, assertions) self.compare(resp, assertions)
time.sleep(5) time.sleep(10)
resp = self.client.testruns_last(cluster_id) resp = self.client.testruns_last(cluster_id)
@ -420,7 +420,8 @@ class AdapterTests(BaseAdapterTest):
self.client.run_with_timeout(testset, tests, cluster_id, 70) self.client.run_with_timeout(testset, tests, cluster_id, 70)
self.client.restart_with_timeout(testset, tests, cluster_id, 10) self.client.restart_with_timeout(testset, tests, cluster_id, 10)
resp = self.client.restart_tests_last(testset, disabled_test, cluster_id) resp = self.client.restart_tests_last(testset, disabled_test,
cluster_id)
assertions = Response([ assertions = Response([
{ {

View File

@ -68,10 +68,9 @@ class BaseWSGITest(unittest2.TestCase):
self.Session.configure( self.Session.configure(
bind=self.connection bind=self.connection
) )
self.session = self.Session(autocommit=True) self.session = self.Session()
with self.session.begin(subtransactions=True): test_sets = self.session.query(models.TestSet).all()
test_sets = self.session.query(models.TestSet).all()
#need this if start unit tests in conjuction with integration #need this if start unit tests in conjuction with integration
if not test_sets: if not test_sets:
@ -120,23 +119,22 @@ class BaseWSGITest(unittest2.TestCase):
def is_background_working(self): def is_background_working(self):
is_working = True is_working = True
with self.session.begin(subtransactions=True): cluster_state = self.session.query(models.ClusterState)\
cluster_state = self.session.query(models.ClusterState)\ .filter_by(id=self.expected['cluster']['id'])\
.filter_by(id=self.expected['cluster']['id'])\ .one()
.one() is_working = is_working and set(cluster_state.deployment_tags) == \
is_working = is_working and set(cluster_state.deployment_tags) == \ self.expected['cluster']['deployment_tags']
self.expected['cluster']['deployment_tags']
cluster_testing_patterns = self.session\ cluster_testing_patterns = self.session\
.query(models.ClusterTestingPattern)\ .query(models.ClusterTestingPattern)\
.filter_by(cluster_id=self.expected['cluster']['id'])\ .filter_by(cluster_id=self.expected['cluster']['id'])\
.all() .all()
for testing_pattern in cluster_testing_patterns: for testing_pattern in cluster_testing_patterns:
is_working = is_working and \ is_working = is_working and \
(testing_pattern.test_set_id in self.expected['test_sets']) (testing_pattern.test_set_id in self.expected['test_sets'])
is_working = is_working and set(testing_pattern.tests)\ is_working = is_working and set(testing_pattern.tests)\
.issubset(set(self.expected['tests'])) .issubset(set(self.expected['tests']))
return is_working return is_working

View File

@ -140,7 +140,7 @@ class TestTestRunsPostController(TestTestRunsController):
self.expected['testrun_post'][key] == res[key] self.expected['testrun_post'][key] == res[key]
) )
test_run = self.session.query(models.TestRun)\ self.session.query(models.TestRun)\
.filter_by(test_set_id=self.expected['testrun_post']['testset'])\ .filter_by(test_set_id=self.expected['testrun_post']['testset'])\
.filter_by(cluster_id=self.expected['testrun_post']['cluster_id'])\ .filter_by(cluster_id=self.expected['testrun_post']['cluster_id'])\
.one() .one()
@ -164,10 +164,12 @@ class TestTestRunsPutController(TestTestRunsController):
super(TestTestRunsPutController, self).setUp() super(TestTestRunsPutController, self).setUp()
self.test_run = self.controller.post()[0] self.test_run = self.controller.post()[0]
with self.session.begin(subtransactions=True): self.session.query(models.Test)\
self.session.query(models.Test)\ .filter_by(test_run_id=int(self.test_run['id']))\
.filter_by(test_run_id=int(self.test_run['id']))\ .update({'status': 'running'})
.update({'status': 'running'})
#flush data which test is depend on into db
self.session.commit()
self.request_mock.body = json.dumps( self.request_mock.body = json.dumps(
[{ [{