Add test_set dependency to models

Field "exclusive_testsets" was added to TestSet entity
in models.py. This data defines test sets hierarchy for building test
runs execution chains. New migration version was generated for new data
scheme. Fixture tests were updated to support the new logic.

Change-Id: I654ed7edeadad47164644b2ed580a103e26c111e
Implements: blueprint ostf-adapter-testruns-orchestrator
This commit is contained in:
Artem Roma 2014-03-03 16:18:06 +02:00
parent 8b8ae7bccd
commit 608f109b6f
8 changed files with 47 additions and 6 deletions

View File

@ -0,0 +1,31 @@
"""list_of_excl_testsets
Revision ID: 54904076d82d
Revises: 5133b1e66258
Create Date: 2014-02-13 18:57:46.854934
"""
# revision identifiers, used by Alembic.
revision = '54904076d82d'
down_revision = '5133b1e66258'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Apply this revision: add the ``exclusive_testsets`` column.

    Adds a nullable ``ARRAY(String(128))`` column to the ``test_sets``
    table; it lists test sets that must not run concurrently with the
    owning test set.
    """
    ### commands auto generated by Alembic - please adjust! ###
    exclusive_column = sa.Column(
        'exclusive_testsets',
        postgresql.ARRAY(sa.String(length=128)),
        nullable=True,
    )
    op.add_column('test_sets', exclusive_column)
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop ``exclusive_testsets`` from ``test_sets``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('test_sets', 'exclusive_testsets')
    ### end Alembic commands ###

View File

@ -88,6 +88,10 @@ class TestSet(BASE):
deployment_tags = sa.Column(ARRAY(sa.String(64)))
test_runs_ordering_priority = sa.Column(sa.Integer)
#list of test sets that cannot be executed simultaneously
#with current test set
exclusive_testsets = sa.Column(ARRAY(sa.String(128)))
tests = relationship(
'Test',
backref='test_set',

View File

@ -19,7 +19,8 @@ __profile__ = {
"deployment_types_tests/alternative_depl_tags_test.py"),
"description": "Fake testset to test alternative deployment tags",
"deployment_tags": ["alternative | alternative_test"],
"test_runs_ordering_priority": 5
"test_runs_ordering_priority": 5,
"exclusive_testsets": []
}
import unittest

View File

@ -19,7 +19,8 @@ __profile__ = {
"ha_deployment_test.py"),
"description": "Fake tests for HA deployment",
"deployment_tags": ["Ha"],
"test_runs_ordering_priority": 3
"test_runs_ordering_priority": 3,
"exclusive_testsets": []
}
import unittest

View File

@ -19,7 +19,8 @@ __profile__ = {
"multinode_deployment.py"),
"description": "Fake tests for multinode deployment on ubuntu",
"deployment_tags": ["multinode", "ubuntu"],
"test_runs_ordering_priority": 4
"test_runs_ordering_priority": 4,
"exclusive_testsets": []
}
import unittest

View File

@ -18,7 +18,8 @@ __profile__ = {
"test_path": "fuel_plugin/tests/functional/dummy_tests/general_test.py",
"description": "General fake tests",
"deployment_tags": [],
"test_runs_ordering_priority": 1
"test_runs_ordering_priority": 1,
"exclusive_testsets": []
}
import time

View File

@ -18,7 +18,8 @@ __profile__ = {
"test_path": "fuel_plugin/tests/functional/dummy_tests/stopped_test.py",
"description": "Long running 25 secs fake tests",
"deployment_tags": [],
"test_runs_ordering_priority": 2
"test_runs_ordering_priority": 2,
"exclusive_testsets": []
}
import time

View File

@ -18,7 +18,8 @@ __profile__ = {
"test_path": "fuel_plugin/tests/functional/dummy_tests/test_with_error.py",
"description": "Test that introduces error while setting up",
"deployment_tags": ['test_error'],
"test_runs_ordering_priority": 6
"test_runs_ordering_priority": 6,
"exclusive_testsets": []
}
import unittest