Fix Redis cluster grow scenario tests

Cluster tests were enabled for Redis as they
should be working now.  The tests were disabled for
MariaDB (and PXC) until the galera issues are sorted out.

Support for neutron was added in cluster tests by
making sure that the proper nic is passed in on
cluster create/grow tests.

The changes for client non-caching were also implemented.

The scenario tests were also broken apart into
sections to facilitate easier manual testing.

Support for cluster upgrade (currently in review)
was also added so that when datastore implementations
are pushed up they can be tested immediately.

Change-Id: I637cbe30fa42528e8abd2a301baa2ec5048b61fc
Closes-Bug: 1645775
This commit is contained in:
Peter Stachowski 2016-10-13 22:35:23 -04:00
parent 07f547a7e9
commit 773069cb0b
6 changed files with 714 additions and 345 deletions

View File

@ -34,7 +34,7 @@ from trove.tests.api import users
from trove.tests.api import versions
from trove.tests.scenario import groups
from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups import cluster_actions_group
from trove.tests.scenario.groups import cluster_group
from trove.tests.scenario.groups import configuration_group
from trove.tests.scenario.groups import database_actions_group
from trove.tests.scenario.groups import guest_log_group
@ -148,9 +148,25 @@ base_groups = [
]
# Cluster-based groups
cluster_actions_groups = list(base_groups)
cluster_actions_groups.extend([cluster_actions_group.GROUP,
negative_cluster_actions_group.GROUP])
cluster_create_groups = list(base_groups)
cluster_create_groups.extend([groups.CLUSTER_DELETE_WAIT])
cluster_actions_groups = list(cluster_create_groups)
cluster_actions_groups.extend([groups.CLUSTER_ACTIONS_SHRINK_WAIT])
cluster_negative_actions_groups = list(negative_cluster_actions_group.GROUP)
cluster_root_groups = list(cluster_create_groups)
cluster_root_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ENABLE])
cluster_root_actions_groups = list(cluster_actions_groups)
cluster_root_actions_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ACTIONS])
cluster_upgrade_groups = list(cluster_create_groups)
cluster_upgrade_groups.extend([groups.CLUSTER_UPGRADE_WAIT])
cluster_groups = list(cluster_actions_groups)
cluster_groups.extend([cluster_group.GROUP])
# Single-instance based groups
instance_create_groups = list(base_groups)
@ -228,6 +244,12 @@ register(["backup"], backup_groups)
register(["backup_incremental"], backup_incremental_groups)
register(["backup_negative"], backup_negative_groups)
register(["cluster"], cluster_actions_groups)
register(["cluster_actions"], cluster_actions_groups)
register(["cluster_create"], cluster_create_groups)
register(["cluster_negative_actions"], cluster_negative_actions_groups)
register(["cluster_root"], cluster_root_groups)
register(["cluster_root_actions"], cluster_root_actions_groups)
register(["cluster_upgrade"], cluster_upgrade_groups)
register(["common"], common_groups)
register(["configuration"], configuration_groups)
register(["configuration_create"], configuration_create_groups)
@ -266,7 +288,9 @@ register(
database_actions_groups,
configuration_groups,
user_actions_groups, ],
multi=[cluster_actions_groups, ]
multi=[cluster_actions_groups,
cluster_negative_actions_groups,
cluster_root_actions_groups, ]
)
register(
@ -287,29 +311,6 @@ register(
multi=[]
)
register(
["postgresql_supported"],
single=[common_groups,
backup_incremental_groups,
database_actions_groups,
configuration_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_groups, ]
)
register(
["mysql_supported", "percona_supported"],
single=[common_groups,
backup_incremental_groups,
configuration_groups,
database_actions_groups,
instance_upgrade_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_promote_groups, ]
)
register(
["mariadb_supported"],
single=[common_groups,
@ -318,8 +319,11 @@ register(
database_actions_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_promote_groups,
cluster_actions_groups, ]
multi=[replication_promote_groups, ]
# multi=[cluster_actions_groups,
# cluster_negative_actions_groups,
# cluster_root_actions_groups,
# replication_promote_groups, ]
)
register(
@ -333,6 +337,41 @@ register(
multi=[cluster_actions_groups, ]
)
register(
["mysql_supported"],
single=[common_groups,
backup_incremental_groups,
configuration_groups,
database_actions_groups,
instance_upgrade_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_promote_groups, ]
)
register(
["percona_supported"],
single=[common_groups,
backup_incremental_groups,
configuration_groups,
database_actions_groups,
instance_upgrade_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_promote_groups, ]
)
register(
["postgresql_supported"],
single=[common_groups,
backup_incremental_groups,
database_actions_groups,
configuration_groups,
root_actions_groups,
user_actions_groups, ],
multi=[replication_groups, ]
)
register(
["pxc_supported"],
single=[common_groups,
@ -342,6 +381,9 @@ register(
root_actions_groups,
user_actions_groups, ],
multi=[]
# multi=[cluster_actions_groups,
# cluster_negative_actions_groups,
# cluster_root_actions_groups, ]
)
register(
@ -349,7 +391,9 @@ register(
single=[common_groups,
backup_groups,
backup_negative_groups, ],
multi=[replication_promote_groups, ]
multi=[cluster_actions_groups,
cluster_negative_actions_groups,
replication_promote_groups, ]
)
register(
@ -357,5 +401,7 @@ register(
single=[common_groups,
configuration_groups,
root_actions_groups, ],
multi=[cluster_actions_groups, ]
multi=[cluster_actions_groups,
cluster_negative_actions_groups,
cluster_root_actions_groups, ]
)

View File

@ -50,6 +50,34 @@ CFGGRP_INST_DELETE = "scenario.cfggrp_inst_delete_grp"
CFGGRP_INST_DELETE_WAIT = "scenario.cfggrp_inst_delete_wait_grp"
# Cluster Actions Group
CLUSTER_ACTIONS = "scenario.cluster_actions_grp"
CLUSTER_ACTIONS_ROOT_ENABLE = "scenario.cluster_actions_root_enable_grp"
CLUSTER_ACTIONS_ROOT_ACTIONS = "scenario.cluster_actions_root_actions_grp"
CLUSTER_ACTIONS_ROOT_GROW = "scenario.cluster_actions_root_grow_grp"
CLUSTER_ACTIONS_ROOT_SHRINK = "scenario.cluster_actions_root_shrink_grp"
CLUSTER_ACTIONS_GROW_SHRINK = "scenario.cluster_actions_grow_shrink_grp"
CLUSTER_ACTIONS_GROW = "scenario.cluster_actions_grow_grp"
CLUSTER_ACTIONS_GROW_WAIT = "scenario.cluster_actions_grow_wait_grp"
CLUSTER_ACTIONS_SHRINK = "scenario.cluster_actions_shrink_grp"
CLUSTER_ACTIONS_SHRINK_WAIT = "scenario.cluster_actions_shrink_wait_grp"
# Cluster Create Group (in cluster_actions file)
CLUSTER_CREATE = "scenario.cluster_create_grp"
CLUSTER_CREATE_WAIT = "scenario.cluster_create_wait_grp"
# Cluster Delete Group (in cluster_actions file)
CLUSTER_DELETE = "scenario.cluster_delete_grp"
CLUSTER_DELETE_WAIT = "scenario.cluster_delete_wait_grp"
# Cluster Upgrade Group (in cluster_actions file)
CLUSTER_UPGRADE = "scenario.cluster_upgrade_grp"
CLUSTER_UPGRADE_WAIT = "scenario.cluster_upgrade_wait_grp"
# Database Actions Group
DB_ACTION_CREATE = "scenario.db_action_create_grp"
DB_ACTION_DELETE = "scenario.db_action_delete_grp"

View File

@ -1,162 +0,0 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.cluster_actions_group"
class ClusterActionsRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'cluster_actions_runners'
_runner_cls = 'ClusterActionsRunner'
@test(groups=[GROUP],
runs_after_groups=[groups.MODULE_INST_DELETE,
groups.CFGGRP_INST_DELETE,
groups.INST_ACTIONS_RESIZE_WAIT,
groups.DB_ACTION_INST_DELETE,
groups.USER_ACTION_DELETE,
groups.USER_ACTION_INST_DELETE,
groups.ROOT_ACTION_INST_DELETE,
groups.REPL_INST_DELETE_WAIT,
groups.INST_DELETE_WAIT])
class ClusterActionsGroup(TestGroup):
def __init__(self):
super(ClusterActionsGroup, self).__init__(
ClusterActionsRunnerFactory.instance())
@test
def cluster_create(self):
"""Create a cluster."""
self.test_runner.run_cluster_create()
@test(depends_on=[cluster_create])
def cluster_list(self):
"""List the clusters."""
self.test_runner.run_cluster_list()
@test(depends_on=[cluster_create])
def cluster_show(self):
"""Show a cluster."""
self.test_runner.run_cluster_show()
@test(depends_on=[cluster_create])
def add_initial_cluster_data(self):
"""Add data to cluster."""
self.test_runner.run_add_initial_cluster_data()
@test(depends_on=[add_initial_cluster_data])
def verify_initial_cluster_data(self):
"""Verify the initial data exists on cluster."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_create])
def cluster_root_enable(self):
"""Root Enable."""
self.test_runner.run_cluster_root_enable()
@test(depends_on=[cluster_root_enable])
def verify_cluster_root_enable(self):
"""Verify Root Enable."""
self.test_runner.run_verify_cluster_root_enable()
@test(depends_on=[cluster_create],
runs_after=[verify_initial_cluster_data, verify_cluster_root_enable,
cluster_list, cluster_show])
def cluster_grow(self):
"""Grow cluster."""
self.test_runner.run_cluster_grow()
@test(depends_on=[cluster_grow])
def verify_cluster_root_enable_after_grow(self):
"""Verify Root Enabled after grow."""
self.test_runner.run_verify_cluster_root_enable()
@test(depends_on=[cluster_grow, add_initial_cluster_data])
def verify_initial_cluster_data_after_grow(self):
"""Verify the initial data still exists after cluster grow."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_grow],
runs_after=[verify_initial_cluster_data_after_grow])
def add_extra_cluster_data_after_grow(self):
"""Add more data to cluster."""
self.test_runner.run_add_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_grow])
def verify_extra_cluster_data_after_grow(self):
"""Verify the data added after cluster grow."""
self.test_runner.run_verify_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_grow],
runs_after=[verify_extra_cluster_data_after_grow])
def remove_extra_cluster_data_after_grow(self):
"""Remove the data added after cluster grow."""
self.test_runner.run_remove_extra_cluster_data()
@test(depends_on=[cluster_create],
runs_after=[remove_extra_cluster_data_after_grow,
verify_cluster_root_enable_after_grow])
def cluster_shrink(self):
"""Shrink cluster."""
self.test_runner.run_cluster_shrink()
@test(depends_on=[cluster_shrink])
def verify_cluster_root_enable_after_shrink(self):
"""Verify Root Enable after shrink."""
self.test_runner.run_verify_cluster_root_enable()
@test(depends_on=[cluster_shrink, add_initial_cluster_data])
def verify_initial_cluster_data_after_shrink(self):
"""Verify the initial data still exists after cluster shrink."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_shrink],
runs_after=[verify_initial_cluster_data_after_shrink])
def add_extra_cluster_data_after_shrink(self):
"""Add more data to cluster."""
self.test_runner.run_add_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_shrink])
def verify_extra_cluster_data_after_shrink(self):
"""Verify the data added after cluster shrink."""
self.test_runner.run_verify_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_shrink],
runs_after=[verify_extra_cluster_data_after_shrink])
def remove_extra_cluster_data_after_shrink(self):
"""Remove the data added after cluster shrink."""
self.test_runner.run_remove_extra_cluster_data()
@test(depends_on=[add_initial_cluster_data],
runs_after=[remove_extra_cluster_data_after_shrink])
def remove_initial_cluster_data(self):
"""Remove the initial data from cluster."""
self.test_runner.run_remove_initial_cluster_data()
@test(depends_on=[cluster_create],
runs_after=[remove_initial_cluster_data,
verify_cluster_root_enable_after_shrink])
def cluster_delete(self):
"""Delete an existing cluster."""
self.test_runner.run_cluster_delete()

View File

@ -0,0 +1,341 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.cluster_group"
class ClusterRunnerFactory(test_runners.RunnerFactory):
_runner_ns = 'cluster_runners'
_runner_cls = 'ClusterRunner'
@test(groups=[GROUP, groups.CLUSTER_CREATE],
runs_after_groups=[groups.MODULE_DELETE,
groups.CFGGRP_INST_DELETE,
groups.INST_ACTIONS_RESIZE_WAIT,
groups.DB_ACTION_INST_DELETE,
groups.USER_ACTION_DELETE,
groups.USER_ACTION_INST_DELETE,
groups.ROOT_ACTION_INST_DELETE,
groups.REPL_INST_DELETE_WAIT,
groups.INST_DELETE])
class ClusterCreateGroup(TestGroup):
def __init__(self):
super(ClusterCreateGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_create(self):
"""Create a cluster."""
self.test_runner.run_cluster_create()
@test(groups=[GROUP, groups.CLUSTER_CREATE_WAIT],
depends_on_groups=[groups.CLUSTER_CREATE],
runs_after_groups=[groups.MODULE_INST_DELETE_WAIT,
groups.CFGGRP_INST_DELETE_WAIT,
groups.DB_ACTION_INST_DELETE_WAIT,
groups.USER_ACTION_INST_DELETE_WAIT,
groups.ROOT_ACTION_INST_DELETE_WAIT,
groups.INST_DELETE_WAIT])
class ClusterCreateWaitGroup(TestGroup):
def __init__(self):
super(ClusterCreateWaitGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_create_wait(self):
"""Wait for cluster create to complete."""
self.test_runner.run_cluster_create_wait()
@test(depends_on=[cluster_create_wait])
def add_initial_cluster_data(self):
"""Add data to cluster."""
self.test_runner.run_add_initial_cluster_data()
@test(depends_on=[add_initial_cluster_data])
def verify_initial_cluster_data(self):
"""Verify the initial data exists on cluster."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_create_wait])
def cluster_list(self):
"""List the clusters."""
self.test_runner.run_cluster_list()
@test(depends_on=[cluster_create_wait])
def cluster_show(self):
"""Show a cluster."""
self.test_runner.run_cluster_show()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_ROOT_ENABLE],
depends_on_groups=[groups.CLUSTER_CREATE_WAIT])
class ClusterRootEnableGroup(TestGroup):
def __init__(self):
super(ClusterRootEnableGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_root_enable(self):
"""Root Enable."""
self.test_runner.run_cluster_root_enable()
@test(depends_on=[cluster_root_enable])
def verify_cluster_root_enable(self):
"""Verify Root Enable."""
self.test_runner.run_verify_cluster_root_enable()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_GROW_SHRINK,
groups.CLUSTER_ACTIONS_GROW],
depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE])
class ClusterGrowGroup(TestGroup):
def __init__(self):
super(ClusterGrowGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_grow(self):
"""Grow cluster."""
self.test_runner.run_cluster_grow()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_GROW_SHRINK,
groups.CLUSTER_ACTIONS_GROW_WAIT],
depends_on_groups=[groups.CLUSTER_ACTIONS_GROW])
class ClusterGrowWaitGroup(TestGroup):
def __init__(self):
super(ClusterGrowWaitGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_grow_wait(self):
"""Wait for cluster grow to complete."""
self.test_runner.run_cluster_grow_wait()
@test(depends_on=[cluster_grow_wait])
def verify_initial_cluster_data_after_grow(self):
"""Verify the initial data still exists after cluster grow."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_grow_wait],
runs_after=[verify_initial_cluster_data_after_grow])
def add_grow_cluster_data(self):
"""Add more data to cluster after grow."""
self.test_runner.run_add_grow_cluster_data()
@test(depends_on=[add_grow_cluster_data])
def verify_grow_cluster_data(self):
"""Verify the data added after cluster grow."""
self.test_runner.run_verify_grow_cluster_data()
@test(depends_on=[add_grow_cluster_data],
runs_after=[verify_grow_cluster_data])
def remove_grow_cluster_data(self):
"""Remove the data added after cluster grow."""
self.test_runner.run_remove_grow_cluster_data()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
groups.CLUSTER_ACTIONS_ROOT_GROW],
depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT])
class ClusterRootEnableGrowGroup(TestGroup):
def __init__(self):
super(ClusterRootEnableGrowGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def verify_cluster_root_enable_after_grow(self):
"""Verify Root Enabled after grow."""
self.test_runner.run_verify_cluster_root_enable()
@test(groups=[GROUP, groups.CLUSTER_UPGRADE],
depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
runs_after_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT,
groups.CLUSTER_ACTIONS_ROOT_GROW])
class ClusterUpgradeGroup(TestGroup):
def __init__(self):
super(ClusterUpgradeGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_upgrade(self):
"""Upgrade cluster."""
self.test_runner.run_cluster_upgrade()
@test(groups=[GROUP, groups.CLUSTER_UPGRADE_WAIT],
depends_on_groups=[groups.CLUSTER_UPGRADE])
class ClusterUpgradeWaitGroup(TestGroup):
def __init__(self):
super(ClusterUpgradeWaitGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_upgrade_wait(self):
"""Wait for cluster upgrade to complete."""
self.test_runner.run_cluster_upgrade_wait()
@test(depends_on=[cluster_upgrade_wait])
def verify_initial_cluster_data_after_upgrade(self):
"""Verify the initial data still exists after cluster upgrade."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_upgrade_wait],
runs_after=[verify_initial_cluster_data_after_upgrade])
def add_upgrade_cluster_data_after_upgrade(self):
"""Add more data to cluster after upgrade."""
self.test_runner.run_add_upgrade_cluster_data()
@test(depends_on=[add_upgrade_cluster_data_after_upgrade])
def verify_upgrade_cluster_data_after_upgrade(self):
"""Verify the data added after cluster upgrade."""
self.test_runner.run_verify_upgrade_cluster_data()
@test(depends_on=[add_upgrade_cluster_data_after_upgrade],
runs_after=[verify_upgrade_cluster_data_after_upgrade])
def remove_upgrade_cluster_data_after_upgrade(self):
"""Remove the data added after cluster upgrade."""
self.test_runner.run_remove_upgrade_cluster_data()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_GROW_SHRINK,
groups.CLUSTER_ACTIONS_SHRINK],
depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT],
runs_after_groups=[groups.CLUSTER_UPGRADE_WAIT])
class ClusterShrinkGroup(TestGroup):
def __init__(self):
super(ClusterShrinkGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_shrink(self):
"""Shrink cluster."""
self.test_runner.run_cluster_shrink()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_SHRINK_WAIT],
depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK])
class ClusterShrinkWaitGroup(TestGroup):
def __init__(self):
super(ClusterShrinkWaitGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_shrink_wait(self):
"""Wait for the cluster shrink to complete."""
self.test_runner.run_cluster_shrink_wait()
@test(depends_on=[cluster_shrink_wait])
def verify_initial_cluster_data_after_shrink(self):
"""Verify the initial data still exists after cluster shrink."""
self.test_runner.run_verify_initial_cluster_data()
@test(runs_after=[verify_initial_cluster_data_after_shrink])
def add_shrink_cluster_data(self):
"""Add more data to cluster after shrink."""
self.test_runner.run_add_shrink_cluster_data()
@test(depends_on=[add_shrink_cluster_data])
def verify_shrink_cluster_data(self):
"""Verify the data added after cluster shrink."""
self.test_runner.run_verify_shrink_cluster_data()
@test(depends_on=[add_shrink_cluster_data],
runs_after=[verify_shrink_cluster_data])
def remove_shrink_cluster_data(self):
"""Remove the data added after cluster shrink."""
self.test_runner.run_remove_shrink_cluster_data()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
groups.CLUSTER_ACTIONS_ROOT_SHRINK],
depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK_WAIT])
class ClusterRootEnableShrinkGroup(TestGroup):
def __init__(self):
super(ClusterRootEnableShrinkGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def verify_cluster_root_enable_after_shrink(self):
"""Verify Root Enable after shrink."""
self.test_runner.run_verify_cluster_root_enable()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_DELETE],
depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE,
groups.CLUSTER_ACTIONS_ROOT_GROW,
groups.CLUSTER_ACTIONS_ROOT_SHRINK,
groups.CLUSTER_ACTIONS_GROW_WAIT,
groups.CLUSTER_ACTIONS_SHRINK_WAIT,
groups.CLUSTER_UPGRADE_WAIT])
class ClusterDeleteGroup(TestGroup):
def __init__(self):
super(ClusterDeleteGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def remove_initial_cluster_data(self):
"""Remove the initial data from cluster."""
self.test_runner.run_remove_initial_cluster_data()
@test(runs_after=[remove_initial_cluster_data])
def cluster_delete(self):
"""Delete an existing cluster."""
self.test_runner.run_cluster_delete()
@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
groups.CLUSTER_DELETE_WAIT],
depends_on_groups=[groups.CLUSTER_DELETE])
class ClusterDeleteWaitGroup(TestGroup):
def __init__(self):
super(ClusterDeleteWaitGroup, self).__init__(
ClusterRunnerFactory.instance())
@test
def cluster_delete_wait(self):
"""Wait for the existing cluster to be gone."""
self.test_runner.run_cluster_delete_wait()

View File

@ -44,11 +44,13 @@ class DataType(Enum):
tiny2 = 4
# a third tiny dataset (also for replication propagation)
tiny3 = 5
# a forth tiny dataset (for cluster propagation)
tiny4 = 6
# small amount of data (this can be added to each instance
# after creation, for example).
small = 6
small = 7
# large data, enough to make creating a backup take 20s or more.
large = 7
large = 8
class TestHelper(object):
@ -130,6 +132,9 @@ class TestHelper(object):
DataType.tiny3.name: {
self.DATA_START: 3000,
self.DATA_SIZE: 100},
DataType.tiny4.name: {
self.DATA_START: 4000,
self.DATA_SIZE: 100},
DataType.small.name: {
self.DATA_START: 10000,
self.DATA_SIZE: 1000},
@ -481,9 +486,24 @@ class TestHelper(object):
"""
return False
##############
################
# Module related
##############
################
def get_valid_module_type(self):
"""Return a valid module type."""
return "Ping"
#################
# Cluster related
#################
def get_cluster_types(self):
"""Returns a list of cluster type lists to use when creating instances.
The list should be the same size as the number of cluster instances
that will be created. If not specified, no types are sent to
cluster-create. Cluster grow uses the first type in the list for the
first instance, and doesn't use anything for the second instance
(i.e. doesn't pass in anything for 'type').
An example for this method would be:
return [['data', 'other_type'], ['third_type']]
"""
return None

View File

@ -29,7 +29,7 @@ from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions
class ClusterActionsRunner(TestRunner):
class ClusterRunner(TestRunner):
USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID'
DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER'
@ -37,7 +37,7 @@ class ClusterActionsRunner(TestRunner):
EXTRA_INSTANCE_NAME = "named_instance"
def __init__(self):
super(ClusterActionsRunner, self).__init__()
super(ClusterRunner, self).__init__()
self.cluster_name = 'test_cluster'
self.cluster_id = 0
@ -46,6 +46,9 @@ class ClusterActionsRunner(TestRunner):
self.srv_grp_id = None
self.current_root_creds = None
self.locality = 'affinity'
self.initial_instance_count = None
self.cluster_instances = None
self.cluster_removed_instances = None
@property
def is_using_existing_cluster(self):
@ -60,7 +63,6 @@ class ClusterActionsRunner(TestRunner):
return 2
def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING',
expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
self.cluster_count_before_create = len(
self.auth_client.clusters.list())
@ -69,59 +71,70 @@ class ClusterActionsRunner(TestRunner):
instance_flavor = self.get_instance_flavor()
instances_def = [
instance_defs = [
self.build_flavor(
flavor_id=self.get_flavor_href(instance_flavor),
volume_size=self.instance_info.volume['size'])] * num_nodes
volume_size=self.instance_info.volume['size'])
for count in range(0, num_nodes)]
types = self.test_helper.get_cluster_types()
for index, instance_def in enumerate(instance_defs):
instance_def['nics'] = self.instance_info.nics
if types and index < len(types):
instance_def['type'] = types[index]
self.cluster_id = self.assert_cluster_create(
self.cluster_name, instances_def, self.locality,
expected_task_name, expected_instance_states, expected_http_code)
self.cluster_name, instance_defs, self.locality,
expected_task_name, expected_http_code)
def assert_cluster_create(
self, cluster_name, instances_def, locality, expected_task_name,
expected_instance_states, expected_http_code):
expected_http_code):
self.report.log("Testing cluster create: %s" % cluster_name)
client = self.auth_client
cluster = self.get_existing_cluster()
if cluster:
self.report.log("Using an existing cluster: %s" % cluster.id)
cluster_instances = self._get_cluster_instances(cluster.id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states[-1:])
else:
cluster = self.auth_client.clusters.create(
cluster = client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def, locality=locality)
self.assert_client_code(client, expected_http_code)
self._assert_cluster_values(cluster, expected_task_name)
# Don't give an expected task here or it will do a 'get' on
# the cluster. We tested the cluster values above.
self._assert_cluster_action(cluster.id, None,
expected_http_code)
cluster_instances = self._get_cluster_instances(cluster.id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states)
# Create the helper user/database on the first node.
# The cluster should handle the replication itself.
self.create_test_helper_on_instance(cluster_instances[0])
# make sure the server_group was created
self.cluster_inst_ids = [inst.id for inst in cluster_instances]
for id in self.cluster_inst_ids:
srv_grp_id = self.assert_server_group_exists(id)
if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
self.fail("Found multiple server groups for cluster")
self.srv_grp_id = srv_grp_id
return cluster.id
cluster_id = cluster.id
def run_cluster_create_wait(self,
expected_instance_states=['BUILD', 'ACTIVE']):
self.assert_cluster_create_wait(
self.cluster_id, expected_instance_states=expected_instance_states)
def assert_cluster_create_wait(
self, cluster_id, expected_instance_states):
client = self.auth_client
cluster_instances = self._get_cluster_instances(client, cluster_id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states)
# Create the helper user/database on the first node.
# The cluster should handle the replication itself.
if not self.get_existing_cluster():
self.create_test_helper_on_instance(cluster_instances[0])
# Although all instances have already acquired the expected state,
# we still need to poll for the final cluster task, because
# it may take up to the periodic task interval until the task name
# gets updated in the Trove database.
self._assert_cluster_states(cluster_id, ['NONE'])
self._assert_cluster_states(client, cluster_id, ['NONE'])
return cluster_id
# make sure the server_group was created
self.cluster_inst_ids = [inst.id for inst in cluster_instances]
for id in self.cluster_inst_ids:
srv_grp_id = self.assert_server_group_exists(id)
if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
self.fail("Found multiple server groups for cluster")
self.srv_grp_id = srv_grp_id
def get_existing_cluster(self):
if self.is_using_existing_cluster:
@ -134,10 +147,10 @@ class ClusterActionsRunner(TestRunner):
self.cluster_count_before_create + 1,
expected_http_code)
def assert_cluster_list(self, expected_count,
expected_http_code):
count = len(self.auth_client.clusters.list())
self.assert_client_code(expected_http_code, client=self.auth_client)
def assert_cluster_list(self, expected_count, expected_http_code):
client = self.auth_client
count = len(client.clusters.list())
self.assert_client_code(client, expected_http_code)
self.assert_equal(expected_count, count, "Unexpected cluster count")
def run_cluster_show(self, expected_http_code=200,
@ -147,19 +160,23 @@ class ClusterActionsRunner(TestRunner):
def assert_cluster_show(self, cluster_id, expected_task_name,
expected_http_code):
self._assert_cluster_response(cluster_id, expected_task_name)
self._assert_cluster_response(self.auth_client,
cluster_id, expected_task_name)
def run_cluster_root_enable(self, expected_task_name=None,
expected_http_code=200):
root_credentials = self.test_helper.get_helper_credentials_root()
self.current_root_creds = self.auth_client.root.create_cluster_root(
if not root_credentials or not root_credentials.get('name'):
raise SkipTest("No root credentials provided.")
client = self.auth_client
self.current_root_creds = client.root.create_cluster_root(
self.cluster_id, root_credentials['password'])
self.assert_client_code(client, expected_http_code)
self._assert_cluster_response(client, cluster_id, expected_task_name)
self.assert_equal(root_credentials['name'],
self.current_root_creds[0])
self.assert_equal(root_credentials['password'],
self.current_root_creds[1])
self._assert_cluster_action(self.cluster_id, expected_task_name,
expected_http_code)
def run_verify_cluster_root_enable(self):
if not self.current_root_creds:
@ -181,9 +198,6 @@ class ClusterActionsRunner(TestRunner):
def run_add_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_add_cluster_data(data_type, self.cluster_id)
def run_add_extra_cluster_data(self, data_type=DataType.tiny2):
self.assert_add_cluster_data(data_type, self.cluster_id)
def assert_add_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.add_data(data_type, self.extract_ipv4s(cluster.ip)[0])
@ -191,9 +205,6 @@ class ClusterActionsRunner(TestRunner):
def run_verify_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_verify_cluster_data(data_type, self.cluster_id)
def run_verify_extra_cluster_data(self, data_type=DataType.tiny2):
self.assert_verify_cluster_data(data_type, self.cluster_id)
def assert_verify_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
for ipv4 in self.extract_ipv4s(cluster.ip):
@ -203,9 +214,6 @@ class ClusterActionsRunner(TestRunner):
def run_remove_initial_cluster_data(self, data_type=DataType.tiny):
    """Remove the initial (tiny) data set from the cluster."""
    self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_remove_extra_cluster_data(self, data_type=DataType.tiny2):
    """Remove the second (tiny2) data set from the cluster."""
    self.assert_remove_cluster_data(data_type, self.cluster_id)
def assert_remove_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.remove_data(
@ -221,6 +229,10 @@ class ClusterActionsRunner(TestRunner):
self._build_instance_def(flavor_href,
self.instance_info.volume['size'],
self.EXTRA_INSTANCE_NAME)]
types = self.test_helper.get_cluster_types()
if types and types[0]:
added_instance_defs[0]['type'] = types[0]
self.assert_cluster_grow(
self.cluster_id, added_instance_defs, expected_task_name,
expected_http_code)
@ -230,115 +242,200 @@ class ClusterActionsRunner(TestRunner):
flavor_id=flavor_id, volume_size=volume_size)
if name:
instance_def.update({'name': name})
instance_def.update({'nics': self.instance_info.nics})
return instance_def
def assert_cluster_grow(self, cluster_id, added_instance_defs,
                        expected_task_name, expected_http_code):
    """Request a cluster grow and check the immediate API response.

    Verifies the response reports exactly the requested number of new
    nodes; waiting for them to become ACTIVE is done by the separate
    grow-wait step.
    """
    client = self.auth_client
    cluster = client.clusters.get(cluster_id)
    initial_instance_count = len(cluster.instances)

    cluster = client.clusters.grow(cluster_id, added_instance_defs)
    self.assert_client_code(client, expected_http_code)
    self._assert_cluster_response(client, cluster_id, expected_task_name)

    self.assert_equal(len(added_instance_defs),
                      len(cluster.instances) - initial_instance_count,
                      "Unexpected number of added nodes.")
def run_cluster_grow_wait(self):
    """Wait for the cluster grow operation to complete."""
    self.assert_cluster_grow_wait(self.cluster_id)
def assert_cluster_grow_wait(self, cluster_id):
    """Poll until the grow completes and every node is ACTIVE."""
    client = self.auth_client
    cluster_instances = self._get_cluster_instances(client, cluster_id)
    self.assert_all_instance_states(cluster_instances, ['ACTIVE'])

    # The cluster task returns to 'NONE' once the grow is finished.
    self._assert_cluster_states(client, cluster_id, ['NONE'])
    self._assert_cluster_response(client, cluster_id, 'NONE')
def run_cluster_shrink(
self, expected_task_name=None, expected_http_code=202):
self.assert_cluster_shrink(self.cluster_id, [self.EXTRA_INSTANCE_NAME],
def run_add_grow_cluster_data(self, data_type=DataType.tiny2):
    """Load a data set after the grow, exercising the new node(s)."""
    self.assert_add_cluster_data(data_type, self.cluster_id)
def run_verify_grow_cluster_data(self, data_type=DataType.tiny2):
    """Verify the post-grow data set is readable from the cluster."""
    self.assert_verify_cluster_data(data_type, self.cluster_id)
def run_remove_grow_cluster_data(self, data_type=DataType.tiny2):
    """Remove the post-grow data set from the cluster."""
    self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_cluster_upgrade(self, expected_task_name='UPGRADING_CLUSTER',
                        expected_http_code=202):
    """Kick off an upgrade of the cluster to the configured datastore version."""
    self.assert_cluster_upgrade(self.cluster_id,
                                expected_task_name, expected_http_code)
def assert_cluster_upgrade(self, cluster_id,
                           expected_task_name, expected_http_code):
    """Request a cluster upgrade and check the immediate API response.

    Records the pre-upgrade instance count so the wait step can verify
    that no nodes were added or removed by the upgrade.
    """
    client = self.auth_client
    cluster = client.clusters.get(cluster_id)
    self.initial_instance_count = len(cluster.instances)
    client.clusters.upgrade(
        cluster_id, self.instance_info.dbaas_datastore_version)
    self.assert_client_code(client, expected_http_code)
    self._assert_cluster_response(client, cluster_id, expected_task_name)
def run_cluster_upgrade_wait(self):
    """Wait for the upgrade to finish; all nodes should end up ACTIVE."""
    self.assert_cluster_upgrade_wait(
        self.cluster_id, expected_last_instance_state='ACTIVE')
def assert_cluster_upgrade_wait(self, cluster_id,
                                expected_last_instance_state):
    """Poll until the upgrade completes, then validate the cluster state."""
    client = self.auth_client
    self._assert_cluster_states(client, cluster_id, ['NONE'])
    cluster_instances = self._get_cluster_instances(client, cluster_id)
    # The upgrade must not change cluster membership.
    self.assert_equal(
        self.initial_instance_count,
        len(cluster_instances),
        "Unexpected number of instances after upgrade.")
    self.assert_all_instance_states(cluster_instances,
                                    [expected_last_instance_state])
    self._assert_cluster_response(client, cluster_id, 'NONE')
def run_add_upgrade_cluster_data(self, data_type=DataType.tiny3):
    """Load a data set after the upgrade to prove the cluster still works."""
    self.assert_add_cluster_data(data_type, self.cluster_id)
def run_verify_upgrade_cluster_data(self, data_type=DataType.tiny3):
    """Verify the post-upgrade data set is readable from the cluster."""
    self.assert_verify_cluster_data(data_type, self.cluster_id)
def run_remove_upgrade_cluster_data(self, data_type=DataType.tiny3):
    """Remove the post-upgrade data set from the cluster."""
    self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_cluster_shrink(self, expected_task_name='SHRINKING_CLUSTER',
                       expected_http_code=202):
    """Shrink the cluster by removing the node added by the grow test."""
    self.assert_cluster_shrink(self.auth_client,
                               self.cluster_id, [self.EXTRA_INSTANCE_NAME],
                               expected_task_name, expected_http_code)
def assert_cluster_shrink(self, client, cluster_id, removed_instance_names,
                          expected_task_name, expected_http_code):
    """Request removal of the named nodes and check the API response.

    Saves the pre-shrink instance count and the full records of the
    removed instances so the shrink-wait step can verify them after the
    operation completes.
    """
    cluster = client.clusters.get(cluster_id)
    self.initial_instance_count = len(cluster.instances)

    self.cluster_removed_instances = (
        self._find_cluster_instances_by_name(
            cluster, removed_instance_names))

    client.clusters.shrink(
        cluster_id, [{'id': instance.id}
                     for instance in self.cluster_removed_instances])

    self.assert_client_code(client, expected_http_code)
    self._assert_cluster_response(client, cluster_id, expected_task_name)
def _find_cluster_instances_by_name(self, cluster, instance_names):
return [instance for instance in cluster.instances
return [self.auth_client.instances.get(instance['id'])
for instance in cluster.instances
if instance['name'] in instance_names]
def run_cluster_shrink_wait(self):
    """Wait for the shrink to finish; removed nodes should end SHUTDOWN."""
    self.assert_cluster_shrink_wait(
        self.cluster_id, expected_last_instance_state='SHUTDOWN')
def assert_cluster_shrink_wait(self, cluster_id,
                               expected_last_instance_state):
    """Poll until the shrink completes, then validate membership and state."""
    client = self.auth_client
    self._assert_cluster_states(client, cluster_id, ['NONE'])
    cluster = client.clusters.get(cluster_id)
    # Exactly the requested nodes must have been removed.
    self.assert_equal(
        len(self.cluster_removed_instances),
        self.initial_instance_count - len(cluster.instances),
        "Unexpected number of removed nodes.")
    cluster_instances = self._get_cluster_instances(client, cluster_id)
    self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
    # The removed instances should be deleted, finishing in the given state.
    self.assert_all_gone(self.cluster_removed_instances,
                         expected_last_instance_state)
    self._assert_cluster_response(client, cluster_id, 'NONE')
def run_add_shrink_cluster_data(self, data_type=DataType.tiny4):
    """Load a data set after the shrink to prove the cluster still works."""
    self.assert_add_cluster_data(data_type, self.cluster_id)
def run_verify_shrink_cluster_data(self, data_type=DataType.tiny4):
    """Verify the post-shrink data set is readable from the cluster."""
    self.assert_verify_cluster_data(data_type, self.cluster_id)
def run_remove_shrink_cluster_data(self, data_type=DataType.tiny4):
    """Remove the post-shrink data set from the cluster."""
    self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_cluster_delete(
        self, expected_task_name='DELETING', expected_http_code=202):
    """Delete the cluster unless configured to keep it for debugging."""
    if self.has_do_not_delete_cluster:
        self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
                        "specified, skipping delete...")
        raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")

    self.assert_cluster_delete(
        self.cluster_id, expected_http_code)
def assert_cluster_delete(self, cluster_id, expected_http_code):
    """Request cluster deletion and check the immediate API response.

    Saves the member instance records so the delete-wait step can verify
    they are gone once the delete completes.
    """
    self.report.log("Testing cluster delete: %s" % cluster_id)
    client = self.auth_client
    self.cluster_instances = self._get_cluster_instances(client,
                                                         cluster_id)

    client.clusters.delete(cluster_id)
    self.assert_client_code(client, expected_http_code)
def _get_cluster_instances(self, client, cluster_id):
cluster = client.clusters.get(cluster_id)
return [client.instances.get(instance['id'])
for instance in cluster.instances]
def run_cluster_delete_wait(
        self, expected_task_name='DELETING',
        expected_last_instance_state='SHUTDOWN'):
    """Wait for cluster deletion unless configured to keep the cluster."""
    if self.has_do_not_delete_cluster:
        self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
                        "specified, skipping delete wait...")
        raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")

    self.assert_cluster_delete_wait(
        self.cluster_id, expected_task_name, expected_last_instance_state)
def assert_cluster_delete_wait(
        self, cluster_id, expected_task_name,
        expected_last_instance_state):
    """Poll until the cluster and all of its member instances are gone."""
    client = self.auth_client
    # Since the server_group is removed right at the beginning of the
    # cluster delete process we can't check for locality anymore.
    self._assert_cluster_response(client, cluster_id, expected_task_name,
                                  check_locality=False)

    self.assert_all_gone(self.cluster_instances,
                         expected_last_instance_state)
    self._assert_cluster_gone(client, cluster_id)
    # make sure the server group is gone too
    self.assert_server_group_gone(self.srv_grp_id)
def _assert_cluster_states(self, cluster_id, expected_states,
def _assert_cluster_states(self, client, cluster_id, expected_states,
fast_fail_status=None):
for status in expected_states:
start_time = timer.time()
try:
poll_until(lambda: self._has_task(
cluster_id, status, fast_fail_status=fast_fail_status),
poll_until(
lambda: self._has_task(
client, cluster_id, status,
fast_fail_status=fast_fail_status),
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
self.report.log("Cluster has gone '%s' in %s." %
@ -351,8 +448,8 @@ class ClusterActionsRunner(TestRunner):
return True
def _has_task(self, cluster_id, task, fast_fail_status=None):
cluster = self.auth_client.clusters.get(cluster_id)
def _has_task(self, client, cluster_id, task, fast_fail_status=None):
cluster = client.clusters.get(cluster_id)
task_name = cluster.task['name']
self.report.log("Waiting for cluster '%s' to become '%s': %s"
% (cluster_id, task, task_name))
@ -361,10 +458,9 @@ class ClusterActionsRunner(TestRunner):
% (cluster_id, task))
return task_name == task
def _assert_cluster_response(self, cluster_id, expected_task_name,
expected_http_code=200, check_locality=True):
cluster = self.auth_client.clusters.get(cluster_id)
self.assert_client_code(expected_http_code, client=self.auth_client)
def _assert_cluster_response(self, client, cluster_id, expected_task_name,
check_locality=True):
cluster = client.clusters.get(cluster_id)
self._assert_cluster_values(cluster, expected_task_name,
check_locality=check_locality)
@ -391,63 +487,63 @@ class ClusterActionsRunner(TestRunner):
self.assert_equal(self.locality, cluster.locality,
"Unexpected cluster locality")
def _assert_cluster_gone(self, client, cluster_id):
    """Assert the cluster no longer exists (its GET eventually 404s)."""
    t0 = timer.time()
    try:
        # This will poll until the cluster goes away.
        self._assert_cluster_states(client, cluster_id, ['NONE'])
        self.fail(
            "Cluster '%s' still existed after %s seconds."
            % (cluster_id, self._time_since(t0)))
    except exceptions.NotFound:
        # NotFound is the expected outcome of a successful delete.
        self.assert_client_code(client, 404)
class CassandraClusterRunner(ClusterRunner):
    """Cluster runner for the Cassandra datastore."""

    def run_cluster_root_enable(self):
        # Root enable is not implemented for Cassandra clusters.
        raise SkipTest("Operation is currently not supported.")
class MariadbClusterRunner(ClusterRunner):
    """Cluster runner for the MariaDB (Galera) datastore."""

    @property
    def min_cluster_node_count(self):
        # Galera requires a configurable minimum number of cluster members.
        return self.get_datastore_config_property('min_cluster_member_count')
class MongodbClusterRunner(ClusterRunner):
    """Cluster runner for the MongoDB datastore."""

    @property
    def min_cluster_node_count(self):
        # MongoDB clusters are always built with three data members.
        return 3

    def run_cluster_delete(self, expected_task_name='NONE',
                           expected_http_code=202):
        # Deleting a MongoDB cluster trips a known bug stopping the database.
        raise SkipKnownBug(runners.BUG_STOP_DB_IN_CLUSTER)
class PxcClusterRunner(ClusterRunner):
    """Cluster runner for the Percona XtraDB Cluster datastore."""

    @property
    def min_cluster_node_count(self):
        # Galera requires a configurable minimum number of cluster members.
        return self.get_datastore_config_property('min_cluster_member_count')
class RedisClusterRunner(ClusterRunner):
    """Cluster runner for the Redis datastore."""

    # Since Redis runs all the shrink code in the API server, the call
    # will not return until the task name has been set back to 'NONE' so
    # we can't check it.
    def run_cluster_shrink(self, expected_task_name='NONE',
                           expected_http_code=202):
        return super(RedisClusterRunner, self).run_cluster_shrink(
            expected_task_name=expected_task_name,
            expected_http_code=expected_http_code)
class VerticaClusterRunner(ClusterRunner):
    """Cluster runner for the Vertica datastore."""

    @property
    def min_cluster_node_count(self):
        return self.get_datastore_config_property('cluster_member_count')