Finish cluster int-tests

1) Fix the cluster communication test.
   Replace the old approach, which tested an existing cluster by
   creating a database on it and checking for the database's
   existence afterwards; that approach is not compatible with
   datastores that do not support databases.
   Use the 'test_helper' infrastructure instead to test operations
   on an actual data set (sketched below), the same method the
   single-instance tests use.

2) Add the missing int-tests for the cluster grow and shrink
   operations. The tests verify data operations after the grow and
   shrink operations respectively.

3) Move the Redis and PXC cluster tests to the new framework.

4) Make the cluster tests independent of the instance tests by
   deriving them from the 'base' rather than the 'instance_create'
   test group, i.e. there is no need to run instance tests for
   clusters.

5) Add code (disabled until https://review.openstack.org/#/c/224363/
   merges) to create a helper user/database on a cluster.

6) Add test flags (usage sketched below):
       - TESTS_USE_CLUSTER_ID
       - TESTS_DO_NOT_DELETE_CLUSTER
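
   A minimal sketch of the data-driven check from item 1, assuming a
   runner object that exposes 'auth_client', 'test_helper' and the
   cluster id the way the runner code in this change does; the
   standalone wiring here is illustrative only:

       from trove.tests.scenario.helpers.test_helper import DataType

       def check_cluster_data(runner, cluster_id):
           # Write a small data set through the first cluster IP.
           cluster = runner.auth_client.clusters.get(cluster_id)
           runner.test_helper.add_data(DataType.tiny, cluster.ip[0])
           # Read the data back; any node may serve the read, so
           # success implies working intra-cluster communication.
           runner.test_helper.verify_data(DataType.tiny, cluster.ip[0])
           # Remove the data so repeated runs start from a known state.
           runner.test_helper.remove_data(DataType.tiny, cluster.ip[0])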
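
   The flags from item 6 are plain environment variables, read as in
   the runner changes below; the shell lines and the cluster id are
   illustrative:

       import os

       def has_env_flag(flag_name):
           """Return whether a given flag was set."""
           return os.environ.get(flag_name, None) is not None

       # Re-use an existing cluster and keep it after the test run:
       #   export TESTS_USE_CLUSTER_ID=<cluster-uuid>
       #   export TESTS_DO_NOT_DELETE_CLUSTER=1
       if has_env_flag('TESTS_USE_CLUSTER_ID'):
           cluster_id = os.environ.get('TESTS_USE_CLUSTER_ID')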

Change-Id: I4c97c77669b56295745fa2af8bb14f07ac52dd7a
Petr Malik 2015-11-10 14:57:13 -05:00
parent 2b98c53862
commit a232c53fde
9 changed files with 439 additions and 741 deletions

@@ -1,325 +0,0 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for PXC datastore.
APIs tested for PXC are:
1. create
2. restart
3. resize-volume
4. resize-instance
5. delete
6. cluster-create
7. cluster-delete
"""
from proboscis import asserts
from proboscis.decorators import before_class
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
PXC_GROUP = "dbaas.api.pxc"
TIMEOUT = 2300
SLEEP_TIME = 60
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[PXC_GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class PXCTest(object):
"""Tests PXC Datastore Features."""
@before_class
def setUp(self):
self.instance = instance_info
self.rd_client = create_dbaas_client(self.instance.user)
self.report = CONFIG.get_report()
def _find_status(self, rd_client, instance_id, expected_status):
"""Tracks instance status, until it gets to expected_status."""
instance = rd_client.instances.get(instance_id)
self.report.log("Instance info %s." % instance._info)
if instance.status == expected_status:
self.report.log("Instance: %s is ready." % instance_id)
return True
else:
return False
@test
def test_instance_restart(self):
"""Tests the restart API."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.restart(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("REBOOT", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Restarted Instance: %s." % self.instance.id)
@test(depends_on=[test_instance_restart])
def test_instance_resize_volume(self):
"""Tests the resize volume API."""
old_volume_size = int(instance_info.volume['size'])
new_volume_size = old_volume_size + 1
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_volume(self.instance.id,
new_volume_size)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(instance.volume['size'], new_volume_size)
self.report.log("Resized Volume for Instance ID: %s to %s." % (
self.instance.id, new_volume_size))
@test(depends_on=[test_instance_resize_volume])
def test_instance_resize_flavor(self):
"""Tests the resize instance/flavor API."""
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1.medium')
flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
new_flavor = flavors[0]
asserts.assert_true(new_flavor is not None,
"Flavor '%s' not found!" % flavor_name)
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_instance(self.instance.id,
new_flavor.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
self.report.log("Resized Flavor for Instance ID: %s to %s." % (
self.instance.id, new_flavor.id))
@test(depends_on=[test_instance_resize_flavor])
def test_instance_delete(self):
"""Tests the instance delete."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.delete(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("SHUTDOWN", test_instance.status)
def _poll():
try:
instance = self.rd_client.instances.get(self.instance.id)
self.report.log("Instance info %s" % instance._info)
asserts.assert_equal("SHUTDOWN", instance.status)
return False
except exceptions.NotFound:
self.report.log("Instance has gone.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted Instance ID: %s " % self.instance.id)
@test(depends_on=[test_instance_delete])
def test_create_cluster_with_fewer_instances(self):
invalid_request_body_with_few_instances = [
{"flavorRef": 2, "volume": {"size": 1}}]
self.rd_client = create_dbaas_client(self.instance.user)
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_few_instances)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_fewer_instances])
def test_create_cluster_with_different_flavors(self):
invalid_request_body_with_different_flavors = [
{"flavorRef": 3, "volume": {"size": 1}},
{"flavorRef": 4, "volume": {"size": 1}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_flavors)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_flavors])
def test_create_cluster_with_different_volumes(self):
invalid_request_body_with_different_volumes = [
{"flavorRef": 2, "volume": {"size": 2}},
{"flavorRef": 2, "volume": {"size": 3}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_volumes)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_volumes])
def test_create_cluster_successfuly(self):
valid_request_body = [
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume},
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume}]
self.cluster = self.rd_client.clusters.create(
"test_cluster", self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=valid_request_body)
with TypeCheck('Cluster', self.cluster) as check:
check.has_field("id", basestring)
check.has_field("name", basestring)
check.has_field("datastore", dict)
check.has_field("instances", list)
check.has_field("links", list)
check.has_field("created", unicode)
check.has_field("updated", unicode)
for instance in self.cluster.instances:
isinstance(instance, dict)
asserts.assert_is_not_none(instance['id'])
asserts.assert_is_not_none(instance['links'])
asserts.assert_is_not_none(instance['name'])
asserts.assert_equal(200, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_successfuly])
def test_wait_until_cluster_is_active(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
def result_is_active():
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
self.report.log("Cluster info %s." % cluster._info)
self.report.log("Cluster instances info %s." % cluster_instances)
if cluster.task['name'] == "NONE":
if ["ERROR"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster provisioning failed.")
asserts.fail("Cluster provisioning failed.")
if ["ACTIVE"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster is ready.")
return True
else:
asserts.assert_not_equal(
["ERROR"] * len(cluster_instances),
[instance.status
for instance in cluster_instances])
self.report.log("Continue polling, cluster is not ready yet.")
poll_until(result_is_active, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Created cluster, ID = %s." % self.cluster.id)
@test(depends_on=[test_wait_until_cluster_is_active])
def test_cluster_communication(self):
databases = []
databases.append({"name": 'somenewdb'})
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
databases_before = self.rd_client.databases.list(
cluster_instances[0].id)
self.rd_client.databases.create(cluster_instances[0].id,
databases)
for instance in cluster_instances:
databases_after = self.rd_client.databases.list(
cluster_instances[0].id)
asserts.assert_true(len(databases_before) < len(databases_after))
@test(depends_on=[test_wait_until_cluster_is_active],
runs_after=[test_cluster_communication])
def test_cluster_delete(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
self.rd_client.clusters.delete(self.cluster.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
def _poll():
try:
cluster = self.rd_client.clusters.get(
self.cluster.id)
self.report.log("Cluster info %s" % cluster._info)
asserts.assert_equal("DELETING", cluster.task['name'])
return False
except exceptions.NotFound:
self.report.log("Cluster is not available.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted cluster: %s." % self.cluster.id)

@@ -1,318 +0,0 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for Redis datastore.
APIs tested for Redis are:
1. create
2. restart
3. resize-volume
4. resize-instance
5. delete
6. cluster-create
7. cluster-delete
"""
from proboscis import asserts
from proboscis.decorators import before_class
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common import cfg
from trove.common.utils import poll_until
from trove.tests.api.instances import EPHEMERAL_SUPPORT
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
CONF = cfg.CONF
REDIS_GROUP = "dbaas.api.redis"
TIMEOUT = 2300
SLEEP_TIME = 60
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[REDIS_GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class RedisTest(object):
"""Tests Redis Datastore Features."""
@before_class
def setUp(self):
self.instance = instance_info
self.rd_client = create_dbaas_client(self.instance.user)
self.report = CONFIG.get_report()
def _find_status(self, rd_client, instance_id, expected_status):
"""Tracks instance status, until it gets to expected_status."""
instance = rd_client.instances.get(instance_id)
self.report.log("Instance info %s." % instance._info)
if instance.status == expected_status:
self.report.log("Instance: %s is ready." % instance_id)
return True
else:
return False
@test
def test_instance_restart(self):
"""Tests the restart API."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.restart(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("REBOOT", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Restarted Instance: %s." % self.instance.id)
@test(depends_on=[test_instance_restart], enabled=False)
def test_instance_resize_volume(self):
"""Tests the resize volume API."""
old_volume_size = int(instance_info.volume['size'])
new_volume_size = old_volume_size + 1
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_volume(self.instance.id,
new_volume_size)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(instance.volume['size'], new_volume_size)
self.report.log("Resized Volume for Instance ID: %s to %s." % (
self.instance.id, new_volume_size))
@test(depends_on=[test_instance_resize_volume])
def test_instance_resize_flavor(self):
"""Tests the resize instance/flavor API."""
if EPHEMERAL_SUPPORT:
flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name',
'eph.rd-smaller')
else:
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1.small')
flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
new_flavor = flavors[0]
asserts.assert_true(new_flavor is not None,
"Flavor '%s' not found!" % flavor_name)
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_instance(self.instance.id,
new_flavor.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
self.report.log("Resized Flavor for Instance ID: %s to %s." % (
self.instance.id, new_flavor.id))
@test(depends_on=[test_instance_resize_flavor])
def test_instance_delete(self):
"""Tests the instance delete."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.delete(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("SHUTDOWN", test_instance.status)
def _poll():
try:
instance = self.rd_client.instances.get(self.instance.id)
self.report.log("Instance info %s" % instance._info)
asserts.assert_equal("SHUTDOWN", instance.status)
return False
except exceptions.NotFound:
self.report.log("Instance has gone.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted Instance ID: %s " % self.instance.id)
@test(depends_on=[test_instance_delete])
def test_create_cluster_successfuly(self):
valid_request_body = [{"flavorRef": self.instance.dbaas_flavor_href,
'volume': {'size': 1}}] * 2
self.cluster = self.rd_client.clusters.create(
"test_cluster", self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=valid_request_body)
with TypeCheck('Cluster', self.cluster) as check:
check.has_field("id", basestring)
check.has_field("name", basestring)
check.has_field("datastore", dict)
check.has_field("instances", list)
check.has_field("links", list)
check.has_field("created", unicode)
check.has_field("updated", unicode)
for instance in self.cluster.instances:
isinstance(instance, dict)
asserts.assert_is_not_none(instance['id'])
asserts.assert_is_not_none(instance['links'])
asserts.assert_is_not_none(instance['name'])
asserts.assert_equal(200, self.rd_client.last_http_code)
def _cluster_is_active(self):
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
self.report.log("Cluster info %s." % cluster._info)
self.report.log("Cluster instances info %s." % cluster_instances)
if cluster.task['name'] == "NONE":
if ["ERROR"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster provisioning failed.")
asserts.fail("Cluster provisioning failed.")
if ["ACTIVE"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster is ready.")
return True
else:
asserts.assert_not_equal(
["ERROR"] * len(cluster_instances),
[instance.status
for instance in cluster_instances])
self.report.log("Continue polling, cluster is not ready yet.")
@test(depends_on=[test_create_cluster_successfuly])
def test_wait_until_cluster_is_active(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
poll_until(self._cluster_is_active,
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Created cluster, ID = %s." % self.cluster.id)
@test(depends_on=[test_wait_until_cluster_is_active])
def test_cluster_grow(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
beginning_instance_count = len(self.cluster.instances)
valid_request_body = [
{"name": "foo", "flavorRef": self.instance.dbaas_flavor_href,
'volume': {'size': 1}},
{"name": "bar", "flavorRef": self.instance.dbaas_flavor_href,
'volume': {'size': 1}}]
self.cluster = self.rd_client.clusters.grow(self.cluster.id,
valid_request_body)
asserts.assert_equal(2, len(self.cluster.instances)
- beginning_instance_count)
asserts.assert_equal(202, self.rd_client.last_http_code)
poll_until(self._cluster_is_active,
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
@test(depends_on=[test_cluster_grow])
def test_cluster_shrink(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
foo_instance = None
for instance in self.cluster.instances:
if instance['name'] == 'foo':
foo_instance = instance
break
asserts.assert_is_not_none(foo_instance, "Could not find foo instance")
beginning_instance_count = len(self.cluster.instances)
valid_request_body = [{"id": foo_instance['id']}]
self.cluster = self.rd_client.clusters.shrink(self.cluster.id,
valid_request_body)
asserts.assert_equal(-1, len(self.cluster.instances)
- beginning_instance_count)
asserts.assert_equal(202, self.rd_client.last_http_code)
poll_until(self._cluster_is_active,
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
@test(depends_on=[test_create_cluster_successfuly],
runs_after=[test_cluster_shrink])
def test_cluster_delete(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
self.rd_client.clusters.delete(self.cluster.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
def _poll():
try:
cluster = self.rd_client.clusters.get(
self.cluster.id)
self.report.log("Cluster info %s" % cluster._info)
asserts.assert_equal("DELETING", cluster.task['name'])
return False
except exceptions.NotFound:
self.report.log("Cluster is not available.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted cluster: %s." % self.cluster.id)

@@ -27,8 +27,6 @@ from trove.tests.api.mgmt import datastore_versions
from trove.tests.api.mgmt import hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import pxc
from trove.tests.api import redis
from trove.tests.api import replication
from trove.tests.api import root
from trove.tests.api import user_access
@@ -46,6 +44,7 @@ from trove.tests.scenario.groups import user_actions_group
GROUP_SERVICES_INITIALIZE = "services.initialize"
GROUP_SETUP = 'dbaas.setup'
def build_group(*groups):
@@ -120,14 +119,17 @@ proboscis.register(groups=["blackbox_mgmt"],
#
# Group designations for datastore agnostic int-tests
#
instance_create_groups = [
base_groups = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instance_create_group.GROUP,
instance_delete_group.GROUP
GROUP_SETUP
]
instance_create_groups = list(base_groups)
instance_create_groups.extend([instance_create_group.GROUP,
instance_delete_group.GROUP])
backup_groups = list(instance_create_groups)
backup_groups.extend([backup_group.GROUP])
@@ -137,7 +139,7 @@ user_actions_groups.extend([user_actions_group.GROUP])
database_actions_groups = list(instance_create_groups)
database_actions_groups.extend([database_actions_group.GROUP])
cluster_actions_groups = list(instance_create_groups)
cluster_actions_groups = list(base_groups)
cluster_actions_groups.extend([cluster_actions_group.GROUP,
negative_cluster_actions_group.GROUP])
@@ -171,22 +173,3 @@ register(["redis_supported"], backup_groups, instance_actions_groups,
register(["vertica_supported"], cluster_actions_groups,
instance_actions_groups)
register(["pxc_supported"], instance_actions_groups, cluster_actions_groups)
# Redis int-tests
redis_group = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
instances.GROUP_QUOTAS,
redis.REDIS_GROUP,
]
proboscis.register(groups=["redis"],
depends_on_groups=redis_group)
# PXC int-tests
pxc_group = [
pxc.PXC_GROUP,
]
proboscis.register(groups=["pxc"],
depends_on_groups=pxc_group)

@@ -34,11 +34,79 @@ class ClusterActionsGroup(TestGroup):
self.test_runner.run_cluster_create()
@test(depends_on=[cluster_create])
def test_cluster_communication(self):
"""Validate the cluster data and properties."""
self.test_runner.run_cluster_communication()
def add_initial_cluster_data(self):
"""Add data to cluster."""
self.test_runner.run_add_initial_cluster_data()
@test(depends_on=[cluster_create], runs_after=[test_cluster_communication])
@test(depends_on=[add_initial_cluster_data])
def verify_initial_cluster_data(self):
"""Verify the initial data exists on cluster."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_create],
runs_after=[verify_initial_cluster_data])
def cluster_grow(self):
"""Grow cluster."""
self.test_runner.run_cluster_grow()
@test(depends_on=[cluster_grow, add_initial_cluster_data])
def verify_initial_cluster_data_after_grow(self):
"""Verify the initial data still exists after cluster grow."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_grow],
runs_after=[verify_initial_cluster_data_after_grow])
def add_extra_cluster_data_after_grow(self):
"""Add more data to cluster."""
self.test_runner.run_add_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_grow])
def verify_extra_cluster_data_after_grow(self):
"""Verify the data added after cluster grow."""
self.test_runner.run_verify_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_grow],
runs_after=[verify_extra_cluster_data_after_grow])
def remove_extra_cluster_data_after_grow(self):
"""Remove the data added after cluster grow."""
self.test_runner.run_remove_extra_cluster_data()
@test(depends_on=[cluster_create],
runs_after=[remove_extra_cluster_data_after_grow])
def cluster_shrink(self):
"""Shrink cluster."""
self.test_runner.run_cluster_shrink()
@test(depends_on=[cluster_shrink, add_initial_cluster_data])
def verify_initial_cluster_data_after_shrink(self):
"""Verify the initial data still exists after cluster shrink."""
self.test_runner.run_verify_initial_cluster_data()
@test(depends_on=[cluster_shrink],
runs_after=[verify_initial_cluster_data_after_shrink])
def add_extra_cluster_data_after_shrink(self):
"""Add more data to cluster."""
self.test_runner.run_add_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_shrink])
def verify_extra_cluster_data_after_shrink(self):
"""Verify the data added after cluster shrink."""
self.test_runner.run_verify_extra_cluster_data()
@test(depends_on=[add_extra_cluster_data_after_shrink],
runs_after=[verify_extra_cluster_data_after_shrink])
def remove_extra_cluster_data_after_shrink(self):
"""Remove the data added after cluster shrink."""
self.test_runner.run_remove_extra_cluster_data()
@test(depends_on=[add_initial_cluster_data],
runs_after=[remove_extra_cluster_data_after_shrink])
def remove_initial_cluster_data(self):
"""Remove the initial data from cluster."""
self.test_runner.run_remove_initial_cluster_data()
@test(depends_on=[cluster_create],
runs_after=[remove_initial_cluster_data])
def cluster_delete(self):
"""Delete an existing cluster."""
self.test_runner.run_cluster_delete()

@@ -0,0 +1,22 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.tests.scenario.helpers.mysql_helper import MysqlHelper
class PxcHelper(MysqlHelper):
def __init__(self, expected_override_name):
super(PxcHelper, self).__init__(expected_override_name)

@@ -37,32 +37,87 @@ class RedisHelper(TestHelper):
# Add data overrides
def add_actual_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
client = self.get_client(host, *args, **kwargs)
test_set = client.get(data_label)
test_set = self._get_data_point(host, data_label, *args, **kwargs)
if not test_set:
for num in range(data_start, data_start + data_size):
client.set(self.key_pattern % str(num),
self.value_pattern % str(num))
self._set_data_point(
host,
self.key_pattern % str(num), self.value_pattern % str(num),
*args, **kwargs)
# now that the data is there, add the label
client.set(data_label, self.label_value)
self._set_data_point(
host,
data_label, self.label_value,
*args, **kwargs)
def _set_data_point(self, host, key, value, *args, **kwargs):
def set_point(client, key, value):
return client.set(key, value)
self._execute_with_redirection(
host, set_point, [key, value], *args, **kwargs)
def _get_data_point(self, host, key, *args, **kwargs):
def get_point(client, key):
return client.get(key)
return self._execute_with_redirection(
host, get_point, [key], *args, **kwargs)
def _execute_with_redirection(self, host, callback, callback_args,
*args, **kwargs):
"""Redis clustering is a relatively new feature still not supported
in a fully transparent way by all clients.
The application itself is responsible for connecting to the right node
when accessing a key in a Redis cluster instead.
Clients may be redirected to other nodes by redirection errors:
redis.exceptions.ResponseError: MOVED 10778 10.64.0.2:6379
This method tries to execute a given callback on a given host.
If it gets a redirection error it parses the new host from the response
and issues the same callback on this new host.
"""
client = self.get_client(host, *args, **kwargs)
try:
return callback(client, *callback_args)
except redis.exceptions.ResponseError as ex:
response = str(ex)
if response:
tokens = response.split()
if tokens[0] == 'MOVED':
redirected_host = tokens[2].split(':')[0]
if redirected_host:
return self._execute_with_redirection(
redirected_host, callback, callback_args,
*args, **kwargs)
raise ex
# Remove data overrides
def remove_actual_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
client = self.get_client(host, *args, **kwargs)
test_set = client.get(data_label)
test_set = self._get_data_point(host, data_label, *args, **kwargs)
if test_set:
for num in range(data_start, data_start + data_size):
client.expire(self.key_pattern % str(num), 0)
self._expire_data_point(host, self.key_pattern % str(num),
*args, **kwargs)
# now that the data is gone, remove the label
client.expire(data_label, 0)
self._expire_data_point(host, data_label, *args, **kwargs)
def _expire_data_point(self, host, key, *args, **kwargs):
def expire_point(client, key):
return client.expire(key, 0)
self._execute_with_redirection(
host, expire_point, [key], *args, **kwargs)
# Verify data overrides
def verify_actual_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
client = self.get_client(host, *args, **kwargs)
# make sure the data is there - tests edge cases and a random one
self._verify_data_point(client, data_label, self.label_value)
self._verify_data_point(host, data_label, self.label_value,
*args, **kwargs)
midway_num = data_start + int(data_size / 2)
random_num = random.randint(data_start + 2,
data_start + data_size - 3)
@@ -72,16 +127,18 @@ class RedisHelper(TestHelper):
random_num,
data_start + data_size - 2,
data_start + data_size - 1]:
self._verify_data_point(client,
self._verify_data_point(host,
self.key_pattern % num,
self.value_pattern % num)
self.value_pattern % num,
*args, **kwargs)
# negative tests
for num in [data_start - 1,
data_start + data_size]:
self._verify_data_point(client, self.key_pattern % num, None)
self._verify_data_point(host, self.key_pattern % num, None,
*args, **kwargs)
def _verify_data_point(self, client, key, expected_value):
value = client.get(key)
def _verify_data_point(self, host, key, expected_value, *args, **kwargs):
value = self._get_data_point(host, key, *args, **kwargs)
TestRunner.assert_equal(expected_value, value,
"Unexpected value '%s' returned from Redis "
"key '%s'" % (value, key))

@@ -13,8 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from proboscis import SkipTest
import time as timer
from trove.common import exception
from trove.common.utils import poll_until
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions
@@ -22,76 +28,205 @@ from troveclient.compat import exceptions
class ClusterActionsRunner(TestRunner):
USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID'
DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER'
EXTRA_INSTANCE_NAME = "named_instance"
def __init__(self):
super(ClusterActionsRunner, self).__init__()
self.cluster_id = 0
def run_cluster_create(
self, num_nodes=2, expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
@property
def is_using_existing_cluster(self):
return self.has_env_flag(self.USE_CLUSTER_ID_FLAG)
@property
def has_do_not_delete_cluster(self):
return self.has_env_flag(self.DO_NOT_DELETE_CLUSTER_FLAG)
def run_cluster_create(self, num_nodes=2, expected_task_name='BUILDING',
expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
instances_def = [
self.build_flavor(
flavor_id=self.instance_info.dbaas_flavor_href,
volume_size=self.instance_info.volume['size'])] * num_nodes
self.cluster_id = self.assert_cluster_create(
'test_cluster', instances_def,
expected_instance_states,
expected_http_code)
'test_cluster', instances_def, expected_task_name,
expected_instance_states, expected_http_code)
def assert_cluster_create(self, cluster_name, instances_def,
expected_instance_states, expected_http_code):
def assert_cluster_create(
self, cluster_name, instances_def, expected_task_name,
expected_instance_states, expected_http_code):
self.report.log("Testing cluster create: %s" % cluster_name)
cluster = self.auth_client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def)
cluster = self.get_existing_cluster()
if cluster:
self.report.log("Using an existing cluster: %s" % cluster.id)
cluster_instances = self._get_cluster_instances(cluster.id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states[-1:])
else:
cluster = self.auth_client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def)
self._assert_cluster_action(cluster.id, expected_task_name,
expected_http_code)
cluster_instances = self._get_cluster_instances(cluster.id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states)
# Create the helper user/database on the first node.
# The cluster should handle the replication itself.
self.create_test_helper_on_instance(cluster_instances[0])
cluster_id = cluster.id
self._assert_cluster_action(cluster_id, 'BUILDING', expected_http_code)
cluster_instances = self._get_cluster_instances(cluster_id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states)
self._assert_cluster_state(cluster_id, 'NONE')
# Although all instances have already acquired the expected state,
# we still need to poll for the final cluster task, because
# it may take up to the periodic task interval until the task name
# gets updated in the Trove database.
self._assert_cluster_states(cluster_id, ['NONE'])
self._assert_cluster_response(cluster_id, 'NONE')
return cluster_id
def run_cluster_communication(self):
# TODO(pmalik): This will need to be generalized
# (using a datastore test_helper) to add and verify data.
# Creating and checking databases like this would not work with
# datastores that do not support them (Redis).
databases = []
databases.append({"name": 'somenewdb'})
cluster = self.auth_client.clusters.get(self.cluster_id)
cluster_instances = [
self.auth_client.instances.get(instance['id'])
for instance in cluster.instances]
databases_before = self.auth_client.databases.list(
cluster_instances[0].id)
self.auth_client.databases.create(cluster_instances[0].id,
databases)
for instance in cluster_instances:
databases_after = self.auth_client.databases.list(
cluster_instances[0].id)
self.assert_true(len(databases_before) < len(databases_after))
def get_existing_cluster(self):
if self.is_using_existing_cluster:
cluster_id = os.environ.get(self.USE_CLUSTER_ID_FLAG)
return self.auth_client.clusters.get(cluster_id)
return None
def run_add_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_add_cluster_data(data_type, self.cluster_id)
def run_add_extra_cluster_data(self, data_type=DataType.tiny2):
self.assert_add_cluster_data(data_type, self.cluster_id)
def assert_add_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.add_data(data_type, cluster.ip[0])
def run_verify_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_verify_cluster_data(data_type, self.cluster_id)
def run_verify_extra_cluster_data(self, data_type=DataType.tiny2):
self.assert_verify_cluster_data(data_type, self.cluster_id)
def assert_verify_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.verify_data(data_type, cluster.ip[0])
def run_remove_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_remove_extra_cluster_data(self, data_type=DataType.tiny2):
self.assert_remove_cluster_data(data_type, self.cluster_id)
def assert_remove_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.remove_data(data_type, cluster.ip[0])
def run_cluster_grow(self, expected_task_name='GROWING_CLUSTER',
expected_http_code=202):
# Add two instances. One with an explicit name.
added_instance_defs = [
self._build_instance_def(self.instance_info.dbaas_flavor_href,
self.instance_info.volume['size']),
self._build_instance_def(self.instance_info.dbaas_flavor_href,
self.instance_info.volume['size'],
self.EXTRA_INSTANCE_NAME)]
self.assert_cluster_grow(
self.cluster_id, added_instance_defs, expected_task_name,
expected_http_code)
def _build_instance_def(self, flavor_id, volume_size, name=None):
instance_def = self.build_flavor(
flavor_id=flavor_id, volume_size=volume_size)
if name:
instance_def.update({'name': name})
return instance_def
def assert_cluster_grow(self, cluster_id, added_instance_defs,
expected_task_name, expected_http_code):
cluster = self.auth_client.clusters.get(cluster_id)
initial_instance_count = len(cluster.instances)
cluster = self.auth_client.clusters.grow(cluster_id,
added_instance_defs)
self._assert_cluster_action(cluster_id, expected_task_name,
expected_http_code)
self.assert_equal(len(added_instance_defs),
len(cluster.instances) - initial_instance_count,
"Unexpected number of added nodes.")
cluster_instances = self._get_cluster_instances(cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
self._assert_cluster_states(cluster_id, ['NONE'])
self._assert_cluster_response(cluster_id, 'NONE')
def run_cluster_shrink(
self, expected_task_name=None, expected_http_code=202):
self.assert_cluster_shrink(self.cluster_id, [self.EXTRA_INSTANCE_NAME],
expected_task_name, expected_http_code)
def assert_cluster_shrink(self, cluster_id, removed_instance_names,
expected_task_name, expected_http_code):
cluster = self.auth_client.clusters.get(cluster_id)
initial_instance_count = len(cluster.instances)
removed_instances = self._find_cluster_instances_by_name(
cluster, removed_instance_names)
cluster = self.auth_client.clusters.shrink(
cluster_id, [{'id': instance['id']}
for instance in removed_instances])
self._assert_cluster_action(cluster_id, expected_task_name,
expected_http_code)
self.assert_equal(
len(removed_instance_names),
initial_instance_count - len(cluster.instances),
"Unexpected number of removed nodes.")
cluster_instances = self._get_cluster_instances(cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
self._assert_cluster_states(cluster_id, ['NONE'])
self._assert_cluster_response(cluster_id, 'NONE')
def _find_cluster_instances_by_name(self, cluster, instance_names):
return [instance for instance in cluster.instances
if instance['name'] in instance_names]
def run_cluster_delete(
self, expected_last_instance_state='SHUTDOWN',
expected_http_code=202):
self.assert_cluster_delete(
self.cluster_id, expected_last_instance_state, expected_http_code)
self, expected_task_name='DELETING',
expected_last_instance_state='SHUTDOWN', expected_http_code=202):
if self.has_do_not_delete_cluster:
self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
"specified, skipping delete...")
raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")
def assert_cluster_delete(self, cluster_id, expected_last_instance_state,
expected_http_code):
self.assert_cluster_delete(
self.cluster_id, expected_task_name, expected_last_instance_state,
expected_http_code)
def assert_cluster_delete(
self, cluster_id, expected_task_name, expected_last_instance_state,
expected_http_code):
self.report.log("Testing cluster delete: %s" % cluster_id)
cluster_instances = self._get_cluster_instances(cluster_id)
self.auth_client.clusters.delete(cluster_id)
self._assert_cluster_action(cluster_id, 'DELETING', expected_http_code)
self._assert_cluster_action(cluster_id, expected_task_name,
expected_http_code)
self.assert_all_gone(cluster_instances, expected_last_instance_state)
self._assert_cluster_gone(cluster_id)
@@ -106,9 +241,38 @@ class ClusterActionsRunner(TestRunner):
if expected_http_code is not None:
self.assert_client_code(expected_http_code)
if expected_state:
self._assert_cluster_state(cluster_id, expected_state)
self._assert_cluster_response(cluster_id, expected_state)
def _assert_cluster_state(self, cluster_id, expected_state):
def _assert_cluster_states(self, cluster_id, expected_states,
fast_fail_status=None):
for status in expected_states:
start_time = timer.time()
try:
poll_until(lambda: self._has_task(
cluster_id, status, fast_fail_status=fast_fail_status),
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
self.report.log("Cluster has gone '%s' in %s." %
(status, self._time_since(start_time)))
except exception.PollTimeOut:
self.report.log(
"Status of cluster '%s' did not change to '%s' after %s."
% (cluster_id, status, self._time_since(start_time)))
return False
return True
def _has_task(self, cluster_id, task, fast_fail_status=None):
cluster = self.auth_client.clusters.get(cluster_id)
task_name = cluster.task['name']
self.report.log("Waiting for cluster '%s' to become '%s': %s"
% (cluster_id, task, task_name))
if fast_fail_status and task_name == fast_fail_status:
raise RuntimeError("Cluster '%s' acquired a fast-fail task: %s"
% (cluster_id, task))
return task_name == task
def _assert_cluster_response(self, cluster_id, expected_state):
cluster = self.auth_client.clusters.get(cluster_id)
with TypeCheck('Cluster', cluster) as check:
check.has_field("id", basestring)
@@ -129,7 +293,8 @@ class ClusterActionsRunner(TestRunner):
def _assert_cluster_gone(self, cluster_id):
t0 = timer.time()
try:
self.auth_client.clusters.get(cluster_id)
# This will poll until the cluster goes away.
self._assert_cluster_states(cluster_id, ['NONE'])
self.fail(
"Cluster '%s' still existed after %s seconds."
% (cluster_id, self._time_since(t0)))
@@ -139,10 +304,27 @@
class MongodbClusterActionsRunner(ClusterActionsRunner):
def run_cluster_create(self, num_nodes=3,
def run_cluster_create(self, num_nodes=3, expected_task_name='BUILDING',
expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
super(MongodbClusterActionsRunner, self).run_cluster_create(
num_nodes=num_nodes,
num_nodes=num_nodes, expected_task_name=expected_task_name,
expected_instance_states=expected_instance_states,
expected_http_code=expected_http_code)
class PxcClusterActionsRunner(ClusterActionsRunner):
def run_cluster_create(self, num_nodes=3, expected_task_name='BUILDING',
expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
super(PxcClusterActionsRunner, self).run_cluster_create(
num_nodes=num_nodes, expected_task_name=expected_task_name,
expected_instance_states=expected_instance_states,
expected_http_code=expected_http_code)
def run_cluster_shrink(self):
raise SkipTest("Operation not supported by the datastore.")
def run_cluster_grow(self):
raise SkipTest("Operation not supported by the datastore.")

@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions
@@ -72,3 +74,18 @@ class MongodbNegativeClusterActionsRunner(NegativeClusterActionsRunner):
super(NegativeClusterActionsRunner,
self).run_create_constrained_size_cluster(min_nodes=3,
max_nodes=3)
class RedisNegativeClusterActionsRunner(NegativeClusterActionsRunner):
def run_create_constrained_size_cluster(self):
raise SkipTest("No constraints apply to the number of cluster nodes.")
def run_create_heterogeneous_cluster(self):
raise SkipTest("No constraints apply to the size of cluster nodes.")
class PxcNegativeClusterActionsRunner(NegativeClusterActionsRunner):
def run_create_constrained_size_cluster(self):
raise SkipTest("No constraints apply to the number of cluster nodes.")

@@ -68,7 +68,15 @@ class TestRunner(object):
def __init__(self, sleep_time=10, timeout=1200):
self.def_sleep_time = sleep_time
self.def_timeout = timeout
self.instance_info = instance_info
instance_info.dbaas_datastore = CONFIG.dbaas_datastore
instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
if self.VOLUME_SUPPORT:
instance_info.volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
instance_info.volume = None
self.auth_client = create_dbaas_client(self.instance_info.user)
self.unauth_client = None
self._test_helper = None
@@ -173,7 +181,12 @@
@property
def is_using_existing_instance(self):
return os.environ.get(self.USE_INSTANCE_ID_FLAG, None) is not None
return self.has_env_flag(self.USE_INSTANCE_ID_FLAG)
@staticmethod
def has_env_flag(flag_name):
"""Return whether a given flag was set."""
return os.environ.get(flag_name, None) is not None
def get_existing_instance(self):
if self.is_using_existing_instance:
@@ -184,8 +197,7 @@
@property
def has_do_not_delete_instance(self):
return os.environ.get(
self.DO_NOT_DELETE_INSTANCE_FLAG, None) is not None
return self.has_env_flag(self.DO_NOT_DELETE_INSTANCE_FLAG)
def assert_instance_action(
self, instance_ids, expected_states, expected_http_code):