Always clean up nodes and cluster
This was hiding a bug with flavor_id. Make sure we delete the cluster so that it will always fail gracefully. In addition, we also stop re-using the same private network for cluster tests and instead create custom networks to prevent race conditions between tests. * Add small retry logic for deleting subnets and networks. * Add an override for the network name to allow for randomized names. Change-Id: Ibcf1b25bc6e5e3a54678646a397dc1c41b1d29af
This commit is contained in:
parent
beb8dad555
commit
ec20720626
|
@ -60,13 +60,16 @@ def prepare_and_cleanup_for_nova_server(base, cidr, spec=None):
|
|||
base.addCleanup(delete_a_subnet, base, subnet_id)
|
||||
|
||||
|
||||
def create_spec_from_config():
|
||||
def create_spec_from_config(network_name=None):
|
||||
"""Utility function that creates a spec object from tempest config"""
|
||||
spec = constants.spec_nova_server
|
||||
|
||||
spec['properties']['flavor'] = CONF.compute.flavor_ref
|
||||
spec['properties']['image'] = CONF.compute.image_ref
|
||||
|
||||
if network_name:
|
||||
spec['properties']['networks'][0]['network'] = network_name
|
||||
|
||||
return spec
|
||||
|
||||
|
||||
|
@ -562,13 +565,18 @@ def create_a_network(base, name=None):
|
|||
return body['body']['id']
|
||||
|
||||
|
||||
@tenacity.retry(
|
||||
retry=tenacity.retry_if_exception_type(exceptions.Conflict),
|
||||
wait=tenacity.wait_fixed(5),
|
||||
retry_error_callback=_return_last_value,
|
||||
stop=tenacity.stop_after_attempt(3)
|
||||
)
|
||||
def delete_a_network(base, network_id, ignore_missing=False,
|
||||
wait_timeout=None):
|
||||
"""Utility function that deletes a Neutron network."""
|
||||
|
||||
res = base.network_client.delete_obj('networks', network_id)
|
||||
if res['status'] == 404:
|
||||
if ignore_missing is True:
|
||||
if ignore_missing:
|
||||
return
|
||||
raise exceptions.NotFound()
|
||||
|
||||
|
@ -594,12 +602,17 @@ def create_a_subnet(base, network_id, cidr, ip_version=4, name=None):
|
|||
return body['body']['id']
|
||||
|
||||
|
||||
@tenacity.retry(
|
||||
retry=tenacity.retry_if_exception_type(exceptions.Conflict),
|
||||
wait=tenacity.wait_fixed(5),
|
||||
retry_error_callback=_return_last_value,
|
||||
stop=tenacity.stop_after_attempt(3)
|
||||
)
|
||||
def delete_a_subnet(base, subnet_id, ignore_missing=False, wait_timeout=None):
|
||||
"""Utility function that deletes a Neutron subnet."""
|
||||
|
||||
res = base.network_client.delete_obj('subnets', subnet_id)
|
||||
if res['status'] == 404:
|
||||
if ignore_missing is True:
|
||||
if ignore_missing:
|
||||
return
|
||||
raise exceptions.NotFound()
|
||||
|
||||
|
|
|
@ -32,33 +32,40 @@ class TestClusterCreateListDelete(base.BaseSenlinFunctionalTest):
|
|||
max_size = 3
|
||||
metadata = {'k1': 'v1'}
|
||||
timeout = 120
|
||||
cluster_id1 = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# Verify creation result
|
||||
cluster1 = utils.get_a_cluster(self, cluster_id1)
|
||||
self.assertIsNotNone(cluster1)
|
||||
self.assertEqual(desired_capacity, cluster1['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster1['nodes']))
|
||||
self.assertEqual(min_size, cluster1['min_size'])
|
||||
self.assertEqual(max_size, cluster1['max_size'])
|
||||
self.assertEqual(metadata, cluster1['metadata'])
|
||||
self.assertEqual(timeout, cluster1['timeout'])
|
||||
cluster_id1 = None
|
||||
cluster_id2 = None
|
||||
|
||||
# Create another cluster
|
||||
cluster_id2 = utils.create_a_cluster(self, self.profile_id)
|
||||
try:
|
||||
cluster_id1 = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# List clusters
|
||||
clusters = utils.list_clusters(self)
|
||||
self.assertIsNotNone(clusters)
|
||||
cluster_ids = [c['id'] for c in clusters]
|
||||
self.assertIn(cluster_id1, cluster_ids)
|
||||
self.assertIn(cluster_id2, cluster_ids)
|
||||
# Verify creation result
|
||||
cluster1 = utils.get_a_cluster(self, cluster_id1)
|
||||
self.assertIsNotNone(cluster1)
|
||||
self.assertEqual(desired_capacity, cluster1['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster1['nodes']))
|
||||
self.assertEqual(min_size, cluster1['min_size'])
|
||||
self.assertEqual(max_size, cluster1['max_size'])
|
||||
self.assertEqual(metadata, cluster1['metadata'])
|
||||
self.assertEqual(timeout, cluster1['timeout'])
|
||||
|
||||
# Delete cluster
|
||||
utils.delete_a_cluster(self, cluster_id1)
|
||||
utils.delete_a_cluster(self, cluster_id2)
|
||||
# Create another cluster
|
||||
cluster_id2 = utils.create_a_cluster(self, self.profile_id)
|
||||
|
||||
# List clusters
|
||||
clusters = utils.list_clusters(self)
|
||||
self.assertIsNotNone(clusters)
|
||||
cluster_ids = [c['id'] for c in clusters]
|
||||
self.assertIn(cluster_id1, cluster_ids)
|
||||
self.assertIn(cluster_id2, cluster_ids)
|
||||
finally:
|
||||
# Delete clusters
|
||||
if cluster_id1:
|
||||
utils.delete_a_cluster(self, cluster_id1)
|
||||
if cluster_id2:
|
||||
utils.delete_a_cluster(self, cluster_id2)
|
||||
|
||||
|
||||
class TestClusterUpdate(base.BaseSenlinFunctionalTest):
|
||||
|
|
|
@ -33,47 +33,54 @@ class TestNodeCreateShowListDelete(base.BaseSenlinFunctionalTest):
|
|||
name = data_utils.rand_name('node')
|
||||
metadata = {'k1': 'v1'}
|
||||
role = 'individual'
|
||||
node_id1 = utils.create_a_node(
|
||||
self, self.profile_id, metadata=metadata, role=role, name=name)
|
||||
|
||||
# Verify creation result
|
||||
node1 = utils.get_a_node(self, node_id1)
|
||||
self.assertIsNotNone(node1)
|
||||
self.assertEqual(name, node1['name'])
|
||||
self.assertEqual(metadata, node1['metadata'])
|
||||
self.assertEqual(role, node1['role'])
|
||||
self.assertEqual('', node1['cluster_id'])
|
||||
self.assertNotIn('details', node1)
|
||||
node_id1 = None
|
||||
node_id2 = None
|
||||
|
||||
# Get node with detail
|
||||
node1 = utils.get_a_node(self, node_id1, show_details=True)
|
||||
self.assertIn('details', node1)
|
||||
self.assertIsNotNone(node1['details'])
|
||||
try:
|
||||
node_id1 = utils.create_a_node(
|
||||
self, self.profile_id, metadata=metadata, role=role, name=name)
|
||||
|
||||
# Create second node with target cluster
|
||||
name = data_utils.rand_name('node')
|
||||
node_id2 = utils.create_a_node(
|
||||
self, self.profile_id, cluster_id=self.cluster_id,
|
||||
metadata=metadata, role=role, name=name)
|
||||
# Verify creation result
|
||||
node1 = utils.get_a_node(self, node_id1)
|
||||
self.assertIsNotNone(node1)
|
||||
self.assertEqual(name, node1['name'])
|
||||
self.assertEqual(metadata, node1['metadata'])
|
||||
self.assertEqual(role, node1['role'])
|
||||
self.assertEqual('', node1['cluster_id'])
|
||||
self.assertNotIn('details', node1)
|
||||
|
||||
# Verify creation result
|
||||
node2 = utils.get_a_node(self, node_id2)
|
||||
self.assertIsNotNone(node2)
|
||||
self.assertEqual(self.cluster_id, node2['cluster_id'])
|
||||
cluster = utils.get_a_cluster(self, self.cluster_id)
|
||||
self.assertIn(node_id2, cluster['nodes'])
|
||||
# Get node with detail
|
||||
node1 = utils.get_a_node(self, node_id1, show_details=True)
|
||||
self.assertIn('details', node1)
|
||||
self.assertIsNotNone(node1['details'])
|
||||
|
||||
# List nodes
|
||||
nodes = utils.list_nodes(self)
|
||||
self.assertIsNotNone(nodes)
|
||||
self.assertEqual(2, len(nodes))
|
||||
node_ids = [n['id'] for n in nodes]
|
||||
self.assertIn(node_id1, node_ids)
|
||||
self.assertIn(node_id2, node_ids)
|
||||
# Create second node with target cluster
|
||||
name = data_utils.rand_name('node')
|
||||
node_id2 = utils.create_a_node(
|
||||
self, self.profile_id, cluster_id=self.cluster_id,
|
||||
metadata=metadata, role=role, name=name)
|
||||
|
||||
# Delete nodes
|
||||
utils.delete_a_node(self, node_id1)
|
||||
utils.delete_a_node(self, node_id2)
|
||||
# Verify creation result
|
||||
node2 = utils.get_a_node(self, node_id2)
|
||||
self.assertIsNotNone(node2)
|
||||
self.assertEqual(self.cluster_id, node2['cluster_id'])
|
||||
cluster = utils.get_a_cluster(self, self.cluster_id)
|
||||
self.assertIn(node_id2, cluster['nodes'])
|
||||
|
||||
# List nodes
|
||||
nodes = utils.list_nodes(self)
|
||||
self.assertIsNotNone(nodes)
|
||||
self.assertEqual(2, len(nodes))
|
||||
node_ids = [n['id'] for n in nodes]
|
||||
self.assertIn(node_id1, node_ids)
|
||||
self.assertIn(node_id2, node_ids)
|
||||
finally:
|
||||
# Delete nodes
|
||||
if node_id1:
|
||||
utils.delete_a_node(self, node_id1)
|
||||
if node_id2:
|
||||
utils.delete_a_node(self, node_id2)
|
||||
|
||||
|
||||
class TestNodeUpdate(base.BaseSenlinFunctionalTest):
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
import time
|
||||
|
@ -26,10 +27,12 @@ class TestHealthPolicy(base.BaseSenlinIntegrationTest):
|
|||
def setUp(self):
|
||||
super(TestHealthPolicy, self).setUp()
|
||||
|
||||
spec = utils.create_spec_from_config()
|
||||
spec['properties']['networks'][0]['network'] = 'private-hp'
|
||||
utils.prepare_and_cleanup_for_nova_server(self, "192.168.199.0/24",
|
||||
spec)
|
||||
self.spec = utils.create_spec_from_config(
|
||||
network_name=data_utils.rand_name('tempest-created-network')
|
||||
)
|
||||
utils.prepare_and_cleanup_for_nova_server(
|
||||
self, '192.168.198.0/24', spec=self.spec
|
||||
)
|
||||
self.profile_id = utils.create_a_profile(self)
|
||||
self.addCleanup(utils.delete_a_profile, self, self.profile_id)
|
||||
self.cluster_id = utils.create_a_cluster(self, self.profile_id,
|
||||
|
|
|
@ -34,19 +34,24 @@ class TestHeatStackCluster(base.BaseSenlinIntegrationNonAdminTest):
|
|||
max_size = 3
|
||||
metadata = {'k1': 'v1'}
|
||||
timeout = 300
|
||||
cluster_id = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# Verify creation result
|
||||
cluster = utils.get_a_cluster(self, cluster_id)
|
||||
self.assertIsNotNone(cluster)
|
||||
self.assertEqual(desired_capacity, cluster['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster['nodes']))
|
||||
for nid in cluster['nodes']:
|
||||
node = utils.get_a_node(self, nid)
|
||||
self.assertEqual('ACTIVE', node['status'])
|
||||
self.assertEqual(cluster_id, node['cluster_id'])
|
||||
self.cluster_id = None
|
||||
|
||||
# Delete cluster
|
||||
utils.delete_a_cluster(self, cluster_id)
|
||||
try:
|
||||
self.cluster_id = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# Verify creation result
|
||||
cluster = utils.get_a_cluster(self, self.cluster_id)
|
||||
self.assertIsNotNone(cluster)
|
||||
self.assertEqual(desired_capacity, cluster['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster['nodes']))
|
||||
for nid in cluster['nodes']:
|
||||
node = utils.get_a_node(self, nid)
|
||||
self.assertEqual('ACTIVE', node['status'])
|
||||
self.assertEqual(self.cluster_id, node['cluster_id'])
|
||||
finally:
|
||||
# Delete cluster
|
||||
if self.cluster_id:
|
||||
utils.delete_a_cluster(self, self.cluster_id)
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
|
||||
import six
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from senlin_tempest_plugin.common import constants
|
||||
|
@ -23,8 +24,12 @@ class TestNovaServerCluster(base.BaseSenlinIntegrationNonAdminTest):
|
|||
|
||||
def setUp(self):
|
||||
super(TestNovaServerCluster, self).setUp()
|
||||
self.spec = utils.create_spec_from_config()
|
||||
utils.prepare_and_cleanup_for_nova_server(self, "192.168.199.0/24")
|
||||
self.spec = utils.create_spec_from_config(
|
||||
network_name=data_utils.rand_name('tempest-created-network')
|
||||
)
|
||||
utils.prepare_and_cleanup_for_nova_server(
|
||||
self, '192.168.199.0/24', spec=self.spec
|
||||
)
|
||||
self.profile_id = utils.create_a_profile(self, self.spec)
|
||||
self.addCleanup(utils.delete_a_profile, self, self.profile_id)
|
||||
|
||||
|
@ -37,34 +42,39 @@ class TestNovaServerCluster(base.BaseSenlinIntegrationNonAdminTest):
|
|||
max_size = 3
|
||||
metadata = {'k1': 'v1'}
|
||||
timeout = 600
|
||||
cluster_id = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# Verify creation result
|
||||
cluster = utils.get_a_cluster(self, cluster_id)
|
||||
self.assertIsNotNone(cluster)
|
||||
self.assertEqual(desired_capacity, cluster['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster['nodes']))
|
||||
for nid in cluster['nodes']:
|
||||
node = utils.get_a_node(self, nid, show_details=True)
|
||||
self.assertEqual('ACTIVE', node['status'])
|
||||
self.assertEqual(cluster_id, node['cluster_id'])
|
||||
self.assertIsNotNone(node['details'])
|
||||
self.assertEqual('ACTIVE', node['details']['status'])
|
||||
self.assertEqual(self.spec['properties']['flavor'],
|
||||
node['details']['flavor'])
|
||||
self.assertEqual(self.spec['properties']['name'],
|
||||
node['details']['name'])
|
||||
metadata = {
|
||||
'cluster_id': cluster['id'],
|
||||
'cluster_node_id': node['id'],
|
||||
'cluster_node_index': six.text_type(node['index'])
|
||||
}
|
||||
self.assertEqual(metadata, node['details']['metadata'])
|
||||
self.cluster_id = None
|
||||
|
||||
# Delete cluster
|
||||
utils.delete_a_cluster(self, cluster_id)
|
||||
try:
|
||||
self.cluster_id = utils.create_a_cluster(
|
||||
self, self.profile_id, desired_capacity, min_size, max_size,
|
||||
timeout, metadata)
|
||||
|
||||
# Verify creation result
|
||||
cluster = utils.get_a_cluster(self, self.cluster_id)
|
||||
self.assertIsNotNone(cluster)
|
||||
self.assertEqual(desired_capacity, cluster['desired_capacity'])
|
||||
self.assertEqual(desired_capacity, len(cluster['nodes']))
|
||||
for nid in cluster['nodes']:
|
||||
node = utils.get_a_node(self, nid, show_details=True)
|
||||
self.assertEqual('ACTIVE', node['status'])
|
||||
self.assertEqual(self.cluster_id, node['cluster_id'])
|
||||
self.assertIsNotNone(node['details'])
|
||||
self.assertEqual('ACTIVE', node['details']['status'])
|
||||
self.assertEqual(self.spec['properties']['flavor'],
|
||||
node['details']['flavor'])
|
||||
self.assertEqual(self.spec['properties']['name'],
|
||||
node['details']['name'])
|
||||
metadata = {
|
||||
'cluster_id': cluster['id'],
|
||||
'cluster_node_id': node['id'],
|
||||
'cluster_node_index': six.text_type(node['index'])
|
||||
}
|
||||
self.assertEqual(metadata, node['details']['metadata'])
|
||||
finally:
|
||||
# Delete cluster
|
||||
if self.cluster_id:
|
||||
utils.delete_a_cluster(self, self.cluster_id)
|
||||
|
||||
@decorators.attr(type=['integration'])
|
||||
@decorators.idempotent_id('9ac7ed9d-7338-45fb-b749-f67ddeb6caa2')
|
||||
|
|
Loading…
Reference in New Issue