fix test_compute_with_volumes

The test_integration_1 test was waiting on the volume being in an
'in-use' state before proceeding. However, the 'in-use' state
doesn't actually mean that the volume is attached, because
'attaching' gets mapped to 'in-use' in nova.

This makes it possible for the test to proceed while the volume is
still in an attaching state and never actually attached, which
means the detach phase can run against a non-attached volume and
thus explode.

Use the attachment status to filter out the case where the volume
is not actually ready.

In the process, give the test a name that actually describes what
it does.

Related-Bug: #1302774

Change-Id: Iaeb6a42a39b4adf8cb4bdff64efe00bf14821725
Author: Sean Dague
Date: 2014-04-22 20:00:46 -04:00 (committed by Sean Dague)
Parent: 7919447107
Commit: 3b563355ec
1 changed file with 23 additions and 8 deletions

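For context, here is a minimal sketch (not part of the change itself) of the two-level status check that the new _volume_state() helper in the diff below relies on. It assumes a boto EC2 Volume object; the helper name is_really_attached is purely illustrative:

    # Illustrative only: volume.status alone is not enough, because Nova
    # reports 'in-use' while a volume is still 'attaching'. The EC2-level
    # attach_data.status distinguishes the two cases.
    def is_really_attached(volume):
        volume.update(validate=True)
        return (volume.status == 'in-use' and
                volume.attach_data.status == 'attached')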

@@ -218,10 +218,8 @@ class InstanceRunTest(boto_test.BotoTestCase):
         else:
             self.assertNotEqual(instance.state, "running")
 
-    # NOTE(afazekas): doctored test case,
-    # with normal validation it would fail
     @test.attr(type='smoke')
-    def test_integration_1(self):
+    def test_compute_with_volumes(self):
         # EC2 1. integration test (not strict)
         image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
         sec_group_name = data_utils.rand_name("securitygroup-")
@@ -249,14 +247,20 @@ class InstanceRunTest(boto_test.BotoTestCase):
                                     instance_type=self.instance_type,
                                     key_name=self.keypair_name,
                                     security_groups=(sec_group_name,))
+        LOG.debug("Instance booted - state: %s",
+                  reservation.instances[0].state)
+
         self.addResourceCleanUp(self.destroy_reservation,
                                 reservation)
         volume = self.ec2_client.create_volume(1, self.zone)
+        LOG.debug("Volume created - status: %s", volume.status)
+
         self.addResourceCleanUp(self.destroy_volume_wait, volume)
         instance = reservation.instances[0]
-        LOG.info("state: %s", instance.state)
         if instance.state != "running":
             self.assertInstanceStateWait(instance, "running")
+        LOG.debug("Instance now running - state: %s", instance.state)
+
         address = self.ec2_client.allocate_address()
         rcuk_a = self.addResourceCleanUp(address.delete)
@@ -284,10 +288,21 @@ class InstanceRunTest(boto_test.BotoTestCase):
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
+            """Return volume state realizing that 'in-use' is overloaded."""
             volume.update(validate=True)
-            return volume.status
+            status = volume.status
+            attached = volume.attach_data.status
+            LOG.debug("Volume %s is in status: %s, attach_status: %s",
+                      volume.id, status, attached)
+            # Nova reports 'in-use' on 'attaching' volumes because we
+            # have a single volume status, and EC2 has 2. Ensure that
+            # if we aren't attached yet we return something other than
+            # 'in-use'
+            if status == 'in-use' and attached != 'attached':
+                return 'attaching'
+            else:
+                return status
 
-        self.assertVolumeStatusWait(_volume_state, "in-use")
         wait.re_search_wait(_volume_state, "in-use")
 
         # NOTE(afazekas): Different Hypervisor backends names
@@ -296,6 +311,7 @@ class InstanceRunTest(boto_test.BotoTestCase):
 
         def _part_state():
             current = ssh.get_partitions().split('\n')
+            LOG.debug("Partition map for instance: %s", current)
             if current > part_lines:
                 return 'INCREASE'
             if current < part_lines:
@@ -311,7 +327,6 @@ class InstanceRunTest(boto_test.BotoTestCase):
-        self.assertVolumeStatusWait(_volume_state, "available")
         wait.re_search_wait(_volume_state, "available")
         LOG.info("Volume %s state: %s", volume.id, volume.status)
 
         wait.state_wait(_part_state, 'DECREASE')
@@ -323,7 +338,7 @@ class InstanceRunTest(boto_test.BotoTestCase):
         self.assertAddressReleasedWait(address)
         self.cancelResourceCleanUp(rcuk_a)
 
-        LOG.info("state: %s", instance.state)
+        LOG.debug("Instance %s state: %s", instance.id, instance.state)
         if instance.state != "stopped":
             self.assertInstanceStateWait(instance, "stopped")
         # TODO(afazekas): move steps from teardown to the test case
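
The wait.re_search_wait() calls above poll a callable until its return value matches a regular expression. A rough approximation of that pattern is sketched below; the timeout and interval defaults are assumptions, not tempest's actual configuration:

    import re
    import time

    def re_search_wait(fetch_state, regexp, timeout=300, interval=5):
        # Poll fetch_state() until its result matches regexp or time runs out.
        # Because _volume_state() now reports 'attaching' rather than a
        # premature 'in-use', a loop like this keeps waiting until the
        # volume is genuinely attached.
        deadline = time.time() + timeout
        state = fetch_state()
        while not re.search(regexp, state):
            if time.time() > deadline:
                raise AssertionError(
                    "state %r never matched %r" % (state, regexp))
            time.sleep(interval)
            state = fetch_state()
        return state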