Fix instability with volume attachment RBAC tests

This patchset fixes instability in the volume attachment
RBAC tests. The tests currently fail because volumes do not
reach their expected statuses, either because the waiters are
not robust enough or because class-wide resources (volumes,
servers) are shared across tests [0]. The lack of resource
isolation between tests likely compounds the waiter issues.

This patchset resolves the issue by creating a volume/server
per test and removing the shared class-wide versions. It also
adds a `wait_for_server_volume_swap` call to
`test_update_volume_attachment`, which was previously missing
even though the test performs a volume swap.
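
For reference, the new waiter follows the usual Tempest
poll-until-timeout pattern; a minimal sketch, simplified from the
helper added in the diff below (the simplified names `old_id` and
`new_id` and the shortened error message are illustrative only):

    # Simplified sketch; the real helper below also reports each
    # volume's BDM status in the timeout message.
    def wait_for_server_volume_swap(self, server_id, old_id, new_id):
        start = int(time.time())
        while True:
            attachments = self.servers_client.list_volume_attachments(
                server_id)['volumeAttachments']
            attached = [a['volumeId'] for a in attachments]
            # Done once the old volume is gone and the new one is attached.
            if old_id not in attached and new_id in attached:
                return
            if int(time.time()) - start >= self.servers_client.build_timeout:
                raise lib_exc.TimeoutException(
                    'Volume swap %s -> %s timed out' % (old_id, new_id))
            time.sleep(self.servers_client.build_interval)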

Finally, `test_create_volume_attachment` now tests the
`attach_volume` API action more atomically by calling the API
directly inside the context manager (while the role is
overridden) and only afterward invoking the waiter to wait for
the volume status to change.

[0] e.g. http://logs.openstack.org/21/571621/1/check/patrole-multinode-admin/395b4fb/job-output.txt.gz#_2018-06-01_04_06_10_109536

Change-Id: Ib78ccc3cad7689c0c5a7daf10ec5eeb2ee7a03ab

@@ -12,17 +12,22 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import time
+
+from oslo_log import log as logging
 import testtools
 
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
 
 from patrole_tempest_plugin import rbac_rule_validation
 from patrole_tempest_plugin.tests.api.compute import rbac_base
 
 CONF = config.CONF
+LOG = logging.getLogger(__name__)
 
 
 # FIXME(felipemonteiro): `@decorators.attr(type='slow')` are added to tests
@@ -49,6 +54,80 @@ class ServerVolumeAttachmentRbacTest(rbac_base.BaseV2ComputeRbacTest):
         cls.server = cls.create_test_server(wait_until='ACTIVE')
         cls.volume = cls.create_volume()
 
+    def _recreate_volume(self):
+        try:
+            # In case detachment failed, update the DB status of the volume
+            # to avoid error getting thrown when deleting the volume.
+            self.volumes_client.reset_volume_status(
+                self.volume['id'], status='available',
+                attach_status='detached')
+            waiters.wait_for_volume_resource_status(
+                self.volumes_client, self.volume['id'], 'available')
+            # Next, forcibly delete the volume.
+            self.volumes_client.force_delete_volume(self.volume['id'])
+            self.volumes_client.wait_for_resource_deletion(self.volume['id'])
+        except lib_exc.TimeoutException:
+            LOG.exception('Failed to delete volume %s', self.volume['id'])
+        # Finally, re-create the volume.
+        self.__class__.volume = self.create_volume()
+
+    def _restore_volume_status(self):
+        # Forcibly detach any attachments still attached to the volume.
+        try:
+            attachments = self.volumes_client.show_volume(
+                self.volume['id'])['volume']['attachments']
+            if attachments:
+                # Tests below only ever create one attachment for the volume.
+                attachment = attachments[0]
+                self.volumes_client.force_detach_volume(
+                    self.volume['id'], connector=None,
+                    attachment_id=attachment['id'])
+                waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                        self.volume['id'],
+                                                        'available')
+        except lib_exc.TimeoutException:
+            # If all else fails, rebuild the volume.
+            self._recreate_volume()
+
+    def setUp(self):
+        super(ServerVolumeAttachmentRbacTest, self).setUp()
+        self._restore_volume_status()
+
+    def wait_for_server_volume_swap(self, server_id, old_volume_id,
+                                    new_volume_id):
+        """Waits for a server to swap the old volume to a new one."""
+        volume_attachments = self.servers_client.list_volume_attachments(
+            server_id)['volumeAttachments']
+        attached_volume_ids = [attachment['volumeId']
+                               for attachment in volume_attachments]
+        start = int(time.time())
+
+        while (old_volume_id in attached_volume_ids) \
+                or (new_volume_id not in attached_volume_ids):
+            time.sleep(self.servers_client.build_interval)
+            volume_attachments = self.servers_client.list_volume_attachments(
+                server_id)['volumeAttachments']
+            attached_volume_ids = [attachment['volumeId']
+                                   for attachment in volume_attachments]
+
+            if int(time.time()) - start >= self.servers_client.build_timeout:
+                old_vol_bdm_status = 'in BDM' \
+                    if old_volume_id in attached_volume_ids else 'not in BDM'
+                new_vol_bdm_status = 'in BDM' \
+                    if new_volume_id in attached_volume_ids else 'not in BDM'
+                message = ('Failed to swap old volume %(old_volume_id)s '
+                           '(current %(old_vol_bdm_status)s) to new volume '
+                           '%(new_volume_id)s (current %(new_vol_bdm_status)s)'
+                           ' on server %(server_id)s within the required time '
+                           '(%(timeout)s s)' %
+                           {'old_volume_id': old_volume_id,
+                            'old_vol_bdm_status': old_vol_bdm_status,
+                            'new_volume_id': new_volume_id,
+                            'new_vol_bdm_status': new_vol_bdm_status,
+                            'server_id': server_id,
+                            'timeout': self.servers_client.build_timeout})
+                raise lib_exc.TimeoutException(message)
+
     @rbac_rule_validation.action(
         service="nova",
         rules=["os_compute_api:os-volumes-attachments:index"])
@@ -64,7 +143,18 @@ class ServerVolumeAttachmentRbacTest(rbac_base.BaseV2ComputeRbacTest):
     @decorators.idempotent_id('21c2c3fd-fbe8-41b1-8ef8-115ec47d54c1')
     def test_create_volume_attachment(self):
         with self.rbac_utils.override_role(self):
-            self.attach_volume(self.server, self.volume)
+            self.servers_client.attach_volume(self.server['id'],
+                                              volumeId=self.volume['id'])
+        # On teardown detach the volume and wait for it to be available. This
+        # is so we don't error out when trying to delete the volume during
+        # teardown.
+        self.addCleanup(waiters.wait_for_volume_resource_status,
+                        self.volumes_client, self.volume['id'], 'available')
+        # Ignore 404s on detach in case the server is deleted or the volume
+        # is already detached.
+        self.addCleanup(self._detach_volume, self.server, self.volume)
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                self.volume['id'], 'in-use')
 
     @decorators.attr(type='slow')
     @rbac_rule_validation.action(
@@ -86,24 +176,22 @@ class ServerVolumeAttachmentRbacTest(rbac_base.BaseV2ComputeRbacTest):
         rules=["os_compute_api:os-volumes-attachments:update"])
     @decorators.idempotent_id('bd667186-eca6-4b78-ab6a-3e2fabcb971f')
     def test_update_volume_attachment(self):
-        attachment = self.attach_volume(self.server, self.volume)
-        alt_volume = self.create_volume()
+        volume1 = self.volume
+        volume2 = self.create_volume()
+        # Attach "volume1" to server
+        self.attach_volume(self.server, volume1)
 
         with self.rbac_utils.override_role(self):
+            # Swap volume from "volume1" to "volume2"
             self.servers_client.update_attached_volume(
-                self.server['id'], attachment['id'], volumeId=alt_volume['id'])
+                self.server['id'], volume1['id'], volumeId=volume2['id'])
+        self.addCleanup(self._detach_volume, self.server, volume2)
         waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                alt_volume['id'], 'in-use')
-        # On teardown detach the volume and wait for it to be available. This
-        # is so we don't error out when trying to delete the volume during
-        # teardown.
-        self.addCleanup(waiters.wait_for_volume_resource_status,
-                        self.volumes_client, alt_volume['id'], 'available')
-        # Ignore 404s on detach in case the server is deleted or the volume
-        # is already detached.
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.servers_client.detach_volume,
-                        self.server['id'], alt_volume['id'])
+                                                volume1['id'], 'available')
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume2['id'], 'in-use')
+        self.wait_for_server_volume_swap(self.server['id'], volume1['id'],
+                                         volume2['id'])
 
     @decorators.attr(type='slow')
     @rbac_rule_validation.action(