Merge "Properly clean up BDMs when _provision_instances fails" into stable/mitaka

Jenkins 2016-07-02 02:11:44 +00:00 committed by Gerrit Code Review
commit 98b38df57b
3 changed files with 50 additions and 0 deletions
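For context, a minimal sketch of the failure mode the commit title describes, under a simplified flow: an instance record and its block device mapping are created, the server group quota check then fails, and the error path calls instance_destroy(). Before this change that cleanup left the BDM rows behind; with the soft_delete() added below they are removed together with the instance. This is illustrative only, not the actual _provision_instances code, and check_server_group_quota() is a hypothetical stand-in for the real quota check.

# Illustrative sketch only -- not nova's actual _provision_instances logic.
# The nova.db calls are the same ones exercised by the tests in this commit;
# check_server_group_quota() is a hypothetical stand-in.
from nova import context
from nova import db


def check_server_group_quota(ctxt):
    # Hypothetical stand-in: pretend the member quota is already exhausted.
    raise Exception('server group member quota exceeded')


def build_instance_with_bdm(instance_values, bdm_values):
    ctxt = context.get_admin_context()
    instance = db.instance_create(ctxt, instance_values)
    bdm_values['instance_uuid'] = instance['uuid']
    db.block_device_mapping_create(ctxt, bdm_values, legacy=False)
    try:
        check_server_group_quota(ctxt)
    except Exception:
        # Cleanup path: with the BlockDeviceMapping soft_delete() added in
        # this commit, instance_destroy() now removes the BDM rows as well
        # instead of leaving them orphaned.
        db.instance_destroy(ctxt, instance['uuid'])
        raise
    return instance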


@@ -1952,6 +1952,9 @@ def instance_destroy(context, instance_uuid, constraint=None):
    model_query(context, models.InstanceGroupMember).\
        filter_by(instance_id=instance_uuid).\
        soft_delete()
    model_query(context, models.BlockDeviceMapping).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()
    # NOTE(snikitin): We can't use model_query here, because there is no
    # column 'deleted' in 'tags' table.
    context.session.query(models.Tag).filter_by(

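The soft_delete() call added above comes from oslo.db's query helpers and flags rows as deleted rather than physically removing them. A rough standalone illustration follows, using plain SQLAlchemy and a simplified stand-in table; the column layout mimics the 'deleted'/'deleted_at' columns nova models get from oslo.db's SoftDeleteMixin, but this is not nova's actual schema or code.

# Rough illustration of a soft delete against a simplified table; not
# nova's real code or schema.
import datetime

import sqlalchemy as sa

metadata = sa.MetaData()
block_device_mapping = sa.Table(
    'block_device_mapping', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('instance_uuid', sa.String(36)),
    sa.Column('deleted', sa.Integer, default=0),
    sa.Column('deleted_at', sa.DateTime, nullable=True),
)


def soft_delete_bdms(conn, instance_uuid):
    # Equivalent in spirit to:
    #   model_query(context, models.BlockDeviceMapping).
    #       filter_by(instance_uuid=instance_uuid).soft_delete()
    # The rows stay in the table but are flagged deleted.
    conn.execute(
        block_device_mapping.update()
        .where(block_device_mapping.c.instance_uuid == instance_uuid)
        .where(block_device_mapping.c.deleted == 0)
        .values(deleted=block_device_mapping.c.id,
                deleted_at=datetime.datetime.utcnow())
    )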

@@ -18,6 +18,9 @@ import time
from oslo_config import cfg

from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
@@ -265,6 +268,33 @@ class ServerGroupTestV2(ServerGroupTestBase):
        self.assertIn(server['id'], members)
        self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])

    def test_boot_servers_with_affinity_overquota(self):
        # Tests that we check server group member quotas and clean up created
        # resources when we fail with OverQuota.
        self.flags(quota_server_group_members=1)
        # make sure we start with 0 servers
        servers = self.api.get_servers(detail=False)
        self.assertEqual(0, len(servers))
        created_group = self.api.post_server_groups(self.affinity)
        ex = self.assertRaises(client.OpenStackApiException,
                               self._boot_servers_to_group,
                               created_group)
        self.assertEqual(403, ex.response.status_code)
        # _boot_servers_to_group creates 2 instances in the group in order,
        # not multiple servers in a single request. Since our quota is 1,
        # the first server create would pass, the second should fail, and we
        # should be left with 1 server and its 1 block device mapping.
        servers = self.api.get_servers(detail=False)
        self.assertEqual(1, len(servers))
        ctxt = context.get_admin_context()
        servers = db.instance_get_all(ctxt)
        self.assertEqual(1, len(servers))
        ctxt_mgr = db_api.get_context_manager(ctxt)
        with ctxt_mgr.reader.using(ctxt):
            bdms = db_api._block_device_mapping_get_query(ctxt).all()
        self.assertEqual(1, len(bdms))
        self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])

    def test_boot_servers_with_affinity_no_valid_host(self):
        created_group = self.api.post_server_groups(self.affinity)
        # Using big enough flavor to use up the resources on the host

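The functional test above reads the BDM rows through db_api.get_context_manager() and a reader.using() block. That idiom comes from oslo.db's enginefacade; a rough sketch of the pattern follows, with an in-memory SQLite connection and an illustrative context class rather than nova's RequestContext wiring.

# Rough sketch of the oslo.db enginefacade reader pattern used by the test;
# MyContext and the sqlite:// URL are illustrative, not nova's setup.
from oslo_db.sqlalchemy import enginefacade
from sqlalchemy import text


@enginefacade.transaction_context_provider
class MyContext(object):
    """A context object enginefacade can attach a database session to."""


context_manager = enginefacade.transaction_context()
context_manager.configure(connection='sqlite://')

ctxt = MyContext()
with context_manager.reader.using(ctxt):
    # Inside the block ctxt.session is bound to a read-intent transaction;
    # this is where the test issues _block_device_mapping_get_query().
    rows = ctxt.session.execute(text('SELECT 1')).fetchall()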

@@ -2774,6 +2774,23 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
        system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
        self.assertEqual('baz', system_meta['original_image_ref'])

    def test_delete_block_device_mapping_on_instance_destroy(self):
        # Makes sure that the block device mapping is deleted when the
        # related instance is deleted.
        ctxt = context.get_admin_context()
        instance = db.instance_create(ctxt, dict(display_name='bdm-test'))
        bdm = {
            'volume_id': uuidutils.generate_uuid(),
            'device_name': '/dev/vdb',
            'instance_uuid': instance['uuid'],
        }
        bdm = db.block_device_mapping_create(ctxt, bdm, legacy=False)
        db.instance_destroy(ctxt, instance['uuid'])
        # make sure the bdm is deleted as well
        bdms = db.block_device_mapping_get_all_by_instance(
            ctxt, instance['uuid'])
        self.assertEqual([], bdms)

    def test_delete_instance_metadata_on_instance_destroy(self):
        ctxt = context.get_admin_context()
        # Create an instance with some metadata
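As a companion to the soft-delete sketch after the first file: reads filter on the 'deleted' flag, which is why block_device_mapping_get_all_by_instance() comes back empty for the destroyed instance even though the rows still exist physically. Again a simplified stand-in reusing the illustrative block_device_mapping table from the earlier sketch, not nova's real query code.

# Companion to the earlier soft-delete sketch: reads ignore rows whose
# 'deleted' flag is set, so a soft-deleted BDM no longer shows up.
# Reuses the simplified block_device_mapping table defined above;
# not nova's actual implementation.
def get_bdms_for_instance(conn, block_device_mapping, instance_uuid):
    query = (block_device_mapping.select()
             .where(block_device_mapping.c.instance_uuid == instance_uuid)
             .where(block_device_mapping.c.deleted == 0))
    return conn.execute(query).fetchall()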