Port cleanup on failed VM instance launch

Currently, if a VM instance launch fails, the Neutron ports created for the
instance are left behind as stale ports. Delete those ports when the launch
fails, and warn the user about any ports that could not be deleted.

Change-Id: I12fcd8263af81b81f4512a95da6e42b08e2e41ec
Closes-Bug: 1423453
Saksham Varma 2015-07-15 18:07:31 -07:00
parent 61520479bb
commit 32d55a36f1
2 changed files with 150 additions and 2 deletions
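
For orientation before the diffs, here is a minimal sketch of the pattern this change introduces, assuming Horizon's openstack_dashboard.api module. The cleanup helper mirrors the _cleanup_ports_on_failed_vm_launch function added below; launch_with_port_cleanup is a hypothetical wrapper written only to illustrate the call-site pattern and is not part of the change (the real change hooks the cleanup into LaunchInstance.handle() and reports failures with exceptions.handle() instead of re-raising).

# Sketch only: mirrors the helper added in this change; the wrapper below
# is hypothetical and exists purely to show where the cleanup is invoked.
import logging

from openstack_dashboard import api

LOG = logging.getLogger(__name__)


def _cleanup_ports_on_failed_vm_launch(request, nics):
    # Try to delete every port that was pre-created for the instance and
    # collect the ids of ports whose deletion failed, so the caller can
    # report them to the user.
    ports_failing_deletes = []
    LOG.debug('Cleaning up stale VM ports.')
    for nic in nics:
        try:
            LOG.debug('Deleting port with id: %s', nic['port-id'])
            api.neutron.port_delete(request, nic['port-id'])
        except Exception:
            ports_failing_deletes.append(nic['port-id'])
    return ports_failing_deletes


def launch_with_port_cleanup(request, nics, create_server):
    # Hypothetical call-site wrapper: create_server is any callable that
    # boots the instance (e.g. a functools.partial around
    # api.nova.server_create).  If the boot fails, delete the ports that
    # were created for the NICs instead of leaving them behind.
    try:
        return create_server()
    except Exception:
        failing = _cleanup_ports_on_failed_vm_launch(request, nics)
        if failing:
            LOG.warning('Port cleanup failed for these port-ids: %s',
                        ', '.join(failing))
        raise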


@@ -4449,3 +4449,129 @@ class ConsoleManagerTests(helpers.TestCase):
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @helpers.update_settings(
        OPENSTACK_NEUTRON_NETWORK={'profile_support': 'cisco'})
    @helpers.create_stubs({api.glance: ('image_list_detailed',),
                           api.neutron: ('network_list',
                                         'profile_list',
                                         'port_create',
                                         'port_delete'),
                           api.nova: ('extension_supported',
                                      'flavor_list',
                                      'keypair_list',
                                      'availability_zone_list',
                                      'server_create',),
                           api.network: ('security_group_list',),
                           cinder: ('volume_list',
                                    'volume_snapshot_list',),
                           quotas: ('tenant_quota_usages',)})
    def test_port_cleanup_called_on_failed_vm_launch(self):
        flavor = self.flavors.first()
        image = self.images.first()
        keypair = self.keypairs.first()
        server = self.servers.first()
        sec_group = self.security_groups.first()
        avail_zone = self.availability_zones.first()
        customization_script = 'user data'
        quota_usages = self.quota_usages.first()

        api.nova.extension_supported('BlockDeviceMappingV2Boot',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        volumes = [v for v in self.volumes.list() if (v.status == AVAILABLE
                                                      and v.bootable ==
                                                      'true')]
        cinder.volume_list(IsA(http.HttpRequest),
                           search_opts=VOLUME_SEARCH_OPTS) \
            .AndReturn(volumes)
        volumes = [v for v in self.volumes.list() if (v.status == AVAILABLE)]
        cinder.volume_snapshot_list(IsA(http.HttpRequest),
                                    search_opts=SNAPSHOT_SEARCH_OPTS) \
            .AndReturn(volumes)
        api.nova.flavor_list(IgnoreArg()).AndReturn(self.flavors.list())
        api.nova.keypair_list(IgnoreArg()).AndReturn(self.keypairs.list())
        api.network.security_group_list(IsA(http.HttpRequest)) \
            .AndReturn(self.security_groups.list())
        api.nova.availability_zone_list(IsA(http.HttpRequest)) \
            .AndReturn(self.availability_zones.list())
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([self.images.list(), False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([[], False, False])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 tenant_id=self.tenant.id,
                                 shared=False) \
            .AndReturn(self.networks.list()[:1])
        api.neutron.network_list(IsA(http.HttpRequest),
                                 shared=True) \
            .AndReturn(self.networks.list()[1:])
        policy_profiles = self.policy_profiles.list()
        policy_profile_id = self.policy_profiles.first().id
        port = self.ports.first()
        api.neutron.profile_list(
            IsA(http.HttpRequest), 'policy').AndReturn(policy_profiles)
        api.neutron.port_create(
            IsA(http.HttpRequest),
            self.networks.first().id,
            policy_profile_id=policy_profile_id).AndReturn(port)
        nics = [{"port-id": port.id}]
        api.nova.extension_supported('DiskConfig',
                                     IsA(http.HttpRequest)) \
            .AndReturn(True)
        api.nova.extension_supported('ConfigDrive',
                                     IsA(http.HttpRequest)).AndReturn(True)
        api.nova.server_create(IsA(http.HttpRequest),
                               server.name,
                               image.id,
                               flavor.id,
                               keypair.name,
                               customization_script,
                               [sec_group.name],
                               block_device_mapping=None,
                               block_device_mapping_v2=None,
                               nics=nics,
                               availability_zone=avail_zone.zoneName,
                               instance_count=IsA(int),
                               admin_pass='password',
                               disk_config='AUTO',
                               config_drive=False) \
            .AndRaise(self.exceptions.neutron)
        api.neutron.port_delete(IsA(http.HttpRequest), port.id)
        quotas.tenant_quota_usages(IsA(http.HttpRequest)) \
            .AndReturn(quota_usages)
        api.nova.flavor_list(IsA(http.HttpRequest)) \
            .AndReturn(self.flavors.list())

        self.mox.ReplayAll()

        form_data = {'flavor': flavor.id,
                     'source_type': 'image_id',
                     'source_id': image.id,
                     'volume_size': '1',
                     'image_id': image.id,
                     'availability_zone': avail_zone.zoneName,
                     'keypair': keypair.name,
                     'name': server.name,
                     'script_source': 'raw',
                     'script_data': customization_script,
                     'project_id': self.tenants.first().id,
                     'user_id': self.user.id,
                     'groups': sec_group.name,
                     'volume_type': '',
                     'network': self.networks.first().id,
                     'count': 1,
                     'admin_pass': 'password',
                     'confirm_admin_pass': 'password',
                     'disk_config': 'AUTO',
                     'config_drive': False,
                     'profile': self.policy_profiles.first().id}

        url = reverse('horizon:project:instances:launch')
        res = self.client.post(url, form_data)

        self.assertRedirectsNoFollow(res, INDEX_URL)


@@ -912,7 +912,9 @@ class LaunchInstance(workflows.Workflow):
        avail_zone = context.get('availability_zone', None)
        if api.neutron.is_port_profiles_supported():
        port_profiles_supported = api.neutron.is_port_profiles_supported()

        if port_profiles_supported:
            nics = self.set_network_port_profiles(request,
                                                  context['network_id'],
                                                  context['profile_id'])
@@ -935,8 +937,16 @@ class LaunchInstance(workflows.Workflow):
                                   config_drive=context.get('config_drive'))
            return True
        except Exception:
            if port_profiles_supported:
                ports_failing_deletes = _cleanup_ports_on_failed_vm_launch(
                    request, nics)
                if ports_failing_deletes:
                    ports_str = ', '.join(ports_failing_deletes)
                    msg = (_('Port cleanup failed for these port-ids (%s).')
                           % ports_str)
                    exceptions.handle(request, msg)
            exceptions.handle(request)
            return False

    def set_network_port_profiles(self, request, net_ids, profile_id):
        # Create port with Network ID and Port Profile
@@ -974,3 +984,15 @@ class LaunchInstance(workflows.Workflow):
                       'profile_id': profile_id})
        return nics


def _cleanup_ports_on_failed_vm_launch(request, nics):
    ports_failing_deletes = []
    LOG.debug('Cleaning up stale VM ports.')
    for nic in nics:
        try:
            LOG.debug('Deleting port with id: %s' % nic['port-id'])
            api.neutron.port_delete(request, nic['port-id'])
        except Exception:
            ports_failing_deletes.append(nic['port-id'])
    return ports_failing_deletes
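
As a quick illustration of the helper's contract (failed deletions are collected and returned rather than raised, so the remaining ports still get deleted), here is a minimal standalone check sketched with unittest.mock instead of the mox-based workflow test above. The import path is an assumption inferred from the LaunchInstance hunk headers; adjust it to wherever the helper actually lives.

from unittest import mock

# Assumed import path, inferred from the LaunchInstance hunks above.
from openstack_dashboard.dashboards.project.instances.workflows \
    import create_instance


def test_cleanup_collects_ports_that_fail_to_delete():
    request = object()
    nics = [{'port-id': 'port-1'}, {'port-id': 'port-2'}]
    # First delete raises, second succeeds; only the failing id comes back.
    with mock.patch.object(create_instance.api.neutron, 'port_delete',
                           side_effect=[Exception('boom'), None]):
        failing = create_instance._cleanup_ports_on_failed_vm_launch(
            request, nics)
    assert failing == ['port-1']

Returning the failing ids instead of raising keeps the cleanup loop going for the remaining ports and lets the workflow surface both the original launch error and any leftover ports in one place.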