Call placement after instance delete
Blazar called placement to delete reservation inventory on the reservation resource provider *before* Blazar asked Nova to delete the instance. This caused an "inventory in use" 409 conflict error in placement. This patch re-orders the operations so that placement is called after the instance is deleted, and adds a function that waits until the deletion is complete so the error cannot occur. Change-Id: I3b4a892a783140b4a25810aeed71c79f6207e155 Depends-On: https://review.openstack.org/#/c/633408/ Closes-Bug: #1813252
This commit is contained in:
parent
7dc9a6a7bf
commit
1855e059fc
|
@ -105,6 +105,10 @@ class NovaClientError(exceptions.BlazarException):
|
|||
msg_fmt = _("Failed to create Nova resources for the reservation")
|
||||
|
||||
|
||||
class ServerDeletionTimeout(exceptions.BlazarException):
    """Raised when servers of a reservation are not deleted in time.

    Used by the instance-reservation plugin when it gives up waiting for
    Nova to finish deleting the reserved servers before cleaning up the
    reservation inventory in placement (see bug #1813252).
    """
    msg_fmt = _("Timed out waiting for server deletion")
|
||||
|
||||
|
||||
# oshost plugin related exceptions
|
||||
|
||||
class CantAddExtraCapability(exceptions.BlazarException):
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
# under the License.
|
||||
|
||||
import datetime
|
||||
import retrying
|
||||
|
||||
from novaclient import exceptions as nova_exceptions
|
||||
from oslo_config import cfg
|
||||
|
@ -37,6 +38,7 @@ LOG = logging.getLogger(__name__)
|
|||
|
||||
RESERVATION_PREFIX = 'reservation'
|
||||
FLAVOR_EXTRA_SPEC = "aggregate_instance_extra_specs:" + RESERVATION_PREFIX
|
||||
INSTANCE_DELETION_TIMEOUT = 10 * 60 * 1000 # 10 minutes
|
||||
|
||||
|
||||
class VirtualInstancePlugin(base.BasePlugin, nova.NovaClientWrapper):
|
||||
|
@ -485,25 +487,47 @@ class VirtualInstancePlugin(base.BasePlugin, nova.NovaClientWrapper):
|
|||
except nova_exceptions.NotFound:
|
||||
pass
|
||||
|
||||
hostnames = []
|
||||
allocations = db_api.host_allocation_get_all_by_values(
|
||||
reservation_id=reservation_id)
|
||||
for allocation in allocations:
|
||||
host = db_api.host_get(allocation['compute_host_id'])
|
||||
db_api.host_allocation_destroy(allocation['id'])
|
||||
try:
|
||||
self.placement_client.delete_reservation_inventory(
|
||||
host['hypervisor_hostname'], reservation_id)
|
||||
except openstack_ex.ResourceProviderNotFound:
|
||||
pass
|
||||
hostnames.append(host['hypervisor_hostname'])
|
||||
|
||||
for server in self.nova.servers.list(search_opts={
|
||||
'flavor': reservation_id,
|
||||
'all_tenants': 1}, detailed=False):
|
||||
server.delete()
|
||||
|
||||
# We need to check the deletion is complete before deleting the
|
||||
# reservation inventory. See the bug #1813252 for details.
|
||||
if not self._check_server_deletion(reservation_id):
|
||||
LOG.error('Timed out while deleting servers on reservation %s',
|
||||
reservation_id)
|
||||
raise mgr_exceptions.ServerDeletionTimeout()
|
||||
|
||||
self.cleanup_resources(instance_reservation)
|
||||
|
||||
for host_name in hostnames:
|
||||
try:
|
||||
self.placement_client.delete_reservation_inventory(
|
||||
host_name, reservation_id)
|
||||
except openstack_ex.ResourceProviderNotFound:
|
||||
pass
|
||||
self.placement_client.delete_reservation_class(reservation_id)
|
||||
|
||||
@retrying.retry(stop_max_delay=INSTANCE_DELETION_TIMEOUT,
                wait_fixed=5000,  # re-check every 5 seconds
                retry_on_result=lambda result: result is False)
def _check_server_deletion(self, reservation_id):
    """Poll Nova until no servers of the reservation flavor remain.

    :param reservation_id: id of the reservation whose flavor is used
        to look up the servers being deleted.
    :return: True once no matching servers are listed; False while any
        remain (which triggers a retry until INSTANCE_DELETION_TIMEOUT
        elapses).
    """
    remaining = self.nova.servers.list(
        search_opts={'flavor': reservation_id, 'all_tenants': 1},
        detailed=False)
    if not remaining:
        return True
    LOG.info('Waiting to delete servers: %s ', remaining)
    return False
|
||||
|
||||
def heal_reservations(self, failed_resources, interval_begin,
|
||||
interval_end):
|
||||
"""Heal reservations which suffer from resource failures.
|
||||
|
|
|
@ -923,7 +923,10 @@ class TestVirtualInstancePlugin(tests.TestCase):
|
|||
fake_servers = [mock.MagicMock(method='delete') for i in range(5)]
|
||||
mock_nova = mock.MagicMock()
|
||||
type(plugin).nova = mock_nova
|
||||
mock_nova.servers.list.return_value = fake_servers
|
||||
# First, we return the fake servers to delete. Second, on the check in
|
||||
# _check_server_deletion(), we mock they are still in nova DB to
|
||||
# exercise retry and at last we mock they are deleted completely.
|
||||
mock_nova.servers.list.side_effect = [fake_servers, fake_servers, []]
|
||||
|
||||
mock_cleanup_resources = self.patch(plugin, 'cleanup_resources')
|
||||
|
||||
|
@ -932,9 +935,10 @@ class TestVirtualInstancePlugin(tests.TestCase):
|
|||
mock_nova.flavor_access.remove_tenant_access.assert_called_once_with(
|
||||
'reservation-id1', 'fake-project-id')
|
||||
|
||||
mock_nova.servers.list.assert_called_once_with(
|
||||
mock_nova.servers.list.assert_called_with(
|
||||
search_opts={'flavor': 'reservation-id1', 'all_tenants': 1},
|
||||
detailed=False)
|
||||
mock_nova.servers.list.call_count = 3
|
||||
for fake in fake_servers:
|
||||
fake.delete.assert_called_once()
|
||||
for i in range(2):
|
||||
|
|
|
@ -91,6 +91,7 @@ reno==2.5.0
|
|||
repoze.lru==0.7
|
||||
requests==2.18.4
|
||||
requestsexceptions==1.4.0
|
||||
retrying==1.3.3
|
||||
rfc3986==1.1.0
|
||||
Routes==2.3.1
|
||||
simplegeneric==0.8.1
|
||||
|
|
|
@ -29,6 +29,7 @@ python-keystoneclient>=3.8.0 # Apache-2.0
|
|||
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
|
||||
sqlalchemy-migrate>=0.11.0 # Apache-2.0
|
||||
requests>=2.18.4 # Apache-2.0
|
||||
retrying>=1.3.3,!=1.3.0 # Apache-2.0
|
||||
Routes>=2.3.1 # MIT
|
||||
six>=1.10.0 # MIT
|
||||
SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT
|
||||
|
|
Loading…
Reference in New Issue