Generic vif pool driver extension to precreate reusable ports

This patch enhances the generic vif pool driver to also pre-create
ports in bulk requests, so that containers can make use of them
when booting -- even if not many containers have been
created and deleted before.

This patch also removes the port deletion/recycle from the pod deletion
pipeline by having a dedicated thread performing periodic recycling
actions.

Partially Implements blueprint ports-pool

Change-Id: I7a3165b8a43e314c360b04cb0cefc69e0e5e768f
This commit is contained in:
Luis Tomas Bolivar 2017-02-22 12:00:48 +01:00
parent e01b4d559b
commit b09d7ce195
5 changed files with 412 additions and 76 deletions

View File

@ -219,6 +219,31 @@ class PodVIFDriver(DriverBase):
"""
raise NotImplementedError()
def request_vifs(self, pod, project_id, subnets, security_groups,
                 num_ports):
    """Creates Neutron ports for pods and returns them as a list of VIFs.

    It follows the same pattern as request_vif but creates the number of
    ports and VIF objects specified by the num_ports parameter.

    The port creation request is generic, as the ports are not going to be
    used by the pod right away -- at least not all of them. Additionally,
    in order to save Neutron calls, the ports creation is handled in a
    single bulk request.

    :param pod: dict containing Kubernetes Pod object
    :param project_id: OpenStack project ID
    :param subnets: dict containing subnet mapping as returned by
                    `PodSubnetsDriver.get_subnets`. If multiple entries
                    are present in that mapping, it is guaranteed that
                    all entries have the same value of `Network.id`.
    :param security_groups: list containing security groups' IDs as
                            returned by
                            `PodSecurityGroupsDriver.get_security_groups`
    :param num_ports: number of ports to be created
    :return: VIF objects list
    """
    raise NotImplementedError()
@abc.abstractmethod
def release_vif(self, pod, vif, project_id=None, security_groups=None):
"""Unlinks Neutron port corresponding to VIF object from pod.
@ -235,6 +260,22 @@ class PodVIFDriver(DriverBase):
"""
raise NotImplementedError()
def release_vifs(self, pods, vifs, project_id=None, security_groups=None):
    """Unlinks Neutron ports corresponding to VIF objects.

    It follows the same pattern as release_vif but releases several ports
    at once. Ideally it will also make use of a bulk request to save
    Neutron calls in the release/recycle process.

    :param pods: list of dict containing Kubernetes Pod objects
    :param vifs: list of VIF objects as returned by
                 `PodVIFDriver.request_vif`
    :param project_id: (optional) OpenStack project ID
    :param security_groups: (optional) list containing security groups'
                            IDs as returned by
                            `PodSecurityGroupsDriver.get_security_groups`
    """
    raise NotImplementedError()
@abc.abstractmethod
def activate_vif(self, pod, vif):
"""Updates VIF to become active.
@ -257,7 +298,6 @@ class PodVIFDriver(DriverBase):
raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class LBaaSDriver(DriverBase):
"""Manages Neutron/Octavia load balancer to support Kubernetes Services."""

View File

@ -38,6 +38,37 @@ class NeutronPodVIFDriver(base.PodVIFDriver):
return ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
def request_vifs(self, pod, project_id, subnets, security_groups,
                 num_ports):
    """Creates num_ports Neutron ports in one bulk request, one VIF each.

    The ports are created "unbound" (generic 'available-port' name, no
    device_id) so they can later be attached to any pod on the host.

    :param pod: dict containing Kubernetes Pod object
    :param project_id: OpenStack project ID
    :param subnets: dict containing subnet mapping as returned by
                    `PodSubnetsDriver.get_subnets`
    :param security_groups: list containing security groups' IDs
    :param num_ports: number of ports to be created
    :return: list of VIF objects, one per created port
    :raises n_exc.NeutronClientException: if the bulk creation fails
    """
    neutron = clients.get_neutron_client()

    rq = self._get_port_request(pod, project_id, subnets, security_groups,
                                unbound=True)
    # A single bulk request saves num_ports - 1 round trips to Neutron.
    bulk_port_rq = {'ports': [rq for _ in range(num_ports)]}
    try:
        ports = neutron.create_port(bulk_port_rq).get('ports')
    except n_exc.NeutronClientException:
        LOG.error("Error creating bulk ports: %s", bulk_port_rq)
        # Bare raise keeps the original traceback intact.
        raise
    vif_plugin = self._get_vif_plugin(ports[0])

    # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
    # creation request returning the port objects without binding
    # information, an additional (non-bulk) port creation is performed to
    # get the right vif binding information
    if vif_plugin == 'unbound':
        single_port = neutron.create_port(rq).get('port')
        vif_plugin = self._get_vif_plugin(single_port)
        ports.append(single_port)

    return [ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
            for port in ports]
def release_vif(self, pod, vif):
neutron = clients.get_neutron_client()
@ -59,7 +90,8 @@ class NeutronPodVIFDriver(base.PodVIFDriver):
vif.active = True
def _get_port_request(self, pod, project_id, subnets, security_groups):
def _get_port_request(self, pod, project_id, subnets, security_groups,
unbound=False):
port_req_body = {'project_id': project_id,
'name': self._get_port_name(pod),
'network_id': self._get_network_id(subnets),
@ -69,6 +101,13 @@ class NeutronPodVIFDriver(base.PodVIFDriver):
'admin_state_up': True,
'binding:host_id': self._get_host_id(pod)}
# if unbound argument is set to true, it means the port requested
# should not be bound and not associated to the pod. Thus the port dict
# is filled with a generic name (available-port) and without device_id
if unbound:
port_req_body['name'] = 'available-port'
port_req_body['device_id'] = ''
if security_groups:
port_req_body['security_groups'] = security_groups

View File

@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import time
from kuryr.lib._i18n import _
from neutronclient.common import exceptions as n_exc
@ -30,6 +32,15 @@ vif_pool_driver_opts = [
oslo_cfg.IntOpt('ports_pool_max',
help=_("Set a maximun amount of ports per pool. 0 to disable"),
default=0),
oslo_cfg.IntOpt('ports_pool_min',
help=_("Set a target minimum size of the pool of ports"),
default=5),
oslo_cfg.IntOpt('ports_pool_batch',
help=_("Number of ports to be created in a bulk request"),
default=10),
oslo_cfg.IntOpt('ports_pool_update_frequency',
help=_("Minimun interval (in seconds) between pool updates"),
default=20),
]
oslo_cfg.CONF.register_opts(vif_pool_driver_opts, "vif_pool")
@ -62,16 +73,31 @@ class GenericVIFPool(base.VIFPoolDriver):
are the 'port_id' and the values are the vif objects.
_recyclable_ports is a dictionary with the Neutron ports to be
recycled. The keys are the 'port_id' and their values are the 'pool_key'.
_last_update is a dictionary with the timestamp of the last population
action for each pool. The keys are the pool_keys and the values are the
timestamps.
The following driver configuration options exist:
- ports_pool_max: it specifies how many ports can be kept at each pool.
If the pool already reached the specified size, the ports to be recycled
are deleted instead. If set to 0, the limit is disabled and ports are
always recycled.
- ports_pool_min: minimum desired number of ready to use ports at populated
pools. Should be smaller than ports_pool_max (if enabled).
- ports_pool_batch: target number of ports to be created in bulk requests
when populating pools.
- ports_pool_update_frequency: interval in seconds between ports pool
updates, both for populating pools as well as for recycling ports.
"""
_available_ports_pools = collections.defaultdict(collections.deque)
_existing_vifs = collections.defaultdict(collections.defaultdict)
_recyclable_ports = collections.defaultdict(collections.defaultdict)
_last_update = collections.defaultdict(collections.defaultdict)
def __init__(self):
    # Note(ltomasbo) Execute the port recycling periodic actions in a
    # background thread
    # NOTE(review): the spawned greenthread is expected to loop forever;
    # nothing here joins or stops it on shutdown.
    eventlet.spawn(self._return_ports_to_pool)
def set_vif_driver(self, driver):
self._drv_vif = driver
@ -80,22 +106,43 @@ class GenericVIFPool(base.VIFPoolDriver):
try:
host_addr = pod['status']['hostIP']
except KeyError:
LOG.error("Pod has not been scheduled yet.")
LOG.warning("Pod has not been scheduled yet.")
raise
pool_key = (host_addr, project_id, tuple(security_groups))
try:
return self._get_port_from_pool(pool_key, pod)
except exceptions.ResourceNotReady:
LOG.error("Ports pool does not have available ports!")
# TODO(ltomasbo): This is to be removed in the next patch when the
# pre-creation of several ports in a bulk request is included.
vif = self._drv_vif.request_vif(pod, project_id, subnets,
security_groups)
self._existing_vifs[vif.id] = vif
return vif
return self._get_port_from_pool(pool_key, pod, subnets)
except exceptions.ResourceNotReady as ex:
LOG.warning("Ports pool does not have available ports!")
eventlet.spawn(self._populate_pool, pool_key, pod, subnets)
raise ex
def _get_port_from_pool(self, pool_key, pod):
def _populate_pool(self, pool_key, pod, subnets):
    """Refills the port pool for pool_key up to ports_pool_min.

    Creates ports in bulk (ports_pool_batch at minimum) when the pool is
    below the configured minimum. Calls are rate-limited per pool by
    ports_pool_update_frequency to avoid flooding Neutron when several
    pods miss the pool at nearly the same time.

    :param pool_key: (host_addr, project_id, security_groups) tuple
    :param pod: dict containing Kubernetes Pod object
    :param subnets: dict containing subnet mapping as returned by
                    `PodSubnetsDriver.get_subnets`
    """
    # REVISIT(ltomasbo): Drop the subnets parameter and get the information
    # from the pool_key, which will be required when multi-network is
    # supported
    now = time.time()
    if (now - oslo_cfg.CONF.vif_pool.ports_pool_update_frequency <
            self._last_update.get(pool_key, 0)):
        LOG.info("Not enough time since the last pool update")
        return
    self._last_update[pool_key] = now

    pool_size = self._get_pool_size(pool_key)
    if pool_size < oslo_cfg.CONF.vif_pool.ports_pool_min:
        num_ports = max(oslo_cfg.CONF.vif_pool.ports_pool_batch,
                        oslo_cfg.CONF.vif_pool.ports_pool_min - pool_size)
        vifs = self._drv_vif.request_vifs(pod=pod,
                                          project_id=pool_key[1],
                                          subnets=subnets,
                                          security_groups=list(pool_key[2]),
                                          num_ports=num_ports)
        for vif in vifs:
            self._existing_vifs[vif.id] = vif
            # The pool values must be deques: _get_port_from_pool consumes
            # them with popleft(). Defaulting to [] here would create a
            # plain list (setdefault bypasses the defaultdict factory) and
            # break that consumer with AttributeError.
            self._available_ports_pools.setdefault(
                pool_key, collections.deque()).append(vif.id)
def _get_port_from_pool(self, pool_key, pod, subnets):
try:
port_id = self._available_ports_pools[pool_key].popleft()
except IndexError:
@ -108,6 +155,10 @@ class GenericVIFPool(base.VIFPoolDriver):
'device_id': pod['metadata']['uid']
}
})
# check if the pool needs to be populated
if (self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_min):
eventlet.spawn(self._populate_pool, pool_key, pod, subnets)
return self._existing_vifs[port_id]
def release_vif(self, pod, vif, project_id, security_groups):
@ -115,8 +166,6 @@ class GenericVIFPool(base.VIFPoolDriver):
pool_key = (host_addr, project_id, tuple(security_groups))
self._recyclable_ports[vif.id] = pool_key
# TODO(ltomasbo) Make the port update in another thread
self._return_ports_to_pool()
def activate_vif(self, pod, vif):
    # Pure delegation: the pool driver does not track per-port admin
    # state, so activation goes straight to the wrapped VIF driver.
    self._drv_vif.activate_vif(pod, vif)
@ -136,32 +185,34 @@ class GenericVIFPool(base.VIFPoolDriver):
deleted if the maximun has been already reached.
"""
neutron = clients.get_neutron_client()
for port_id, pool_key in self._recyclable_ports.copy().items():
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_max):
try:
neutron.update_port(port_id,
{
"port": {
'name': 'available-port',
'device_id': '',
'security_groups': list(pool_key[2])
}
})
except n_exc.NeutronClientException:
LOG.warning("Error preparing port %s to be reused, put"
" back on the cleanable pool.", port_id)
continue
self._available_ports_pools.setdefault(pool_key, []).append(
port_id)
else:
try:
del self._existing_vifs[port_id]
neutron.delete_port(port_id)
except n_exc.PortNotFoundClient:
LOG.debug('Unable to release port %s as it no longer '
'exists.', port_id)
except KeyError:
LOG.debug('Port %s is not in the ports list.', port_id)
del self._recyclable_ports[port_id]
while True:
for port_id, pool_key in self._recyclable_ports.copy().items():
if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
self._get_pool_size(pool_key) <
oslo_cfg.CONF.vif_pool.ports_pool_max):
try:
neutron.update_port(port_id,
{
"port": {
'name': 'available-port',
'device_id': '',
'security_groups': list(pool_key[2])
}
})
except n_exc.NeutronClientException:
LOG.warning("Error preparing port %s to be reused, put"
" back on the cleanable pool.", port_id)
continue
self._available_ports_pools.setdefault(
pool_key, []).append(port_id)
else:
try:
del self._existing_vifs[port_id]
neutron.delete_port(port_id)
except n_exc.PortNotFoundClient:
LOG.debug('Unable to release port %s as it no longer '
'exists.', port_id)
except KeyError:
LOG.debug('Port %s is not in the ports list.', port_id)
del self._recyclable_ports[port_id]
eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)

View File

@ -55,6 +55,67 @@ class NeutronPodVIFDriver(test_base.TestCase):
m_driver._get_vif_plugin.assert_called_once_with(port)
m_to_vif.assert_called_once_with(vif_plugin, port, subnets)
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
def test_request_vifs(self, m_to_vif):
    """Bulk creation of num_ports ports yields one VIF per port.

    The driver must issue a single bulk create_port call built from the
    unbound port request and convert every returned port to a VIF.
    """
    cls = neutron_vif.NeutronPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    num_ports = 2

    port_request = mock.sentinel.port_request
    m_driver._get_port_request.return_value = port_request
    port = mock.sentinel.port
    vif_plugin = mock.sentinel.vif_plugin
    vif = mock.sentinel.vif
    # expected bulk payload: the same request repeated num_ports times
    bulk_rq = {'ports': [port_request for _ in range(num_ports)]}

    neutron.create_port.return_value = {'ports': [port, port]}
    m_driver._get_vif_plugin.return_value = vif_plugin
    m_to_vif.return_value = vif

    self.assertEqual([vif, vif], cls.request_vifs(
        m_driver, pod, project_id, subnets, security_groups, num_ports))

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups, unbound=True)
    neutron.create_port.assert_called_once_with(bulk_rq)
    # plugin is probed only once, from the first returned port
    m_driver._get_vif_plugin.assert_called_once_with(port)
    # conversion is invoked once per returned port
    calls = [mock.call(vif_plugin, port, subnets),
             mock.call(vif_plugin, port, subnets)]
    m_to_vif.assert_has_calls(calls)
@mock.patch('kuryr_kubernetes.os_vif_util.neutron_to_osvif_vif')
def test_request_vifs_exception(self, m_to_vif):
    """A failed bulk create_port call propagates and converts nothing."""
    cls = neutron_vif.NeutronPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    num_ports = 2

    port_request = mock.sentinel.port_request
    m_driver._get_port_request.return_value = port_request
    bulk_rq = {'ports': [port_request for _ in range(num_ports)]}

    neutron.create_port.side_effect = n_exc.NeutronClientException

    self.assertRaises(n_exc.NeutronClientException, cls.request_vifs,
                      m_driver, pod, project_id, subnets, security_groups,
                      num_ports)

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups, unbound=True)
    neutron.create_port.assert_called_once_with(bulk_rq)
    # no ports came back, so no plugin probing or VIF conversion happened
    m_driver._get_vif_plugin.assert_not_called()
    m_to_vif.assert_not_called()
def test_release_vif(self):
cls = neutron_vif.NeutronPodVIFDriver
m_driver = mock.Mock(spec=cls)
@ -127,7 +188,8 @@ class NeutronPodVIFDriver(test_base.TestCase):
self.assertRaises(k_exc.ResourceNotReady, cls.activate_vif,
m_driver, pod, vif)
def _test_get_port_request(self, m_to_fips, security_groups):
def _test_get_port_request(self, m_to_fips, security_groups,
unbound=False):
cls = neutron_vif.NeutronPodVIFDriver
m_driver = mock.Mock(spec=cls)
@ -158,8 +220,12 @@ class NeutronPodVIFDriver(test_base.TestCase):
if security_groups:
expected['port']['security_groups'] = security_groups
if unbound:
expected['port']['name'] = 'available-port'
expected['port']['device_id'] = ''
ret = cls._get_port_request(m_driver, pod, project_id, subnets,
security_groups)
security_groups, unbound)
self.assertEqual(expected, ret)
m_driver._get_port_name.assert_called_once_with(pod)
@ -178,6 +244,11 @@ class NeutronPodVIFDriver(test_base.TestCase):
security_groups = []
self._test_get_port_request(m_to_fips, security_groups)
@mock.patch('kuryr_kubernetes.os_vif_util.osvif_to_neutron_fixed_ips')
def test_get_port_request_unbound(self, m_to_fips):
    """unbound=True must yield the generic 'available-port' request."""
    security_groups = mock.sentinel.security_groups
    self._test_get_port_request(m_to_fips, security_groups, unbound=True)
def test_get_vif_plugin(self):
cls = neutron_vif.NeutronPodVIFDriver
m_driver = mock.Mock(spec=cls)

View File

@ -66,33 +66,34 @@ class GenericVIFPool(test_base.TestCase):
vif = mock.sentinel.vif
m_driver._get_port_from_pool.return_value = vif
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
pool_length = 5
m_driver._get_pool_size.return_value = pool_length
self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
subnets, security_groups))
def test_request_vif_empty_pool(self):
@mock.patch('eventlet.spawn')
def test_request_vif_empty_pool(self, m_eventlet):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
m_driver._existing_vifs = {}
cls_vif_driver = neutron_vif.NeutronPodVIFDriver
vif_driver = mock.MagicMock(spec=cls_vif_driver)
m_driver._drv_vif = vif_driver
pod = self._get_pod_obj()
host_addr = mock.sentinel.host_addr
pod_status = mock.MagicMock()
pod_status.__getitem__.return_value = host_addr
pod = mock.MagicMock()
pod.__getitem__.return_value = pod_status
project_id = mock.sentinel.project_id
subnets = mock.sentinel.subnets
security_groups = [mock.sentinel.security_groups]
vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')
m_driver._get_port_from_pool.side_effect = (
exceptions.ResourceNotReady(pod))
m_driver._get_port_from_pool.side_effect = exceptions.ResourceNotReady(
pod)
m_driver._drv_vif.request_vif.return_value = vif
self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
subnets, security_groups))
m_driver._drv_vif.request_vif.assert_called_with(
pod, project_id, subnets, security_groups)
self.assertRaises(exceptions.ResourceNotReady, cls.request_vif,
m_driver, pod, project_id, subnets, security_groups)
m_eventlet.assert_called_once()
def test_request_vif_pod_without_host_id(self):
cls = vif_pool.GenericVIFPool
@ -107,7 +108,92 @@ class GenericVIFPool(test_base.TestCase):
self.assertRaises(KeyError, cls.request_vif, m_driver, pod, project_id,
subnets, security_groups)
def test__get_port_from_pool(self):
@mock.patch('time.time', return_value=50)
def test__populate_pool(self, m_time):
    """A pool below ports_pool_min, past the rate limit, gets refilled."""
    cls = vif_pool.GenericVIFPool
    m_driver = mock.MagicMock(spec=cls)

    cls_vif_driver = neutron_vif.NeutronPodVIFDriver
    vif_driver = mock.MagicMock(spec=cls_vif_driver)
    m_driver._drv_vif = vif_driver

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = [mock.sentinel.security_groups]
    pool_key = (mock.sentinel.host_addr, project_id,
                tuple(security_groups))
    vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')
    vifs = [vif]

    m_driver._existing_vifs = {}
    m_driver._available_ports_pools = {}
    # last update at t=1 is older than now (t=50) minus the 15 s
    # frequency, so population is not rate-limited
    m_driver._last_update = {pool_key: 1}

    oslo_cfg.CONF.set_override('ports_pool_min',
                               5,
                               group='vif_pool')
    oslo_cfg.CONF.set_override('ports_pool_update_frequency',
                               15,
                               group='vif_pool')
    # pool size 2 < min 5 -> a bulk request must be issued
    m_driver._get_pool_size.return_value = 2
    vif_driver.request_vifs.return_value = vifs

    cls._populate_pool(m_driver, pool_key, pod, subnets)
    m_driver._get_pool_size.assert_called_once()
    m_driver._drv_vif.request_vifs.assert_called_once()
@mock.patch('time.time', return_value=0)
def test__populate_pool_no_update(self, m_time):
    """Population is skipped when called within the update frequency."""
    cls = vif_pool.GenericVIFPool
    m_driver = mock.MagicMock(spec=cls)

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = [mock.sentinel.security_groups]
    pool_key = (mock.sentinel.host_addr, project_id,
                tuple(security_groups))

    oslo_cfg.CONF.set_override('ports_pool_update_frequency',
                               15,
                               group='vif_pool')
    # now (t=0) minus the 15 s frequency is before the last update (t=1),
    # so the method must bail out before even checking the pool size
    m_driver._last_update = {pool_key: 1}

    cls._populate_pool(m_driver, pool_key, pod, subnets)
    m_driver._get_pool_size.assert_not_called()
@mock.patch('time.time', return_value=50)
def test__populate_pool_large_pool(self, m_time):
    """No bulk request is made when the pool is already at/above min."""
    cls = vif_pool.GenericVIFPool
    m_driver = mock.MagicMock(spec=cls)

    cls_vif_driver = neutron_vif.NeutronPodVIFDriver
    vif_driver = mock.MagicMock(spec=cls_vif_driver)
    m_driver._drv_vif = vif_driver

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = [mock.sentinel.security_groups]
    pool_key = (mock.sentinel.host_addr, project_id,
                tuple(security_groups))

    oslo_cfg.CONF.set_override('ports_pool_update_frequency',
                               15,
                               group='vif_pool')
    oslo_cfg.CONF.set_override('ports_pool_min',
                               5,
                               group='vif_pool')
    m_driver._last_update = {pool_key: 1}
    # pool size 10 >= min 5 -> the size is checked but no ports requested
    m_driver._get_pool_size.return_value = 10

    cls._populate_pool(m_driver, pool_key, pod, subnets)
    m_driver._get_pool_size.assert_called_once()
    m_driver._drv_vif.request_vifs.assert_not_called()
@mock.patch('eventlet.spawn')
def test__get_port_from_pool(self, m_eventlet):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -115,6 +201,7 @@ class GenericVIFPool(test_base.TestCase):
pool_key = mock.sentinel.pool_key
port_id = mock.sentinel.port_id
port = mock.sentinel.port
subnets = mock.sentinel.subnets
pod = self._get_pod_obj()
@ -122,8 +209,14 @@ class GenericVIFPool(test_base.TestCase):
pool_key: collections.deque([port_id])}
m_driver._existing_vifs = {port_id: port}
oslo_cfg.CONF.set_override('ports_pool_min',
5,
group='vif_pool')
pool_length = 5
m_driver._get_pool_size.return_value = pool_length
self.assertEqual(port, cls._get_port_from_pool(
m_driver, pool_key, pod))
m_driver, pool_key, pod, subnets))
neutron.update_port.assert_called_once_with(port_id,
{
@ -132,6 +225,42 @@ class GenericVIFPool(test_base.TestCase):
'device_id': pod['metadata']['uid']
}
})
m_eventlet.assert_not_called()
@mock.patch('eventlet.spawn')
def test__get_port_from_pool_pool_populate(self, m_eventlet):
    """Taking a port from a pool below ports_pool_min spawns a refill."""
    cls = vif_pool.GenericVIFPool
    m_driver = mock.MagicMock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pool_key = mock.sentinel.pool_key
    port_id = mock.sentinel.port_id
    port = mock.sentinel.port
    subnets = mock.sentinel.subnets

    pod = self._get_pod_obj()

    m_driver._available_ports_pools = {
        pool_key: collections.deque([port_id])}
    m_driver._existing_vifs = {port_id: port}

    oslo_cfg.CONF.set_override('ports_pool_min',
                               5,
                               group='vif_pool')
    # remaining pool size 3 < min 5 -> a background repopulation is
    # expected in addition to handing out the port
    pool_length = 3
    m_driver._get_pool_size.return_value = pool_length

    self.assertEqual(port, cls._get_port_from_pool(
        m_driver, pool_key, pod, subnets))

    # the handed-out port is rebound to the pod's name and uid
    neutron.update_port.assert_called_once_with(port_id,
        {
            "port": {
                'name': pod['metadata']['name'],
                'device_id': pod['metadata']['uid']
            }
        })
    m_eventlet.assert_called_once()
def test__get_port_from_pool_empty_pool(self):
cls = vif_pool.GenericVIFPool
@ -140,11 +269,12 @@ class GenericVIFPool(test_base.TestCase):
pod = self._get_pod_obj()
pool_key = mock.sentinel.pool_key
subnets = mock.sentinel.subnets
m_driver._available_ports_pools = {pool_key: collections.deque([])}
self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool,
m_driver, pool_key, pod)
m_driver, pool_key, pod, subnets)
neutron.update_port.assert_not_called()
@ -162,10 +292,11 @@ class GenericVIFPool(test_base.TestCase):
cls.release_vif(m_driver, pod, vif, project_id, security_groups)
m_driver._return_ports_to_pool.assert_called_once()
m_driver._return_ports_to_pool.assert_not_called()
@mock.patch('eventlet.sleep', side_effect=SystemExit)
@ddt.data((0), (10))
def test__return_ports_to_pool(self, max_pool):
def test__return_ports_to_pool(self, max_pool, m_sleep):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -181,7 +312,7 @@ class GenericVIFPool(test_base.TestCase):
group='vif_pool')
m_driver._get_pool_size.return_value = pool_length
cls._return_ports_to_pool(m_driver)
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
neutron.update_port.assert_called_once_with(port_id,
{
@ -193,7 +324,8 @@ class GenericVIFPool(test_base.TestCase):
})
neutron.delete_port.assert_not_called()
def test__return_ports_to_pool_delete_port(self):
@mock.patch('eventlet.sleep', side_effect=SystemExit)
def test__return_ports_to_pool_delete_port(self, m_sleep):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -211,12 +343,13 @@ class GenericVIFPool(test_base.TestCase):
group='vif_pool')
m_driver._get_pool_size.return_value = pool_length
cls._return_ports_to_pool(m_driver)
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
neutron.update_port.assert_not_called()
neutron.delete_port.assert_called_once_with(port_id)
def test__return_ports_to_pool_update_exception(self):
@mock.patch('eventlet.sleep', side_effect=SystemExit)
def test__return_ports_to_pool_update_exception(self, m_sleep):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -233,7 +366,7 @@ class GenericVIFPool(test_base.TestCase):
m_driver._get_pool_size.return_value = pool_length
neutron.update_port.side_effect = n_exc.NeutronClientException
cls._return_ports_to_pool(m_driver)
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
neutron.update_port.assert_called_once_with(port_id,
{
@ -245,7 +378,8 @@ class GenericVIFPool(test_base.TestCase):
})
neutron.delete_port.assert_not_called()
def test__return_ports_to_pool_delete_exception(self):
@mock.patch('eventlet.sleep', side_effect=SystemExit)
def test__return_ports_to_pool_delete_exception(self, m_sleep):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -264,12 +398,13 @@ class GenericVIFPool(test_base.TestCase):
m_driver._get_pool_size.return_value = pool_length
neutron.delete_port.side_effect = n_exc.PortNotFoundClient
cls._return_ports_to_pool(m_driver)
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
neutron.update_port.assert_not_called()
neutron.delete_port.assert_called_once_with(port_id)
def test__return_ports_to_pool_delete_key_error(self):
@mock.patch('eventlet.sleep', side_effect=SystemExit)
def test__return_ports_to_pool_delete_key_error(self, m_sleep):
cls = vif_pool.GenericVIFPool
m_driver = mock.MagicMock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
@ -286,7 +421,7 @@ class GenericVIFPool(test_base.TestCase):
group='vif_pool')
m_driver._get_pool_size.return_value = pool_length
cls._return_ports_to_pool(m_driver)
self.assertRaises(SystemExit, cls._return_ports_to_pool, m_driver)
neutron.update_port.assert_not_called()
neutron.delete_port.assert_not_called()