Nested vif driver extension to enable port reuse
In order to speed up container creation/deletion, a new nested vif pool driver
is proposed to ensure that subports already created and attached to the VM
trunk port can be reused in the future. Note this removes the
neutron.create_port and neutron.attach_subport calls (when a port is reused)
from the container creation process. As measured in the performance evaluation
in [0], the API time for create_port alone is, on average, around 2 seconds.
On top of that, as the attached subports are already in ACTIVE status, there
is no need to wait for the status to become ACTIVE, which is where most of
the time is spent now.

[0] https://blog.russellbryant.net/2016/12/19/comparing-openstack-neutron-ml2ovs-and-ovn-control-plane/

Partially Implements: blueprint ports-pool

Change-Id: Ibae054ef38d7aa7ea3b2829642f9544d61e76798
parent b09d7ce195
commit 3a776d5ed7
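Before the diff, a minimal sketch of the pooling idea the patch implements,
assuming hypothetical names (_available_ports, request_port, release_port);
the real drivers live in kuryr_kubernetes/controller/drivers/vif_pool.py below:

import collections

# Ready-to-use ports, keyed by (host_ip, project_id, security_groups).
_available_ports = collections.defaultdict(collections.deque)


def request_port(pool_key):
    # Reusing a pooled subport skips neutron.create_port and, for nested
    # pods, neutron.attach_subport, as well as the wait for the port to
    # become ACTIVE, which is where most of the time is spent.
    try:
        return _available_ports[pool_key].popleft()
    except IndexError:
        # Empty pool: the driver falls back to creating a port on demand.
        raise LookupError('pool empty for %s' % (pool_key,))


def release_port(pool_key, port_id):
    # Instead of deleting the port, return it to the pool for future pods.
    _available_ports[pool_key].append(port_id)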
kuryr_kubernetes/controller/drivers/nested_vif.py

@@ -40,23 +40,12 @@ oslo_cfg.CONF.register_opts(nested_vif_driver_opts, "pod_vif_nested")

class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver):
    """Skeletal handler driver for VIFs for Nested Pods."""

    def _get_parent_port(self, neutron, pod):
    def _get_parent_port_by_host_ip(self, neutron, node_fixed_ip):
        node_subnet_id = oslo_cfg.CONF.pod_vif_nested.worker_nodes_subnet
        if not node_subnet_id:
            raise oslo_cfg.RequiredOptError('worker_nodes_subnet',
                                            oslo_cfg.OptGroup('pod_vif_nested'))

        try:
            # REVISIT(vikasc): Assumption is being made that hostIP is the IP
            #                  of trunk interface on the node(vm).
            node_fixed_ip = pod['status']['hostIP']
        except KeyError:
            if pod['status']['conditions'][0]['type'] != "Initialized":
                LOG.debug("Pod condition type is not 'Initialized'")

            LOG.error("Failed to get parent vm port ip")
            raise

        try:
            fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
                         'ip_address=%s' % str(node_fixed_ip)]

@@ -72,3 +61,16 @@ class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver):
            LOG.error("Neutron port for vm port with fixed ips %s"
                      " not found!", fixed_ips)
            raise kl_exc.NoResourceException

    def _get_parent_port(self, neutron, pod):
        try:
            # REVISIT(vikasc): Assumption is being made that hostIP is the IP
            #                  of trunk interface on the node(vm).
            node_fixed_ip = pod['status']['hostIP']
        except KeyError:
            if pod['status']['conditions'][0]['type'] != "Initialized":
                LOG.debug("Pod condition type is not 'Initialized'")

            LOG.error("Failed to get parent vm port ip")
            raise
        return self._get_parent_port_by_host_ip(neutron, node_fixed_ip)
kuryr_kubernetes/controller/drivers/vif_pool.py

@@ -12,8 +12,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import collections
import eventlet
import six
import time

from kuryr.lib._i18n import _

@@ -63,8 +66,9 @@ class NoopVIFPool(base.VIFPoolDriver):
        self._drv_vif.activate_vif(pod, vif)


class GenericVIFPool(base.VIFPoolDriver):
    """Manages VIFs for Bare Metal Kubernetes Pods.
@six.add_metaclass(abc.ABCMeta)
class BaseVIFPool(base.VIFPoolDriver):
    """Skeletal pool driver.

    In order to handle the pools of ports, a few dicts are used:
    _available_ports_pool is a dictionary with the ready to use Neutron ports

@@ -73,9 +77,6 @@ class GenericVIFPool(base.VIFPoolDriver):
    are the 'port_id' and the values are the vif objects.
    _recyclable_ports is a dictionary with the Neutron ports to be
    recycled. The keys are the 'port_id' and their values are the 'pool_key'.
    _last_update is a dictionary with the timestamp of the last population
    action for each pool. The keys are the pool_keys and the values are the
    timestamps.

    The following driver configuration options exist:
    - ports_pool_max: it specifies how many ports can be kept at each pool.

@@ -94,14 +95,24 @@
    _recyclable_ports = collections.defaultdict(collections.defaultdict)
    _last_update = collections.defaultdict(collections.defaultdict)

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def activate_vif(self, pod, vif):
        self._drv_vif.activate_vif(pod, vif)

    def _get_pool_size(self, pool_key=None):
        return len(self._available_ports_pools.get(pool_key, []))


class GenericVIFPool(BaseVIFPool):
    """Manages VIFs for Bare Metal Kubernetes Pods."""

    def __init__(self):
        # Note(ltomasbo) Execute the port recycling periodic actions in a
        # background thread
        eventlet.spawn(self._return_ports_to_pool)

    def set_vif_driver(self, driver):
        self._drv_vif = driver

    def request_vif(self, pod, project_id, subnets, security_groups):
        try:
            host_addr = pod['status']['hostIP']

@@ -167,12 +178,6 @@ class GenericVIFPool(base.VIFPoolDriver):

        self._recyclable_ports[vif.id] = pool_key

    def activate_vif(self, pod, vif):
        self._drv_vif.activate_vif(pod, vif)

    def _get_pool_size(self, pool_key=None):
        return len(self._available_ports_pools.get(pool_key, []))

    def _return_ports_to_pool(self):
        """Recycle ports to be reused by future pods.

@@ -216,3 +221,106 @@ class GenericVIFPool(base.VIFPoolDriver):
                    LOG.debug('Port %s is not in the ports list.', port_id)
                del self._recyclable_ports[port_id]
            eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)


class NestedVIFPool(BaseVIFPool):
    """Manages VIFs for nested Kubernetes Pods.

    In order to handle the pools of ports for nested Pods, an extra dict is
    used:
    _known_trunk_ids is a dictionary that keeps the trunk port ids associated
    to each pool_key to skip calls to neutron to get the trunk information.
    """
    _known_trunk_ids = collections.defaultdict(collections.defaultdict)

    def request_vif(self, pod, project_id, subnets, security_groups):
        try:
            host_addr = pod['status']['hostIP']
        except KeyError:
            LOG.warning("Pod has not been scheduled yet.")
            raise
        pool_key = (host_addr, project_id, tuple(security_groups))

        try:
            return self._get_port_from_pool(pool_key, pod)
        except exceptions.ResourceNotReady:
            LOG.warning("Ports pool does not have available ports!")
            # TODO(ltomasbo): This is to be removed in the next patch when the
            # pre-creation of several ports in a bulk request is included.
            vif = self._drv_vif.request_vif(pod, project_id, subnets,
                                            security_groups)
            self._existing_vifs[vif.id] = vif
            return vif

    def _get_port_from_pool(self, pool_key, pod):
        try:
            port_id = self._available_ports_pools[pool_key].popleft()
        except IndexError:
            raise exceptions.ResourceNotReady(pod)
        neutron = clients.get_neutron_client()
        neutron.update_port(port_id,
                            {
                                "port": {
                                    'name': pod['metadata']['name'],
                                }
                            })
        return self._existing_vifs[port_id]

    def release_vif(self, pod, vif, project_id, security_groups):
        host_addr = pod['status']['hostIP']
        pool_key = (host_addr, project_id, tuple(security_groups))

        self._recyclable_ports[vif.id] = pool_key
        # TODO(ltomasbo) Make the port update in another thread
        self._return_ports_to_pool()

    def _return_ports_to_pool(self):
        """Recycle ports to be reused by future pods.

        For each port in the recyclable_ports dict it reapplies the
        security group and changes the port name to available_port.
        Upon successful port update, the port_id is included in the dict
        with the available_ports.

        If a maximum number of ports per pool is set, the port will be
        deleted once the maximum has been reached.
        """
        neutron = clients.get_neutron_client()
        for port_id, pool_key in self._recyclable_ports.copy().items():
            if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
                    self._get_pool_size(pool_key) <
                    oslo_cfg.CONF.vif_pool.ports_pool_max):
                try:
                    neutron.update_port(port_id,
                                        {
                                            "port": {
                                                'name': 'available-port',
                                                'security_groups': list(pool_key[2])
                                            }
                                        })
                except n_exc.NeutronClientException:
                    LOG.warning("Error preparing port %s to be reused, put"
                                " back on the cleanable pool.", port_id)
                    continue
                # Ports are popped from the left in _get_port_from_pool, so
                # the per-pool container must be a deque, not a list.
                self._available_ports_pools.setdefault(
                    pool_key, collections.deque()).append(port_id)
            else:
                trunk_id = self._known_trunk_ids.get(pool_key, None)
                if not trunk_id:
                    parent_port = self._drv_vif._get_parent_port_by_host_ip(
                        neutron, pool_key[0])
                    trunk_id = self._drv_vif._get_trunk_id(parent_port)
                    # Cache the trunk id itself (not the port id) so later
                    # iterations can skip the neutron lookups.
                    self._known_trunk_ids[pool_key] = trunk_id
                self._drv_vif._remove_subport(neutron, trunk_id, port_id)
                try:
                    self._drv_vif._release_vlan_id(
                        self._existing_vifs[port_id]['vlan_id'])
                    del self._existing_vifs[port_id]
                    neutron.delete_port(port_id)
                except n_exc.PortNotFoundClient:
                    LOG.debug('Unable to release port %s as it no longer '
                              'exists.', port_id)
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port_id)
            del self._recyclable_ports[port_id]
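To make the reuse-or-delete branch in _return_ports_to_pool above concrete,
a small sketch with made-up values; only the ports_pool_max semantics come
from the driver code:

# ports_pool_max == 0 means unbounded pools: recycled ports are always kept;
# otherwise a port is kept only while its pool is below the maximum.
ports_pool_max = 10  # stands in for oslo_cfg.CONF.vif_pool.ports_pool_max


def should_keep(pool_size):
    return not ports_pool_max or pool_size < ports_pool_max


assert should_keep(5)        # below the maximum: rename to 'available-port'
assert not should_keep(10)   # at the maximum: remove subport, delete port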
kuryr_kubernetes/tests/unit/controller/drivers/test_nested_vif.py

@@ -27,39 +27,53 @@ class TestNestedPodVIFDriver(test_base.TestCase):
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        node_subnet_id = mock.sentinel.node_subnet_id
        oslo_cfg.CONF.set_override('worker_nodes_subnet',
                                   node_subnet_id,
                                   group='pod_vif_nested')

        node_fixed_ip = mock.sentinel.node_fixed_ip
        pod_status = mock.MagicMock()
        pod_status.__getitem__.return_value = node_fixed_ip

        pod = mock.MagicMock()
        pod.__getitem__.return_value = pod_status
        parent_port = mock.sentinel.parent_port

        m_driver._get_parent_port_by_host_ip.return_value = parent_port

        cls._get_parent_port(m_driver, neutron, pod)
        m_driver._get_parent_port_by_host_ip.assert_called_once()

    def test_get_parent_port_by_host_ip(self):
        cls = nested_vif.NestedPodVIFDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        node_subnet_id = mock.sentinel.node_subnet_id
        oslo_cfg.CONF.set_override('worker_nodes_subnet',
                                   node_subnet_id,
                                   group='pod_vif_nested')

        node_fixed_ip = mock.sentinel.node_fixed_ip

        port = mock.sentinel.port
        ports = {'ports': [port]}
        neutron.list_ports.return_value = ports

        self.assertEqual(port, cls._get_parent_port(m_driver, neutron, pod))
        self.assertEqual(port, cls._get_parent_port_by_host_ip(
            m_driver, neutron, node_fixed_ip))
        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
                     'ip_address=%s' % str(node_fixed_ip)]
        neutron.list_ports.assert_called_once_with(fixed_ips=fixed_ips)

    def test_get_parent_port_subnet_id_not_configured(self):
    def test_get_parent_port_by_host_ip_subnet_id_not_configured(self):
        cls = nested_vif.NestedPodVIFDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        oslo_cfg.CONF.set_override('worker_nodes_subnet',
                                   '',
                                   group='pod_vif_nested')
        pod = mock.MagicMock()
        node_fixed_ip = mock.sentinel.node_fixed_ip
        self.assertRaises(oslo_cfg.RequiredOptError,
                          cls._get_parent_port, m_driver, neutron, pod)
                          cls._get_parent_port_by_host_ip, m_driver, neutron,
                          node_fixed_ip)

    def test_get_parent_port_trunk_not_found(self):
    def test_get_parent_port_by_host_ip_trunk_not_found(self):
        cls = nested_vif.NestedPodVIFDriver
        m_driver = mock.Mock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

@@ -71,17 +85,13 @@ class TestNestedPodVIFDriver(test_base.TestCase):
                                   group='pod_vif_nested')

        node_fixed_ip = mock.sentinel.node_fixed_ip
        pod_status = mock.MagicMock()
        pod_status.__getitem__.return_value = node_fixed_ip

        pod = mock.MagicMock()
        pod.__getitem__.return_value = pod_status

        ports = {'ports': []}
        neutron.list_ports.return_value = ports

        self.assertRaises(kl_exc.NoResourceException,
                          cls._get_parent_port, m_driver, neutron, pod)
                          cls._get_parent_port_by_host_ip, m_driver, neutron,
                          node_fixed_ip)
        fixed_ips = ['subnet_id=%s' % str(node_subnet_id),
                     'ip_address=%s' % str(node_fixed_ip)]
        neutron.list_ports.assert_called_once_with(fixed_ips=fixed_ips)
kuryr_kubernetes/tests/unit/controller/drivers/test_vif_pool.py

@@ -21,6 +21,7 @@ from oslo_config import cfg as oslo_cfg

from os_vif.objects import vif as osv_vif

from kuryr_kubernetes.controller.drivers import nested_vlan_vif
from kuryr_kubernetes.controller.drivers import neutron_vif
from kuryr_kubernetes.controller.drivers import vif_pool
from kuryr_kubernetes import exceptions

@@ -28,38 +29,39 @@ from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix


def get_pod_obj():
    return {
        'status': {
            'qosClass': 'BestEffort',
            'hostIP': '192.168.1.2',
        },
        'kind': 'Pod',
        'spec': {
            'schedulerName': 'default-scheduler',
            'containers': [{
                'name': 'busybox',
                'image': 'busybox',
                'resources': {}
            }],
            'nodeName': 'kuryr-devstack'
        },
        'metadata': {
            'name': 'busybox-sleep1',
            'namespace': 'default',
            'resourceVersion': '53808',
            'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1',
            'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb'
        }}


@ddt.ddt
class GenericVIFPool(test_base.TestCase):

    def _get_pod_obj(self):
        return {
            'status': {
                'qosClass': 'BestEffort',
                'hostIP': '192.168.1.2',
            },
            'kind': 'Pod',
            'spec': {
                'schedulerName': 'default-scheduler',
                'containers': [{
                    'name': 'busybox',
                    'image': 'busybox',
                    'resources': {}
                }],
                'nodeName': 'kuryr-devstack'
            },
            'metadata': {
                'name': 'busybox-sleep1',
                'namespace': 'default',
                'resourceVersion': '53808',
                'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1',
                'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb'
            }}

    def test_request_vif(self):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = self._get_pod_obj()
        pod = get_pod_obj()
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]

@@ -99,7 +101,7 @@ class GenericVIFPool(test_base.TestCase):
        cls = vif_pool.GenericVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = self._get_pod_obj()
        pod = get_pod_obj()
        del pod['status']['hostIP']
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets

@@ -203,7 +205,7 @@ class GenericVIFPool(test_base.TestCase):
        port = mock.sentinel.port
        subnets = mock.sentinel.subnets

        pod = self._get_pod_obj()
        pod = get_pod_obj()

        m_driver._available_ports_pools = {
            pool_key: collections.deque([port_id])}

@@ -238,7 +240,7 @@ class GenericVIFPool(test_base.TestCase):
        port = mock.sentinel.port
        subnets = mock.sentinel.subnets

        pod = self._get_pod_obj()
        pod = get_pod_obj()

        m_driver._available_ports_pools = {
            pool_key: collections.deque([port_id])}

@@ -267,7 +269,7 @@ class GenericVIFPool(test_base.TestCase):
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pod = self._get_pod_obj()
        pod = get_pod_obj()
        pool_key = mock.sentinel.pool_key
        subnets = mock.sentinel.subnets

@@ -283,7 +285,7 @@ class GenericVIFPool(test_base.TestCase):
        m_driver = mock.MagicMock(spec=cls)
        m_driver._recyclable_ports = {}

        pod = self._get_pod_obj()
        pod = get_pod_obj()
        project_id = mock.sentinel.project_id
        security_groups = [mock.sentinel.security_groups]
        vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')

@@ -425,3 +427,279 @@ class GenericVIFPool(test_base.TestCase):

        neutron.update_port.assert_not_called()
        neutron.delete_port.assert_not_called()


@ddt.ddt
class NestedVIFPool(test_base.TestCase):

    def test_request_vif(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = get_pod_obj()
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]
        vif = mock.sentinel.vif

        m_driver._get_port_from_pool.return_value = vif

        self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                              subnets, security_groups))

    def test_request_vif_empty_pool(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)

        m_driver._existing_vifs = {}
        cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver
        vif_driver = mock.MagicMock(spec=cls_vif_driver)
        m_driver._drv_vif = vif_driver

        pod = get_pod_obj()
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]
        vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')

        m_driver._get_port_from_pool.side_effect = exceptions.ResourceNotReady(
            pod)
        m_driver._drv_vif.request_vif.return_value = vif

        self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                              subnets, security_groups))
        m_driver._drv_vif.request_vif.assert_called_with(
            pod, project_id, subnets, security_groups)

    def test_request_vif_pod_without_host_id(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)

        pod = get_pod_obj()
        del pod['status']['hostIP']
        project_id = mock.sentinel.project_id
        subnets = mock.sentinel.subnets
        security_groups = [mock.sentinel.security_groups]

        self.assertRaises(KeyError, cls.request_vif, m_driver, pod, project_id,
                          subnets, security_groups)

    def test__get_port_from_pool(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = mock.sentinel.pool_key
        port_id = mock.sentinel.port_id
        port = mock.sentinel.port

        pod = get_pod_obj()

        m_driver._available_ports_pools = {
            pool_key: collections.deque([port_id])}
        m_driver._existing_vifs = {port_id: port}

        self.assertEqual(port, cls._get_port_from_pool(
            m_driver, pool_key, pod))

        neutron.update_port.assert_called_once_with(
            port_id,
            {
                "port": {
                    'name': pod['metadata']['name'],
                }
            })

    def test__get_port_from_pool_empty_pool(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pod = mock.sentinel.pod
        pool_key = mock.sentinel.pool_key

        m_driver._available_ports_pools = {pool_key: collections.deque([])}

        self.assertRaises(exceptions.ResourceNotReady, cls._get_port_from_pool,
                          m_driver, pool_key, pod)

        neutron.update_port.assert_not_called()

    def test_release_vif(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        m_driver._recyclable_ports = {}

        pod = get_pod_obj()
        project_id = mock.sentinel.project_id
        security_groups = [mock.sentinel.security_groups]
        vif = osv_vif.VIFOpenVSwitch(id='0fa0e837-d34e-4580-a6c4-04f5f607d93e')

        m_driver._return_ports_to_pool.return_value = None

        cls.release_vif(m_driver, pod, vif, project_id, security_groups)

        m_driver._return_ports_to_pool.assert_called_once()

    @ddt.data((0), (10))
    def test__return_ports_to_pool(self, max_pool):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 5

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   max_pool,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_called_once_with(
            port_id,
            {
                "port": {
                    'name': 'available-port',
                    'security_groups': ['security_group']
                }
            })
        neutron.delete_port.assert_not_called()

    def test__return_ports_to_pool_delete_port(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver
        vif_driver = mock.MagicMock(spec=cls_vif_driver)
        m_driver._drv_vif = vif_driver

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10
        vif = {'vlan_id': mock.sentinel.vlan_id}
        p_port = mock.sentinel.p_port
        trunk_id = mock.sentinel.trunk_id

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {port_id: vif}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   10,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        m_driver._known_trunk_ids = {}
        m_driver._drv_vif._get_parent_port_by_host_ip.return_value = p_port
        m_driver._drv_vif._get_trunk_id.return_value = trunk_id

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        neutron.delete_port.assert_called_once_with(port_id)
        m_driver._drv_vif._get_parent_port_by_host_ip.assert_called_once()
        m_driver._drv_vif._get_trunk_id.assert_called_once_with(p_port)
        m_driver._drv_vif._remove_subport.assert_called_once_with(neutron,
                                                                  trunk_id,
                                                                  port_id)

    def test__return_ports_to_pool_update_exception(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 5

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   0,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        neutron.update_port.side_effect = n_exc.NeutronClientException

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_called_once_with(
            port_id,
            {
                "port": {
                    'name': 'available-port',
                    'security_groups': ['security_group']
                }
            })
        neutron.delete_port.assert_not_called()

    def test__return_ports_to_pool_delete_exception(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver
        vif_driver = mock.MagicMock(spec=cls_vif_driver)
        m_driver._drv_vif = vif_driver

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10
        vif = {'vlan_id': mock.sentinel.vlan_id}
        p_port = mock.sentinel.p_port
        trunk_id = mock.sentinel.trunk_id

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {port_id: vif}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   5,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        neutron.delete_port.side_effect = n_exc.PortNotFoundClient
        m_driver._known_trunk_ids = {}
        m_driver._drv_vif._get_parent_port_by_host_ip.return_value = p_port
        m_driver._drv_vif._get_trunk_id.return_value = trunk_id

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        m_driver._drv_vif._get_parent_port_by_host_ip.assert_called_once()
        m_driver._drv_vif._get_trunk_id.assert_called_once_with(p_port)
        m_driver._drv_vif._remove_subport.assert_called_once_with(neutron,
                                                                  trunk_id,
                                                                  port_id)
        neutron.delete_port.assert_called_once_with(port_id)

    def test__return_ports_to_pool_delete_key_error(self):
        cls = vif_pool.NestedVIFPool
        m_driver = mock.MagicMock(spec=cls)
        neutron = self.useFixture(k_fix.MockNeutronClient()).client
        cls_vif_driver = nested_vlan_vif.NestedVlanPodVIFDriver
        vif_driver = mock.MagicMock(spec=cls_vif_driver)
        m_driver._drv_vif = vif_driver

        pool_key = ('node_ip', 'project_id', tuple(['security_group']))
        port_id = mock.sentinel.port_id
        pool_length = 10
        p_port = mock.sentinel.p_port
        trunk_id = mock.sentinel.trunk_id

        m_driver._recyclable_ports = {port_id: pool_key}
        m_driver._available_ports_pools = {}
        m_driver._existing_vifs = {}
        oslo_cfg.CONF.set_override('ports_pool_max',
                                   5,
                                   group='vif_pool')
        m_driver._get_pool_size.return_value = pool_length
        m_driver._known_trunk_ids = {}
        m_driver._drv_vif._get_parent_port_by_host_ip.return_value = p_port
        m_driver._drv_vif._get_trunk_id.return_value = trunk_id

        cls._return_ports_to_pool(m_driver)

        neutron.update_port.assert_not_called()
        m_driver._drv_vif._get_parent_port_by_host_ip.assert_called_once()
        m_driver._drv_vif._get_trunk_id.assert_called_once_with(p_port)
        m_driver._drv_vif._remove_subport.assert_called_once_with(neutron,
                                                                  trunk_id,
                                                                  port_id)
        neutron.delete_port.assert_not_called()
setup.cfg

@@ -67,6 +67,7 @@ kuryr_kubernetes.controller.drivers.endpoints_lbaas =
kuryr_kubernetes.controller.drivers.vif_pool =
    noop = kuryr_kubernetes.controller.drivers.vif_pool:NoopVIFPool
    generic = kuryr_kubernetes.controller.drivers.vif_pool:GenericVIFPool
    nested = kuryr_kubernetes.controller.drivers.vif_pool:NestedVIFPool

[files]
packages =
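The new 'nested' entry point is loaded the same way as the existing vif_pool
drivers. A minimal sketch using stevedore directly (kuryr-kubernetes has its
own driver-loading helper around entry points, so treat the exact call site
as illustrative):

from stevedore import driver

# Resolve one of the names registered above: 'noop', 'generic' or 'nested'.
mgr = driver.DriverManager(
    namespace='kuryr_kubernetes.controller.drivers.vif_pool',
    name='nested',
    invoke_on_load=True)
vif_pool_driver = mgr.driver  # a NestedVIFPool instance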