Add namespace isolation for services

This patch ensures that pods in namespace X cannot access services
pointing to pods in namespace Y, and vice versa.

The exceptions are:
- Pods in the default namespace can access all services
- Services in the default namespace can be accessed by all pods
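
For reference, a minimal sketch of the kuryr.conf wiring this isolation
relies on (option names are taken from the DevStack and driver changes
below; the security group UUIDs are illustrative):

    [kubernetes]
    pod_security_groups_driver = namespace
    service_security_groups_driver = namespace

    [namespace_sg]
    # shared groups created at deployment time (see configure_neutron_defaults)
    sg_allow_from_namespaces = <sg-uuid>
    sg_allow_from_default = <sg-uuid>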

Depends-On: I37025bf65b67fe04f2a6d9b14bbe1b7bc387e370
Implements: blueprint openshift-project-isolation-support
Change-Id: I7b78e12cdf2bce5d0780e582814ef51ef0c459a7
Luis Tomas Bolivar 2018-07-10 18:29:07 +02:00
parent f62bc0844d
commit 66fb9d18df
17 changed files with 240 additions and 119 deletions

View File

@@ -78,19 +78,20 @@ function ovs_bind_for_kubelet() {
sudo ip link set dev "$ifname" address "$port_mac"
sudo ip link set dev "$ifname" up
for ((i=0; i < ${#port_ips[@]}; i++)); do
if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
subnetpool_id=$(openstack subnet show "${port_subnets[$i]}" \
-c subnetpool_id -f value | cut -f2)
prefix=$(openstack subnet pool show "${subnetpool_id}" \
-c prefixes -f value | cut -f2 -d/)
else
prefix=$(openstack subnet show "${port_subnets[$i]}" \
-c cidr -f value | \
cut -f2 -d/)
fi
prefix=$(openstack subnet show "${port_subnets[$i]}" \
-c cidr -f value | \
cut -f2 -d/)
sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
done
sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname"
if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
subnetpool_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}}
subnetpool_cidr=$(openstack subnet pool show "${subnetpool_id}" \
-c prefixes -f value | cut -f2)
sudo ip route add "$subnetpool_cidr" via "$pod_subnet_gw" dev "$ifname"
else
sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname"
fi
if [ -n "$port_number" ]; then
# if openstack-INPUT chain doesn't exist we create it in INPUT (for
# local development envs since openstack-INPUT is usually only in gates)
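
With the namespace driver, the route added above covers the whole pod
subnet pool instead of only the service subnet, so traffic to any
per-namespace subnet is routed via the pod subnet gateway. On the node this
ends up looking roughly like (interface name and CIDRs illustrative):

    $ ip route
    10.0.0.0/16 via 10.0.0.1 dev <ifname>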

View File

@@ -72,6 +72,7 @@ function configure_kuryr {
iniset "$KURYR_CONFIG" kubernetes pod_subnets_driver "$KURYR_SUBNET_DRIVER"
iniset "$KURYR_CONFIG" kubernetes pod_security_groups_driver "$KURYR_SG_DRIVER"
iniset "$KURYR_CONFIG" kubernetes service_security_groups_driver "$KURYR_SG_DRIVER"
iniset "$KURYR_CONFIG" kubernetes enabled_handlers "$KURYR_ENABLED_HANDLERS"
# Let Kuryr retry connections to K8s API for 20 minutes.
@@ -413,11 +414,21 @@ function configure_neutron_defaults {
--description "allow traffic from default namespace" \
--remote-group "$allow_namespace_sg_id" --ethertype IPv4 --protocol tcp \
"$allow_default_sg_id"
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow traffic from default namespace" \
--remote-group "$allow_namespace_sg_id" --ethertype IPv4 --protocol icmp \
"$allow_default_sg_id"
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow traffic from namespaces at default namespace" \
--remote-group "$allow_default_sg_id" --ethertype IPv4 --protocol tcp \
"$allow_namespace_sg_id"
openstack --os-cloud devstack-admin --os-region "$REGION_NAME" \
security group rule create --project "$project_id" \
--description "allow traffic from namespaces at default namespace" \
--remote-group "$allow_default_sg_id" --ethertype IPv4 --protocol icmp \
"$allow_namespace_sg_id"
iniset "$KURYR_CONFIG" namespace_sg sg_allow_from_namespaces "$allow_namespace_sg_id"
iniset "$KURYR_CONFIG" namespace_sg sg_allow_from_default "$allow_default_sg_id"

View File

@@ -20,13 +20,14 @@ the next steps are needed:
pod_subnets_driver = namespace
In addition, to ensure that pods at one given namespace cannot reach (or be
reached by) the ones at another namespace, except the pods at the default
namespace that can reach (and be reached by) any pod at a different
namespace, the next security group driver needs to be set too::
In addition, to ensure that pods and services in a given namespace
cannot reach (or be reached by) those in another namespace, except for
pods in the default namespace, which can reach (and be reached by) pods in
any other namespace, the following security group drivers need to be set too::
[kubernetes]
pod_security_groups_driver = namespace
service_security_groups_driver = namespace
3. Select (and create if needed) the subnet pool from where the new subnets
@@ -81,54 +82,66 @@ to add the namespace handler and state the namespace subnet driver with::
Testing the network per namespace functionality
-----------------------------------------------
1. Create a namespace::
1. Create two namespaces::
$ kubectl create namespace test
$ kubectl create namespace test1
$ kubectl create namespace test2
2. Check that the resources have been created::
$ kubectl get namespaces
NAME STATUS AGE
test Active 4s
test1 Active 14s
test2 Active 5s
... ... ...
$ kubectl get kuryrnets
NAME AGE
ns-test 1m
ns-test1 1m
ns-test2 1m
$ openstack network list | grep test
| 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | ns/test-net | 8640d134-5ea2-437d-9e2a-89236f6c0198 |
$ openstack network list | grep test1
| 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | ns/test1-net | 8640d134-5ea2-437d-9e2a-89236f6c0198 |
$ openstack subnet list | grep test
| 8640d134-5ea2-437d-9e2a-89236f6c0198 | ns/test-subnet | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | 10.0.1.128/26 |
$ openstack subnet list | grep test1
| 8640d134-5ea2-437d-9e2a-89236f6c0198 | ns/test1-subnet | 7c7b68c5-d3c4-431c-9f69-fbc777b43ee5 | 10.0.1.128/26 |
3. Create a pod in the created namespace::
3. Create a pod in the created namespaces::
$ kubectl run -n test --image kuryr/demo demo
$ kubectl run -n test1 --image kuryr/demo demo
deployment "demo" created
$ kubectl -n test get pod -o wide
$ kubectl run -n test1 --image kuryr/demo demo
deployment "demo" created
$ kubectl -n test1 get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
demo-5995548848-lmmjc 1/1 Running 0 7s 10.0.1.136 node1
4. Create a service::
$ kubectl expose -n test deploy/demo --port 80 --target-port 8080
$ kubectl expose -n test1 deploy/demo --port 80 --target-port 8080
service "demo" exposed
$ kubectl -n test get svc
$ kubectl -n test1 get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demo ClusterIP 10.0.0.141 <none> 80/TCP 18s
5. Test service connectivity::
5. Test service connectivity from both namespaces::
$ curl 10.0.0.141
$ kubectl exec -n test1 -it demo-5995548848-lmmjc /bin/sh
test-1-pod$ curl 10.0.0.141
demo-5995548848-lmmjc: HELLO! I AM ALIVE!!!
$ kubectl exec -n test2 -it demo-5135352253-dfghd /bin/sh
test-2-pod$ curl 10.0.0.141
## No response
6. And finally, to remove a namespace and all its resources (the OpenStack
networks, the KuryrNet CRD, services and pods), you just need to::
$ kubectl delete namespace test
$ kubectl delete namespace test1
$ kubectl delete namespace test2
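
To double-check the isolation at the Neutron level, the per-namespace
security groups can be inspected as well; the namespace driver names them
following the ns/<namespace>-sg pattern (output illustrative)::

    $ openstack security group list | grep ns/test
    | ... | ns/test1-sg | ... |
    | ... | ns/test2-sg | ... |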

View File

@@ -231,6 +231,12 @@ ingress = [
help=_("UUID of the L7 Router")),
]
nested_vif_driver_opts = [
cfg.StrOpt('worker_nodes_subnet',
help=_("Neutron subnet ID for k8s worker node vms."),
default=''),
]
CONF = cfg.CONF
CONF.register_opts(kuryr_k8s_opts)
CONF.register_opts(daemon_opts, group='cni_daemon')
@@ -239,6 +245,7 @@ CONF.register_opts(neutron_defaults, group='neutron_defaults')
CONF.register_opts(octavia_defaults, group='octavia_defaults')
CONF.register_opts(cache_defaults, group='cache_defaults')
CONF.register_opts(ingress, group='ingress')
CONF.register_opts(nested_vif_driver_opts, group='pod_vif_nested')
CONF.register_opts(lib_config.core_opts)
CONF.register_opts(lib_config.binding_opts, 'binding')

View File

@@ -214,11 +214,13 @@ class PodSecurityGroupsDriver(DriverBase):
"""
raise NotImplementedError()
def create_namespace_sg(self, namespace, project_id):
def create_namespace_sg(self, namespace, project_id, crd_spec):
"""Create security group resources for a namespace.
:param namespace: string with the namespace name
:param project_id: OpenStack project ID
:param crd_spec: dict with the keys and values for the CRD spec, such
as subnetId or subnetCIDR
:return: dict with the keys and values for the CRD spec, such as sgId
"""
raise NotImplementedError()

View File

@@ -129,7 +129,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
neutron.delete_security_group(sg_id)
raise
def _ensure_security_group_rules(self, loadbalancer, listener):
def _ensure_lb_security_group_rule(self, loadbalancer, listener):
sg_id = self._find_listeners_sg(loadbalancer)
if sg_id:
try:
@@ -149,7 +149,74 @@ class LBaaSv2Driver(base.LBaaSDriver):
LOG.exception('Failed when creating security group rule '
'for listener %s.', listener.name)
def ensure_listener(self, loadbalancer, protocol, port):
def _extend_lb_security_group_rules(self, loadbalancer, listener):
neutron = clients.get_neutron_client()
sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
for sg in loadbalancer.security_groups:
try:
neutron.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'port_range_min': listener.port,
'port_range_max': listener.port,
'protocol': listener.protocol,
'security_group_id': sg_id,
'remote_group_id': sg,
'description': listener.name,
},
})
except n_exc.NeutronClientException as ex:
if ex.status_code != requests.codes.conflict:
LOG.exception('Failed when creating security group rule '
'for listener %s.', listener.name)
# ensure traffic from the service subnet and the worker node VMs
# (non-native route support) can reach the services
service_subnet_cidr = self._get_subnet_cidr(loadbalancer.subnet_id)
try:
# add access from service subnet
neutron.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'port_range_min': listener.port,
'port_range_max': listener.port,
'protocol': listener.protocol,
'security_group_id': sg_id,
'remote_ip_prefix': service_subnet_cidr,
'description': listener.name,
},
})
# add access from worker node VM subnet for non-native route
# support
worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
if worker_subnet_id:
worker_subnet_cidr = self._get_subnet_cidr(worker_subnet_id)
neutron.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'port_range_min': listener.port,
'port_range_max': listener.port,
'protocol': listener.protocol,
'security_group_id': sg_id,
'remote_ip_prefix': worker_subnet_cidr,
'description': listener.name,
},
})
except n_exc.NeutronClientException as ex:
if ex.status_code != requests.codes.conflict:
LOG.exception('Failed when creating security group rule '
'to enable routes for listener %s.',
listener.name)
def _ensure_security_group_rules(self, loadbalancer, listener,
service_type):
if loadbalancer.provider == const.NEUTRON_LBAAS_HAPROXY_PROVIDER:
self._ensure_lb_security_group_rule(loadbalancer, listener)
elif service_type == 'ClusterIP':
self._extend_lb_security_group_rules(loadbalancer, listener)
def ensure_listener(self, loadbalancer, protocol, port,
service_type='ClusterIP'):
if protocol not in _SUPPORTED_LISTENER_PROT:
LOG.info("Protocol: %(prot)s: is not supported by LBaaSV2", {
'prot': protocol})
@@ -164,7 +231,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
self._create_listener,
self._find_listener)
self._ensure_security_group_rules(loadbalancer, result)
self._ensure_security_group_rules(loadbalancer, result, service_type)
return result
@@ -236,7 +303,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
lbaas.delete_lbaas_member,
member.id, member.pool_id)
def _get_vip_port_id(self, loadbalancer):
def _get_vip_port(self, loadbalancer):
neutron = clients.get_neutron_client()
try:
fixed_ips = ['subnet_id=%s' % str(loadbalancer.subnet_id),
@@ -247,10 +314,19 @@ class LBaaSv2Driver(base.LBaaSDriver):
raise ex
if ports['ports']:
return ports['ports'][0].get("id")
return ports['ports'][0]
return None
def _get_subnet_cidr(self, subnet_id):
neutron = clients.get_neutron_client()
try:
subnet_obj = neutron.show_subnet(subnet_id)
except n_exc.NeutronClientException:
LOG.exception("Subnet %s CIDR not found!", subnet_id)
raise
return subnet_obj.get('subnet')['cidr']
def _create_loadbalancer(self, loadbalancer):
lbaas = clients.get_loadbalancer_client()
response = lbaas.create_loadbalancer({'loadbalancer': {
@@ -259,7 +335,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
'vip_address': str(loadbalancer.ip),
'vip_subnet_id': loadbalancer.subnet_id}})
loadbalancer.id = response['loadbalancer']['id']
loadbalancer.port_id = self._get_vip_port_id(loadbalancer)
loadbalancer.port_id = self._get_vip_port(loadbalancer).get("id")
loadbalancer.provider = response['loadbalancer']['provider']
return loadbalancer
@@ -273,7 +349,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
try:
loadbalancer.id = response['loadbalancers'][0]['id']
loadbalancer.port_id = self._get_vip_port_id(loadbalancer)
loadbalancer.port_id = self._get_vip_port(loadbalancer).get("id")
loadbalancer.provider = response['loadbalancers'][0]['provider']
except (KeyError, IndexError):
return None
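
As a rough sanity check of the rules created by
_extend_lb_security_group_rules above, the security group attached to the
load balancer VIP port can be listed; per listener port one would expect an
ingress rule per service security group plus the prefix-based rules for the
service (and, if configured, worker nodes) subnet. IDs are illustrative:

    $ openstack port show <vip-port-id>
    $ openstack security group rule list <vip-sg-id>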

View File

@@ -41,12 +41,38 @@ cfg.CONF.register_opts(namespace_sg_driver_opts, "namespace_sg")
DEFAULT_NAMESPACE = 'default'
def _get_net_crd(namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
namespace))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise exceptions.ResourceNotReady(namespace)
try:
annotations = ns['metadata']['annotations']
net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]
except KeyError:
LOG.exception("Namespace missing CRD annotations for selecting "
"the corresponding security group.")
raise exceptions.ResourceNotReady(namespace)
try:
net_crd = kubernetes.get('%s/kuryrnets/%s' % (constants.K8S_API_CRD,
net_crd_name))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd
class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
"""Provides security groups for Pod based on a configuration option."""
def get_security_groups(self, pod, project_id):
namespace = pod['metadata']['namespace']
net_crd = self._get_net_crd(namespace)
net_crd = _get_net_crd(namespace)
sg_list = [str(net_crd['spec']['sgId'])]
@@ -58,31 +84,6 @@ class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
return sg_list[:]
def _get_net_crd(self, namespace):
kubernetes = clients.get_kubernetes_client()
try:
ns = kubernetes.get('%s/namespaces/%s' % (constants.K8S_API_BASE,
namespace))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise exceptions.ResourceNotReady(namespace)
try:
annotations = ns['metadata']['annotations']
net_crd_name = annotations[constants.K8S_ANNOTATION_NET_CRD]
except KeyError:
LOG.exception("Namespace missing CRD annotations for selecting "
"the corresponding security group.")
raise exceptions.ResourceNotReady(namespace)
try:
net_crd = kubernetes.get('%s/kuryrnets/%s' % (
constants.K8S_API_CRD, net_crd_name))
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception.")
raise
return net_crd
def _get_extra_sg(self, namespace):
# Differentiates between default namespace and the rest
if namespace == DEFAULT_NAMESPACE:
@@ -90,7 +91,7 @@ class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
else:
return [cfg.CONF.namespace_sg.sg_allow_from_default]
def create_namespace_sg(self, namespace, project_id):
def create_namespace_sg(self, namespace, project_id, crd_spec):
neutron = clients.get_neutron_client()
sg_name = "ns/" + namespace + "-sg"
@@ -110,7 +111,7 @@ class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
{
"security_group_rule": {
"direction": "ingress",
"remote_group_id": sg['id'],
"remote_ip_prefix": crd_spec['subnetCIDR'],
"security_group_id": sg['id']
}
})
@@ -129,3 +130,27 @@ class NamespacePodSecurityGroupsDriver(base.PodSecurityGroupsDriver):
except n_exc.NeutronClientException:
LOG.exception("Error deleting security group %s.", sg_id)
raise
class NamespaceServiceSecurityGroupsDriver(base.ServiceSecurityGroupsDriver):
"""Provides security groups for Service based on a configuration option."""
def get_security_groups(self, service, project_id):
namespace = service['metadata']['namespace']
net_crd = _get_net_crd(namespace)
sg_list = []
sg_list.append(str(net_crd['spec']['sgId']))
extra_sgs = self._get_extra_sg(namespace)
for sg in extra_sgs:
sg_list.append(str(sg))
return sg_list[:]
def _get_extra_sg(self, namespace):
# Differentiates between default namespace and the rest
if namespace == DEFAULT_NAMESPACE:
return [cfg.CONF.namespace_sg.sg_allow_from_default]
else:
return [cfg.CONF.namespace_sg.sg_allow_from_namespaces]

View File

@@ -143,7 +143,8 @@ class NamespacePodSubnetDriver(default_subnet.DefaultPodSubnetDriver):
raise ex
return {'netId': neutron_net['id'],
'routerId': router_id,
'subnetId': neutron_subnet['id']}
'subnetId': neutron_subnet['id'],
'subnetCIDR': neutron_subnet['cidr']}
def rollback_network_resources(self, net_crd_spec, namespace):
neutron = clients.get_neutron_client()
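
With the subnetCIDR key added above, the spec of a KuryrNet CRD created for
a namespace would look roughly like this (UUIDs illustrative; the sgId key
is filled in later by the namespace security groups driver):

    spec:
      netId: <neutron-net-uuid>
      routerId: <neutron-router-uuid>
      subnetId: <neutron-subnet-uuid>
      subnetCIDR: 10.0.1.128/26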

View File

@@ -15,7 +15,6 @@
import abc
import six
from kuryr.lib._i18n import _
from kuryr.lib import exceptions as kl_exc
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg as oslo_cfg
@@ -27,15 +26,6 @@ from kuryr_kubernetes.controller.drivers import neutron_vif
LOG = logging.getLogger(__name__)
# Moved out from neutron_defaults group
nested_vif_driver_opts = [
oslo_cfg.StrOpt('worker_nodes_subnet',
help=_("Neutron subnet ID for k8s worker node vms.")),
]
oslo_cfg.CONF.register_opts(nested_vif_driver_opts, "pod_vif_nested")
@six.add_metaclass(abc.ABCMeta)
class NestedPodVIFDriver(neutron_vif.NeutronPodVIFDriver):
"""Skeletal handler driver for VIFs for Nested Pods."""

View File

@@ -503,7 +503,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
listener = self._drv_lbaas.ensure_listener(
loadbalancer=lbaas_state.loadbalancer,
protocol=protocol,
port=port)
port=port,
service_type=lbaas_spec.type)
if listener is not None:
lbaas_state.listeners.append(listener)
changed = True

View File

@@ -50,7 +50,8 @@ class NamespaceHandler(k8s_base.ResourceEventHandler):
net_crd_spec = self._drv_subnets.create_namespace_network(ns_name,
project_id)
try:
net_crd_sg = self._drv_sg.create_namespace_sg(ns_name, project_id)
net_crd_sg = self._drv_sg.create_namespace_sg(ns_name, project_id,
net_crd_spec)
except n_exc.NeutronClientException:
LOG.exception("Error creating security group for the namespace. "
"Rolling back created network resources.")

View File

@@ -247,6 +247,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
}}
resp = {'loadbalancer': {'id': loadbalancer_id, 'provider': 'haproxy'}}
lbaas.create_loadbalancer.return_value = resp
m_driver._get_vip_port.return_value = {'id': mock.sentinel.port_id}
ret = cls._create_loadbalancer(m_driver, loadbalancer)
lbaas.create_loadbalancer.assert_called_once_with(req)
@@ -267,6 +268,7 @@ class TestLBaaSv2Driver(test_base.TestCase):
resp = {'loadbalancers': [{'id': loadbalancer_id,
'provider': 'haproxy'}]}
lbaas.list_loadbalancers.return_value = resp
m_driver._get_vip_port.return_value = {'id': mock.sentinel.port_id}
ret = cls._find_loadbalancer(m_driver, loadbalancer)
lbaas.list_loadbalancers.assert_called_once_with(

View File

@@ -63,8 +63,10 @@ def get_namespace_obj():
class TestNamespacePodSecurityGroupsDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.controller.drivers.'
'namespace_security_groups._get_net_crd')
@mock.patch('kuryr_kubernetes.config.CONF')
def test_get_security_groups(self, m_cfg):
def test_get_security_groups(self, m_cfg, m_get_crd):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
@@ -79,38 +81,14 @@ class TestNamespacePodSecurityGroupsDriver(test_base.TestCase):
'sgId': sg_id
}
}
m_driver._get_net_crd.return_value = net_crd
m_get_crd.return_value = net_crd
m_driver._get_extra_sg.return_value = [extra_sg]
ret = cls.get_security_groups(m_driver, pod, project_id)
expected_sg = [str(sg_id), str(extra_sg), sg_list[0]]
self.assertEqual(ret, expected_sg)
def test__get_net_crd(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
m_driver = mock.MagicMock(spec=cls)
namespace = mock.sentinel.namespace
subnet_id = mock.sentinel.subnet_id
net_id = mock.sentinel.net_id
sg_id = mock.sentinel.sg_id
ns = get_namespace_obj()
crd = {
'spec': {
'netId': net_id,
'subnetId': subnet_id,
'sgId': sg_id
}
}
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = [ns, crd]
ret = cls._get_net_crd(m_driver, namespace)
self.assertEqual(ret, crd)
m_get_crd.assert_called_once_with(pod['metadata']['namespace'])
def test_create_namespace_sg(self):
cls = namespace_security_groups.NamespacePodSecurityGroupsDriver
@@ -119,11 +97,15 @@ class TestNamespacePodSecurityGroupsDriver(test_base.TestCase):
namespace = 'test'
project_id = mock.sentinel.project_id
sg = {'id': mock.sentinel.sg}
subnet_cidr = mock.sentinel.subnet_cidr
crd_spec = {
'subnetCIDR': subnet_cidr
}
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.create_security_group.return_value = {'security_group': sg}
create_sg_resp = cls.create_namespace_sg(m_driver, namespace,
project_id)
project_id, crd_spec)
self.assertEqual(create_sg_resp, {'sgId': sg['id']})
neutron.create_security_group.assert_called_once()
@@ -135,13 +117,17 @@ class TestNamespacePodSecurityGroupsDriver(test_base.TestCase):
namespace = 'test'
project_id = mock.sentinel.project_id
subnet_cidr = mock.sentinel.subnet_cidr
crd_spec = {
'subnetCIDR': subnet_cidr
}
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.create_security_group.side_effect = (
n_exc.NeutronClientException)
self.assertRaises(n_exc.NeutronClientException,
cls.create_namespace_sg, m_driver,
namespace, project_id)
namespace, project_id, crd_spec)
neutron.create_security_group.assert_called_once()
neutron.create_security_group_rule.assert_not_called()

View File

@@ -226,7 +226,8 @@ class TestNamespacePodSubnetDriver(test_base.TestCase):
neutron = self.useFixture(k_fix.MockNeutronClient()).client
net = {'id': mock.sentinel.net}
neutron.create_network.return_value = {'network': net}
subnet = {'id': mock.sentinel.subnet}
subnet = {'id': mock.sentinel.subnet,
'cidr': mock.sentinel.cidr}
neutron.create_subnet.return_value = {'subnet': subnet}
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
@@ -234,7 +235,8 @@
group='namespace_subnet')
net_crd = {'netId': net['id'],
'routerId': router_id,
'subnetId': subnet['id']}
'subnetId': subnet['id'],
'subnetCIDR': subnet['cidr']}
net_crd_resp = cls.create_namespace_network(m_driver, namespace,
project_id)

View File

@@ -358,7 +358,8 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
ip=ip,
id=uuidutils.generate_uuid())
def ensure_listener(self, loadbalancer, protocol, port):
def ensure_listener(self, loadbalancer, protocol, port,
service_type='ClusterIP'):
if protocol not in _SUPPORTED_LISTENER_PROT:
return None
@@ -677,7 +678,7 @@ class TestLoadBalancerHandler(test_base.TestCase):
members=list(members.values()))
def _generate_lbaas_spec(self, vip, targets, project_id,
subnet_id, prot='TCP'):
subnet_id, prot='TCP', lbaas_type='ClusterIP'):
return obj_lbaas.LBaaSServiceSpec(
ip=vip,
project_id=project_id,
@@ -685,7 +686,8 @@
ports=[obj_lbaas.LBaaSPortSpec(name=str(port),
protocol=prot,
port=port)
for port in set(t[0] for t in targets.values())])
for port in set(t[0] for t in targets.values())],
type=lbaas_type)
def _generate_endpoints(self, targets):
def _target_to_port(item):

View File

@@ -125,7 +125,7 @@ class TestNamespaceHandler(test_base.TestCase):
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id)
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -169,7 +169,7 @@
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id)
self._namespace_name, self._project_id, {'test_net': 'uuid'})
self._set_net_crd.assert_not_called()
def test_on_present_add_kuryrnet_crd_exception(self):
@@ -185,7 +185,7 @@
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id)
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_not_called()
@@ -207,7 +207,7 @@
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id)
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)
@@ -233,7 +233,7 @@
self._create_namespace_network.assert_called_once_with(
self._namespace_name, self._project_id)
self._create_namespace_sg.assert_called_once_with(
self._namespace_name, self._project_id)
self._namespace_name, self._project_id, net_crd_spec)
self._add_kuryrnet_crd.assert_called_once_with(self._namespace_name,
net_crd_spec)
self._set_net_crd.assert_called_once_with(self._namespace, net_crd)

View File

@@ -65,6 +65,7 @@ kuryr_kubernetes.controller.drivers.pod_security_groups =
kuryr_kubernetes.controller.drivers.service_security_groups =
default = kuryr_kubernetes.controller.drivers.default_security_groups:DefaultServiceSecurityGroupsDriver
namespace = kuryr_kubernetes.controller.drivers.namespace_security_groups:NamespaceServiceSecurityGroupsDriver
kuryr_kubernetes.controller.drivers.network_policy =
default = kuryr_kubernetes.controller.drivers.network_policy:NetworkPolicyDriver