author     Luis Tomas Bolivar <ltomasbo@redhat.com>    2018-12-24 13:01:12 +0100
committer  Daniel Mellado <dmellado@redhat.com>        2019-01-08 06:35:55 -0500
commit     b200d368cd1e50366071257f914b4cb709ee7b26 (patch)
tree       3bad15e948fe8fb2d7972b5617d9f47e649b4faf
parent     71a8ebd1f047efcd11210bceec886e4984a409a8 (diff)
Add Network Policy support to services
This patch adds support for Network Policy on services. It applies the
pods' security groups onto the services in front of them. It makes the
following assumptions:
- All the pods pointed at by one svc have the same labels, thus the same
  sgs being enforced
- Only the SG rules that have the same protocol and direction as the
  listener being created are copied
- A default rule is added to the NP to enable traffic from the services
  subnet CIDR

Partially Implements: blueprint k8s-network-policies

Change-Id: Ibd4b51ff40b69af26ab7e7b81d18e63abddf775b
Notes (review):
    Code-Review+2: Daniel Mellado <dmellado@redhat.com>
    Code-Review+1: Maysa de Macedo Souza <maysa.macedo95@gmail.com>
    Code-Review+2: Michał Dulko <mdulko@redhat.com>
    Workflow+1: Michał Dulko <mdulko@redhat.com>
    Verified+2: Zuul
    Submitted-by: Zuul
    Submitted-at: Tue, 08 Jan 2019 19:17:06 +0000
    Reviewed-on: https://review.openstack.org/627175
    Project: openstack/kuryr-kubernetes
    Branch: refs/heads/master
-rw-r--r--  kuryr_kubernetes/controller/drivers/lbaasv2.py                        | 84
-rw-r--r--  kuryr_kubernetes/controller/drivers/network_policy.py                 | 67
-rw-r--r--  kuryr_kubernetes/controller/drivers/network_policy_security_groups.py | 97
-rw-r--r--  kuryr_kubernetes/controller/drivers/utils.py                          | 31
-rw-r--r--  kuryr_kubernetes/controller/handlers/lbaas.py                         | 19
-rw-r--r--  kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py        |  2
-rw-r--r--  kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py | 17
-rw-r--r--  kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py         |  2
-rw-r--r--  kuryr_kubernetes/utils.py                                             | 12
9 files changed, 214 insertions(+), 117 deletions(-)
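
Taken together, the change has three cooperating parts: the service
security-groups driver resolves the security groups of the pods behind a
service, the LBaaS driver copies the matching ingress rules onto the load
balancer's security group, and every network-policy security group gets a
default rule admitting the services subnet. A condensed, illustrative
sketch of the service-to-security-groups path (the helper names below are
stand-ins, not the patched Kuryr APIs):

    def sgs_for_service(service, get_pods, get_pod_sgs, project_id):
        """Sketch: resolve the SGs a service's load balancer mirrors."""
        selector = service['spec'].get('selector')
        if not selector:
            return []
        pods = get_pods({'selector': selector},
                        service['metadata']['namespace']).get('items')
        # Assumption stated in the commit message: all pods behind one
        # service share the same labels, so the first pod's SGs stand in
        # for all of them.
        return get_pod_sgs(pods[0], project_id) if pods else []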
diff --git a/kuryr_kubernetes/controller/drivers/lbaasv2.py b/kuryr_kubernetes/controller/drivers/lbaasv2.py
index 3b592c4..0da4e9b 100644
--- a/kuryr_kubernetes/controller/drivers/lbaasv2.py
+++ b/kuryr_kubernetes/controller/drivers/lbaasv2.py
@@ -31,6 +31,7 @@ from kuryr_kubernetes import constants as const
 from kuryr_kubernetes.controller.drivers import base
 from kuryr_kubernetes import exceptions as k_exc
 from kuryr_kubernetes.objects import lbaas as obj_lbaas
+from kuryr_kubernetes import utils
 
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -199,6 +200,53 @@ class LBaaSv2Driver(base.LBaaSDriver):
             LOG.exception('Failed when creating security group rule '
                           'for listener %s.', listener.name)
 
+    def _apply_members_security_groups(self, loadbalancer, port, target_port,
+                                       protocol, sg_rule_name):
+        neutron = clients.get_neutron_client()
+        if CONF.octavia_defaults.sg_mode == 'create':
+            sg_id = self._find_listeners_sg(loadbalancer)
+        else:
+            sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
+
+        # Check if Network Policy allows listener on the pods
+        for sg in loadbalancer.security_groups:
+            if sg != sg_id:
+                rules = neutron.list_security_group_rules(
+                    security_group_id=sg)
+                for rule in rules['security_group_rules']:
+                    # copying ingress rules with same protocol onto the
+                    # loadbalancer sg rules
+                    # NOTE(ltomasbo): NP security groups only have
+                    # remote_ip_prefix, not remote_group_id, therefore only
+                    # applying the ones with remote_ip_prefix
+                    if (rule['protocol'] == protocol.lower() and
+                            rule['direction'] == 'ingress' and
+                            rule['remote_ip_prefix']):
+                        # If listener port not in allowed range, skip
+                        min_port = rule.get('port_range_min')
+                        max_port = rule.get('port_range_max')
+                        if (min_port and target_port not in range(min_port,
+                                                                  max_port+1)):
+                            continue
+                        try:
+                            neutron.create_security_group_rule({
+                                'security_group_rule': {
+                                    'direction': 'ingress',
+                                    'port_range_min': port,
+                                    'port_range_max': port,
+                                    'protocol': protocol,
+                                    'remote_ip_prefix': rule[
+                                        'remote_ip_prefix'],
+                                    'security_group_id': sg_id,
+                                    'description': sg_rule_name,
+                                },
+                            })
+                        except n_exc.NeutronClientException as ex:
+                            if ex.status_code != requests.codes.conflict:
+                                LOG.exception('Failed when creating security '
+                                              'group rule for listener %s.',
+                                              sg_rule_name)
+
     def _extend_lb_security_group_rules(self, loadbalancer, listener):
         neutron = clients.get_neutron_client()
 
@@ -242,7 +290,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
                           'rule for listener %s.', listener.name)
 
         # ensure routes have access to the services
-        service_subnet_cidr = self._get_subnet_cidr(loadbalancer.subnet_id)
+        service_subnet_cidr = utils.get_subnet_cidr(loadbalancer.subnet_id)
         try:
             # add access from service subnet
             neutron.create_security_group_rule({
@@ -261,7 +309,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
         # support
         worker_subnet_id = CONF.pod_vif_nested.worker_nodes_subnet
         if worker_subnet_id:
-            worker_subnet_cidr = self._get_subnet_cidr(worker_subnet_id)
+            worker_subnet_cidr = utils.get_subnet_cidr(worker_subnet_id)
             neutron.create_security_group_rule({
                 'security_group_rule': {
                     'direction': 'ingress',
@@ -321,7 +369,10 @@ class LBaaSv2Driver(base.LBaaSDriver):
                 lbaas.delete_listener,
                 listener.id)
 
-        sg_id = self._find_listeners_sg(loadbalancer)
+        if CONF.octavia_defaults.sg_mode == 'create':
+            sg_id = self._find_listeners_sg(loadbalancer)
+        else:
+            sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
         if sg_id:
             rules = neutron.list_security_group_rules(
                 security_group_id=sg_id, description=listener.name)
@@ -363,7 +414,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
 
     def ensure_member(self, loadbalancer, pool,
                       subnet_id, ip, port, target_ref_namespace,
-                      target_ref_name):
+                      target_ref_name, listener_port=None):
         name = ("%s/%s" % (target_ref_namespace, target_ref_name))
         name += ":%s" % port
         member = obj_lbaas.LBaaSMember(name=name,
@@ -372,9 +423,19 @@ class LBaaSv2Driver(base.LBaaSDriver):
                                        subnet_id=subnet_id,
                                        ip=ip,
                                        port=port)
-        return self._ensure_provisioned(loadbalancer, member,
-                                        self._create_member,
-                                        self._find_member)
+        result = self._ensure_provisioned(loadbalancer, member,
+                                          self._create_member,
+                                          self._find_member)
+
+        network_policy = (
+            'policy' in CONF.kubernetes.enabled_handlers and
+            CONF.kubernetes.service_security_groups_driver == 'policy')
+        if network_policy and listener_port:
+            protocol = pool.protocol
+            sg_rule_name = pool.name
+            self._apply_members_security_groups(loadbalancer, listener_port,
+                                                port, protocol, sg_rule_name)
+        return result
 
     def release_member(self, loadbalancer, member):
         lbaas = clients.get_loadbalancer_client()
@@ -397,15 +458,6 @@ class LBaaSv2Driver(base.LBaaSDriver):
 
         return None
 
-    def _get_subnet_cidr(self, subnet_id):
-        neutron = clients.get_neutron_client()
-        try:
-            subnet_obj = neutron.show_subnet(subnet_id)
-        except n_exc.NeutronClientException:
-            LOG.exception("Subnet %s CIDR not found!", subnet_id)
-            raise
-        return subnet_obj.get('subnet')['cidr']
-
     def _create_loadbalancer(self, loadbalancer):
         lbaas = clients.get_loadbalancer_client()
 
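
The filter inside _apply_members_security_groups above only copies ingress
rules whose protocol matches the listener and that carry a
remote_ip_prefix, and it skips rules whose port range excludes the member's
target port. A minimal self-contained sketch of that predicate (the sample
rule values are illustrative):

    def rule_applies(rule, protocol, target_port):
        """Sketch of the copy filter in _apply_members_security_groups."""
        if (rule['protocol'] != protocol.lower()
                or rule['direction'] != 'ingress'
                or not rule['remote_ip_prefix']):
            return False
        min_port = rule.get('port_range_min')
        max_port = rule.get('port_range_max')
        # Mirrors the patch: an unset minimum means no port restriction.
        return not min_port or target_port in range(min_port, max_port + 1)

    assert rule_applies({'protocol': 'tcp', 'direction': 'ingress',
                         'remote_ip_prefix': '10.0.0.0/24',
                         'port_range_min': 8000, 'port_range_max': 8080},
                        'TCP', 8080)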
diff --git a/kuryr_kubernetes/controller/drivers/network_policy.py b/kuryr_kubernetes/controller/drivers/network_policy.py
index 8016257..62e3bda 100644
--- a/kuryr_kubernetes/controller/drivers/network_policy.py
+++ b/kuryr_kubernetes/controller/drivers/network_policy.py
@@ -17,10 +17,12 @@ from oslo_log import log as logging
 from neutronclient.common import exceptions as n_exc
 
 from kuryr_kubernetes import clients
+from kuryr_kubernetes import config
 from kuryr_kubernetes import constants
 from kuryr_kubernetes.controller.drivers import base
-from kuryr_kubernetes.controller.drivers import utils
+from kuryr_kubernetes.controller.drivers import utils as driver_utils
 from kuryr_kubernetes import exceptions
+from kuryr_kubernetes import utils
 
 LOG = logging.getLogger(__name__)
 
@@ -93,14 +95,14 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                            current_sg_rules]
         for sg_rule in sg_rules_to_delete:
             try:
-                utils.delete_security_group_rule(sgr_ids[sg_rule])
+                driver_utils.delete_security_group_rule(sgr_ids[sg_rule])
             except n_exc.NotFound:
                 LOG.debug('Trying to delete non existing sg_rule %s', sg_rule)
         # Create new rules that weren't already on the security group
         sg_rules_to_add = [rule for rule in current_sg_rules if rule not in
                            existing_sg_rules]
         for sg_rule in sg_rules_to_add:
-            sgr_id = utils.create_security_group_rule(sg_rule)
+            sgr_id = driver_utils.create_security_group_rule(sg_rule)
             if sg_rule['security_group_rule'].get('direction') == 'ingress':
                 for i_rule in i_rules:
                     if sg_rule == i_rule:
@@ -111,8 +113,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                     e_rule["security_group_rule"]["id"] = sgr_id
         # Annotate kuryrnetpolicy CRD with current policy and ruleset
         pod_selector = policy['spec'].get('podSelector')
-        utils.patch_kuryr_crd(crd, i_rules, e_rules, pod_selector,
-                              np_spec=policy['spec'])
+        driver_utils.patch_kuryr_crd(crd, i_rules, e_rules, pod_selector,
+                                     np_spec=policy['spec'])
 
         if existing_pod_selector != pod_selector:
             return existing_pod_selector
@@ -142,13 +144,26 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             sg_id = sg['security_group']['id']
             i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
             for i_rule in i_rules:
-                sgr_id = utils.create_security_group_rule(i_rule)
+                sgr_id = driver_utils.create_security_group_rule(i_rule)
                 i_rule['security_group_rule']['id'] = sgr_id
 
             for e_rule in e_rules:
-                sgr_id = utils.create_security_group_rule(e_rule)
+                sgr_id = driver_utils.create_security_group_rule(e_rule)
                 e_rule['security_group_rule']['id'] = sgr_id
 
+            # NOTE(ltomasbo): Add extra SG rule to allow traffic from services
+            # subnet
+            svc_cidr = utils.get_subnet_cidr(
+                config.CONF.neutron_defaults.service_subnet)
+            svc_rule = {
+                u'security_group_rule': {
+                    u'ethertype': 'IPv4',
+                    u'security_group_id': sg_id,
+                    u'direction': 'ingress',
+                    u'description': 'Kuryr-Kubernetes NetPolicy SG rule',
+                    u'remote_ip_prefix': svc_cidr
+                }}
+            driver_utils.create_security_group_rule(svc_rule)
         except (n_exc.NeutronClientException, exceptions.ResourceNotReady):
             LOG.exception("Error creating security group for network policy "
                           " %s", policy['metadata']['name'])
@@ -179,12 +194,13 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         ips = []
         matching_pods = []
         if namespace_selector:
-            matching_namespaces = utils.get_namespaces(namespace_selector)
+            matching_namespaces = driver_utils.get_namespaces(
+                namespace_selector)
             for ns in matching_namespaces.get('items'):
-                matching_pods = utils.get_pods(pod_selector,
-                                               ns['metadata']['name'])
+                matching_pods = driver_utils.get_pods(pod_selector,
+                                                      ns['metadata']['name'])
         else:
-            matching_pods = utils.get_pods(pod_selector, namespace)
+            matching_pods = driver_utils.get_pods(pod_selector, namespace)
         for pod in matching_pods.get('items'):
             if pod['status']['podIP']:
                 ips.append(pod['status']['podIP'])
@@ -214,7 +230,8 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                 ns_cidr = self._get_namespace_subnet_cidr(ns)
                 cidrs.append(ns_cidr)
             else:
-                matching_namespaces = utils.get_namespaces(namespace_selector)
+                matching_namespaces = driver_utils.get_namespaces(
+                    namespace_selector)
                 for ns in matching_namespaces.get('items'):
                     # NOTE(ltomasbo): This requires the namespace handler to be
                     # also enabled
@@ -280,7 +297,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
             if rule_list[0] == {}:
                 LOG.debug('Applying default all open policy from %s',
                           policy['metadata']['selfLink'])
-                rule = utils.create_security_group_rule_body(
+                rule = driver_utils.create_security_group_rule_body(
                     sg_id, direction, port_range_min=1, port_range_max=65535)
                 sg_rule_body_list.append(rule)
 
@@ -294,31 +311,33 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
                 for port in rule_block['ports']:
                     if allowed_cidrs or allow_all or selectors:
                         for cidr in allowed_cidrs:
-                            rule = utils.create_security_group_rule_body(
-                                sg_id, direction, port.get('port'),
-                                protocol=port.get('protocol'),
-                                cidr=cidr)
+                            rule = (
+                                driver_utils.create_security_group_rule_body(
+                                    sg_id, direction, port.get('port'),
+                                    protocol=port.get('protocol'),
+                                    cidr=cidr))
                             sg_rule_body_list.append(rule)
                         if allow_all:
-                            rule = utils.create_security_group_rule_body(
-                                sg_id, direction, port.get('port'),
-                                protocol=port.get('protocol'))
+                            rule = (
+                                driver_utils.create_security_group_rule_body(
+                                    sg_id, direction, port.get('port'),
+                                    protocol=port.get('protocol')))
                             sg_rule_body_list.append(rule)
                     else:
-                        rule = utils.create_security_group_rule_body(
+                        rule = driver_utils.create_security_group_rule_body(
                             sg_id, direction, port.get('port'),
                             protocol=port.get('protocol'))
                         sg_rule_body_list.append(rule)
             elif allowed_cidrs or allow_all or selectors:
                 for cidr in allowed_cidrs:
-                    rule = utils.create_security_group_rule_body(
+                    rule = driver_utils.create_security_group_rule_body(
                         sg_id, direction,
                         port_range_min=1,
                         port_range_max=65535,
                         cidr=cidr)
                     sg_rule_body_list.append(rule)
                 if allow_all:
-                    rule = utils.create_security_group_rule_body(
+                    rule = driver_utils.create_security_group_rule_body(
                         sg_id, direction,
                         port_range_min=1,
                         port_range_max=65535)
@@ -456,7 +475,7 @@ class NetworkPolicyDriver(base.NetworkPolicyDriver):
         pod_selector = policy['spec'].get('podSelector')
         if pod_selector:
             policy_namespace = policy['metadata']['namespace']
-            pods = utils.get_pods(pod_selector, policy_namespace)
+            pods = driver_utils.get_pods(pod_selector, policy_namespace)
             return pods.get('items')
         else:
             # NOTE(ltomasbo): It affects all the pods on the namespace
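
The extra rule created above is the commit message's "default rule to NP":
without it, traffic arriving from the services subnet (where the load
balancer VIPs live) would be dropped by any policy that does not explicitly
allow that CIDR. A standalone sketch of the rule body; the CIDR stands in
for the configured service subnet:

    def services_subnet_rule(sg_id, svc_cidr='10.0.0.64/26'):
        """Sketch of the default rule each policy SG receives."""
        return {'security_group_rule': {
            'ethertype': 'IPv4',
            'security_group_id': sg_id,
            'direction': 'ingress',
            'description': 'Kuryr-Kubernetes NetPolicy SG rule',
            'remote_ip_prefix': svc_cidr,  # illustrative CIDR value
        }}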
diff --git a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py b/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
index 10e6d1a..00a3d20 100644
--- a/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
+++ b/kuryr_kubernetes/controller/drivers/network_policy_security_groups.py
@@ -192,36 +192,40 @@ def _parse_rules(direction, crd, pod):
     return matched, crd_rules
 
 
-class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
-    """Provides security groups for pods based on network policies"""
-
-    def get_security_groups(self, pod, project_id):
-        sg_list = []
+def _get_pod_sgs(pod, project_id):
+    sg_list = []
 
-        pod_labels = pod['metadata'].get('labels')
-        pod_namespace = pod['metadata']['namespace']
+    pod_labels = pod['metadata'].get('labels')
+    pod_namespace = pod['metadata']['namespace']
 
-        knp_crds = _get_kuryrnetpolicy_crds(namespace=pod_namespace)
-        for crd in knp_crds.get('items'):
-            pod_selector = crd['spec'].get('podSelector')
-            if pod_selector:
-                if _match_selector(pod_selector, pod_labels):
-                    LOG.debug("Appending %s",
-                              str(crd['spec']['securityGroupId']))
-                    sg_list.append(str(crd['spec']['securityGroupId']))
-            else:
-                LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
-                sg_list.append(str(crd['spec']['securityGroupId']))
+    knp_crds = _get_kuryrnetpolicy_crds(namespace=pod_namespace)
+    for crd in knp_crds.get('items'):
+        pod_selector = crd['spec'].get('podSelector')
+        if pod_selector:
+            if _match_selector(pod_selector, pod_labels):
+                LOG.debug("Appending %s",
+                          str(crd['spec']['securityGroupId']))
+                sg_list.append(str(crd['spec']['securityGroupId']))
+        else:
+            LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
+            sg_list.append(str(crd['spec']['securityGroupId']))
 
-        # NOTE(maysams) Pods that are not selected by any Networkpolicy
-        # are fully accessible. Thus, the default security group is associated.
-        if not sg_list:
-            sg_list = config.CONF.neutron_defaults.pod_security_groups
-            if not sg_list:
-                raise cfg.RequiredOptError('pod_security_groups',
-                                           cfg.OptGroup('neutron_defaults'))
+    # NOTE(maysams) Pods that are not selected by any Networkpolicy
+    # are fully accessible. Thus, the default security group is associated.
+    if not sg_list:
+        sg_list = config.CONF.neutron_defaults.pod_security_groups
+        if not sg_list:
+            raise cfg.RequiredOptError('pod_security_groups',
+                                       cfg.OptGroup('neutron_defaults'))
 
-        return sg_list[:]
+    return sg_list[:]
+
+
+class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
+    """Provides security groups for pods based on network policies"""
+
+    def get_security_groups(self, pod, project_id):
+        return _get_pod_sgs(pod, project_id)
 
     def create_sg_rules(self, pod):
         LOG.debug("Creating sg rule for pod: %s", pod['metadata']['name'])
@@ -297,36 +301,17 @@ class NetworkPolicyServiceSecurityGroupsDriver(
     def get_security_groups(self, service, project_id):
         sg_list = []
         svc_namespace = service['metadata']['namespace']
-        svc_labels = service['metadata'].get('labels')
-        LOG.debug("Using labels %s", svc_labels)
-
-        knp_crds = _get_kuryrnetpolicy_crds(namespace=svc_namespace)
-        for crd in knp_crds.get('items'):
-            pod_selector = crd['spec'].get('podSelector')
-            if pod_selector:
-                crd_labels = pod_selector.get('matchLabels', None)
-                crd_expressions = pod_selector.get('matchExpressions', None)
-
-                match_exp = match_lb = True
-                if crd_expressions:
-                    match_exp = _match_expressions(crd_expressions,
-                                                   svc_labels)
-                if crd_labels and svc_labels:
-                    match_lb = _match_labels(crd_labels, svc_labels)
-                if match_exp and match_lb:
-                    LOG.debug("Appending %s",
-                              str(crd['spec']['securityGroupId']))
-                    sg_list.append(str(crd['spec']['securityGroupId']))
-            else:
-                LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
-                sg_list.append(str(crd['spec']['securityGroupId']))
-
-        # NOTE(maysams) Pods that are not selected by any Networkpolicy
-        # are fully accessible. Thus, the default security group is associated.
-        if not sg_list:
-            sg_list = config.CONF.neutron_defaults.pod_security_groups
-            if not sg_list:
-                raise cfg.RequiredOptError('pod_security_groups',
-                                           cfg.OptGroup('neutron_defaults'))
-
+        svc_selector = service['spec'].get('selector')
+
+        # skip if no selector
+        if svc_selector:
+            # get affected pods by svc selector
+            pods = driver_utils.get_pods({'selector': svc_selector},
+                                         svc_namespace).get('items')
+            # NOTE(ltomasbo): We assume all the pods pointed by a service
+            # have the same labels, and the same policy will be applied to
+            # all of them. Hence only considering the security groups applied
+            # to the first one.
+            if pods:
+                return _get_pod_sgs(pods[0], project_id)
         return sg_list[:]
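
In short, the service driver no longer re-implements CRD matching against
service labels; it resolves the pods behind the service and reuses the pod
path. A hedged usage sketch (the service body and project id below are
illustrative):

    service = {
        'metadata': {'namespace': 'default', 'name': 'demo'},
        'spec': {'selector': {'app': 'demo'}},
    }
    # driver = NetworkPolicyServiceSecurityGroupsDriver()
    # driver.get_security_groups(service, 'project-uuid')
    # -> the SGs of the first pod matching {'app': 'demo'}; a service
    #    without spec.selector falls through to an empty list.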
diff --git a/kuryr_kubernetes/controller/drivers/utils.py b/kuryr_kubernetes/controller/drivers/utils.py
index 7dd0ec3..6bf8eb0 100644
--- a/kuryr_kubernetes/controller/drivers/utils.py
+++ b/kuryr_kubernetes/controller/drivers/utils.py
@@ -102,21 +102,26 @@ def get_pods(selector, namespace=None):
 
     """
     kubernetes = clients.get_kubernetes_client()
-    labels = selector.get('matchLabels', None)
-    if labels:
-        # Removing pod-template-hash as pods will not have it and
-        # otherwise there will be no match
-        labels.pop('pod-template-hash', None)
-        labels = replace_encoded_characters(labels)
 
-    exps = selector.get('matchExpressions', None)
-    if exps:
-        exps = ', '.join(format_expression(exp) for exp in exps)
-        if labels:
-            expressions = parse.quote("," + exps)
-            labels += expressions
-        else:
-            labels = parse.quote(exps)
+    svc_selector = selector.get('selector')
+    if svc_selector:
+        labels = replace_encoded_characters(svc_selector)
+    else:
+        labels = selector.get('matchLabels', None)
+        if labels:
+            # Removing pod-template-hash as pods will not have it and
+            # otherwise there will be no match
+            labels.pop('pod-template-hash', None)
+            labels = replace_encoded_characters(labels)
+
+        exps = selector.get('matchExpressions', None)
+        if exps:
+            exps = ', '.join(format_expression(exp) for exp in exps)
+            if labels:
+                expressions = parse.quote("," + exps)
+                labels += expressions
+            else:
+                labels = parse.quote(exps)
 
     if namespace:
         pods = kubernetes.get(
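
get_pods thus accepts two selector shapes: the new {'selector': ...}
wrapper used by the service driver, and the podSelector style
(matchLabels/matchExpressions) used by network policies. A standalone
sketch of the label-query construction; the helper below only approximates
replace_encoded_characters and is not the Kuryr function:

    from urllib import parse

    def label_query(selector):
        svc_selector = selector.get('selector')
        if svc_selector:
            labels = dict(svc_selector)
        else:
            labels = dict(selector.get('matchLabels') or {})
            labels.pop('pod-template-hash', None)
        return parse.quote(','.join('%s=%s' % kv
                                    for kv in sorted(labels.items())))

    print(label_query({'selector': {'app': 'demo'}}))     # app%3Ddemo
    print(label_query({'matchLabels': {'app': 'demo'}}))  # app%3Ddemo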
diff --git a/kuryr_kubernetes/controller/handlers/lbaas.py b/kuryr_kubernetes/controller/handlers/lbaas.py
index 9027877..62bbf70 100644
--- a/kuryr_kubernetes/controller/handlers/lbaas.py
+++ b/kuryr_kubernetes/controller/handlers/lbaas.py
@@ -364,7 +364,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                            p.port]
             except KeyError:
                 continue
-        current_targets = {(str(m.ip), m.port) for m in lbaas_state.members}
+        current_targets = {(str(m.ip), m.port, m.pool_id)
+                           for m in lbaas_state.members}
 
         for subset in endpoints.get('subsets', []):
             subset_ports = subset.get('ports', [])
@@ -380,14 +381,14 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                     continue
                 for subset_port in subset_ports:
                     target_port = subset_port['port']
-                    if (target_ip, target_port) in current_targets:
-                        continue
                     port_name = subset_port.get('name')
                     try:
                         pool = pool_by_tgt_name[port_name]
                     except KeyError:
                         LOG.debug("No pool found for port: %r", port_name)
                         continue
+                    if (target_ip, target_port, pool.id) in current_targets:
+                        continue
                     # TODO(apuimedo): Do not pass subnet_id at all when in
                     # L3 mode once old neutron-lbaasv2 is not supported, as
                     # octavia does not require it
@@ -400,6 +401,15 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                     # from VIP to pods happens in layer 3 mode, i.e.,
                     # routed.
                     member_subnet_id = lbaas_state.loadbalancer.subnet_id
+                    first_member_of_the_pool = True
+                    for member in lbaas_state.members:
+                        if pool.id == member.pool_id:
+                            first_member_of_the_pool = False
+                            break
+                    if first_member_of_the_pool:
+                        listener_port = lsnr_by_id[pool.listener_id].port
+                    else:
+                        listener_port = None
                     member = self._drv_lbaas.ensure_member(
                         loadbalancer=lbaas_state.loadbalancer,
                         pool=pool,
@@ -407,7 +417,8 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
                         ip=target_ip,
                         port=target_port,
                         target_ref_namespace=target_ref['namespace'],
-                        target_ref_name=target_ref['name'])
+                        target_ref_name=target_ref['name'],
+                        listener_port=listener_port)
                     lbaas_state.members.append(member)
                     changed = True
 
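
Two handler changes cooperate here: members are now tracked as
(ip, port, pool_id) triples so the same endpoint can belong to several
pools, and the listener port is forwarded only for a pool's first member,
so _apply_members_security_groups runs once per listener rather than once
per member. A small sketch of the first-member check (data shapes are
illustrative):

    def pick_listener_port(pool, members, lsnr_by_id):
        """Sketch: forward the listener port only for a pool's 1st member."""
        if any(m['pool_id'] == pool['id'] for m in members):
            return None  # SG rules were already propagated for this pool
        return lsnr_by_id[pool['listener_id']]['port']

    members = [{'pool_id': 'pool-1'}]
    lsnr_by_id = {'lsnr-1': {'port': 80}}
    print(pick_listener_port({'id': 'pool-1', 'listener_id': 'lsnr-1'},
                             members, lsnr_by_id))  # None: not first
    print(pick_listener_port({'id': 'pool-2', 'listener_id': 'lsnr-1'},
                             members, lsnr_by_id))  # 80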
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
index cba04eb..a6ff739 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_lbaasv2.py
@@ -159,6 +159,8 @@ class TestLBaaSv2Driver(test_base.TestCase):
             'security_group_rules': []}
         cls = d_lbaasv2.LBaaSv2Driver
         m_driver = mock.Mock(spec=d_lbaasv2.LBaaSv2Driver)
+        m_driver._get_vip_port.return_value = {
+            'security_groups': [mock.sentinel.sg_id]}
         loadbalancer = mock.Mock()
         listener = mock.Mock()
 
diff --git a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
index cce5303..8924419 100644
--- a/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
+++ b/kuryr_kubernetes/tests/unit/controller/drivers/test_network_policy.py
@@ -19,6 +19,7 @@ from kuryr_kubernetes.controller.drivers import network_policy
 from kuryr_kubernetes import exceptions
 from kuryr_kubernetes.tests import base as test_base
 from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
+from kuryr_kubernetes import utils
 
 from neutronclient.common import exceptions as n_exc
 
@@ -185,11 +186,15 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_from_network_policy(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_from_network_policy(self, m_utils,
+                                                             m_parse,
                                                              m_add_crd,
                                                              m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         self._driver.neutron.create_security_group_rule.return_value = {
             'security_group_rule': {'id': mock.sentinel.id}}
@@ -204,10 +209,13 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_with_k8s_exc(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_with_k8s_exc(self, m_utils, m_parse,
                                                       m_add_crd, m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         m_get_crd.side_effect = exceptions.K8sClientException
         self._driver.neutron.create_security_group_rule.return_value = {
@@ -224,10 +232,13 @@ class TestNetworkPolicyDriver(test_base.TestCase):
                        '_add_kuryrnetpolicy_crd')
     @mock.patch.object(network_policy.NetworkPolicyDriver,
                        'parse_network_policy_rules')
-    def test_create_security_group_rules_error_add_crd(self, m_parse,
+    @mock.patch.object(utils, 'get_subnet_cidr')
+    def test_create_security_group_rules_error_add_crd(self, m_utils, m_parse,
                                                        m_add_crd, m_get_crd):
         self._driver.neutron.create_security_group.return_value = {
             'security_group': {'id': mock.sentinel.id}}
+        m_utils.get_subnet_cidr.return_value = {
+            'subnet': {'cidr': mock.sentinel.cidr}}
         m_parse.return_value = (self._i_rules, self._e_rules)
         m_add_crd.side_effect = exceptions.K8sClientException
         self._driver.neutron.create_security_group_rule.return_value = {
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
index 80a97d2..9cb315c 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
@@ -382,7 +382,7 @@ class FakeLBaaSDriver(drv_base.LBaaSDriver):
                                      id=str(uuid.uuid4()))
 
     def ensure_member(self, loadbalancer, pool, subnet_id, ip, port,
-                      target_ref_namespace, target_ref_name
+                      target_ref_namespace, target_ref_name, listener_port=None
                       ):
         name = "%s:%s:%s" % (loadbalancer.name, ip, port)
         return obj_lbaas.LBaaSMember(name=name,
diff --git a/kuryr_kubernetes/utils.py b/kuryr_kubernetes/utils.py
index c367968..8d16e68 100644
--- a/kuryr_kubernetes/utils.py
+++ b/kuryr_kubernetes/utils.py
@@ -16,6 +16,7 @@ import time
 
 import requests
 
+from neutronclient.common import exceptions as n_exc
 from os_vif import objects
 from oslo_cache import core as cache
 from oslo_config import cfg
@@ -161,6 +162,17 @@ def get_subnet(subnet_id):
     return network
 
 
+@MEMOIZE
+def get_subnet_cidr(subnet_id):
+    neutron = clients.get_neutron_client()
+    try:
+        subnet_obj = neutron.show_subnet(subnet_id)
+    except n_exc.NeutronClientException:
+        LOG.exception("Subnet %s CIDR not found!", subnet_id)
+        raise
+    return subnet_obj.get('subnet')['cidr']
+
+
 def extract_pod_annotation(annotation):
     obj = objects.base.VersionedObject.obj_from_primitive(annotation)
     # FIXME(dulek): This is code to maintain compatibility with Queens. We can
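
Moving _get_subnet_cidr from the LBaaS driver into kuryr_kubernetes.utils
also adds the @MEMOIZE decorator, so a given subnet's CIDR is fetched from
Neutron once and then served from cache on later listener and policy
events. A standalone sketch of the caching effect, with functools.lru_cache
standing in for Kuryr's oslo.cache-backed MEMOIZE and a dict standing in
for Neutron:

    import functools

    SUBNETS = {'svc-subnet': {'subnet': {'cidr': '10.0.0.64/26'}}}

    @functools.lru_cache()  # stand-in for Kuryr's MEMOIZE
    def get_subnet_cidr(subnet_id):
        # The real helper calls neutron.show_subnet(subnet_id) here.
        return SUBNETS[subnet_id]['subnet']['cidr']

    print(get_subnet_cidr('svc-subnet'))  # backend hit (here: dict lookup)
    print(get_subnet_cidr('svc-subnet'))  # served from cache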