Ensure NP changes are applied to services

When a Network Policy is changed, services must also be updated,
deleting the unnecessary rules that no longer match the NP
and creating the needed ones.

Closes-Bug: #1811242

Partially Implements: blueprint k8s-network-policies

Change-Id: I800477d08fd1f46c2a94d3653496f8f1188a3844
This commit is contained in:
Maysa Macedo 2019-01-10 13:07:03 +00:00 committed by Luis Tomas Bolivar
parent c6253fedef
commit 70692f86a4
5 changed files with 195 additions and 12 deletions

View File

@ -690,7 +690,7 @@ class LBaaSDriver(DriverBase):
raise NotImplementedError()
@abc.abstractmethod
def is_pool_used_by_other_l7policies(l7policy, pool):
def is_pool_used_by_other_l7policies(self, l7policy, pool):
"""Checks if pool used by other L7policy.
:param l7policy: `LBaaSL7Policy` object
@ -699,6 +699,15 @@ class LBaaSDriver(DriverBase):
"""
raise NotImplementedError()
@abc.abstractmethod
def update_lbaas_sg(self, service, sgs):
    """Update the security group rules associated to the loadbalancer.

    Reconciles the loadbalancer security group so that its rules match
    the given security groups (e.g. after a Network Policy change).

    :param service: dict containing the K8S service object
    :param sgs: list of security group ids to use for updating the rules
    """
    raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class VIFPoolDriver(PodVIFDriver):

View File

@ -27,6 +27,7 @@ from oslo_log import log as logging
from oslo_utils import timeutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.objects import lbaas as obj_lbaas
@ -121,7 +122,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
neutron.update_port(
vip_port.get('id'),
{'port': {
'security_groups': loadbalancer.security_groups}})
'security_groups': [sg_id]}})
try:
neutron.create_security_group_rule({
@ -140,19 +141,37 @@ class LBaaSv2Driver(base.LBaaSDriver):
'for listener %s.', listener.name)
def _apply_members_security_groups(self, loadbalancer, port, target_port,
protocol, sg_rule_name):
protocol, sg_rule_name, new_sgs=None):
LOG.debug("Applying members security groups.")
neutron = clients.get_neutron_client()
if CONF.octavia_defaults.sg_mode == 'create':
sg_id = self._find_listeners_sg(loadbalancer)
if new_sgs:
lb_name = sg_rule_name.split(":")[0]
lb_sg = self._find_listeners_sg(loadbalancer, lb_name=lb_name)
else:
lb_sg = self._find_listeners_sg(loadbalancer)
else:
sg_id = self._get_vip_port(loadbalancer).get('security_groups')[0]
lb_sg = self._get_vip_port(loadbalancer).get('security_groups')[0]
lbaas_sg_rules = neutron.list_security_group_rules(
security_group_id=sg_id)
security_group_id=lb_sg)
all_pod_rules = []
add_default_rules = False
if new_sgs:
sgs = new_sgs
else:
sgs = loadbalancer.security_groups
# Check if Network Policy allows listener on the pods
for sg in loadbalancer.security_groups:
if sg != sg_id:
for sg in sgs:
if sg != lb_sg:
if sg in config.CONF.neutron_defaults.pod_security_groups:
# If default sg is set, this means there is no NP
# associated to the service, thus falling back to the
# default listener rules
add_default_rules = True
break
rules = neutron.list_security_group_rules(
security_group_id=sg)
for rule in rules['security_group_rules']:
@ -172,6 +191,8 @@ class LBaaSv2Driver(base.LBaaSDriver):
continue
all_pod_rules.append(rule)
try:
LOG.debug("Creating LBaaS sg rule for sg: %r",
lb_sg)
neutron.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
@ -180,7 +201,7 @@ class LBaaSv2Driver(base.LBaaSDriver):
'protocol': protocol,
'remote_ip_prefix': rule[
'remote_ip_prefix'],
'security_group_id': sg_id,
'security_group_id': lb_sg,
'description': sg_rule_name,
},
})
@ -190,13 +211,38 @@ class LBaaSv2Driver(base.LBaaSDriver):
'group rule for listener %s.',
sg_rule_name)
# Delete LBaaS sg rules that do not match NP
for rule in lbaas_sg_rules['security_group_rules']:
if (rule.get('protocol') != protocol.lower() or
rule.get('port_range_min') != port or
rule.get('direction') != 'ingress' or
not rule.get('remote_ip_prefix')):
if all_pod_rules and self._is_default_rule(rule):
LOG.debug("Removing default LBaaS sg rule for sg: %r",
lb_sg)
neutron.delete_security_group_rule(rule['id'])
continue
self._delete_rule_if_no_match(rule, all_pod_rules)
if add_default_rules:
try:
LOG.debug("Restoring default LBaaS sg rule for sg: %r", lb_sg)
neutron.create_security_group_rule({
'security_group_rule': {
'direction': 'ingress',
'port_range_min': port,
'port_range_max': port,
'protocol': protocol,
'security_group_id': lb_sg,
'description': sg_rule_name,
},
})
except n_exc.NeutronClientException as ex:
if ex.status_code != requests.codes.conflict:
LOG.exception('Failed when creating security '
'group rule for listener %s.',
sg_rule_name)
def _delete_rule_if_no_match(self, rule, all_pod_rules):
for pod_rule in all_pod_rules:
if pod_rule['remote_ip_prefix'] == rule['remote_ip_prefix']:
@ -205,6 +251,12 @@ class LBaaSv2Driver(base.LBaaSDriver):
LOG.debug("Deleting sg rule: %r", rule['id'])
neutron.delete_security_group_rule(rule['id'])
def _is_default_rule(self, rule):
if (rule.get('direction') == 'ingress' and
not rule.get('remote_ip_prefix')):
return True
return False
def _remove_default_octavia_rules(self, sg_id, listener):
neutron = clients.get_neutron_client()
for remaining in self._provisioning_timer(
@ -678,8 +730,15 @@ class LBaaSv2Driver(base.LBaaSDriver):
if interval:
time.sleep(interval)
def _find_listeners_sg(self, loadbalancer):
def _find_listeners_sg(self, loadbalancer, lb_name=None):
neutron = clients.get_neutron_client()
if lb_name:
sgs = neutron.list_security_groups(
name=lb_name, project_id=loadbalancer.project_id)
# NOTE(ltomasbo): lb_name parameter is only passed when sg_mode
# is 'create' and in that case there is only one sg associated
# to the loadbalancer
return sgs['security_groups'][0]['id']
try:
sgs = neutron.list_security_groups(
name=loadbalancer.name, project_id=loadbalancer.project_id)
@ -837,3 +896,25 @@ class LBaaSv2Driver(base.LBaaSDriver):
entry['id'] != l7policy.id):
return True
return False
def update_lbaas_sg(self, service, sgs):
    """Update the loadbalancer security group rules for *service*.

    For every exposed service port, re-applies the member security group
    rules using *sgs* as the new set of security groups. Services without
    an LBaaS spec annotation are silently skipped.

    :param service: dict containing the K8S service object
    :param sgs: list of security group ids to use for updating the rules
    """
    LOG.debug('Setting SG for LBaaS VIP port')

    metadata = service['metadata']
    lbaas_name = "%s/%s" % (metadata['namespace'], metadata['name'])

    lbaas = utils.get_lbaas_spec(service)
    if not lbaas:
        # No LBaaS spec annotated yet; nothing to reconcile.
        return

    for svc_port in service['spec']['ports']:
        protocol = svc_port['protocol']
        exposed_port = svc_port['port']
        # Rule description encodes "namespace/name:protocol:port".
        rule_name = "%s:%s:%s" % (lbaas_name, protocol, exposed_port)
        self._apply_members_security_groups(
            lbaas, exposed_port, svc_port['targetPort'],
            protocol, rule_name, sgs)

View File

@ -20,6 +20,7 @@ from kuryr_kubernetes import clients
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes import utils
@ -56,6 +57,7 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
self._drv_vif_pool.set_vif_driver()
self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
self._drv_lbaas = drivers.LBaaSDriver.get_instance()
def on_present(self, policy):
LOG.debug("Created or updated: %s", policy)
@ -76,6 +78,19 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
if pods_to_update:
# NOTE(ltomasbo): only need to change services if the pods that
# they point to are updated
services = self._get_services(policy['metadata']['namespace'])
for service in services.get('items'):
# TODO(ltomasbo): Skip other services that are not affected
# by the policy
if service['metadata']['name'] == 'kubernetes':
continue
sgs = self._drv_svc_sg.get_security_groups(service,
project_id)
self._drv_lbaas.update_lbaas_sg(service, sgs)
def on_deleted(self, policy):
LOG.debug("Deleted network policy: %s", policy)
project_id = self._drv_project.get_project(policy)
@ -98,6 +113,13 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
self._drv_policy.release_network_policy(netpolicy_crd)
services = self._get_services(policy['metadata']['namespace'])
for service in services.get('items'):
if service['metadata']['name'] == 'kubernetes':
continue
sgs = self._drv_svc_sg.get_security_groups(service, project_id)
self._drv_lbaas.update_lbaas_sg(service, sgs)
def is_ready(self, quota):
if not utils.has_kuryr_crd(k_const.K8S_API_CRD_KURYRNETPOLICIES):
return False
@ -111,3 +133,15 @@ class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
if utils.has_limit(sg_quota):
return utils.is_available('security_groups', sg_quota, sg_func)
return True
def _get_services(self, namespace):
    """Return the K8s services defined in *namespace*.

    :param namespace: name of the namespace to list services from
    :returns: dict with the API response (services under 'items')
    :raises exceptions.K8sClientException: if the API call fails
    """
    k8s = clients.get_kubernetes_client()
    url = '{}/namespaces/{}/services'.format(k_const.K8S_API_BASE,
                                             namespace)
    try:
        return k8s.get(url)
    except exceptions.K8sClientException:
        # Log with traceback, then propagate to the caller.
        LOG.exception("Kubernetes Client Exception.")
        raise

View File

@ -64,6 +64,8 @@ class TestPolicyHandler(test_base.TestCase):
spec=drivers.ServiceSecurityGroupsDriver)
self._handler._drv_vif_pool = mock.MagicMock(
spec=drivers.VIFPoolDriver)
self._handler._drv_lbaas = mock.Mock(
spec=drivers.LBaaSDriver)
self._get_project = self._handler._drv_project.get_project
self._get_project.return_value = self._project_id
@ -74,6 +76,8 @@ class TestPolicyHandler(test_base.TestCase):
spec=drivers.PodVIFDriver)
self._update_vif_sgs = self._handler._drv_vif_pool.update_vif_sgs
self._update_vif_sgs.return_value = None
self._update_lbaas_sg = self._handler._drv_lbaas.update_lbaas_sg
self._update_lbaas_sg.return_value = None
def _get_knp_obj(self):
knp_obj = {
@ -89,13 +93,15 @@ class TestPolicyHandler(test_base.TestCase):
}}
return knp_obj
@mock.patch.object(drivers.LBaaSDriver, 'get_instance')
@mock.patch.object(drivers.ServiceSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.PodSecurityGroupsDriver, 'get_instance')
@mock.patch.object(drivers.VIFPoolDriver, 'get_instance')
@mock.patch.object(drivers.NetworkPolicyDriver, 'get_instance')
@mock.patch.object(drivers.NetworkPolicyProjectDriver, 'get_instance')
def test_init(self, m_get_project_driver, m_get_policy_driver,
m_get_vif_driver, m_get_pod_sg_driver, m_get_svc_sg_driver):
m_get_vif_driver, m_get_pod_sg_driver, m_get_svc_sg_driver,
m_get_lbaas_driver):
handler = policy.NetworkPolicyHandler()
m_get_project_driver.assert_called_once()
@ -103,6 +109,7 @@ class TestPolicyHandler(test_base.TestCase):
m_get_vif_driver.assert_called_once()
m_get_pod_sg_driver.assert_called_once()
m_get_svc_sg_driver.assert_called_once()
m_get_lbaas_driver.assert_called_once()
self.assertEqual(m_get_project_driver.return_value,
handler._drv_project)
@ -124,6 +131,7 @@ class TestPolicyHandler(test_base.TestCase):
sg1 = [mock.sentinel.sg1]
sg2 = [mock.sentinel.sg2]
self._get_security_groups.side_effect = [sg1, sg2]
self._handler._get_services.return_value = {'items': []}
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
namespaced_pods.assert_not_called()
@ -137,6 +145,7 @@ class TestPolicyHandler(test_base.TestCase):
calls = [mock.call(modified_pod, sg1), mock.call(match_pod, sg2)]
self._update_vif_sgs.assert_has_calls(calls)
self._update_lbaas_sg.assert_not_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
def test_on_present_without_knps_on_namespace(self, m_host_network):
@ -151,6 +160,7 @@ class TestPolicyHandler(test_base.TestCase):
sg2 = [mock.sentinel.sg2]
sg3 = [mock.sentinel.sg3]
self._get_security_groups.side_effect = [sg2, sg3]
self._handler._get_services.return_value = {'items': []}
policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
ensure_nw_policy.assert_called_once_with(self._policy,
@ -164,6 +174,40 @@ class TestPolicyHandler(test_base.TestCase):
calls = [mock.call(modified_pod, sg2),
mock.call(match_pod, sg3)]
self._update_vif_sgs.assert_has_calls(calls)
self._update_lbaas_sg.assert_not_called()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
def test_on_present_with_services(self, m_host_network):
    # Verify that when a NP update modifies pods and the namespace has a
    # (non-'kubernetes') service, the LBaaS SG update is triggered.
    modified_pod = mock.sentinel.modified_pod
    match_pod = mock.sentinel.match_pod
    m_host_network.return_value = False
    # NP CRDs already exist on the namespace, so namespaced_pods must
    # not be queried.
    knp_on_ns = self._handler._drv_policy.knps_on_namespace
    knp_on_ns.return_value = True
    namespaced_pods = self._handler._drv_policy.namespaced_pods
    ensure_nw_policy = self._handler._drv_policy.ensure_network_policy
    ensure_nw_policy.return_value = [modified_pod]
    affected_pods = self._handler._drv_policy.affected_pods
    affected_pods.return_value = [match_pod]
    sg1 = [mock.sentinel.sg1]
    sg2 = [mock.sentinel.sg2]
    # One SG set per pod returned by the svc SG driver, in call order.
    self._get_security_groups.side_effect = [sg1, sg2]
    # One regular service present -> update_lbaas_sg must be called.
    service = {'metadata': {'name': 'service-test'}}
    self._handler._get_services.return_value = {'items': [service]}
    policy.NetworkPolicyHandler.on_present(self._handler, self._policy)
    namespaced_pods.assert_not_called()
    ensure_nw_policy.assert_called_once_with(self._policy,
                                            self._project_id)
    affected_pods.assert_called_once_with(self._policy)
    calls = [mock.call(modified_pod, self._project_id),
             mock.call(match_pod, self._project_id)]
    self._get_security_groups.assert_has_calls(calls)
    calls = [mock.call(modified_pod, sg1), mock.call(match_pod, sg2)]
    self._update_vif_sgs.assert_has_calls(calls)
    # The service SGs must be pushed down to the loadbalancer.
    self._update_lbaas_sg.assert_called_once()
@mock.patch('kuryr_kubernetes.controller.drivers.utils.is_host_network')
def test_on_deleted(self, m_host_network):
@ -178,6 +222,7 @@ class TestPolicyHandler(test_base.TestCase):
sg1 = [mock.sentinel.sg1]
sg2 = [mock.sentinel.sg2]
self._get_security_groups.side_effect = [sg1, sg2]
self._handler._get_services.return_value = {'items': []}
release_nw_policy = self._handler._drv_policy.release_network_policy
knp_on_ns = self._handler._drv_policy.knps_on_namespace
knp_on_ns.return_value = False
@ -189,3 +234,4 @@ class TestPolicyHandler(test_base.TestCase):
self._get_security_groups.assert_called_once_with(match_pod,
self._project_id)
self._update_vif_sgs.assert_called_once_with(match_pod, sg1)
self._update_lbaas_sg.assert_not_called()

View File

@ -24,11 +24,12 @@ from oslo_log import log
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import constants
from kuryr_kubernetes import exceptions
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes.objects import vif
from kuryr_kubernetes import os_vif_util
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@ -210,3 +211,15 @@ def has_kuryr_crd(crd_url):
" CRD. %s" % exceptions.K8sClientException)
return False
return True
def get_lbaas_spec(service):
    """Return the LBaaSServiceSpec annotated on *service*, if any.

    :param service: dict containing the K8S service object
    :returns: LBaaSServiceSpec object built from the service annotation,
              or None when the annotation is not present
    """
    try:
        annotations = service['metadata']['annotations']
        raw_spec = annotations[constants.K8S_ANNOTATION_LBAAS_SPEC]
    except KeyError:
        # Service not annotated (yet) with an LBaaS spec.
        return None
    spec = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(
        jsonutils.loads(raw_spec))
    LOG.debug("Got LBaaSServiceSpec from annotation: %r", spec)
    return spec