From 300dc36b062aaa2c5eb1c3ee2f8c041e1e33a99d Mon Sep 17 00:00:00 2001
From: Yossi Boaron
Date: Tue, 4 Dec 2018 13:47:37 +0200
Subject: [PATCH] Allocate service FIP after LB was provisioned

This patch defers FIP allocation until all LB components have been
provisioned. With this approach the FIP is allocated just before Kuryr
annotates the endpoints details, which reduces the chance of orphaned
FIPs if the kuryr-controller restarts.
In addition, FIP allocation now checks whether a FIP is already
allocated for the LB VIP before allocating a new one.

Change-Id: Idb734f81a1f4fc60155b0700de275d0b01f52a30
Closes-Bug: 1806647
---
 kuryr_kubernetes/controller/drivers/base.py   |  4 +-
 .../controller/drivers/lb_public_ip.py        | 13 +++--
 .../controller/drivers/public_ip.py           | 31 ++++++++---
 kuryr_kubernetes/controller/handlers/lbaas.py | 35 ++++++------
 .../unit/controller/handlers/test_lbaas.py    | 53 +++++++++++++++----
 5 files changed, 95 insertions(+), 41 deletions(-)
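Note (illustration only, not part of the change): the new handler flow
acquires the FIP only after the LB members are synced and keys it to the
LB VIP port. Below is a minimal runnable sketch of that ordering, with the
handler and driver faked via unittest.mock; the attribute and method names
mirror the diff below, while the function name and mock wiring are
hypothetical.

from unittest import mock


def reconcile_service(handler, endpoints, lbaas_state, lbaas_spec):
    # FIP work only starts once all LB components are in place.
    if not handler._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
        return
    if lbaas_state.service_pub_ip_info is None:
        pub_ip_info = handler._drv_service_pub_ip.acquire_service_pub_ip_info(
            lbaas_spec.type, lbaas_spec.lb_ip, lbaas_spec.project_id,
            lbaas_state.loadbalancer.port_id)
        if pub_ip_info:
            # Associate the FIP with the LB VIP port, record it in the
            # state and only then expose it in the K8s service status.
            handler._drv_service_pub_ip.associate_pub_ip(
                pub_ip_info, lbaas_state.loadbalancer.port_id)
            lbaas_state.service_pub_ip_info = pub_ip_info
            handler._update_lb_status(endpoints, pub_ip_info.ip_addr)


if __name__ == '__main__':
    handler = mock.Mock()
    handler._sync_lbaas_members.return_value = True
    lbaas_state = mock.Mock(service_pub_ip_info=None)
    lbaas_spec = mock.Mock(type='LoadBalancer', lb_ip=None, project_id='p1')
    reconcile_service(handler, mock.sentinel.endpoints, lbaas_state,
                      lbaas_spec)
    handler._drv_service_pub_ip.associate_pub_ip.assert_called_once()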
diff --git a/kuryr_kubernetes/controller/drivers/base.py b/kuryr_kubernetes/controller/drivers/base.py
index 8f65e789d..68db9ea7b 100644
--- a/kuryr_kubernetes/controller/drivers/base.py
+++ b/kuryr_kubernetes/controller/drivers/base.py
@@ -681,12 +681,14 @@ class ServicePubIpDriver(DriverBase):
     ALIAS = 'service_public_ip'

     @abc.abstractmethod
-    def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id):
+    def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id,
+                                    port_id_to_be_associated=None):
         """Get k8s service loadbalancer IP info based on service spec

         :param spec_type: service.spec.type field
         :param spec_lb_ip: service spec LoadBlaceIP field
         :param project_id: openstack project id
+        :param port_id_to_be_associated: port id to associate
         """
         raise NotImplementedError()

diff --git a/kuryr_kubernetes/controller/drivers/lb_public_ip.py b/kuryr_kubernetes/controller/drivers/lb_public_ip.py
index 3fc32d8c5..876899faf 100644
--- a/kuryr_kubernetes/controller/drivers/lb_public_ip.py
+++ b/kuryr_kubernetes/controller/drivers/lb_public_ip.py
@@ -39,14 +39,16 @@ class FloatingIpServicePubIPDriver(base.ServicePubIpDriver):
         super(FloatingIpServicePubIPDriver, self).__init__()
         self._drv_pub_ip = public_ip.FipPubIpDriver()

-    def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id):
+    def acquire_service_pub_ip_info(self, spec_type, spec_lb_ip, project_id,
+                                    port_id_to_be_associated=None):

         if spec_type != 'LoadBalancer':
             return None

         if spec_lb_ip:
             user_specified_ip = spec_lb_ip.format()
-            res_id = self._drv_pub_ip.is_ip_available(user_specified_ip)
+            res_id = self._drv_pub_ip.is_ip_available(user_specified_ip,
+                                                      port_id_to_be_associated)
             if res_id:
                 service_pub_ip_info = (obj_lbaas.LBaaSPubIp(
                                        ip_id=res_id,
@@ -69,9 +71,10 @@
                 cfg.OptGroup('neutron_defaults'))

         res_id, alloc_ip_addr = (
-            self._drv_pub_ip.allocate_ip(public_network_id, project_id,
-                                         pub_subnet_id=public_subnet_id,
-                                         description='kuryr_lb'))
+            self._drv_pub_ip.allocate_ip(
+                public_network_id, project_id, pub_subnet_id=public_subnet_id,
+                description='kuryr_lb',
+                port_id_to_be_associated=port_id_to_be_associated))
         service_pub_ip_info = obj_lbaas.LBaaSPubIp(ip_id=res_id,
                                                    ip_addr=alloc_ip_addr,
                                                    alloc_method='pool')
diff --git a/kuryr_kubernetes/controller/drivers/public_ip.py b/kuryr_kubernetes/controller/drivers/public_ip.py
index bbbb59018..e68468fbf 100644
--- a/kuryr_kubernetes/controller/drivers/public_ip.py
+++ b/kuryr_kubernetes/controller/drivers/public_ip.py
@@ -26,10 +26,11 @@ class BasePubIpDriver(object):
     """Base class for public IP functionality."""

     @abc.abstractmethod
-    def is_ip_available(self, ip_addr):
+    def is_ip_available(self, ip_addr, port_id_to_be_associated):
         """check availability of ip address

         :param ip_address:
+        :param port_id_to_be_associated
         :returns res_id in case ip is available returns resources id
         else None

@@ -37,13 +38,14 @@

     @abc.abstractmethod
     def allocate_ip(self, pub_net_id, project_id, pub_subnet_id=None,
-                    description=None):
+                    description=None, port_id_to_be_associated=None):
         """allocate ip address from public network id

         :param pub_net_id: public network id
         :param project_id:
         :param pub_subnet_id: public subnet id (Optional)
         :param description: string describing request (Optional)
+        :param port_id_to_be_associated: (optional)
         :returns res_id , ip_addr
         :res_id - resource id
         :ip_addr - ip aaddress
@@ -84,7 +86,7 @@
 class FipPubIpDriver(BasePubIpDriver):
     """Floating IP implementation for public IP capability ."""

-    def is_ip_available(self, ip_addr):
+    def is_ip_available(self, ip_addr, port_id_to_be_associated=None):
         if ip_addr:
             neutron = clients.get_neutron_client()
             floating_ips_list = neutron.list_floatingips(
@@ -92,9 +94,11 @@
             for entry in floating_ips_list['floatingips']:
                 if not entry:
                     continue
-                if (entry['floating_ip_address'] == ip_addr and
-                        not entry['port_id']):
-                    return entry['id']
+                if (entry['floating_ip_address'] == ip_addr):
+                    if not entry['port_id'] or (
+                            port_id_to_be_associated is not None
+                            and entry['port_id'] == port_id_to_be_associated):
+                        return entry['id']
             # floating IP not available
             LOG.error("Floating IP=%s not available", ip_addr)
         else:
@@ -102,8 +106,21 @@
         return None

     def allocate_ip(self, pub_net_id, project_id, pub_subnet_id=None,
-                    description=None):
+                    description=None, port_id_to_be_associated=None):
         neutron = clients.get_neutron_client()
+
+        if port_id_to_be_associated is not None:
+            floating_ips_list = neutron.list_floatingips(
+                port_id=port_id_to_be_associated)
+            for entry in floating_ips_list['floatingips']:
+                if not entry:
+                    continue
+                if (entry['floating_ip_address']):
+                    LOG.debug('FIP %s already allocated to port %s',
+                              entry['floating_ip_address'],
+                              port_id_to_be_associated)
+                    return entry['id'], entry['floating_ip_address']
+
         request = {'floatingip': {
             'tenant_id': project_id,
             'project_id': project_id,
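Note (illustration only, not part of the change): the allocate_ip() hunk
above boils down to "reuse the FIP already attached to the VIP port,
otherwise create one". Below is a standalone sketch of that decision with
the Neutron client faked so it runs without a cloud; the helper name is
hypothetical and the create request body follows the standard Neutron
floating-IP API rather than quoting the driver verbatim.

# 'neutron' is anything exposing list_floatingips/create_floatingip
# (python-neutronclient compatible); here it is a mock.
from unittest import mock


def get_or_create_fip(neutron, pub_net_id, project_id, vip_port_id=None):
    if vip_port_id is not None:
        fips = neutron.list_floatingips(port_id=vip_port_id)['floatingips']
        for entry in fips:
            if entry and entry.get('floating_ip_address'):
                # A FIP is already bound to the VIP port -- reuse it.
                return entry['id'], entry['floating_ip_address']
    # No FIP yet for this port: create one on the public network.
    response = neutron.create_floatingip({'floatingip': {
        'tenant_id': project_id,
        'project_id': project_id,
        'floating_network_id': pub_net_id}})['floatingip']
    return response['id'], response['floating_ip_address']


if __name__ == '__main__':
    neutron = mock.Mock()
    neutron.list_floatingips.return_value = {'floatingips': [
        {'id': 'fip-1', 'floating_ip_address': '172.24.4.10',
         'port_id': 'vip-port-1'}]}
    # The existing FIP on the VIP port is reused, nothing new is created.
    assert get_or_create_fip(neutron, 'net-1', 'proj-1',
                             vip_port_id='vip-port-1') == ('fip-1',
                                                           '172.24.4.10')
    neutron.create_floatingip.assert_not_called()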
diff --git a/kuryr_kubernetes/controller/handlers/lbaas.py b/kuryr_kubernetes/controller/handlers/lbaas.py
index 36114d4d5..263ec3387 100644
--- a/kuryr_kubernetes/controller/handlers/lbaas.py
+++ b/kuryr_kubernetes/controller/handlers/lbaas.py
@@ -248,14 +248,23 @@ class LoadBalancerHandler(k8s_base.ResourceEventHandler):
         if not lbaas_state:
             lbaas_state = obj_lbaas.LBaaSState()

-        prev_service_pub_ip_info = lbaas_state.service_pub_ip_info
         if self._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
-            # For LoadBalancer service type, update k8s-service status with
-            # floating IP address if needed.
-            if (prev_service_pub_ip_info != lbaas_state.service_pub_ip_info
-                    and lbaas_state.service_pub_ip_info is not None):
-                self._update_lb_status(
-                    endpoints, lbaas_state.service_pub_ip_info.ip_addr)
+            # Note(yboaron) For LoadBalancer services, we should allocate FIP,
+            # associate it to LB VIP and update K8S service status
+            if lbaas_state.service_pub_ip_info is None:
+                service_pub_ip_info = (
+                    self._drv_service_pub_ip.acquire_service_pub_ip_info(
+                        lbaas_spec.type,
+                        lbaas_spec.lb_ip,
+                        lbaas_spec.project_id,
+                        lbaas_state.loadbalancer.port_id))
+                if service_pub_ip_info:
+                    self._drv_service_pub_ip.associate_pub_ip(
+                        service_pub_ip_info, lbaas_state.loadbalancer.port_id)
+                    lbaas_state.service_pub_ip_info = service_pub_ip_info
+                    self._update_lb_status(
+                        endpoints,
+                        lbaas_state.service_pub_ip_info.ip_addr)
             # REVISIT(ivc): since _sync_lbaas_members is responsible for
             # creating all lbaas components (i.e. load balancer, listeners,
             # pools, members), it is currently possible for it to fail (due
@@ -588,18 +597,6 @@
                 security_groups_ids=lbaas_spec.security_groups_ids,
                 service_type=lbaas_spec.type,
                 provider=self._lb_provider)
-            if lbaas_state.service_pub_ip_info is None:
-                service_pub_ip_info = (
-                    self._drv_service_pub_ip.acquire_service_pub_ip_info(
-                        lbaas_spec.type,
-                        lbaas_spec.lb_ip,
-                        lbaas_spec.project_id))
-                if service_pub_ip_info:
-                    # if loadbalancerIP should be defined for lbaas,
-                    # associate it to lbaas VIP
-                    self._drv_service_pub_ip.associate_pub_ip(
-                        service_pub_ip_info, lb.port_id)
-                    lbaas_state.service_pub_ip_info = service_pub_ip_info
             changed = True
         elif lbaas_state.service_pub_ip_info:
             self._drv_service_pub_ip.release_pub_ip(
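Note (illustration only, not part of the change): deferring the FIP step
until after _sync_lbaas_members means a kuryr-controller restart between
LB creation and endpoint annotation simply repeats the acquire-by-VIP-port
lookup, so retries converge on a single FIP instead of leaking one per
attempt. A toy model of that idempotency, with a dict standing in for
Neutron state:

# Toy model: allocate_for_port() stands in for the driver's
# acquire/allocate path keyed by the LB VIP port id.
import itertools

_fip_by_port = {}          # fake "Neutron" state
_ids = itertools.count(1)


def allocate_for_port(vip_port_id):
    # Reuse the FIP already recorded for this VIP port, else "create" one.
    if vip_port_id not in _fip_by_port:
        _fip_by_port[vip_port_id] = 'fip-%d' % next(_ids)
    return _fip_by_port[vip_port_id]


# A first pass and a simulated retry after a controller restart
# return the same FIP: no orphan is left behind.
assert allocate_for_port('vip-port-1') == allocate_for_port('vip-port-1')
assert len(_fip_by_port) == 1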
diff --git a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
index 2e2c1112c..a5dc11dd2 100644
--- a/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
+++ b/kuryr_kubernetes/tests/unit/controller/handlers/test_lbaas.py
@@ -498,15 +498,27 @@ class TestLoadBalancerHandler(test_base.TestCase):

     def test_on_present(self):
         lbaas_spec = mock.sentinel.lbaas_spec
+        lbaas_spec.type = 'DummyType'
+        lbaas_spec.lb_ip = "1.2.3.4"
+        lbaas_spec.project_id = 12345678
+
         lbaas_state = mock.sentinel.lbaas_state
         lbaas_state.service_pub_ip_info = None
+        loadbalancer = mock.Mock()
+        loadbalancer.port_id = 12345678
+        lbaas_state.loadbalancer = loadbalancer
         endpoints = mock.sentinel.endpoints

+        m_drv_service_pub_ip = mock.Mock()
+        m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
+        m_drv_service_pub_ip.associate_pub_ip.return_value = True
+
         m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
         m_handler._get_lbaas_spec.return_value = lbaas_spec
         m_handler._should_ignore.return_value = False
         m_handler._get_lbaas_state.return_value = lbaas_state
         m_handler._sync_lbaas_members.return_value = True
+        m_handler._drv_service_pub_ip = m_drv_service_pub_ip

         h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)

@@ -520,6 +532,22 @@
         m_handler._update_lb_status.assert_not_called()

     def _fake_sync_lbaas_members(self, endpoints, lbaas_state, lbaas_spec):
+        loadbalancer = mock.Mock()
+        loadbalancer.port_id = 12345678
+        lbaas_state.loadbalancer = loadbalancer
+        lbaas_state.service_pub_ip_info = None
+        return True
+
+    def test_on_present_loadbalancer_service(self):
+        lbaas_spec = mock.sentinel.lbaas_spec
+        lbaas_spec.type = 'LoadBalancer'
+        lbaas_spec.lb_ip = "1.2.3.4"
+        lbaas_spec.project_id = 12345678
+
+        lbaas_state = mock.sentinel.lbaas_state
+        lbaas_state.service_pub_ip_info = None
+        endpoints = mock.sentinel.endpoints
+
         floating_ip = {'floating_ip_address': '1.2.3.5',
                        'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}

@@ -527,20 +555,17 @@
             ip_id=floating_ip['id'],
             ip_addr=floating_ip['floating_ip_address'],
             alloc_method='kk')
-        lbaas_state.service_pub_ip_info = service_pub_ip_info
-        return True
-
-    def test_on_present_loadbalancer_service(self):
-        lbaas_spec = mock.sentinel.lbaas_spec
-        lbaas_state = mock.sentinel.lbaas_state
-        lbaas_state.service_pub_ip_info = None
-        endpoints = mock.sentinel.endpoints
+        m_drv_service_pub_ip = mock.Mock()
+        m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = (
+            service_pub_ip_info)
+        m_drv_service_pub_ip.associate_pub_ip.return_value = True

         m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
         m_handler._get_lbaas_spec.return_value = lbaas_spec
         m_handler._should_ignore.return_value = False
         m_handler._get_lbaas_state.return_value = lbaas_state
         m_handler._sync_lbaas_members = self._fake_sync_lbaas_members
+        m_handler._drv_service_pub_ip = m_drv_service_pub_ip

         h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)

@@ -553,8 +578,18 @@

     def test_on_present_rollback(self):
         lbaas_spec = mock.sentinel.lbaas_spec
+        lbaas_spec.type = 'ClusterIp'
+        lbaas_spec.lb_ip = '1.2.3.4'
+        lbaas_spec.project_id = '12345678'
         lbaas_state = mock.sentinel.lbaas_state
         lbaas_state.service_pub_ip_info = None
+        loadbalancer = mock.Mock()
+        loadbalancer.port_id = 12345678
+        lbaas_state.loadbalancer = loadbalancer
+        m_drv_service_pub_ip = mock.Mock()
+        m_drv_service_pub_ip.acquire_service_pub_ip_info.return_value = None
+        m_drv_service_pub_ip.associate_pub_ip.return_value = True
+
         endpoints = mock.sentinel.endpoints

         m_handler = mock.Mock(spec=h_lbaas.LoadBalancerHandler)
@@ -564,7 +599,7 @@
         m_handler._sync_lbaas_members.return_value = True
         m_handler._set_lbaas_state.side_effect = (
             k_exc.K8sResourceNotFound('ep'))
-
+        m_handler._drv_service_pub_ip = m_drv_service_pub_ip
         h_lbaas.LoadBalancerHandler.on_present(m_handler, endpoints)

         m_handler._get_lbaas_spec.assert_called_once_with(endpoints)