From 361f7f7a2711f1a8e4da4006a51e43de9d31cded Mon Sep 17 00:00:00 2001 From: zhiyuan_cai Date: Thu, 28 Jul 2016 14:16:07 +0800 Subject: [PATCH] Support l3 networking in shared vlan network 1. What is the problem Shared vlan type driver has been merged, so we can run two VMs in the same network but across two pods. However, if we attach a network to the router, the tricircle plugin still checks if the network is bound to one AZ, so one network cannot cross different pods if we are going to attach it to a router. 2. What is the solution to the problem The reason we require the network to be bound to an AZ is that when the network is attached to a router, we know where to create the bottom network resources. To support l3 networking in a shared vlan network, we need to remove this restriction. In the previous patches[1, 2], we have already moved bottom router setup to an asynchronous job, so we just remove the AZ restriction and make the tricircle plugin and the nova_apigw use the job. Floating ip association and disassociation are also moved to the bottom router setup job. 3. What features need to be implemented in the Tricircle to realize the solution Now a network can be attached to a router without specifying an AZ, so l3 networking can work with a cross-pod network. 
[1] https://review.openstack.org/#/c/343568/ [2] https://review.openstack.org/#/c/345863/ Change-Id: I9aaf908a5de55575d63533f1574a0a6edb3c66b8 --- tricircle/common/constants.py | 2 + tricircle/network/exceptions.py | 2 +- tricircle/network/helper.py | 118 +++- tricircle/network/plugin.py | 318 +++------ tricircle/nova_apigw/controllers/server.py | 47 +- tricircle/tests/unit/network/test_helper.py | 64 ++ tricircle/tests/unit/network/test_plugin.py | 662 ++++++++++-------- .../nova_apigw/controllers/test_server.py | 199 ++++-- tricircle/tests/unit/xjob/test_xmanager.py | 31 +- tricircle/xjob/xmanager.py | 409 ++++++++--- 10 files changed, 1118 insertions(+), 734 deletions(-) create mode 100644 tricircle/tests/unit/network/test_helper.py diff --git a/tricircle/common/constants.py b/tricircle/common/constants.py index f619b6e..51470f7 100644 --- a/tricircle/common/constants.py +++ b/tricircle/common/constants.py @@ -58,6 +58,7 @@ ns_bridge_subnet_name = 'ns_bridge_subnet_%s' # project_id ns_bridge_port_name = 'ns_bridge_port_%s_%s_%s' dhcp_port_name = 'dhcp_port_%s' # subnet_id +interface_port_name = 'interface_%s_%s' # b_pod_id t_subnet_id MAX_INT = 0x7FFFFFFF expire_time = datetime.datetime(2000, 1, 1) @@ -70,6 +71,7 @@ JS_Fail = 'Fail' SP_EXTRA_ID = '00000000-0000-0000-0000-000000000000' TOP = 'top' +POD_NOT_SPECIFIED = 'not_specified_pod' # job type JT_ROUTER = 'router' diff --git a/tricircle/network/exceptions.py b/tricircle/network/exceptions.py index d2b2812..11ae2e9 100644 --- a/tricircle/network/exceptions.py +++ b/tricircle/network/exceptions.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from neutron.common import exceptions +from neutron_lib import exceptions from tricircle.common.i18n import _ diff --git a/tricircle/network/helper.py b/tricircle/network/helper.py index c7a7980..0477658 100644 --- a/tricircle/network/helper.py +++ b/tricircle/network/helper.py @@ -13,7 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. +import netaddr + from neutron_lib import constants +import neutronclient.common.exceptions as q_cli_exceptions from tricircle.common import client import tricircle.common.constants as t_constants @@ -21,9 +24,12 @@ import tricircle.common.context as t_context import tricircle.common.lock_handle as t_lock from tricircle.common import utils import tricircle.db.api as db_api +import tricircle.network.exceptions as t_network_exc # manually define these constants to avoid depending on neutron repos +# neutron.extensions.availability_zone.AZ_HINTS +AZ_HINTS = 'availability_zone_hints' EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN @@ -210,22 +216,44 @@ class NetworkHelper(object): return body @staticmethod - def get_create_subnet_body(project_id, t_subnet, b_net_id): + def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip): """Get request body to create bottom subnet :param project_id: project id :param t_subnet: top subnet dict :param b_net_id: bottom network id + :param gateway_ip: bottom gateway ip :return: request body to create bottom subnet """ + pools = t_subnet['allocation_pools'] + new_pools = [] + g_ip = netaddr.IPAddress(gateway_ip) + ip_found = False + for pool in pools: + if ip_found: + new_pools.append({'start': pool['start'], + 'end': pool['end']}) + continue + ip_range = netaddr.IPRange(pool['start'], pool['end']) + ip_num = len(ip_range) + for i, ip in enumerate(ip_range): + if g_ip == ip: + ip_found = True + if i > 0: + new_pools.append({'start': 
ip_range[0].format(), + 'end': ip_range[i - 1].format()}) + if i < ip_num - 1: + new_pools.append( + {'start': ip_range[i + 1].format(), + 'end': ip_range[ip_num - 1].format()}) body = { 'subnet': { 'network_id': b_net_id, 'name': t_subnet['id'], 'ip_version': t_subnet['ip_version'], 'cidr': t_subnet['cidr'], - 'gateway_ip': t_subnet['gateway_ip'], - 'allocation_pools': t_subnet['allocation_pools'], + 'gateway_ip': gateway_ip, + 'allocation_pools': new_pools, 'enable_dhcp': False, 'tenant_id': project_id } @@ -264,11 +292,40 @@ class NetworkHelper(object): body['port']['security_groups'] = b_security_group_ids return body - def prepare_bottom_network_subnets(self, t_ctx, project_id, pod, + def get_create_interface_body(self, project_id, t_net_id, b_pod_id, + t_subnet_id): + """Get request body to create top interface + + :param project_id: project id + :param t_net_id: top network id + :param b_pod_id: bottom pod id + :param t_subnet_id: top subnet id + :return: + """ + t_interface_name = t_constants.interface_port_name % (b_pod_id, + t_subnet_id) + t_interface_body = { + 'port': { + 'tenant_id': project_id, + 'admin_state_up': True, + 'name': t_interface_name, + 'network_id': t_net_id, + 'device_id': '', + 'device_owner': 'network:router_interface', + } + } + if self.call_obj: + t_interface_body['port'].update( + {'mac_address': constants.ATTR_NOT_SPECIFIED, + 'fixed_ips': constants.ATTR_NOT_SPECIFIED}) + return t_interface_body + + def prepare_bottom_network_subnets(self, t_ctx, q_ctx, project_id, pod, t_net, t_subnets): """Get or create bottom network, subnet and dhcp port :param t_ctx: tricircle context + :param q_ctx: neutron context :param project_id: project id :param pod: dict of bottom pod :param t_net: dict of top network @@ -295,8 +352,22 @@ class NetworkHelper(object): subnet_dhcp_map = {} for subnet in t_subnets: + # gateway + t_interface_name = t_constants.interface_port_name % ( + pod['pod_id'], subnet['id']) + + t_interface_body = 
self.get_create_interface_body( + project_id, t_net['id'], pod['pod_id'], subnet['id']) + + _, t_interface_id = self.prepare_top_element( + t_ctx, q_ctx, project_id, pod, {'id': t_interface_name}, + t_constants.RT_PORT, t_interface_body) + t_interface = self._get_top_element( + t_ctx, q_ctx, t_constants.RT_PORT, t_interface_id) + gateway_ip = t_interface['fixed_ips'][0]['ip_address'] + subnet_body = self.get_create_subnet_body( - project_id, subnet, b_net_id) + project_id, subnet, b_net_id, gateway_ip) _, b_subnet_id = self.prepare_bottom_element( t_ctx, project_id, pod, subnet, t_constants.RT_SUBNET, subnet_body) @@ -445,3 +516,40 @@ class NetworkHelper(object): project_id, t_dhcp_port, b_subnet_id, b_net_id) self.prepare_bottom_element(ctx, project_id, b_pod, t_dhcp_port, t_constants.RT_PORT, dhcp_port_body) + + @staticmethod + def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id, + fip_address, port_id): + try: + client.create_floatingips( + t_ctx, {'floatingip': {'floating_network_id': fip_net_id, + 'floating_ip_address': fip_address, + 'port_id': port_id}}) + except q_cli_exceptions.IpAddressInUseClient: + fips = client.list_floatingips(t_ctx, + [{'key': 'floating_ip_address', + 'comparator': 'eq', + 'value': fip_address}]) + if not fips: + # this is rare case that we got IpAddressInUseClient exception + # a second ago but now the floating ip is missing + raise t_network_exc.BottomPodOperationFailure( + resource='floating ip', pod_name=pod['pod_name']) + associated_port_id = fips[0].get('port_id') + if associated_port_id == port_id: + # the internal port associated with the existing fip is what + # we expect, just ignore this exception + pass + elif not associated_port_id: + # the existing fip is not associated with any internal port, + # update the fip to add association + client.update_floatingips(t_ctx, fips[0]['id'], + {'floatingip': {'port_id': port_id}}) + else: + raise + + def _get_top_element(self, t_ctx, q_ctx, _type, _id): + if 
self.call_obj: + return getattr(self.call_obj, 'get_%s' % _type)(q_ctx, _id) + else: + return getattr(self._get_client(), 'get_%ss' % _type)(t_ctx, _id) diff --git a/tricircle/network/plugin.py b/tricircle/network/plugin.py index 1561567..1238a2c 100644 --- a/tricircle/network/plugin.py +++ b/tricircle/network/plugin.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +import copy + from oslo_config import cfg import oslo_log.helpers as log_helpers from oslo_log import log @@ -34,7 +36,6 @@ from neutron.extensions import availability_zone as az_ext from neutron.extensions import external_net from neutron.extensions import l3 from neutron.extensions import providernet as provider -import neutron.plugins.common.constants as p_constants from neutron_lib import constants import neutronclient.common.exceptions as q_cli_exceptions @@ -403,6 +404,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, bottom_port_id = mappings[0][1] port = self._get_client(pod_name).get_ports( t_ctx, bottom_port_id) + # TODO(zhiyuan) handle the case that bottom port does not exist port['id'] = port_id if fields: port = dict( @@ -679,20 +681,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, def delete_router(self, context, _id): super(TricirclePlugin, self).delete_router(context, _id) - def _judge_network_across_pods(self, context, interface, add_by_port): - if add_by_port: - port = self.get_port(context, interface['port_id']) - net_id = port['network_id'] - else: - subnet = self.get_subnet(context, interface['subnet_id']) - net_id = subnet['network_id'] - network = self.get_network(context, net_id) - if len(network.get(az_ext.AZ_HINTS, [])) != 1: - # Currently not support cross pods l3 networking so - # raise an exception here - raise Exception('Cross pods L3 networking not support') - return network[az_ext.AZ_HINTS][0], network - def _prepare_top_element(self, t_ctx, q_ctx, project_id, pod, ele, _type, 
body): return self.helper.prepare_top_element( @@ -783,17 +771,24 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, b_port_id, is_ew) return super(TricirclePlugin, self).get_port(q_ctx, port_id) - @staticmethod - def _transfer_network_type(network_type): - network_type_map = {t_constants.NT_SHARED_VLAN: p_constants.TYPE_VLAN} - return network_type_map.get(network_type, network_type) - def _get_bottom_bridge_elements(self, q_ctx, project_id, pod, t_net, is_external, t_subnet, t_port): t_ctx = t_context.get_context_from_neutron_context(q_ctx) return self.helper.get_bottom_bridge_elements( t_ctx, project_id, pod, t_net, is_external, t_subnet, t_port) + def _get_net_pods_by_interface_info(self, t_ctx, q_ctx, add_by_port, + interface_info): + if add_by_port: + port = self.get_port(q_ctx, interface_info['port_id']) + net_id = port['network_id'] + else: + subnet = self.get_subnet(q_ctx, interface_info['subnet_id']) + net_id = subnet['network_id'] + mappings = db_api.get_bottom_mappings_by_top_id( + t_ctx, net_id, t_constants.RT_NETWORK) + return net_id, [mapping[0] for mapping in mappings] + # NOTE(zhiyuan) the origin implementation in l3_db uses port returned from # get_port in core plugin to check, change it to base plugin, since only # top port information should be checked. 
@@ -808,14 +803,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, raise exceptions.BadRequest(resource='router', msg=msg) return port - def _unbound_top_interface(self, context, router_id, port_id): - super(TricirclePlugin, self).update_port( - context, port_id, {'port': {'device_id': '', - 'device_owner': ''}}) - with context.session.begin(): - query = context.session.query(l3_db.RouterPort) - query.filter_by(port_id=port_id, router_id=router_id).delete() - def _add_router_gateway(self, context, router_id, router_data): # get top external network information ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id') @@ -918,15 +905,17 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, b_client = self._get_client(pod_name) b_client.action_routers(t_ctx, 'remove_gateway', b_router_id) - def _update_bottom_router_gateway(self, context, router_id, router_data): - ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id') - if ext_net_id: - self._add_router_gateway(context, router_id, router_data) - else: - self._remove_router_gateway(context, router_id) - def update_router(self, context, router_id, router): - router_data = router['router'] + # TODO(zhiyuan) handle the case that SNAT is disabled + # and check if bridge network solution works with IPv6 + router_data = copy.deepcopy(router['router']) + need_update_bottom = False + is_add = False + if attributes.is_attr_set(router_data.get(l3.EXTERNAL_GW_INFO)): + need_update_bottom = True + ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id') + if ext_net_id: + is_add = True # TODO(zhiyuan) solve ip address conflict issue # if user creates floating ip before set router gateway, we may trigger # ip address conflict here. let's say external cidr is 163.3.124.0/24, @@ -938,10 +927,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, # # before this issue is solved, user should set router gateway before # create floating ip. 
- if attributes.is_attr_set(router_data.get(l3.EXTERNAL_GW_INFO)): - self._update_bottom_router_gateway(context, router_id, router_data) - return super(TricirclePlugin, self).update_router(context, router_id, - router) + if not need_update_bottom: + return super(TricirclePlugin, self).update_router( + context, router_id, router) + if is_add: + ret = super(TricirclePlugin, self).update_router( + context, router_id, router) + router_data[l3.EXTERNAL_GW_INFO].update(ret[l3.EXTERNAL_GW_INFO]) + self._add_router_gateway(context, router_id, router_data) + return ret + else: + self._remove_router_gateway(context, router_id) + return super(TricirclePlugin, self).update_router( + context, router_id, router) def add_router_interface(self, context, router_id, interface_info): t_ctx = t_context.get_context_from_neutron_context(context) @@ -949,11 +947,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, router = self._get_router(context, router_id) project_id = router['tenant_id'] add_by_port, _ = self._validate_interface_info(interface_info) - # make sure network not crosses pods - # TODO(zhiyuan) support cross-pod tenant network - az, t_net = self._judge_network_across_pods( - context, interface_info, add_by_port) - b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id) + + net_id, b_pods = self._get_net_pods_by_interface_info( + t_ctx, context, add_by_port, interface_info) t_pod = db_api.get_top_pod(t_ctx) assert t_pod @@ -970,10 +966,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, else: ext_net_pod_names = set( [ext_net[az_ext.AZ_HINTS][0] for ext_net in ext_nets]) - if b_pod['pod_name'] in ext_net_pod_names: - need_ns_bridge = False - else: - need_ns_bridge = True + need_ns_bridge = False + for b_pod in b_pods: + if b_pod['pod_name'] not in ext_net_pod_names: + need_ns_bridge = True + break if need_ns_bridge: pool_id = self._get_bridge_subnet_pool_id( t_ctx, context, None, t_pod, False) @@ -982,9 +979,15 @@ class 
TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, return_info = super(TricirclePlugin, self).add_router_interface( context, router_id, interface_info) + if not b_pods: + return return_info try: - self.xjob_handler.setup_bottom_router( - t_ctx, t_net['id'], router_id, b_pod['pod_id']) + if len(b_pods) == 1: + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, router_id, b_pods[0]['pod_id']) + else: + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED) except Exception: # NOTE(zhiyuan) we fail to submit the job, so bottom router # operations are not started, it's safe for us to remove the top @@ -1003,32 +1006,35 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, def remove_router_interface(self, context, router_id, interface_info): t_ctx = t_context.get_context_from_neutron_context(context) - router = self._get_router(context, router_id) - project_id = router['tenant_id'] add_by_port, _ = self._validate_interface_info(interface_info, for_removal=True) - # make sure network not crosses pods - # TODO(zhiyuan) support cross-pod tenant network - az, t_net = self._judge_network_across_pods( - context, interface_info, add_by_port) - b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id) + net_id, b_pods = self._get_net_pods_by_interface_info( + t_ctx, context, add_by_port, interface_info) return_info = super(TricirclePlugin, self).remove_router_interface( context, router_id, interface_info) + if not b_pods: + return return_info try: - self.xjob_handler.setup_bottom_router( - t_ctx, t_net['id'], router_id, b_pod['pod_id']) + if len(b_pods) == 1: + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, router_id, b_pods[0]['pod_id']) + else: + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED) except Exception: # NOTE(zhiyuan) we fail to submit the job, so if bottom router # interface exists, it would not be deleted, then after we add - # the top 
interface again, the relation of top and bottom router - # interfaces are not updated in the resource routing entry. this - # inconsistency would not cause problem because: - # (1) when querying interface port, top port information is - # returned, not rely on routing entry - # (2) when setting up bottom router, xjob directly queries top - # and bottom interfaces, not rely on routing entry neither - # we may need some routing entry clean up process` + # the top interface again, the bottom router setup job will reuse + # the existing bottom interface. + # + # we don't create a routing entry between top interface and bottom + # interface, instead, when we create bottom subnet, we specify the + # ip of the top interface as the gateway ip of the bottom subnet. + # later when we attach the bottom subnet to bottom router, neutron + # server in bottom pod will create the bottom interface using the + # gateway ip automatically. + interface_info = {'subnet_id': return_info['subnet_id']} super(TricirclePlugin, self).add_router_interface( context, router_id, interface_info) raise @@ -1125,96 +1131,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, fip = floatingip['floatingip'] floatingip_db = self._get_floatingip(context, _id) int_port_id = fip['port_id'] - project_id = floatingip_db['tenant_id'] - fip_address = floatingip_db['floating_ip_address'] mappings = db_api.get_bottom_mappings_by_top_id( t_ctx, int_port_id, t_constants.RT_PORT) if not mappings: - int_port = self.get_port(context, int_port_id) - int_network = self.get_network(context, int_port['network_id']) - if az_ext.AZ_HINTS not in int_network: - raise Exception('Cross pods L3 networking not support') - self._validate_availability_zones( - context, int_network[az_ext.AZ_HINTS], False) - int_net_pod, _ = az_ag.get_pod_by_az_tenant( - t_ctx, int_network[az_ext.AZ_HINTS][0], project_id) - b_int_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, int_network['id'], int_net_pod['pod_name'], - 
t_constants.RT_NETWORK) - b_int_port_body = { - 'port': { - 'tenant_id': project_id, - 'admin_state_up': True, - 'name': int_port['id'], - 'network_id': b_int_net_id, - 'mac_address': int_port['mac_address'], - 'fixed_ips': [{'ip_address': int_port['fixed_ips'][0][ - 'ip_address']}] - } - } - # TODO(zhiyuan) handle DHCP port ip address conflict problem - _, b_int_port_id = self._prepare_bottom_element( - t_ctx, project_id, int_net_pod, int_port, - t_constants.RT_PORT, b_int_port_body) - else: - int_net_pod, b_int_port_id = mappings[0] - ext_net_id = floatingip_db['floating_network_id'] - ext_net = self.get_network(context, ext_net_id) - ext_net_pod = db_api.get_pod_by_name(t_ctx, - ext_net[az_ext.AZ_HINTS][0]) - - # external network and internal network are in the same pod, no - # need to use bridge network. - if int_net_pod['pod_name'] == ext_net_pod['pod_name']: - client = self._get_client(int_net_pod['pod_name']) - b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, ext_net_id, ext_net_pod['pod_name'], - t_constants.RT_NETWORK) - self._safe_create_bottom_floatingip( - t_ctx, int_net_pod, client, b_ext_net_id, fip_address, - b_int_port_id) + # mapping does not exist, meaning that the bottom port has not + # been created, we just return and leave the work to setup bottom + # floating ip to nova api gateway return - # below handle the case that external network and internal network - # are in different pods - int_client = self._get_client(int_net_pod['pod_name']) - ext_client = self._get_client(ext_net_pod['pod_name']) - ns_bridge_net_name = t_constants.ns_bridge_net_name % project_id - ns_bridge_net = self.get_networks( - context, {'name': [ns_bridge_net_name]})[0] - int_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, ns_bridge_net['id'], int_net_pod['pod_name'], - t_constants.RT_NETWORK) - ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, ns_bridge_net['id'], ext_net_pod['pod_name'], - t_constants.RT_NETWORK) - - 
t_pod = db_api.get_top_pod(t_ctx) - t_ns_bridge_port = self._get_bridge_interface( - t_ctx, context, project_id, t_pod, ns_bridge_net['id'], - None, b_int_port_id, False) - port_body = { - 'port': { - 'tenant_id': project_id, - 'admin_state_up': True, - 'name': 'ns_bridge_port', - 'network_id': ext_bridge_net_id, - 'fixed_ips': [{'ip_address': t_ns_bridge_port[ - 'fixed_ips'][0]['ip_address']}] - } - } - _, b_ns_bridge_port_id = self._prepare_bottom_element( - t_ctx, project_id, ext_net_pod, t_ns_bridge_port, - t_constants.RT_PORT, port_body) - b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, ext_net_id, ext_net_pod['pod_name'], - t_constants.RT_NETWORK) - self._safe_create_bottom_floatingip( - t_ctx, ext_net_pod, ext_client, b_ext_net_id, fip_address, - b_ns_bridge_port_id) - self._safe_create_bottom_floatingip( - t_ctx, int_net_pod, int_client, int_bridge_net_id, - t_ns_bridge_port['fixed_ips'][0]['ip_address'], b_int_port_id) + int_net_pod, b_int_port_id = mappings[0] + int_port = self.get_port(context, int_port_id) + net_id = int_port['network_id'] + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, floatingip_db['router_id'], int_net_pod['pod_id']) def _disassociate_floatingip(self, context, ori_floatingip_db): if not ori_floatingip_db['port_id']: @@ -1223,7 +1152,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, return t_ctx = t_context.get_context_from_neutron_context(context) - project_id = ori_floatingip_db['tenant_id'] t_int_port_id = ori_floatingip_db['port_id'] mappings = db_api.get_bottom_mappings_by_top_id( @@ -1238,80 +1166,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2, return b_int_net_pod, b_int_port_id = mappings[0] - t_ext_net_id = ori_floatingip_db['floating_network_id'] - t_ext_net = self.get_network(context, t_ext_net_id) - b_ext_net_pod = db_api.get_pod_by_name(t_ctx, - t_ext_net[az_ext.AZ_HINTS][0]) - b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, t_ext_net_id, 
b_ext_net_pod['pod_name'], - t_constants.RT_NETWORK) - - # external network and internal network are in the same pod, so - # bridge network is not created in this pod - if b_int_net_pod['pod_name'] == b_ext_net_pod['pod_name']: - b_client = self._get_client(b_int_net_pod['pod_name']) - b_fips = b_client.list_floatingips( - t_ctx, - [{'key': 'floating_ip_address', - 'comparator': 'eq', - 'value': ori_floatingip_db['floating_ip_address']}, - {'key': 'floating_network_id', - 'comparator': 'eq', - 'value': b_ext_net_id}]) - if not b_fips: - return - b_client.update_floatingips(t_ctx, b_fips[0]['id'], - {'floatingip': {'port_id': None}}) - return - - # below handle the case that external network and internal network - # are in different pods - b_int_client = self._get_client(b_int_net_pod['pod_name']) - b_ext_client = self._get_client(b_ext_net_pod['pod_name']) - ns_bridge_net_name = t_constants.ns_bridge_net_name % project_id - t_ns_bridge_net = self.get_networks( - context, {'name': [ns_bridge_net_name]})[0] - b_int_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, t_ns_bridge_net['id'], b_int_net_pod['pod_name'], - t_constants.RT_NETWORK) - t_pod = db_api.get_top_pod(t_ctx) - t_ns_bridge_port = self._get_bridge_interface( - t_ctx, context, project_id, t_pod, t_ns_bridge_net['id'], - None, b_int_port_id, False) - - b_int_fips = b_int_client.list_floatingips( - t_ctx, - [{'key': 'floating_ip_address', - 'comparator': 'eq', - 'value': t_ns_bridge_port['fixed_ips'][0]['ip_address']}, - {'key': 'floating_network_id', - 'comparator': 'eq', - 'value': b_int_bridge_net_id}]) - b_ext_fips = b_ext_client.list_floatingips( - t_ctx, - [{'key': 'floating_ip_address', - 'comparator': 'eq', - 'value': ori_floatingip_db['floating_ip_address']}, - {'key': 'floating_network_id', - 'comparator': 'eq', - 'value': b_ext_net_id}]) - - if b_int_fips: - b_int_client.delete_floatingips( - t_ctx, b_int_fips[0]['id']) - if b_ext_fips: - b_ext_client.update_floatingips( - t_ctx, 
b_ext_fips[0]['id'], - {'floatingip': {'port_id': None}}) - # delete bridge port - self.delete_port(context, t_ns_bridge_port['id'], l3_port_check=False) - # for bridge port, we have two resource routing entries, one for bridge - # port in top pod, another for bridge port in bottom pod. calling - # delete_port above will delete bridge port in bottom pod as well as - # routing entry for it, but we also need to remove routing entry for - # bridge port in top pod - # bridge network will be deleted when deleting router - with t_ctx.session.begin(): - core.delete_resources(t_ctx, models.ResourceRouting, - [{'key': 'top_id', 'comparator': 'eq', - 'value': t_ns_bridge_port['name']}]) + int_port = self.get_port(context, t_int_port_id) + net_id = int_port['network_id'] + self.xjob_handler.setup_bottom_router( + t_ctx, net_id, ori_floatingip_db['router_id'], + b_int_net_pod['pod_id']) diff --git a/tricircle/nova_apigw/controllers/server.py b/tricircle/nova_apigw/controllers/server.py index f033497..9e9a55e 100644 --- a/tricircle/nova_apigw/controllers/server.py +++ b/tricircle/nova_apigw/controllers/server.py @@ -175,8 +175,9 @@ class ServerController(rest.RestController): 400, _('Network %s could not be ' 'found') % net_info['uuid']) - if not self._check_network_server_the_same_az( - network, kw['server']['availability_zone']): + if not self._check_network_server_az_match( + context, network, + kw['server']['availability_zone']): return utils.format_nova_error( 400, _('Network and server not in the same ' 'availability zone')) @@ -300,11 +301,29 @@ class ServerController(rest.RestController): self.project_id, pod, {'id': _id}, _type, list_resources) + def _handle_router(self, context, pod, net): + top_client = self._get_client() + + interfaces = top_client.list_ports( + context, filters=[{'key': 'network_id', + 'comparator': 'eq', + 'value': net['id']}, + {'key': 'device_owner', + 'comparator': 'eq', + 'value': 'network:router_interface'}]) + interfaces = [inf for inf in 
interfaces if inf['device_id']] + if not interfaces: + return + # TODO(zhiyuan) change xjob invoking from "cast" to "call" to guarantee + # the job can be successfully registered + self.xjob_handler.setup_bottom_router( + context, net['id'], interfaces[0]['device_id'], pod['pod_id']) + def _handle_network(self, context, pod, net, subnets, port=None, top_sg_ids=None, bottom_sg_ids=None): (bottom_net_id, subnet_map) = self.helper.prepare_bottom_network_subnets( - context, self.project_id, pod, net, subnets) + context, None, self.project_id, pod, net, subnets) top_client = self._get_client() top_port_body = {'port': {'network_id': net['id'], @@ -324,6 +343,8 @@ class ServerController(rest.RestController): _, bottom_port_id = self.helper.prepare_bottom_element( context, self.project_id, pod, port, constants.RT_PORT, port_body) + self._handle_router(context, pod, net) + return port['id'], bottom_port_id def _handle_port(self, context, pod, port): @@ -535,15 +556,25 @@ class ServerController(rest.RestController): filters) @staticmethod - def _check_network_server_the_same_az(network, server_az): + def _check_network_server_az_match(context, network, server_az): az_hints = 'availability_zone_hints' + network_type = 'provider:network_type' + + # for local type network, we make sure it's created in only one az + + # NOTE(zhiyuan) race condition exists when creating vms in the same + # local type network but different azs at the same time + if network.get(network_type) == constants.NT_LOCAL: + mappings = db_api.get_bottom_mappings_by_top_id( + context, network['id'], constants.RT_NETWORK) + if mappings: + pod, _ = mappings[0] + if pod['az_name'] != server_az: + return False # if neutron az not assigned, server az is used if not network.get(az_hints): return True - # temporally not support cross-pod network - if len(network[az_hints]) > 1: - return False - if network[az_hints][0] == server_az: + if server_az in network[az_hints]: return True else: return False diff --git 
a/tricircle/tests/unit/network/test_helper.py b/tricircle/tests/unit/network/test_helper.py new file mode 100644 index 0000000..0074cb2 --- /dev/null +++ b/tricircle/tests/unit/network/test_helper.py @@ -0,0 +1,64 @@ +# Copyright 2015 Huawei Technologies Co., Ltd. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest + +from oslo_utils import uuidutils + +from tricircle.network import helper + + +class HelperTest(unittest.TestCase): + def setUp(self): + self.helper = helper.NetworkHelper() + + def test_get_create_subnet_body(self): + t_net_id = uuidutils.generate_uuid() + t_subnet_id = uuidutils.generate_uuid() + b_net_id = uuidutils.generate_uuid() + project_id = uuidutils.generate_uuid() + + t_subnet = { + 'network_id': t_net_id, + 'id': t_subnet_id, + 'ip_version': 4, + 'cidr': '10.0.1.0/24', + 'gateway_ip': '10.0.1.1', + 'allocation_pools': [{'start': '10.0.1.2', 'end': '10.0.1.254'}], + 'enable_dhcp': True, + 'tenant_id': project_id + } + body = self.helper.get_create_subnet_body(project_id, t_subnet, + b_net_id, '10.0.1.2') + self.assertItemsEqual([{'start': '10.0.1.3', 'end': '10.0.1.254'}], + body['subnet']['allocation_pools']) + self.assertEqual('10.0.1.2', body['subnet']['gateway_ip']) + + body = self.helper.get_create_subnet_body(project_id, t_subnet, + b_net_id, '10.0.1.254') + self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.253'}], + body['subnet']['allocation_pools']) + self.assertEqual('10.0.1.254', 
body['subnet']['gateway_ip']) + + t_subnet['allocation_pools'] = [ + {'start': '10.0.1.2', 'end': '10.0.1.10'}, + {'start': '10.0.1.20', 'end': '10.0.1.254'}] + body = self.helper.get_create_subnet_body(project_id, t_subnet, + b_net_id, '10.0.1.5') + self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.4'}, + {'start': '10.0.1.6', 'end': '10.0.1.10'}, + {'start': '10.0.1.20', 'end': '10.0.1.254'}], + body['subnet']['allocation_pools']) + self.assertEqual('10.0.1.5', body['subnet']['gateway_ip']) diff --git a/tricircle/tests/unit/network/test_plugin.py b/tricircle/tests/unit/network/test_plugin.py index 7690289..773f55e 100644 --- a/tricircle/tests/unit/network/test_plugin.py +++ b/tricircle/tests/unit/network/test_plugin.py @@ -24,14 +24,18 @@ from sqlalchemy.orm import attributes from sqlalchemy.orm import exc from sqlalchemy.sql import elements +import neutron_lib.constants as q_constants + import neutron.conf.common as q_config from neutron.db import db_base_plugin_common from neutron.db import db_base_plugin_v2 -from neutron.db import ipam_non_pluggable_backend +from neutron.db import ipam_pluggable_backend from neutron.db import l3_db from neutron.db import models_v2 from neutron.extensions import availability_zone as az_ext -from neutron.ipam import subnet_alloc +from neutron.ipam import driver +from neutron.ipam import requests +import neutron.ipam.utils as ipam_utils from neutron import manager import neutronclient.common.exceptions as q_exceptions @@ -103,6 +107,21 @@ RES_MAP = {'networks': TOP_NETS, 'floatingips': TOP_FLOATINGIPS, 'securitygroups': TOP_SGS, 'securitygrouprules': TOP_SG_RULES} +SUBNET_INFOS = {} + + +def _fill_external_gateway_info(router): + if router.gw_port: + ext_gw_info = { + 'network_id': router.gw_port['network_id'], + 'external_fixed_ips': [ + {'subnet_id': ip["subnet_id"], + 'ip_address': ip["ip_address"]} + for ip in router.gw_port['fixed_ips']]} + else: + ext_gw_info = None + router['external_gateway_info'] = ext_gw_info 
+ return router def _transform_az(network): @@ -115,6 +134,60 @@ def _transform_az(network): return network +class FakeIpamSubnet(driver.Subnet): + def __init__(self, subnet): + self._subnet = subnet + + def allocate(self, address_request): + pass + + def deallocate(self, address): + pass + + def get_details(self): + return requests.SpecificSubnetRequest(self._subnet['tenant_id'], + self._subnet['id'], + self._subnet['cidr'], + self._subnet['gateway'], + self._subnet['pools']) + + +class FakePool(driver.Pool): + def allocate_subnet(self, subnet_request): + if isinstance(subnet_request, requests.SpecificSubnetRequest): + subnet_info = {'id': subnet_request.subnet_id, + 'tenant_id': subnet_request.tenant_id, + 'cidr': subnet_request.subnet_cidr, + 'gateway': subnet_request.gateway_ip, + 'pools': subnet_request.allocation_pools} + SUBNET_INFOS[subnet_info['id']] = subnet_info + return FakeIpamSubnet(subnet_info) + prefix = self._subnetpool.prefixes[0] + subnet = next(prefix.subnet(subnet_request.prefixlen)) + gateway = subnet.network + 1 + pools = ipam_utils.generate_pools(subnet.cidr, + gateway) + subnet_info = {'id': subnet_request.subnet_id, + 'tenant_id': subnet_request.tenant_id, + 'cidr': subnet.cidr, + 'gateway': gateway, + 'pools': pools} + SUBNET_INFOS[subnet_info['id']] = subnet_info + return FakeIpamSubnet(subnet_info) + + def get_subnet(self, subnet_id): + return FakeIpamSubnet(SUBNET_INFOS[subnet_id]) + + def get_allocator(self, subnet_ids): + return driver.SubnetGroup() + + def update_subnet(self, subnet_request): + return FakeIpamSubnet() + + def remove_subnet(self, subnet_id): + pass + + class DotDict(dict): def __init__(self, normal_dict=None): if normal_dict: @@ -150,7 +223,8 @@ class FakeNeutronClient(object): for port in port_list: is_selected = True for key, value in params['filters'].iteritems(): - if key not in port or port[key] not in value: + if key not in port or not port[key] or ( + port[key] not in value): is_selected = False break if 
is_selected: @@ -197,6 +271,10 @@ class FakeClient(object): def get_native_client(self, resource, ctx): return self.client + def _get_connection(self): + # only for mock purpose + pass + def _allocate_ip(self, port_body): subnet_list = self._res_map[self.pod_name]['subnet'] for subnet in subnet_list: @@ -232,6 +310,10 @@ class FakeClient(object): body[_type]['device_id'] = '' if 'fixed_ips' not in body[_type]: body[_type]['fixed_ips'] = [self._allocate_ip(body)] + if _type == 'subnet': + if 'gateway_ip' not in body[_type]: + cidr = body[_type]['cidr'] + body[_type]['gateway_ip'] = cidr[:cidr.rindex('.')] + '.1' if 'id' not in body[_type]: body[_type]['id'] = uuidutils.generate_uuid() res_list = self._res_map[self.pod_name][_type] @@ -272,6 +354,9 @@ class FakeClient(object): 'comparator': 'eq', 'value': net_id}])[0] + def list_subnets(self, ctx, filters=None): + return self.list_resources('subnet', ctx, filters) + def get_subnets(self, ctx, subnet_id): return self.list_resources('subnet', ctx, [{'key': 'id', 'comparator': 'eq', @@ -298,31 +383,65 @@ class FakeClient(object): def delete_ports(self, ctx, port_id): index = -1 - for i, port in enumerate(self._res_map[self.pod_name]['port']): + if self.pod_name == 'top': + port_list = self._res_map[self.pod_name]['ports'] + else: + port_list = self._res_map[self.pod_name]['port'] + for i, port in enumerate(port_list): if port['id'] == port_id: index = i if index != -1: - del self._res_map[self.pod_name]['port'][index] + del port_list[index] def add_gateway_routers(self, ctx, *args, **kwargs): # only for mock purpose pass def add_interface_routers(self, ctx, *args, **kwargs): + self._get_connection() + + router_id, body = args + if 'port_id' in body: + for port in self._res_map[self.pod_name]['port']: + if port['id'] == body['port_id']: + port['device_id'] = router_id + port['device_owner'] = 'network:router_interface' + else: + subnet_id = body['subnet_id'] + subnet = self.get_subnets(ctx, subnet_id) + 
self.create_ports(ctx, {'port': { + 'tenant_id': subnet['tenant_id'], + 'admin_state_up': True, + 'id': uuidutils.generate_uuid(), + 'name': '', + 'network_id': subnet['network_id'], + 'fixed_ips': [ + {'subnet_id': subnet_id, + 'ip_address': subnet['gateway_ip']} + ], + 'mac_address': '', + 'device_id': router_id, + 'device_owner': 'network:router_interface' + }}) + + def remove_interface_routers(self, ctx, *args, **kwargs): # only for mock purpose pass def get_routers(self, ctx, router_id): - return self.list_resources('router', ctx, [{'key': 'id', - 'comparator': 'eq', - 'value': router_id}])[0] + router = self.list_resources('router', ctx, [{'key': 'id', + 'comparator': 'eq', + 'value': router_id}])[0] + return _fill_external_gateway_info(router) def action_routers(self, ctx, action, *args, **kwargs): - # divide into two functions for test purpose + # divide into three functions for test purpose if action == 'add_interface': - return self.add_interface_routers(ctx, args, kwargs) + return self.add_interface_routers(ctx, *args, **kwargs) elif action == 'add_gateway': - return self.add_gateway_routers(ctx, args, kwargs) + return self.add_gateway_routers(ctx, *args, **kwargs) + elif action == 'remove_interface': + return self.remove_interface_routers(ctx, *args, **kwargs) def create_floatingips(self, ctx, body): fip = self.create_resources('floatingip', ctx, body) @@ -332,7 +451,11 @@ class FakeClient(object): return fip def list_floatingips(self, ctx, filters=None): - return self.list_resources('floatingip', ctx, filters) + fips = self.list_resources('floatingip', ctx, filters) + for fip in fips: + if 'port_id' not in fip: + fip['port_id'] = None + return fips def update_floatingips(self, ctx, _id, body): pass @@ -446,6 +569,7 @@ def update_floatingip(self, context, _id, floatingip): if not floatingip['floatingip']['port_id']: update_dict['fixed_port_id'] = None update_dict['fixed_ip_address'] = None + update_dict['router_id'] = None fip.update(update_dict) return 
for port in TOP_PORTS: @@ -454,6 +578,13 @@ def update_floatingip(self, context, _id, floatingip): update_dict['fixed_port_id'] = port['id'] update_dict[ 'fixed_ip_address'] = port['fixed_ips'][0]['ip_address'] + for router_port in TOP_ROUTERPORT: + for _port in TOP_PORTS: + if _port['id'] != router_port['port_id']: + continue + if _port['network_id'] == port['network_id']: + update_dict['router_id'] = router_port['router_id'] + fip.update(update_dict) @@ -649,6 +780,7 @@ class FakeSession(object): for net in TOP_NETS: if net['id'] == model_dict['network_id']: net['external'] = True + net['router:external'] = True break link_models(model_obj, model_dict, 'routerports', 'router_id', @@ -677,7 +809,7 @@ class FakeSession(object): def flush(self): pass - def expire(self, obj): + def expire(self, obj, fields=None): pass @@ -726,6 +858,12 @@ class FakeHelper(helper.NetworkHelper): return super(FakeHelper, self)._prepare_top_element_by_call( t_ctx, q_ctx, project_id, pod, ele, _type, body) + def _get_top_element(self, t_ctx, q_ctx, _type, _id): + if not q_ctx: + q_ctx = FakeNeutronContext() + return super(FakeHelper, self)._get_top_element( + t_ctx, q_ctx, _type, _id) + class FakeTypeManager(managers.TricircleTypeManager): def _register_types(self): @@ -818,7 +956,7 @@ def fake_make_subnet_dict(self, subnet, fields=None, context=None): def fake_make_router_dict(self, router, fields=None, process_extensions=True): - return router + return _fill_external_gateway_info(router) def fake_generate_ip(subnet): @@ -836,11 +974,20 @@ def fake_generate_ip(subnet): def fake_allocate_ips_for_port(self, context, port): + if 'fixed_ips' in port['port'] and ( + port['port'][ + 'fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED): + return port['port']['fixed_ips'] for subnet in TOP_SUBNETS: if subnet['network_id'] == port['port']['network_id']: return [fake_generate_ip(subnet)] +@classmethod +def fake_get_instance(cls, subnet_pool, context): + return FakePool(subnet_pool, context) + + 
class PluginTest(unittest.TestCase, test_security_groups.TricircleSecurityGroupTestMixin): def setUp(self): @@ -849,9 +996,6 @@ class PluginTest(unittest.TestCase, cfg.CONF.register_opts(q_config.core_opts) plugin_path = 'tricircle.tests.unit.network.test_plugin.FakePlugin' cfg.CONF.set_override('core_plugin', plugin_path) - cfg.CONF.set_override('ipam_driver', '') - # set ipam_driver as empty string to use IpamNonPluggableBackend, which - # is enough for test purpose self.context = context.Context() self.save_method = manager.NeutronManager._get_default_service_plugins manager.NeutronManager._get_default_service_plugins = mock.Mock() @@ -1082,13 +1226,12 @@ class PluginTest(unittest.TestCase, 'availability_zone_hints': ['az_name_1', 'az_name_2']}} fake_plugin.create_network(neutron_context, network) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) @patch.object(context, 'get_context_from_neutron_context') - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) def test_prepare_element(self, mock_context): self._basic_pod_route_setup() @@ -1148,53 +1291,104 @@ class PluginTest(unittest.TestCase, self.assertEqual(bottom_entry_map['port']['top_id'], port['id']) self.assertEqual(bottom_entry_map['port']['bottom_id'], b_port_id) - def _prepare_router_test(self, tenant_id): + @staticmethod + def _prepare_router_test(tenant_id, ctx, pod_name, index): t_net_id = uuidutils.generate_uuid() t_subnet_id = uuidutils.generate_uuid() - t_router_id = uuidutils.generate_uuid() + b_net_id = uuidutils.generate_uuid() + b_subnet_id = uuidutils.generate_uuid() + # no need to specify az, we will setup router in the pod where bottom + # network is created 
t_net = { 'id': t_net_id, - 'name': 'top_net', - 'availability_zone_hints': '["az_name_1"]', + 'name': 'top_net_%d' % index, 'tenant_id': tenant_id } t_subnet = { 'id': t_subnet_id, 'network_id': t_net_id, - 'name': 'top_subnet', + 'name': 'top_subnet_%d' % index, 'ip_version': 4, - 'cidr': '10.0.0.0/24', + 'cidr': '10.0.%d.0/24' % index, 'allocation_pools': [], 'enable_dhcp': True, - 'gateway_ip': '10.0.0.1', + 'gateway_ip': '10.0.%d.1' % index, 'ipv6_address_mode': '', 'ipv6_ra_mode': '', 'tenant_id': tenant_id } - t_router = { - 'id': t_router_id, - 'name': 'top_router', - 'distributed': False, - 'tenant_id': tenant_id, - 'attached_ports': [] - } TOP_NETS.append(DotDict(t_net)) TOP_SUBNETS.append(DotDict(t_subnet)) - TOP_ROUTERS.append(DotDict(t_router)) + subnet_info = {'id': t_subnet['id'], + 'tenant_id': t_subnet['tenant_id'], + 'cidr': t_subnet['cidr'], + 'gateway': t_subnet['gateway_ip'], + 'pools': t_subnet['allocation_pools']} + SUBNET_INFOS[subnet_info['id']] = subnet_info - return t_net_id, t_subnet_id, t_router_id + b_net = { + 'id': b_net_id, + 'name': t_net_id, + 'tenant_id': tenant_id + } + b_subnet = { + 'id': b_subnet_id, + 'network_id': b_net_id, + 'name': b_subnet_id, + 'ip_version': 4, + 'cidr': '10.0.%d.0/24' % index, + 'allocation_pools': [], + 'enable_dhcp': True, + 'gateway_ip': '10.0.%d.1' % index, + 'ipv6_address_mode': '', + 'ipv6_ra_mode': '', + 'tenant_id': tenant_id + } + if pod_name == 'pod_1': + BOTTOM1_NETS.append(DotDict(b_net)) + BOTTOM1_SUBNETS.append(DotDict(b_subnet)) + else: + BOTTOM2_NETS.append(DotDict(b_net)) + BOTTOM2_SUBNETS.append(DotDict(b_subnet)) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2' + core.create_resource(ctx, models.ResourceRouting, + {'top_id': t_net_id, + 'bottom_id': b_net_id, + 'pod_id': pod_id, + 'project_id': tenant_id, + 'resource_type': constants.RT_NETWORK}) + core.create_resource(ctx, models.ResourceRouting, + 
{'top_id': t_subnet_id, + 'bottom_id': b_subnet_id, + 'pod_id': pod_id, + 'project_id': tenant_id, + 'resource_type': constants.RT_SUBNET}) + + if len(TOP_ROUTERS) == 0: + t_router_id = uuidutils.generate_uuid() + t_router = { + 'id': t_router_id, + 'name': 'top_router', + 'distributed': False, + 'tenant_id': tenant_id, + 'attached_ports': [] + } + TOP_ROUTERS.append(DotDict(t_router)) + else: + t_router_id = TOP_ROUTERS[0]['id'] + + return t_net_id, t_subnet_id, t_router_id, b_net_id, b_subnet_id + + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeBaseRPCAPI, 'configure_extra_routes') - @patch.object(FakeClient, 'action_routers') @patch.object(context, 'get_context_from_neutron_context') - def test_add_interface(self, mock_context, mock_action, mock_rpc): + def test_add_interface(self, mock_context, mock_rpc): self._basic_pod_route_setup() fake_plugin = FakePlugin() @@ -1203,25 +1397,16 @@ class PluginTest(unittest.TestCase, mock_context.return_value = t_ctx tenant_id = 'test_tenant_id' - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) - t_port_id = fake_plugin.add_router_interface( + fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] - _, b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_port_id, 'port')[0] - b_port = fake_plugin._get_client('pod_1').get_ports(q_ctx, b_port_id) - b_net_id = b_port['network_id'] - b_subnet_id = b_port['fixed_ips'][0]['subnet_id'] - _, map_net_id = db_api.get_bottom_mappings_by_top_id( - 
t_ctx, t_net_id, 'network')[0] - _, map_subnet_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_subnet_id, 'subnet')[0] + _, b_router_id = db_api.get_bottom_mappings_by_top_id( t_ctx, t_router_id, 'router')[0] - self.assertEqual(b_net_id, map_net_id) - self.assertEqual(b_subnet_id, map_subnet_id) mock_rpc.assert_called_once_with(t_ctx, t_router_id) for b_net in BOTTOM1_NETS: if 'provider:segmentation_id' in b_net: @@ -1240,41 +1425,12 @@ class PluginTest(unittest.TestCase, _, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id( t_ctx, t_bridge_port_id, 'port')[0] - t_net_id = uuidutils.generate_uuid() - t_subnet_id = uuidutils.generate_uuid() - t_net = { - 'id': t_net_id, - 'name': 'another_top_net', - 'availability_zone_hints': '["az_name_1"]', - 'tenant_id': tenant_id - } - t_subnet = { - 'id': t_subnet_id, - 'network_id': t_net_id, - 'name': 'another_top_subnet', - 'ip_version': 4, - 'cidr': '10.0.1.0/24', - 'allocation_pools': [], - 'enable_dhcp': True, - 'gateway_ip': '10.0.1.1', - 'ipv6_address_mode': '', - 'ipv6_ra_mode': '', - 'tenant_id': tenant_id - } - TOP_NETS.append(DotDict(t_net)) - TOP_SUBNETS.append(DotDict(t_subnet)) + (t_net_id, t_subnet_id, t_router_id, + b_another_net_id, b_another_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 2) - # action_routers is mocked, manually add device_id - for port in BOTTOM1_PORTS: - if port['id'] == b_bridge_port_id: - port['device_id'] = b_router_id - - another_t_port_id = fake_plugin.add_router_interface( + fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] - _, another_b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, another_t_port_id, 'port')[0] - another_b_port = fake_plugin._get_client('pod_1').get_ports( - q_ctx, another_b_port_id) t_ns_bridge_net_id = None for net in TOP_NETS: @@ -1282,23 +1438,27 @@ class PluginTest(unittest.TestCase, t_ns_bridge_net_id = net['id'] # N-S bridge not created since no extenal network created 
self.assertIsNone(t_ns_bridge_net_id) - calls = [mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': b_bridge_port_id}), - mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': b_port['id']}), - mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': another_b_port['id']})] - mock_action.assert_has_calls(calls) - self.assertEqual(mock_action.call_count, 3) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + device_ids = ['', '', ''] + for port in BOTTOM1_PORTS: + if port['id'] == b_bridge_port_id: + device_ids[0] = port['device_id'] + elif port['network_id'] == b_net_id and ( + port['device_owner'] == 'network:router_interface'): + device_ids[1] = port['device_id'] + elif port['network_id'] == b_another_net_id and ( + port['device_owner'] == 'network:router_interface'): + device_ids[2] = port['device_id'] + + self.assertEqual(device_ids, [b_router_id, b_router_id, b_router_id]) + + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeBaseRPCAPI, 'configure_extra_routes') - @patch.object(FakeClient, 'action_routers') + @patch.object(FakeClient, 'add_gateway_routers') @patch.object(context, 'get_context_from_neutron_context') def test_add_interface_with_external_network(self, mock_context, mock_action, mock_rpc): @@ -1310,8 +1470,9 @@ class PluginTest(unittest.TestCase, mock_context.return_value = t_ctx tenant_id = 'test_tenant_id' - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) e_net_id = uuidutils.generate_uuid() e_net = {'id': e_net_id, @@ 
-1323,22 +1484,12 @@ class PluginTest(unittest.TestCase, 'availability_zone_hints': '["pod_2"]'} TOP_NETS.append(e_net) - t_port_id = fake_plugin.add_router_interface( + fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] - _, b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_port_id, 'port')[0] - b_port = fake_plugin._get_client('pod_1').get_ports(q_ctx, b_port_id) - b_net_id = b_port['network_id'] - b_subnet_id = b_port['fixed_ips'][0]['subnet_id'] - _, map_net_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_net_id, 'network')[0] - _, map_subnet_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_subnet_id, 'subnet')[0] - _, b_router_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_router_id, 'router')[0] - self.assertEqual(b_net_id, map_net_id) - self.assertEqual(b_subnet_id, map_subnet_id) + b_router_id = db_api.get_bottom_id_by_top_id_pod_name( + t_ctx, t_router_id, 'pod_1', 'router') + mock_rpc.assert_called_once_with(t_ctx, t_router_id) for b_net in BOTTOM1_NETS: if 'provider:segmentation_id' in b_net: @@ -1357,39 +1508,12 @@ class PluginTest(unittest.TestCase, _, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id( t_ctx, t_bridge_port_id, 'port')[0] - t_net_id = uuidutils.generate_uuid() - t_subnet_id = uuidutils.generate_uuid() - t_net = { - 'id': t_net_id, - 'name': 'another_top_net', - 'availability_zone_hints': '["az_name_1"]', - 'tenant_id': tenant_id - } - t_subnet = { - 'id': t_subnet_id, - 'network_id': t_net_id, - 'name': 'another_top_subnet', - 'ip_version': 4, - 'cidr': '10.0.1.0/24', - 'allocation_pools': [], - 'enable_dhcp': True, - 'gateway_ip': '10.0.1.1', - 'ipv6_address_mode': '', - 'ipv6_ra_mode': '', - 'tenant_id': tenant_id - } - TOP_NETS.append(DotDict(t_net)) - TOP_SUBNETS.append(DotDict(t_subnet)) + (t_net_id, t_subnet_id, t_router_id, + b_another_net_id, b_another_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 2) - # action_routers is mocked, 
manually add device_id - for port in BOTTOM1_PORTS: - if port['id'] == b_bridge_port_id: - port['device_id'] = b_router_id - - another_t_port_id = fake_plugin.add_router_interface( + fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] - _, another_b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, another_t_port_id, 'port')[0] for net in TOP_NETS: if net['name'].startswith('ns_bridge'): @@ -1408,51 +1532,37 @@ class PluginTest(unittest.TestCase, # add_router_interface is called, bottom router is already attached # to E-W bridge network, only need to attach internal network to # bottom router - calls = [mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': b_bridge_port_id}), - mock.call(t_ctx, 'add_gateway', b_router_id, + calls = [mock.call(t_ctx, b_router_id, {'network_id': b_ns_bridge_net_id, 'external_fixed_ips': [ {'subnet_id': b_ns_bridge_subnet_id, 'ip_address': '100.128.0.2'}]}), - mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': b_port['id']}), - mock.call(t_ctx, 'add_gateway', b_router_id, + mock.call(t_ctx, b_router_id, {'network_id': b_ns_bridge_net_id, 'external_fixed_ips': [ {'subnet_id': b_ns_bridge_subnet_id, - 'ip_address': '100.128.0.2'}]}), - mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': another_b_port_id})] + 'ip_address': '100.128.0.2'}]})] mock_action.assert_has_calls(calls) - t_net_id = uuidutils.generate_uuid() - t_subnet_id = uuidutils.generate_uuid() - t_net = { - 'id': t_net_id, - 'name': 'another_top_net', - 'availability_zone_hints': '["az_name_2"]', - 'tenant_id': tenant_id - } - t_subnet = { - 'id': t_subnet_id, - 'network_id': t_net_id, - 'name': 'another_top_subnet', - 'ip_version': 4, - 'cidr': '10.0.2.0/24', - 'allocation_pools': [], - 'enable_dhcp': True, - 'gateway_ip': '10.0.2.1', - 'ipv6_address_mode': '', - 'ipv6_ra_mode': '', - 'tenant_id': tenant_id - } - TOP_NETS.append(DotDict(t_net)) - TOP_SUBNETS.append(DotDict(t_subnet)) - 
another_t_port_id = fake_plugin.add_router_interface( + device_ids = ['', '', ''] + for port in BOTTOM1_PORTS: + if port['id'] == b_bridge_port_id: + device_ids[0] = port['device_id'] + elif port['network_id'] == b_net_id and ( + port['device_owner'] == 'network:router_interface'): + device_ids[1] = port['device_id'] + elif port['network_id'] == b_another_net_id and ( + port['device_owner'] == 'network:router_interface'): + device_ids[2] = port['device_id'] + self.assertEqual(device_ids, [b_router_id, b_router_id, b_router_id]) + + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_2', 2) + + fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] - _, another_b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, another_t_port_id, 'port')[0] + b_router_id = db_api.get_bottom_id_by_top_id_pod_name( t_ctx, t_router_id, 'pod_2', 'router') bridge_port_name = constants.ew_bridge_port_name % (tenant_id, @@ -1465,20 +1575,20 @@ class PluginTest(unittest.TestCase, # to create N-S bridge network when attaching router interface(N-S # bridge network is created when setting router external gateway), so # add_gateway is not called. 
- calls.extend([mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': b_bridge_port_id}), - mock.call(t_ctx, 'add_interface', b_router_id, - {'port_id': another_b_port_id})]) - mock_action.assert_has_calls(calls) - # all together 7 times calling - self.assertEqual(mock_action.call_count, 7) + device_ids = ['', ''] + for port in BOTTOM2_PORTS: + if port['id'] == b_bridge_port_id: + device_ids[0] = port['device_id'] + elif port['network_id'] == b_net_id and ( + port['device_owner'] == 'network:router_interface'): + device_ids[1] = port['device_id'] + self.assertEqual(device_ids, [b_router_id, b_router_id]) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeRPCAPI, 'configure_extra_routes', new=mock.Mock) @patch.object(FakeClient, 'action_routers') @patch.object(context, 'get_context_from_neutron_context') @@ -1491,8 +1601,9 @@ class PluginTest(unittest.TestCase, mock_context.return_value = t_ctx tenant_id = 'test_tenant_id' - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) with t_ctx.session.begin(): entries = core.query_resource(t_ctx, models.ResourceRouting, @@ -1521,29 +1632,26 @@ class PluginTest(unittest.TestCase, mock_action.side_effect = None fake_plugin.add_router_interface(q_ctx, t_router_id, {'subnet_id': t_subnet_id}) - # bottom dhcp port, bottom interface and bridge port - self.assertEqual(3, len(BOTTOM1_PORTS)) + # bottom dhcp port and bridge port + self.assertEqual(2, len(BOTTOM1_PORTS)) with 
t_ctx.session.begin(): entries = core.query_resource(t_ctx, models.ResourceRouting, [{'key': 'resource_type', 'comparator': 'eq', 'value': 'port'}], []) - # three more entries, for top and bottom dhcp ports and - # bottom interface - self.assertEqual(entry_num + 5, len(entries)) + # three more entries, for top and bottom dhcp ports, top interface + self.assertEqual(entry_num + 2 + 3, len(entries)) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeBaseRPCAPI, 'configure_extra_routes', new=mock.Mock) - @patch.object(FakeClient, 'delete_ports') - @patch.object(FakeClient, 'add_interface_routers') + @patch.object(FakeClient, '_get_connection') @patch.object(context, 'get_context_from_neutron_context') def test_add_interface_exception_port_left(self, mock_context, - mock_action, mock_delete): + mock_connect): self._basic_pod_route_setup() fake_plugin = FakePlugin() @@ -1552,34 +1660,32 @@ class PluginTest(unittest.TestCase, mock_context.return_value = t_ctx tenant_id = 'test_tenant_id' - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) - mock_action.side_effect = q_exceptions.ConnectionFailed - mock_delete.side_effect = q_exceptions.ConnectionFailed + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) + mock_connect.side_effect = q_exceptions.ConnectionFailed self.assertRaises(q_exceptions.ConnectionFailed, fake_plugin.add_router_interface, q_ctx, t_router_id, {'subnet_id': t_subnet_id}) # top interface is removed self.assertEqual(0, 
len(TOP_ROUTERS[0]['attached_ports'])) - mock_action.side_effect = None - mock_delete.side_effect = None + mock_connect.side_effect = None # test that we can success when bottom pod comes back fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id}) # bottom dhcp port, bottom interface and bridge port self.assertEqual(3, len(BOTTOM1_PORTS)) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeBaseRPCAPI, 'configure_extra_routes') - @patch.object(FakeClient, 'action_routers') + @patch.object(FakeClient, 'remove_interface_routers') @patch.object(context, 'get_context_from_neutron_context') - def test_remove_interface(self, mock_context, mock_action, mock_rpc): + def test_remove_interface(self, mock_context, mock_remove, mock_rpc): self._basic_pod_route_setup() fake_plugin = FakePlugin() @@ -1588,20 +1694,24 @@ class PluginTest(unittest.TestCase, mock_context.return_value = t_ctx tenant_id = 'test_tenant_id' - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) t_port_id = fake_plugin.add_router_interface( q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id'] _, b_router_id = db_api.get_bottom_mappings_by_top_id( t_ctx, t_router_id, constants.RT_ROUTER)[0] - _, b_port_id = db_api.get_bottom_mappings_by_top_id( - t_ctx, t_port_id, constants.RT_PORT)[0] + + for port in BOTTOM1_PORTS: + if port['network_id'] == b_net_id and ( + port['device_owner'] == 'network:router_interface'): + 
b_interface_id = port['id'] fake_plugin.remove_router_interface( q_ctx, t_router_id, {'port_id': t_port_id}) - mock_action.assert_called_with( - t_ctx, 'remove_interface', b_router_id, {'port_id': b_port_id}) + mock_remove.assert_called_with( + t_ctx, b_router_id, {'port_id': b_interface_id}) mock_rpc.assert_called_with(t_ctx, t_router_id) @patch.object(context, 'get_context_from_neutron_context') @@ -1669,14 +1779,13 @@ class PluginTest(unittest.TestCase, t_ctx, top_net['id'], constants.RT_NETWORK) self.assertEqual(mappings[0][1], bottom_net['id']) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', new=fake_make_router_dict) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeClient, 'action_routers') @patch.object(context, 'get_context_from_neutron_context') def test_set_gateway(self, mock_context, mock_action): @@ -1752,14 +1861,13 @@ class PluginTest(unittest.TestCase, {'subnet_id': b_ns_bridge_subnet_id})] mock_action.assert_has_calls(calls) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', new=fake_make_router_dict) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(FakeClient, 'action_routers') @patch.object(context, 
'get_context_from_neutron_context') def test_unset_gateway(self, mock_context, mock_action): @@ -1827,8 +1935,9 @@ class PluginTest(unittest.TestCase, def _prepare_associate_floatingip_test(self, t_ctx, q_ctx, fake_plugin): tenant_id = 'test_tenant_id' self._basic_pod_route_setup() - t_net_id, t_subnet_id, t_router_id = self._prepare_router_test( - tenant_id) + (t_net_id, t_subnet_id, + t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test( + tenant_id, t_ctx, 'pod_1', 1) net_body = { 'name': 'ext_net', @@ -1900,16 +2009,15 @@ class PluginTest(unittest.TestCase, return t_port_id, b_port_id, fip, e_net - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', new=fake_make_router_dict) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip', - new=mock.Mock) + new=update_floatingip) @patch.object(FakeClient, 'create_floatingips') @patch.object(context, 'get_context_from_neutron_context') def test_associate_floatingip(self, mock_context, mock_create): @@ -1945,85 +2053,24 @@ class PluginTest(unittest.TestCase, mock.call(t_ctx, {'floatingip': { 'floating_network_id': b_bridge_net_id, - 'floating_ip_address': '100.128.0.2', + 'floating_ip_address': '100.128.0.3', 'port_id': b_port_id}})] mock_create.assert_has_calls(calls) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(l3_db.L3_NAT_dbonly_mixin, 
'_make_router_dict', new=fake_make_router_dict) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip', - new=mock.Mock) - @patch.object(FakeClient, 'create_floatingips') - @patch.object(context, 'get_context_from_neutron_context') - def test_associate_floatingip_port_not_bound(self, mock_context, - mock_create): - fake_plugin = FakePlugin() - q_ctx = FakeNeutronContext() - t_ctx = context.get_db_context() - mock_context.return_value = t_ctx - - (t_port_id, b_port_id, - fip, e_net) = self._prepare_associate_floatingip_test(t_ctx, q_ctx, - fake_plugin) - # remove bottom port for this test case - for port in BOTTOM1_PORTS: - if port['id'] == b_port_id: - BOTTOM1_PORTS.remove(port) - break - filters = [{'key': 'top_id', 'comparator': 'eq', 'value': t_port_id}] - with t_ctx.session.begin(): - core.delete_resources(t_ctx, models.ResourceRouting, filters) - - # associate floating ip - fip_body = {'port_id': t_port_id} - fake_plugin.update_floatingip(q_ctx, fip['id'], - {'floatingip': fip_body}) - - b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, e_net['id'], 'pod_2', constants.RT_NETWORK) - b_port_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, t_port_id, 'pod_1', constants.RT_PORT) - for port in BOTTOM2_PORTS: - if port['name'] == 'ns_bridge_port': - ns_bridge_port = port - for net in TOP_NETS: - if net['name'].startswith('ns_bridge'): - b_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name( - t_ctx, net['id'], 'pod_1', constants.RT_NETWORK) - calls = [mock.call(t_ctx, - {'floatingip': { - 'floating_network_id': b_ext_net_id, - 'floating_ip_address': fip[ - 'floating_ip_address'], - 'port_id': ns_bridge_port['id']}}), - mock.call(t_ctx, - {'floatingip': { - 'floating_network_id': b_bridge_net_id, - 'floating_ip_address': '100.128.0.2', - 'port_id': 
b_port_id}})] - mock_create.assert_has_calls(calls) - - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, - '_allocate_ips_for_port', new=fake_allocate_ips_for_port) - @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', - new=fake_make_router_dict) - @patch.object(db_base_plugin_common.DbBasePluginCommon, - '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) - @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip', - new=mock.Mock) + new=update_floatingip) @patch.object(FakePlugin, '_rollback_floatingip_data') - @patch.object(FakeClient, 'create_floatingips') + @patch.object(FakeRPCAPI, 'setup_bottom_router') @patch.object(context, 'get_context_from_neutron_context') def test_associate_floatingip_port_exception( - self, mock_context, mock_create, mock_rollback): + self, mock_context, mock_setup, mock_rollback): fake_plugin = FakePlugin() q_ctx = FakeNeutronContext() t_ctx = context.get_db_context() @@ -2034,7 +2081,9 @@ class PluginTest(unittest.TestCase, fake_plugin) # associate floating ip and exception occurs - mock_create.side_effect = q_exceptions.ConnectionFailed + # actually we will not get this exception when calling + # setup_bottom_router, we set this exception for test purpose + mock_setup.side_effect = q_exceptions.ConnectionFailed fip_body = {'port_id': t_port_id} self.assertRaises(q_exceptions.ConnectionFailed, fake_plugin.update_floatingip, q_ctx, fip['id'], @@ -2043,19 +2092,14 @@ class PluginTest(unittest.TestCase, 'fixed_ip_address': None, 'router_id': None} mock_rollback.assert_called_once_with(q_ctx, fip['id'], data) - # check the association information is cleared - self.assertIsNone(TOP_FLOATINGIPS[0]['fixed_port_id']) - self.assertIsNone(TOP_FLOATINGIPS[0]['fixed_ip_address']) - self.assertIsNone(TOP_FLOATINGIPS[0]['router_id']) - @patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend, + @patch.object(driver.Pool, 
'get_instance', new=fake_get_instance) + @patch.object(ipam_pluggable_backend.IpamPluggableBackend, '_allocate_ips_for_port', new=fake_allocate_ips_for_port) @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict', new=fake_make_router_dict) @patch.object(db_base_plugin_common.DbBasePluginCommon, '_make_subnet_dict', new=fake_make_subnet_dict) - @patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool', - new=mock.Mock) @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip', new=update_floatingip) @patch.object(FakeClient, 'delete_floatingips') diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_server.py b/tricircle/tests/unit/nova_apigw/controllers/test_server.py index 0dd87ce..8640fc5 100644 --- a/tricircle/tests/unit/nova_apigw/controllers/test_server.py +++ b/tricircle/tests/unit/nova_apigw/controllers/test_server.py @@ -59,6 +59,18 @@ RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_SGS, BOTTOM_SERVERS, BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_SGS] +def _get_ip_suffix(): + # four elements are enough currently + suffix_list = ['3', '4', '5', '6'] + index = 0 + while True: + yield suffix_list[index] + index += 1 + index %= 4 + +ip_suffix = _get_ip_suffix() + + class FakeException(Exception): pass @@ -108,7 +120,6 @@ class FakeClient(object): if not pod_name: pod_name = 't_region' self.pod_name = pod_name - self.ip_suffix_gen = self._get_ip_suffix() def _get_res_list(self, _type): if self.pod_name == 'b_region_2': @@ -140,11 +151,11 @@ class FakeClient(object): cidr = subnet['cidr'] ip_prefix = cidr[:cidr.rindex('.') + 1] mac_prefix = 'fa:16:3e:96:41:0' - if 'device_owner' in body['port']: + if body['port'].get('device_owner') == 'network:dhcp': ip = ip_prefix + '2' body['port']['mac_address'] = mac_prefix + '2' else: - suffix = self.ip_suffix_gen.next() + suffix = ip_suffix.next() ip = ip_prefix + suffix body['port']['mac_address'] = mac_prefix + suffix fixed_ip_list.append({'ip_address': ip, @@ -194,16 +205,6 @@ class 
FakeClient(object): ret_list.append(res) return ret_list - @staticmethod - def _get_ip_suffix(): - # three elements should be enough - suffix_list = ['3', '4', '5'] - index = 0 - while True: - yield suffix_list[index] - index += 1 - index %= 3 - def create_ports(self, ctx, body): return self.create_resources('port', ctx, body) @@ -418,25 +419,31 @@ class ServerTest(unittest.TestCase): 'top_net_id', 'network') self.assertEqual(0, len(mappings)) - def _check_routes(self): + def _check_routes(self, b_pod): for res in (TOP_NETS, TOP_SUBNETS, BOTTOM_NETS, BOTTOM_SUBNETS): self.assertEqual(1, len(res)) enable_dhcp = TOP_SUBNETS[0]['enable_dhcp'] self.assertEqual(enable_dhcp, BOTTOM_SUBNETS[0]['enable_dhcp']) - port_num = 2 if enable_dhcp else 1 - self.assertEqual(port_num, len(TOP_PORTS)) - self.assertEqual(port_num, len(BOTTOM_PORTS)) + # top vm port, top interface port, top dhcp port + t_port_num = 3 if enable_dhcp else 2 + # bottom vm port, bottom dhcp port + b_port_num = 2 if enable_dhcp else 1 + self.assertEqual(t_port_num, len(TOP_PORTS)) + self.assertEqual(b_port_num, len(BOTTOM_PORTS)) with self.context.session.begin(): routes = core.query_resource(self.context, models.ResourceRouting, [], []) # bottom network, bottom subnet, bottom port, no top dhcp and bottom # dhcp if dhcp disabled - entry_num = 5 if enable_dhcp else 3 + entry_num = 6 if enable_dhcp else 4 self.assertEqual(entry_num, len(routes)) - actual = [[], [], []] - if entry_num > 3: + actual = [[], [], [], []] + actual[3].append(constants.interface_port_name % ( + b_pod['pod_id'], TOP_SUBNETS[0]['id'])) + if entry_num > 4: actual.extend([[], []]) + actual[5].append(constants.dhcp_port_name % TOP_SUBNETS[0]['id']) for region in ('t_region', 'b_region'): actual[0].append(self.controller._get_client( @@ -445,24 +452,22 @@ class ServerTest(unittest.TestCase): region).list_resources('subnet', self.context, [])[0]['id']) ports = self.controller._get_client( region).list_resources('port', self.context, []) - 
if 'device_id' not in ports[0]: - actual[2].append(ports[0]['id']) - else: - actual[2].append(ports[1]['id']) - if entry_num > 3: - actual[4].append(constants.dhcp_port_name % TOP_SUBNETS[0]['id']) - for region in ('t_region', 'b_region'): - ports = self.controller._get_client( - region).list_resources('port', self.context, []) - if 'device_id' in ports[0]: - actual[3].append(ports[0]['id']) - if region == 't_region': - actual[4].append(ports[0]['id']) + for port in ports: + if port.get('device_id'): + dhcp_port_id = port['id'] + elif port.get('device_owner'): + gateway_port_id = port['id'] else: - actual[3].append(ports[1]['id']) - if region == 't_region': - actual[4].append(ports[1]['id']) + vm_port_id = port['id'] + + actual[2].append(vm_port_id) + if region == 't_region': + actual[3].append(gateway_port_id) + if entry_num > 4: + actual[4].append(dhcp_port_id) + if region == 't_region': + actual[5].append(dhcp_port_id) expect = [[route['top_id'], route['bottom_id']] for route in routes] self.assertItemsEqual(expect, actual) @@ -475,13 +480,13 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': True} TOP_NETS.append(net) TOP_SUBNETS.append(subnet) self.controller._handle_network(self.context, b_pod, net, [subnet]) - self._check_routes() + self._check_routes(b_pod) def test_handle_network_dhcp_disable(self): t_pod, b_pod = self._prepare_pod() @@ -491,13 +496,13 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': False} TOP_NETS.append(net) TOP_SUBNETS.append(subnet) self.controller._handle_network(self.context, b_pod, net, [subnet]) - self._check_routes() + 
self._check_routes(b_pod) def test_handle_port(self): t_pod, b_pod = self._prepare_pod() @@ -507,21 +512,21 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': True} port = { 'id': 'top_port_id', 'network_id': 'top_net_id', - 'mac_address': 'fa:16:3e:96:41:03', + 'mac_address': 'fa:16:3e:96:41:07', 'fixed_ips': [{'subnet_id': 'top_subnet_id', - 'ip_address': '10.0.0.3'}] + 'ip_address': '10.0.0.7'}] } TOP_NETS.append(net) TOP_SUBNETS.append(subnet) TOP_PORTS.append(port) self.controller._handle_port(self.context, b_pod, port) - self._check_routes() + self._check_routes(b_pod) @patch.object(pecan, 'response', new=FakeResponse) @patch.object(FakeClient, 'create_servers') @@ -537,8 +542,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': True} t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', 'tenant_id': self.project_id, @@ -586,11 +591,6 @@ class ServerTest(unittest.TestCase): res = self.controller.post(**body) self._validate_error_code(res, 400) - # update top net for test purpose, correct az and wrong az - TOP_NETS[0]['availability_zone_hints'] = ['b_az', 'fake_az'] - res = self.controller.post(**body) - self._validate_error_code(res, 400) - @patch.object(pecan, 'response', new=FakeResponse) @patch.object(FakeClient, 'create_servers') @patch.object(context, 'extract_context_from_environ') @@ -606,8 +606,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': 
'10.0.0.254'}], 'enable_dhcp': True} t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', 'tenant_id': self.project_id, @@ -702,8 +702,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': True} t_sg = {'id': top_sg_id, 'name': 'test_sg', 'description': '', 'tenant_id': self.project_id, @@ -794,8 +794,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.1.0/24', 'gateway_ip': '10.0.1.1', - 'allocation_pools': {'start': '10.0.1.2', - 'end': '10.0.1.254'}, + 'allocation_pools': [{'start': '10.0.1.2', + 'end': '10.0.1.254'}], 'enable_dhcp': True} t_net2 = {'id': top_net2_id, 'name': 'net2'} t_subnet2 = {'id': top_subnet2_id, @@ -804,8 +804,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.2.0/24', 'gateway_ip': '10.0.2.1', - 'allocation_pools': {'start': '10.0.2.2', - 'end': '10.0.2.254'}, + 'allocation_pools': [{'start': '10.0.2.2', + 'end': '10.0.2.254'}], 'enable_dhcp': True} t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', 'tenant_id': self.project_id, @@ -973,6 +973,77 @@ class ServerTest(unittest.TestCase): res['Error']['message']) self.assertEqual(404, res['Error']['code']) + @patch.object(pecan, 'response', new=FakeResponse) + @patch.object(xrpcapi.XJobAPI, 'setup_bottom_router') + @patch.object(FakeClient, 'create_servers') + @patch.object(context, 'extract_context_from_environ') + def test_post_l3_involved(self, mock_ctx, mock_create, mock_setup): + t_pod, b_pod = self._prepare_pod(1) + + top_net_id = 'top_net_id' + top_subnet_id = 'top_subnet_id' + top_port_id = 'top_port_id' + top_sg_id = 'top_sg_id' + top_router_id = 'top_router_id' + + t_net = {'id': top_net_id, 'name': 'net'} + t_subnet = {'id': top_subnet_id, + 'network_id': top_net_id, + 'ip_version': 4, + 'cidr': '10.0.0.0/24', + 
'gateway_ip': '10.0.0.1', + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], + 'enable_dhcp': True} + t_port = {'id': top_port_id, + 'network_id': top_net_id, + 'device_id': top_router_id, + 'device_owner': 'network:router_interface', + 'fixed_ips': [{'subnet_id': top_subnet_id, + 'ip_address': '10.0.0.1'}], + 'mac_address': 'fa:16:3e:96:41:03'} + t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', + 'tenant_id': self.project_id, + 'security_group_rules': [ + {'remote_group_id': top_sg_id, + 'direction': 'ingress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + {'remote_group_id': None, + 'direction': 'egress', + 'remote_ip_prefix': None, + 'protocol': None, + 'port_range_max': None, + 'port_range_min': None, + 'ethertype': 'IPv4'}, + ]} + TOP_NETS.append(t_net) + TOP_SUBNETS.append(t_subnet) + TOP_PORTS.append(t_port) + TOP_SGS.append(t_sg) + + server_name = 'test_server' + image_id = 'image_id' + flavor_id = 1 + body = { + 'server': { + 'name': server_name, + 'imageRef': image_id, + 'flavorRef': flavor_id, + 'availability_zone': b_pod['az_name'], + 'networks': [{'port': top_port_id}] + } + } + mock_create.return_value = {'id': 'bottom_server_id'} + mock_ctx.return_value = self.context + + self.controller.post(**body)['server'] + mock_setup.assert_called_with(self.context, top_net_id, top_router_id, + b_pod['pod_id']) + @patch.object(pecan, 'response', new=FakeResponse) def test_process_injected_file_quota(self): ctx = self.context.elevated() @@ -1132,8 +1203,8 @@ class ServerTest(unittest.TestCase): 'ip_version': 4, 'cidr': '10.0.0.0/24', 'gateway_ip': '10.0.0.1', - 'allocation_pools': {'start': '10.0.0.2', - 'end': '10.0.0.254'}, + 'allocation_pools': [{'start': '10.0.0.2', + 'end': '10.0.0.254'}], 'enable_dhcp': True} t_sg = {'id': top_sg_id, 'name': 'default', 'description': '', 'tenant_id': self.project_id, diff --git 
a/tricircle/tests/unit/xjob/test_xmanager.py b/tricircle/tests/unit/xjob/test_xmanager.py index 4e0d506..b5c096c 100644 --- a/tricircle/tests/unit/xjob/test_xmanager.py +++ b/tricircle/tests/unit/xjob/test_xmanager.py @@ -138,6 +138,13 @@ class XManagerTest(unittest.TestCase): 'fixed_ips': [{'subnet_id': subnet['id'], 'ip_address': subnet['gateway_ip']}] } + vm_port = { + 'network_id': network['id'], + 'device_id': 'vm%d_id' % i, + 'device_owner': 'compute:None', + 'fixed_ips': [{'subnet_id': subnet['id'], + 'ip_address': '10.0.%d.3' % i}] + } bridge_port = { 'network_id': bridge_network['id'], 'device_id': router['id'], @@ -151,6 +158,7 @@ class XManagerTest(unittest.TestCase): RES_MAP[pod_name]['subnet'].append(subnet) RES_MAP[pod_name]['subnet'].append(bridge_subnet) RES_MAP[pod_name]['port'].append(port) + RES_MAP[pod_name]['port'].append(vm_port) RES_MAP[pod_name]['port'].append(bridge_port) RES_MAP[pod_name]['router'].append(router) @@ -169,20 +177,35 @@ class XManagerTest(unittest.TestCase): 'device_owner': 'network:router_interface', 'fixed_ips': [{'subnet_id': 'subnet_3_id', 'ip_address': '10.0.3.1'}]}) + BOTTOM1_PORT.append({'network_id': 'network_3_id', + 'device_id': 'vm3_id', + 'device_owner': 'compute:None', + 'fixed_ips': [{'subnet_id': 'subnet_3_id', + 'ip_address': '10.0.3.3'}]}) self.xmanager.configure_extra_routes(self.context, payload={'router': top_router_id}) calls = [mock.call(self.context, 'router_1_id', {'router': { 'routes': [{'nexthop': '100.0.1.2', - 'destination': '10.0.2.0/24'}]}}), + 'destination': '10.0.2.3/32'}]}}), mock.call(self.context, 'router_2_id', {'router': { 'routes': [{'nexthop': '100.0.1.1', - 'destination': '10.0.1.0/24'}, + 'destination': '10.0.1.3/32'}, {'nexthop': '100.0.1.1', - 'destination': '10.0.3.0/24'}]}})] - mock_update.assert_has_calls(calls) + 'destination': '10.0.3.3/32'}]}}), + mock.call(self.context, 'router_2_id', + {'router': { + 'routes': [{'nexthop': '100.0.1.1', + 'destination': '10.0.3.3/32'}, + 
{'nexthop': '100.0.1.1', + 'destination': '10.0.1.3/32'}]}})] + + called = mock_update.call_args_list[1] == calls[1] + called = called or (mock_update.call_args_list[1] == calls[2]) + called = called and (mock_update.call_args_list[0] == calls[0]) + self.assertTrue(called) def test_job_handle(self): @xmanager._job_handle('fake_resource') diff --git a/tricircle/xjob/xmanager.py b/tricircle/xjob/xmanager.py index a521f1b..f1f4793 100644 --- a/tricircle/xjob/xmanager.py +++ b/tricircle/xjob/xmanager.py @@ -24,6 +24,8 @@ from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task +import neutronclient.common.exceptions as q_cli_exceptions + from tricircle.common import client from tricircle.common import constants from tricircle.common.i18n import _ @@ -34,6 +36,7 @@ from tricircle.common import xrpcapi import tricircle.db.api as db_api from tricircle.db import core from tricircle.db import models +import tricircle.network.exceptions as t_network_exc from tricircle.network import helper @@ -259,22 +262,43 @@ class XManager(PeriodicTasks): 'job_type': job_type}) self.job_handles[job_type](ctx, payload=payload) - @_job_handle(constants.JT_ROUTER_SETUP) - def setup_bottom_router(self, ctx, payload): - (b_pod_id, - t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#') + @staticmethod + def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id, + fip_address, port_id): + try: + client.create_floatingips( + t_ctx, {'floatingip': {'floating_network_id': fip_net_id, + 'floating_ip_address': fip_address, + 'port_id': port_id}}) + except q_cli_exceptions.IpAddressInUseClient: + fips = client.list_floatingips(t_ctx, + [{'key': 'floating_ip_address', + 'comparator': 'eq', + 'value': fip_address}]) + if not fips: + # this is rare case that we got IpAddressInUseClient exception + # a second ago but now the floating ip is missing + raise t_network_exc.BottomPodOperationFailure( + resource='floating ip', 
pod_name=pod['pod_name']) + associated_port_id = fips[0].get('port_id') + if associated_port_id == port_id: + # if the internal port associated with the existing fip is what + # we expect, just ignore this exception + pass + elif not associated_port_id: + # if the existing fip is not associated with any internal port, + # update the fip to add association + client.update_floatingips(t_ctx, fips[0]['id'], + {'floatingip': {'port_id': port_id}}) + else: + raise - t_client = self._get_client() - t_pod = db_api.get_top_pod(ctx) - b_pod = db_api.get_pod(ctx, b_pod_id) + def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net, + t_router, t_ew_bridge_net, t_ew_bridge_subnet, + need_ns_bridge): b_client = self._get_client(b_pod['pod_name']) - b_az = b_pod['az_name'] - t_router = t_client.get_routers(ctx, t_router_id) - if not t_router: - # we just end this job if top router no longer exists - return - router_body = {'router': {'name': t_router_id, + router_body = {'router': {'name': t_router['id'], 'distributed': False}} project_id = t_router['tenant_id'] @@ -282,89 +306,66 @@ class XManager(PeriodicTasks): _, b_router_id = self.helper.prepare_bottom_element( ctx, project_id, b_pod, t_router, 'router', router_body) + # handle E-W networking # create top E-W bridge port - t_bridge_net_name = constants.ew_bridge_net_name % project_id - t_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id - t_bridge_net = self._get_resource_by_name(t_client, ctx, 'network', - t_bridge_net_name) - t_bridge_subnet = self._get_resource_by_name(t_client, ctx, 'subnet', - t_bridge_subnet_name) - q_cxt = None # no need to pass neutron context when using client - t_bridge_port_id = self.helper.get_bridge_interface( - ctx, q_cxt, project_id, t_pod, t_bridge_net['id'], + q_ctx = None # no need to pass neutron context when using client + t_ew_bridge_port_id = self.helper.get_bridge_interface( + ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'], b_router_id, None, True) # 
create bottom E-W bridge port - t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id) - (is_new, b_bridge_port_id, + t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id) + (is_new, b_ew_bridge_port_id, _, _) = self.helper.get_bottom_bridge_elements( - ctx, project_id, b_pod, t_bridge_net, False, t_bridge_subnet, - t_bridge_port) + ctx, project_id, b_pod, t_ew_bridge_net, False, t_ew_bridge_subnet, + t_ew_bridge_port) # attach bottom E-W bridge port to bottom router if is_new: # only attach bridge port the first time b_client.action_routers(ctx, 'add_interface', b_router_id, - {'port_id': b_bridge_port_id}) + {'port_id': b_ew_bridge_port_id}) else: # still need to check if the bridge port is bound - port = b_client.get_ports(ctx, b_bridge_port_id) + port = b_client.get_ports(ctx, b_ew_bridge_port_id) if not port.get('device_id'): b_client.action_routers(ctx, 'add_interface', b_router_id, - {'port_id': b_bridge_port_id}) + {'port_id': b_ew_bridge_port_id}) # handle N-S networking - ext_nets = t_client.list_networks(ctx, - filters=[{'key': 'router:external', - 'comparator': 'eq', - 'value': True}]) - if not ext_nets: - need_ns_bridge = False - else: - ext_net_pod_names = set( - [ext_net[AZ_HINTS][0] for ext_net in ext_nets]) - if b_pod['pod_name'] in ext_net_pod_names: - need_ns_bridge = False - else: - need_ns_bridge = True if need_ns_bridge: - t_bridge_net_name = constants.ns_bridge_net_name % project_id - t_bridge_subnet_name = constants.ns_bridge_subnet_name % project_id - t_bridge_net = self._get_resource_by_name( - t_client, ctx, 'network', t_bridge_net_name) - t_bridge_subnet = self._get_resource_by_name( - t_client, ctx, 'subnet', t_bridge_subnet_name) + t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id + t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % ( + project_id) + t_ns_bridge_net = self._get_resource_by_name( + t_client, ctx, 'network', t_ns_bridge_net_name) + t_ns_bridge_subnet = self._get_resource_by_name( + 
t_client, ctx, 'subnet', t_ns_bridge_subnet_name) # create bottom N-S bridge network and subnet - (_, _, b_bridge_subnet_id, - b_bridge_net_id) = self.helper.get_bottom_bridge_elements( - ctx, project_id, b_pod, t_bridge_net, True, - t_bridge_subnet, None) - # create top N-S bridge port - ns_bridge_port_id = self.helper.get_bridge_interface( - ctx, q_cxt, project_id, t_pod, t_bridge_net['id'], + (_, _, b_ns_bridge_subnet_id, + b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements( + ctx, project_id, b_pod, t_ns_bridge_net, True, + t_ns_bridge_subnet, None) + # create top N-S bridge gateway port + t_ns_bridge_gateway_id = self.helper.get_bridge_interface( + ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], b_router_id, None, False) - ns_bridge_port = t_client.get_ports(ctx, ns_bridge_port_id) + t_ns_bridge_gateway = t_client.get_ports(ctx, + t_ns_bridge_gateway_id) # add external gateway for bottom router # add gateway is update operation, can run multiple times - gateway_ip = ns_bridge_port['fixed_ips'][0]['ip_address'] + gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address'] b_client.action_routers( ctx, 'add_gateway', b_router_id, - {'network_id': b_bridge_net_id, - 'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id, + {'network_id': b_ns_bridge_net_id, + 'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id, 'ip_address': gateway_ip}]}) # attach internal port to bottom router - t_net = t_client.get_networks(ctx, t_net_id) - if not t_net: - # we just end this job if top network no longer exists - return - net_azs = t_net.get(AZ_HINTS, []) - if net_azs and b_az not in net_azs: - return - t_ports = self._get_router_interfaces(t_client, ctx, t_router_id, - t_net_id) + t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'], + t_net['id']) b_net_id = db_api.get_bottom_id_by_top_id_pod_name( - ctx, t_net_id, b_pod['pod_name'], constants.RT_NETWORK) + ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK) if b_net_id: 
b_ports = self._get_router_interfaces(b_client, ctx, b_router_id, b_net_id) @@ -376,42 +377,209 @@ class XManager(PeriodicTasks): request_body = {'port_id': b_port['id']} b_client.action_routers(ctx, 'remove_interface', b_router_id, request_body) - with ctx.session.begin(): - core.delete_resources(ctx, models.ResourceRouting, - filters=[{'key': 'bottom_id', - 'comparator': 'eq', - 'value': b_port['id']}]) elif t_ports and not b_ports: # create new bottom interface t_port = t_ports[0] + # only consider ipv4 address currently t_subnet_id = t_port['fixed_ips'][0]['subnet_id'] t_subnet = t_client.get_subnets(ctx, t_subnet_id) (b_net_id, subnet_map) = self.helper.prepare_bottom_network_subnets( - ctx, project_id, b_pod, t_net, [t_subnet]) - port_body = self.helper.get_create_port_body( - project_id, t_port, subnet_map, b_net_id) - _, b_port_id = self.helper.prepare_bottom_element( - ctx, project_id, b_pod, t_port, constants.RT_PORT, port_body) + ctx, q_ctx, project_id, b_pod, t_net, [t_subnet]) + + # the gateway ip of bottom subnet is set to the ip of t_port, so + # we just attach the bottom subnet to the bottom router and neutron + # server in the bottom pod will create the interface for us, using + # the gateway ip. b_client.action_routers(ctx, 'add_interface', b_router_id, - {'port_id': b_port_id}) - elif t_ports and b_ports: - # when users remove the interface again, it's possible that top - # interface is removed but deletion of bottom interface fails. 
- # if users add the interface again during the retry of the job, - # we have top and bottom interfaces exist but the id mapping - # in the routing entry is incorrect, so we update it here - t_port = t_ports[0] - b_port = b_ports[0] - with ctx.session.begin(): - core.update_resources(ctx, models.ResourceRouting, - [{'key': 'bottom_id', 'comparator': 'eq', - 'value': b_port['id']}, - {'key': 'pod_id', 'comparator': 'eq', - 'value': b_pod_id} - ], {'top_id': t_port['id']}) + {'subnet_id': subnet_map[t_subnet_id]}) + + if not t_router['external_gateway_info']: + return + + # handle floatingip + t_ext_net_id = t_router['external_gateway_info']['network_id'] + t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id', + 'comparator': 'eq', + 'value': t_ext_net_id}]) + # skip unbound top floatingip + t_ip_fip_map = dict([(fip['floating_ip_address'], + fip) for fip in t_fips if fip['port_id']]) + mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id, + constants.RT_NETWORK) + # bottom external network should exist + b_ext_pod, b_ext_net_id = mappings[0] + b_ext_client = self._get_client(b_ext_pod['pod_name']) + b_fips = b_ext_client.list_floatingips( + ctx, [{'key': 'floating_network_id', 'comparator': 'eq', + 'value': b_ext_net_id}]) + # skip unbound bottom floatingip + b_ip_fip_map = dict([(fip['floating_ip_address'], + fip) for fip in b_fips if fip['port_id']]) + add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map] + del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map] + + for add_fip in add_fips: + fip = t_ip_fip_map[add_fip] + t_int_port_id = fip['port_id'] + b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name( + ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT) + if not b_int_port_id: + LOG.warning(_LW('Port %(port_id)s associated with floating ip ' + '%(fip)s is not mapped to bottom pod'), + {'port_id': t_int_port_id, 'fip': add_fip}) + continue + t_int_port = t_client.get_ports(ctx, t_int_port_id) + if 
t_int_port['network_id'] != t_net['id']: + # only handle floating ip association for the given top network + continue + if need_ns_bridge: + # create top N-S bridge interface port + t_ns_bridge_port_id = self.helper.get_bridge_interface( + ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None, + b_int_port_id, False) + t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id) + b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name( + ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'], + constants.RT_NETWORK) + port_body = { + 'port': { + 'tenant_id': project_id, + 'admin_state_up': True, + 'name': 'ns_bridge_port', + 'network_id': b_ext_bridge_net_id, + 'fixed_ips': [{'ip_address': t_ns_bridge_port[ + 'fixed_ips'][0]['ip_address']}] + } + } + _, b_ns_bridge_port_id = self.helper.prepare_bottom_element( + ctx, project_id, b_ext_pod, t_ns_bridge_port, + constants.RT_PORT, port_body) + self._safe_create_bottom_floatingip( + ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip, + b_ns_bridge_port_id) + self._safe_create_bottom_floatingip( + ctx, b_pod, b_client, b_ns_bridge_net_id, + t_ns_bridge_port['fixed_ips'][0]['ip_address'], + b_int_port_id) + else: + self._safe_create_bottom_floatingip( + ctx, b_pod, b_client, b_ext_net_id, add_fip, + b_int_port_id) + + for del_fip in del_fips: + fip = b_ip_fip_map[del_fip] + if need_ns_bridge: + b_ns_bridge_port = b_ext_client.get_ports(ctx, fip['port_id']) + entries = core.query_resource( + ctx, models.ResourceRouting, + [{'key': 'bottom_id', 'comparator': 'eq', + 'value': b_ns_bridge_port['id']}, + {'key': 'pod_id', 'comparator': 'eq', + 'value': b_ext_pod['pod_id']}], []) + t_ns_bridge_port_id = entries[0]['top_id'] + b_int_fips = b_client.list_floatingips( + ctx, + [{'key': 'floating_ip_address', + 'comparator': 'eq', + 'value': b_ns_bridge_port['fixed_ips'][0]['ip_address']}, + {'key': 'floating_network_id', + 'comparator': 'eq', + 'value': b_ns_bridge_net_id}]) + if b_int_fips: + 
b_client.delete_floatingips(ctx, b_int_fips[0]['id']) + b_ext_client.update_floatingips( + ctx, fip['id'], {'floatingip': {'port_id': None}}) + + # for bridge port, we have two resource routing entries, one + # for bridge port in top pod, another for bridge port in bottom + # pod. calling t_client.delete_ports will delete bridge port in + # bottom pod as well as routing entry for it, but we also need + # to remove routing entry for bridge port in top pod, bridge + # network will be deleted when deleting router + + # first we update the routing entry to set bottom_id to None + # and expire the entry, so if we succeed to delete the bridge + # port next, this expired entry will be deleted; otherwise, we + # fail to delete the bridge port, when the port is accessed via + # lock_handle module, that module will find the port and update + # the entry + with ctx.session.begin(): + core.update_resources( + ctx, models.ResourceRouting, + [{'key': 'bottom_id', 'comparator': 'eq', + 'value': t_ns_bridge_port_id}], + {'bottom_id': None, + 'created_at': constants.expire_time, + 'updated_at': constants.expire_time}) + # delete bridge port + t_client.delete_ports(ctx, t_ns_bridge_port_id) + # delete the expired entry, even if this deletion fails, we + # still have a chance that lock_handle module will delete it + with ctx.session.begin(): + core.delete_resources(ctx, models.ResourceRouting, + [{'key': 'bottom_id', + 'comparator': 'eq', + 'value': t_ns_bridge_port_id}]) + else: + b_client.update_floatingips(ctx, fip['id'], + {'floatingip': {'port_id': None}}) + + @_job_handle(constants.JT_ROUTER_SETUP) + def setup_bottom_router(self, ctx, payload): + (b_pod_id, + t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#') + + if b_pod_id == constants.POD_NOT_SPECIFIED: + mappings = db_api.get_bottom_mappings_by_top_id( + ctx, t_net_id, constants.RT_NETWORK) + b_pods = [mapping[0] for mapping in mappings] + for b_pod in b_pods: + # NOTE(zhiyuan) we create one job for each 
pod to avoid + # conflict caused by different workers operating the same pod + self.xjob_handler.setup_bottom_router( + ctx, t_net_id, t_router_id, b_pod['pod_id']) + return + + t_client = self._get_client() + t_pod = db_api.get_top_pod(ctx) + t_router = t_client.get_routers(ctx, t_router_id) + if not t_router: + # we just end this job if top router no longer exists + return + t_net = t_client.get_networks(ctx, t_net_id) + if not t_net: + # we just end this job if top network no longer exists + return + project_id = t_router['tenant_id'] + + b_pod = db_api.get_pod(ctx, b_pod_id) + + t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id + t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id + t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network', + t_ew_bridge_net_name) + t_ew_bridge_subnet = self._get_resource_by_name( + t_client, ctx, 'subnet', t_ew_bridge_subnet_name) + + ext_nets = t_client.list_networks(ctx, + filters=[{'key': 'router:external', + 'comparator': 'eq', + 'value': True}]) + ext_net_pod_names = set( + [ext_net[AZ_HINTS][0] for ext_net in ext_nets]) + + if not ext_net_pod_names: + need_ns_bridge = False + elif b_pod['pod_name'] in ext_net_pod_names: + need_ns_bridge = False + else: + need_ns_bridge = True + self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net, + t_router, t_ew_bridge_net, + t_ew_bridge_subnet, need_ns_bridge) self.xjob_handler.configure_extra_routes(ctx, t_router_id) @@ -419,23 +587,27 @@ class XManager(PeriodicTasks): def configure_extra_routes(self, ctx, payload): t_router_id = payload[constants.JT_ROUTER] + non_vm_port_types = ['network:router_interface', + 'network:router_gateway', + 'network:dhcp'] + b_pods, b_router_ids = zip(*db_api.get_bottom_mappings_by_top_id( ctx, t_router_id, constants.RT_ROUTER)) router_bridge_ip_map = {} - router_cidr_map = {} + router_ips_map = {} for i, b_pod in enumerate(b_pods): bottom_client = self._get_client(pod_name=b_pod['pod_name']) - 
b_inferfaces = bottom_client.list_ports( + b_interfaces = bottom_client.list_ports( ctx, filters=[{'key': 'device_id', 'comparator': 'eq', 'value': b_router_ids[i]}, {'key': 'device_owner', 'comparator': 'eq', 'value': 'network:router_interface'}]) - cidrs = [] - for b_inferface in b_inferfaces: - ip = b_inferface['fixed_ips'][0]['ip_address'] + router_ips_map[b_router_ids[i]] = {} + for b_interface in b_interfaces: + ip = b_interface['fixed_ips'][0]['ip_address'] ew_bridge_cidr = '100.0.0.0/9' ns_bridge_cidr = '100.128.0.0/9' if netaddr.IPAddress(ip) in netaddr.IPNetwork(ew_bridge_cidr): @@ -443,25 +615,38 @@ class XManager(PeriodicTasks): continue if netaddr.IPAddress(ip) in netaddr.IPNetwork(ns_bridge_cidr): continue + b_net_id = b_interface['network_id'] b_subnet = bottom_client.get_subnets( - ctx, b_inferface['fixed_ips'][0]['subnet_id']) - cidrs.append(b_subnet['cidr']) - router_cidr_map[b_router_ids[i]] = cidrs + ctx, b_interface['fixed_ips'][0]['subnet_id']) + b_ports = bottom_client.list_ports( + ctx, filters=[{'key': 'network_id', + 'comparator': 'eq', + 'value': b_net_id}]) + b_vm_ports = [b_port for b_port in b_ports if b_port.get( + 'device_owner', '') not in non_vm_port_types] + ips = [vm_port['fixed_ips'][0][ + 'ip_address'] for vm_port in b_vm_ports] + router_ips_map[b_router_ids[i]][b_subnet['cidr']] = ips for i, b_router_id in enumerate(b_router_ids): - if b_router_id not in router_bridge_ip_map: - continue bottom_client = self._get_client(pod_name=b_pods[i]['pod_name']) extra_routes = [] - for router_id, cidrs in router_cidr_map.iteritems(): + if not router_ips_map[b_router_id]: + bottom_client.update_routers( + ctx, b_router_id, {'router': {'routes': extra_routes}}) + continue + for router_id, cidr_ips_map in router_ips_map.iteritems(): if router_id == b_router_id: continue - for cidr in cidrs: - extra_routes.append( - {'nexthop': router_bridge_ip_map[router_id], - 'destination': cidr}) - bottom_client.update_routers(ctx, b_router_id, - 
{'router': {'routes': extra_routes}}) + for cidr, ips in cidr_ips_map.iteritems(): + if cidr in router_ips_map[b_router_id]: + continue + for ip in ips: + extra_routes.append( + {'nexthop': router_bridge_ip_map[router_id], + 'destination': ip + '/32'}) + bottom_client.update_routers( + ctx, b_router_id, {'router': {'routes': extra_routes}}) @_job_handle(constants.JT_PORT_DELETE) def delete_server_port(self, ctx, payload):