Support l3 networking in shared vlan network

1. What is the problem
The shared vlan type driver has been merged, so we can run two VMs in
the same network but across two pods. However, if we attach a network
to a router, the Tricircle plugin still checks whether the network is
bound to one AZ, so a network cannot span different pods if we are
going to attach it to a router.

2. What is the solution to the problem
The reason we require a network to be bound to an AZ is that when the
network is attached to a router, we need to know where to create the
bottom network resources. To support L3 networking in a shared vlan
network, we need to remove this restriction.

In the previous patches [1, 2], we have already moved bottom router
setup to an asynchronous job, so we just remove the AZ restriction
and make the Tricircle plugin and the nova_apigw use the job.

Floating ip association and disassociation are also moved to bottom
router setup job.

3. What features need to be implemented in the Tricircle
   to realize the solution
Now a network can be attached to a router without specifying an AZ, so
L3 networking can work with cross-pod networks.

[1] https://review.openstack.org/#/c/343568/
[2] https://review.openstack.org/#/c/345863/

Change-Id: I9aaf908a5de55575d63533f1574a0a6edb3c66b8
This commit is contained in:
zhiyuan_cai 2016-07-28 14:16:07 +08:00
parent 04e317336d
commit 361f7f7a27
10 changed files with 1118 additions and 734 deletions

View File

@ -58,6 +58,7 @@ ns_bridge_subnet_name = 'ns_bridge_subnet_%s' # project_id
ns_bridge_port_name = 'ns_bridge_port_%s_%s_%s'
dhcp_port_name = 'dhcp_port_%s' # subnet_id
interface_port_name = 'interface_%s_%s' # b_pod_id t_subnet_id
MAX_INT = 0x7FFFFFFF
expire_time = datetime.datetime(2000, 1, 1)
@ -70,6 +71,7 @@ JS_Fail = 'Fail'
SP_EXTRA_ID = '00000000-0000-0000-0000-000000000000'
TOP = 'top'
POD_NOT_SPECIFIED = 'not_specified_pod'
# job type
JT_ROUTER = 'router'

View File

@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import exceptions
from neutron_lib import exceptions
from tricircle.common.i18n import _

View File

@ -13,7 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import constants
import neutronclient.common.exceptions as q_cli_exceptions
from tricircle.common import client
import tricircle.common.constants as t_constants
@ -21,9 +24,12 @@ import tricircle.common.context as t_context
import tricircle.common.lock_handle as t_lock
from tricircle.common import utils
import tricircle.db.api as db_api
import tricircle.network.exceptions as t_network_exc
# manually define these constants to avoid depending on neutron repos
# neutron.extensions.availability_zone.AZ_HINTS
AZ_HINTS = 'availability_zone_hints'
EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL
TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN
@ -210,22 +216,44 @@ class NetworkHelper(object):
return body
@staticmethod
def get_create_subnet_body(project_id, t_subnet, b_net_id):
def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip):
"""Get request body to create bottom subnet
:param project_id: project id
:param t_subnet: top subnet dict
:param b_net_id: bottom network id
:param gateway_ip: bottom gateway ip
:return: request body to create bottom subnet
"""
pools = t_subnet['allocation_pools']
new_pools = []
g_ip = netaddr.IPAddress(gateway_ip)
ip_found = False
for pool in pools:
if ip_found:
new_pools.append({'start': pool['start'],
'end': pool['end']})
continue
ip_range = netaddr.IPRange(pool['start'], pool['end'])
ip_num = len(ip_range)
for i, ip in enumerate(ip_range):
if g_ip == ip:
ip_found = True
if i > 0:
new_pools.append({'start': ip_range[0].format(),
'end': ip_range[i - 1].format()})
if i < ip_num - 1:
new_pools.append(
{'start': ip_range[i + 1].format(),
'end': ip_range[ip_num - 1].format()})
body = {
'subnet': {
'network_id': b_net_id,
'name': t_subnet['id'],
'ip_version': t_subnet['ip_version'],
'cidr': t_subnet['cidr'],
'gateway_ip': t_subnet['gateway_ip'],
'allocation_pools': t_subnet['allocation_pools'],
'gateway_ip': gateway_ip,
'allocation_pools': new_pools,
'enable_dhcp': False,
'tenant_id': project_id
}
@ -264,11 +292,40 @@ class NetworkHelper(object):
body['port']['security_groups'] = b_security_group_ids
return body
def prepare_bottom_network_subnets(self, t_ctx, project_id, pod,
def get_create_interface_body(self, project_id, t_net_id, b_pod_id,
                              t_subnet_id):
    """Build the request body used to create a top interface port.

    :param project_id: project id
    :param t_net_id: top network id
    :param b_pod_id: bottom pod id
    :param t_subnet_id: top subnet id
    :return: request body to create the top interface port
    """
    port_name = t_constants.interface_port_name % (b_pod_id, t_subnet_id)
    port_attrs = {
        'tenant_id': project_id,
        'admin_state_up': True,
        'name': port_name,
        'network_id': t_net_id,
        'device_id': '',
        'device_owner': 'network:router_interface',
    }
    # when going through a plugin object rather than a client, these
    # attributes must be explicitly marked as unspecified
    if self.call_obj:
        port_attrs['mac_address'] = constants.ATTR_NOT_SPECIFIED
        port_attrs['fixed_ips'] = constants.ATTR_NOT_SPECIFIED
    return {'port': port_attrs}
def prepare_bottom_network_subnets(self, t_ctx, q_ctx, project_id, pod,
t_net, t_subnets):
"""Get or create bottom network, subnet and dhcp port
:param t_ctx: tricircle context
:param q_ctx: neutron context
:param project_id: project id
:param pod: dict of bottom pod
:param t_net: dict of top network
@ -295,8 +352,22 @@ class NetworkHelper(object):
subnet_dhcp_map = {}
for subnet in t_subnets:
# gateway
t_interface_name = t_constants.interface_port_name % (
pod['pod_id'], subnet['id'])
t_interface_body = self.get_create_interface_body(
project_id, t_net['id'], pod['pod_id'], subnet['id'])
_, t_interface_id = self.prepare_top_element(
t_ctx, q_ctx, project_id, pod, {'id': t_interface_name},
t_constants.RT_PORT, t_interface_body)
t_interface = self._get_top_element(
t_ctx, q_ctx, t_constants.RT_PORT, t_interface_id)
gateway_ip = t_interface['fixed_ips'][0]['ip_address']
subnet_body = self.get_create_subnet_body(
project_id, subnet, b_net_id)
project_id, subnet, b_net_id, gateway_ip)
_, b_subnet_id = self.prepare_bottom_element(
t_ctx, project_id, pod, subnet, t_constants.RT_SUBNET,
subnet_body)
@ -445,3 +516,40 @@ class NetworkHelper(object):
project_id, t_dhcp_port, b_subnet_id, b_net_id)
self.prepare_bottom_element(ctx, project_id, b_pod, t_dhcp_port,
t_constants.RT_PORT, dhcp_port_body)
@staticmethod
def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
                                   fip_address, port_id):
    """Create a floating ip in a bottom pod, tolerating an existing one.

    :param t_ctx: tricircle context
    :param pod: dict of the bottom pod (used for the failure report)
    :param client: client of the bottom pod
    :param fip_net_id: bottom external network id
    :param fip_address: exact floating ip address to allocate
    :param port_id: bottom internal port id to associate
    :raises BottomPodOperationFailure: when the address is reported in
        use but no matching floating ip can be found afterwards
    """
    try:
        client.create_floatingips(
            t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
                                   'floating_ip_address': fip_address,
                                   'port_id': port_id}})
    except q_cli_exceptions.IpAddressInUseClient:
        # address already taken: look up the existing floating ip to
        # decide whether the conflict is benign
        fips = client.list_floatingips(t_ctx,
                                       [{'key': 'floating_ip_address',
                                         'comparator': 'eq',
                                         'value': fip_address}])
        if not fips:
            # this is rare case that we got IpAddressInUseClient exception
            # a second ago but now the floating ip is missing
            raise t_network_exc.BottomPodOperationFailure(
                resource='floating ip', pod_name=pod['pod_name'])
        associated_port_id = fips[0].get('port_id')
        if associated_port_id == port_id:
            # the internal port associated with the existing fip is what
            # we expect, just ignore this exception
            pass
        elif not associated_port_id:
            # the existing fip is not associated with any internal port,
            # update the fip to add association
            client.update_floatingips(t_ctx, fips[0]['id'],
                                      {'floatingip': {'port_id': port_id}})
        else:
            # the fip is bound to some other port: re-raise the original
            # conflict so the caller can handle it
            raise
def _get_top_element(self, t_ctx, q_ctx, _type, _id):
    """Fetch a top-pod resource via the plugin object or the client.

    With a plugin object available, its singular ``get_<type>`` getter is
    called with the neutron context; otherwise the top-pod client's
    pluralised ``get_<type>s`` getter is called with the tricircle context.
    """
    if not self.call_obj:
        getter = getattr(self._get_client(), 'get_%ss' % _type)
        return getter(t_ctx, _id)
    return getattr(self.call_obj, 'get_%s' % _type)(q_ctx, _id)

View File

@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
import oslo_log.helpers as log_helpers
from oslo_log import log
@ -34,7 +36,6 @@ from neutron.extensions import availability_zone as az_ext
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import providernet as provider
import neutron.plugins.common.constants as p_constants
from neutron_lib import constants
import neutronclient.common.exceptions as q_cli_exceptions
@ -403,6 +404,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
bottom_port_id = mappings[0][1]
port = self._get_client(pod_name).get_ports(
t_ctx, bottom_port_id)
# TODO(zhiyuan) handle the case that bottom port does not exist
port['id'] = port_id
if fields:
port = dict(
@ -679,20 +681,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
def delete_router(self, context, _id):
super(TricirclePlugin, self).delete_router(context, _id)
def _judge_network_across_pods(self, context, interface, add_by_port):
    """Resolve the interface's network and enforce the single-AZ rule.

    :param context: neutron context
    :param interface: interface info dict carrying either 'port_id' or
        'subnet_id'
    :param add_by_port: True when the interface is specified by port
    :return: tuple of (the network's single AZ hint, top network dict)
    :raises Exception: when the network is not bound to exactly one AZ,
        since cross-pod L3 networking is not supported by this code path
    """
    if add_by_port:
        port = self.get_port(context, interface['port_id'])
        net_id = port['network_id']
    else:
        subnet = self.get_subnet(context, interface['subnet_id'])
        net_id = subnet['network_id']
    network = self.get_network(context, net_id)
    if len(network.get(az_ext.AZ_HINTS, [])) != 1:
        # Currently not support cross pods l3 networking so
        # raise an exception here
        raise Exception('Cross pods L3 networking not support')
    return network[az_ext.AZ_HINTS][0], network
def _prepare_top_element(self, t_ctx, q_ctx,
project_id, pod, ele, _type, body):
return self.helper.prepare_top_element(
@ -783,17 +771,24 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
b_port_id, is_ew)
return super(TricirclePlugin, self).get_port(q_ctx, port_id)
@staticmethod
def _transfer_network_type(network_type):
    """Translate a tricircle network type to the neutron provider type.

    Types without an explicit mapping are returned unchanged.
    """
    if network_type == t_constants.NT_SHARED_VLAN:
        return p_constants.TYPE_VLAN
    return network_type
def _get_bottom_bridge_elements(self, q_ctx, project_id,
pod, t_net, is_external, t_subnet, t_port):
t_ctx = t_context.get_context_from_neutron_context(q_ctx)
return self.helper.get_bottom_bridge_elements(
t_ctx, project_id, pod, t_net, is_external, t_subnet, t_port)
def _get_net_pods_by_interface_info(self, t_ctx, q_ctx, add_by_port,
                                    interface_info):
    """Find the interface's top network and its mapped bottom pods.

    :param t_ctx: tricircle context
    :param q_ctx: neutron context
    :param add_by_port: True when the interface is specified by port
    :param interface_info: dict carrying either 'port_id' or 'subnet_id'
    :return: tuple of (top network id, list of bottom pods where the
        network already has a bottom mapping)
    """
    if add_by_port:
        port = self.get_port(q_ctx, interface_info['port_id'])
        net_id = port['network_id']
    else:
        subnet = self.get_subnet(q_ctx, interface_info['subnet_id'])
        net_id = subnet['network_id']
    mappings = db_api.get_bottom_mappings_by_top_id(
        t_ctx, net_id, t_constants.RT_NETWORK)
    # each mapping is a (pod, bottom_id) pair; only the pods are needed
    return net_id, [mapping[0] for mapping in mappings]
# NOTE(zhiyuan) the origin implementation in l3_db uses port returned from
# get_port in core plugin to check, change it to base plugin, since only
# top port information should be checked.
@ -808,14 +803,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
raise exceptions.BadRequest(resource='router', msg=msg)
return port
def _unbound_top_interface(self, context, router_id, port_id):
    """Detach a top interface port from its router.

    Clears the port's device fields via the base plugin (bypassing
    router-aware port logic) and deletes the RouterPort row linking the
    port to the router.
    """
    super(TricirclePlugin, self).update_port(
        context, port_id, {'port': {'device_id': '',
                                    'device_owner': ''}})
    with context.session.begin():
        query = context.session.query(l3_db.RouterPort)
        query.filter_by(port_id=port_id, router_id=router_id).delete()
def _add_router_gateway(self, context, router_id, router_data):
# get top external network information
ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id')
@ -918,15 +905,17 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
b_client = self._get_client(pod_name)
b_client.action_routers(t_ctx, 'remove_gateway', b_router_id)
def _update_bottom_router_gateway(self, context, router_id, router_data):
    """Apply a gateway update to the bottom router.

    A gateway info carrying a network id means setting the gateway;
    one without a network id means clearing it.
    """
    ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id')
    if ext_net_id:
        self._add_router_gateway(context, router_id, router_data)
    else:
        self._remove_router_gateway(context, router_id)
def update_router(self, context, router_id, router):
router_data = router['router']
# TODO(zhiyuan) handle the case that SNAT is disabled
# and check if bridge network solution works with IPv6
router_data = copy.deepcopy(router['router'])
need_update_bottom = False
is_add = False
if attributes.is_attr_set(router_data.get(l3.EXTERNAL_GW_INFO)):
need_update_bottom = True
ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id')
if ext_net_id:
is_add = True
# TODO(zhiyuan) solve ip address conflict issue
# if user creates floating ip before set router gateway, we may trigger
# ip address conflict here. let's say external cidr is 163.3.124.0/24,
@ -938,10 +927,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
#
# before this issue is solved, user should set router gateway before
# create floating ip.
if attributes.is_attr_set(router_data.get(l3.EXTERNAL_GW_INFO)):
self._update_bottom_router_gateway(context, router_id, router_data)
return super(TricirclePlugin, self).update_router(context, router_id,
router)
if not need_update_bottom:
return super(TricirclePlugin, self).update_router(
context, router_id, router)
if is_add:
ret = super(TricirclePlugin, self).update_router(
context, router_id, router)
router_data[l3.EXTERNAL_GW_INFO].update(ret[l3.EXTERNAL_GW_INFO])
self._add_router_gateway(context, router_id, router_data)
return ret
else:
self._remove_router_gateway(context, router_id)
return super(TricirclePlugin, self).update_router(
context, router_id, router)
def add_router_interface(self, context, router_id, interface_info):
t_ctx = t_context.get_context_from_neutron_context(context)
@ -949,11 +947,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
router = self._get_router(context, router_id)
project_id = router['tenant_id']
add_by_port, _ = self._validate_interface_info(interface_info)
# make sure network not crosses pods
# TODO(zhiyuan) support cross-pod tenant network
az, t_net = self._judge_network_across_pods(
context, interface_info, add_by_port)
b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id)
net_id, b_pods = self._get_net_pods_by_interface_info(
t_ctx, context, add_by_port, interface_info)
t_pod = db_api.get_top_pod(t_ctx)
assert t_pod
@ -970,10 +966,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
else:
ext_net_pod_names = set(
[ext_net[az_ext.AZ_HINTS][0] for ext_net in ext_nets])
if b_pod['pod_name'] in ext_net_pod_names:
need_ns_bridge = False
else:
need_ns_bridge = True
need_ns_bridge = False
for b_pod in b_pods:
if b_pod['pod_name'] not in ext_net_pod_names:
need_ns_bridge = True
break
if need_ns_bridge:
pool_id = self._get_bridge_subnet_pool_id(
t_ctx, context, None, t_pod, False)
@ -982,9 +979,15 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
return_info = super(TricirclePlugin, self).add_router_interface(
context, router_id, interface_info)
if not b_pods:
return return_info
try:
self.xjob_handler.setup_bottom_router(
t_ctx, t_net['id'], router_id, b_pod['pod_id'])
if len(b_pods) == 1:
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, router_id, b_pods[0]['pod_id'])
else:
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED)
except Exception:
# NOTE(zhiyuan) we fail to submit the job, so bottom router
# operations are not started, it's safe for us to remove the top
@ -1003,32 +1006,35 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
def remove_router_interface(self, context, router_id, interface_info):
t_ctx = t_context.get_context_from_neutron_context(context)
router = self._get_router(context, router_id)
project_id = router['tenant_id']
add_by_port, _ = self._validate_interface_info(interface_info,
for_removal=True)
# make sure network not crosses pods
# TODO(zhiyuan) support cross-pod tenant network
az, t_net = self._judge_network_across_pods(
context, interface_info, add_by_port)
b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id)
net_id, b_pods = self._get_net_pods_by_interface_info(
t_ctx, context, add_by_port, interface_info)
return_info = super(TricirclePlugin, self).remove_router_interface(
context, router_id, interface_info)
if not b_pods:
return return_info
try:
self.xjob_handler.setup_bottom_router(
t_ctx, t_net['id'], router_id, b_pod['pod_id'])
if len(b_pods) == 1:
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, router_id, b_pods[0]['pod_id'])
else:
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED)
except Exception:
# NOTE(zhiyuan) we fail to submit the job, so if bottom router
# interface exists, it would not be deleted, then after we add
# the top interface again, the relation of top and bottom router
# interfaces are not updated in the resource routing entry. this
# inconsistency would not cause problem because:
# (1) when querying interface port, top port information is
# returned, not rely on routing entry
# (2) when setting up bottom router, xjob directly queries top
# and bottom interfaces, not rely on routing entry neither
# we may need some routing entry clean up process`
# the top interface again, the bottom router setup job will reuse
# the existing bottom interface.
#
# we don't create a routing entry between top interface and bottom
# interface, instead, when we create bottom subnet, we specify the
# ip of the top interface as the gateway ip of the bottom subnet.
# later when we attach the bottom subnet to bottom router, neutron
# server in bottom pod will create the bottom interface using the
# gateway ip automatically.
interface_info = {'subnet_id': return_info['subnet_id']}
super(TricirclePlugin, self).add_router_interface(
context, router_id, interface_info)
raise
@ -1125,96 +1131,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
fip = floatingip['floatingip']
floatingip_db = self._get_floatingip(context, _id)
int_port_id = fip['port_id']
project_id = floatingip_db['tenant_id']
fip_address = floatingip_db['floating_ip_address']
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, int_port_id, t_constants.RT_PORT)
if not mappings:
int_port = self.get_port(context, int_port_id)
int_network = self.get_network(context, int_port['network_id'])
if az_ext.AZ_HINTS not in int_network:
raise Exception('Cross pods L3 networking not support')
self._validate_availability_zones(
context, int_network[az_ext.AZ_HINTS], False)
int_net_pod, _ = az_ag.get_pod_by_az_tenant(
t_ctx, int_network[az_ext.AZ_HINTS][0], project_id)
b_int_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, int_network['id'], int_net_pod['pod_name'],
t_constants.RT_NETWORK)
b_int_port_body = {
'port': {
'tenant_id': project_id,
'admin_state_up': True,
'name': int_port['id'],
'network_id': b_int_net_id,
'mac_address': int_port['mac_address'],
'fixed_ips': [{'ip_address': int_port['fixed_ips'][0][
'ip_address']}]
}
}
# TODO(zhiyuan) handle DHCP port ip address conflict problem
_, b_int_port_id = self._prepare_bottom_element(
t_ctx, project_id, int_net_pod, int_port,
t_constants.RT_PORT, b_int_port_body)
else:
int_net_pod, b_int_port_id = mappings[0]
ext_net_id = floatingip_db['floating_network_id']
ext_net = self.get_network(context, ext_net_id)
ext_net_pod = db_api.get_pod_by_name(t_ctx,
ext_net[az_ext.AZ_HINTS][0])
# external network and internal network are in the same pod, no
# need to use bridge network.
if int_net_pod['pod_name'] == ext_net_pod['pod_name']:
client = self._get_client(int_net_pod['pod_name'])
b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, ext_net_id, ext_net_pod['pod_name'],
t_constants.RT_NETWORK)
self._safe_create_bottom_floatingip(
t_ctx, int_net_pod, client, b_ext_net_id, fip_address,
b_int_port_id)
# mapping does not exist, meaning that the bottom port has not
# been created, we just return and leave the work to setup bottom
# floating ip to nova api gateway
return
# below handle the case that external network and internal network
# are in different pods
int_client = self._get_client(int_net_pod['pod_name'])
ext_client = self._get_client(ext_net_pod['pod_name'])
ns_bridge_net_name = t_constants.ns_bridge_net_name % project_id
ns_bridge_net = self.get_networks(
context, {'name': [ns_bridge_net_name]})[0]
int_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, ns_bridge_net['id'], int_net_pod['pod_name'],
t_constants.RT_NETWORK)
ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, ns_bridge_net['id'], ext_net_pod['pod_name'],
t_constants.RT_NETWORK)
t_pod = db_api.get_top_pod(t_ctx)
t_ns_bridge_port = self._get_bridge_interface(
t_ctx, context, project_id, t_pod, ns_bridge_net['id'],
None, b_int_port_id, False)
port_body = {
'port': {
'tenant_id': project_id,
'admin_state_up': True,
'name': 'ns_bridge_port',
'network_id': ext_bridge_net_id,
'fixed_ips': [{'ip_address': t_ns_bridge_port[
'fixed_ips'][0]['ip_address']}]
}
}
_, b_ns_bridge_port_id = self._prepare_bottom_element(
t_ctx, project_id, ext_net_pod, t_ns_bridge_port,
t_constants.RT_PORT, port_body)
b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, ext_net_id, ext_net_pod['pod_name'],
t_constants.RT_NETWORK)
self._safe_create_bottom_floatingip(
t_ctx, ext_net_pod, ext_client, b_ext_net_id, fip_address,
b_ns_bridge_port_id)
self._safe_create_bottom_floatingip(
t_ctx, int_net_pod, int_client, int_bridge_net_id,
t_ns_bridge_port['fixed_ips'][0]['ip_address'], b_int_port_id)
int_net_pod, b_int_port_id = mappings[0]
int_port = self.get_port(context, int_port_id)
net_id = int_port['network_id']
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, floatingip_db['router_id'], int_net_pod['pod_id'])
def _disassociate_floatingip(self, context, ori_floatingip_db):
if not ori_floatingip_db['port_id']:
@ -1223,7 +1152,6 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
return
t_ctx = t_context.get_context_from_neutron_context(context)
project_id = ori_floatingip_db['tenant_id']
t_int_port_id = ori_floatingip_db['port_id']
mappings = db_api.get_bottom_mappings_by_top_id(
@ -1238,80 +1166,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
return
b_int_net_pod, b_int_port_id = mappings[0]
t_ext_net_id = ori_floatingip_db['floating_network_id']
t_ext_net = self.get_network(context, t_ext_net_id)
b_ext_net_pod = db_api.get_pod_by_name(t_ctx,
t_ext_net[az_ext.AZ_HINTS][0])
b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, t_ext_net_id, b_ext_net_pod['pod_name'],
t_constants.RT_NETWORK)
# external network and internal network are in the same pod, so
# bridge network is not created in this pod
if b_int_net_pod['pod_name'] == b_ext_net_pod['pod_name']:
b_client = self._get_client(b_int_net_pod['pod_name'])
b_fips = b_client.list_floatingips(
t_ctx,
[{'key': 'floating_ip_address',
'comparator': 'eq',
'value': ori_floatingip_db['floating_ip_address']},
{'key': 'floating_network_id',
'comparator': 'eq',
'value': b_ext_net_id}])
if not b_fips:
return
b_client.update_floatingips(t_ctx, b_fips[0]['id'],
{'floatingip': {'port_id': None}})
return
# below handle the case that external network and internal network
# are in different pods
b_int_client = self._get_client(b_int_net_pod['pod_name'])
b_ext_client = self._get_client(b_ext_net_pod['pod_name'])
ns_bridge_net_name = t_constants.ns_bridge_net_name % project_id
t_ns_bridge_net = self.get_networks(
context, {'name': [ns_bridge_net_name]})[0]
b_int_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, t_ns_bridge_net['id'], b_int_net_pod['pod_name'],
t_constants.RT_NETWORK)
t_pod = db_api.get_top_pod(t_ctx)
t_ns_bridge_port = self._get_bridge_interface(
t_ctx, context, project_id, t_pod, t_ns_bridge_net['id'],
None, b_int_port_id, False)
b_int_fips = b_int_client.list_floatingips(
t_ctx,
[{'key': 'floating_ip_address',
'comparator': 'eq',
'value': t_ns_bridge_port['fixed_ips'][0]['ip_address']},
{'key': 'floating_network_id',
'comparator': 'eq',
'value': b_int_bridge_net_id}])
b_ext_fips = b_ext_client.list_floatingips(
t_ctx,
[{'key': 'floating_ip_address',
'comparator': 'eq',
'value': ori_floatingip_db['floating_ip_address']},
{'key': 'floating_network_id',
'comparator': 'eq',
'value': b_ext_net_id}])
if b_int_fips:
b_int_client.delete_floatingips(
t_ctx, b_int_fips[0]['id'])
if b_ext_fips:
b_ext_client.update_floatingips(
t_ctx, b_ext_fips[0]['id'],
{'floatingip': {'port_id': None}})
# delete bridge port
self.delete_port(context, t_ns_bridge_port['id'], l3_port_check=False)
# for bridge port, we have two resource routing entries, one for bridge
# port in top pod, another for bridge port in bottom pod. calling
# delete_port above will delete bridge port in bottom pod as well as
# routing entry for it, but we also need to remove routing entry for
# bridge port in top pod
# bridge network will be deleted when deleting router
with t_ctx.session.begin():
core.delete_resources(t_ctx, models.ResourceRouting,
[{'key': 'top_id', 'comparator': 'eq',
'value': t_ns_bridge_port['name']}])
int_port = self.get_port(context, t_int_port_id)
net_id = int_port['network_id']
self.xjob_handler.setup_bottom_router(
t_ctx, net_id, ori_floatingip_db['router_id'],
b_int_net_pod['pod_id'])

View File

@ -175,8 +175,9 @@ class ServerController(rest.RestController):
400, _('Network %s could not be '
'found') % net_info['uuid'])
if not self._check_network_server_the_same_az(
network, kw['server']['availability_zone']):
if not self._check_network_server_az_match(
context, network,
kw['server']['availability_zone']):
return utils.format_nova_error(
400, _('Network and server not in the same '
'availability zone'))
@ -300,11 +301,29 @@ class ServerController(rest.RestController):
self.project_id, pod, {'id': _id},
_type, list_resources)
def _handle_router(self, context, pod, net):
    """Kick off bottom router setup if the network is router-attached.

    Queries the top pod for router-interface ports on the given network;
    when one bound to a router exists, submits an asynchronous job to
    configure the bottom router in the given pod.
    """
    top_client = self._get_client()
    interfaces = top_client.list_ports(
        context, filters=[{'key': 'network_id',
                           'comparator': 'eq',
                           'value': net['id']},
                          {'key': 'device_owner',
                           'comparator': 'eq',
                           'value': 'network:router_interface'}])
    # only ports with a device_id are actually bound to a router
    interfaces = [inf for inf in interfaces if inf['device_id']]
    if not interfaces:
        return
    # TODO(zhiyuan) change xjob invoking from "cast" to "call" to guarantee
    # the job can be successfully registered
    self.xjob_handler.setup_bottom_router(
        context, net['id'], interfaces[0]['device_id'], pod['pod_id'])
def _handle_network(self, context, pod, net, subnets, port=None,
top_sg_ids=None, bottom_sg_ids=None):
(bottom_net_id,
subnet_map) = self.helper.prepare_bottom_network_subnets(
context, self.project_id, pod, net, subnets)
context, None, self.project_id, pod, net, subnets)
top_client = self._get_client()
top_port_body = {'port': {'network_id': net['id'],
@ -324,6 +343,8 @@ class ServerController(rest.RestController):
_, bottom_port_id = self.helper.prepare_bottom_element(
context, self.project_id, pod, port, constants.RT_PORT, port_body)
self._handle_router(context, pod, net)
return port['id'], bottom_port_id
def _handle_port(self, context, pod, port):
@ -535,15 +556,25 @@ class ServerController(rest.RestController):
filters)
@staticmethod
def _check_network_server_the_same_az(network, server_az):
def _check_network_server_az_match(context, network, server_az):
az_hints = 'availability_zone_hints'
network_type = 'provider:network_type'
# for local type network, we make sure it's created in only one az
# NOTE(zhiyuan) race condition exists when creating vms in the same
# local type network but different azs at the same time
if network.get(network_type) == constants.NT_LOCAL:
mappings = db_api.get_bottom_mappings_by_top_id(
context, network['id'], constants.RT_NETWORK)
if mappings:
pod, _ = mappings[0]
if pod['az_name'] != server_az:
return False
# if neutron az not assigned, server az is used
if not network.get(az_hints):
return True
# temporally not support cross-pod network
if len(network[az_hints]) > 1:
return False
if network[az_hints][0] == server_az:
if server_az in network[az_hints]:
return True
else:
return False

View File

@ -0,0 +1,64 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from oslo_utils import uuidutils
from tricircle.network import helper
class HelperTest(unittest.TestCase):
    """Tests for tricircle.network.helper.NetworkHelper."""

    def setUp(self):
        self.helper = helper.NetworkHelper()

    def test_get_create_subnet_body(self):
        """Bottom subnet body: gateway ip is carved out of the pools.

        Covers a gateway at the start of a pool, at the end of a pool,
        and in the middle of one of several pools.
        """
        t_net_id = uuidutils.generate_uuid()
        t_subnet_id = uuidutils.generate_uuid()
        b_net_id = uuidutils.generate_uuid()
        project_id = uuidutils.generate_uuid()

        t_subnet = {
            'network_id': t_net_id,
            'id': t_subnet_id,
            'ip_version': 4,
            'cidr': '10.0.1.0/24',
            'gateway_ip': '10.0.1.1',
            'allocation_pools': [{'start': '10.0.1.2', 'end': '10.0.1.254'}],
            'enable_dhcp': True,
            'tenant_id': project_id
        }
        # gateway at the very start of the pool: pool shrinks from the left
        body = self.helper.get_create_subnet_body(project_id, t_subnet,
                                                  b_net_id, '10.0.1.2')
        self.assertItemsEqual([{'start': '10.0.1.3', 'end': '10.0.1.254'}],
                              body['subnet']['allocation_pools'])
        self.assertEqual('10.0.1.2', body['subnet']['gateway_ip'])

        # gateway at the very end of the pool: pool shrinks from the right
        body = self.helper.get_create_subnet_body(project_id, t_subnet,
                                                  b_net_id, '10.0.1.254')
        self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.253'}],
                              body['subnet']['allocation_pools'])
        self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])

        # gateway inside the first of two pools: that pool is split in two,
        # the other pool is kept intact
        t_subnet['allocation_pools'] = [
            {'start': '10.0.1.2', 'end': '10.0.1.10'},
            {'start': '10.0.1.20', 'end': '10.0.1.254'}]
        body = self.helper.get_create_subnet_body(project_id, t_subnet,
                                                  b_net_id, '10.0.1.5')
        self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.4'},
                               {'start': '10.0.1.6', 'end': '10.0.1.10'},
                               {'start': '10.0.1.20', 'end': '10.0.1.254'}],
                              body['subnet']['allocation_pools'])
        self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])

File diff suppressed because it is too large Load Diff

View File

@ -59,6 +59,18 @@ RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_SGS, BOTTOM_SERVERS,
BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_SGS]
def _get_ip_suffix():
# four elements are enough currently
suffix_list = ['3', '4', '5', '6']
index = 0
while True:
yield suffix_list[index]
index += 1
index %= 4
ip_suffix = _get_ip_suffix()
class FakeException(Exception):
pass
@ -108,7 +120,6 @@ class FakeClient(object):
if not pod_name:
pod_name = 't_region'
self.pod_name = pod_name
self.ip_suffix_gen = self._get_ip_suffix()
def _get_res_list(self, _type):
if self.pod_name == 'b_region_2':
@ -140,11 +151,11 @@ class FakeClient(object):
cidr = subnet['cidr']
ip_prefix = cidr[:cidr.rindex('.') + 1]
mac_prefix = 'fa:16:3e:96:41:0'
if 'device_owner' in body['port']:
if body['port'].get('device_owner') == 'network:dhcp':
ip = ip_prefix + '2'
body['port']['mac_address'] = mac_prefix + '2'
else:
suffix = self.ip_suffix_gen.next()
suffix = ip_suffix.next()
ip = ip_prefix + suffix
body['port']['mac_address'] = mac_prefix + suffix
fixed_ip_list.append({'ip_address': ip,
@ -194,16 +205,6 @@ class FakeClient(object):
ret_list.append(res)
return ret_list
@staticmethod
def _get_ip_suffix():
# three elements should be enough
suffix_list = ['3', '4', '5']
index = 0
while True:
yield suffix_list[index]
index += 1
index %= 3
def create_ports(self, ctx, body):
return self.create_resources('port', ctx, body)
@ -418,25 +419,31 @@ class ServerTest(unittest.TestCase):
'top_net_id', 'network')
self.assertEqual(0, len(mappings))
def _check_routes(self):
def _check_routes(self, b_pod):
for res in (TOP_NETS, TOP_SUBNETS, BOTTOM_NETS, BOTTOM_SUBNETS):
self.assertEqual(1, len(res))
enable_dhcp = TOP_SUBNETS[0]['enable_dhcp']
self.assertEqual(enable_dhcp, BOTTOM_SUBNETS[0]['enable_dhcp'])
port_num = 2 if enable_dhcp else 1
self.assertEqual(port_num, len(TOP_PORTS))
self.assertEqual(port_num, len(BOTTOM_PORTS))
# top vm port, top interface port, top dhcp port
t_port_num = 3 if enable_dhcp else 2
# bottom vm port, bottom dhcp port
b_port_num = 2 if enable_dhcp else 1
self.assertEqual(t_port_num, len(TOP_PORTS))
self.assertEqual(b_port_num, len(BOTTOM_PORTS))
with self.context.session.begin():
routes = core.query_resource(self.context,
models.ResourceRouting, [], [])
# bottom network, bottom subnet, bottom port, no top dhcp and bottom
# dhcp if dhcp disabled
entry_num = 5 if enable_dhcp else 3
entry_num = 6 if enable_dhcp else 4
self.assertEqual(entry_num, len(routes))
actual = [[], [], []]
if entry_num > 3:
actual = [[], [], [], []]
actual[3].append(constants.interface_port_name % (
b_pod['pod_id'], TOP_SUBNETS[0]['id']))
if entry_num > 4:
actual.extend([[], []])
actual[5].append(constants.dhcp_port_name % TOP_SUBNETS[0]['id'])
for region in ('t_region', 'b_region'):
actual[0].append(self.controller._get_client(
@ -445,24 +452,22 @@ class ServerTest(unittest.TestCase):
region).list_resources('subnet', self.context, [])[0]['id'])
ports = self.controller._get_client(
region).list_resources('port', self.context, [])
if 'device_id' not in ports[0]:
actual[2].append(ports[0]['id'])
else:
actual[2].append(ports[1]['id'])
if entry_num > 3:
actual[4].append(constants.dhcp_port_name % TOP_SUBNETS[0]['id'])
for region in ('t_region', 'b_region'):
ports = self.controller._get_client(
region).list_resources('port', self.context, [])
if 'device_id' in ports[0]:
actual[3].append(ports[0]['id'])
if region == 't_region':
actual[4].append(ports[0]['id'])
for port in ports:
if port.get('device_id'):
dhcp_port_id = port['id']
elif port.get('device_owner'):
gateway_port_id = port['id']
else:
actual[3].append(ports[1]['id'])
if region == 't_region':
actual[4].append(ports[1]['id'])
vm_port_id = port['id']
actual[2].append(vm_port_id)
if region == 't_region':
actual[3].append(gateway_port_id)
if entry_num > 4:
actual[4].append(dhcp_port_id)
if region == 't_region':
actual[5].append(dhcp_port_id)
expect = [[route['top_id'], route['bottom_id']] for route in routes]
self.assertItemsEqual(expect, actual)
@ -475,13 +480,13 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
TOP_NETS.append(net)
TOP_SUBNETS.append(subnet)
self.controller._handle_network(self.context, b_pod, net, [subnet])
self._check_routes()
self._check_routes(b_pod)
def test_handle_network_dhcp_disable(self):
t_pod, b_pod = self._prepare_pod()
@ -491,13 +496,13 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': False}
TOP_NETS.append(net)
TOP_SUBNETS.append(subnet)
self.controller._handle_network(self.context, b_pod, net, [subnet])
self._check_routes()
self._check_routes(b_pod)
def test_handle_port(self):
t_pod, b_pod = self._prepare_pod()
@ -507,21 +512,21 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
port = {
'id': 'top_port_id',
'network_id': 'top_net_id',
'mac_address': 'fa:16:3e:96:41:03',
'mac_address': 'fa:16:3e:96:41:07',
'fixed_ips': [{'subnet_id': 'top_subnet_id',
'ip_address': '10.0.0.3'}]
'ip_address': '10.0.0.7'}]
}
TOP_NETS.append(net)
TOP_SUBNETS.append(subnet)
TOP_PORTS.append(port)
self.controller._handle_port(self.context, b_pod, port)
self._check_routes()
self._check_routes(b_pod)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(FakeClient, 'create_servers')
@ -537,8 +542,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
'tenant_id': self.project_id,
@ -586,11 +591,6 @@ class ServerTest(unittest.TestCase):
res = self.controller.post(**body)
self._validate_error_code(res, 400)
# update top net for test purpose, correct az and wrong az
TOP_NETS[0]['availability_zone_hints'] = ['b_az', 'fake_az']
res = self.controller.post(**body)
self._validate_error_code(res, 400)
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(FakeClient, 'create_servers')
@patch.object(context, 'extract_context_from_environ')
@ -606,8 +606,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
'tenant_id': self.project_id,
@ -702,8 +702,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
t_sg = {'id': top_sg_id, 'name': 'test_sg', 'description': '',
'tenant_id': self.project_id,
@ -794,8 +794,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.1.0/24',
'gateway_ip': '10.0.1.1',
'allocation_pools': {'start': '10.0.1.2',
'end': '10.0.1.254'},
'allocation_pools': [{'start': '10.0.1.2',
'end': '10.0.1.254'}],
'enable_dhcp': True}
t_net2 = {'id': top_net2_id, 'name': 'net2'}
t_subnet2 = {'id': top_subnet2_id,
@ -804,8 +804,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.2.0/24',
'gateway_ip': '10.0.2.1',
'allocation_pools': {'start': '10.0.2.2',
'end': '10.0.2.254'},
'allocation_pools': [{'start': '10.0.2.2',
'end': '10.0.2.254'}],
'enable_dhcp': True}
t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
'tenant_id': self.project_id,
@ -973,6 +973,77 @@ class ServerTest(unittest.TestCase):
res['Error']['message'])
self.assertEqual(404, res['Error']['code'])
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(xrpcapi.XJobAPI, 'setup_bottom_router')
@patch.object(FakeClient, 'create_servers')
@patch.object(context, 'extract_context_from_environ')
def test_post_l3_involved(self, mock_ctx, mock_create, mock_setup):
    """Booting a server on a router-attached network triggers the
    asynchronous bottom-router setup job for the target pod.

    A top network, subnet and router-interface port are prepared, then
    a server is posted into the bottom pod's availability zone; the
    test asserts that setup_bottom_router is invoked with the top
    network id, top router id and the bottom pod id.
    """
    t_pod, b_pod = self._prepare_pod(1)
    top_net_id = 'top_net_id'
    top_subnet_id = 'top_subnet_id'
    top_port_id = 'top_port_id'
    top_sg_id = 'top_sg_id'
    top_router_id = 'top_router_id'

    t_net = {'id': top_net_id, 'name': 'net'}
    t_subnet = {'id': top_subnet_id,
                'network_id': top_net_id,
                'ip_version': 4,
                'cidr': '10.0.0.0/24',
                'gateway_ip': '10.0.0.1',
                'allocation_pools': [{'start': '10.0.0.2',
                                      'end': '10.0.0.254'}],
                'enable_dhcp': True}
    # router interface port: its presence on the network marks the
    # network as attached to top_router_id
    t_port = {'id': top_port_id,
              'network_id': top_net_id,
              'device_id': top_router_id,
              'device_owner': 'network:router_interface',
              'fixed_ips': [{'subnet_id': top_subnet_id,
                             'ip_address': '10.0.0.1'}],
              'mac_address': 'fa:16:3e:96:41:03'}
    # default security group with the two standard ingress/egress rules
    t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
            'tenant_id': self.project_id,
            'security_group_rules': [
                {'remote_group_id': top_sg_id,
                 'direction': 'ingress',
                 'remote_ip_prefix': None,
                 'protocol': None,
                 'port_range_max': None,
                 'port_range_min': None,
                 'ethertype': 'IPv4'},
                {'remote_group_id': None,
                 'direction': 'egress',
                 'remote_ip_prefix': None,
                 'protocol': None,
                 'port_range_max': None,
                 'port_range_min': None,
                 'ethertype': 'IPv4'},
            ]}
    TOP_NETS.append(t_net)
    TOP_SUBNETS.append(t_subnet)
    TOP_PORTS.append(t_port)
    TOP_SGS.append(t_sg)

    server_name = 'test_server'
    image_id = 'image_id'
    flavor_id = 1
    body = {
        'server': {
            'name': server_name,
            'imageRef': image_id,
            'flavorRef': flavor_id,
            'availability_zone': b_pod['az_name'],
            'networks': [{'port': top_port_id}]
        }
    }
    mock_create.return_value = {'id': 'bottom_server_id'}
    mock_ctx.return_value = self.context

    self.controller.post(**body)['server']
    # the plugin should schedule router setup for exactly this pod
    mock_setup.assert_called_with(self.context, top_net_id, top_router_id,
                                  b_pod['pod_id'])
@patch.object(pecan, 'response', new=FakeResponse)
def test_process_injected_file_quota(self):
ctx = self.context.elevated()
@ -1132,8 +1203,8 @@ class ServerTest(unittest.TestCase):
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': {'start': '10.0.0.2',
'end': '10.0.0.254'},
'allocation_pools': [{'start': '10.0.0.2',
'end': '10.0.0.254'}],
'enable_dhcp': True}
t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
'tenant_id': self.project_id,

View File

@ -138,6 +138,13 @@ class XManagerTest(unittest.TestCase):
'fixed_ips': [{'subnet_id': subnet['id'],
'ip_address': subnet['gateway_ip']}]
}
vm_port = {
'network_id': network['id'],
'device_id': 'vm%d_id' % i,
'device_owner': 'compute:None',
'fixed_ips': [{'subnet_id': subnet['id'],
'ip_address': '10.0.%d.3' % i}]
}
bridge_port = {
'network_id': bridge_network['id'],
'device_id': router['id'],
@ -151,6 +158,7 @@ class XManagerTest(unittest.TestCase):
RES_MAP[pod_name]['subnet'].append(subnet)
RES_MAP[pod_name]['subnet'].append(bridge_subnet)
RES_MAP[pod_name]['port'].append(port)
RES_MAP[pod_name]['port'].append(vm_port)
RES_MAP[pod_name]['port'].append(bridge_port)
RES_MAP[pod_name]['router'].append(router)
@ -169,20 +177,35 @@ class XManagerTest(unittest.TestCase):
'device_owner': 'network:router_interface',
'fixed_ips': [{'subnet_id': 'subnet_3_id',
'ip_address': '10.0.3.1'}]})
BOTTOM1_PORT.append({'network_id': 'network_3_id',
'device_id': 'vm3_id',
'device_owner': 'compute:None',
'fixed_ips': [{'subnet_id': 'subnet_3_id',
'ip_address': '10.0.3.3'}]})
self.xmanager.configure_extra_routes(self.context,
payload={'router': top_router_id})
calls = [mock.call(self.context, 'router_1_id',
{'router': {
'routes': [{'nexthop': '100.0.1.2',
'destination': '10.0.2.0/24'}]}}),
'destination': '10.0.2.3/32'}]}}),
mock.call(self.context, 'router_2_id',
{'router': {
'routes': [{'nexthop': '100.0.1.1',
'destination': '10.0.1.0/24'},
'destination': '10.0.1.3/32'},
{'nexthop': '100.0.1.1',
'destination': '10.0.3.0/24'}]}})]
mock_update.assert_has_calls(calls)
'destination': '10.0.3.3/32'}]}}),
mock.call(self.context, 'router_2_id',
{'router': {
'routes': [{'nexthop': '100.0.1.1',
'destination': '10.0.3.3/32'},
{'nexthop': '100.0.1.1',
'destination': '10.0.1.3/32'}]}})]
called = mock_update.call_args_list[1] == calls[1]
called = called or (mock_update.call_args_list[1] == calls[2])
called = called and (mock_update.call_args_list[0] == calls[0])
self.assertTrue(called)
def test_job_handle(self):
@xmanager._job_handle('fake_resource')

View File

@ -24,6 +24,8 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import periodic_task
import neutronclient.common.exceptions as q_cli_exceptions
from tricircle.common import client
from tricircle.common import constants
from tricircle.common.i18n import _
@ -34,6 +36,7 @@ from tricircle.common import xrpcapi
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
import tricircle.network.exceptions as t_network_exc
from tricircle.network import helper
@ -259,22 +262,43 @@ class XManager(PeriodicTasks):
'job_type': job_type})
self.job_handles[job_type](ctx, payload=payload)
@_job_handle(constants.JT_ROUTER_SETUP)
def setup_bottom_router(self, ctx, payload):
(b_pod_id,
t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')
@staticmethod
def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
                                   fip_address, port_id):
    """Create a floating ip in a bottom pod, tolerating retries.

    Because the router-setup job may be re-run, the floating ip may
    already exist in the bottom pod. IpAddressInUseClient from the
    bottom Neutron is therefore examined rather than propagated
    blindly.

    :param t_ctx: tricircle context used for the client calls
    :param pod: bottom pod record (pod_name is used in error reports)
    :param client: client bound to the bottom pod
    :param fip_net_id: bottom external (floating) network id
    :param fip_address: exact floating ip address to allocate
    :param port_id: bottom internal port to associate with the fip
    :raises t_network_exc.BottomPodOperationFailure: if the address is
        reported in use but no matching fip can be found afterwards
    :raises q_cli_exceptions.IpAddressInUseClient: re-raised when the
        existing fip is bound to a different port than expected
    """
    try:
        client.create_floatingips(
            t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
                                   'floating_ip_address': fip_address,
                                   'port_id': port_id}})
    except q_cli_exceptions.IpAddressInUseClient:
        # address already allocated; look up the conflicting fip
        fips = client.list_floatingips(t_ctx,
                                       [{'key': 'floating_ip_address',
                                         'comparator': 'eq',
                                         'value': fip_address}])
        if not fips:
            # this is rare case that we got IpAddressInUseClient exception
            # a second ago but now the floating ip is missing
            raise t_network_exc.BottomPodOperationFailure(
                resource='floating ip', pod_name=pod['pod_name'])
        associated_port_id = fips[0].get('port_id')
        if associated_port_id == port_id:
            # if the internal port associated with the existing fip is what
            # we expect, just ignore this exception
            pass
        elif not associated_port_id:
            # if the existing fip is not associated with any internal port,
            # update the fip to add association
            client.update_floatingips(t_ctx, fips[0]['id'],
                                      {'floatingip': {'port_id': port_id}})
        else:
            # fip is bound to some other port: a genuine conflict,
            # re-raise the original IpAddressInUseClient
            raise
t_client = self._get_client()
t_pod = db_api.get_top_pod(ctx)
b_pod = db_api.get_pod(ctx, b_pod_id)
def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
t_router, t_ew_bridge_net, t_ew_bridge_subnet,
need_ns_bridge):
b_client = self._get_client(b_pod['pod_name'])
b_az = b_pod['az_name']
t_router = t_client.get_routers(ctx, t_router_id)
if not t_router:
# we just end this job if top router no longer exists
return
router_body = {'router': {'name': t_router_id,
router_body = {'router': {'name': t_router['id'],
'distributed': False}}
project_id = t_router['tenant_id']
@ -282,89 +306,66 @@ class XManager(PeriodicTasks):
_, b_router_id = self.helper.prepare_bottom_element(
ctx, project_id, b_pod, t_router, 'router', router_body)
# handle E-W networking
# create top E-W bridge port
t_bridge_net_name = constants.ew_bridge_net_name % project_id
t_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
t_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
t_bridge_net_name)
t_bridge_subnet = self._get_resource_by_name(t_client, ctx, 'subnet',
t_bridge_subnet_name)
q_cxt = None # no need to pass neutron context when using client
t_bridge_port_id = self.helper.get_bridge_interface(
ctx, q_cxt, project_id, t_pod, t_bridge_net['id'],
q_ctx = None # no need to pass neutron context when using client
t_ew_bridge_port_id = self.helper.get_bridge_interface(
ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'],
b_router_id, None, True)
# create bottom E-W bridge port
t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id)
(is_new, b_bridge_port_id,
t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id)
(is_new, b_ew_bridge_port_id,
_, _) = self.helper.get_bottom_bridge_elements(
ctx, project_id, b_pod, t_bridge_net, False, t_bridge_subnet,
t_bridge_port)
ctx, project_id, b_pod, t_ew_bridge_net, False, t_ew_bridge_subnet,
t_ew_bridge_port)
# attach bottom E-W bridge port to bottom router
if is_new:
# only attach bridge port the first time
b_client.action_routers(ctx, 'add_interface', b_router_id,
{'port_id': b_bridge_port_id})
{'port_id': b_ew_bridge_port_id})
else:
# still need to check if the bridge port is bound
port = b_client.get_ports(ctx, b_bridge_port_id)
port = b_client.get_ports(ctx, b_ew_bridge_port_id)
if not port.get('device_id'):
b_client.action_routers(ctx, 'add_interface', b_router_id,
{'port_id': b_bridge_port_id})
{'port_id': b_ew_bridge_port_id})
# handle N-S networking
ext_nets = t_client.list_networks(ctx,
filters=[{'key': 'router:external',
'comparator': 'eq',
'value': True}])
if not ext_nets:
need_ns_bridge = False
else:
ext_net_pod_names = set(
[ext_net[AZ_HINTS][0] for ext_net in ext_nets])
if b_pod['pod_name'] in ext_net_pod_names:
need_ns_bridge = False
else:
need_ns_bridge = True
if need_ns_bridge:
t_bridge_net_name = constants.ns_bridge_net_name % project_id
t_bridge_subnet_name = constants.ns_bridge_subnet_name % project_id
t_bridge_net = self._get_resource_by_name(
t_client, ctx, 'network', t_bridge_net_name)
t_bridge_subnet = self._get_resource_by_name(
t_client, ctx, 'subnet', t_bridge_subnet_name)
t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id
t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % (
project_id)
t_ns_bridge_net = self._get_resource_by_name(
t_client, ctx, 'network', t_ns_bridge_net_name)
t_ns_bridge_subnet = self._get_resource_by_name(
t_client, ctx, 'subnet', t_ns_bridge_subnet_name)
# create bottom N-S bridge network and subnet
(_, _, b_bridge_subnet_id,
b_bridge_net_id) = self.helper.get_bottom_bridge_elements(
ctx, project_id, b_pod, t_bridge_net, True,
t_bridge_subnet, None)
# create top N-S bridge port
ns_bridge_port_id = self.helper.get_bridge_interface(
ctx, q_cxt, project_id, t_pod, t_bridge_net['id'],
(_, _, b_ns_bridge_subnet_id,
b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements(
ctx, project_id, b_pod, t_ns_bridge_net, True,
t_ns_bridge_subnet, None)
# create top N-S bridge gateway port
t_ns_bridge_gateway_id = self.helper.get_bridge_interface(
ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
b_router_id, None, False)
ns_bridge_port = t_client.get_ports(ctx, ns_bridge_port_id)
t_ns_bridge_gateway = t_client.get_ports(ctx,
t_ns_bridge_gateway_id)
# add external gateway for bottom router
# add gateway is update operation, can run multiple times
gateway_ip = ns_bridge_port['fixed_ips'][0]['ip_address']
gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address']
b_client.action_routers(
ctx, 'add_gateway', b_router_id,
{'network_id': b_bridge_net_id,
'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
{'network_id': b_ns_bridge_net_id,
'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id,
'ip_address': gateway_ip}]})
# attach internal port to bottom router
t_net = t_client.get_networks(ctx, t_net_id)
if not t_net:
# we just end this job if top network no longer exists
return
net_azs = t_net.get(AZ_HINTS, [])
if net_azs and b_az not in net_azs:
return
t_ports = self._get_router_interfaces(t_client, ctx, t_router_id,
t_net_id)
t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
t_net['id'])
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_net_id, b_pod['pod_name'], constants.RT_NETWORK)
ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
if b_net_id:
b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
b_net_id)
@ -376,42 +377,209 @@ class XManager(PeriodicTasks):
request_body = {'port_id': b_port['id']}
b_client.action_routers(ctx, 'remove_interface', b_router_id,
request_body)
with ctx.session.begin():
core.delete_resources(ctx, models.ResourceRouting,
filters=[{'key': 'bottom_id',
'comparator': 'eq',
'value': b_port['id']}])
elif t_ports and not b_ports:
# create new bottom interface
t_port = t_ports[0]
# only consider ipv4 address currently
t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
t_subnet = t_client.get_subnets(ctx, t_subnet_id)
(b_net_id,
subnet_map) = self.helper.prepare_bottom_network_subnets(
ctx, project_id, b_pod, t_net, [t_subnet])
port_body = self.helper.get_create_port_body(
project_id, t_port, subnet_map, b_net_id)
_, b_port_id = self.helper.prepare_bottom_element(
ctx, project_id, b_pod, t_port, constants.RT_PORT, port_body)
ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
# the gateway ip of bottom subnet is set to the ip of t_port, so
# we just attach the bottom subnet to the bottom router and neutron
# server in the bottom pod will create the interface for us, using
# the gateway ip.
b_client.action_routers(ctx, 'add_interface', b_router_id,
{'port_id': b_port_id})
elif t_ports and b_ports:
# when users remove the interface again, it's possible that top
# interface is removed but deletion of bottom interface fails.
# if users add the interface again during the retry of the job,
# we have top and bottom interfaces exist but the id mapping
# in the routing entry is incorrect, so we update it here
t_port = t_ports[0]
b_port = b_ports[0]
with ctx.session.begin():
core.update_resources(ctx, models.ResourceRouting,
[{'key': 'bottom_id', 'comparator': 'eq',
'value': b_port['id']},
{'key': 'pod_id', 'comparator': 'eq',
'value': b_pod_id}
], {'top_id': t_port['id']})
{'subnet_id': subnet_map[t_subnet_id]})
if not t_router['external_gateway_info']:
return
# handle floatingip
t_ext_net_id = t_router['external_gateway_info']['network_id']
t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
'comparator': 'eq',
'value': t_ext_net_id}])
# skip unbound top floatingip
t_ip_fip_map = dict([(fip['floating_ip_address'],
fip) for fip in t_fips if fip['port_id']])
mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
constants.RT_NETWORK)
# bottom external network should exist
b_ext_pod, b_ext_net_id = mappings[0]
b_ext_client = self._get_client(b_ext_pod['pod_name'])
b_fips = b_ext_client.list_floatingips(
ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
'value': b_ext_net_id}])
# skip unbound bottom floatingip
b_ip_fip_map = dict([(fip['floating_ip_address'],
fip) for fip in b_fips if fip['port_id']])
add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]
for add_fip in add_fips:
fip = t_ip_fip_map[add_fip]
t_int_port_id = fip['port_id']
b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
if not b_int_port_id:
LOG.warning(_LW('Port %(port_id)s associated with floating ip '
'%(fip)s is not mapped to bottom pod'),
{'port_id': t_int_port_id, 'fip': add_fip})
continue
t_int_port = t_client.get_ports(ctx, t_int_port_id)
if t_int_port['network_id'] != t_net['id']:
# only handle floating ip association for the given top network
continue
if need_ns_bridge:
# create top N-S bridge interface port
t_ns_bridge_port_id = self.helper.get_bridge_interface(
ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None,
b_int_port_id, False)
t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id)
b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
constants.RT_NETWORK)
port_body = {
'port': {
'tenant_id': project_id,
'admin_state_up': True,
'name': 'ns_bridge_port',
'network_id': b_ext_bridge_net_id,
'fixed_ips': [{'ip_address': t_ns_bridge_port[
'fixed_ips'][0]['ip_address']}]
}
}
_, b_ns_bridge_port_id = self.helper.prepare_bottom_element(
ctx, project_id, b_ext_pod, t_ns_bridge_port,
constants.RT_PORT, port_body)
self._safe_create_bottom_floatingip(
ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip,
b_ns_bridge_port_id)
self._safe_create_bottom_floatingip(
ctx, b_pod, b_client, b_ns_bridge_net_id,
t_ns_bridge_port['fixed_ips'][0]['ip_address'],
b_int_port_id)
else:
self._safe_create_bottom_floatingip(
ctx, b_pod, b_client, b_ext_net_id, add_fip,
b_int_port_id)
for del_fip in del_fips:
fip = b_ip_fip_map[del_fip]
if need_ns_bridge:
b_ns_bridge_port = b_ext_client.get_ports(ctx, fip['port_id'])
entries = core.query_resource(
ctx, models.ResourceRouting,
[{'key': 'bottom_id', 'comparator': 'eq',
'value': b_ns_bridge_port['id']},
{'key': 'pod_id', 'comparator': 'eq',
'value': b_ext_pod['pod_id']}], [])
t_ns_bridge_port_id = entries[0]['top_id']
b_int_fips = b_client.list_floatingips(
ctx,
[{'key': 'floating_ip_address',
'comparator': 'eq',
'value': b_ns_bridge_port['fixed_ips'][0]['ip_address']},
{'key': 'floating_network_id',
'comparator': 'eq',
'value': b_ns_bridge_net_id}])
if b_int_fips:
b_client.delete_floatingips(ctx, b_int_fips[0]['id'])
b_ext_client.update_floatingips(
ctx, fip['id'], {'floatingip': {'port_id': None}})
# for bridge port, we have two resource routing entries, one
# for bridge port in top pod, another for bridge port in bottom
# pod. calling t_client.delete_ports will delete bridge port in
# bottom pod as well as routing entry for it, but we also need
# to remove routing entry for bridge port in top pod, bridge
# network will be deleted when deleting router
# first we update the routing entry to set bottom_id to None
# and expire the entry, so if we succeed to delete the bridge
# port next, this expired entry will be deleted; otherwise, we
# fail to delete the bridge port, when the port is accessed via
# lock_handle module, that module will find the port and update
# the entry
with ctx.session.begin():
core.update_resources(
ctx, models.ResourceRouting,
[{'key': 'bottom_id', 'comparator': 'eq',
'value': t_ns_bridge_port_id}],
{'bottom_id': None,
'created_at': constants.expire_time,
'updated_at': constants.expire_time})
# delete bridge port
t_client.delete_ports(ctx, t_ns_bridge_port_id)
# delete the expired entry, even if this deletion fails, we
# still have a chance that lock_handle module will delete it
with ctx.session.begin():
core.delete_resources(ctx, models.ResourceRouting,
[{'key': 'bottom_id',
'comparator': 'eq',
'value': t_ns_bridge_port_id}])
else:
b_client.update_floatingips(ctx, fip['id'],
{'floatingip': {'port_id': None}})
@_job_handle(constants.JT_ROUTER_SETUP)
def setup_bottom_router(self, ctx, payload):
    """Job handler that materializes a top router in bottom pod(s).

    The payload value is "b_pod_id#t_router_id#t_net_id". When the pod
    id is the POD_NOT_SPECIFIED sentinel, the network spans pods: one
    child job per mapped bottom pod is scheduled instead of doing the
    work here. Otherwise the bottom router, bridge networking and
    floating ips are set up in the single given pod, followed by a
    job to reconfigure extra routes across all bottom routers.
    """
    (b_pod_id,
     t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')

    if b_pod_id == constants.POD_NOT_SPECIFIED:
        # fan out: the caller did not pin a pod, so act on every pod
        # the top network is currently mapped to
        mappings = db_api.get_bottom_mappings_by_top_id(
            ctx, t_net_id, constants.RT_NETWORK)
        b_pods = [mapping[0] for mapping in mappings]
        for b_pod in b_pods:
            # NOTE(zhiyuan) we create one job for each pod to avoid
            # conflict caused by different workers operating the same pod
            self.xjob_handler.setup_bottom_router(
                ctx, t_net_id, t_router_id, b_pod['pod_id'])
        return

    t_client = self._get_client()
    t_pod = db_api.get_top_pod(ctx)
    t_router = t_client.get_routers(ctx, t_router_id)
    if not t_router:
        # we just end this job if top router no longer exists
        return
    t_net = t_client.get_networks(ctx, t_net_id)
    if not t_net:
        # we just end this job if top network no longer exists
        return
    project_id = t_router['tenant_id']

    b_pod = db_api.get_pod(ctx, b_pod_id)

    # locate the project's top east-west bridge network/subnet by their
    # conventional names
    t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id
    t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
    t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
                                                 t_ew_bridge_net_name)
    t_ew_bridge_subnet = self._get_resource_by_name(
        t_client, ctx, 'subnet', t_ew_bridge_subnet_name)

    # a north-south bridge is only needed when external networks exist
    # and none of them lives in this bottom pod (first AZ hint is taken
    # as the hosting pod name)
    ext_nets = t_client.list_networks(ctx,
                                      filters=[{'key': 'router:external',
                                                'comparator': 'eq',
                                                'value': True}])
    ext_net_pod_names = set(
        [ext_net[AZ_HINTS][0] for ext_net in ext_nets])

    if not ext_net_pod_names:
        need_ns_bridge = False
    elif b_pod['pod_name'] in ext_net_pod_names:
        need_ns_bridge = False
    else:
        need_ns_bridge = True
    self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net,
                               t_router, t_ew_bridge_net,
                               t_ew_bridge_subnet, need_ns_bridge)

    # refresh host routes between the bottom routers after the change
    self.xjob_handler.configure_extra_routes(ctx, t_router_id)
@ -419,23 +587,27 @@ class XManager(PeriodicTasks):
def configure_extra_routes(self, ctx, payload):
t_router_id = payload[constants.JT_ROUTER]
non_vm_port_types = ['network:router_interface',
'network:router_gateway',
'network:dhcp']
b_pods, b_router_ids = zip(*db_api.get_bottom_mappings_by_top_id(
ctx, t_router_id, constants.RT_ROUTER))
router_bridge_ip_map = {}
router_cidr_map = {}
router_ips_map = {}
for i, b_pod in enumerate(b_pods):
bottom_client = self._get_client(pod_name=b_pod['pod_name'])
b_inferfaces = bottom_client.list_ports(
b_interfaces = bottom_client.list_ports(
ctx, filters=[{'key': 'device_id',
'comparator': 'eq',
'value': b_router_ids[i]},
{'key': 'device_owner',
'comparator': 'eq',
'value': 'network:router_interface'}])
cidrs = []
for b_inferface in b_inferfaces:
ip = b_inferface['fixed_ips'][0]['ip_address']
router_ips_map[b_router_ids[i]] = {}
for b_interface in b_interfaces:
ip = b_interface['fixed_ips'][0]['ip_address']
ew_bridge_cidr = '100.0.0.0/9'
ns_bridge_cidr = '100.128.0.0/9'
if netaddr.IPAddress(ip) in netaddr.IPNetwork(ew_bridge_cidr):
@ -443,25 +615,38 @@ class XManager(PeriodicTasks):
continue
if netaddr.IPAddress(ip) in netaddr.IPNetwork(ns_bridge_cidr):
continue
b_net_id = b_interface['network_id']
b_subnet = bottom_client.get_subnets(
ctx, b_inferface['fixed_ips'][0]['subnet_id'])
cidrs.append(b_subnet['cidr'])
router_cidr_map[b_router_ids[i]] = cidrs
ctx, b_interface['fixed_ips'][0]['subnet_id'])
b_ports = bottom_client.list_ports(
ctx, filters=[{'key': 'network_id',
'comparator': 'eq',
'value': b_net_id}])
b_vm_ports = [b_port for b_port in b_ports if b_port.get(
'device_owner', '') not in non_vm_port_types]
ips = [vm_port['fixed_ips'][0][
'ip_address'] for vm_port in b_vm_ports]
router_ips_map[b_router_ids[i]][b_subnet['cidr']] = ips
for i, b_router_id in enumerate(b_router_ids):
if b_router_id not in router_bridge_ip_map:
continue
bottom_client = self._get_client(pod_name=b_pods[i]['pod_name'])
extra_routes = []
for router_id, cidrs in router_cidr_map.iteritems():
if not router_ips_map[b_router_id]:
bottom_client.update_routers(
ctx, b_router_id, {'router': {'routes': extra_routes}})
continue
for router_id, cidr_ips_map in router_ips_map.iteritems():
if router_id == b_router_id:
continue
for cidr in cidrs:
extra_routes.append(
{'nexthop': router_bridge_ip_map[router_id],
'destination': cidr})
bottom_client.update_routers(ctx, b_router_id,
{'router': {'routes': extra_routes}})
for cidr, ips in cidr_ips_map.iteritems():
if cidr in router_ips_map[b_router_id]:
continue
for ip in ips:
extra_routes.append(
{'nexthop': router_bridge_ip_map[router_id],
'destination': ip + '/32'})
bottom_client.update_routers(
ctx, b_router_id, {'router': {'routes': extra_routes}})
@_job_handle(constants.JT_PORT_DELETE)
def delete_server_port(self, ctx, payload):