Merge remote-tracking branch 'origin/master' into merge-branch
Change-Id: I82695bcf1b78910dc415a92473ac7f539e85f36a
This commit is contained in:
commit
3a9975cbc3
|
@ -74,7 +74,7 @@ repo but are summarized here to describe the functionality they provide.
|
|||
+-------------------------------+-----------------------+
|
||||
| networking-arista_ | ml2,l3 |
|
||||
+-------------------------------+-----------------------+
|
||||
| networking-bagpipe-l2_ | vpn |
|
||||
| networking-bagpipe-l2_ | ml2 |
|
||||
+-------------------------------+-----------------------+
|
||||
| networking-bgpvpn_ | vpn |
|
||||
+-------------------------------+-----------------------+
|
||||
|
|
|
@ -329,6 +329,10 @@ class OVSBridge(BaseOVS):
|
|||
'Interface', columns=['name', 'external_ids', 'ofport'])
|
||||
by_name = {x['name']: x for x in port_info}
|
||||
for name in port_names:
|
||||
if not by_name.get(name):
|
||||
#NOTE(dprince): some ports (like bonds) won't have all
|
||||
# these attributes so we skip them entirely
|
||||
continue
|
||||
external_ids = by_name[name]['external_ids']
|
||||
ofport = by_name[name]['ofport']
|
||||
if "iface-id" in external_ids and "attached-mac" in external_ids:
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
#
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
||||
from oslo_db import exception as db_exc
|
||||
from sqlalchemy import orm
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
|
@ -43,16 +45,21 @@ class AllowedAddressPairsMixin(object):
|
|||
allowed_address_pairs):
|
||||
if not attr.is_attr_set(allowed_address_pairs):
|
||||
return []
|
||||
with context.session.begin(subtransactions=True):
|
||||
for address_pair in allowed_address_pairs:
|
||||
# use port.mac_address if no mac address in address pair
|
||||
if 'mac_address' not in address_pair:
|
||||
address_pair['mac_address'] = port['mac_address']
|
||||
db_pair = AllowedAddressPair(
|
||||
port_id=port['id'],
|
||||
mac_address=address_pair['mac_address'],
|
||||
ip_address=address_pair['ip_address'])
|
||||
context.session.add(db_pair)
|
||||
try:
|
||||
with context.session.begin(subtransactions=True):
|
||||
for address_pair in allowed_address_pairs:
|
||||
# use port.mac_address if no mac address in address pair
|
||||
if 'mac_address' not in address_pair:
|
||||
address_pair['mac_address'] = port['mac_address']
|
||||
db_pair = AllowedAddressPair(
|
||||
port_id=port['id'],
|
||||
mac_address=address_pair['mac_address'],
|
||||
ip_address=address_pair['ip_address'])
|
||||
context.session.add(db_pair)
|
||||
except db_exc.DBDuplicateEntry:
|
||||
raise addr_pair.DuplicateAddressPairInRequest(
|
||||
mac_address=address_pair['mac_address'],
|
||||
ip_address=address_pair['ip_address'])
|
||||
|
||||
return allowed_address_pairs
|
||||
|
||||
|
|
|
@ -228,7 +228,7 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
|
|||
return self._fields(res, fields)
|
||||
|
||||
def _make_subnet_args(self, shared, detail,
|
||||
subnet, subnetpool_id=None):
|
||||
subnet, subnetpool_id):
|
||||
gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None
|
||||
args = {'tenant_id': detail.tenant_id,
|
||||
'id': detail.subnet_id,
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
|
||||
import netaddr
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api as oslo_db_api
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
|
@ -33,6 +32,7 @@ from neutron.common import exceptions as n_exc
|
|||
from neutron.common import ipv6_utils
|
||||
from neutron import context as ctx
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import db_base_plugin_common
|
||||
from neutron.db import ipam_non_pluggable_backend
|
||||
from neutron.db import models_v2
|
||||
from neutron.db import sqlalchemyutils
|
||||
|
@ -66,7 +66,7 @@ def _check_subnet_not_used(context, subnet_id):
|
|||
raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e)
|
||||
|
||||
|
||||
class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
|
||||
neutron_plugin_base_v2.NeutronPluginBaseV2):
|
||||
"""V2 Neutron plugin interface implementation using SQLAlchemy models.
|
||||
|
||||
|
@ -84,6 +84,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
__native_sorting_support = True
|
||||
|
||||
def __init__(self):
|
||||
self.set_ipam_backend()
|
||||
if cfg.CONF.notify_nova_on_port_status_changes:
|
||||
from neutron.notifiers import nova
|
||||
# NOTE(arosen) These event listeners are here to hook into when
|
||||
|
@ -96,6 +97,9 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
event.listen(models_v2.Port.status, 'set',
|
||||
self.nova_notifier.record_port_status_changed)
|
||||
|
||||
def set_ipam_backend(self):
|
||||
self.ipam = ipam_non_pluggable_backend.IpamNonPluggableBackend()
|
||||
|
||||
def _validate_host_route(self, route, ip_version):
|
||||
try:
|
||||
netaddr.IPNetwork(route['destination'])
|
||||
|
@ -439,18 +443,15 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
external_gateway_info}}
|
||||
l3plugin.update_router(context, id, info)
|
||||
|
||||
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
|
||||
retry_on_request=True,
|
||||
retry_on_deadlock=True)
|
||||
def _create_subnet(self, context, subnet, subnetpool_id):
|
||||
s = subnet['subnet']
|
||||
|
||||
with context.session.begin(subtransactions=True):
|
||||
network = self._get_network(context, s["network_id"])
|
||||
subnet = self._allocate_subnet(context,
|
||||
network,
|
||||
s,
|
||||
subnetpool_id)
|
||||
subnet = self.ipam.allocate_subnet(context,
|
||||
network,
|
||||
s,
|
||||
subnetpool_id)
|
||||
if hasattr(network, 'external') and network.external:
|
||||
self._update_router_gw_ports(context,
|
||||
network,
|
||||
|
@ -458,7 +459,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
# If this subnet supports auto-addressing, then update any
|
||||
# internal ports on the network with addresses for this subnet.
|
||||
if ipv6_utils.is_auto_address_subnet(subnet):
|
||||
self._add_auto_addrs_on_network_ports(context, subnet)
|
||||
self.ipam.add_auto_addrs_on_network_ports(context, subnet)
|
||||
return self._make_subnet_dict(subnet)
|
||||
|
||||
def _get_subnetpool_id(self, subnet):
|
||||
|
@ -514,7 +515,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
s['tenant_id'] = self._get_tenant_id_for_create(context, s)
|
||||
subnetpool_id = self._get_subnetpool_id(s)
|
||||
if subnetpool_id:
|
||||
self._validate_pools_with_subnetpool(s)
|
||||
self.ipam.validate_pools_with_subnetpool(s)
|
||||
else:
|
||||
if not has_cidr:
|
||||
msg = _('A cidr must be specified in the absence of a '
|
||||
|
@ -548,10 +549,11 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
allocation_pools = [{'start': p['first_ip'],
|
||||
'end': p['last_ip']}
|
||||
for p in db_subnet.allocation_pools]
|
||||
self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools)
|
||||
self.ipam.validate_gw_out_of_pools(s["gateway_ip"],
|
||||
allocation_pools)
|
||||
|
||||
with context.session.begin(subtransactions=True):
|
||||
subnet, changes = self._update_db_subnet(context, id, s)
|
||||
subnet, changes = self.ipam.update_db_subnet(context, id, s)
|
||||
result = self._make_subnet_dict(subnet)
|
||||
# Keep up with fields that changed
|
||||
result.update(changes)
|
||||
|
@ -832,7 +834,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
db_port = self._create_port_with_mac(
|
||||
context, network_id, port_data, p['mac_address'])
|
||||
|
||||
self._allocate_ips_for_port_and_store(context, port, port_id)
|
||||
self.ipam.allocate_ips_for_port_and_store(context, port, port_id)
|
||||
|
||||
return self._make_port_dict(db_port, process_extensions=False)
|
||||
|
||||
|
@ -859,8 +861,8 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
port = self._get_port(context, id)
|
||||
new_mac = new_port.get('mac_address')
|
||||
self._validate_port_for_update(context, port, new_port, new_mac)
|
||||
changes = self._update_port_with_ips(context, port,
|
||||
new_port, new_mac)
|
||||
changes = self.ipam.update_port_with_ips(context, port,
|
||||
new_port, new_mac)
|
||||
result = self._make_port_dict(port)
|
||||
# Keep up with fields that changed
|
||||
if changes.original or changes.add or changes.remove:
|
||||
|
@ -870,7 +872,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
|||
|
||||
def delete_port(self, context, id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
self._delete_port(context, id)
|
||||
self.ipam.delete_port(context, id)
|
||||
|
||||
def delete_ports_by_device_id(self, context, device_id, network_id=None):
|
||||
query = (context.session.query(models_v2.Port.id)
|
||||
|
|
|
@ -52,7 +52,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
|
|||
return str(netaddr.IPNetwork(cidr_net).network + 1)
|
||||
return subnet.get('gateway_ip')
|
||||
|
||||
def _validate_pools_with_subnetpool(self, subnet):
|
||||
def validate_pools_with_subnetpool(self, subnet):
|
||||
"""Verifies that allocation pools are set correctly
|
||||
|
||||
Allocation pools can be set for specific subnet request only
|
||||
|
@ -155,7 +155,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
|
|||
del s['allocation_pools']
|
||||
return result_pools
|
||||
|
||||
def _update_db_subnet(self, context, subnet_id, s):
|
||||
def update_db_subnet(self, context, subnet_id, s):
|
||||
changes = {}
|
||||
if "dns_nameservers" in s:
|
||||
changes['dns_nameservers'] = (
|
||||
|
@ -298,12 +298,11 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
|
|||
|
||||
self._validate_allocation_pools(allocation_pools, cidr)
|
||||
if gateway_ip:
|
||||
self._validate_gw_out_of_pools(gateway_ip,
|
||||
allocation_pools)
|
||||
self.validate_gw_out_of_pools(gateway_ip, allocation_pools)
|
||||
return [netaddr.IPRange(p['start'], p['end'])
|
||||
for p in allocation_pools]
|
||||
|
||||
def _validate_gw_out_of_pools(self, gateway_ip, pools):
|
||||
def validate_gw_out_of_pools(self, gateway_ip, pools):
|
||||
for allocation_pool in pools:
|
||||
pool_range = netaddr.IPRange(
|
||||
allocation_pool['start'],
|
||||
|
@ -369,7 +368,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
|
|||
original=prev_ips,
|
||||
remove=remove_ips)
|
||||
|
||||
def _delete_port(self, context, port_id):
|
||||
def delete_port(self, context, port_id):
|
||||
query = (context.session.query(models_v2.Port).
|
||||
enable_eagerloads(False).filter_by(id=port_id))
|
||||
if not context.is_admin:
|
||||
|
@ -403,7 +402,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
|
|||
nexthop=rt['nexthop'])
|
||||
context.session.add(route)
|
||||
|
||||
self._save_allocation_pools(context, subnet,
|
||||
subnet_request.allocation_pools)
|
||||
self.save_allocation_pools(context, subnet,
|
||||
subnet_request.allocation_pools)
|
||||
|
||||
return subnet
|
||||
|
|
|
@ -27,7 +27,7 @@ from neutron.common import exceptions as n_exc
|
|||
from neutron.common import ipv6_utils
|
||||
from neutron.db import ipam_backend_mixin
|
||||
from neutron.db import models_v2
|
||||
from neutron import ipam
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron.ipam import subnet_alloc
|
||||
from neutron.ipam import utils as ipam_utils
|
||||
|
||||
|
@ -186,7 +186,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
return True
|
||||
return False
|
||||
|
||||
def _save_allocation_pools(self, context, subnet, allocation_pools):
|
||||
def save_allocation_pools(self, context, subnet, allocation_pools):
|
||||
for pool in allocation_pools:
|
||||
first_ip = str(netaddr.IPAddress(pool.first, pool.version))
|
||||
last_ip = str(netaddr.IPAddress(pool.last, pool.version))
|
||||
|
@ -200,7 +200,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
last_ip=last_ip)
|
||||
context.session.add(ip_range)
|
||||
|
||||
def _allocate_ips_for_port_and_store(self, context, port, port_id):
|
||||
def allocate_ips_for_port_and_store(self, context, port, port_id):
|
||||
network_id = port['port']['network_id']
|
||||
ips = self._allocate_ips_for_port(context, port)
|
||||
if ips:
|
||||
|
@ -210,7 +210,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
self._store_ip_allocation(context, ip_address, network_id,
|
||||
subnet_id, port_id)
|
||||
|
||||
def _update_port_with_ips(self, context, db_port, new_port, new_mac):
|
||||
def update_port_with_ips(self, context, db_port, new_port, new_mac):
|
||||
changes = self.Changes(add=[], original=[], remove=[])
|
||||
# Check if the IPs need to be updated
|
||||
network_id = db_port['network_id']
|
||||
|
@ -431,7 +431,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
|
||||
return ips
|
||||
|
||||
def _add_auto_addrs_on_network_ports(self, context, subnet):
|
||||
def add_auto_addrs_on_network_ports(self, context, subnet):
|
||||
"""For an auto-address subnet, add addrs for ports on the net."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
network_id = subnet['network_id']
|
||||
|
@ -470,7 +470,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
ip_address=ip_address)
|
||||
return ip_address
|
||||
|
||||
def _allocate_subnet(self, context, network, subnet, subnetpool_id):
|
||||
def allocate_subnet(self, context, network, subnet, subnetpool_id):
|
||||
subnetpool = None
|
||||
if subnetpool_id:
|
||||
subnetpool = self._get_subnetpool(context, subnetpool_id)
|
||||
|
@ -486,9 +486,10 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
|
|||
subnet['allocation_pools'],
|
||||
subnet['cidr'],
|
||||
subnet['gateway_ip'])
|
||||
subnet_request = ipam.SubnetRequestFactory.get_request(context,
|
||||
subnet,
|
||||
subnetpool)
|
||||
|
||||
subnet_request = ipam_req.SubnetRequestFactory.get_request(context,
|
||||
subnet,
|
||||
subnetpool)
|
||||
|
||||
if subnetpool_id:
|
||||
driver = subnet_alloc.SubnetAllocator(subnetpool, context)
|
||||
|
|
|
@ -408,41 +408,6 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
|
|||
|
||||
return False
|
||||
|
||||
def get_snat_candidates(self, sync_router, l3_agents):
|
||||
"""Get the valid snat enabled l3 agents for the distributed router."""
|
||||
candidates = []
|
||||
is_router_distributed = sync_router.get('distributed', False)
|
||||
if not is_router_distributed:
|
||||
return candidates
|
||||
for l3_agent in l3_agents:
|
||||
if not l3_agent.admin_state_up:
|
||||
continue
|
||||
|
||||
agent_conf = self.get_configuration_dict(l3_agent)
|
||||
agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
|
||||
constants.L3_AGENT_MODE_LEGACY)
|
||||
if agent_mode != constants.L3_AGENT_MODE_DVR_SNAT:
|
||||
continue
|
||||
|
||||
router_id = agent_conf.get('router_id', None)
|
||||
use_namespaces = agent_conf.get('use_namespaces', True)
|
||||
if not use_namespaces and router_id != sync_router['id']:
|
||||
continue
|
||||
|
||||
handle_internal_only_routers = agent_conf.get(
|
||||
'handle_internal_only_routers', True)
|
||||
gateway_external_network_id = agent_conf.get(
|
||||
'gateway_external_network_id', None)
|
||||
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
|
||||
'network_id')
|
||||
if ((not ex_net_id and not handle_internal_only_routers) or
|
||||
(ex_net_id and gateway_external_network_id and
|
||||
ex_net_id != gateway_external_network_id)):
|
||||
continue
|
||||
|
||||
candidates.append(l3_agent)
|
||||
return candidates
|
||||
|
||||
def get_l3_agent_candidates(self, context, sync_router, l3_agents,
|
||||
ignore_admin_state=False):
|
||||
"""Get the valid l3 agents for the router from a list of l3_agents."""
|
||||
|
|
|
@ -513,6 +513,10 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase):
|
|||
port_id=port['id'],
|
||||
device_id=port['device_id'])
|
||||
|
||||
if not port['fixed_ips']:
|
||||
msg = _LE('Router port must have at least one fixed IP')
|
||||
raise n_exc.BadRequest(resource='router', msg=msg)
|
||||
|
||||
# Only allow one router port with IPv6 subnets per network id
|
||||
if self._port_has_ipv6_address(port):
|
||||
for existing_port in (rp.port for rp in router.attached_ports):
|
||||
|
|
|
@ -299,6 +299,41 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
CentralizedSnatL3AgentBinding.router_id.in_(router_ids))
|
||||
return query.all()
|
||||
|
||||
def get_snat_candidates(self, sync_router, l3_agents):
|
||||
"""Get the valid snat enabled l3 agents for the distributed router."""
|
||||
candidates = []
|
||||
is_router_distributed = sync_router.get('distributed', False)
|
||||
if not is_router_distributed:
|
||||
return candidates
|
||||
for l3_agent in l3_agents:
|
||||
if not l3_agent.admin_state_up:
|
||||
continue
|
||||
|
||||
agent_conf = self.get_configuration_dict(l3_agent)
|
||||
agent_mode = agent_conf.get(n_const.L3_AGENT_MODE,
|
||||
n_const.L3_AGENT_MODE_LEGACY)
|
||||
if agent_mode != n_const.L3_AGENT_MODE_DVR_SNAT:
|
||||
continue
|
||||
|
||||
router_id = agent_conf.get('router_id', None)
|
||||
use_namespaces = agent_conf.get('use_namespaces', True)
|
||||
if not use_namespaces and router_id != sync_router['id']:
|
||||
continue
|
||||
|
||||
handle_internal_only_routers = agent_conf.get(
|
||||
'handle_internal_only_routers', True)
|
||||
gateway_external_network_id = agent_conf.get(
|
||||
'gateway_external_network_id', None)
|
||||
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
|
||||
'network_id')
|
||||
if ((not ex_net_id and not handle_internal_only_routers) or
|
||||
(ex_net_id and gateway_external_network_id and
|
||||
ex_net_id != gateway_external_network_id)):
|
||||
continue
|
||||
|
||||
candidates.append(l3_agent)
|
||||
return candidates
|
||||
|
||||
def schedule_snat_router(self, context, router_id, sync_router):
|
||||
"""Schedule the snat router on l3 service agent."""
|
||||
active_l3_agents = self.get_l3_agents(context, active=True)
|
||||
|
|
|
@ -27,6 +27,7 @@ from neutron.callbacks import exceptions
|
|||
from neutron.callbacks import registry
|
||||
from neutron.callbacks import resources
|
||||
from neutron.common import constants
|
||||
from neutron.common import utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import model_base
|
||||
|
@ -735,3 +736,31 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
|
|||
port['port'][ext_sg.SECURITYGROUPS] != [])):
|
||||
return True
|
||||
return False
|
||||
|
||||
def update_security_group_on_port(self, context, id, port,
|
||||
original_port, updated_port):
|
||||
"""Update security groups on port.
|
||||
|
||||
This method returns a flag which indicates request notification
|
||||
is required and does not perform notification itself.
|
||||
It is because another changes for the port may require notification.
|
||||
"""
|
||||
need_notify = False
|
||||
port_updates = port['port']
|
||||
if (ext_sg.SECURITYGROUPS in port_updates and
|
||||
not utils.compare_elements(
|
||||
original_port.get(ext_sg.SECURITYGROUPS),
|
||||
port_updates[ext_sg.SECURITYGROUPS])):
|
||||
# delete the port binding and read it with the new rules
|
||||
port_updates[ext_sg.SECURITYGROUPS] = (
|
||||
self._get_security_groups_on_port(context, port))
|
||||
self._delete_port_security_group_bindings(context, id)
|
||||
self._process_port_create_security_group(
|
||||
context,
|
||||
updated_port,
|
||||
port_updates[ext_sg.SECURITYGROUPS])
|
||||
need_notify = True
|
||||
else:
|
||||
updated_port[ext_sg.SECURITYGROUPS] = (
|
||||
original_port[ext_sg.SECURITYGROUPS])
|
||||
return need_notify
|
||||
|
|
|
@ -91,34 +91,6 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
|
|||
self.notifier.security_groups_rule_updated(context,
|
||||
[rule['security_group_id']])
|
||||
|
||||
def update_security_group_on_port(self, context, id, port,
|
||||
original_port, updated_port):
|
||||
"""Update security groups on port.
|
||||
|
||||
This method returns a flag which indicates request notification
|
||||
is required and does not perform notification itself.
|
||||
It is because another changes for the port may require notification.
|
||||
"""
|
||||
need_notify = False
|
||||
port_updates = port['port']
|
||||
if (ext_sg.SECURITYGROUPS in port_updates and
|
||||
not utils.compare_elements(
|
||||
original_port.get(ext_sg.SECURITYGROUPS),
|
||||
port_updates[ext_sg.SECURITYGROUPS])):
|
||||
# delete the port binding and read it with the new rules
|
||||
port_updates[ext_sg.SECURITYGROUPS] = (
|
||||
self._get_security_groups_on_port(context, port))
|
||||
self._delete_port_security_group_bindings(context, id)
|
||||
self._process_port_create_security_group(
|
||||
context,
|
||||
updated_port,
|
||||
port_updates[ext_sg.SECURITYGROUPS])
|
||||
need_notify = True
|
||||
else:
|
||||
updated_port[ext_sg.SECURITYGROUPS] = (
|
||||
original_port[ext_sg.SECURITYGROUPS])
|
||||
return need_notify
|
||||
|
||||
def check_and_notify_security_group_member_changed(
|
||||
self, context, original_port, updated_port):
|
||||
sg_change = not utils.compare_elements(
|
||||
|
|
|
@ -1,289 +0,0 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import netaddr
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import constants
|
||||
from neutron.common import ipv6_utils
|
||||
from neutron.common import utils as common_utils
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class SubnetPool(object):
|
||||
"""Represents a pool of IPs available inside an address scope."""
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class SubnetRequest(object):
|
||||
"""Carries the data needed to make a subnet request
|
||||
|
||||
The data validated and carried by an instance of this class is the data
|
||||
that is common to any type of request. This class shouldn't be
|
||||
instantiated on its own. Rather, a subclass of this class should be used.
|
||||
"""
|
||||
def __init__(self, tenant_id, subnet_id,
|
||||
gateway_ip=None, allocation_pools=None):
|
||||
"""Initialize and validate
|
||||
|
||||
:param tenant_id: The tenant id who will own the subnet
|
||||
:type tenant_id: str uuid
|
||||
:param subnet_id: Neutron's subnet ID
|
||||
:type subnet_id: str uuid
|
||||
:param gateway_ip: An IP to reserve for the subnet gateway.
|
||||
:type gateway_ip: None or convertible to netaddr.IPAddress
|
||||
:param allocation_pools: The pool from which IPAM should allocate
|
||||
addresses. The allocator *may* allow allocating addresses outside
|
||||
of this range if specifically requested.
|
||||
:type allocation_pools: A list of netaddr.IPRange. None if not
|
||||
specified.
|
||||
"""
|
||||
self._tenant_id = tenant_id
|
||||
self._subnet_id = subnet_id
|
||||
self._gateway_ip = None
|
||||
self._allocation_pools = None
|
||||
|
||||
if gateway_ip is not None:
|
||||
self._gateway_ip = netaddr.IPAddress(gateway_ip)
|
||||
|
||||
if allocation_pools is not None:
|
||||
allocation_pools = sorted(allocation_pools)
|
||||
previous = None
|
||||
for pool in allocation_pools:
|
||||
if not isinstance(pool, netaddr.ip.IPRange):
|
||||
raise TypeError("Ranges must be netaddr.IPRange")
|
||||
if previous and pool.first <= previous.last:
|
||||
raise ValueError("Ranges must not overlap")
|
||||
previous = pool
|
||||
if 1 < len(allocation_pools):
|
||||
# Checks that all the ranges are in the same IP version.
|
||||
# IPRange sorts first by ip version so we can get by with just
|
||||
# checking the first and the last range having sorted them
|
||||
# above.
|
||||
first_version = allocation_pools[0].version
|
||||
last_version = allocation_pools[-1].version
|
||||
if first_version != last_version:
|
||||
raise ValueError("Ranges must be in the same IP version")
|
||||
self._allocation_pools = allocation_pools
|
||||
|
||||
if self.gateway_ip and self.allocation_pools:
|
||||
if self.gateway_ip.version != self.allocation_pools[0].version:
|
||||
raise ValueError("Gateway IP version inconsistent with "
|
||||
"allocation pool version")
|
||||
|
||||
@property
|
||||
def tenant_id(self):
|
||||
return self._tenant_id
|
||||
|
||||
@property
|
||||
def subnet_id(self):
|
||||
return self._subnet_id
|
||||
|
||||
@property
|
||||
def gateway_ip(self):
|
||||
return self._gateway_ip
|
||||
|
||||
@property
|
||||
def allocation_pools(self):
|
||||
return self._allocation_pools
|
||||
|
||||
def _validate_with_subnet(self, subnet_cidr):
|
||||
if self.gateway_ip and cfg.CONF.force_gateway_on_subnet:
|
||||
gw_ip = netaddr.IPAddress(self.gateway_ip)
|
||||
if (gw_ip.version == 4 or (gw_ip.version == 6
|
||||
and not gw_ip.is_link_local())):
|
||||
if self.gateway_ip not in subnet_cidr:
|
||||
raise ValueError("gateway_ip is not in the subnet")
|
||||
|
||||
if self.allocation_pools:
|
||||
if subnet_cidr.version != self.allocation_pools[0].version:
|
||||
raise ValueError("allocation_pools use the wrong ip version")
|
||||
for pool in self.allocation_pools:
|
||||
if pool not in subnet_cidr:
|
||||
raise ValueError("allocation_pools are not in the subnet")
|
||||
|
||||
|
||||
class AnySubnetRequest(SubnetRequest):
|
||||
"""A template for allocating an unspecified subnet from IPAM
|
||||
|
||||
A driver may not implement this type of request. For example, The initial
|
||||
reference implementation will not support this. The API has no way of
|
||||
creating a subnet without a specific address until subnet-allocation is
|
||||
implemented.
|
||||
"""
|
||||
WILDCARDS = {constants.IPv4: '0.0.0.0',
|
||||
constants.IPv6: '::'}
|
||||
|
||||
def __init__(self, tenant_id, subnet_id, version, prefixlen,
|
||||
gateway_ip=None, allocation_pools=None):
|
||||
"""
|
||||
:param version: Either constants.IPv4 or constants.IPv6
|
||||
:param prefixlen: The prefix len requested. Must be within the min and
|
||||
max allowed.
|
||||
:type prefixlen: int
|
||||
"""
|
||||
super(AnySubnetRequest, self).__init__(
|
||||
tenant_id=tenant_id,
|
||||
subnet_id=subnet_id,
|
||||
gateway_ip=gateway_ip,
|
||||
allocation_pools=allocation_pools)
|
||||
|
||||
net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen))
|
||||
self._validate_with_subnet(net)
|
||||
|
||||
self._prefixlen = prefixlen
|
||||
|
||||
@property
|
||||
def prefixlen(self):
|
||||
return self._prefixlen
|
||||
|
||||
|
||||
class SpecificSubnetRequest(SubnetRequest):
|
||||
"""A template for allocating a specified subnet from IPAM
|
||||
|
||||
The initial reference implementation will probably just allow any
|
||||
allocation, even overlapping ones. This can be expanded on by future
|
||||
blueprints.
|
||||
"""
|
||||
def __init__(self, tenant_id, subnet_id, subnet_cidr,
|
||||
gateway_ip=None, allocation_pools=None):
|
||||
"""
|
||||
:param subnet: The subnet requested. Can be IPv4 or IPv6. However,
|
||||
when IPAM tries to fulfill this request, the IP version must match
|
||||
the version of the address scope being used.
|
||||
:type subnet: netaddr.IPNetwork or convertible to one
|
||||
"""
|
||||
super(SpecificSubnetRequest, self).__init__(
|
||||
tenant_id=tenant_id,
|
||||
subnet_id=subnet_id,
|
||||
gateway_ip=gateway_ip,
|
||||
allocation_pools=allocation_pools)
|
||||
|
||||
self._subnet_cidr = netaddr.IPNetwork(subnet_cidr)
|
||||
self._validate_with_subnet(self._subnet_cidr)
|
||||
|
||||
@property
|
||||
def subnet_cidr(self):
|
||||
return self._subnet_cidr
|
||||
|
||||
@property
|
||||
def prefixlen(self):
|
||||
return self._subnet_cidr.prefixlen
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class AddressRequest(object):
|
||||
"""Abstract base class for address requests"""
|
||||
|
||||
|
||||
class SpecificAddressRequest(AddressRequest):
|
||||
"""For requesting a specified address from IPAM"""
|
||||
def __init__(self, address):
|
||||
"""
|
||||
:param address: The address being requested
|
||||
:type address: A netaddr.IPAddress or convertible to one.
|
||||
"""
|
||||
super(SpecificAddressRequest, self).__init__()
|
||||
self._address = netaddr.IPAddress(address)
|
||||
|
||||
@property
|
||||
def address(self):
|
||||
return self._address
|
||||
|
||||
|
||||
class AnyAddressRequest(AddressRequest):
|
||||
"""Used to request any available address from the pool."""
|
||||
|
||||
|
||||
class AutomaticAddressRequest(SpecificAddressRequest):
|
||||
"""Used to create auto generated addresses, such as EUI64"""
|
||||
EUI64 = 'eui64'
|
||||
|
||||
def _generate_eui64_address(self, **kwargs):
|
||||
if set(kwargs) != set(['prefix', 'mac']):
|
||||
raise ipam_exc.AddressCalculationFailure(
|
||||
address_type='eui-64',
|
||||
reason='must provide exactly 2 arguments - cidr and MAC')
|
||||
prefix = kwargs['prefix']
|
||||
mac_address = kwargs['mac']
|
||||
return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address)
|
||||
|
||||
_address_generators = {EUI64: _generate_eui64_address}
|
||||
|
||||
def __init__(self, address_type=EUI64, **kwargs):
|
||||
"""
|
||||
This constructor builds an automatic IP address. Parameter needed for
|
||||
generating it can be passed as optional keyword arguments.
|
||||
|
||||
:param address_type: the type of address to generate.
|
||||
It could be a eui-64 address, a random IPv6 address, or
|
||||
a ipv4 link-local address.
|
||||
For the Kilo release only eui-64 addresses will be supported.
|
||||
"""
|
||||
address_generator = self._address_generators.get(address_type)
|
||||
if not address_generator:
|
||||
raise ipam_exc.InvalidAddressType(address_type=address_type)
|
||||
address = address_generator(self, **kwargs)
|
||||
super(AutomaticAddressRequest, self).__init__(address)
|
||||
|
||||
|
||||
class RouterGatewayAddressRequest(AddressRequest):
|
||||
"""Used to request allocating the special router gateway address."""
|
||||
|
||||
|
||||
class AddressRequestFactory(object):
|
||||
"""Builds request using ip info
|
||||
|
||||
Additional parameters(port and context) are not used in default
|
||||
implementation, but planned to be used in sub-classes
|
||||
provided by specific ipam driver,
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def get_request(cls, context, port, ip):
|
||||
if not ip:
|
||||
return AnyAddressRequest()
|
||||
else:
|
||||
return SpecificAddressRequest(ip)
|
||||
|
||||
|
||||
class SubnetRequestFactory(object):
    """Builds a SubnetRequest out of subnet/subnetpool dictionaries."""

    @classmethod
    def get_request(cls, context, subnet, subnetpool):
        cidr = subnet.get('cidr')
        subnet_id = subnet.get('id', uuidutils.generate_uuid())

        if attributes.is_attr_set(cidr):
            # A concrete CIDR was supplied: request exactly that subnet.
            return SpecificSubnetRequest(subnet['tenant_id'],
                                         subnet_id,
                                         cidr,
                                         subnet.get('gateway_ip'),
                                         subnet.get('allocation_pools'))

        # No CIDR given: ask for any subnet of the requested prefix
        # length, falling back to the pool default when unset.
        prefixlen = subnet['prefixlen']
        if not attributes.is_attr_set(prefixlen):
            prefixlen = int(subnetpool['default_prefixlen'])

        return AnySubnetRequest(
            subnet['tenant_id'],
            subnet_id,
            common_utils.ip_version_from_int(subnetpool['ip_version']),
            prefixlen)
|
|
@ -16,7 +16,7 @@ from oslo_config import cfg
|
|||
from oslo_log import log
|
||||
import six
|
||||
|
||||
from neutron import ipam
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron import manager
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
@ -101,14 +101,14 @@ class Pool(object):
|
|||
|
||||
Can be overridden on driver level to return custom factory
|
||||
"""
|
||||
return ipam.SubnetRequestFactory
|
||||
return ipam_req.SubnetRequestFactory
|
||||
|
||||
def get_address_request_factory(self):
|
||||
"""Returns default AddressRequestFactory
|
||||
|
||||
Can be overridden on driver level to return custom factory
|
||||
"""
|
||||
return ipam.AddressRequestFactory
|
||||
return ipam_req.AddressRequestFactory
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
|
|
|
@ -21,10 +21,10 @@ from neutron.common import exceptions as n_exc
|
|||
from neutron.common import ipv6_utils
|
||||
from neutron.db import api as db_api
|
||||
from neutron.i18n import _LE
|
||||
from neutron import ipam
|
||||
from neutron.ipam import driver as ipam_base
|
||||
from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron.ipam import subnet_alloc
|
||||
from neutron.ipam import utils as ipam_utils
|
||||
from neutron import manager
|
||||
|
@ -319,7 +319,7 @@ class NeutronDbSubnet(ipam_base.Subnet):
|
|||
# NOTE(salv-orlando): It would probably better to have a simpler
|
||||
# model for address requests and just check whether there is a
|
||||
# specific IP address specified in address_request
|
||||
if isinstance(address_request, ipam.SpecificAddressRequest):
|
||||
if isinstance(address_request, ipam_req.SpecificAddressRequest):
|
||||
# This handles both specific and automatic address requests
|
||||
# Check availability of requested IP
|
||||
ip_address = str(address_request.address)
|
||||
|
@ -359,7 +359,7 @@ class NeutronDbSubnet(ipam_base.Subnet):
|
|||
|
||||
def get_details(self):
|
||||
"""Return subnet data as a SpecificSubnetRequest"""
|
||||
return ipam.SpecificSubnetRequest(
|
||||
return ipam_req.SpecificSubnetRequest(
|
||||
self._tenant_id, self.subnet_manager.neutron_id,
|
||||
self._cidr, self._gateway_ip, self._pools)
|
||||
|
||||
|
@ -404,7 +404,7 @@ class NeutronDbPool(subnet_alloc.SubnetAllocator):
|
|||
subnet_request = subnet.get_details()
|
||||
|
||||
# SubnetRequest must be an instance of SpecificSubnet
|
||||
if not isinstance(subnet_request, ipam.SpecificSubnetRequest):
|
||||
if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest):
|
||||
raise ipam_exc.InvalidSubnetRequestType(
|
||||
subnet_type=type(subnet_request))
|
||||
return NeutronDbSubnet.create_from_subnet_request(subnet_request,
|
||||
|
|
|
@ -0,0 +1,289 @@
|
|||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import netaddr
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import constants
|
||||
from neutron.common import ipv6_utils
|
||||
from neutron.common import utils as common_utils
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class SubnetPool(object):
    """Represents a pool of IPs available inside an address scope."""
    # Abstract marker only; no interface is defined at this level.
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class SubnetRequest(object):
    """Carries the data needed to make a subnet request

    The data validated and carried by an instance of this class is the data
    that is common to any type of request. This class shouldn't be
    instantiated on its own. Rather, a subclass of this class should be used.
    """
    def __init__(self, tenant_id, subnet_id,
                 gateway_ip=None, allocation_pools=None):
        """Initialize and validate

        :param tenant_id: The tenant id who will own the subnet
        :type tenant_id: str uuid
        :param subnet_id: Neutron's subnet ID
        :type subnet_id: str uuid
        :param gateway_ip: An IP to reserve for the subnet gateway.
        :type gateway_ip: None or convertible to netaddr.IPAddress
        :param allocation_pools: The pool from which IPAM should allocate
            addresses.   The allocator *may* allow allocating addresses outside
            of this range if specifically requested.
        :type allocation_pools: A list of netaddr.IPRange.  None if not
            specified.
        :raises TypeError: if an allocation pool is not a netaddr.IPRange
        :raises ValueError: if pools overlap, mix IP versions, or disagree
            with the gateway's IP version
        """
        self._tenant_id = tenant_id
        self._subnet_id = subnet_id
        # Defaults stay None so the version cross-check below can use
        # simple truthiness.
        self._gateway_ip = None
        self._allocation_pools = None

        if gateway_ip is not None:
            self._gateway_ip = netaddr.IPAddress(gateway_ip)

        if allocation_pools is not None:
            # Sorting makes overlap detection a single linear pass over
            # adjacent ranges.
            allocation_pools = sorted(allocation_pools)
            previous = None
            for pool in allocation_pools:
                if not isinstance(pool, netaddr.ip.IPRange):
                    raise TypeError("Ranges must be netaddr.IPRange")
                if previous and pool.first <= previous.last:
                    raise ValueError("Ranges must not overlap")
                previous = pool
            if 1 < len(allocation_pools):
                # Checks that all the ranges are in the same IP version.
                # IPRange sorts first by ip version so we can get by with just
                # checking the first and the last range having sorted them
                # above.
                first_version = allocation_pools[0].version
                last_version = allocation_pools[-1].version
                if first_version != last_version:
                    raise ValueError("Ranges must be in the same IP version")
            self._allocation_pools = allocation_pools

        if self.gateway_ip and self.allocation_pools:
            if self.gateway_ip.version != self.allocation_pools[0].version:
                raise ValueError("Gateway IP version inconsistent with "
                                 "allocation pool version")

    @property
    def tenant_id(self):
        # Owner of the subnet being requested.
        return self._tenant_id

    @property
    def subnet_id(self):
        # Neutron's id for the subnet (uuid string).
        return self._subnet_id

    @property
    def gateway_ip(self):
        # netaddr.IPAddress or None when no gateway was requested.
        return self._gateway_ip

    @property
    def allocation_pools(self):
        # Sorted list of netaddr.IPRange, or None when unspecified.
        return self._allocation_pools

    def _validate_with_subnet(self, subnet_cidr):
        """Check gateway and pools against a concrete subnet CIDR.

        :param subnet_cidr: netaddr.IPNetwork the request resolves to
        :raises ValueError: when gateway or pools fall outside the CIDR
        """
        if self.gateway_ip and cfg.CONF.force_gateway_on_subnet:
            gw_ip = netaddr.IPAddress(self.gateway_ip)
            # IPv6 link-local gateways are exempt from the containment
            # check; everything else must live inside the subnet when
            # force_gateway_on_subnet is enabled.
            if (gw_ip.version == 4 or (gw_ip.version == 6
                                       and not gw_ip.is_link_local())):
                if self.gateway_ip not in subnet_cidr:
                    raise ValueError("gateway_ip is not in the subnet")

        if self.allocation_pools:
            if subnet_cidr.version != self.allocation_pools[0].version:
                raise ValueError("allocation_pools use the wrong ip version")
            for pool in self.allocation_pools:
                if pool not in subnet_cidr:
                    raise ValueError("allocation_pools are not in the subnet")
|
||||
|
||||
|
||||
class AnySubnetRequest(SubnetRequest):
    """A template for allocating an unspecified subnet from IPAM

    A driver may not implement this type of request. For example, The initial
    reference implementation will not support this. The API has no way of
    creating a subnet without a specific address until subnet-allocation is
    implemented.
    """
    WILDCARDS = {constants.IPv4: '0.0.0.0',
                 constants.IPv6: '::'}

    def __init__(self, tenant_id, subnet_id, version, prefixlen,
                 gateway_ip=None, allocation_pools=None):
        """
        :param version: Either constants.IPv4 or constants.IPv6
        :param prefixlen: The prefix len requested.  Must be within the min
            and max allowed.
        :type prefixlen: int
        """
        super(AnySubnetRequest, self).__init__(
            tenant_id=tenant_id,
            subnet_id=subnet_id,
            gateway_ip=gateway_ip,
            allocation_pools=allocation_pools)

        # Validate gateway/pools against a wildcard network of the
        # requested version and prefix length.
        wildcard_cidr = '%s/%s' % (self.WILDCARDS[version], prefixlen)
        self._validate_with_subnet(netaddr.IPNetwork(wildcard_cidr))

        self._prefixlen = prefixlen

    @property
    def prefixlen(self):
        """Requested prefix length as an int."""
        return self._prefixlen
|
||||
|
||||
|
||||
class SpecificSubnetRequest(SubnetRequest):
    """A template for allocating a specified subnet from IPAM

    The initial reference implementation will probably just allow any
    allocation, even overlapping ones. This can be expanded on by future
    blueprints.
    """
    def __init__(self, tenant_id, subnet_id, subnet_cidr,
                 gateway_ip=None, allocation_pools=None):
        """
        :param subnet: The subnet requested.  Can be IPv4 or IPv6.  However,
            when IPAM tries to fulfill this request, the IP version must match
            the version of the address scope being used.
        :type subnet: netaddr.IPNetwork or convertible to one
        """
        super(SpecificSubnetRequest, self).__init__(
            tenant_id=tenant_id,
            subnet_id=subnet_id,
            gateway_ip=gateway_ip,
            allocation_pools=allocation_pools)

        # Normalize to a netaddr.IPNetwork and validate gateway/pools
        # against it before exposing it.
        cidr = netaddr.IPNetwork(subnet_cidr)
        self._validate_with_subnet(cidr)
        self._subnet_cidr = cidr

    @property
    def subnet_cidr(self):
        """The requested subnet as a netaddr.IPNetwork."""
        return self._subnet_cidr

    @property
    def prefixlen(self):
        """Prefix length of the requested CIDR."""
        return self._subnet_cidr.prefixlen
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class AddressRequest(object):
    """Abstract base class for address requests"""
    # Payload (if any) lives in the concrete subclasses; consumers can
    # distinguish requests by isinstance checks on the subclass type.
|
||||
|
||||
|
||||
class SpecificAddressRequest(AddressRequest):
    """For requesting a specified address from IPAM"""

    def __init__(self, address):
        """
        :param address: The address being requested
        :type address: A netaddr.IPAddress or convertible to one.
        """
        super(SpecificAddressRequest, self).__init__()
        # Normalize whatever was passed in to a netaddr.IPAddress.
        self._address = netaddr.IPAddress(address)

    @property
    def address(self):
        """The requested address as a netaddr.IPAddress."""
        return self._address
|
||||
|
||||
|
||||
class AnyAddressRequest(AddressRequest):
    """Used to request any available address from the pool."""
    # Marker class: carries no payload of its own.
|
||||
|
||||
|
||||
class AutomaticAddressRequest(SpecificAddressRequest):
    """Used to create auto generated addresses, such as EUI64"""

    # Identifier for the EUI-64 generation strategy.
    EUI64 = 'eui64'

    def _generate_eui64_address(self, **kwargs):
        """Derive an IPv6 address from a prefix and a MAC via EUI-64.

        :param prefix: IPv6 prefix to embed the interface identifier in
        :param mac: MAC address used to build the EUI-64 interface id
        :returns: the address computed by ipv6_utils.get_ipv6_addr_by_EUI64
        :raises AddressCalculationFailure: if the keyword arguments are not
            exactly 'prefix' and 'mac'
        """
        if set(kwargs) != set(['prefix', 'mac']):
            # Fixed message: the expected keyword is 'prefix', not 'cidr'.
            raise ipam_exc.AddressCalculationFailure(
                address_type='eui-64',
                reason='must provide exactly 2 arguments - prefix and MAC')
        prefix = kwargs['prefix']
        mac_address = kwargs['mac']
        return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address)

    # Maps address_type identifiers to their generator functions.
    _address_generators = {EUI64: _generate_eui64_address}

    def __init__(self, address_type=EUI64, **kwargs):
        """
        This constructor builds an automatic IP address. Parameter needed for
        generating it can be passed as optional keyword arguments.

        :param address_type: the type of address to generate.
            It could be a eui-64 address, a random IPv6 address, or
            a ipv4 link-local address.
            For the Kilo release only eui-64 addresses will be supported.
        :raises InvalidAddressType: if no generator is registered for
            address_type
        """
        address_generator = self._address_generators.get(address_type)
        if not address_generator:
            raise ipam_exc.InvalidAddressType(address_type=address_type)
        # Generators are stored as plain functions in a class-level dict,
        # not bound methods, so 'self' must be passed explicitly.
        address = address_generator(self, **kwargs)
        super(AutomaticAddressRequest, self).__init__(address)
|
||||
|
||||
|
||||
class RouterGatewayAddressRequest(AddressRequest):
    """Used to request allocating the special router gateway address."""
    # Marker class: carries no payload of its own; IPAM drivers can
    # dispatch on its type.
|
||||
|
||||
|
||||
class AddressRequestFactory(object):
    """Builds an AddressRequest from raw ip information.

    The context and port arguments are unused by this default
    implementation; they exist so sub-classes supplied by specific
    ipam drivers can take them into account.
    """

    @classmethod
    def get_request(cls, context, port, ip):
        # Guard clause: a missing/empty ip means "allocate anything".
        if not ip:
            return AnyAddressRequest()
        return SpecificAddressRequest(ip)
|
||||
|
||||
|
||||
class SubnetRequestFactory(object):
    """Builds a SubnetRequest out of subnet/subnetpool dictionaries."""

    @classmethod
    def get_request(cls, context, subnet, subnetpool):
        cidr = subnet.get('cidr')
        subnet_id = subnet.get('id', uuidutils.generate_uuid())

        if attributes.is_attr_set(cidr):
            # A concrete CIDR was supplied: request exactly that subnet.
            return SpecificSubnetRequest(subnet['tenant_id'],
                                         subnet_id,
                                         cidr,
                                         subnet.get('gateway_ip'),
                                         subnet.get('allocation_pools'))

        # No CIDR given: ask for any subnet of the requested prefix
        # length, falling back to the pool default when unset.
        prefixlen = subnet['prefixlen']
        if not attributes.is_attr_set(prefixlen):
            prefixlen = int(subnetpool['default_prefixlen'])

        return AnySubnetRequest(
            subnet['tenant_id'],
            subnet_id,
            common_utils.ip_version_from_int(subnetpool['ip_version']),
            prefixlen)
|
|
@ -23,8 +23,8 @@ from neutron.api.v2 import attributes
|
|||
from neutron.common import constants
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.db import models_v2
|
||||
import neutron.ipam as ipam
|
||||
from neutron.ipam import driver
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron.ipam import utils as ipam_utils
|
||||
|
||||
|
||||
|
@ -151,9 +151,9 @@ class SubnetAllocator(driver.Pool):
|
|||
prefixlen=request.prefixlen,
|
||||
min_prefixlen=min_prefixlen)
|
||||
|
||||
if isinstance(request, ipam.AnySubnetRequest):
|
||||
if isinstance(request, ipam_req.AnySubnetRequest):
|
||||
return self._allocate_any_subnet(request)
|
||||
elif isinstance(request, ipam.SpecificSubnetRequest):
|
||||
elif isinstance(request, ipam_req.SpecificSubnetRequest):
|
||||
return self._allocate_specific_subnet(request)
|
||||
else:
|
||||
msg = _("Unsupported request type")
|
||||
|
@ -177,7 +177,7 @@ class IpamSubnet(driver.Subnet):
|
|||
cidr,
|
||||
gateway_ip=None,
|
||||
allocation_pools=None):
|
||||
self._req = ipam.SpecificSubnetRequest(
|
||||
self._req = ipam_req.SpecificSubnetRequest(
|
||||
tenant_id,
|
||||
subnet_id,
|
||||
cidr,
|
||||
|
|
|
@ -8,11 +8,10 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-05-29 06:16+0000\n"
|
||||
"PO-Revision-Date: 2015-05-28 20:54+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/"
|
||||
"de/)\n"
|
||||
"Language-Team: German (http://www.transifex.com/p/neutron/language/de/)\n"
|
||||
"Language: de\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -72,26 +71,6 @@ msgstr "Versuch, ungefilterten Portfilter %r zu entfernen"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "Versuch, ungefilterten Portfilter %s zu aktualisieren"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "%s abgefangen. Vorgang wird beendet"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "Untergeordnetes Element %s abgefangen; Vorgang wird beendet"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "Konfigurations-Paste-Datei: %s"
|
||||
|
@ -110,13 +89,6 @@ msgstr "Gerät %s nicht für Plug-in definiert"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "Sicherheitsgruppenerweiterung wurde inaktiviert."
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "Verzweigung zu schnell; im Ruhemodus"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "Ungültige IP-Adresse in Pool gefunden: %(start)s - %(end)s:"
|
||||
|
@ -180,10 +152,6 @@ msgstr "Kein %s-Plug-in geladen"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "OVS-Bereinigungsprozedur erfolgreich abgeschlossen"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr ""
|
||||
"Übergeordneter Prozess wurde unerwartet abgebrochen. Vorgang wird beendet"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "Port %(device)s aktualisiert. Details: %(details)s"
|
||||
|
@ -222,15 +190,6 @@ msgstr "Sicherheitsgruppenmitglied aktualisiert %r"
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "Sicherheitsgruppenregel aktualisiert %r"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr ""
|
||||
"Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr ""
|
||||
|
@ -245,14 +204,6 @@ msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
|||
msgstr ""
|
||||
"Anfangs-IP-Adresse (%(start)s) ist größer als Ende-IP-Adresse (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "Untergeordnetes Element %d gestartet"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "Starten von %d Workers"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "Synchronisation von Status"
|
||||
|
||||
|
@ -264,10 +215,6 @@ msgstr ""
|
|||
"Überprüfung für CIDR: %(new_cidr)s fehlgeschlagen - Überschneidung mit "
|
||||
"Teilnetz %(subnet_id)s (CIDR: %(cidr)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "Warten auf Beenden von %d untergeordneten Elementen"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "'agent_updated' (Agent aktualisiert) durch Serverseite %s!"
|
||||
|
|
|
@ -7,11 +7,10 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-11 06:02+0000\n"
|
||||
"PO-Revision-Date: 2015-06-10 23:52+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/"
|
||||
"es/)\n"
|
||||
"Language-Team: Spanish (http://www.transifex.com/p/neutron/language/es/)\n"
|
||||
"Language: es\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -72,26 +71,6 @@ msgstr "Se ha intentado eliminar el filtro de puerto que no está filtrado %r"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "Se ha intentado actualizar el filtro de puerto que no está filtrado %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "Se ha captado %s, saliendo"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "Se ha captado %s, deteniendo hijos"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "Hijo %(pid)d matado por señal %(sig)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "El hijo %(pid)s ha salido con el estado %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "Hijo captado %s, saliendo"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "Archivo de configuración de pegar: %s"
|
||||
|
@ -110,13 +89,6 @@ msgstr "El dispositivo %s no está definido en el plug-in"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "La extensión security-group se ha inhabilitado."
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "Bifurcación demasiado rápida, en reposo"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr ""
|
||||
|
@ -184,9 +156,6 @@ msgstr "No se ha cargado ningún plug-in de %s"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "La limpieza de OVS se ha completado satisfactoriamente"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "El proceso padre se ha detenido inesperadamente, saliendo"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "Se ha actualizado el puerto %(device)s. Detalles: %(details)s"
|
||||
|
@ -225,14 +194,6 @@ msgstr "Se ha actualizado el miembro de grupo de seguridad %r"
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "Se ha actualizado la regla de grupo de seguridad %r"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "Saltando el puerto %s, ya que no hay ninguna IP configurada en él"
|
||||
|
@ -246,14 +207,6 @@ msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
|||
msgstr ""
|
||||
"La IP de inicio (%(start)s) es mayor que la IP de finalización (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "Se ha iniciado el hijo %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "Iniciando %d trabajadores"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "Sincronizando estado"
|
||||
|
||||
|
@ -265,13 +218,6 @@ msgstr ""
|
|||
"Se ha encontrado un error en validación para CIDR: %(new_cidr)s; se solapa "
|
||||
"con la subred %(subnet_id)s (CIDR: %(cidr)s)"
|
||||
|
||||
msgid "Wait called after thread killed. Cleaning up."
|
||||
msgstr "Esperar llamado después de cortar la línea. Limpiando."
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "En espera de %d hijos para salir"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "agent_updated por el lado del servidor %s!"
|
||||
|
|
|
@ -9,11 +9,10 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-02 06:15+0000\n"
|
||||
"PO-Revision-Date: 2015-06-01 16:41+0000\n"
|
||||
"Last-Translator: Maxime COQUEREL <max.coquerel@gmail.com>\n"
|
||||
"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/"
|
||||
"fr/)\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: French (http://www.transifex.com/p/neutron/language/fr/)\n"
|
||||
"Language: fr\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -115,26 +114,6 @@ msgstr ""
|
|||
"L'information de liaison pour le port %s n'a pas été trouvée, elle peut déjà "
|
||||
"avoir été effacée."
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "%s interceptée, sortie"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "%s interceptée, arrêt de l'enfant"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "Enfant %(pid)d arrêté par le signal %(sig)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "Processus fils %(pid)s terminé avec le status %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "L'enfant a reçu %s, sortie"
|
||||
|
||||
#, python-format
|
||||
msgid "Cleaning bridge: %s"
|
||||
msgstr "Supprimer le pont: %s"
|
||||
|
@ -174,17 +153,10 @@ msgstr "Appareil avec adresse MAC %s non-défini dans le plugin"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "Extension du groupe de sécurité désactivée."
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Exclude Devices: %s"
|
||||
msgstr "Equipements exclus: %s"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "Bifurcation trop rapide, pause"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "Adresse IP non valide trouvée dans le pool : %(start)s - %(end)s :"
|
||||
|
@ -280,9 +252,6 @@ msgstr "Aucun équipement avec MAC %s défini sur l'agent."
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "Le nettoyage d'OVS s'est terminé avec succès."
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "Processus parent arrêté de manière inattendue, sortie"
|
||||
|
||||
#, python-format
|
||||
msgid "Physical Devices mappings: %s"
|
||||
msgstr "Mappages d'Équipements Physiques: %s"
|
||||
|
@ -354,14 +323,6 @@ msgstr "Règle de groupe de sécurité mise à jour %r"
|
|||
msgid "Service %s is supported by the core plugin"
|
||||
msgstr "Le service %s est supporté par le core plugin"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "Tâche périodique %(task)s car elle est désactivée"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "Ignorer le port %s car aucune adresse IP n'est configurée"
|
||||
|
@ -376,14 +337,6 @@ msgstr ""
|
|||
"L'adresse IP de début (%(start)s) est supérieure à l'adresse IP de fin "
|
||||
"(%(end)s)."
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "Enfant démarré %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "Démarrage des travailleurs %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Subnet %s was deleted concurrently"
|
||||
msgstr "Le sous-réseau %s a été effacé en même temps"
|
||||
|
@ -402,13 +355,6 @@ msgstr ""
|
|||
"La validation du routage CIDR %(new_cidr)s a échoué : il chevauche le sous-"
|
||||
"réseau %(subnet_id)s (CIDR : %(cidr)s) "
|
||||
|
||||
msgid "Wait called after thread killed. Cleaning up."
|
||||
msgstr "Pause demandée après suppression de thread. Nettoyage."
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "En attente %d enfants pour sortie"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "agent_updated au niveau du serveur %s !"
|
||||
|
|
|
@ -8,11 +8,10 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-05-29 06:16+0000\n"
|
||||
"PO-Revision-Date: 2015-05-28 20:54+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Italian (http://www.transifex.com/projects/p/neutron/language/"
|
||||
"it/)\n"
|
||||
"Language-Team: Italian (http://www.transifex.com/p/neutron/language/it/)\n"
|
||||
"Language: it\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -71,26 +70,6 @@ msgstr "Tentativo di rimuovere il filtro della porta che non è filtrata %r"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "Tentativo di aggiornare il filtro della porta che non è filtrata %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "Rilevato %s, esistente"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "Intercettato %s, arresto in corso dei children"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "Child %(pid)d interrotto dal segnale %(sig)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "Child %(pid)s terminato con stato %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "Cogliere Child %s, uscendo"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "Configurazione file paste: %s"
|
||||
|
@ -109,13 +88,6 @@ msgstr "Unità %s non definita nel plugin"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "Estensione di security-group disabilitata."
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Ascolto di eventlet backdoor su %(port)s per il processo %(pid)d"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "Sblocco troppo veloce, attendere"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "Trovato un indirizzo IP invalido nel pool: %(start)s - %(end)s:"
|
||||
|
@ -177,9 +149,6 @@ msgstr "Nessun plugin %s caricato"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "Ripulitura di OVS completata correttamente"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "Il processo principale è stato interrotto inaspettatamente, uscire"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "Porta %(device)s aggiornata. Dettagli: %(details)s"
|
||||
|
@ -218,15 +187,6 @@ msgstr "Membro gruppo di sicurezza aggiornato %r"
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "Regola gruppo di sicurezza aggiornata %r"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr ""
|
||||
"Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "La porta %s viene ignorata in quanto non ha nessun IP configurato"
|
||||
|
@ -240,14 +200,6 @@ msgstr ""
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "L'IP iniziale (%(start)s) è superiore all'IP finale (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "Child avviato %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "Avvio %d operatori"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "Stato sincronizzazione"
|
||||
|
||||
|
@ -259,10 +211,6 @@ msgstr ""
|
|||
"Convalida per CIDR: %(new_cidr)s non riuscita - si sovrappone con la "
|
||||
"sottorete %(subnet_id)s (CIDR: %(cidr)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "In attesa %d degli elementi secondari per uscire"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "agent_updated dal lato server %s!"
|
||||
|
|
|
@ -8,11 +8,10 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-05-29 06:16+0000\n"
|
||||
"PO-Revision-Date: 2015-05-28 20:54+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/"
|
||||
"language/ja/)\n"
|
||||
"Language-Team: Japanese (http://www.transifex.com/p/neutron/language/ja/)\n"
|
||||
"Language: ja\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -73,22 +72,6 @@ msgstr "フィルター処理されていないポート・フィルター %r
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "フィルター処理されていないポート・フィルター %s を更新しようとしました"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "%s が見つかりました。終了しています"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "%s が見つかりました。子を停止しています"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "子 %(pid)d がシグナル %(sig)d によって強制終了されました"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "子 %(pid)s が状況 %(code)d で終了しました"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "構成貼り付けファイル: %s"
|
||||
|
@ -107,13 +90,6 @@ msgstr "デバイス %s がプラグインで定義されていません"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "security-group 拡張を無効にしました。"
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "fork が早すぎます。スリープ状態にしています"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "プールで無効な IP アドレスが見つかりました: %(start)s から %(end)s:"
|
||||
|
@ -181,9 +157,6 @@ msgstr "%s プラグインはロードされませんでした"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "OVS のクリーンアップが正常に完了しました"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "親プロセスが予期せずに停止しました。終了しています"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "ポート %(device)s が更新されました。詳細: %(details)s"
|
||||
|
@ -222,14 +195,6 @@ msgstr "セキュリティー・グループ・メンバーが %r を更新し
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "セキュリティー・グループ・ルールが %r を更新しました"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "ポート %s には IP が構成されていないため、このポートをスキップします"
|
||||
|
@ -241,14 +206,6 @@ msgstr "指定された IP アドレスが、サブネット IP バージョン
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "開始 IP (%(start)s) が終了 IP (%(end)s) より大きくなっています"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "子 %d を開始しました"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "%d ワーカーを開始しています"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "状態の同期中"
|
||||
|
||||
|
@ -260,10 +217,6 @@ msgstr ""
|
|||
"CIDR %(new_cidr)s の検証が失敗しました。サブネット %(subnet_id)s (CIDR: "
|
||||
"%(cidr)s) とオーバーラップしています"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "%d 個の子で終了を待機しています"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "サーバー・サイド %s による agent_updated!"
|
||||
|
|
|
@ -7,11 +7,11 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-05-29 06:16+0000\n"
|
||||
"PO-Revision-Date: 2015-05-28 20:54+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/"
|
||||
"language/ko_KR/)\n"
|
||||
"Language-Team: Korean (Korea) (http://www.transifex.com/p/neutron/language/"
|
||||
"ko_KR/)\n"
|
||||
"Language: ko_KR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -70,26 +70,6 @@ msgstr "필터링된 %r이(가) 아닌 포트 필터를 제거하려고 시도
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "필터링된 %s이(가) 아닌 포트 필터를 업데이트하려고 시도함"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "%s 발견, 종료 중"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "%s 발견, 하위 중지 중"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "%(pid)d 하위가 %(sig)d 신호에 의해 강제 종료됨"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "%(pid)s 하위가 %(code)d 상태와 함께 종료했음"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "자식으로 된 %s가 존재함."
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "구성 붙여넣기 파일: %s"
|
||||
|
@ -116,13 +96,6 @@ msgstr "%s 디바이스가 플러그인에서 정의되지 않음"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "보안 그룹 확장을 사용하지 않습니다. "
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "포크가 너무 빠름. 정지 중"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "풀에서 올바르지 않은 IP 주소 발견: %(start)s - %(end)s:"
|
||||
|
@ -198,9 +171,6 @@ msgstr "로드된 %s 플러그인이 없음"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "OVS 정리가 완료됨"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "%(device)s 포트가 업데이트되었습니다. 세부사항: %(details)s"
|
||||
|
@ -243,14 +213,6 @@ msgstr "보안 그룹 멤버가 %r을(를) 업데이트함"
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "보안 그룹 규칙이 %r을(를) 업데이트함"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "구성된 IP가 없어서 포트 %s을(를) 건너뜀"
|
||||
|
@ -262,14 +224,6 @@ msgstr "지정된 IP 주소가 서브넷 IP 버전과 일치하지 않음"
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "시작 IP(%(start)s)가 끝 IP(%(end)s)보다 큼"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "%d 하위를 시작했음"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "%d 작업자 시작 중"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "상태 동기화 중"
|
||||
|
||||
|
@ -285,10 +239,6 @@ msgstr ""
|
|||
"CIDR %(new_cidr)s 유효성 검증 실패 - 서브넷 %(subnet_id)s(CIDR: %(cidr)s)과"
|
||||
"(와) 겹침"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "%d 하위에서 종료하기를 대기 중임"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "서버측 %s!에 의한 agent_updated"
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -6,9 +6,9 @@
|
|||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: neutron 2015.2.0.dev464\n"
|
||||
"Project-Id-Version: neutron 7.0.0.0b2.dev192\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-11 06:02+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
|
@ -17,17 +17,17 @@ msgstr ""
|
|||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
||||
#: neutron/manager.py:116
|
||||
#: neutron/manager.py:117
|
||||
#, python-format
|
||||
msgid "Loading core plugin: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/manager.py:164
|
||||
#: neutron/manager.py:165
|
||||
#, python-format
|
||||
msgid "Service %s is supported by the core plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/manager.py:182
|
||||
#: neutron/manager.py:183
|
||||
#, python-format
|
||||
msgid "Loading Plugin: %s"
|
||||
msgstr ""
|
||||
|
@ -43,27 +43,27 @@ msgstr ""
|
|||
msgid "Loaded quota_driver: %s."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/service.py:181
|
||||
#: neutron/service.py:191
|
||||
#, python-format
|
||||
msgid "Neutron service started, listening on %(host)s:%(port)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/wsgi.py:792
|
||||
#: neutron/wsgi.py:793
|
||||
#, python-format
|
||||
msgid "%(method)s %(url)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/wsgi.py:809
|
||||
#: neutron/wsgi.py:810
|
||||
#, python-format
|
||||
msgid "HTTP exception thrown: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/wsgi.py:825
|
||||
#: neutron/wsgi.py:826
|
||||
#, python-format
|
||||
msgid "%(url)s returned with HTTP %(status)d"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/wsgi.py:828
|
||||
#: neutron/wsgi.py:829
|
||||
#, python-format
|
||||
msgid "%(url)s returned a fault: %(exception)s"
|
||||
msgstr ""
|
||||
|
@ -115,30 +115,30 @@ msgstr ""
|
|||
msgid "No ports here to refresh firewall"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/common/ovs_lib.py:423
|
||||
#: neutron/agent/common/ovs_lib.py:423 neutron/agent/common/ovs_lib.py:456
|
||||
#, python-format
|
||||
msgid "Port %(port_id)s not present in bridge %(br_name)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:585
|
||||
#: neutron/agent/dhcp/agent.py:96 neutron/agent/dhcp/agent.py:589
|
||||
msgid "DHCP agent started"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:149
|
||||
#: neutron/agent/dhcp/agent.py:152
|
||||
msgid "Synchronizing state"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:170
|
||||
#: neutron/agent/dhcp/agent.py:173
|
||||
msgid "Synchronizing state complete"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:641
|
||||
#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:654
|
||||
#: neutron/services/metering/agents/metering_agent.py:286
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/l3/agent.py:563 neutron/agent/l3/agent.py:631
|
||||
#: neutron/agent/l3/agent.py:575 neutron/agent/l3/agent.py:644
|
||||
msgid "L3 agent started"
|
||||
msgstr ""
|
||||
|
||||
|
@ -159,38 +159,38 @@ msgstr ""
|
|||
msgid "Process runs with uid/gid: %(uid)s/%(gid)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/linux/dhcp.py:749
|
||||
#: neutron/agent/linux/dhcp.py:793
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is "
|
||||
"not in port's address IP versions"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/linux/interface.py:164
|
||||
#: neutron/agent/linux/interface.py:192
|
||||
#, python-format
|
||||
msgid "Device %s already exists"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/linux/iptables_firewall.py:115
|
||||
#: neutron/agent/linux/iptables_firewall.py:142
|
||||
#, python-format
|
||||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/linux/iptables_firewall.py:126
|
||||
#: neutron/agent/linux/iptables_firewall.py:153
|
||||
#, python-format
|
||||
msgid "Attempted to remove port filter which is not filtered %r"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:404
|
||||
#: neutron/api/extensions.py:381
|
||||
msgid "Initializing extension manager."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:562
|
||||
#: neutron/api/extensions.py:539
|
||||
#, python-format
|
||||
msgid "Loaded extension: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/v2/base.py:94
|
||||
#: neutron/api/v2/base.py:96
|
||||
msgid "Allow sorting is enabled because native pagination requires native sorting"
|
||||
msgstr ""
|
||||
|
||||
|
@ -199,6 +199,25 @@ msgstr ""
|
|||
msgid "%(action)s failed (client error): %(exc)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/ipset_cleanup.py:60
|
||||
#, python-format
|
||||
msgid "Removing iptables rule for IPset: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/ipset_cleanup.py:79
|
||||
#, python-format
|
||||
msgid "Destroying IPset: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/ipset_cleanup.py:89
|
||||
#, python-format
|
||||
msgid "Destroying IPsets with prefix: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/ipset_cleanup.py:97
|
||||
msgid "IPset cleanup completed successfully"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/ovs_cleanup.py:73
|
||||
#, python-format
|
||||
msgid "Deleting port: %s"
|
||||
|
@ -215,9 +234,9 @@ msgstr ""
|
|||
|
||||
#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43
|
||||
#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:262
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1057
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1594
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1067
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:346
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1607
|
||||
msgid "Agent initialized successfully, now running... "
|
||||
msgstr ""
|
||||
|
||||
|
@ -239,6 +258,13 @@ msgstr ""
|
|||
msgid "IPv6 is not enabled on this system."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/agents_db.py:209
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Heartbeat received from %(type)s agent on host %(host)s, uuid %(uuid)s "
|
||||
"after %(delta)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/agentschedulers_db.py:165
|
||||
msgid ""
|
||||
"Skipping periodic DHCP agent status check because automatic network "
|
||||
|
@ -262,40 +288,40 @@ msgstr ""
|
|||
msgid "Adding network %(net)s to agent %(agent)s on host %(host)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/db_base_plugin_v2.py:201
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet "
|
||||
"%(subnet_id)s (CIDR: %(cidr)s)"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/db_base_plugin_v2.py:959 neutron/plugins/ml2/plugin.py:891
|
||||
#: neutron/db/db_base_plugin_v2.py:625 neutron/plugins/ml2/plugin.py:894
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Found port (%(port_id)s, %(ip)s) having IP allocation on subnet "
|
||||
"%(subnet)s, cannot delete"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:103
|
||||
#: neutron/db/ipam_backend_mixin.py:208
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet "
|
||||
"%(subnet_id)s (CIDR: %(cidr)s)"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:246
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:110
|
||||
#: neutron/db/ipam_backend_mixin.py:253
|
||||
msgid "Specified IP addresses do not match the subnet IP version"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:114
|
||||
#: neutron/db/ipam_backend_mixin.py:257
|
||||
#, python-format
|
||||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:119
|
||||
#: neutron/db/ipam_backend_mixin.py:262
|
||||
#, python-format
|
||||
msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/ipam_backend_mixin.py:143
|
||||
#: neutron/db/ipam_backend_mixin.py:286
|
||||
#, python-format
|
||||
msgid "Found overlapping ranges: %(l_range)s and %(r_range)s"
|
||||
msgstr ""
|
||||
|
@ -326,7 +352,7 @@ msgstr ""
|
|||
msgid "SNAT interface port list does not exist, so create one: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/l3_dvrscheduler_db.py:315
|
||||
#: neutron/db/l3_dvrscheduler_db.py:350
|
||||
msgid "SNAT already bound to a service node."
|
||||
msgstr ""
|
||||
|
||||
|
@ -344,17 +370,12 @@ msgid ""
|
|||
"available: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/migration/alembic_migrations/heal_script.py:222
|
||||
#, python-format
|
||||
msgid "Table %(old_t)r was renamed to %(new_t)r"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/debug/commands.py:107
|
||||
#, python-format
|
||||
msgid "%d probe(s) deleted"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/extensions/vlantransparent.py:45
|
||||
#: neutron/extensions/vlantransparent.py:46
|
||||
msgid "Disabled vlantransparent extension."
|
||||
msgstr ""
|
||||
|
||||
|
@ -363,74 +384,7 @@ msgstr ""
|
|||
msgid "Nova event response: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/eventlet_backdoor.py:146
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/periodic_task.py:120
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/periodic_task.py:125
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:173
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:239
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:270
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:309
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:328
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:338
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:355
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:359
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:398
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:413
|
||||
msgid "Wait called after thread killed. Cleaning up."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:429
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/brocade/NeutronPlugin.py:307
|
||||
#: neutron/plugins/brocade/NeutronPlugin.py:306
|
||||
#, python-format
|
||||
msgid "Allocated vlan (%d) from the pool"
|
||||
msgstr ""
|
||||
|
@ -544,59 +498,6 @@ msgstr ""
|
|||
msgid "Controller IPs: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:796
|
||||
msgid "Stopping linuxbridge agent."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:826
|
||||
#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99
|
||||
#, python-format
|
||||
msgid "RPC agent_id: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:929
|
||||
#, python-format
|
||||
msgid "Device %s not defined on plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:936
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1257
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1274
|
||||
#, python-format
|
||||
msgid "Attachment %s removed"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:948
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1286
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235
|
||||
#, python-format
|
||||
msgid "Port %s updated."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1001
|
||||
msgid "LinuxBridge Agent RPC Daemon Started!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1011
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1477
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251
|
||||
msgid "Agent out of sync with plugin!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1050
|
||||
#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43
|
||||
#, python-format
|
||||
msgid "Interface mappings: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/db.py:60
|
||||
#, python-format
|
||||
msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s"
|
||||
|
@ -687,26 +588,26 @@ msgstr ""
|
|||
msgid "Extended port dict for driver '%(drv)s'"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:139
|
||||
#: neutron/plugins/ml2/plugin.py:142
|
||||
msgid "Modular L2 Plugin initialization complete"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:290
|
||||
#: neutron/plugins/ml2/plugin.py:293
|
||||
#, python-format
|
||||
msgid "Attempt %(count)s to bind port %(port)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:692
|
||||
#: neutron/plugins/ml2/plugin.py:695
|
||||
#, python-format
|
||||
msgid "Port %s was deleted concurrently"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:704
|
||||
#: neutron/plugins/ml2/plugin.py:707
|
||||
#, python-format
|
||||
msgid "Subnet %s was deleted concurrently"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:1367
|
||||
#: neutron/plugins/ml2/plugin.py:1370
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Binding info for port %s was not found, it might have been deleted "
|
||||
|
@ -744,32 +645,32 @@ msgstr ""
|
|||
msgid "VlanTypeDriver initialization complete"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:112
|
||||
#, python-format
|
||||
msgid "Network %s is not created as it is not found in Arista DB"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:125
|
||||
#, python-format
|
||||
msgid "Network name changed to %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:157
|
||||
#, python-format
|
||||
msgid "Network %s is not updated as it is not found in Arista DB"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:266
|
||||
#, python-format
|
||||
msgid "VM %s is not created as it is not found in Arista DB"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:280
|
||||
#, python-format
|
||||
msgid "Port name changed to %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310
|
||||
#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:354
|
||||
#, python-format
|
||||
msgid "VM %s is not updated as it is not found in Arista DB"
|
||||
msgstr ""
|
||||
|
@ -796,10 +697,169 @@ msgstr ""
|
|||
msgid "Initializing CRD client... "
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py:32
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:781
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Skipping ARP spoofing rules for port '%s' because it has port security "
|
||||
"disabled"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py:83
|
||||
#, python-format
|
||||
msgid "Clearing orphaned ARP spoofing entries for devices %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:798
|
||||
msgid "Stopping linuxbridge agent."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:828
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:100
|
||||
#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89
|
||||
#, python-format
|
||||
msgid "RPC agent_id: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:895
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:210
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1223
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:933
|
||||
#, python-format
|
||||
msgid "Device %s not defined on plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:940
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1270
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1287
|
||||
#, python-format
|
||||
msgid "Attachment %s removed"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:952
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:236
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1299
|
||||
#, python-format
|
||||
msgid "Port %s updated."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1010
|
||||
msgid "LinuxBridge Agent RPC Daemon Started!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1020
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:252
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1490
|
||||
msgid "Agent out of sync with plugin!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:1060
|
||||
#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43
|
||||
#, python-format
|
||||
msgid "Interface mappings: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:192
|
||||
#, python-format
|
||||
msgid "No device with MAC %s defined on agent."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:217
|
||||
#, python-format
|
||||
msgid "Device with MAC %s not defined on plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:224
|
||||
#, python-format
|
||||
msgid "Removing device with mac_address %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:245
|
||||
msgid "SRIOV NIC Agent RPC Daemon Started!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:334
|
||||
#, python-format
|
||||
msgid "Physical Devices mappings: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py:335
|
||||
#, python-format
|
||||
msgid "Exclude Devices: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54
|
||||
msgid "Agent initialised successfully, now running... "
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:205
|
||||
#, python-format
|
||||
msgid "L2 Agent operating in DVR Mode with MAC %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:588
|
||||
#, python-format
|
||||
msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:652
|
||||
#, python-format
|
||||
msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:774
|
||||
#, python-format
|
||||
msgid "Configuration for device %s completed."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:813
|
||||
#, python-format
|
||||
msgid "port_unbound(): net_uuid %s not in local_vlan_map"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:879
|
||||
#, python-format
|
||||
msgid "Adding %s to list of bridges."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:957
|
||||
#, python-format
|
||||
msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1113
|
||||
#, python-format
|
||||
msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1217
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Port %s was not found on the integration bridge and will therefore not be"
|
||||
" processed"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1258
|
||||
#, python-format
|
||||
msgid "Ancillary Port %s added"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1518
|
||||
msgid "Agent tunnel out of sync with plugin!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1617
|
||||
msgid "Agent caught SIGTERM, quitting daemon loop."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1623
|
||||
msgid "Agent caught SIGHUP, resetting."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/extensions/port_security.py:33
|
||||
msgid "PortSecurityExtensionDriver initialization complete"
|
||||
msgstr ""
|
||||
|
@ -808,106 +868,6 @@ msgstr ""
|
|||
msgid "NVSD Agent initialized successfully, now running... "
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:205
|
||||
#, python-format
|
||||
msgid "L2 Agent operating in DVR Mode with MAC %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:582
|
||||
#, python-format
|
||||
msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:646
|
||||
#, python-format
|
||||
msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:765
|
||||
#, python-format
|
||||
msgid "Configuration for device %s completed."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:772
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Skipping ARP spoofing rules for port '%s' because it has port security "
|
||||
"disabled"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:800
|
||||
#, python-format
|
||||
msgid "port_unbound(): net_uuid %s not in local_vlan_map"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:866
|
||||
#, python-format
|
||||
msgid "Adding %s to list of bridges."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:942
|
||||
#, python-format
|
||||
msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1090
|
||||
#, python-format
|
||||
msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1204
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Port %s was not found on the integration bridge and will therefore not be"
|
||||
" processed"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1245
|
||||
#, python-format
|
||||
msgid "Ancillary Port %s added"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1505
|
||||
msgid "Agent tunnel out of sync with plugin!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1604
|
||||
msgid "Agent caught SIGTERM, quitting daemon loop."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1610
|
||||
msgid "Agent caught SIGHUP, resetting."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191
|
||||
#, python-format
|
||||
msgid "No device with MAC %s defined on agent."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216
|
||||
#, python-format
|
||||
msgid "Device with MAC %s not defined on plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223
|
||||
#, python-format
|
||||
msgid "Removing device with mac_address %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244
|
||||
msgid "SRIOV NIC Agent RPC Daemon Started!"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333
|
||||
#, python-format
|
||||
msgid "Physical Devices mappings: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334
|
||||
#, python-format
|
||||
msgid "Exclude Devices: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/scheduler/dhcp_agent_scheduler.py:110
|
||||
#, python-format
|
||||
msgid "Agent %s already present"
|
||||
|
@ -917,7 +877,7 @@ msgstr ""
|
|||
msgid "RPC was already started in parent process by plugin."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/services/service_base.py:99
|
||||
#: neutron/services/service_base.py:90
|
||||
#, python-format
|
||||
msgid "Default provider is not specified for service type %s"
|
||||
msgstr ""
|
||||
|
|
|
@ -6,9 +6,9 @@
|
|||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: neutron 2015.2.0.dev464\n"
|
||||
"Project-Id-Version: neutron 7.0.0.0b2.dev192\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-11 06:02+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
|
@ -17,7 +17,7 @@ msgstr ""
|
|||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 1.3\n"
|
||||
|
||||
#: neutron/policy.py:115
|
||||
#: neutron/policy.py:116
|
||||
#, python-format
|
||||
msgid "Unable to find data type descriptor for attribute %s"
|
||||
msgstr ""
|
||||
|
@ -40,11 +40,11 @@ msgid ""
|
|||
"care of registering resources with the quota engine."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/rpc.py:113
|
||||
#: neutron/agent/rpc.py:119
|
||||
msgid "DVR functionality requires a server upgrade."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/rpc.py:136
|
||||
#: neutron/agent/rpc.py:142
|
||||
msgid "Tunnel synchronization requires a server upgrade."
|
||||
msgstr ""
|
||||
|
||||
|
@ -59,22 +59,22 @@ msgid ""
|
|||
"falling back to old security_group_rules_for_devices which scales worse."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/common/ovs_lib.py:368
|
||||
#: neutron/agent/common/ovs_lib.py:373
|
||||
#, python-format
|
||||
msgid "Found not yet ready openvswitch port: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/common/ovs_lib.py:371
|
||||
#: neutron/agent/common/ovs_lib.py:376
|
||||
#, python-format
|
||||
msgid "Found failed openvswitch port: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/common/ovs_lib.py:417
|
||||
#: neutron/agent/common/ovs_lib.py:438
|
||||
#, python-format
|
||||
msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:117
|
||||
#: neutron/agent/dhcp/agent.py:120
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Unable to %(action)s dhcp for %(net_id)s: there is a conflict with its "
|
||||
|
@ -82,34 +82,34 @@ msgid ""
|
|||
"exist."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:132 neutron/agent/dhcp/agent.py:201
|
||||
#: neutron/agent/dhcp/agent.py:135 neutron/agent/dhcp/agent.py:204
|
||||
#, python-format
|
||||
msgid "Network %s has been deleted."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:218
|
||||
#: neutron/agent/dhcp/agent.py:221
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Network %s may have been deleted and its resources may have already been "
|
||||
"disposed."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:367
|
||||
#: neutron/agent/dhcp/agent.py:370
|
||||
#, python-format
|
||||
msgid ""
|
||||
"%(port_num)d router ports found on the metadata access network. Only the "
|
||||
"port %(port_id)s, for router %(router_id)s will be considered"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/dhcp/agent.py:567 neutron/agent/l3/agent.py:622
|
||||
#: neutron/agent/metadata/agent.py:306
|
||||
#: neutron/agent/dhcp/agent.py:571 neutron/agent/l3/agent.py:635
|
||||
#: neutron/agent/metadata/agent.py:310
|
||||
#: neutron/services/metering/agents/metering_agent.py:278
|
||||
msgid ""
|
||||
"Neutron server does not support state report. State report for this agent"
|
||||
" will be disabled."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/l3/agent.py:193
|
||||
#: neutron/agent/l3/agent.py:194
|
||||
#, python-format
|
||||
msgid ""
|
||||
"l3-agent cannot check service plugins enabled at the neutron server when "
|
||||
|
@ -118,16 +118,16 @@ msgid ""
|
|||
"warning. Detail message: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/l3/agent.py:205
|
||||
#: neutron/agent/l3/agent.py:206
|
||||
#, python-format
|
||||
msgid ""
|
||||
"l3-agent cannot check service plugins enabled on the neutron server. "
|
||||
"Retrying. Detail message: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/l3/agent.py:337
|
||||
#: neutron/agent/l3/agent.py:341
|
||||
#, python-format
|
||||
msgid "Info for router %s was not found. Skipping router removal"
|
||||
msgid "Info for router %s was not found. Performing router cleanup"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/l3/router_info.py:208
|
||||
|
@ -158,44 +158,44 @@ msgid ""
|
|||
"%(top)r"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/linux/iptables_manager.py:702
|
||||
#: neutron/agent/linux/iptables_manager.py:698
|
||||
#, python-format
|
||||
msgid "Attempted to get traffic counters of chain %s which does not exist"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/metadata/agent.py:131
|
||||
#: neutron/agent/metadata/agent.py:133
|
||||
msgid "Server does not support metadata RPC, fallback to using neutron client"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/agent/metadata/agent.py:244
|
||||
#: neutron/agent/metadata/agent.py:246
|
||||
msgid ""
|
||||
"The remote metadata server responded with Forbidden. This response "
|
||||
"usually occurs when shared secrets do not match."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/api_common.py:103
|
||||
#: neutron/api/api_common.py:104
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Invalid value for pagination_max_limit: %s. It should be an integer "
|
||||
"greater to 0"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:544
|
||||
#: neutron/api/extensions.py:521
|
||||
#, python-format
|
||||
msgid "Did not find expected name \"%(ext_name)s\" in %(file)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:552
|
||||
#: neutron/api/extensions.py:529
|
||||
#, python-format
|
||||
msgid "Extension file %(f)s wasn't loaded due to %(exception)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:593
|
||||
#: neutron/api/extensions.py:570
|
||||
#, python-format
|
||||
msgid "Extension %s not supported by any of loaded plugins"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/extensions.py:605
|
||||
#: neutron/api/extensions.py:582
|
||||
#, python-format
|
||||
msgid "Loaded plugins do not implement extension %s interface"
|
||||
msgstr ""
|
||||
|
@ -232,13 +232,13 @@ msgstr ""
|
|||
msgid "Updating lease expiration is now deprecated. Issued from host %s."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/api/rpc/handlers/securitygroups_rpc.py:177
|
||||
#: neutron/api/rpc/handlers/securitygroups_rpc.py:179
|
||||
msgid ""
|
||||
"Security group agent binding currently not set. This should be set by the"
|
||||
" end of the init process."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/cmd/sanity_check.py:75
|
||||
#: neutron/cmd/sanity_check.py:78
|
||||
msgid ""
|
||||
"The user that is executing neutron can read the namespaces without using "
|
||||
"the root_helper. Disable the use_helper_for_ns_read option to avoid a "
|
||||
|
@ -263,7 +263,11 @@ msgid ""
|
|||
"in case there was a clock adjustment."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/agentschedulers_db.py:275
|
||||
#: neutron/db/agentschedulers_db.py:280
|
||||
msgid "No DHCP agents available, skipping rescheduling"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/agentschedulers_db.py:284
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Removing network %(network)s from agent %(agent)s because the agent did "
|
||||
|
@ -277,20 +281,15 @@ msgid ""
|
|||
" not report to the server in the last %(dead_time)s seconds."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/l3_dvrscheduler_db.py:306
|
||||
#: neutron/db/l3_dvrscheduler_db.py:341
|
||||
msgid "No active L3 agents found for SNAT"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/securitygroups_rpc_base.py:383
|
||||
#: neutron/db/securitygroups_rpc_base.py:361
|
||||
#, python-format
|
||||
msgid "No valid gateway port on subnet %s is found for IPv6 RA"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/db/migration/alembic_migrations/heal_script.py:91
|
||||
#, python-format
|
||||
msgid "Ignoring alembic command %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/debug/debug_agent.py:121
|
||||
#, python-format
|
||||
msgid "Failed to delete namespace %s"
|
||||
|
@ -316,16 +315,6 @@ msgstr ""
|
|||
msgid "Nova event: %s returned with failed status"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/loopingcall.py:87
|
||||
#, python-format
|
||||
msgid "task %(func_name)r run outlasted interval by %(delay).2f sec"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/openstack/common/service.py:363
|
||||
#, python-format
|
||||
msgid "pid %d not in child list"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/cisco/db/n1kv_db_v2.py:584
|
||||
#, python-format
|
||||
msgid "vlan_id %(vlan)s on physical network %(network)s not found"
|
||||
|
@ -355,63 +344,23 @@ msgstr ""
|
|||
msgid "Ignoring admin_state_up=False for router=%r. Overriding with True"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:85
|
||||
msgid "VXLAN is enabled, a valid local_ip must be provided"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:99
|
||||
msgid "Invalid Network ID, will lead to incorrect bridge name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:106
|
||||
msgid "Invalid VLAN ID, will lead to incorrect subinterface name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:113
|
||||
msgid "Invalid Interface ID, will lead to incorrect tap device name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:122
|
||||
#, python-format
|
||||
msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:527
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:563
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Option \"%(option)s\" must be supported by command \"%(command)s\" to "
|
||||
"enable %(mode)s mode"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:557
|
||||
msgid ""
|
||||
"VXLAN muticast group must be provided in vxlan_group option to enable "
|
||||
"VXLAN MCAST mode"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/driver_context.py:191
|
||||
#, python-format
|
||||
msgid "Could not expand segment %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:527
|
||||
#: neutron/plugins/ml2/plugin.py:530
|
||||
#, python-format
|
||||
msgid ""
|
||||
"In _notify_port_updated(), no bound segment for port %(port_id)s on "
|
||||
"network %(network_id)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:778
|
||||
#: neutron/plugins/ml2/plugin.py:781
|
||||
msgid "A concurrent port creation has occurred"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:1396
|
||||
#, python-format
|
||||
msgid "Port %(port)s updated up by agent not found"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/plugin.py:1428
|
||||
#: neutron/plugins/ml2/plugin.py:1435
|
||||
#, python-format
|
||||
msgid "Port %s not found during update"
|
||||
msgstr ""
|
||||
|
@ -429,7 +378,7 @@ msgid ""
|
|||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_agent.py:76
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py:116
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py:117
|
||||
#, python-format
|
||||
msgid "Attempting to bind with dead agent: %s"
|
||||
msgstr ""
|
||||
|
@ -439,7 +388,7 @@ msgstr ""
|
|||
msgid "No flat network found on physical network %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/type_gre.py:107
|
||||
#: neutron/plugins/ml2/drivers/type_gre.py:102
|
||||
msgid "Gre allocations were already created."
|
||||
msgstr ""
|
||||
|
||||
|
@ -504,11 +453,63 @@ msgstr ""
|
|||
msgid "Port %(port)s updated by agent %(agent)s isn't bound to any segment"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/oneconvergence/lib/plugin_helper.py:110
|
||||
msgid "No Token, Re-login"
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:90
|
||||
msgid "VXLAN is enabled, a valid local_ip must be provided"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:163
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:104
|
||||
msgid "Invalid Network ID, will lead to incorrect bridge name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:111
|
||||
msgid "Invalid VLAN ID, will lead to incorrect subinterface name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:118
|
||||
msgid "Invalid Interface ID, will lead to incorrect tap device name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:127
|
||||
#, python-format
|
||||
msgid "Invalid Segmentation ID: %s, will lead to incorrect vxlan device name"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:527
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:563
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Option \"%(option)s\" must be supported by command \"%(command)s\" to "
|
||||
"enable %(mode)s mode"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py:557
|
||||
msgid ""
|
||||
"VXLAN muticast group must be provided in vxlan_group option to enable "
|
||||
"VXLAN MCAST mode"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:149
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:162
|
||||
#, python-format
|
||||
msgid "Cannot find vf index for pci slot %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py:285
|
||||
#, python-format
|
||||
msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:126
|
||||
#, python-format
|
||||
msgid "Cannot find vfs %(vfs)s in device %(dev_name)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py:142
|
||||
#, python-format
|
||||
msgid "failed to parse vf link show line %(line)s: for %(device)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:163
|
||||
#, python-format
|
||||
msgid ""
|
||||
"L2 agent could not get DVR MAC address at startup due to RPC error. It "
|
||||
|
@ -516,69 +517,52 @@ msgid ""
|
|||
" %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:190
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py:190
|
||||
#, python-format
|
||||
msgid ""
|
||||
"L2 agent could not get DVR MAC address from server. Retrying. Detailed "
|
||||
"message: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:525
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:531
|
||||
#, python-format
|
||||
msgid "Action %s not supported"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:920
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:935
|
||||
#, python-format
|
||||
msgid ""
|
||||
"Creating an interface named %(name)s exceeds the %(limit)d character "
|
||||
"limitation. It was shortened to %(new_name)s to fit."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1120
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1130
|
||||
#, python-format
|
||||
msgid "VIF port: %s has no ofport configured, and might not be able to transmit"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1228
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1241
|
||||
#, python-format
|
||||
msgid "Device %s not defined on plugin"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1388
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1401
|
||||
#, python-format
|
||||
msgid "Invalid remote IP: %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1431
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1444
|
||||
msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1434
|
||||
#: neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py:1447
|
||||
msgid ""
|
||||
"OVS is dead. OVSNeutronAgent will keep running and checking OVS status "
|
||||
"periodically."
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/eswitch_manager.py:148
|
||||
#: neutron/plugins/sriovnicagent/eswitch_manager.py:161
|
||||
#, python-format
|
||||
msgid "Cannot find vf index for pci slot %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/eswitch_manager.py:284
|
||||
#, python-format
|
||||
msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/pci_lib.py:125
|
||||
#, python-format
|
||||
msgid "Cannot find vfs %(vfs)s in device %(dev_name)s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/plugins/sriovnicagent/pci_lib.py:141
|
||||
#, python-format
|
||||
msgid "failed to parse vf link show line %(line)s: for %(device)s"
|
||||
#: neutron/plugins/oneconvergence/lib/plugin_helper.py:110
|
||||
msgid "No Token, Re-login"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/scheduler/dhcp_agent_scheduler.py:58
|
||||
|
@ -590,16 +574,16 @@ msgstr ""
|
|||
msgid "No more DHCP agents"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:157
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:156
|
||||
#, python-format
|
||||
msgid "No routers compatible with L3 agent configuration on host %s"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:182
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:181
|
||||
msgid "No active L3 agents"
|
||||
msgstr ""
|
||||
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:194
|
||||
#: neutron/scheduler/l3_agent_scheduler.py:193
|
||||
#, python-format
|
||||
msgid "No L3 agents can host the router %s"
|
||||
msgstr ""
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -8,11 +8,11 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-11 06:02+0000\n"
|
||||
"PO-Revision-Date: 2015-06-10 23:52+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/"
|
||||
"neutron/language/pt_BR/)\n"
|
||||
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/p/neutron/"
|
||||
"language/pt_BR/)\n"
|
||||
"Language: pt_BR\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -97,26 +97,6 @@ msgstr "Tentou remover o filtro de porta que não foi filtrado %r"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "Tentou atualizar o filtro de porta que não foi filtrado %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "%s capturadas, saindo"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "%s capturado, parando filhos"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "%(pid)d filho eliminado pelo sinal %(sig)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "Filho %(pid)s encerrando com status %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "Filho capturado %s, terminando"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "Arquivo de colagem configurado: %s"
|
||||
|
@ -151,13 +131,6 @@ msgstr "Dispositivo %s não definido no plug-in"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "Extensão de grupo de segurança desativada."
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "Bifurcação muito rápida, suspendendo"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "Localizado endereço IP inválido no pool: %(start)s - %(end)s:"
|
||||
|
@ -260,9 +233,6 @@ msgstr "Nenhuma porta aqui para atualizar firewall"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "Limpeza de OVS concluída com êxito"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "Processo pai saiu inesperadamente, saindo"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "Porta %(device)s atualizada. Detalhes: %(details)s"
|
||||
|
@ -316,14 +286,6 @@ msgstr "Regra do grupo de segurança atualizada %r"
|
|||
msgid "Service %s is supported by the core plugin"
|
||||
msgstr "Serviço %s é suportado pelo plugin núcleo"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "Ignorando a porta %s porque nenhum IP está configurado nela"
|
||||
|
@ -335,14 +297,6 @@ msgstr "Endereços IP especificado não correspondem à versão do IP da sub-red
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "IP inicial (%(start)s) é maior que IP final (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "Filho %d iniciado"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "Iniciando %d trabalhadores"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "Sincronizando estado"
|
||||
|
||||
|
@ -364,13 +318,6 @@ msgstr ""
|
|||
msgid "VlanTypeDriver initialization complete"
|
||||
msgstr "Inicialização do VlanTypeDriver concluída"
|
||||
|
||||
msgid "Wait called after thread killed. Cleaning up."
|
||||
msgstr "Espera requisitada depois que thread foi morta. Limpando."
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "Aguardando em %d filhos para sair"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "agent_updated por lado do servidor %s!"
|
||||
|
|
|
@ -8,11 +8,11 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-06-11 06:02+0000\n"
|
||||
"PO-Revision-Date: 2015-06-10 23:52+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/"
|
||||
"language/zh_CN/)\n"
|
||||
"Language-Team: Chinese (China) (http://www.transifex.com/p/neutron/language/"
|
||||
"zh_CN/)\n"
|
||||
"Language: zh_CN\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -111,26 +111,6 @@ msgstr "已尝试除去未过滤的端口过滤器 %r"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "已尝试更新未过滤的端口过滤器 %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "捕获到 %s,正在退出"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "捕获到 %s,正在停止子代"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "信号 %(sig)d 已终止子代 %(pid)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "子代 %(pid)s 已退出,状态为 %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child caught %s, exiting"
|
||||
msgstr "子代捕获 %s,正在退出"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "配置粘贴文件:%s"
|
||||
|
@ -169,13 +149,6 @@ msgstr "未在插件上定义设备 %s"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "已禁用安全组扩展。"
|
||||
|
||||
#, python-format
|
||||
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
|
||||
msgstr "Eventlet为进程 %(pid)d 在后台监听 %(port)s "
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "派生速度太快,正在休眠"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "在池中找到无效 IP 地址:%(start)s - %(end)s:"
|
||||
|
@ -287,9 +260,6 @@ msgstr "Nova 事件响应: %s"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "OVS 清除已成功完成"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "父进程已意外终止,正在退出"
|
||||
|
||||
#, python-format
|
||||
msgid "Physical Devices mappings: %s"
|
||||
msgstr "物理设备映射:%s"
|
||||
|
@ -368,14 +338,6 @@ msgstr "已更新安全组规则 %r"
|
|||
msgid "Service %s is supported by the core plugin"
|
||||
msgstr "服务%s由核心插件支持"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "正在跳过周期性任务 %(task)s,因为它已禁用"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "正在跳过端口 %s,因为没有在该端口上配置任何 IP"
|
||||
|
@ -387,14 +349,6 @@ msgstr "指定的 IP 地址与子网 IP 版本不匹配"
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "起始 IP (%(start)s) 大于结束 IP (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "已启动子代 %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "正在启动 %d 工作程序"
|
||||
|
||||
#, python-format
|
||||
msgid "Subnet %s was deleted concurrently"
|
||||
msgstr "子网 %s 同时被删除 "
|
||||
|
@ -405,10 +359,6 @@ msgstr "正在使状态同步"
|
|||
msgid "Synchronizing state complete"
|
||||
msgstr "同步状态完成"
|
||||
|
||||
#, python-format
|
||||
msgid "Table %(old_t)r was renamed to %(new_t)r"
|
||||
msgstr "表 %(old_t)r 已经更名为 %(new_t)r"
|
||||
|
||||
#, python-format
|
||||
msgid "Tenant network_types: %s"
|
||||
msgstr "项目网络类型: %s"
|
||||
|
@ -432,13 +382,6 @@ msgstr ""
|
|||
msgid "VlanTypeDriver initialization complete"
|
||||
msgstr "Vlan类型驱动初始化完成"
|
||||
|
||||
msgid "Wait called after thread killed. Cleaning up."
|
||||
msgstr "线程结束,正在清理"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "正在等待 %d 个子代退出"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "服务器端 %s 已更新代理!"
|
||||
|
|
|
@ -7,11 +7,11 @@ msgid ""
|
|||
msgstr ""
|
||||
"Project-Id-Version: Neutron\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2015-05-29 06:16+0000\n"
|
||||
"PO-Revision-Date: 2015-05-28 20:54+0000\n"
|
||||
"POT-Creation-Date: 2015-07-11 06:09+0000\n"
|
||||
"PO-Revision-Date: 2015-07-08 20:45+0000\n"
|
||||
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
|
||||
"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/neutron/"
|
||||
"language/zh_TW/)\n"
|
||||
"Language-Team: Chinese (Taiwan) (http://www.transifex.com/p/neutron/language/"
|
||||
"zh_TW/)\n"
|
||||
"Language: zh_TW\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
@ -68,22 +68,6 @@ msgstr "已嘗試移除未過濾的埠過濾器 %r"
|
|||
msgid "Attempted to update port filter which is not filtered %s"
|
||||
msgstr "已嘗試更新未過濾的埠過濾器 %s"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, exiting"
|
||||
msgstr "已捕捉到 %s,正在結束"
|
||||
|
||||
#, python-format
|
||||
msgid "Caught %s, stopping children"
|
||||
msgstr "已捕捉到 %s,正在停止子項"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)d killed by signal %(sig)d"
|
||||
msgstr "信號 %(sig)d 結束了子項 %(pid)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Child %(pid)s exited with status %(code)d"
|
||||
msgstr "子項 %(pid)s 已結束,狀態為 %(code)d"
|
||||
|
||||
#, python-format
|
||||
msgid "Config paste file: %s"
|
||||
msgstr "配置貼上檔案:%s"
|
||||
|
@ -102,9 +86,6 @@ msgstr "外掛程式上未定義裝置 %s"
|
|||
msgid "Disabled security-group extension."
|
||||
msgstr "已停用安全群組延伸。"
|
||||
|
||||
msgid "Forking too fast, sleeping"
|
||||
msgstr "分岔太快,正在休眠"
|
||||
|
||||
#, python-format
|
||||
msgid "Found invalid IP address in pool: %(start)s - %(end)s:"
|
||||
msgstr "在儲存區中發現無效的 IP 位址:%(start)s - %(end)s:"
|
||||
|
@ -165,9 +146,6 @@ msgstr "未載入 %s 外掛程式"
|
|||
msgid "OVS cleanup completed successfully"
|
||||
msgstr "已順利完成 OVS 清理"
|
||||
|
||||
msgid "Parent process has died unexpectedly, exiting"
|
||||
msgstr "母程序已非預期地當掉,正在結束"
|
||||
|
||||
#, python-format
|
||||
msgid "Port %(device)s updated. Details: %(details)s"
|
||||
msgstr "已更新埠 %(device)s。詳細資料:%(details)s"
|
||||
|
@ -206,14 +184,6 @@ msgstr "安全群組成員已更新 %r"
|
|||
msgid "Security group rule updated %r"
|
||||
msgstr "安全群組規則已更新 %r"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because it is disabled"
|
||||
msgstr "正在跳過定期作業 %(task)s,因為它已停用"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping periodic task %(task)s because its interval is negative"
|
||||
msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數"
|
||||
|
||||
#, python-format
|
||||
msgid "Skipping port %s as no IP is configure on it"
|
||||
msgstr "正在跳過埠 %s,因為其上沒有配置 IP"
|
||||
|
@ -225,14 +195,6 @@ msgstr "指定的 IP 位址與子網路 IP 版本不符"
|
|||
msgid "Start IP (%(start)s) is greater than end IP (%(end)s)"
|
||||
msgstr "起始 IP (%(start)s) 大於結尾 IP (%(end)s)"
|
||||
|
||||
#, python-format
|
||||
msgid "Started child %d"
|
||||
msgstr "已開始子行程 %d"
|
||||
|
||||
#, python-format
|
||||
msgid "Starting %d workers"
|
||||
msgstr "正在啟動 %d 個工作程式"
|
||||
|
||||
msgid "Synchronizing state"
|
||||
msgstr "正在同步化狀態"
|
||||
|
||||
|
@ -243,10 +205,6 @@ msgid ""
|
|||
msgstr ""
|
||||
"驗證 CIDR %(new_cidr)s 失敗 - 與子網路 %(subnet_id)s (CIDR %(cidr)s) 重疊"
|
||||
|
||||
#, python-format
|
||||
msgid "Waiting on %d children to exit"
|
||||
msgstr "正在等待 %d 個子項結束"
|
||||
|
||||
#, python-format
|
||||
msgid "agent_updated by server side %s!"
|
||||
msgstr "agent_updated 是由伺服器端 %s 執行!"
|
||||
|
|
|
@ -22,6 +22,7 @@ from oslo_log import log as logging
|
|||
|
||||
from neutron.common import constants as n_const
|
||||
from neutron.i18n import _LI
|
||||
from neutron.plugins.common import constants as p_const
|
||||
from neutron.plugins.ml2.common import exceptions as ml2_exc
|
||||
from neutron.plugins.ml2 import driver_api
|
||||
from neutron.plugins.ml2.drivers.arista import config # noqa
|
||||
|
@ -69,6 +70,9 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
|
||||
network = context.current
|
||||
segments = context.network_segments
|
||||
if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
|
||||
# If network type is not VLAN, do nothing
|
||||
return
|
||||
network_id = network['id']
|
||||
tenant_id = network['tenant_id']
|
||||
if not tenant_id:
|
||||
|
@ -165,6 +169,10 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
def delete_network_postcommit(self, context):
|
||||
"""Send network delete request to Arista HW."""
|
||||
network = context.current
|
||||
segments = context.network_segments
|
||||
if segments[0][driver_api.NETWORK_TYPE] != p_const.TYPE_VLAN:
|
||||
# If networtk type is not VLAN, do nothing
|
||||
return
|
||||
network_id = network['id']
|
||||
tenant_id = network['tenant_id']
|
||||
if not tenant_id:
|
||||
|
@ -202,6 +210,9 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
if not tenant_id:
|
||||
tenant_id = context._plugin_context.tenant_id
|
||||
with self.eos_sync_lock:
|
||||
if not db_lib.is_network_provisioned(tenant_id, network_id):
|
||||
# Ignore this request if network is not provisioned
|
||||
return
|
||||
db_lib.remember_tenant(tenant_id)
|
||||
db_lib.remember_vm(device_id, host, port_id,
|
||||
network_id, tenant_id)
|
||||
|
@ -267,6 +278,22 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
orig_port = context.original
|
||||
if new_port['name'] != orig_port['name']:
|
||||
LOG.info(_LI('Port name changed to %s'), new_port['name'])
|
||||
new_port = context.current
|
||||
device_id = new_port['device_id']
|
||||
device_owner = new_port['device_owner']
|
||||
host = context.host
|
||||
|
||||
# device_id and device_owner are set on VM boot
|
||||
is_vm_boot = device_id and device_owner
|
||||
if host and host != orig_port['binding:host_id'] and is_vm_boot:
|
||||
port_id = new_port['id']
|
||||
network_id = new_port['network_id']
|
||||
tenant_id = new_port['tenant_id']
|
||||
if not tenant_id:
|
||||
tenant_id = context._plugin_context.tenant_id
|
||||
with self.eos_sync_lock:
|
||||
db_lib.update_vm_host(device_id, host, port_id,
|
||||
network_id, tenant_id)
|
||||
|
||||
def update_port_postcommit(self, context):
|
||||
"""Update the name of a given port in EOS.
|
||||
|
@ -276,9 +303,6 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
"""
|
||||
port = context.current
|
||||
orig_port = context.original
|
||||
if port['name'] == orig_port['name']:
|
||||
# nothing to do
|
||||
return
|
||||
|
||||
device_id = port['device_id']
|
||||
device_owner = port['device_owner']
|
||||
|
@ -310,6 +334,12 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
)
|
||||
if vm_provisioned and net_provisioned:
|
||||
try:
|
||||
orig_host = orig_port['binding:host_id']
|
||||
if host != orig_host:
|
||||
# The port moved to a different host. So delete the
|
||||
# old port on the old host before creating a new
|
||||
# port on the new host.
|
||||
self._delete_port(port, orig_host, tenant_id)
|
||||
self.rpc.plug_port_into_network(device_id,
|
||||
hostname,
|
||||
port_id,
|
||||
|
@ -348,30 +378,45 @@ class AristaDriver(driver_api.MechanismDriver):
|
|||
from appropriate network.
|
||||
"""
|
||||
port = context.current
|
||||
device_id = port['device_id']
|
||||
host = context.host
|
||||
port_id = port['id']
|
||||
network_id = port['network_id']
|
||||
tenant_id = port['tenant_id']
|
||||
if not tenant_id:
|
||||
tenant_id = context._plugin_context.tenant_id
|
||||
|
||||
with self.eos_sync_lock:
|
||||
self._delete_port(port, host, tenant_id)
|
||||
|
||||
def _delete_port(self, port, host, tenant_id):
|
||||
"""Deletes the port from EOS.
|
||||
|
||||
param port: Port which is to be deleted
|
||||
param host: The host on which the port existed
|
||||
param tenant_id: The tenant to which the port belongs to. Some times
|
||||
the tenant id in the port dict is not present (as in
|
||||
the case of HA router).
|
||||
"""
|
||||
device_id = port['device_id']
|
||||
port_id = port['id']
|
||||
network_id = port['network_id']
|
||||
device_owner = port['device_owner']
|
||||
|
||||
try:
|
||||
with self.eos_sync_lock:
|
||||
hostname = self._host_name(host)
|
||||
if device_owner == n_const.DEVICE_OWNER_DHCP:
|
||||
self.rpc.unplug_dhcp_port_from_network(device_id,
|
||||
hostname,
|
||||
port_id,
|
||||
network_id,
|
||||
tenant_id)
|
||||
else:
|
||||
self.rpc.unplug_host_from_network(device_id,
|
||||
hostname,
|
||||
port_id,
|
||||
network_id,
|
||||
tenant_id)
|
||||
if not db_lib.is_network_provisioned(tenant_id, network_id):
|
||||
# If we do not have network associated with this, ignore it
|
||||
return
|
||||
hostname = self._host_name(host)
|
||||
if device_owner == n_const.DEVICE_OWNER_DHCP:
|
||||
self.rpc.unplug_dhcp_port_from_network(device_id,
|
||||
hostname,
|
||||
port_id,
|
||||
network_id,
|
||||
tenant_id)
|
||||
else:
|
||||
self.rpc.unplug_host_from_network(device_id,
|
||||
hostname,
|
||||
port_id,
|
||||
network_id,
|
||||
tenant_id)
|
||||
# if necessary, delete tenant as well.
|
||||
self.delete_tenant(tenant_id)
|
||||
except arista_exc.AristaRpcError:
|
||||
|
|
|
@ -111,7 +111,7 @@ def _build_subattr_match_rule(attr_name, attr, action, target):
|
|||
# typing for API attributes
|
||||
# Expect a dict as type descriptor
|
||||
validate = attr['validate']
|
||||
key = filter(lambda k: k.startswith('type:dict'), validate.keys())
|
||||
key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
|
||||
if not key:
|
||||
LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
|
||||
attr_name)
|
||||
|
|
|
@ -154,7 +154,6 @@ class DietTestCase(testtools.TestCase):
|
|||
self.useFixture(fixtures.NestedTempfile())
|
||||
self.useFixture(fixtures.TempHomeDir())
|
||||
|
||||
self.setup_double_mock_guard()
|
||||
self.addCleanup(mock.patch.stopall)
|
||||
|
||||
if bool_from_env('OS_STDOUT_CAPTURE'):
|
||||
|
@ -167,34 +166,6 @@ class DietTestCase(testtools.TestCase):
|
|||
self.addOnException(self.check_for_systemexit)
|
||||
self.orig_pid = os.getpid()
|
||||
|
||||
def setup_double_mock_guard(self):
|
||||
# mock.patch.stopall() uses a set in python < 3.4 so patches may not
|
||||
# be unwound in the same order they were applied. This can leak mocks
|
||||
# and cause tests down the line to fail.
|
||||
# More info: http://bugs.python.org/issue21239
|
||||
#
|
||||
# Use mock to patch mock.patch.start to check if a target has already
|
||||
# been patched and fail if it has.
|
||||
self.first_traceback = {}
|
||||
orig_start = mock._patch.start
|
||||
|
||||
def new_start(mself):
|
||||
mytarget = mself.getter()
|
||||
myattr = mself.attribute
|
||||
for patch in mself._active_patches:
|
||||
if (mytarget, myattr) == (patch.target, patch.attribute):
|
||||
key = str((patch.target, patch.attribute))
|
||||
self.fail("mock.patch was setup on an already patched "
|
||||
"target %s.%s. Stop the original patch before "
|
||||
"starting a new one. Traceback of 1st patch: %s"
|
||||
% (mytarget, myattr,
|
||||
''.join(self.first_traceback.get(key, []))))
|
||||
self.first_traceback[
|
||||
str((mytarget, myattr))] = traceback.format_stack()[:-2]
|
||||
return orig_start(mself)
|
||||
|
||||
mock.patch('mock._patch.start', new=new_start).start()
|
||||
|
||||
def check_for_systemexit(self, exc_info):
|
||||
if isinstance(exc_info[1], SystemExit):
|
||||
if os.getpid() != self.orig_pid:
|
||||
|
|
|
@ -68,7 +68,6 @@ class L3AgentTestFramework(base.BaseSudoTestCase):
|
|||
self.mock_plugin_api = mock.patch(
|
||||
'neutron.agent.l3.agent.L3PluginApi').start().return_value
|
||||
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
|
||||
mock.patch.object(ip_lib, '_arping').start()
|
||||
self.agent = self._configure_agent('agent1')
|
||||
|
||||
def _get_config_opts(self):
|
||||
|
|
|
@ -577,6 +577,23 @@ class OVS_Lib_Test(base.BaseTestCase):
|
|||
def test_get_vif_ports_xen(self):
|
||||
self._test_get_vif_ports(is_xen=True)
|
||||
|
||||
def test_get_vif_ports_with_bond(self):
|
||||
pname = "bond0"
|
||||
#NOTE(dprince): bond ports don't have records in the Interface table
|
||||
external_ids = ('{"data":[], "headings":[]}')
|
||||
|
||||
# Each element is a tuple of (expected mock call, return_value)
|
||||
expected_calls_and_values = [
|
||||
(self._vsctl_mock("list-ports", self.BR_NAME), "%s\n" % pname),
|
||||
(self._vsctl_mock("--columns=name,external_ids,ofport", "list",
|
||||
"Interface"), external_ids),
|
||||
]
|
||||
tools.setup_mock_calls(self.execute, expected_calls_and_values)
|
||||
|
||||
ports = self.br.get_vif_ports()
|
||||
self.assertEqual(0, len(ports))
|
||||
tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
||||
|
||||
def test_get_vif_port_set_nonxen(self):
|
||||
self._test_get_vif_port_set(False)
|
||||
|
||||
|
|
|
@ -57,7 +57,7 @@ class TestAsyncProcess(base.BaseTestCase):
|
|||
with mock.patch.object(self.proc, '_kill') as kill:
|
||||
self.proc._handle_process_error()
|
||||
|
||||
kill.assert_has_calls(mock.call(respawning=False))
|
||||
kill.assert_has_calls([mock.call(respawning=False)])
|
||||
|
||||
def test__handle_process_error_kills_without_respawn(self):
|
||||
self.proc.respawn_interval = 1
|
||||
|
@ -66,8 +66,8 @@ class TestAsyncProcess(base.BaseTestCase):
|
|||
with mock.patch('eventlet.sleep') as sleep:
|
||||
self.proc._handle_process_error()
|
||||
|
||||
kill.assert_has_calls(mock.call(respawning=True))
|
||||
sleep.assert_has_calls(mock.call(self.proc.respawn_interval))
|
||||
kill.assert_has_calls([mock.call(respawning=True)])
|
||||
sleep.assert_has_calls([mock.call(self.proc.respawn_interval)])
|
||||
spawn.assert_called_once_with()
|
||||
|
||||
def _test__watch_process(self, callback, kill_event):
|
||||
|
|
|
@ -49,7 +49,7 @@ class TestPrivileges(base.BaseTestCase):
|
|||
with mock.patch.object(daemon.LOG, 'critical') as log_critical:
|
||||
self.assertRaises(exceptions.FailToDropPrivilegesExit,
|
||||
daemon.setuid, '321')
|
||||
log_critical.assert_once_with(mock.ANY)
|
||||
log_critical.assert_called_once_with(mock.ANY)
|
||||
|
||||
def test_setgid_with_name(self):
|
||||
with mock.patch('grp.getgrnam', return_value=FakeEntry('gr_gid', 123)):
|
||||
|
@ -67,7 +67,7 @@ class TestPrivileges(base.BaseTestCase):
|
|||
with mock.patch.object(daemon.LOG, 'critical') as log_critical:
|
||||
self.assertRaises(exceptions.FailToDropPrivilegesExit,
|
||||
daemon.setgid, '321')
|
||||
log_critical.assert_once_with(mock.ANY)
|
||||
log_critical.assert_called_once_with(mock.ANY)
|
||||
|
||||
@mock.patch.object(os, 'setgroups')
|
||||
@mock.patch.object(daemon, 'setgid')
|
||||
|
@ -113,7 +113,7 @@ class TestPrivileges(base.BaseTestCase):
|
|||
with mock.patch.object(daemon.LOG, 'critical') as log_critical:
|
||||
self.assertRaises(exceptions.FailToDropPrivilegesExit,
|
||||
daemon.drop_privileges, 'user')
|
||||
log_critical.assert_once_with(mock.ANY)
|
||||
log_critical.assert_called_once_with(mock.ANY)
|
||||
|
||||
|
||||
class TestPidfile(base.BaseTestCase):
|
||||
|
|
|
@ -171,8 +171,9 @@ class TestProcessManager(base.BaseTestCase):
|
|||
|
||||
with mock.patch.object(ep, 'utils') as utils:
|
||||
manager.disable()
|
||||
utils.assert_has_calls(
|
||||
mock.call.execute(['kill', '-9', 4], run_as_root=True))
|
||||
utils.assert_has_calls([
|
||||
mock.call.execute(['kill', '-9', 4],
|
||||
run_as_root=True)])
|
||||
|
||||
def test_disable_namespace(self):
|
||||
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
|
||||
|
@ -184,8 +185,9 @@ class TestProcessManager(base.BaseTestCase):
|
|||
|
||||
with mock.patch.object(ep, 'utils') as utils:
|
||||
manager.disable()
|
||||
utils.assert_has_calls(
|
||||
mock.call.execute(['kill', '-9', 4], run_as_root=True))
|
||||
utils.assert_has_calls([
|
||||
mock.call.execute(['kill', '-9', 4],
|
||||
run_as_root=True)])
|
||||
|
||||
def test_disable_not_active(self):
|
||||
with mock.patch.object(ep.ProcessManager, 'pid') as pid:
|
||||
|
|
|
@ -695,4 +695,4 @@ class TestMidonetInterfaceDriver(TestBase):
|
|||
self.ip_dev.assert_has_calls([
|
||||
mock.call(self.device_name, namespace=self.namespace),
|
||||
mock.call().link.delete()])
|
||||
self.ip.assert_has_calls(mock.call().garbage_collect_namespace())
|
||||
self.ip.assert_has_calls([mock.call().garbage_collect_namespace()])
|
||||
|
|
|
@ -32,8 +32,8 @@ class TestGetPollingManager(base.BaseTestCase):
|
|||
with polling.get_polling_manager(minimize_polling=True) as pm:
|
||||
self.assertEqual(pm.__class__,
|
||||
polling.InterfacePollingMinimizer)
|
||||
mock_stop.assert_has_calls(mock.call())
|
||||
mock_start.assert_has_calls(mock.call())
|
||||
mock_stop.assert_has_calls([mock.call()])
|
||||
mock_start.assert_has_calls([mock.call()])
|
||||
|
||||
|
||||
class TestInterfacePollingMinimizer(base.BaseTestCase):
|
||||
|
|
|
@ -1637,23 +1637,12 @@ COMMIT
|
|||
|
||||
CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'
|
||||
|
||||
# These Dicts use the same keys as devices2 and devices3 in
|
||||
# TestSecurityGroupAgentWithIptables() to ensure that the ordering
|
||||
# is consistent regardless of hashseed value
|
||||
PORTS = {'tap_port1': 'port1', 'tap_port2': 'port2'}
|
||||
MACS = {'tap_port1': '12:34:56:78:9A:BC', 'tap_port2': '12:34:56:78:9A:BD'}
|
||||
IPS = {'tap_port1': '10.0.0.3/32', 'tap_port2': '10.0.0.4/32'}
|
||||
|
||||
ports_values = list(PORTS.values())
|
||||
macs_values = list(MACS.values())
|
||||
ips_values = list(IPS.values())
|
||||
|
||||
IPTABLES_ARG['port1'] = ports_values[0]
|
||||
IPTABLES_ARG['port2'] = ports_values[1]
|
||||
IPTABLES_ARG['mac1'] = macs_values[0]
|
||||
IPTABLES_ARG['mac2'] = macs_values[1]
|
||||
IPTABLES_ARG['ip1'] = ips_values[0]
|
||||
IPTABLES_ARG['ip2'] = ips_values[1]
|
||||
IPTABLES_ARG['port1'] = 'port1'
|
||||
IPTABLES_ARG['port2'] = 'port2'
|
||||
IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC'
|
||||
IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD'
|
||||
IPTABLES_ARG['ip1'] = '10.0.0.3/32'
|
||||
IPTABLES_ARG['ip2'] = '10.0.0.4/32'
|
||||
IPTABLES_ARG['chains'] = CHAINS_NAT
|
||||
|
||||
IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager
|
||||
|
@ -2136,12 +2125,6 @@ COMMIT
|
|||
# Completed by iptables_manager
|
||||
""" % IPTABLES_ARG
|
||||
|
||||
# These Dicts use the same keys as devices2 and devices3 in
|
||||
# TestSecurityGroupAgentWithIptables() to ensure that the ordering
|
||||
# is consistent regardless of hashseed value
|
||||
REVERSE_PORT_ORDER = {'tap_port1': False, 'tap_port2': True}
|
||||
reverse_port_order_values = list(REVERSE_PORT_ORDER.values())
|
||||
|
||||
IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
|
||||
*filter
|
||||
:neutron-filter-top - [0:0]
|
||||
|
@ -2174,10 +2157,6 @@ IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
|
|||
--dport 68 -j RETURN
|
||||
[0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN
|
||||
""" % IPTABLES_ARG
|
||||
if reverse_port_order_values[0]:
|
||||
IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port1)s -s %(ip2)s "
|
||||
"-j RETURN\n"
|
||||
% IPTABLES_ARG)
|
||||
IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
|
||||
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
|
||||
%(physdev_is_bridged)s -j %(bn)s-sg-chain
|
||||
|
@ -2205,10 +2184,9 @@ IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback
|
|||
--dport 68 -j RETURN
|
||||
[0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN
|
||||
""" % IPTABLES_ARG
|
||||
if not reverse_port_order_values[0]:
|
||||
IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s "
|
||||
"-j RETURN\n"
|
||||
% IPTABLES_ARG)
|
||||
IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s "
|
||||
"-j RETURN\n"
|
||||
% IPTABLES_ARG)
|
||||
IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback
|
||||
[0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
|
||||
%(physdev_is_bridged)s -j %(bn)s-sg-chain
|
||||
|
@ -2554,27 +2532,39 @@ class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
|
|||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
rule1)}
|
||||
self.devices2 = {'tap_port1': self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
rule2),
|
||||
'tap_port2': self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
rule4)}
|
||||
self.devices3 = {'tap_port1': self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
rule3),
|
||||
'tap_port2': self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
rule5)}
|
||||
self.devices2 = collections.OrderedDict([
|
||||
('tap_port1', self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
rule2)),
|
||||
('tap_port2', self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
rule4))
|
||||
])
|
||||
self.devices3 = collections.OrderedDict([
|
||||
('tap_port1', self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
rule3)),
|
||||
('tap_port2', self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
rule5))
|
||||
])
|
||||
|
||||
@staticmethod
|
||||
def _enforce_order_in_firewall(firewall):
|
||||
# for the sake of the test, eliminate any order randomness:
|
||||
# it helps to match iptables output against regexps consistently
|
||||
for attr in ('filtered_ports', 'unfiltered_ports'):
|
||||
setattr(firewall, attr, collections.OrderedDict())
|
||||
|
||||
def _init_agent(self, defer_refresh_firewall):
|
||||
self.agent = sg_rpc.SecurityGroupAgentRpc(
|
||||
context=None, plugin_rpc=self.rpc,
|
||||
defer_refresh_firewall=defer_refresh_firewall)
|
||||
self._enforce_order_in_firewall(self.agent.firewall)
|
||||
|
||||
def _device(self, device, ip, mac_address, rule):
|
||||
return {'device': device,
|
||||
|
@ -2742,14 +2732,16 @@ class TestSecurityGroupAgentEnhancedRpcWithIptables(
|
|||
'security_group1': {
|
||||
'IPv4': ['10.0.0.3/32'], 'IPv6': []}},
|
||||
'devices': devices_info1}
|
||||
devices_info2 = {'tap_port1': self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
[]),
|
||||
'tap_port2': self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
[])}
|
||||
devices_info2 = collections.OrderedDict([
|
||||
('tap_port1', self._device('tap_port1',
|
||||
'10.0.0.3/32',
|
||||
'12:34:56:78:9a:bc',
|
||||
[])),
|
||||
('tap_port2', self._device('tap_port2',
|
||||
'10.0.0.4/32',
|
||||
'12:34:56:78:9a:bd',
|
||||
[]))
|
||||
])
|
||||
self.devices_info2 = {'security_groups': {'security_group1': rule1},
|
||||
'sg_member_ips': {
|
||||
'security_group1': {
|
||||
|
@ -2943,6 +2935,7 @@ class TestSecurityGroupAgentWithOVSIptables(
|
|||
context=None, plugin_rpc=self.rpc,
|
||||
local_vlan_map=local_vlan_map,
|
||||
defer_refresh_firewall=defer_refresh_firewall)
|
||||
self._enforce_order_in_firewall(self.agent.firewall)
|
||||
|
||||
def test_prepare_remove_port(self):
|
||||
self.rpc.security_group_rules_for_devices.return_value = self.devices1
|
||||
|
|
|
@ -84,7 +84,7 @@ class TestDhcpRpcCallback(base.BaseTestCase):
|
|||
def _test__port_action_good_action(self, action, port, expected_call):
|
||||
self.callbacks._port_action(self.plugin, mock.Mock(),
|
||||
port, action)
|
||||
self.plugin.assert_has_calls(expected_call)
|
||||
self.plugin.assert_has_calls([expected_call])
|
||||
|
||||
def test_port_action_create_port(self):
|
||||
self._test__port_action_good_action(
|
||||
|
@ -188,8 +188,8 @@ class TestDhcpRpcCallback(base.BaseTestCase):
|
|||
host='foo_host',
|
||||
port_id='foo_port_id',
|
||||
port=port)
|
||||
self.plugin.assert_has_calls(
|
||||
mock.call.update_port(mock.ANY, 'foo_port_id', expected_port))
|
||||
self.plugin.assert_has_calls([
|
||||
mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)])
|
||||
|
||||
def test_release_dhcp_port(self):
|
||||
port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
|
||||
|
|
|
@ -279,6 +279,21 @@ class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
|
|||
res = req.get_response(self.api)
|
||||
self.assertEqual(409, res.status_int)
|
||||
|
||||
def test_update_with_none_and_own_mac_for_duplicate_ip(self):
|
||||
with self.network() as net:
|
||||
res = self._create_port(self.fmt, net['network']['id'])
|
||||
port = self.deserialize(self.fmt, res)
|
||||
mac_address = port['port']['mac_address']
|
||||
address_pairs = [{'ip_address': '10.0.0.1'},
|
||||
{'mac_address': mac_address,
|
||||
'ip_address': '10.0.0.1'}]
|
||||
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
|
||||
address_pairs}}
|
||||
req = self.new_update_request('ports', update_port,
|
||||
port['port']['id'])
|
||||
res = req.get_response(self.api)
|
||||
self.assertEqual(400, res.status_int)
|
||||
|
||||
def test_create_port_remove_allowed_address_pairs(self):
|
||||
with self.network() as net:
|
||||
address_pairs = [{'mac_address': '00:00:00:00:00:01',
|
||||
|
|
|
@ -40,6 +40,7 @@ from neutron.common import ipv6_utils
|
|||
from neutron.common import test_lib
|
||||
from neutron.common import utils
|
||||
from neutron import context
|
||||
from neutron.db import db_base_plugin_common
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import ipam_non_pluggable_backend as non_ipam
|
||||
from neutron.db import models_v2
|
||||
|
@ -1626,7 +1627,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
|
|||
self.assertEqual(res.status_int,
|
||||
webob.exc.HTTPClientError.code)
|
||||
|
||||
@mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
@mock.patch.object(non_ipam.IpamNonPluggableBackend,
|
||||
'_allocate_specific_ip')
|
||||
def test_requested_fixed_ip_address_v6_slaac_router_iface(
|
||||
self, alloc_specific_ip):
|
||||
|
@ -3812,6 +3813,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
|
|||
'dummy_key', 'dummy_key_table')
|
||||
mock.patch.object(orm.Session, 'add',
|
||||
side_effect=db_ref_err_for_ipalloc).start()
|
||||
mock.patch.object(non_ipam.IpamNonPluggableBackend,
|
||||
'_get_subnet',
|
||||
return_value=mock.Mock()).start()
|
||||
# Add an IPv6 auto-address subnet to the network
|
||||
v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1',
|
||||
'fe80::/64', ip_version=6,
|
||||
|
@ -5374,7 +5378,7 @@ class TestNeutronDbPluginV2(base.BaseTestCase):
|
|||
context.session.query.side_effect = return_queries_side_effect
|
||||
subnets = [mock.MagicMock()]
|
||||
|
||||
db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges(
|
||||
non_ipam.IpamNonPluggableBackend._rebuild_availability_ranges(
|
||||
context, subnets)
|
||||
|
||||
actual = [[args[0].allocation_pool_id,
|
||||
|
@ -5437,15 +5441,18 @@ class TestNeutronDbPluginV2(base.BaseTestCase):
|
|||
expected)
|
||||
|
||||
def _test__allocate_ips_for_port(self, subnets, port, expected):
|
||||
# this test is incompatible with pluggable ipam, because subnets
|
||||
# were not actually created, so no ipam_subnet exists
|
||||
cfg.CONF.set_override("ipam_driver", None)
|
||||
plugin = db_base_plugin_v2.NeutronDbPluginV2()
|
||||
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
with mock.patch.object(db_base_plugin_common.DbBasePluginCommon,
|
||||
'_get_subnets') as get_subnets:
|
||||
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
with mock.patch.object(non_ipam.IpamNonPluggableBackend,
|
||||
'_check_unique_ip') as check_unique:
|
||||
context = mock.Mock()
|
||||
get_subnets.return_value = subnets
|
||||
check_unique.return_value = True
|
||||
actual = plugin._allocate_ips_for_port(context, port)
|
||||
actual = plugin.ipam._allocate_ips_for_port(context, port)
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test__allocate_ips_for_port_2_slaac_subnets(self):
|
||||
|
@ -5537,7 +5544,7 @@ class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
|
|||
ip_version=4)]
|
||||
new_subnetpool_id = None
|
||||
self.assertRaises(n_exc.NetworkSubnetPoolAffinityError,
|
||||
self.plugin._validate_network_subnetpools,
|
||||
self.plugin.ipam._validate_network_subnetpools,
|
||||
network, new_subnetpool_id, 4)
|
||||
|
||||
|
||||
|
|
|
@ -1292,6 +1292,18 @@ class L3NatTestCaseBase(L3NatTestCaseMixin):
|
|||
expected_code=err_code,
|
||||
tenant_id='bad_tenant')
|
||||
|
||||
def test_router_add_interface_port_without_ips(self):
|
||||
with self.network() as network, self.router() as r:
|
||||
# Create a router port without ips
|
||||
p = self._make_port(self.fmt, network['network']['id'],
|
||||
device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF)
|
||||
err_code = exc.HTTPBadRequest.code
|
||||
self._router_interface_action('add',
|
||||
r['router']['id'],
|
||||
None,
|
||||
p['port']['id'],
|
||||
expected_code=err_code)
|
||||
|
||||
def test_router_add_interface_dup_subnet1_returns_400(self):
|
||||
with self.router() as r:
|
||||
with self.subnet() as s:
|
||||
|
|
|
@ -19,9 +19,9 @@ from neutron.api.v2 import attributes
|
|||
from neutron.common import constants
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron import context
|
||||
from neutron import ipam
|
||||
from neutron.ipam.drivers.neutrondb_ipam import driver
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron import manager
|
||||
|
||||
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
|
||||
|
@ -102,7 +102,7 @@ class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
|
|||
cidr = '10.0.0.0/24'
|
||||
allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
|
||||
netaddr.IPRange('10.0.0.200', '10.0.0.250')]
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
self._tenant_id,
|
||||
None,
|
||||
cidr,
|
||||
|
@ -118,7 +118,7 @@ class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
|
|||
def _prepare_specific_subnet_request(self, cidr):
|
||||
subnet = self._create_subnet(
|
||||
self.plugin, self.ctx, self.net_id, cidr)
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
self._tenant_id,
|
||||
subnet['id'],
|
||||
cidr,
|
||||
|
@ -138,7 +138,8 @@ class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
|
|||
self.assertRaises(
|
||||
ipam_exc.InvalidSubnetRequestType,
|
||||
self.ipam_pool.allocate_subnet,
|
||||
ipam.AnySubnetRequest(self._tenant_id, 'meh', constants.IPv4, 24))
|
||||
ipam_req.AnySubnetRequest(self._tenant_id, 'meh',
|
||||
constants.IPv4, 24))
|
||||
|
||||
def test_update_subnet_pools(self):
|
||||
cidr = '10.0.0.0/24'
|
||||
|
@ -147,7 +148,7 @@ class TestNeutronDbIpamPool(testlib_api.SqlTestCase,
|
|||
ipam_subnet.associate_neutron_subnet(subnet['id'])
|
||||
allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'),
|
||||
netaddr.IPRange('10.0.0.200', '10.0.0.250')]
|
||||
update_subnet_req = ipam.SpecificSubnetRequest(
|
||||
update_subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
self._tenant_id,
|
||||
subnet['id'],
|
||||
cidr,
|
||||
|
@ -206,7 +207,7 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
allocation_pool_ranges = [netaddr.IPRange(
|
||||
pool['start'], pool['end']) for pool in
|
||||
subnet['allocation_pools']]
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
tenant_id,
|
||||
subnet['id'],
|
||||
cidr,
|
||||
|
@ -312,7 +313,7 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
cidr = '10.0.0.0/24'
|
||||
subnet = self._create_subnet(
|
||||
self.plugin, self.ctx, self.net_id, cidr)
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
'tenant_id', subnet, cidr, gateway_ip=subnet['gateway_ip'])
|
||||
ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
|
||||
with self.ctx.session.begin():
|
||||
|
@ -333,7 +334,7 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
|
||||
def test_allocate_any_v4_address_succeeds(self):
|
||||
ip_address = self._allocate_address(
|
||||
'10.0.0.0/24', 4, ipam.AnyAddressRequest)
|
||||
'10.0.0.0/24', 4, ipam_req.AnyAddressRequest)
|
||||
# As the DB IPAM driver allocation logic is strictly sequential, we can
|
||||
# expect this test to allocate the .2 address as .1 is used by default
|
||||
# as subnet gateway
|
||||
|
@ -341,7 +342,7 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
|
||||
def test_allocate_any_v6_address_succeeds(self):
|
||||
ip_address = self._allocate_address(
|
||||
'fde3:abcd:4321:1::/64', 6, ipam.AnyAddressRequest)
|
||||
'fde3:abcd:4321:1::/64', 6, ipam_req.AnyAddressRequest)
|
||||
# As the DB IPAM driver allocation logic is strictly sequential, we can
|
||||
# expect this test to allocate the .2 address as .1 is used by default
|
||||
# as subnet gateway
|
||||
|
@ -349,32 +350,32 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
|
||||
def test_allocate_specific_v4_address_succeeds(self):
|
||||
ip_address = self._allocate_address(
|
||||
'10.0.0.0/24', 4, ipam.SpecificAddressRequest('10.0.0.33'))
|
||||
'10.0.0.0/24', 4, ipam_req.SpecificAddressRequest('10.0.0.33'))
|
||||
self.assertEqual('10.0.0.33', ip_address)
|
||||
|
||||
def test_allocate_specific_v6_address_succeeds(self):
|
||||
ip_address = self._allocate_address(
|
||||
'fde3:abcd:4321:1::/64', 6,
|
||||
ipam.SpecificAddressRequest('fde3:abcd:4321:1::33'))
|
||||
ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33'))
|
||||
self.assertEqual('fde3:abcd:4321:1::33', ip_address)
|
||||
|
||||
def test_allocate_specific_v4_address_out_of_range_fails(self):
|
||||
self.assertRaises(ipam_exc.InvalidIpForSubnet,
|
||||
self._allocate_address,
|
||||
'10.0.0.0/24', 4,
|
||||
ipam.SpecificAddressRequest('192.168.0.1'))
|
||||
ipam_req.SpecificAddressRequest('192.168.0.1'))
|
||||
|
||||
def test_allocate_specific_v6_address_out_of_range_fails(self):
|
||||
self.assertRaises(ipam_exc.InvalidIpForSubnet,
|
||||
self._allocate_address,
|
||||
'fde3:abcd:4321:1::/64', 6,
|
||||
ipam.SpecificAddressRequest(
|
||||
ipam_req.SpecificAddressRequest(
|
||||
'fde3:abcd:eeee:1::33'))
|
||||
|
||||
def test_allocate_specific_address_in_use_fails(self):
|
||||
ipam_subnet = self._create_and_allocate_ipam_subnet(
|
||||
'fde3:abcd:4321:1::/64', ip_version=6)[0]
|
||||
addr_req = ipam.SpecificAddressRequest('fde3:abcd:4321:1::33')
|
||||
addr_req = ipam_req.SpecificAddressRequest('fde3:abcd:4321:1::33')
|
||||
ipam_subnet.allocate(addr_req)
|
||||
self.assertRaises(ipam_exc.IpAddressAlreadyAllocated,
|
||||
ipam_subnet.allocate,
|
||||
|
@ -384,16 +385,16 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
# Same as above, the ranges will be recalculated always
|
||||
ipam_subnet = self._create_and_allocate_ipam_subnet(
|
||||
'192.168.0.0/30', ip_version=4)[0]
|
||||
ipam_subnet.allocate(ipam.AnyAddressRequest)
|
||||
ipam_subnet.allocate(ipam_req.AnyAddressRequest)
|
||||
# The second address generation request on a /30 for v4 net must fail
|
||||
self.assertRaises(ipam_exc.IpAddressGenerationFailure,
|
||||
ipam_subnet.allocate,
|
||||
ipam.AnyAddressRequest)
|
||||
ipam_req.AnyAddressRequest)
|
||||
|
||||
def _test_deallocate_address(self, cidr, ip_version):
|
||||
ipam_subnet = self._create_and_allocate_ipam_subnet(
|
||||
cidr, ip_version=ip_version)[0]
|
||||
ip_address = ipam_subnet.allocate(ipam.AnyAddressRequest)
|
||||
ip_address = ipam_subnet.allocate(ipam_req.AnyAddressRequest)
|
||||
ipam_subnet.deallocate(ip_address)
|
||||
|
||||
def test_deallocate_v4_address(self):
|
||||
|
@ -416,14 +417,14 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
pass
|
||||
|
||||
def _test_allocate_subnet(self, subnet_id):
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
'tenant_id', subnet_id, '192.168.0.0/24')
|
||||
return self.ipam_pool.allocate_subnet(subnet_req)
|
||||
|
||||
def test_allocate_subnet_for_non_existent_subnet_pass(self):
|
||||
# This test should pass because neutron subnet is not checked
|
||||
# until associate neutron subnet step
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
'tenant_id', 'meh', '192.168.0.0/24')
|
||||
self.ipam_pool.allocate_subnet(subnet_req)
|
||||
|
||||
|
@ -434,7 +435,7 @@ class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase,
|
|||
self.assertEqual(subnet['id'], details.subnet_id)
|
||||
|
||||
def test_associate_non_existing_neutron_subnet_fails(self):
|
||||
subnet_req = ipam.SpecificSubnetRequest(
|
||||
subnet_req = ipam_req.SpecificSubnetRequest(
|
||||
'tenant_id', 'meh', '192.168.0.0/24')
|
||||
ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req)
|
||||
self.assertRaises(n_exc.SubnetNotFound,
|
||||
|
|
88
neutron/tests/unit/test_ipam.py → neutron/tests/unit/ipam/test_requests.py
Executable file → Normal file
88
neutron/tests/unit/test_ipam.py → neutron/tests/unit/ipam/test_requests.py
Executable file → Normal file
|
@ -20,9 +20,9 @@ from oslo_utils import uuidutils
|
|||
from neutron.common import constants
|
||||
from neutron.common import ipv6_utils
|
||||
from neutron import context
|
||||
from neutron import ipam
|
||||
from neutron.ipam import driver
|
||||
from neutron.ipam import exceptions as ipam_exc
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron import manager
|
||||
from neutron.tests import base
|
||||
from neutron.tests.unit.ipam import fake_driver
|
||||
|
@ -41,7 +41,7 @@ class IpamSubnetRequestTestCase(base.BaseTestCase):
|
|||
class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
||||
|
||||
def test_subnet_request(self):
|
||||
pool = ipam.SubnetRequest(self.tenant_id,
|
||||
pool = ipam_req.SubnetRequest(self.tenant_id,
|
||||
self.subnet_id)
|
||||
self.assertEqual(self.tenant_id, pool.tenant_id)
|
||||
self.assertEqual(self.subnet_id, pool.subnet_id)
|
||||
|
@ -49,14 +49,14 @@ class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
|||
self.assertEqual(None, pool.allocation_pools)
|
||||
|
||||
def test_subnet_request_gateway(self):
|
||||
request = ipam.SubnetRequest(self.tenant_id,
|
||||
request = ipam_req.SubnetRequest(self.tenant_id,
|
||||
self.subnet_id,
|
||||
gateway_ip='1.2.3.1')
|
||||
self.assertEqual('1.2.3.1', str(request.gateway_ip))
|
||||
|
||||
def test_subnet_request_bad_gateway(self):
|
||||
self.assertRaises(netaddr.core.AddrFormatError,
|
||||
ipam.SubnetRequest,
|
||||
ipam_req.SubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
gateway_ip='1.2.3.')
|
||||
|
@ -64,21 +64,21 @@ class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
|||
def test_subnet_request_with_range(self):
|
||||
allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'),
|
||||
netaddr.IPRange('1.2.3.7', '1.2.3.9')]
|
||||
request = ipam.SubnetRequest(self.tenant_id,
|
||||
request = ipam_req.SubnetRequest(self.tenant_id,
|
||||
self.subnet_id,
|
||||
allocation_pools=allocation_pools)
|
||||
self.assertEqual(allocation_pools, request.allocation_pools)
|
||||
|
||||
def test_subnet_request_range_not_list(self):
|
||||
self.assertRaises(TypeError,
|
||||
ipam.SubnetRequest,
|
||||
ipam_req.SubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
allocation_pools=1)
|
||||
|
||||
def test_subnet_request_bad_range(self):
|
||||
self.assertRaises(TypeError,
|
||||
ipam.SubnetRequest,
|
||||
ipam_req.SubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
allocation_pools=['1.2.3.4'])
|
||||
|
@ -87,7 +87,7 @@ class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
|||
pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'),
|
||||
netaddr.IPRange('::1', '::2')]
|
||||
self.assertRaises(ValueError,
|
||||
ipam.SubnetRequest,
|
||||
ipam_req.SubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
allocation_pools=pools)
|
||||
|
@ -96,7 +96,7 @@ class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
|||
pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'),
|
||||
netaddr.IPRange('0.0.0.8', '0.0.0.10')]
|
||||
self.assertRaises(ValueError,
|
||||
ipam.SubnetRequest,
|
||||
ipam_req.SubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
allocation_pools=pools)
|
||||
|
@ -105,7 +105,7 @@ class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
|
|||
class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
||||
|
||||
def test_subnet_request(self):
|
||||
request = ipam.AnySubnetRequest(self.tenant_id,
|
||||
request = ipam_req.AnySubnetRequest(self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv4,
|
||||
24,
|
||||
|
@ -114,7 +114,7 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
|
||||
def test_subnet_request_bad_prefix_type(self):
|
||||
self.assertRaises(netaddr.core.AddrFormatError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv4,
|
||||
|
@ -122,13 +122,13 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
|
||||
def test_subnet_request_bad_prefix(self):
|
||||
self.assertRaises(netaddr.core.AddrFormatError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv4,
|
||||
33)
|
||||
self.assertRaises(netaddr.core.AddrFormatError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv6,
|
||||
|
@ -136,7 +136,7 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
|
||||
def test_subnet_request_bad_gateway(self):
|
||||
self.assertRaises(ValueError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv6,
|
||||
|
@ -146,7 +146,7 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
def test_subnet_request_allocation_pool_wrong_version(self):
|
||||
pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')]
|
||||
self.assertRaises(ValueError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv6,
|
||||
|
@ -156,7 +156,7 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
def test_subnet_request_allocation_pool_not_in_net(self):
|
||||
pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')]
|
||||
self.assertRaises(ValueError,
|
||||
ipam.AnySubnetRequest,
|
||||
ipam_req.AnySubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
constants.IPv4,
|
||||
|
@ -167,7 +167,7 @@ class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
|
|||
class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
|
||||
|
||||
def test_subnet_request(self):
|
||||
request = ipam.SpecificSubnetRequest(self.tenant_id,
|
||||
request = ipam_req.SpecificSubnetRequest(self.tenant_id,
|
||||
self.subnet_id,
|
||||
'1.2.3.0/24',
|
||||
gateway_ip='1.2.3.1')
|
||||
|
@ -177,7 +177,7 @@ class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
|
|||
|
||||
def test_subnet_request_bad_gateway(self):
|
||||
self.assertRaises(ValueError,
|
||||
ipam.SpecificSubnetRequest,
|
||||
ipam_req.SpecificSubnetRequest,
|
||||
self.tenant_id,
|
||||
self.subnet_id,
|
||||
'2001::1',
|
||||
|
@ -189,28 +189,28 @@ class TestAddressRequest(base.BaseTestCase):
|
|||
# This class doesn't test much. At least running through all of the
|
||||
# constructors may shake out some trivial bugs.
|
||||
|
||||
EUI64 = ipam.AutomaticAddressRequest.EUI64
|
||||
EUI64 = ipam_req.AutomaticAddressRequest.EUI64
|
||||
|
||||
def setUp(self):
|
||||
super(TestAddressRequest, self).setUp()
|
||||
|
||||
def test_specific_address_ipv6(self):
|
||||
request = ipam.SpecificAddressRequest('2000::45')
|
||||
request = ipam_req.SpecificAddressRequest('2000::45')
|
||||
self.assertEqual(netaddr.IPAddress('2000::45'), request.address)
|
||||
|
||||
def test_specific_address_ipv4(self):
|
||||
request = ipam.SpecificAddressRequest('1.2.3.32')
|
||||
request = ipam_req.SpecificAddressRequest('1.2.3.32')
|
||||
self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address)
|
||||
|
||||
def test_any_address(self):
|
||||
ipam.AnyAddressRequest()
|
||||
ipam_req.AnyAddressRequest()
|
||||
|
||||
def test_automatic_address_request_eui64(self):
|
||||
subnet_cidr = '2607:f0d0:1002:51::/64'
|
||||
port_mac = 'aa:bb:cc:dd:ee:ff'
|
||||
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
|
||||
port_mac))
|
||||
request = ipam.AutomaticAddressRequest(
|
||||
request = ipam_req.AutomaticAddressRequest(
|
||||
address_type=self.EUI64,
|
||||
prefix=subnet_cidr,
|
||||
mac=port_mac)
|
||||
|
@ -218,18 +218,18 @@ class TestAddressRequest(base.BaseTestCase):
|
|||
|
||||
def test_automatic_address_request_invalid_address_type_raises(self):
|
||||
self.assertRaises(ipam_exc.InvalidAddressType,
|
||||
ipam.AutomaticAddressRequest,
|
||||
ipam_req.AutomaticAddressRequest,
|
||||
address_type='kaboom')
|
||||
|
||||
def test_automatic_address_request_eui64_no_mac_raises(self):
|
||||
self.assertRaises(ipam_exc.AddressCalculationFailure,
|
||||
ipam.AutomaticAddressRequest,
|
||||
ipam_req.AutomaticAddressRequest,
|
||||
address_type=self.EUI64,
|
||||
prefix='meh')
|
||||
|
||||
def test_automatic_address_request_eui64_alien_param_raises(self):
|
||||
self.assertRaises(ipam_exc.AddressCalculationFailure,
|
||||
ipam.AutomaticAddressRequest,
|
||||
ipam_req.AutomaticAddressRequest,
|
||||
address_type=self.EUI64,
|
||||
mac='meh',
|
||||
alien='et',
|
||||
|
@ -265,7 +265,7 @@ class TestIpamDriverLoader(base.BaseTestCase):
|
|||
|
||||
def test_ipam_driver_raises_import_error(self):
|
||||
self._verify_import_error_is_generated(
|
||||
'neutron.tests.unit.ipam.SomeNonExistentClass')
|
||||
'neutron.tests.unit.ipam_req.SomeNonExistentClass')
|
||||
|
||||
def test_ipam_driver_raises_import_error_for_none(self):
|
||||
self._verify_import_error_is_generated(None)
|
||||
|
@ -292,18 +292,18 @@ class TestAddressRequestFactory(base.BaseTestCase):
|
|||
def test_specific_address_request_is_loaded(self):
|
||||
for address in ('10.12.0.15', 'fffe::1'):
|
||||
self.assertIsInstance(
|
||||
ipam.AddressRequestFactory.get_request(None,
|
||||
None,
|
||||
address),
|
||||
ipam.SpecificAddressRequest)
|
||||
ipam_req.AddressRequestFactory.get_request(None,
|
||||
None,
|
||||
address),
|
||||
ipam_req.SpecificAddressRequest)
|
||||
|
||||
def test_any_address_request_is_loaded(self):
|
||||
for addr in [None, '']:
|
||||
self.assertIsInstance(
|
||||
ipam.AddressRequestFactory.get_request(None,
|
||||
ipam_req.AddressRequestFactory.get_request(None,
|
||||
None,
|
||||
addr),
|
||||
ipam.AnyAddressRequest)
|
||||
ipam_req.AnyAddressRequest)
|
||||
|
||||
|
||||
class TestSubnetRequestFactory(IpamSubnetRequestTestCase):
|
||||
|
@ -314,6 +314,8 @@ class TestSubnetRequestFactory(IpamSubnetRequestTestCase):
|
|||
'prefixlen': prefixlen,
|
||||
'ip_version': ip_version,
|
||||
'tenant_id': self.tenant_id,
|
||||
'gateway_ip': None,
|
||||
'allocation_pools': None,
|
||||
'id': id or self.subnet_id}
|
||||
subnetpool = {'ip_version': ip_version,
|
||||
'default_prefixlen': prefixlen}
|
||||
|
@ -328,34 +330,34 @@ class TestSubnetRequestFactory(IpamSubnetRequestTestCase):
|
|||
for address in addresses:
|
||||
subnet, subnetpool = self._build_subnet_dict(cidr=address)
|
||||
self.assertIsInstance(
|
||||
ipam.SubnetRequestFactory.get_request(None,
|
||||
ipam_req.SubnetRequestFactory.get_request(None,
|
||||
subnet,
|
||||
subnetpool),
|
||||
ipam.SpecificSubnetRequest)
|
||||
ipam_req.SpecificSubnetRequest)
|
||||
|
||||
def test_any_address_request_is_loaded_for_ipv4(self):
|
||||
subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=4)
|
||||
self.assertIsInstance(
|
||||
ipam.SubnetRequestFactory.get_request(None,
|
||||
ipam_req.SubnetRequestFactory.get_request(None,
|
||||
subnet,
|
||||
subnetpool),
|
||||
ipam.AnySubnetRequest)
|
||||
ipam_req.AnySubnetRequest)
|
||||
|
||||
def test_any_address_request_is_loaded_for_ipv6(self):
|
||||
subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=6)
|
||||
self.assertIsInstance(
|
||||
ipam.SubnetRequestFactory.get_request(None,
|
||||
ipam_req.SubnetRequestFactory.get_request(None,
|
||||
subnet,
|
||||
subnetpool),
|
||||
ipam.AnySubnetRequest)
|
||||
ipam_req.AnySubnetRequest)
|
||||
|
||||
def test_args_are_passed_to_specific_request(self):
|
||||
subnet, subnetpool = self._build_subnet_dict()
|
||||
request = ipam.SubnetRequestFactory.get_request(None,
|
||||
request = ipam_req.SubnetRequestFactory.get_request(None,
|
||||
subnet,
|
||||
subnetpool)
|
||||
self.assertIsInstance(request,
|
||||
ipam.SpecificSubnetRequest)
|
||||
ipam_req.SpecificSubnetRequest)
|
||||
self.assertEqual(self.tenant_id, request.tenant_id)
|
||||
self.assertEqual(self.subnet_id, request.subnet_id)
|
||||
self.assertEqual(None, request.gateway_ip)
|
||||
|
@ -372,9 +374,9 @@ class TestGetRequestFactory(base.BaseTestCase):
|
|||
def test_get_subnet_request_factory(self):
|
||||
self.assertEqual(
|
||||
self.driver.get_subnet_request_factory(),
|
||||
ipam.SubnetRequestFactory)
|
||||
ipam_req.SubnetRequestFactory)
|
||||
|
||||
def test_get_address_request_factory(self):
|
||||
self.assertEqual(
|
||||
self.driver.get_address_request_factory(),
|
||||
ipam.AddressRequestFactory)
|
||||
ipam_req.AddressRequestFactory)
|
|
@ -21,7 +21,7 @@ from neutron.api.v2 import attributes
|
|||
from neutron.common import constants
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron import context
|
||||
import neutron.ipam as ipam
|
||||
from neutron.ipam import requests as ipam_req
|
||||
from neutron.ipam import subnet_alloc
|
||||
from neutron import manager
|
||||
from neutron.tests.unit.db import test_db_base_plugin_v2
|
||||
|
@ -64,7 +64,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
with self.ctx.session.begin(subtransactions=True):
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.AnySubnetRequest(self._tenant_id,
|
||||
req = ipam_req.AnySubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
constants.IPv4, 21)
|
||||
res = sa.allocate_subnet(req)
|
||||
|
@ -81,7 +81,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
with self.ctx.session.begin(subtransactions=True):
|
||||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.SpecificSubnetRequest(self._tenant_id,
|
||||
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'10.1.2.0/24')
|
||||
res = sa.allocate_subnet(req)
|
||||
|
@ -96,7 +96,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
21, 4)
|
||||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.AnySubnetRequest(self._tenant_id,
|
||||
req = ipam_req.AnySubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
constants.IPv4,
|
||||
21)
|
||||
|
@ -109,7 +109,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
21, 4)
|
||||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.SpecificSubnetRequest(self._tenant_id,
|
||||
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'10.1.0.0/21')
|
||||
self.assertRaises(n_exc.SubnetAllocationError,
|
||||
|
@ -122,7 +122,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
with self.ctx.session.begin(subtransactions=True):
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.AnySubnetRequest(self._tenant_id,
|
||||
req = ipam_req.AnySubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
constants.IPv4, 21)
|
||||
res = sa.allocate_subnet(req)
|
||||
|
@ -137,7 +137,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
with self.ctx.session.begin(subtransactions=True):
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.SpecificSubnetRequest(self._tenant_id,
|
||||
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'10.1.2.0/24',
|
||||
gateway_ip='10.1.2.254')
|
||||
|
@ -154,10 +154,10 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
with self.ctx.session.begin(subtransactions=True):
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'2210::/64',
|
||||
'2210::ffff:ffff:ffff:ffff')
|
||||
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'2210::/64',
|
||||
'2210::ffff:ffff:ffff:ffff')
|
||||
res = sa.allocate_subnet(req)
|
||||
detail = res.get_details()
|
||||
self.assertEqual(detail.gateway_ip,
|
||||
|
@ -177,7 +177,7 @@ class TestSubnetAllocation(testlib_api.SqlTestCase):
|
|||
48, 6, default_quota=1)
|
||||
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
|
||||
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
|
||||
req = ipam.SpecificSubnetRequest(self._tenant_id,
|
||||
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
|
||||
uuidutils.generate_uuid(),
|
||||
'fe80::/63')
|
||||
self.assertRaises(n_exc.SubnetPoolQuotaExceeded,
|
||||
|
|
|
@ -254,12 +254,92 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
|
|||
|
||||
mechanism_arista.db_lib.assert_has_calls(expected_calls)
|
||||
|
||||
def test_update_port_precommit(self):
|
||||
tenant_id = 'ten-1'
|
||||
network_id = 'net1-id'
|
||||
segmentation_id = 1001
|
||||
vm_id = 'vm1'
|
||||
|
||||
network_context = self._get_network_context(tenant_id,
|
||||
network_id,
|
||||
segmentation_id,
|
||||
False)
|
||||
|
||||
port_context = self._get_port_context(tenant_id,
|
||||
network_id,
|
||||
vm_id,
|
||||
network_context)
|
||||
host_id = port_context.current['binding:host_id']
|
||||
port_context.original['binding:host_id'] = 'ubuntu0'
|
||||
port_id = port_context.current['id']
|
||||
self.drv.update_port_precommit(port_context)
|
||||
|
||||
expected_calls = [
|
||||
mock.call.update_vm_host(vm_id, host_id, port_id,
|
||||
network_id, tenant_id)
|
||||
]
|
||||
|
||||
mechanism_arista.db_lib.assert_has_calls(expected_calls)
|
||||
|
||||
def test_update_port_postcommit(self):
|
||||
tenant_id = 'ten-1'
|
||||
network_id = 'net1-id'
|
||||
segmentation_id = 1001
|
||||
vm_id = 'vm1'
|
||||
|
||||
network_context = self._get_network_context(tenant_id,
|
||||
network_id,
|
||||
segmentation_id,
|
||||
False)
|
||||
port_context = self._get_port_context(tenant_id,
|
||||
network_id,
|
||||
vm_id,
|
||||
network_context)
|
||||
|
||||
mechanism_arista.db_lib.is_vm_provisioned.return_value = True
|
||||
mechanism_arista.db_lib.is_network_provisioned.return_value = True
|
||||
mechanism_arista.db_lib.get_shared_network_owner_id.return_value = 1
|
||||
mechanism_arista.db_lib.get_segmentation_id.return_value = 1001
|
||||
mechanism_arista.db_lib.num_nets_provisioned.return_value = 1
|
||||
mechanism_arista.db_lib.num_vms_provisioned.return_value = 1
|
||||
|
||||
port = port_context.current
|
||||
device_id = port['device_id']
|
||||
device_owner = port['device_owner']
|
||||
host_id = port['binding:host_id']
|
||||
orig_host_id = 'ubuntu0'
|
||||
port_context.original['binding:host_id'] = orig_host_id
|
||||
port_id = port['id']
|
||||
port_name = port['name']
|
||||
|
||||
self.drv.update_port_postcommit(port_context)
|
||||
|
||||
expected_calls = [
|
||||
mock.call.NeutronNets(),
|
||||
mock.call.get_segmentation_id(tenant_id, network_id),
|
||||
mock.call.is_vm_provisioned(device_id, host_id, port_id,
|
||||
network_id, tenant_id),
|
||||
mock.call.is_network_provisioned(tenant_id, network_id,
|
||||
segmentation_id),
|
||||
mock.call.is_network_provisioned(tenant_id, network_id),
|
||||
mock.call.unplug_host_from_network(device_id, orig_host_id,
|
||||
port_id, network_id, tenant_id),
|
||||
mock.call.num_nets_provisioned(tenant_id),
|
||||
mock.call.num_vms_provisioned(tenant_id),
|
||||
mock.call.plug_port_into_network(device_id, host_id, port_id,
|
||||
network_id, tenant_id,
|
||||
port_name, device_owner)
|
||||
]
|
||||
|
||||
mechanism_arista.db_lib.assert_has_calls(expected_calls)
|
||||
|
||||
def _get_network_context(self, tenant_id, net_id, seg_id, shared):
|
||||
network = {'id': net_id,
|
||||
'tenant_id': tenant_id,
|
||||
'name': 'test-net',
|
||||
'shared': shared}
|
||||
network_segments = [{'segmentation_id': seg_id}]
|
||||
network_segments = [{'segmentation_id': seg_id,
|
||||
'network_type': 'vlan'}]
|
||||
return FakeNetworkContext(network, network_segments, network)
|
||||
|
||||
def _get_port_context(self, tenant_id, net_id, vm_id, network):
|
||||
|
@ -271,7 +351,7 @@ class AristaDriverTestCase(testlib_api.SqlTestCase):
|
|||
'id': 101,
|
||||
'network_id': net_id
|
||||
}
|
||||
return FakePortContext(port, port, network)
|
||||
return FakePortContext(port, dict(port), network)
|
||||
|
||||
|
||||
class fake_keystone_info_class(object):
|
||||
|
|
|
@ -1577,6 +1577,8 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase):
|
|||
'get_service_plugins',
|
||||
return_value={'L3_ROUTER_NAT': l3plugin}):
|
||||
plugin = self._create_plugin_for_create_update_port(mock.Mock())
|
||||
# Set backend manually here since __init__ was mocked
|
||||
plugin.set_ipam_backend()
|
||||
# deleting the port will call registry.notify, which will
|
||||
# run the transaction balancing function defined in this test
|
||||
plugin.delete_port(self.context, 'fake_id')
|
||||
|
|
|
@ -63,7 +63,7 @@ class TestMeteringOperations(base.BaseTestCase):
|
|||
self.metering_rpc_patch = mock.patch(metering_rpc, return_value=[])
|
||||
self.metering_rpc_patch.start()
|
||||
|
||||
self.driver_patch = mock.patch(self.noop_driver, autospec=True)
|
||||
self.driver_patch = mock.patch(self.noop_driver, spec=True)
|
||||
self.driver_patch.start()
|
||||
|
||||
loopingcall_patch = mock.patch(
|
||||
|
|
1
tox.ini
1
tox.ini
|
@ -195,6 +195,7 @@ commands = python -m testtools.run \
|
|||
neutron.tests.unit.agent.linux.test_ip_link_support \
|
||||
neutron.tests.unit.agent.linux.test_interface \
|
||||
neutron.tests.unit.test_auth \
|
||||
neutron.tests.unit.test_policy \
|
||||
neutron.tests.unit.extensions.v2attributes \
|
||||
neutron.tests.unit.extensions.test_l3_ext_gw_mode \
|
||||
neutron.tests.unit.extensions.test_extra_dhcp_opt \
|
||||
|
|
Loading…
Reference in New Issue