Fix some pylint indentation warnings

Running with a stricter .pylintrc generates a lot of
C0330 warnings (hanging/continued indentation). Fix
the ones in neutron/db.

Trivialfix

Change-Id: I9311cfe5efc51552008072d84aa238e5d0c9de60
This commit is contained in:
Brian Haley 2022-11-03 19:50:54 -04:00
parent ba795c6692
commit 55b16d7b7c
27 changed files with 165 additions and 165 deletions

View File

@ -146,7 +146,7 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
'name': k[0], 'resource': k[1], 'name': k[0], 'resource': k[1],
'tenant_id': context.tenant_id} 'tenant_id': context.tenant_id}
for k, v in self._list_availability_zones( for k, v in self._list_availability_zones(
context, filters).items() context, filters).items()
if not filter_states or v in filter_states] if not filter_states or v in filter_states]
else: else:
# NOTE(hichihara): 'tenant_id' is dummy for policy check. # NOTE(hichihara): 'tenant_id' is dummy for policy check.
@ -155,7 +155,7 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
'name': k[0], 'resource': k[1], 'name': k[0], 'resource': k[1],
'tenant_id': context.tenant_id} 'tenant_id': context.tenant_id}
for k, v in self._list_availability_zones( for k, v in self._list_availability_zones(
context, filters).items()] context, filters).items()]
@db_api.retry_if_session_inactive() @db_api.retry_if_session_inactive()
def validate_availability_zones(self, context, resource_type, def validate_availability_zones(self, context, resource_type,

View File

@ -219,7 +219,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
agent_dead_limit = datetime.timedelta( agent_dead_limit = datetime.timedelta(
seconds=self.agent_dead_limit_seconds()) seconds=self.agent_dead_limit_seconds())
network_count = network.NetworkDhcpAgentBinding.count( network_count = network.NetworkDhcpAgentBinding.count(
context, dhcp_agent_id=agent['id']) context, dhcp_agent_id=agent['id'])
# amount of networks assigned to agent affect amount of time we give # amount of networks assigned to agent affect amount of time we give
# it to start up. Tests show that it's more or less safe to assume # it to start up. Tests show that it's more or less safe to assume
# that DHCP agent processes each network in less than 2 seconds. # that DHCP agent processes each network in less than 2 seconds.
@ -367,7 +367,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
# get all the NDAB objects, which will also fetch (from DB) # get all the NDAB objects, which will also fetch (from DB)
# the related dhcp_agent objects because of the synthetic field # the related dhcp_agent objects because of the synthetic field
bindings = network.NetworkDhcpAgentBinding.get_objects( bindings = network.NetworkDhcpAgentBinding.get_objects(
context, network_id=network_ids) context, network_id=network_ids)
# get the already fetched dhcp_agent objects # get the already fetched dhcp_agent objects
agent_objs = [binding.db_obj.dhcp_agent for binding in bindings] agent_objs = [binding.db_obj.dhcp_agent for binding in bindings]
# filter the dhcp_agent objects on admin_state_up # filter the dhcp_agent objects on admin_state_up

View File

@ -103,7 +103,7 @@ class AllowedAddressPairsMixin(object):
def _has_address_pairs(self, port): def _has_address_pairs(self, port):
return (validators.is_attr_set( return (validators.is_attr_set(
port['port'][addr_apidef.ADDRESS_PAIRS]) and port['port'][addr_apidef.ADDRESS_PAIRS]) and
port['port'][addr_apidef.ADDRESS_PAIRS] != []) port['port'][addr_apidef.ADDRESS_PAIRS] != [])
def _check_update_has_allowed_address_pairs(self, port): def _check_update_has_allowed_address_pairs(self, port):

View File

@ -47,4 +47,4 @@ class DataPlaneStatusMixin(object):
if port_db.get(dps_lib.DATA_PLANE_STATUS): if port_db.get(dps_lib.DATA_PLANE_STATUS):
port_res[dps_lib.DATA_PLANE_STATUS] = ( port_res[dps_lib.DATA_PLANE_STATUS] = (
port_db[dps_lib.DATA_PLANE_STATUS].data_plane_status) port_db[dps_lib.DATA_PLANE_STATUS].data_plane_status)

View File

@ -168,7 +168,7 @@ class DbBasePluginCommon(object):
if isinstance(subnet, subnet_obj.Subnet): if isinstance(subnet, subnet_obj.Subnet):
res['cidr'] = str(subnet.cidr) res['cidr'] = str(subnet.cidr)
res['allocation_pools'] = [{'start': str(pool.start), res['allocation_pools'] = [{'start': str(pool.start),
'end': str(pool.end)} 'end': str(pool.end)}
for pool in subnet.allocation_pools] for pool in subnet.allocation_pools]
res['host_routes'] = [{'destination': str(route.destination), res['host_routes'] = [{'destination': str(route.destination),
'nexthop': str(route.nexthop)} 'nexthop': str(route.nexthop)}
@ -182,7 +182,7 @@ class DbBasePluginCommon(object):
else: else:
res['cidr'] = subnet['cidr'] res['cidr'] = subnet['cidr']
res['allocation_pools'] = [{'start': pool['first_ip'], res['allocation_pools'] = [{'start': pool['first_ip'],
'end': pool['last_ip']} 'end': pool['last_ip']}
for pool in subnet['allocation_pools']] for pool in subnet['allocation_pools']]
res['host_routes'] = [{'destination': route['destination'], res['host_routes'] = [{'destination': route['destination'],
'nexthop': route['nexthop']} 'nexthop': route['nexthop']}

View File

@ -239,7 +239,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
with db_api.CONTEXT_READER.using(elevated): with db_api.CONTEXT_READER.using(elevated):
ports = model_query.query_with_hooks( ports = model_query.query_with_hooks(
elevated, models_v2.Port).filter( elevated, models_v2.Port).filter(
models_v2.Port.network_id == network_id) models_v2.Port.network_id == network_id)
if tenant_id == '*': if tenant_id == '*':
# for the wildcard we need to get all of the rbac entries to # for the wildcard we need to get all of the rbac entries to
# see if any allow the remaining ports on the network. # see if any allow the remaining ports on the network.
@ -476,8 +476,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
def _ensure_network_not_in_use(self, context, net_id): def _ensure_network_not_in_use(self, context, net_id):
non_auto_ports = context.session.query( non_auto_ports = context.session.query(
models_v2.Port.id).filter_by(network_id=net_id).filter( models_v2.Port.id).filter_by(network_id=net_id).filter(
~models_v2.Port.device_owner.in_( ~models_v2.Port.device_owner.in_(
_constants.AUTO_DELETE_PORT_OWNERS)) _constants.AUTO_DELETE_PORT_OWNERS))
if non_auto_ports.count(): if non_auto_ports.count():
ports = [port.id for port in non_auto_ports.all()] ports = [port.id for port in non_auto_ports.all()]
reason = _("There are one or more ports still in use on the " reason = _("There are one or more ports still in use on the "
@ -493,8 +493,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
self._ensure_network_not_in_use(context, id) self._ensure_network_not_in_use(context, id)
auto_delete_port_ids = [p.id for p in context.session.query( auto_delete_port_ids = [p.id for p in context.session.query(
models_v2.Port.id).filter_by(network_id=id).filter( models_v2.Port.id).filter_by(network_id=id).filter(
models_v2.Port.device_owner.in_( models_v2.Port.device_owner.in_(
_constants.AUTO_DELETE_PORT_OWNERS))] _constants.AUTO_DELETE_PORT_OWNERS))]
for port_id in auto_delete_port_ids: for port_id in auto_delete_port_ids:
try: try:
self.delete_port(context.elevated(), port_id) self.delete_port(context.elevated(), port_id)
@ -748,7 +748,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
fips[0]['ip_address']).version == subnet['ip_version']: fips[0]['ip_address']).version == subnet['ip_version']:
return return
external_gateway_info['external_fixed_ips'].append( external_gateway_info['external_fixed_ips'].append(
{'subnet_id': subnet['id']}) {'subnet_id': subnet['id']})
info = {'router': {'external_gateway_info': external_gateway_info}} info = {'router': {'external_gateway_info': external_gateway_info}}
l3plugin.update_router(ctx_admin, router_id, info) l3plugin.update_router(ctx_admin, router_id, info)
@ -1053,7 +1053,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
# Do not allow a subnet to be deleted if a router is attached to it # Do not allow a subnet to be deleted if a router is attached to it
sid = subnet['id'] sid = subnet['id']
self._subnet_check_ip_allocations_internal_router_ports( self._subnet_check_ip_allocations_internal_router_ports(
context, sid) context, sid)
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
if not is_auto_addr_subnet: if not is_auto_addr_subnet:
# we only automatically remove IP addresses from user ports if # we only automatically remove IP addresses from user ports if
@ -1354,9 +1354,9 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
raise exc.SubnetPoolNotFound(subnetpool_id=id) raise exc.SubnetPoolNotFound(subnetpool_id=id)
subnets_to_onboard = subnet_obj.Subnet.get_objects( subnets_to_onboard = subnet_obj.Subnet.get_objects(
context, context,
network_id=network_id, network_id=network_id,
ip_version=subnetpool.ip_version) ip_version=subnetpool.ip_version)
self._onboard_network_subnets(context, subnets_to_onboard, subnetpool) self._onboard_network_subnets(context, subnets_to_onboard, subnetpool)
@ -1378,8 +1378,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
subnetpool): subnetpool):
allocated_prefix_set = netaddr.IPSet( allocated_prefix_set = netaddr.IPSet(
[x.cidr for x in subnet_obj.Subnet.get_objects( [x.cidr for x in subnet_obj.Subnet.get_objects(
context, context,
subnetpool_id=subnetpool.id)]) subnetpool_id=subnetpool.id)])
prefixes_to_add = [] prefixes_to_add = []
for subnet in subnets_to_onboard: for subnet in subnets_to_onboard:
@ -1405,8 +1405,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
def _check_mac_addr_update(self, context, port, new_mac, device_owner): def _check_mac_addr_update(self, context, port, new_mac, device_owner):
if (device_owner and if (device_owner and
device_owner.startswith( device_owner.startswith(
constants.DEVICE_OWNER_NETWORK_PREFIX)): constants.DEVICE_OWNER_NETWORK_PREFIX)):
raise exc.UnsupportedPortDeviceOwner( raise exc.UnsupportedPortDeviceOwner(
op=_("mac address update"), port_id=id, op=_("mac address update"), port_id=id,
device_owner=device_owner) device_owner=device_owner)

View File

@ -136,7 +136,7 @@ class External_net_db_mixin(object):
net_obj.ExternalNetwork.delete_objects( net_obj.ExternalNetwork.delete_objects(
context, network_id=net_id) context, network_id=net_id)
net_obj.NetworkRBAC.delete_objects( net_obj.NetworkRBAC.delete_objects(
context, object_id=net_id, action='access_as_external') context, object_id=net_id, action='access_as_external')
net_data[extnet_apidef.EXTERNAL] = False net_data[extnet_apidef.EXTERNAL] = False
def _process_l3_delete(self, context, network_id): def _process_l3_delete(self, context, network_id):

View File

@ -62,7 +62,7 @@ class ExtraDhcpOptMixin(object):
def _get_port_extra_dhcp_opts_binding(self, context, port_id): def _get_port_extra_dhcp_opts_binding(self, context, port_id):
opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects( opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(
context, port_id=port_id) context, port_id=port_id)
# TODO(mhickey): When port serialization is available then # TODO(mhickey): When port serialization is available then
# the object list should be returned instead # the object list should be returned instead
return [{'opt_name': r.opt_name, 'opt_value': r.opt_value, return [{'opt_name': r.opt_name, 'opt_value': r.opt_value,

View File

@ -155,7 +155,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
subnet): subnet):
super(ExtraRoute_dbonly_mixin, super(ExtraRoute_dbonly_mixin,
self)._confirm_router_interface_not_in_use( self)._confirm_router_interface_not_in_use(
context, router_id, subnet) context, router_id, subnet)
subnet_cidr = netaddr.IPNetwork(subnet['cidr']) subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
extra_routes = self._get_extra_routes_by_router_id(context, router_id) extra_routes = self._get_extra_routes_by_router_id(context, router_id)
for route in extra_routes: for route in extra_routes:
@ -224,8 +224,8 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
context, context,
router_id, router_id,
{'router': {'router':
{'routes': {'routes':
self._add_extra_routes(old_routes, routes)}}) self._add_extra_routes(old_routes, routes)}})
return {'router': router} return {'router': router}
@db_api.retry_if_session_inactive() @db_api.retry_if_session_inactive()
@ -241,8 +241,8 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
context, context,
router_id, router_id,
{'router': {'router':
{'routes': {'routes':
self._remove_extra_routes(old_routes, routes)}}) self._remove_extra_routes(old_routes, routes)}})
return {'router': router} return {'router': router}

View File

@ -196,7 +196,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
return updated_types return updated_types
def update_db_subnet(self, context, subnet_id, s, oldpools, def update_db_subnet(self, context, subnet_id, s, oldpools,
subnet_obj=None): subnet_obj=None):
changes = {} changes = {}
if "dns_nameservers" in s: if "dns_nameservers" in s:
changes['dns_nameservers'] = ( changes['dns_nameservers'] = (
@ -243,8 +243,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
str(subnet['cidr']) != const.PROVISIONAL_IPV6_PD_PREFIX): str(subnet['cidr']) != const.PROVISIONAL_IPV6_PD_PREFIX):
# don't give out details of the overlapping subnet # don't give out details of the overlapping subnet
err_msg = (_("Requested subnet with cidr: %(cidr)s for " err_msg = (_("Requested subnet with cidr: %(cidr)s for "
"network: %(network_id)s overlaps with another " "network: %(network_id)s overlaps with another "
"subnet") % "subnet") %
{'cidr': new_subnet_cidr, {'cidr': new_subnet_cidr,
'network_id': network.id}) 'network_id': network.id})
LOG.info("Validation for CIDR: %(new_cidr)s failed - " LOG.info("Validation for CIDR: %(new_cidr)s failed - "
@ -369,9 +369,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
to_create_subnet_id = None to_create_subnet_id = None
segments = subnet_obj.Subnet.get_subnet_segment_ids( segments = subnet_obj.Subnet.get_subnet_segment_ids(
context, network_id, context, network_id,
ignored_service_type=const.DEVICE_OWNER_ROUTED, ignored_service_type=const.DEVICE_OWNER_ROUTED,
subnet_id=to_create_subnet_id) subnet_id=to_create_subnet_id)
associated_segments = set(segments) associated_segments = set(segments)
if None in associated_segments and len(associated_segments) > 1: if None in associated_segments and len(associated_segments) > 1:
@ -427,9 +427,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
raise exc.InvalidInput(error_message=msg) raise exc.InvalidInput(error_message=msg)
# Ensure that the IP is valid on the subnet # Ensure that the IP is valid on the subnet
if ('ip_address' in fixed and if ('ip_address' in fixed and
not ipam_utils.check_subnet_ip(subnet['cidr'], not ipam_utils.check_subnet_ip(subnet['cidr'],
fixed['ip_address'], fixed['ip_address'],
fixed['device_owner'])): fixed['device_owner'])):
raise exc.InvalidIpForSubnet(ip_address=fixed['ip_address']) raise exc.InvalidIpForSubnet(ip_address=fixed['ip_address'])
return subnet return subnet

View File

@ -63,13 +63,13 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
def reschedule_routers_from_down_agents(self): def reschedule_routers_from_down_agents(self):
"""Reschedule routers from down l3 agents if admin state is up.""" """Reschedule routers from down l3 agents if admin state is up."""
self.reschedule_resources_from_down_agents( self.reschedule_resources_from_down_agents(
agent_type='L3', agent_type='L3',
get_down_bindings=self.get_down_router_bindings, get_down_bindings=self.get_down_router_bindings,
agent_id_attr='l3_agent_id', agent_id_attr='l3_agent_id',
resource_id_attr='router_id', resource_id_attr='router_id',
resource_name='router', resource_name='router',
reschedule_resource=self.reschedule_router, reschedule_resource=self.reschedule_router,
rescheduling_failed=l3agentscheduler.RouterReschedulingFailed) rescheduling_failed=l3agentscheduler.RouterReschedulingFailed)
def get_down_router_bindings(self, context, agent_dead_limit): def get_down_router_bindings(self, context, agent_dead_limit):
cutoff = self.get_cutoff_time(agent_dead_limit) cutoff = self.get_cutoff_time(agent_dead_limit)
@ -225,7 +225,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
def _unbind_router(self, context, router_id, agent_id): def _unbind_router(self, context, router_id, agent_id):
rb_obj.RouterL3AgentBinding.delete_objects( rb_obj.RouterL3AgentBinding.delete_objects(
context, router_id=router_id, l3_agent_id=agent_id) context, router_id=router_id, l3_agent_id=agent_id)
def _unschedule_router(self, context, router_id, agents_ids): def _unschedule_router(self, context, router_id, agents_ids):
with db_api.CONTEXT_WRITER.using(context): with db_api.CONTEXT_WRITER.using(context):
@ -284,7 +284,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
def list_routers_on_l3_agent(self, context, agent_id): def list_routers_on_l3_agent(self, context, agent_id):
binding_objs = rb_obj.RouterL3AgentBinding.get_objects( binding_objs = rb_obj.RouterL3AgentBinding.get_objects(
context, l3_agent_id=agent_id) context, l3_agent_id=agent_id)
router_ids = [item.router_id for item in binding_objs] router_ids = [item.router_id for item in binding_objs]
if router_ids: if router_ids:
@ -366,7 +366,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
if not router_ids: if not router_ids:
return [] return []
record_objs = rb_obj.RouterL3AgentBinding.get_objects( record_objs = rb_obj.RouterL3AgentBinding.get_objects(
context, router_id=router_ids) context, router_id=router_ids)
if admin_state_up is not None: if admin_state_up is not None:
l3_agents = ag_obj.Agent.get_objects( l3_agents = ag_obj.Agent.get_objects(
context, context,
@ -456,7 +456,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agent_mode = agent_conf.get(constants.L3_AGENT_MODE, agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
constants.L3_AGENT_MODE_LEGACY) constants.L3_AGENT_MODE_LEGACY)
if (agent_mode == constants.L3_AGENT_MODE_DVR or if (agent_mode == constants.L3_AGENT_MODE_DVR or
agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL or agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL or
(agent_mode == constants.L3_AGENT_MODE_LEGACY and (agent_mode == constants.L3_AGENT_MODE_LEGACY and
is_router_distributed)): is_router_distributed)):
continue continue
@ -494,7 +494,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
if not agent_ids: if not agent_ids:
return None return None
agents = ag_obj.Agent.get_l3_agent_with_min_routers( agents = ag_obj.Agent.get_l3_agent_with_min_routers(
context, agent_ids) context, agent_ids)
return agents return agents
def get_hosts_to_notify(self, context, router_id): def get_hosts_to_notify(self, context, router_id):
@ -519,7 +519,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
pager = base_obj.Pager(sorts=[('binding_index', True)]) pager = base_obj.Pager(sorts=[('binding_index', True)])
bindings = rb_obj.RouterL3AgentBinding.get_objects( bindings = rb_obj.RouterL3AgentBinding.get_objects(
context, _pager=pager, router_id=router_id) context, _pager=pager, router_id=router_id)
return base_scheduler.get_vacant_binding_index( return base_scheduler.get_vacant_binding_index(
num_agents, bindings, rb_model.LOWEST_BINDING_INDEX, num_agents, bindings, rb_model.LOWEST_BINDING_INDEX,
force_scheduling=is_manual_scheduling) force_scheduling=is_manual_scheduling)

View File

@ -118,7 +118,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None and if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None and
subnet['ipv6_address_mode'] is not None): subnet['ipv6_address_mode'] is not None):
msg = (_('IPv6 subnet %s configured to receive RAs from an ' msg = (_('IPv6 subnet %s configured to receive RAs from an '
'external router cannot be added to Neutron Router.') % 'external router cannot be added to Neutron Router.') %
subnet['id']) subnet['id'])
raise n_exc.BadRequest(resource='router', msg=msg) raise n_exc.BadRequest(resource='router', msg=msg)
@ -483,7 +483,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
new_valid_gw_port_attachment = ( new_valid_gw_port_attachment = (
new_network_id and new_network_id and
(not router.gw_port or (not router.gw_port or
router.gw_port['network_id'] != new_network_id)) router.gw_port['network_id'] != new_network_id))
if new_valid_gw_port_attachment: if new_valid_gw_port_attachment:
subnets = self._core_plugin.get_subnets_by_network(context, subnets = self._core_plugin.get_subnets_by_network(context,
new_network_id) new_network_id)
@ -611,7 +611,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# TODO(ralonsoh): move this section (port deletion) out of the DB # TODO(ralonsoh): move this section (port deletion) out of the DB
# transaction. # transaction.
router_ports_ids = (rp.port_id for rp in router_ports_ids = (rp.port_id for rp in
l3_obj.RouterPort.get_objects(context, router_id=id)) l3_obj.RouterPort.get_objects(context,
router_id=id))
if db_api.is_session_active(context.session): if db_api.is_session_active(context.session):
context.GUARD_TRANSACTION = False context.GUARD_TRANSACTION = False
for rp_id in router_ports_ids: for rp_id in router_ports_ids:
@ -842,12 +843,12 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if self._port_has_ipv6_address(port): if self._port_has_ipv6_address(port):
for existing_port in (rp.port for rp in router.attached_ports): for existing_port in (rp.port for rp in router.attached_ports):
if (existing_port["id"] != port["id"] and if (existing_port["id"] != port["id"] and
existing_port["network_id"] == port["network_id"] and existing_port["network_id"] == port["network_id"] and
self._port_has_ipv6_address(existing_port)): self._port_has_ipv6_address(existing_port)):
msg = _("Router already contains IPv6 port %(p)s " msg = _("Router already contains IPv6 port %(p)s "
"belonging to network id %(nid)s. Only one IPv6 port " "belonging to network id %(nid)s. Only one IPv6 "
"from the same network subnet can be connected to a " "port from the same network subnet can be "
"router.") "connected to a router.")
raise n_exc.BadRequest(resource='router', msg=msg % { raise n_exc.BadRequest(resource='router', msg=msg % {
'p': existing_port['id'], 'p': existing_port['id'],
'nid': existing_port['network_id']}) 'nid': existing_port['network_id']})
@ -884,7 +885,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if context.project_id not in rbac_allowed_projects: if context.project_id not in rbac_allowed_projects:
msg = (_('Cannot add interface to router because subnet ' msg = (_('Cannot add interface to router because subnet '
'%s is not owned by project making the request') '%s is not owned by project making the request')
% subnet_id) % subnet_id)
raise n_exc.BadRequest(resource='router', msg=msg) raise n_exc.BadRequest(resource='router', msg=msg)
self._validate_subnet_address_mode(subnet) self._validate_subnet_address_mode(subnet)
self._check_for_dup_router_subnets(context, router, self._check_for_dup_router_subnets(context, router,
@ -902,8 +903,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
fixed_ips = list(map(dict, port['port']['fixed_ips'])) fixed_ips = list(map(dict, port['port']['fixed_ips']))
fixed_ips.append(fixed_ip) fixed_ips.append(fixed_ip)
return (self._core_plugin.update_port( return (self._core_plugin.update_port(
context, port['port_id'], context, port['port_id'],
{'port': {'fixed_ips': fixed_ips}}), {'port': {'fixed_ips': fixed_ips}}),
[subnet], [subnet],
False) False)
@ -952,7 +953,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# _validate_interface_info ensures that either of add_by_* is True. # _validate_interface_info ensures that either of add_by_* is True.
else: else:
port, subnets, new_router_intf = self._add_interface_by_subnet( port, subnets, new_router_intf = self._add_interface_by_subnet(
context, router, interface_info['subnet_id'], device_owner) context, router, interface_info['subnet_id'], device_owner)
cleanup_port = new_router_intf # only cleanup port we created cleanup_port = new_router_intf # only cleanup port we created
revert_value = {'device_id': '', revert_value = {'device_id': '',
'device_owner': port['device_owner']} 'device_owner': port['device_owner']}
@ -1114,7 +1115,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
subnets = subnet_obj.Subnet.get_objects(context, id=port_subnet_ids) subnets = subnet_obj.Subnet.get_objects(context, id=port_subnet_ids)
for subnet in subnets: for subnet in subnets:
self._confirm_router_interface_not_in_use( self._confirm_router_interface_not_in_use(
context, router_id, subnet) context, router_id, subnet)
self._core_plugin.delete_port(context, port['id'], self._core_plugin.delete_port(context, port['id'],
l3_port_check=False) l3_port_check=False)
return port, subnets return port, subnets
@ -1162,7 +1163,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
device_owner) device_owner)
else: else:
port, subnets = self._remove_interface_by_subnet( port, subnets = self._remove_interface_by_subnet(
context, router_id, subnet_id, device_owner) context, router_id, subnet_id, device_owner)
gw_network_id = None gw_network_id = None
gw_ips = [] gw_ips = []
@ -1185,7 +1186,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
port['id'], port['network_id'], port['id'], port['network_id'],
subnets[0]['id'], subnets[0]['id'],
[subnet['id'] for subnet in [subnet['id'] for subnet in
subnets]) subnets])
def _get_floatingip(self, context, id): def _get_floatingip(self, context, id):
floatingip = l3_obj.FloatingIP.get_object(context, id=id) floatingip = l3_obj.FloatingIP.get_object(context, id=id)
@ -1248,16 +1249,21 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
RouterPort = l3_models.RouterPort RouterPort = l3_models.RouterPort
gw_port = orm.aliased(models_v2.Port, name="gw_port") gw_port = orm.aliased(models_v2.Port, name="gw_port")
# TODO(lujinluo): Need IPAllocation and Port object # TODO(lujinluo): Need IPAllocation and Port object
routerport_qry = context.session.query( routerport_qry = (context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join( RouterPort.router_id, models_v2.IPAllocation.ip_address).
RouterPort.port, models_v2.Port.fixed_ips).filter( join(RouterPort.port, models_v2.Port.fixed_ips).
models_v2.Port.network_id == internal_port['network_id'], filter(models_v2.Port.network_id ==
RouterPort.port_type.in_(constants.ROUTER_INTERFACE_OWNERS), internal_port['network_id'],
models_v2.IPAllocation.subnet_id == internal_subnet['id'] RouterPort.port_type.in_(
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter( constants.ROUTER_INTERFACE_OWNERS),
gw_port.network_id == external_network_id, models_v2.IPAllocation.subnet_id ==
gw_port.device_owner == DEVICE_OWNER_ROUTER_GW internal_subnet['id']).
).distinct() join(gw_port,
gw_port.device_id == RouterPort.router_id).
filter(gw_port.network_id == external_network_id,
gw_port.device_owner ==
DEVICE_OWNER_ROUTER_GW).
distinct())
first_router_id = None first_router_id = None
for router_id, interface_ip in routerport_qry: for router_id, interface_ip in routerport_qry:
@ -1336,7 +1342,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
""" """
(internal_port, internal_subnet_id, (internal_port, internal_subnet_id,
internal_ip_address) = self._internal_fip_assoc_data( internal_ip_address) = self._internal_fip_assoc_data(
context, fip, floatingip_obj.project_id) context, fip, floatingip_obj.project_id)
router_id = self._get_router_for_floatingip( router_id = self._get_router_for_floatingip(
context, internal_port, context, internal_port,
internal_subnet_id, floatingip_obj.floating_network_id) internal_subnet_id, floatingip_obj.floating_network_id)
@ -1366,10 +1372,10 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
return port_id, internal_ip_address, router_id return port_id, internal_ip_address, router_id
fip_exists = l3_obj.FloatingIP.objects_exist( fip_exists = l3_obj.FloatingIP.objects_exist(
context, context,
fixed_port_id=fip['port_id'], fixed_port_id=fip['port_id'],
floating_network_id=floatingip_obj.floating_network_id, floating_network_id=floatingip_obj.floating_network_id,
fixed_ip_address=netaddr.IPAddress(internal_ip_address)) fixed_ip_address=netaddr.IPAddress(internal_ip_address))
if fip_exists: if fip_exists:
floating_ip_address = (str(floatingip_obj.floating_ip_address) floating_ip_address = (str(floatingip_obj.floating_ip_address)
if floatingip_obj.floating_ip_address if floatingip_obj.floating_ip_address
@ -1627,7 +1633,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if (port['device_owner'] == if (port['device_owner'] ==
constants.DEVICE_OWNER_FLOATINGIP): constants.DEVICE_OWNER_FLOATINGIP):
registry.publish(resources.FLOATING_IP, events.PRECOMMIT_DELETE, registry.publish(resources.FLOATING_IP, events.PRECOMMIT_DELETE,
self, payload) self, payload)
def _delete_floatingip(self, context, id): def _delete_floatingip(self, context, id):
floatingip = self._get_floatingip(context, id) floatingip = self._get_floatingip(context, id)
@ -2083,10 +2089,10 @@ class L3RpcNotifierMixin(object):
subnet_id = updated['id'] subnet_id = updated['id']
with db_api.CONTEXT_READER.using(context): with db_api.CONTEXT_READER.using(context):
query = context.session.query(models_v2.Port.device_id).filter_by( query = context.session.query(models_v2.Port.device_id).filter_by(
network_id=network_id, network_id=network_id,
device_owner=DEVICE_OWNER_ROUTER_GW) device_owner=DEVICE_OWNER_ROUTER_GW)
query = query.join(models_v2.Port.fixed_ips).filter( query = query.join(models_v2.Port.fixed_ips).filter(
models_v2.IPAllocation.subnet_id == subnet_id) models_v2.IPAllocation.subnet_id == subnet_id)
router_ids = set(port.device_id for port in query) router_ids = set(port.device_id for port in query)
for router_id in router_ids: for router_id in router_ids:
l3plugin.notify_router_updated(context, router_id) l3plugin.notify_router_updated(context, router_id)

View File

@ -64,9 +64,10 @@ _IS_ADMIN_STATE_DOWN_NECESSARY = None
def is_admin_state_down_necessary(): def is_admin_state_down_necessary():
global _IS_ADMIN_STATE_DOWN_NECESSARY global _IS_ADMIN_STATE_DOWN_NECESSARY
if _IS_ADMIN_STATE_DOWN_NECESSARY is None: if _IS_ADMIN_STATE_DOWN_NECESSARY is None:
_IS_ADMIN_STATE_DOWN_NECESSARY = \ _IS_ADMIN_STATE_DOWN_NECESSARY = (
router_admin_state_down_before_update.ALIAS in (extensions. router_admin_state_down_before_update.ALIAS in (
PluginAwareExtensionManager.get_instance().extensions) extensions.PluginAwareExtensionManager.get_instance().
extensions))
return _IS_ADMIN_STATE_DOWN_NECESSARY return _IS_ADMIN_STATE_DOWN_NECESSARY
@ -621,8 +622,8 @@ class DVRResourceOperationHandler(object):
if cs_port: if cs_port:
fixed_ips = ( fixed_ips = (
[fixedip for fixedip in [fixedip for fixedip in
cs_port['fixed_ips'] cs_port['fixed_ips']
if fixedip['subnet_id'] != subnet_id]) if fixedip['subnet_id'] != subnet_id])
if len(fixed_ips) == len(cs_port['fixed_ips']): if len(fixed_ips) == len(cs_port['fixed_ips']):
# The subnet being detached from router is not part of # The subnet being detached from router is not part of
@ -1039,9 +1040,9 @@ class _DVRAgentInterfaceMixin(object):
# agent on re-syncs then we need to add the appropriate # agent on re-syncs then we need to add the appropriate
# port['agent'] before updating the dict. # port['agent'] before updating the dict.
if (l3_agent_mode == ( if (l3_agent_mode == (
const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
requesting_agent_mode == ( requesting_agent_mode == (
const.L3_AGENT_MODE_DVR_NO_EXTERNAL)): const.L3_AGENT_MODE_DVR_NO_EXTERNAL)):
port['agent'] = ( port['agent'] = (
const.L3_AGENT_MODE_DVR_NO_EXTERNAL) const.L3_AGENT_MODE_DVR_NO_EXTERNAL)
@ -1053,9 +1054,9 @@ class _DVRAgentInterfaceMixin(object):
# the portbinding host resides in dvr_no_external # the portbinding host resides in dvr_no_external
# agent then include the port. # agent then include the port.
if (l3_agent_mode == ( if (l3_agent_mode == (
const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
requesting_agent_mode == ( requesting_agent_mode == (
const.L3_AGENT_MODE_DVR_SNAT)): const.L3_AGENT_MODE_DVR_SNAT)):
port['agent'] = ( port['agent'] = (
const.L3_AGENT_MODE_DVR_NO_EXTERNAL) const.L3_AGENT_MODE_DVR_NO_EXTERNAL)
port_dict.update({port['id']: port}) port_dict.update({port['id']: port})

View File

@ -325,7 +325,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
are bound are bound
""" """
subnet_ids = self.get_subnet_ids_on_router(context, router_id, subnet_ids = self.get_subnet_ids_on_router(context, router_id,
keep_gateway_port=False) keep_gateway_port=False)
hosts = self._get_dvr_hosts_for_subnets(context, subnet_ids) hosts = self._get_dvr_hosts_for_subnets(context, subnet_ids)
LOG.debug('Hosts for router %s: %s', router_id, hosts) LOG.debug('Hosts for router %s: %s', router_id, hosts)
return hosts return hosts
@ -420,7 +420,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
with_dvr=True): with_dvr=True):
result_set = set(super(L3_DVRsch_db_mixin, result_set = set(super(L3_DVRsch_db_mixin,
self)._get_router_ids_for_agent( self)._get_router_ids_for_agent(
context, agent_db, router_ids, with_dvr)) context, agent_db, router_ids, with_dvr))
if not with_dvr: if not with_dvr:
return result_set return result_set
LOG.debug("Routers %(router_ids)s bound to L3 agent in host %(host)s", LOG.debug("Routers %(router_ids)s bound to L3 agent in host %(host)s",
@ -435,9 +435,9 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
# dvr routers are not explicitly scheduled to agents on hosts with # dvr routers are not explicitly scheduled to agents on hosts with
# dvr serviceable ports, so need special handling # dvr serviceable ports, so need special handling
if (self._get_agent_mode(agent_db) in if (self._get_agent_mode(agent_db) in
[n_const.L3_AGENT_MODE_DVR, [n_const.L3_AGENT_MODE_DVR,
n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL, n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL,
n_const.L3_AGENT_MODE_DVR_SNAT]): n_const.L3_AGENT_MODE_DVR_SNAT]):
dvr_routers = self._get_dvr_router_ids_for_host(context, dvr_routers = self._get_dvr_router_ids_for_host(context,
agent_db['host']) agent_db['host'])
if not router_ids: if not router_ids:
@ -448,10 +448,10 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
context, router_id, keep_gateway_port=False) context, router_id, keep_gateway_port=False)
if (subnet_ids and ( if (subnet_ids and (
self._check_dvr_serviceable_ports_on_host( self._check_dvr_serviceable_ports_on_host(
context, agent_db['host'], context, agent_db['host'],
list(subnet_ids)) or list(subnet_ids)) or
self._is_router_related_to_dvr_routers( self._is_router_related_to_dvr_routers(
context, router_id, dvr_routers))): context, router_id, dvr_routers))):
result_set.add(router_id) result_set.add(router_id)
LOG.debug("Routers %(router_ids)s are scheduled or have " LOG.debug("Routers %(router_ids)s are scheduled or have "
@ -557,7 +557,7 @@ def _notify_port_delete(event, resource, trigger, payload):
context = payload.context context = payload.context
port = payload.latest_state port = payload.latest_state
get_related_hosts_info = payload.metadata.get( get_related_hosts_info = payload.metadata.get(
"get_related_hosts_info", True) "get_related_hosts_info", True)
l3plugin = directory.get_plugin(plugin_constants.L3) l3plugin = directory.get_plugin(plugin_constants.L3)
if port: if port:
port_host = port.get(portbindings.HOST_ID) port_host = port.get(portbindings.HOST_ID)
@ -605,7 +605,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, payload):
dest_host = new_port_profile.get('migrating_to') dest_host = new_port_profile.get('migrating_to')
if is_new_port_binding_changed or is_bound_port_moved or dest_host: if is_new_port_binding_changed or is_bound_port_moved or dest_host:
fips = l3plugin._get_floatingips_by_port_id( fips = l3plugin._get_floatingips_by_port_id(
context, port_id=original_port['id']) context, port_id=original_port['id'])
fip = fips[0] if fips else None fip = fips[0] if fips else None
if fip: if fip:
fip_router_id = fip['router_id'] fip_router_id = fip['router_id']

View File

@ -72,7 +72,7 @@ class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin):
# Calls superclass, pass router db object for avoiding re-loading # Calls superclass, pass router db object for avoiding re-loading
router = super(L3_gw_ip_qos_dbonly_mixin, router = super(L3_gw_ip_qos_dbonly_mixin,
self)._update_router_gw_info( self)._update_router_gw_info(
context, router_id, info, request_body, router) context, router_id, info, request_body, router)
if not self._is_gw_ip_qos_supported: if not self._is_gw_ip_qos_supported:
return router return router

View File

@ -389,7 +389,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
# net was deleted, throw a retry to start over to create another # net was deleted, throw a retry to start over to create another
raise db_exc.RetryRequest( raise db_exc.RetryRequest(
l3ha_exc.HANetworkConcurrentDeletion( l3ha_exc.HANetworkConcurrentDeletion(
tenant_id=router['tenant_id'])) tenant_id=router['tenant_id']))
@registry.receives(resources.ROUTER, [events.AFTER_CREATE], @registry.receives(resources.ROUTER, [events.AFTER_CREATE],
priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
@ -456,8 +456,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
'ha', requested_ha_state) 'ha', requested_ha_state)
return return
self._migrate_router_ports( self._migrate_router_ports(
payload.context, payload.desired_state, payload.context, payload.desired_state,
old_owner=old_owner, new_owner=new_owner) old_owner=old_owner, new_owner=new_owner)
self.set_extra_attr_value( self.set_extra_attr_value(
payload.context, payload.desired_state, 'ha', requested_ha_state) payload.context, payload.desired_state, 'ha', requested_ha_state)

View File

@ -95,13 +95,9 @@ def _contracts(context, directive, phase):
def _alter_column(context, directive, phase): def _alter_column(context, directive, phase):
is_expand = phase == 'expand' is_expand = phase == 'expand'
if is_expand and ( if is_expand and directive.modify_nullable is True:
directive.modify_nullable is True
):
return directive return directive
elif not is_expand and ( elif not is_expand and directive.modify_nullable is False:
directive.modify_nullable is False
):
return directive return directive
else: else:
raise NotImplementedError( raise NotImplementedError(

View File

@ -49,7 +49,7 @@ class NetworkSegmentRange(standard_attr.HasStandardAttributes,
constants.TYPE_GRE, constants.TYPE_GRE,
constants.TYPE_GENEVE, constants.TYPE_GENEVE,
name='network_segment_range_network_type'), name='network_segment_range_network_type'),
nullable=False) nullable=False)
# network segment range physical network, only applicable for VLAN. # network segment range physical network, only applicable for VLAN.
physical_network = sa.Column(sa.String(64)) physical_network = sa.Column(sa.String(64))

View File

@ -30,7 +30,7 @@ class SubnetServiceType(model_base.BASEV2):
sa.ForeignKey('subnets.id', ondelete="CASCADE")) sa.ForeignKey('subnets.id', ondelete="CASCADE"))
# Service types must be valid device owners, therefore share max length # Service types must be valid device owners, therefore share max length
service_type = sa.Column(sa.String( service_type = sa.Column(sa.String(
length=db_const.DEVICE_OWNER_FIELD_SIZE)) length=db_const.DEVICE_OWNER_FIELD_SIZE))
subnet = orm.relationship(models_v2.Subnet, load_on_pending=True, subnet = orm.relationship(models_v2.Subnet, load_on_pending=True,
backref=orm.backref('service_types', backref=orm.backref('service_types',
lazy='subquery', lazy='subquery',

View File

@ -110,8 +110,8 @@ class IpAvailabilityMixin(object):
query = query.outerjoin(mod.Subnet, query = query.outerjoin(mod.Subnet,
mod.Network.id == mod.Subnet.network_id) mod.Network.id == mod.Subnet.network_id)
query = query.outerjoin( query = query.outerjoin(
mod.IPAllocationPool, mod.IPAllocationPool,
mod.Subnet.id == mod.IPAllocationPool.subnet_id) mod.Subnet.id == mod.IPAllocationPool.subnet_id)
return cls._adjust_query_for_filters(query, filters) return cls._adjust_query_for_filters(query, filters)
@classmethod @classmethod
@ -130,13 +130,13 @@ class IpAvailabilityMixin(object):
# Add IPAllocationPool data # Add IPAllocationPool data
if row.last_ip: if row.last_ip:
pool_total = netaddr.IPRange( pool_total = netaddr.IPRange(
netaddr.IPAddress(row.first_ip), netaddr.IPAddress(row.first_ip),
netaddr.IPAddress(row.last_ip)).size netaddr.IPAddress(row.last_ip)).size
cur_total = subnet_totals_dict.get(row.subnet_id, 0) cur_total = subnet_totals_dict.get(row.subnet_id, 0)
subnet_totals_dict[row.subnet_id] = cur_total + pool_total subnet_totals_dict[row.subnet_id] = cur_total + pool_total
else: else:
subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork( subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork(
row.cidr, version=row.ip_version).size row.cidr, version=row.ip_version).size
return subnet_totals_dict return subnet_totals_dict

View File

@ -156,7 +156,7 @@ def get_revision_row(context, resource_uuid):
with db_api.CONTEXT_READER.using(context): with db_api.CONTEXT_READER.using(context):
return context.session.query( return context.session.query(
ovn_models.OVNRevisionNumbers).filter_by( ovn_models.OVNRevisionNumbers).filter_by(
resource_uuid=resource_uuid).one() resource_uuid=resource_uuid).one()
except exc.NoResultFound: except exc.NoResultFound:
pass pass

View File

@ -37,8 +37,8 @@ class QuotaUsageInfo(collections.namedtuple(
class ReservationInfo(collections.namedtuple( class ReservationInfo(collections.namedtuple(
'ReservationInfo', ['reservation_id', 'project_id', 'ReservationInfo', ['reservation_id', 'project_id',
'expiration', 'deltas'])): 'expiration', 'deltas'])):
"""Information about a resource reservation.""" """Information about a resource reservation."""
@ -176,7 +176,7 @@ def create_reservation(context, project_id, deltas, expiration=None):
# This method is usually called from within another transaction. # This method is usually called from within another transaction.
# Consider using begin_nested # Consider using begin_nested
expiration = expiration or ( expiration = expiration or (
utcnow() + datetime.timedelta(0, RESERVATION_EXPIRATION_TIMEOUT)) utcnow() + datetime.timedelta(0, RESERVATION_EXPIRATION_TIMEOUT))
delta_objs = [] delta_objs = []
for (resource, delta) in deltas.items(): for (resource, delta) in deltas.items():
delta_objs.append(quota_obj.ResourceDelta( delta_objs.append(quota_obj.ResourceDelta(

View File

@ -66,7 +66,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI):
# init with defaults # init with defaults
project_quota = dict((key, resource.default) project_quota = dict((key, resource.default)
for key, resource in resources.items()) for key, resource in resources.items())
# update with project specific limits # update with project specific limits
quota_objs = quota_obj.Quota.get_objects(context, quota_objs = quota_obj.Quota.get_objects(context,
@ -135,7 +135,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI):
resourcekey2: ... resourcekey2: ...
""" """
project_default = dict((key, resource.default) project_default = dict((key, resource.default)
for key, resource in resources.items()) for key, resource in resources.items())
all_project_quotas = {} all_project_quotas = {}

View File

@ -51,8 +51,7 @@ class Quota(model_base.BASEV2, model_base.HasId, model_base.HasProject):
'project_id', 'project_id',
'resource', 'resource',
name='uniq_quotas0project_id0resource'), name='uniq_quotas0project_id0resource'),
model_base.BASEV2.__table_args__ model_base.BASEV2.__table_args__)
)
class QuotaUsage(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex): class QuotaUsage(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex):

View File

@ -207,11 +207,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
try: try:
with db_api.CONTEXT_READER.using(context): with db_api.CONTEXT_READER.using(context):
ret = self._make_security_group_dict(self._get_security_group( ret = self._make_security_group_dict(self._get_security_group(
context, id, context, id, fields=fields), fields)
fields=fields),
fields)
if (fields is None or len(fields) == 0 or if (fields is None or len(fields) == 0 or
'security_group_rules' in fields): 'security_group_rules' in fields):
rules = self.get_security_group_rules( rules = self.get_security_group_rules(
context_lib.get_admin_context(), context_lib.get_admin_context(),
{'security_group_id': [id]}) {'security_group_id': [id]})
@ -311,13 +309,13 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
sg.update() sg.update()
sg_dict = self._make_security_group_dict(sg) sg_dict = self._make_security_group_dict(sg)
self._registry_publish( self._registry_publish(
resources.SECURITY_GROUP, resources.SECURITY_GROUP,
events.PRECOMMIT_UPDATE, events.PRECOMMIT_UPDATE,
exc_cls=ext_sg.SecurityGroupConflict, exc_cls=ext_sg.SecurityGroupConflict,
payload=events.DBEventPayload( payload=events.DBEventPayload(
context, request_body=s, context, request_body=s,
states=(original_security_group,), states=(original_security_group,),
resource_id=id, desired_state=sg_dict)) resource_id=id, desired_state=sg_dict))
registry.publish(resources.SECURITY_GROUP, events.AFTER_UPDATE, self, registry.publish(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
payload=events.DBEventPayload( payload=events.DBEventPayload(
context, request_body=s, context, request_body=s,
@ -411,9 +409,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
res = self._create_security_group_rule(context, security_group_rule) res = self._create_security_group_rule(context, security_group_rule)
registry.publish(resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, registry.publish(resources.SECURITY_GROUP_RULE, events.AFTER_CREATE,
self, payload=events.DBEventPayload( self, payload=events.DBEventPayload(
context, context,
resource_id=res['id'], resource_id=res['id'],
states=(res,))) states=(res,)))
return res return res
@ -592,7 +590,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
str(constants.PROTO_NUM_IPV6_ROUTE)]: str(constants.PROTO_NUM_IPV6_ROUTE)]:
if rule['ethertype'] == constants.IPv4: if rule['ethertype'] == constants.IPv4:
raise ext_sg.SecurityGroupEthertypeConflictWithProtocol( raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
ethertype=rule['ethertype'], protocol=rule['protocol']) ethertype=rule['ethertype'], protocol=rule['protocol'])
def _validate_single_tenant_and_group(self, security_group_rules): def _validate_single_tenant_and_group(self, security_group_rules):
"""Check that all rules belong to the same security group and tenant """Check that all rules belong to the same security group and tenant
@ -725,7 +723,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
return none_char return none_char
elif key == 'protocol': elif key == 'protocol':
return str(self._get_ip_proto_name_and_num( return str(self._get_ip_proto_name_and_num(
value, ethertype=rule.get('ethertype'))) value, ethertype=rule.get('ethertype')))
return str(value) return str(value)
comparison_keys = [ comparison_keys = [
@ -1001,9 +999,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
is either [] or not is_attr_set, otherwise return False is either [] or not is_attr_set, otherwise return False
""" """
if (ext_sg.SECURITYGROUPS in port['port'] and if (ext_sg.SECURITYGROUPS in port['port'] and
not (validators.is_attr_set( not (validators.is_attr_set(
port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS]) and
port['port'][ext_sg.SECURITYGROUPS] != [])): port['port'][ext_sg.SECURITYGROUPS] != [])):
return True return True
return False return False
@ -1013,8 +1011,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
This method is called both for port create and port update. This method is called both for port create and port update.
""" """
if (ext_sg.SECURITYGROUPS in port['port'] and if (ext_sg.SECURITYGROUPS in port['port'] and
(validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and (validators.is_attr_set(
port['port'][ext_sg.SECURITYGROUPS] != [])): port['port'][ext_sg.SECURITYGROUPS]) and
port['port'][ext_sg.SECURITYGROUPS] != [])):
return True return True
return False return False
@ -1030,9 +1029,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
need_notify = False need_notify = False
port_updates = port['port'] port_updates = port['port']
if (ext_sg.SECURITYGROUPS in port_updates and if (ext_sg.SECURITYGROUPS in port_updates and
not helpers.compare_elements( not helpers.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS), original_port.get(ext_sg.SECURITYGROUPS),
port_updates[ext_sg.SECURITYGROUPS])): port_updates[ext_sg.SECURITYGROUPS])):
# delete the port binding and read it with the new rules # delete the port binding and read it with the new rules
sgs = self._get_security_groups_on_port(context, port) sgs = self._get_security_groups_on_port(context, port)
port_updates[ext_sg.SECURITYGROUPS] = [sg.id for sg in sgs] port_updates[ext_sg.SECURITYGROUPS] = [sg.id for sg in sgs]

View File

@ -100,10 +100,10 @@ class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin):
""" """
need_notify = False need_notify = False
if (original_port['fixed_ips'] != updated_port['fixed_ips'] or if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
original_port['mac_address'] != updated_port['mac_address'] or original_port['mac_address'] != updated_port['mac_address'] or
not helpers.compare_elements( not helpers.compare_elements(
original_port.get(ext_sg.SECURITYGROUPS), original_port.get(ext_sg.SECURITYGROUPS),
updated_port.get(ext_sg.SECURITYGROUPS))): updated_port.get(ext_sg.SECURITYGROUPS))):
need_notify = True need_notify = True
return need_notify return need_notify
@ -189,8 +189,8 @@ class SecurityGroupInfoAPIMixin(object):
if remote_gid: if remote_gid:
if (remote_gid if (remote_gid
not in sg_info['devices'][port_id][ not in sg_info['devices'][port_id][
'security_group_source_groups']): 'security_group_source_groups']):
sg_info['devices'][port_id][ sg_info['devices'][port_id][
'security_group_source_groups'].append(remote_gid) 'security_group_source_groups'].append(remote_gid)
if remote_gid not in remote_security_group_info: if remote_gid not in remote_security_group_info:
@ -200,11 +200,11 @@ class SecurityGroupInfoAPIMixin(object):
remote_security_group_info[remote_gid][ethertype] = set() remote_security_group_info[remote_gid][ethertype] = set()
elif remote_ag_id: elif remote_ag_id:
if (remote_ag_id if (remote_ag_id
not in sg_info['devices'][port_id][ not in sg_info['devices'][port_id][
'security_group_remote_address_groups']): 'security_group_remote_address_groups']):
sg_info['devices'][port_id][ sg_info['devices'][port_id][
'security_group_remote_address_groups'].append( 'security_group_remote_address_groups'].append(
remote_ag_id) remote_ag_id)
if remote_ag_id not in remote_address_group_info: if remote_ag_id not in remote_address_group_info:
remote_address_group_info[remote_ag_id] = {} remote_address_group_info[remote_ag_id] = {}
if ethertype not in remote_address_group_info[remote_ag_id]: if ethertype not in remote_address_group_info[remote_ag_id]:

View File

@ -47,8 +47,7 @@ class ServiceTypeManager(object):
return list(chain.from_iterable( return list(chain.from_iterable(
self.config[svc_type].get_service_providers(filters, fields) self.config[svc_type].get_service_providers(filters, fields)
for svc_type in filters['service_type'] for svc_type in filters['service_type']
if svc_type in self.config) if svc_type in self.config))
)
return list( return list(
chain.from_iterable( chain.from_iterable(
self.config[p].get_service_providers(filters, fields) self.config[p].get_service_providers(filters, fields)