Fix some new pylint "R" warnings

After updating pylint, it started emitting additional "R"
warnings in some cases; fix some of them:

  use-a-generator,
  unnecessary-lambda-assignment,
  consider-using-max-builtin,
  consider-using-generator,
  consider-using-in,
  use-list-literal,
  consider-using-from-import

Trivialfix

Change-Id: Ife6565cefcc30b4e8a0df9121c9454cf744225df
Brian Haley 2023-05-09 22:41:01 -04:00
parent ed274efcf7
commit 929b383743
22 changed files with 68 additions and 53 deletions
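
For reference, a minimal sketch of the before/after patterns these checks flag, assuming nothing beyond the standard library; all names are made up for illustration and are not taken from the Neutron code touched below:

    # All names here are illustrative only.
    from os import path  # consider-using-from-import: not "import os.path as path"


    def _square(x):
        # unnecessary-lambda-assignment: a def instead of "square = lambda x: x * x"
        return x * x


    def demo(values, method):
        # use-list-literal: a [] literal instead of list()
        results = []

        # use-a-generator / consider-using-generator: pass a generator expression
        # to any()/all()/max()/min() instead of building a temporary list first.
        has_positive = any(v > 0 for v in values)
        largest = max((v for v in values), default=0)

        # consider-using-in: one membership test instead of chained == comparisons.
        is_write = method in ('PUT', 'DELETE')

        # consider-using-max-builtin: clamp with max() instead of
        # "if workers < 1: workers = 1".
        workers = max(len(values) // 2, 1)

        results.extend(
            [_square(workers), has_positive, largest, is_write,
             path.basename('demo.py')])
        return results

For example, demo([1, 2, 3], 'PUT') returns [1, True, 3, True, 'demo.py']. The hunks below apply the same kinds of rewrites to the actual call sites.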


@@ -103,15 +103,8 @@ disable=
 too-many-statements,
 consider-using-set-comprehension,
 useless-object-inheritance,
-use-a-generator,
-unnecessary-lambda-assignment,
 super-with-arguments,
-consider-using-max-builtin,
-use-dict-literal,
-consider-using-generator,
-consider-using-in,
-use-list-literal,
-consider-using-from-import
+use-dict-literal
 
 [BASIC]
 # Variable names can be 1 to 31 characters long, with lowercase and underscores


@@ -609,6 +609,10 @@ class L3NATAgent(ha.AgentMixin,
         self._queue.add(update)
 
     def _process_network_update(self, router_id, network_id):
+
+        def _port_belongs(p):
+            return p['network_id'] == network_id
+
         ri = self.router_info.get(router_id)
         if not ri:
             return
@@ -617,8 +621,7 @@ class L3NATAgent(ha.AgentMixin,
         ports = list(ri.internal_ports)
         if ri.ex_gw_port:
             ports.append(ri.ex_gw_port)
-        port_belongs = lambda p: p['network_id'] == network_id
-        if any(port_belongs(p) for p in ports):
+        if any(_port_belongs(p) for p in ports):
             update = queue.ResourceUpdate(
                 ri.router_id, PRIORITY_SYNC_ROUTERS_TASK)
             self._resync_router(update)


@@ -923,14 +923,14 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
             del self.sg_members[sg_id]
 
     def _find_deleted_sg_rules(self, sg_id):
-        del_rules = list()
+        del_rules = []
         for pre_rule in self.pre_sg_rules.get(sg_id, []):
             if pre_rule not in self.sg_rules.get(sg_id, []):
                 del_rules.append(pre_rule)
         return del_rules
 
     def _find_devices_on_security_group(self, sg_id):
-        device_list = list()
+        device_list = []
         for device in self.filtered_ports.values():
             if sg_id in device.get('security_groups', []):
                 device_list.append(device)


@@ -855,11 +855,14 @@ def _get_rules_by_chain(rules):
 
 
 def _ensure_all_mac_addresses_are_uppercase(rules):
+
+    def _to_upper(pat):
+        return pat.group(0).upper()
+
     new_rules = []
     lowercase_mac_regex = re.compile(r"(?:[0-9a-f]{2}[:]){5}(?:[0-9a-f]{2})")
-    callback = lambda pat: pat.group(0).upper()
     for rule in rules:
-        new_rules.append(re.sub(lowercase_mac_regex, callback, rule))
+        new_rules.append(re.sub(lowercase_mac_regex, _to_upper, rule))
     return new_rules


@@ -171,7 +171,7 @@ class OFPort(object):
         self.lla_address = str(netutils.get_ipv6_addr_by_EUI64(
             lib_const.IPv6_LLA_PREFIX, self.mac))
         self.ofport = ovs_port.ofport
-        self.sec_groups = list()
+        self.sec_groups = []
         self.fixed_ips = port_dict.get('fixed_ips', [])
         self.neutron_port_dict = port_dict.copy()
         self.allowed_pairs_v4 = self._get_allowed_pairs(port_dict, version=4)
@@ -307,7 +307,7 @@ class ConjIdMap(object):
                       for table in ovs_consts.OVS_FIREWALL_TABLES])
         conj_ids = CONJ_ID_REGEX.findall(" | ".join(flows_iter))
         try:
-            conj_id_max = max([int(conj_id) for conj_id in conj_ids])
+            conj_id_max = max(int(conj_id) for conj_id in conj_ids)
         except ValueError:
             conj_id_max = 0


@@ -167,9 +167,12 @@ class ResourceConsumerTracker(object):
 
     def report(self):
         """Output debug information about the consumer versions."""
-        format = lambda versions: pprint.pformat(dict(versions), indent=4)
-        debug_dict = {'pushed_versions': format(self._versions),
-                      'consumer_versions': format(self._versions_by_consumer)}
+
+        def _format(versions):
+            return pprint.pformat(dict(versions), indent=4)
+
+        debug_dict = {'pushed_versions': _format(self._versions),
+                      'consumer_versions': _format(self._versions_by_consumer)}
         if self.last_report != debug_dict:
             self.last_report = debug_dict
             LOG.debug('Tracked resource versions report:\n'


@@ -68,11 +68,13 @@ def filter_fields(f):
         except (IndexError, ValueError):
             return result
 
-        do_filter = lambda d: {k: v for k, v in d.items() if k in fields}
+        def _do_filter(d):
+            return {k: v for k, v in d.items() if k in fields}
+
         if isinstance(result, list):
-            return [do_filter(obj) for obj in result]
+            return [_do_filter(obj) for obj in result]
         else:
-            return do_filter(result)
+            return _do_filter(result)
 
     return inner_filter


@@ -522,7 +522,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
                                                dvr_routers):
         related_routers = self._get_other_dvr_router_ids_connected_router(
             context, router_id)
-        return any([r in dvr_routers for r in related_routers])
+        return any(r in dvr_routers for r in related_routers)
 
     def _dvr_handle_unbound_allowed_addr_pair_add(


@@ -16,9 +16,9 @@
 from neutron_lib.api.definitions import network_ip_availability as apidef
 from neutron_lib.api import extensions as api_extensions
 
-import neutron.api.extensions as extensions
-import neutron.api.v2.base as base
-import neutron.services.network_ip_availability.plugin as plugin
+from neutron.api import extensions
+from neutron.api.v2 import base
+from neutron.services.network_ip_availability import plugin
 
 
 class Network_ip_availability(api_extensions.APIExtensionDescriptor):


@@ -277,9 +277,12 @@ class PortForwarding(base.NeutronDbObject):
 
     @staticmethod
     def _unique_port_forwarding(query):
+
+        def _row_one(row):
+            return row[1]
+
         q = query.order_by(l3.FloatingIP.router_id)
-        keyfunc = lambda row: row[1]
-        group_iterator = itertools.groupby(q, keyfunc)
+        group_iterator = itertools.groupby(q, _row_one)
 
         result = []
         for key, value in group_iterator:


@@ -92,8 +92,7 @@ class PolicyHook(hooks.PecanHook):
         if not controller or utils.is_member_action(controller):
             return
         collection = state.request.context.get('collection')
-        needs_prefetch = (state.request.method == 'PUT' or
-                          state.request.method == 'DELETE')
+        needs_prefetch = state.request.method in ('PUT', 'DELETE')
         policy.init()
 
         action = controller.plugin_handlers[


@@ -204,7 +204,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         # Stores the port IDs whose binding has been activated
         self.activated_bindings = set()
         # Stores smartnic ports update/remove
-        self.updated_smartnic_ports = list()
+        self.updated_smartnic_ports = []
         # Stores integration bridge smartnic ports data
         self.current_smartnic_ports_map = {}
 
@@ -219,7 +219,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         # keeps association between ports and ofports to detect ofport change
         self.vifname_to_ofport_map = {}
         # Stores newly created bridges
-        self.added_bridges = list()
+        self.added_bridges = []
         self.bridge_mappings = self._parse_bridge_mappings(
             ovs_conf.bridge_mappings)
         self.rp_bandwidths = place_utils.parse_rp_bandwidths(
@@ -2753,7 +2753,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
                 self.process_smartnic_ports()
             updated_smartnic_ports_copy = (
                 self.updated_smartnic_ports)
-            self.updated_smartnic_ports = list()
+            self.updated_smartnic_ports = []
             for port_data in updated_smartnic_ports_copy:
                 self.treat_smartnic_port(port_data)


@@ -937,8 +937,8 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
             for route in self._nb_idl.lr_route_list(router.uuid).execute(
                     check_error=True):
                 if (route.nexthop == '' and
-                        (route.ip_prefix == n_const.IPv4_ANY or
-                         route.ip_prefix == n_const.IPv6_ANY)):
+                        route.ip_prefix in (n_const.IPv4_ANY,
+                                            n_const.IPv6_ANY)):
                     cmds.append(
                         self._nb_idl.delete_static_route(
                             router.name, route.ip_prefix, ''))


@@ -2056,8 +2056,8 @@ class OVNClient(object):
         if self.is_external_ports_supported():
             # If there are no external ports in this network, there's
             # no need to check the AZs
-            if any([p for p in lswitch.ports if
-                    p.type == ovn_const.LSP_TYPE_EXTERNAL]):
+            if any(p for p in lswitch.ports if
+                   p.type == ovn_const.LSP_TYPE_EXTERNAL):
                 # Check for changes in the network Availability Zones
                 ovn_ls_azs = lswitch.external_ids.get(
                     ovn_const.OVN_AZ_HINTS_EXT_ID_KEY, '')


@@ -105,7 +105,7 @@ class ChassisEvent(row_event.RowEvent):
 
     def _get_min_priority_in_hcg(self, ha_chassis_group):
         """Find the next lowest priority number within a HA Chassis Group."""
         min_priority = min(
-            [ch.priority for ch in ha_chassis_group.ha_chassis],
+            (ch.priority for ch in ha_chassis_group.ha_chassis),
             default=ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY)
         return min_priority - 1


@@ -143,8 +143,8 @@ def _should_validate_sub_attributes(attribute, sub_attr):
     """Verify that sub-attributes are iterable and should be validated."""
     validate = attribute.get('validate')
     return (validate and isinstance(sub_attr, abc.Iterable) and
-            any([k.startswith('type:dict') and
-                 v for (k, v) in validate.items()]))
+            any(k.startswith('type:dict') and v
+                for (k, v) in validate.items()))
 
 
 def _build_subattr_match_rule(attr_name, attr, action, target):
@@ -383,11 +383,15 @@ class FieldCheck(policy.Check):
                 (resource, field, value))
 
         # Value might need conversion - we need help from the attribute map
+
+        def _no_conv(x):
+            return x
+
         try:
             attr = attributes.RESOURCES[resource][field]
             conv_func = attr['convert_to']
         except KeyError:
-            conv_func = lambda x: x
+            conv_func = _no_conv
 
         self.field = field
         self.resource = resource


@@ -177,8 +177,7 @@ def _get_rpc_workers(plugin=None):
     if workers is None:
         # By default, half as many rpc workers as api workers
         workers = int(_get_api_workers() / 2)
-    if workers < 1:
-        workers = 1
+    workers = max(workers, 1)
 
     # If workers > 0 then start_rpc_listeners would be called in a
     # subprocess and we cannot simply catch the NotImplementedError. It is


@@ -175,10 +175,13 @@ def get_logs_bound_port(context, port_id):
                            project_id=project_id,
                            resource_type=constants.SECURITY_GROUP,
                            enabled=True)
-    is_bound = lambda log: (log.resource_id in port.security_group_ids or
-                            log.target_id == port.id or
-                            (not log.target_id and not log.resource_id))
-    return [log for log in logs if is_bound(log)]
+
+    def _is_bound(log):
+        return (log.resource_id in port.security_group_ids or
+                log.target_id == port.id or
+                (not log.target_id and not log.resource_id))
+
+    return [log for log in logs if _is_bound(log)]
 
 
 def get_logs_bound_sg(context, sg_id=None, project_id=None, port_id=None,


@@ -67,7 +67,7 @@ def setup_logging():
 
 
 def find_deleted_sg_rules(old_port, new_ports):
-    del_rules = list()
+    del_rules = []
     for port in new_ports:
         if old_port.id == port.id:
             for rule in old_port.secgroup_rules:


@@ -17,8 +17,8 @@ from neutron_lib.api.definitions import network_ip_availability
 from neutron_lib.db import utils as db_utils
 from neutron_lib import exceptions
 
-import neutron.db.db_base_plugin_v2 as db_base_plugin_v2
-import neutron.db.network_ip_availability_db as ip_availability_db
+from neutron.db import db_base_plugin_v2
+from neutron.db import network_ip_availability_db as ip_availability_db
 
 
 class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin,


@@ -94,12 +94,15 @@ def bridge_has_port(bridge, is_port_predicate):
     return any(iface for iface in ifaces if is_port_predicate(iface))
 
 
+def _is_instance_port(port_name):
+    return not is_trunk_service_port(port_name)
+
+
 def bridge_has_instance_port(bridge):
     """True if there is an OVS port that doesn't have bridge or patch ports
     prefix.
     """
-    is_instance_port = lambda p: not is_trunk_service_port(p)
-    return bridge_has_port(bridge, is_instance_port)
+    return bridge_has_port(bridge, _is_instance_port)
 
 
 def bridge_has_service_port(bridge):


@@ -123,7 +123,7 @@ class TrunkPlugin(service_base.ServicePluginBase):
 
     def check_driver_compatibility(self):
         """Fail to load if no compatible driver is found."""
-        if not any([driver.is_loaded for driver in self._drivers]):
+        if not any(driver.is_loaded for driver in self._drivers):
             raise trunk_exc.IncompatibleTrunkPluginConfiguration()
 
     def check_segmentation_compatibility(self):