Fixes use of dict methods for Python3

In Python3 the dict.keys(), dict.values() and dict.items() methods have
been changed to return iterators instead of lists. This causes issues
with code that expects a list.

bp python3
Change-Id: Id0d55ea4b992666848af1b1a055bc7841548cc6a
This commit is contained in:
David Stanek 2015-04-23 15:20:58 +00:00
parent 2874082ceb
commit c6e2beaa69
20 changed files with 48 additions and 41 deletions

View File

@ -88,7 +88,7 @@ class Endpoint(controller.V2Controller):
# add the legacy endpoint with an interface url
legacy_ep['%surl' % endpoint['interface']] = endpoint['url']
return {'endpoints': legacy_endpoints.values()}
return {'endpoints': list(legacy_endpoints.values())}
@controller.v2_deprecated
def create_endpoint(self, context, endpoint):

View File

@ -451,7 +451,7 @@ class MongoApi(object):
doc_date = self._get_doc_date()
insert_refs = []
update_refs = []
existing_docs = self._get_results_as_dict(mapping.keys())
existing_docs = self._get_results_as_dict(list(mapping.keys()))
for key, value in mapping.items():
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
@ -536,7 +536,7 @@ class BaseTransform(AbstractManipulator):
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB."""
for (key, value) in son.items():
for (key, value) in list(son.items()):
if isinstance(value, api.CachedValue):
son[key] = value.payload # key is 'value' field here
son['meta'] = value.metadata
@ -553,7 +553,7 @@ class BaseTransform(AbstractManipulator):
('_id', 'value', 'meta', 'doc_date')):
payload = son.pop('value', None)
metadata = son.pop('meta', None)
for (key, value) in son.items():
for (key, value) in list(son.items()):
if isinstance(value, dict):
son[key] = self.transform_outgoing(value, collection)
if metadata is not None:

View File

@ -1152,4 +1152,4 @@ def list_opts():
:returns: a list of (group_name, opts) tuples
"""
return FILE_OPTIONS.items()
return list(FILE_OPTIONS.items())

View File

@ -617,7 +617,7 @@ def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in LDAP_TLS_CERTS.values():
if tls_req_cert in list(LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
@ -1440,8 +1440,8 @@ class BaseLdap(object):
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
@ -1460,8 +1460,8 @@ class BaseLdap(object):
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,

View File

@ -27,7 +27,8 @@ def upgrade(migrate_engine):
# names, depending on version of MySQL used. We should make this naming
# consistent, by reverting index name to a consistent condition.
if any(i for i in endpoint.indexes if
i.columns.keys() == ['service_id'] and i.name != 'service_id'):
list(i.columns.keys()) == ['service_id']
and i.name != 'service_id'):
# NOTE(i159): this action will re-create the index
# with the new name. This can be considered as renaming under the
# MySQL rules.
@ -37,5 +38,6 @@ def upgrade(migrate_engine):
meta, autoload=True)
if any(i for i in user_group_membership.indexes if
i.columns.keys() == ['group_id'] and i.name != 'group_id'):
list(i.columns.keys()) == ['group_id']
and i.name != 'group_id'):
sa.Index('group_id', user_group_membership.c.group_id).create()

View File

@ -51,7 +51,7 @@ def flatten_dict(d, parent_key=''):
for k, v in d.items():
new_key = parent_key + '.' + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key).items())
items.extend(list(flatten_dict(v, new_key).items()))
else:
items.append((new_key, v))
return dict(items)
@ -81,7 +81,7 @@ class SmarterEncoder(jsonutils.json.JSONEncoder):
"""Help for JSON encoding dict-like objects."""
def default(self, obj):
if not isinstance(obj, dict) and hasattr(obj, 'iteritems'):
return dict(obj.iteritems())
return dict(six.iteritems(obj))
return super(SmarterEncoder, self).default(obj)

View File

@ -356,7 +356,7 @@ def transform_to_group_ids(group_names, mapping_id,
def get_assertion_params_from_env(context):
LOG.debug('Environment variables: %s', context['environment'])
prefix = CONF.federation.assertion_prefix
for k, v in context['environment'].items():
for k, v in list(context['environment'].items()):
if k.startswith(prefix):
yield (k, v)

View File

@ -26,7 +26,8 @@ def upgrade(migrate_engine):
# indexes create automatically. That those indexes will have different
# names, depending on version of MySQL used. We should make this naming
# consistent, by reverting index name to a consistent condition.
if any(i for i in table.indexes if i.columns.keys() == ['consumer_id']
if any(i for i in table.indexes if
list(i.columns.keys()) == ['consumer_id']
and i.name != 'consumer_id'):
# NOTE(i159): this action will re-create the index
# with the new name. This can be considered as renaming under the

View File

@ -63,7 +63,7 @@ class Extensions(wsgi.Application):
return None
def get_extensions_info(self, context):
return {'extensions': {'values': self.extensions.values()}}
return {'extensions': {'values': list(self.extensions.values())}}
def get_extension_info(self, context, extension_alias):
try:
@ -177,7 +177,7 @@ class Version(wsgi.Application):
versions = self._get_versions_list(context)
return wsgi.render_response(status=(300, 'Multiple Choices'), body={
'versions': {
'values': versions.values()
'values': list(versions.values())
}
})

View File

@ -1093,14 +1093,14 @@ class DomainConfigManager(manager.Manager):
'provided contains group %(group_other)s '
'instead') % {
'group': group,
'group_other': config.keys()[0]}
'group_other': list(config.keys())[0]}
raise exception.InvalidDomainConfig(reason=msg)
if option and option not in config[group]:
msg = _('Trying to update option %(option)s in group '
'%(group)s, but config provided contains option '
'%(option_other)s instead') % {
'group': group, 'option': option,
'option_other': config[group].keys()[0]}
'option_other': list(config[group].keys())[0]}
raise exception.InvalidDomainConfig(reason=msg)
# Finally, we need to check if the group/option specified

View File

@ -21,7 +21,7 @@ from keystone.tests import unit as tests
# List of 2-tuples, (pem_type, pem_header)
headers = pemutils.PEM_TYPE_TO_HEADER.items()
headers = list(pemutils.PEM_TYPE_TO_HEADER.items())
def make_data(size, offset=0):

View File

@ -412,7 +412,7 @@ class TestCase(BaseTestCase):
for manager_name, manager in six.iteritems(drivers):
setattr(self, manager_name, manager)
self.addCleanup(self.cleanup_instance(*drivers.keys()))
self.addCleanup(self.cleanup_instance(*list(drivers.keys())))
def load_extra_backends(self):
"""Override to load managers that aren't loaded by default.

View File

@ -254,7 +254,7 @@ class FakeLdap(core.LDAPHandler):
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in core.LDAP_TLS_CERTS.values():
if tls_req_cert in list(core.LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
raise ValueError("invalid TLS_REQUIRE_CERT tls_req_cert=%s",

View File

@ -160,7 +160,7 @@ class MockCollection(object):
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
for key, value in list(obj.items()):
new[key] = self._copy_doc(value, container)
return new
else:

View File

@ -488,6 +488,8 @@ class KVSTest(tests.TestCase):
memcached_expire_time=memcache_expire_time,
some_other_arg=uuid.uuid4().hex,
no_expiry_keys=[self.key_bar])
kvs_driver = kvs._region.backend.driver
# Ensure the set_arguments are correct
self.assertDictEqual(
kvs._region.backend._get_set_arguments_driver_attr(),
@ -499,8 +501,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_set_args)
self.assertEqual(expected_foo_keys,
kvs._region.backend.driver.client.keys_values.keys())
observed_foo_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
self.value_foo,
kvs._region.backend.driver.client.keys_values[self.key_foo][0])
@ -511,8 +513,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_no_expiry_args)
self.assertEqual(expected_bar_keys,
kvs._region.backend.driver.client.keys_values.keys())
observed_bar_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
self.value_bar,
kvs._region.backend.driver.client.keys_values[self.key_bar][0])
@ -523,8 +525,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_set_args)
self.assertEqual(expected_foo_keys,
kvs._region.backend.driver.client.keys_values.keys())
observed_foo_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_foo_keys, observed_foo_keys)
self.assertEqual(
self.value_foo,
kvs._region.backend.driver.client.keys_values[self.key_foo][0])
@ -535,8 +537,8 @@ class KVSTest(tests.TestCase):
self.assertDictEqual(
kvs._region.backend.driver.client.set_arguments_passed,
expected_no_expiry_args)
self.assertEqual(expected_bar_keys,
kvs._region.backend.driver.client.keys_values.keys())
observed_bar_keys = list(kvs_driver.client.keys_values.keys())
self.assertEqual(expected_bar_keys, observed_bar_keys)
self.assertEqual(
self.value_bar,
kvs._region.backend.driver.client.keys_values[self.key_bar][0])

View File

@ -405,14 +405,16 @@ class SqlUpgradeTests(SqlMigrateBase):
self.upgrade(53)
self.upgrade(54)
table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_actor_id', ['actor_id']), index_data)
def test_token_user_id_and_trust_id_index_upgrade(self):
self.upgrade(54)
self.upgrade(55)
table = sqlalchemy.Table('token', self.metadata, autoload=True)
index_data = [(idx.name, idx.columns.keys()) for idx in table.indexes]
index_data = [(idx.name, list(idx.columns.keys()))
for idx in table.indexes]
self.assertIn(('ix_token_user_id', ['user_id']), index_data)
self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)

View File

@ -651,7 +651,7 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
of those in expected.
"""
for k, v in expected.iteritems():
for k, v in six.iteritems(expected):
self.assertIn(k, actual)
if isinstance(v, dict):
self.assertDictContainsSubset(v, actual[k])
@ -803,7 +803,7 @@ class RestfulTestCase(tests.SQLDriverOverrides, rest.RestfulTestCase,
self.assertValidCatalog(resp.json['catalog'])
self.assertIn('links', resp.json)
self.assertIsInstance(resp.json['links'], dict)
self.assertEqual(['self'], resp.json['links'].keys())
self.assertEqual(['self'], list(resp.json['links'].keys()))
self.assertEqual(
'http://localhost/v3/auth/catalog',
resp.json['links']['self'])

View File

@ -109,14 +109,14 @@ class FederatedSetupMixin(object):
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
def xor_project_domain(iterable):
return sum(('project' in iterable, 'domain' in iterable)) % 2
def xor_project_domain(token_keys):
return sum(('project' in token_keys, 'domain' in token_keys)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
if not xor_project_domain(token.keys()):
if not xor_project_domain(list(token.keys())):
raise AssertionError("You must specify either"
"project or domain.")

View File

@ -195,7 +195,7 @@ class V2TokenDataHelper(object):
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
return services.values()
return list(services.values())
@dependency.requires('assignment_api', 'catalog_api', 'federation_api',

View File

@ -178,7 +178,7 @@ def rotate_keys(keystone_user_id=None, keystone_group_id=None):
LOG.info(_LI('Starting key rotation with %(count)s key files: %(list)s'), {
'count': len(key_files),
'list': key_files.values()})
'list': list(key_files.values())})
# determine the number of the new primary key
current_primary_key = max(key_files.keys())