TrivialFix: Use assertEqual instead of assertEquals

Following the OpenStack Style Guidelines [1]: http://docs.openstack.org/developer/hacking/#unit-tests-and-assertraises
[H234] assertEquals() logs a DeprecationWarning in Python 3.x,
use assertEqual() instead. The same goes for assertNotEquals().

Change-Id: Iaa4b2fd04d2aa661bb196c4b9f2494a281a9294a
This commit is contained in:
Nam Nguyen Hoai 2016-09-28 11:50:01 +07:00
parent 0a96410af7
commit b2f25e97a4
14 changed files with 58 additions and 58 deletions

View File

@ -154,7 +154,7 @@ class CsvExporterTest(OswlTest, DbTest):
# Checking filtered inst structures don't fetched
count_with_filtered = get_inst_structures_query(None, None).count()
self.assertEquals(count_initial, count_with_filtered)
self.assertEqual(count_initial, count_with_filtered)
# Generating not filtered inst structures
oswls = self.get_saved_oswls(20, consts.OSWL_RESOURCE_TYPES.vm,
@ -166,8 +166,8 @@ class CsvExporterTest(OswlTest, DbTest):
# Checking not filtered inst structures fetched
count_with_not_filtered = get_inst_structures_query(None, None).count()
get_inst_structures_query(None, None).all()
self.assertEquals(count_initial + not_filtered_num,
count_with_not_filtered)
self.assertEqual(count_initial + not_filtered_num,
count_with_not_filtered)
def test_no_filtered_structures(self):
oswls = self.get_saved_oswls(100, consts.OSWL_RESOURCE_TYPES.vm,

View File

@ -172,7 +172,7 @@ class JsonExporterTest(InstStructureTest, OswlTest, DbTest):
# Checking SqlAlchemy objects equality
for c in expected.__table__.columns:
column_name = c.name
self.assertEquals(
self.assertEqual(
getattr(expected, column_name),
getattr(actual, column_name)
)

View File

@ -301,7 +301,7 @@ class JsonReportsTest(DbTest):
resp = self.client.get(url)
self.check_response_ok(resp)
self.assertEqual(0, cached_mc_get.call_count)
self.assertEquals(3, cached_mc_set.call_count)
self.assertEqual(3, cached_mc_set.call_count)
@mock.patch.object(memcache.Client, 'get', return_value=None)
def test_get_nodes_num(self, _):

View File

@ -167,7 +167,7 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
oswls = get_oswls_query(resource_type).all()
oswl = oswls[0]
self.assertEquals(
self.assertEqual(
inst_struct.creation_date,
exporter.get_last_sync_datetime(oswl)
)
@ -175,7 +175,7 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
inst_struct.modification_date = datetime.utcnow()
oswls = get_oswls_query(resource_type).all()
oswl = oswls[0]
self.assertEquals(
self.assertEqual(
inst_struct.modification_date,
exporter.get_last_sync_datetime(oswl)
)
@ -236,7 +236,7 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
oswls_seamless = exporter.fill_date_gaps(
oswls, datetime.utcnow().date())
self.assertEquals(1, len(list(oswls_seamless)))
self.assertEqual(1, len(list(oswls_seamless)))
# Checking record is duplicated
inst_struct.modification_date = datetime.utcnow()
@ -250,8 +250,8 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
on_date_days = 1
on_date = (datetime.utcnow() - timedelta(days=on_date_days)).date()
oswls_seamless = list(exporter.fill_date_gaps(oswls, on_date))
self.assertEquals(created_days - on_date_days + 1,
len(oswls_seamless))
self.assertEqual(created_days - on_date_days + 1,
len(oswls_seamless))
# Checking dates are seamless and grow
expected_date = oswls_seamless[0].stats_on_date
@ -277,7 +277,7 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
self.get_saved_inst_structs(oswls_saved,
creation_date_range=(0, 0))
oswls = get_oswls_query(resource_type).all()
self.assertEquals(oswls_before + num, len(list(oswls)))
self.assertEqual(oswls_before + num, len(list(oswls)))
# Checking added, modified, removed not empty
for oswl in oswls:
@ -325,7 +325,7 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
creation_date_range=(0, 0))
# Checking all resources in seamless oswls
oswls = get_oswls_query(resource_type).all()
self.assertEquals(insts_num * clusters_num, len(oswls))
self.assertEqual(insts_num * clusters_num, len(oswls))
oswls_seamless = list(exporter.fill_date_gaps(
oswls, datetime.utcnow().date()))
self.assertEqual(insts_num * clusters_num * (created_days + 1),
@ -788,10 +788,10 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
def check_resource_state(resource, tenant_id, is_added,
is_modified, is_removed):
self.assertEquals(is_added, resource[is_added_pos])
self.assertEquals(is_modified, resource[is_modified_pos])
self.assertEquals(is_removed, resource[is_removed_pos])
self.assertEquals(tenant_id, resource[tenant_id_pos])
self.assertEqual(is_added, resource[is_added_pos])
self.assertEqual(is_modified, resource[is_modified_pos])
self.assertEqual(is_removed, resource[is_removed_pos])
self.assertEqual(tenant_id, resource[tenant_id_pos])
# The fist oswl status True only in is_added
check_resource_state(flatten_resources[0], 'first',

View File

@ -39,7 +39,7 @@ class BaseTest(TestCase):
self.assertIn(resp.status_code, codes)
def check_response_error(self, resp, code):
self.assertEquals(code, resp.status_code)
self.assertEqual(code, resp.status_code)
class DbTest(BaseTest):

View File

@ -58,12 +58,12 @@ class BaseTest(TestCase):
def check_response_ok(self, resp, codes=(200, 201)):
self.assertIn(resp.status_code, codes)
d = json.loads(resp.data)
self.assertEquals('ok', d['status'])
self.assertEqual('ok', d['status'])
def check_response_error(self, resp, code):
self.assertEquals(code, resp.status_code)
self.assertEqual(code, resp.status_code)
d = json.loads(resp.data)
self.assertEquals('error', d['status'])
self.assertEqual('error', d['status'])
class DbTest(BaseTest):

View File

@ -32,15 +32,15 @@ class TestUtil(BaseTest):
def test_split_collection(self):
coll = list(xrange(3))
chunks = list(split_collection(coll, chunk_size=len(coll)))
self.assertEquals(1, len(chunks))
self.assertEqual(1, len(chunks))
self.assertListEqual(chunks[0], coll)
chunks = list(split_collection(coll, chunk_size=len(coll) + 1))
self.assertEquals(1, len(chunks))
self.assertEqual(1, len(chunks))
self.assertListEqual(chunks[0], coll)
chunks = list(split_collection(coll, chunk_size=len(coll) - 1))
self.assertEquals(2, len(chunks))
self.assertEqual(2, len(chunks))
self.assertListEqual(chunks[0], coll[:-1])
self.assertListEqual(chunks[1], coll[-1:])

View File

@ -70,14 +70,14 @@ class TestActionLogs(DbTest):
self.check_response_ok(resp)
resp_data = json.loads(resp.data)
for d in resp_data['action_logs']:
self.assertEquals(
self.assertEqual(
consts.ACTION_LOG_STATUSES.added,
d['status']
)
actual_logs = db.session.query(ActionLog).filter(
ActionLog.master_node_uid == master_node_uid).all()
self.assertEquals(len(expected_logs), len(actual_logs))
self.assertEqual(len(expected_logs), len(actual_logs))
self.assertListEqual(
sorted([l['external_id'] for l in expected_logs]),
sorted([l.external_id for l in actual_logs])
@ -119,11 +119,11 @@ class TestActionLogs(DbTest):
ActionLog.master_node_uid == master_node_uid).count()
resp_data = json.loads(resp.data)
for d in resp_data['action_logs']:
self.assertEquals(
self.assertEqual(
consts.ACTION_LOG_STATUSES.added,
d['status']
)
self.assertEquals(len(action_logs), count_actual)
self.assertEqual(len(action_logs), count_actual)
# Checking duplications is not added
new_action_logs = [
@ -158,7 +158,7 @@ class TestActionLogs(DbTest):
self.check_response_ok(resp)
count_actual = db.session.query(ActionLog).filter(
ActionLog.master_node_uid == master_node_uid).count()
self.assertEquals(
self.assertEqual(
len(new_action_logs),
count_actual
)
@ -171,8 +171,8 @@ class TestActionLogs(DbTest):
lambda x: x['status'] == consts.ACTION_LOG_STATUSES.added,
data['action_logs']
)
self.assertEquals(len(action_logs), len(existed))
self.assertEquals(len(new_action_logs) - len(action_logs), len(added))
self.assertEqual(len(action_logs), len(existed))
self.assertEqual(len(new_action_logs) - len(action_logs), len(added))
def test_validation_error(self):
expected_logs = [{'master_node_uid': 'x', 'external_id': None}]

View File

@ -68,11 +68,11 @@ class LibvirtTypesDistribution(ElasticTest):
expected_clusters_num += len(clusters_in_statuses)
total_clusters_num += structure['clusters_num']
self.assertGreater(total_clusters_num, actual_clusters_num)
self.assertEquals(expected_clusters_num, actual_clusters_num)
self.assertEqual(expected_clusters_num, actual_clusters_num)
# checking number of filtered libvirt types and clusters
libvirt_types = filtered_statuses['attributes']['libvirt_types']
self.assertEquals(
self.assertEqual(
expected_clusters_num,
sum(d['doc_count'] for d in libvirt_types['buckets'])
)

View File

@ -161,5 +161,5 @@ class OsDistribution(ElasticTest):
self.assertDictEqual(expected_oses, actual_oses)
# checking clusters are filtered
self.assertEquals(expected_clusters_num,
sum(six.itervalues(actual_oses)))
self.assertEqual(expected_clusters_num,
sum(six.itervalues(actual_oses)))

View File

@ -105,7 +105,7 @@ class Reports(ElasticTest):
body=query)
inst_count = len(filter(
lambda x: x['fuel_release']['release'] == release, installations))
self.assertEquals(inst_count, resp['count'])
self.assertEqual(inst_count, resp['count'])
def test_filtration(self):
installations_num = 100
@ -155,11 +155,11 @@ class Reports(ElasticTest):
filter(lambda c: c['status'] in statuses,
structure['clusters'])
)
self.assertEquals(expected_clusters_num, actual_clusters_num)
self.assertEqual(expected_clusters_num, actual_clusters_num)
# checking number of filtered libvirt types and clusters
libvirt_types = filtered_statuses['attributes']['libvirt_types']
self.assertEquals(
self.assertEqual(
expected_clusters_num,
sum(d['doc_count'] for d in libvirt_types['buckets'])
)
@ -258,8 +258,8 @@ class Reports(ElasticTest):
# checking filtered clusters num
filtered_releases = resp['aggregations']['releases']
# checking releases are filtered
self.assertEquals(releases_data[filter_by_release],
filtered_releases['doc_count'])
self.assertEqual(releases_data[filter_by_release],
filtered_releases['doc_count'])
def test_filtration_by_is_filtered(self):
# Query for fetching libvirt distribution

View File

@ -51,7 +51,7 @@ class ElasticsearchMapping(ElasticTest):
result = resp['aggregations']['structs']['buckets']
# checking that master_node_uids with whitespaces and
# non-literal symbols didn't split
self.assertEquals(len(docs), len(result))
self.assertEqual(len(docs), len(result))
def test_mixed_values_in_list_migration(self):
doc = {

View File

@ -43,8 +43,8 @@ class MappingRuleTest(MigrationTest):
doc_source = doc['_source']
self.assertIn('creation_date', doc_source)
self.assertIn('modification_date', doc_source)
self.assertEquals(index, doc['_index'])
self.assertEquals(doc_type, doc['_type'])
self.assertEquals(db_obj.master_node_uid, doc['_id'])
self.assertEqual(index, doc['_index'])
self.assertEqual(doc_type, doc['_type'])
self.assertEqual(db_obj.master_node_uid, doc['_id'])
for struct_key in six.iterkeys(db_obj.structure):
self.assertIn(struct_key, doc_source)

View File

@ -71,8 +71,8 @@ class MigratorTest(MigrationTest):
time_of_sync = parser.parse(info_after.last_sync_time)
self.assertLessEqual(time_before, time_of_sync)
self.assertGreaterEqual(datetime.datetime.utcnow(), time_of_sync)
self.assertEquals(info_before.last_sync_value,
info_after.last_sync_value)
self.assertEqual(info_before.last_sync_value,
info_after.last_sync_value)
def get_indexed_docs_num(self, sync_info):
resp = self.es.count(index=sync_info.index_name,
@ -100,11 +100,11 @@ class MigratorTest(MigrationTest):
self.es.indices.refresh(index=config.INDEX_FUEL)
is_indexed_docs_after = self.get_indexed_docs_num(is_sync_info)
self.assertEquals(is_indexed_docs_before + docs_num,
is_indexed_docs_after)
self.assertEqual(is_indexed_docs_before + docs_num,
is_indexed_docs_after)
al_indexed_docs_after = self.get_indexed_docs_num(al_sync_info)
self.assertEquals(al_indexed_docs_before + docs_num,
al_indexed_docs_after)
self.assertEqual(al_indexed_docs_before + docs_num,
al_indexed_docs_after)
@patch('migration.config.DB_SYNC_CHUNK_SIZE', 2)
def test_installation_structure_migration(self):
@ -136,8 +136,8 @@ class MigratorTest(MigrationTest):
# checking all docs are migrated
self.es.indices.refresh(index=sync_info.index_name)
self.assertEquals(indexed_docs_before + len(mn_uids),
self.get_indexed_docs_num(sync_info))
self.assertEqual(indexed_docs_before + len(mn_uids),
self.get_indexed_docs_num(sync_info))
# checking new docs are indexed
for mn_uid in mn_uids - null_md_uids:
@ -201,7 +201,7 @@ class MigratorTest(MigrationTest):
self.assertIsNotNone(sync_info_after.last_sync_value)
self.assertIsNotNone(sync_info_after.last_sync_time)
indexed_docs_after = self.get_indexed_docs_num(sync_info_after)
self.assertEquals(indexed_docs_before + docs_num, indexed_docs_after)
self.assertEqual(indexed_docs_before + docs_num, indexed_docs_after)
def test_empty_action_logs_migration(self):
migrator = Migrator()
@ -211,7 +211,7 @@ class MigratorTest(MigrationTest):
time_of_sync = parser.parse(info.last_sync_time)
self.assertLessEqual(time_before, time_of_sync)
self.assertGreaterEqual(datetime.datetime.utcnow(), time_of_sync)
self.assertEquals(0, info.last_sync_value)
self.assertEqual(0, info.last_sync_value)
@patch('migration.config.DB_SYNC_CHUNK_SIZE', 2)
def test_action_logs_migration(self):
@ -238,12 +238,12 @@ class MigratorTest(MigrationTest):
self.assertGreaterEqual(datetime.datetime.utcnow(), time_of_sync)
# checking last sync id is updated
self.assertEquals(last_obj.id, new_sync_info.last_sync_value)
self.assertEqual(last_obj.id, new_sync_info.last_sync_value)
# checking all docs are migrated
self.es.indices.refresh(index=sync_info.index_name)
self.assertEquals(indexed_docs_before + docs_num,
self.get_indexed_docs_num(sync_info))
self.assertEqual(indexed_docs_before + docs_num,
self.get_indexed_docs_num(sync_info))
# checking new docs are indexed
check_keys = [
@ -281,5 +281,5 @@ class MigratorTest(MigrationTest):
# checking all docs are migrated
self.es.indices.refresh(index=sync_info.index_name)
self.assertEquals(indexed_docs_before + docs_num,
self.get_indexed_docs_num(sync_info))
self.assertEqual(indexed_docs_before + docs_num,
self.get_indexed_docs_num(sync_info))