Python3: Fix failing tempest tests

Fix sorting and string encoding

Story: 2000975
Task: 24394

Change-Id: Iaf4575b47fdc299c4e9eb77e19c36bebb0642ec8
This commit is contained in:
Adrian Czarnecki 2018-07-19 14:09:45 +02:00
parent 55b2ce0312
commit 7ba3a8b7ab
5 changed files with 67 additions and 57 deletions

View File

@@ -22,6 +22,7 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import requests
from six import PY3
from monasca_common.rest import utils as rest_utils
@@ -250,39 +251,42 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
# name - optional
if name:
# replace ' with \' to make query parsable
clean_name = name.replace("'", "\\'")
where_clause += ' from "{}" '.format(clean_name.encode('utf8'))
clean_name = name.replace("'", "\\'") if PY3 \
else name.replace("'", "\\'").encode('utf-8')
where_clause += ' from "{}" '.format(clean_name)
# tenant id
where_clause += " where _tenant_id = '{}' ".format(tenant_id.encode(
"utf8"))
where_clause += " where _tenant_id = '{}' ".format(tenant_id)
# region
where_clause += " and _region = '{}' ".format(region.encode('utf8'))
where_clause += " and _region = '{}' ".format(region)
# dimensions - optional
if dimensions:
for dimension_name, dimension_value in iter(
sorted(dimensions.items())):
# replace ' with \' to make query parsable
clean_dimension_name = dimension_name.replace("\'", "\\'")
clean_dimension_name = dimension_name.replace("\'", "\\'") if PY3 \
else dimension_name.replace("\'", "\\'").encode('utf-8')
if dimension_value == "":
where_clause += " and \"{}\" =~ /.+/ ".format(
clean_dimension_name)
elif '|' in dimension_value:
# replace ' with \' to make query parsable
clean_dimension_value = dimension_value.replace("\'", "\\'")
clean_dimension_value = dimension_value.replace("\'", "\\'") if PY3 else \
dimension_value.replace("\'", "\\'").encode('utf-8')
where_clause += " and \"{}\" =~ /^{}$/ ".format(
clean_dimension_name.encode('utf8'),
clean_dimension_value.encode('utf8'))
clean_dimension_name,
clean_dimension_value)
else:
# replace ' with \' to make query parsable
clean_dimension_value = dimension_value.replace("\'", "\\'")
clean_dimension_value = dimension_value.replace("\'", "\\'") if PY3 else \
dimension_value.replace("\'", "\\'").encode('utf-8')
where_clause += " and \"{}\" = '{}' ".format(
clean_dimension_name.encode('utf8'),
clean_dimension_value.encode('utf8'))
clean_dimension_name,
clean_dimension_value)
if start_timestamp is not None:
where_clause += " and time >= " + str(int(start_timestamp *
@@ -358,7 +362,7 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
for value in dim_value_set:
json_dim_value_list.append({u'dimension_value': value})
json_dim_value_list = sorted(json_dim_value_list)
json_dim_value_list = sorted(json_dim_value_list, key=lambda x: x[u'dimension_value'])
return json_dim_value_list
def _build_serie_dimension_values_from_v0_11_0(self, series_names, dimension_name):
@@ -392,7 +396,7 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
for value in dim_value_set:
json_dim_value_list.append({u'dimension_value': value})
json_dim_value_list = sorted(json_dim_value_list)
json_dim_value_list = sorted(json_dim_value_list, key=lambda x: x[u'dimension_value'])
return json_dim_value_list
def _build_serie_dimension_names(self, series_names):
@@ -535,7 +539,7 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
entry = {u'name': name[0]}
json_metric_list.append(entry)
json_metric_list = sorted(json_metric_list)
json_metric_list = sorted(json_metric_list, key=lambda k: k['name'])
return json_metric_list
def _get_dimensions(self, tenant_id, region, name, dimensions):
@@ -844,13 +848,13 @@ class MetricsRepository(metrics_repository.AbstractMetricsRepository):
reason, reason_data, sub_alarms, tenant_id
from alarm_state_history
"""
tenant_id = tenant_id if PY3 else tenant_id.encode('utf-8')
where_clause = (
" where tenant_id = '{}' ".format(tenant_id.encode('utf8')))
" where tenant_id = '{}' ".format(tenant_id))
alarm_id_where_clause_list = (
[" alarm_id = '{}' ".format(id.encode('utf8'))
for id in alarm_id_list])
[" alarm_id = '{}' ".format(alarm_id if PY3 else alarm_id.encode('utf8'))
for alarm_id in alarm_id_list])
alarm_id_where_clause = " or ".join(alarm_id_where_clause_list)

View File

@@ -304,19 +304,21 @@ class AlarmsRepository(sql_repository.SQLRepository,
query = (query
.where(a.c.lifecycle_state ==
bindparam('b_lifecycle_state')))
parms['b_lifecycle_state'] = query_parms['lifecycle_state'].encode('utf8')
parms['b_lifecycle_state'] = query_parms['lifecycle_state'] \
if six.PY3 else query_parms['lifecycle_state'].encode('utf8')
if 'link' in query_parms:
query = query.where(a.c.link == bindparam('b_link'))
parms['b_link'] = query_parms['link'].encode('utf8')
parms['b_link'] = query_parms['link'] if six.PY3 \
else query_parms['link'].encode('utf8')
if 'state_updated_start_time' in query_parms:
query = (query
.where(a.c.state_updated_at >=
bindparam('b_state_updated_at')))
date_str = query_parms['state_updated_start_time'].encode('utf8') if six.PY2 else \
query_parms['state_updated_start_time']
date_str = query_parms['state_updated_start_time'] if six.PY3 \
else query_parms['state_updated_start_time'].encode('utf8')
date_param = datetime.strptime(date_str,
'%Y-%m-%dT%H:%M:%S.%fZ')
parms['b_state_updated_at'] = date_param
@@ -376,7 +378,6 @@ class AlarmsRepository(sql_repository.SQLRepository,
.select_from(sub_query_from)
.distinct())
query = query.where(a.c.id.in_(sub_query))
order_columns = []
if 'sort_by' in query_parms:
columns_mapper = \
@@ -386,13 +387,13 @@ class AlarmsRepository(sql_repository.SQLRepository,
'state_updated_timestamp': a.c.state_updated_at,
'updated_timestamp': a.c.updated_at,
'created_timestamp': a.c.created_at,
'severity': models.field_sort(ad.c.severity, map(text, ["'LOW'",
"'MEDIUM'",
"'HIGH'",
"'CRITICAL'"])),
'state': models.field_sort(a.c.state, map(text, ["'OK'",
"'UNDETERMINED'",
"'ALARM'"]))}
'severity': models.field_sort(ad.c.severity, list(map(text, ["'LOW'",
"'MEDIUM'",
"'HIGH'",
"'CRITICAL'"]))),
'state': models.field_sort(a.c.state, list(map(text, ["'OK'",
"'UNDETERMINED'",
"'ALARM'"])))}
order_columns, received_cols = self._remap_columns(
query_parms['sort_by'], columns_mapper)

View File

@@ -217,8 +217,8 @@ class TestRepoMetricsInfluxDB(base.BaseTestCase):
mock_client.query.assert_called_once_with(
'show tag values from "custom_metric" with key = "hostname"'
' where _tenant_id = \'{tenant}\''
' and _region = \'{region}\' '.format(tenant=b'38dc2a2549f94d2e9a4fa1cc45a4970c',
region=b'useast'))
' and _region = \'{region}\' '.format(tenant='38dc2a2549f94d2e9a4fa1cc45a4970c',
region='useast'))
@patch("monasca_api.common.repositories.influxdb."
"metrics_repository.client.InfluxDBClient")

View File

@@ -112,9 +112,9 @@ def get_query_param(req, param_name, required=False, default_val=None):
params = falcon.uri.parse_query_string(req.query_string)
if param_name in params:
if isinstance(params[param_name], list):
param_val = params[param_name][0].decode('utf8')
param_val = encodeutils.safe_decode(params[param_name][0], 'utf8')
else:
param_val = params[param_name].decode('utf8')
param_val = encodeutils.safe_decode(params[param_name], 'utf8')
return param_val
else:
@@ -325,7 +325,7 @@ def validate_query_dimensions(dimensions):
def paginate(resource, uri, limit):
parsed_uri = urlparse.urlparse(uri)
self_link = build_base_uri(parsed_uri)
self_link = encodeutils.safe_decode(build_base_uri(parsed_uri), 'utf8')
old_query_params = _get_old_query_params(parsed_uri)
@@ -337,7 +337,7 @@ def paginate(resource, uri, limit):
if 'id' in resource[limit - 1]:
new_offset = resource[limit - 1]['id']
next_link = build_base_uri(parsed_uri)
next_link = encodeutils.safe_decode(build_base_uri(parsed_uri), 'utf8')
new_query_params = [u'offset' + '=' + urlparse.quote(
new_offset.encode('utf8'), safe='')]
@@ -348,9 +348,9 @@ def paginate(resource, uri, limit):
next_link += '?' + '&'.join(new_query_params)
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')},
u'href': self_link},
{u'rel': u'next',
u'href': next_link.decode('utf8')}]),
u'href': next_link}]),
u'elements': resource[:limit]}
else:
@@ -368,7 +368,7 @@ def paginate_with_no_id(dictionary_list, uri, offset, limit):
value list.
"""
parsed_uri = urlparse.urlparse(uri)
self_link = build_base_uri(parsed_uri)
self_link = encodeutils.safe_decode(build_base_uri(parsed_uri), 'utf-8')
old_query_params = _get_old_query_params(parsed_uri)
if old_query_params:
@@ -385,11 +385,10 @@ def paginate_with_no_id(dictionary_list, uri, offset, limit):
# Then truncate it with limit
truncated_list_offset_limit = truncated_list_offset[:limit]
links = [{u'rel': u'self', u'href': self_link.decode('utf8')}]
links = [{u'rel': u'self', u'href': self_link}]
if len(truncated_list_offset) > limit:
new_offset = truncated_list_offset_limit[limit - 1].values()[0]
next_link = build_base_uri(parsed_uri)
new_offset = list(truncated_list_offset_limit[limit - 1].values())[0]
next_link = encodeutils.safe_decode(build_base_uri(parsed_uri), 'utf-8')
new_query_params = [u'offset' + '=' + new_offset]
_get_old_query_params_except_offset(new_query_params, parsed_uri)
@@ -397,13 +396,13 @@ def paginate_with_no_id(dictionary_list, uri, offset, limit):
if new_query_params:
next_link += '?' + '&'.join(new_query_params)
links.append({u'rel': u'next', u'href': next_link.decode('utf8')})
links.append({u'rel': u'next', u'href': next_link})
resource = {u'links': links,
u'elements': truncated_list_offset_limit}
else:
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')}]),
u'href': self_link}]),
u'elements': dictionary_list}
return resource
@@ -457,15 +456,15 @@ def paginate_alarming(resource, uri, limit):
next_link += '?' + '&'.join(new_query_params)
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')},
u'href': encodeutils.safe_decode(self_link, 'utf8')},
{u'rel': u'next',
u'href': next_link.decode('utf8')}]),
u'href': encodeutils.safe_decode(next_link, 'utf8')}]),
u'elements': resource[:limit]}
else:
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')}]),
u'href': encodeutils.safe_decode(self_link, 'utf8')}]),
u'elements': resource}
return resource
@@ -530,6 +529,7 @@ def paginate_measurements(measurements, uri, limit):
parsed_uri = urlparse.urlparse(uri)
self_link = build_base_uri(parsed_uri)
self_link = encodeutils.safe_decode(self_link, 'utf-8')
old_query_params = _get_old_query_params(parsed_uri)
@@ -539,7 +539,7 @@ def paginate_measurements(measurements, uri, limit):
if measurements:
measurement_elements = []
resource = {u'links': [{u'rel': u'self',
u'href': self_link.decode('utf8')},
u'href': self_link},
]}
for measurement in measurements:
if len(measurement['measurements']) >= limit:
@@ -548,6 +548,7 @@ def paginate_measurements(measurements, uri, limit):
measurement['measurements'][limit - 1][0]])
next_link = build_base_uri(parsed_uri)
next_link = encodeutils.safe_decode(next_link, 'utf-8')
new_query_params = [u'offset' + '=' + urlparse.quote(
new_offset.encode('utf8'), safe='')]
@@ -558,7 +559,7 @@ def paginate_measurements(measurements, uri, limit):
next_link += '?' + '&'.join(new_query_params)
resource[u'links'].append({u'rel': u'next',
u'href': next_link.decode('utf8')})
u'href': next_link})
truncated_measurement = {u'dimensions': measurement['dimensions'],
u'measurements': (measurement
@@ -575,9 +576,8 @@ def paginate_measurements(measurements, uri, limit):
resource[u'elements'] = measurement_elements
else:
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')}]),
u'href': self_link}]),
u'elements': []}
return resource
@@ -622,10 +622,12 @@ def paginate_statistics(statistics, uri, limit):
if old_query_params:
self_link += '?' + '&'.join(old_query_params)
self_link = encodeutils.safe_decode(self_link, 'utf-8')
if statistics:
statistic_elements = []
resource = {u'links': [{u'rel': u'self',
u'href': self_link.decode('utf8')}]}
u'href': self_link}]}
for statistic in statistics:
stat_id = statistic['id']
@@ -649,8 +651,9 @@ def paginate_statistics(statistics, uri, limit):
if new_query_params:
next_link += '?' + '&'.join(new_query_params)
next_link = encodeutils.safe_decode(next_link, 'utf-8')
resource[u'links'].append({u'rel': u'next',
u'href': next_link.decode('utf8')})
u'href': next_link})
truncated_statistic = {u'dimensions': statistic['dimensions'],
u'statistics': (statistic['statistics'][:limit]),
@@ -671,7 +674,7 @@ def paginate_statistics(statistics, uri, limit):
else:
resource = {u'links': ([{u'rel': u'self',
u'href': self_link.decode('utf8')}]),
u'href': self_link}]),
u'elements': []}
return resource

View File

@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
from monasca_api.v2.reference import helpers
@@ -27,7 +29,7 @@ class Version2(object):
'id': 'v2.0',
'links': [{
'rel': 'self',
'href': req.uri.decode('utf-8')
'href': encodeutils.safe_decode(req.uri, 'utf-8')
}],
'status': 'CURRENT',
'updated': "2013-03-06T00:00:00.000Z"