Nodes CSV report implemented

Changes:

- CSV exporter for nodes added
- link to the nodes report added to the UI
- export of lists and tuples to CSV implemented; as a result, the
  enumeration functions were removed from export_utils
- aggregate functions handled in the skeleton
- lists and tuples handled in the skeleton generation util (see the
  sketch below)
- workaround for counting volume attachments removed
- new fields added to the installation info and OSWL skeletons
- unnecessary fields removed from the installation info and OSWL skeletons
- list definitions changed in skeletons
- common logic for the plugins and nodes reports extracted to helper
  functions
- node-related data (manufacturer, platform name) removed from the
  cluster report
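
A sketch of the two new skeleton conventions (hypothetical fragments;
a CSV column name is the '.'-joined key path, and a callable is
rendered by its __name__):

    {'roles': [None, 3]}    # list with a repeats value -> columns
                            # roles.0, roles.1, roles.2
    {'attachments': count}  # aggregate helper from the skeletons
                            # module -> a single column attachments.count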

Change-Id: Iacf5421895f6803acf071111ef04a13c53eba6ac
Closes-Bug: #1572490
Alexander Kislitsky 2016-04-19 20:10:04 +03:00
parent e200196347
commit 51da64b7db
11 changed files with 519 additions and 403 deletions

View File

@@ -32,8 +32,7 @@ class Production(object):
CSV_DEFAULT_FROM_DATE_DAYS = 90
CSV_DB_YIELD_PER = 100
JSON_DB_DEFAULT_LIMIT = 1000
# Number of attachments included into volumes CSV report
CSV_VOLUME_ATTACHMENTS_NUM = 1
CSV_DEFAULT_LIST_ITEMS_NUM = 5
class Testing(Production):

View File

@@ -37,6 +37,7 @@ bp = Blueprint('clusters_to_csv', __name__)
CLUSTERS_REPORT_FILE = 'clusters.csv'
PLUGINS_REPORT_FILE = 'plugins.csv'
NODES_REPORT_FILE = 'nodes.csv'
def extract_date(field_name, default_value=None, date_format='%Y-%m-%d'):
@@ -172,6 +173,23 @@ def clusters_to_csv():
return Response(result, mimetype='text/csv', headers=headers)
@bp.route('/nodes', methods=['GET'])
def nodes_to_csv():
app.logger.debug("Handling nodes_to_csv get request")
inst_structures = get_inst_structures()
exporter = StatsToCsv()
result = exporter.export_nodes(inst_structures)
# NOTE: result is a generator, but streaming may not work with some
# WSGI middlewares: http://flask.pocoo.org/docs/0.10/patterns/streaming/
app.logger.debug("Get request for nodes_to_csv handled")
headers = {
'Content-Disposition': 'attachment; filename={}'.format(
NODES_REPORT_FILE)
}
return Response(result, mimetype='text/csv', headers=headers)
@bp.route('/plugins', methods=['GET'])
def plugins_to_csv():
app.logger.debug("Handling plugins_to_csv get request")

View File

@@ -15,7 +15,9 @@
import csv
import io
import itertools
import six
from sqlalchemy.util import KeyedTuple
from fuel_analytics.api.app import app
@@ -27,9 +29,30 @@ def get_keys_paths(skeleton):
"""
def _keys_paths_helper(keys, skel):
result = []
if isinstance(skel, dict):
for k in sorted(six.iterkeys(skel)):
result.extend(_keys_paths_helper(keys + [k], skel[k]))
elif isinstance(skel, (list, tuple)):
# For lists in the skeleton a repeats value can be specified.
# For instance, to show 3 roles in the CSV report, the skeleton
# for roles would be: {'roles': [None, 3]}
if len(skel) > 1:
repeats = skel[1]
else:
repeats = app.config['CSV_DEFAULT_LIST_ITEMS_NUM']
if len(skel):
for idx in six.moves.xrange(repeats):
result.extend(_keys_paths_helper(keys + [idx], skel[0]))
else:
result.append(keys)
elif callable(skel):
# Handling aggregate functions in the skeleton. For instance,
# to show only the number of networks, the skeleton would be:
# {'networks': count}
result.append(keys + [skel])
else:
result.append(keys)
return result
@@ -38,6 +61,7 @@ def get_keys_paths(skeleton):
def get_flatten_data(keys_paths, data):
"""Creates flatten data from data by keys_paths
:param keys_paths: list of dict keys lists
:param data: dict with nested structures
:return: list of flatten data dicts
@@ -46,13 +70,25 @@ def get_flatten_data(keys_paths, data):
for key_path in keys_paths:
d = data
for key in key_path:
if callable(key):
# Handling aggregate functions in the skeleton
d = key(d)
break
if isinstance(d, dict):
d = d.get(key, None)
elif isinstance(d, KeyedTuple):
# If DB fields are specified in the query, SQLAlchemy
# returns a KeyedTuple, which inherits from tuple
d = getattr(d, key, None)
elif isinstance(d, (list, tuple)):
d = d[key] if key < len(d) else None
else:
d = getattr(d, key, None)
if d is None:
break
if isinstance(d, (list, tuple)):
# If the type of list items is not specified, the values
# are shown as joined text
flatten_data.append(' '.join(map(six.text_type, d)))
else:
flatten_data.append(d)
@@ -84,13 +120,14 @@ def construct_skeleton(data):
list_result.append(dict_result)
return list_result
else:
return data
return None
def get_data_skeleton(structures):
"""Gets skeleton by structures list
:param structures:
:return: data structure skeleton
"""Constructs and merges skeletons from raw data
:param structures: list of data
:return: skeleton for provided data structures
"""
def _merge_skeletons(lh, rh):
keys_paths = get_keys_paths(rh)
@@ -102,11 +139,13 @@ def get_data_skeleton(structures):
if isinstance(data_point, dict):
if key not in merge_point:
merge_point[key] = {}
elif isinstance(data_point, list):
elif isinstance(data_point, (list, tuple)):
if key not in merge_point:
merge_point[key] = [{}]
_merge_skeletons(merge_point[key][0],
get_data_skeleton(data_point))
merge_point[key] = [get_data_skeleton(data_point)]
else:
_merge_skeletons(merge_point[key][0],
get_data_skeleton(data_point))
break
else:
merge_point[key] = None
merge_point = merge_point[key]
@@ -114,63 +153,13 @@ def get_data_skeleton(structures):
skeleton = {}
for structure in structures:
app.logger.debug("Constructing skeleton by data: %s", structure)
app.logger.debug("Updating skeleton by %s",
construct_skeleton(structure))
_merge_skeletons(skeleton, construct_skeleton(structure))
new_skeleton = construct_skeleton(structure)
app.logger.debug("Updating skeleton by %s", new_skeleton)
_merge_skeletons(skeleton, new_skeleton)
app.logger.debug("Result skeleton is %s", skeleton)
return skeleton
def align_enumerated_field_values(values, number):
"""Fills result list by the None values, if number is greater than
values len. The first element of result is bool value
len(values) > number
:param values:
:param number:
:return: aligned list to 'number' + 1 length, filled by Nones on
empty values positions and bool value on the first place. Bool value
is True if len(values) > number
"""
if number > 0:
return ([len(values) > number] +
(values + [None] * (number - len(values)))[:number])
else:
return []
def get_enumerated_keys_paths(resource_type, skeleton_name,
nested_data_skeleton, enum_length):
"""Gets enumerated keys paths for nested data lists or tuples in the
skeleton. For example volume contains list of attachments. Only enum_length
of them will be shown in report. The first element of result is the column
for showing if number of elements in resource greater or not than
enum_length.
:param resource_type: name of resource type. used for column names
generation
:param skeleton_name: name of skeleton. used for generation of the first
column name in result
:param nested_data_skeleton: skeleton of nested structure
:param enum_length: number of enumerated nested elements
:return: list of enumerated column names
"""
app.logger.debug("Getting additional enumerated keys paths for: "
"%s, skeleton: %s", resource_type, skeleton_name)
result = []
gt_field_name = '{}_gt_{}'.format(skeleton_name, enum_length)
result.append([resource_type, gt_field_name])
skel_keys_paths = get_keys_paths(nested_data_skeleton)
for i in six.moves.xrange(enum_length):
attachment_key_paths = [resource_type, skeleton_name,
six.text_type(i)]
for key_path in skel_keys_paths:
result.append(attachment_key_paths + key_path)
app.logger.debug("Additional enumerated keys paths for: "
"%s, skeleton: %s are: %s", resource_type,
skeleton_name, result)
return result
def flatten_data_as_csv(keys_paths, flatten_data):
"""Returns flatten data in CSV
:param keys_paths: list of dict keys lists for columns names
@@ -181,7 +170,10 @@ def flatten_data_as_csv(keys_paths, flatten_data):
app.logger.debug("Saving flatten data as CSV started")
names = []
for key_path in keys_paths:
names.append('.'.join(key_path))
# Handling functions and list indexes in key_path
key_texts = (getattr(k, '__name__', six.text_type(k))
for k in key_path)
names.append('.'.join(key_texts))
output = six.BytesIO()
writer = csv.writer(output)
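
Taken together, the helpers above flatten a nested structure into CSV
rows; a minimal sketch of the intended behaviour (count stands for the
aggregate helper from the skeletons module):

    skeleton = {'a': None, 'b': [{'c': None}, 2], 'n': count}
    keys_paths = export_utils.get_keys_paths(skeleton)
    # -> [['a'], ['b', 0, 'c'], ['b', 1, 'c'], ['n', count]]
    export_utils.get_flatten_data(keys_paths,
                                  {'a': 1, 'b': [{'c': 'x'}], 'n': [7, 8, 9]})
    # -> [1, 'x', None, 3]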

View File

@@ -18,7 +18,6 @@ import itertools
import six
from fuel_analytics.api.app import app
from fuel_analytics.api.common import consts
from fuel_analytics.api.resources.utils import export_utils
from fuel_analytics.api.resources.utils.skeleton import OSWL_SKELETONS
@@ -27,21 +26,12 @@ class OswlStatsToCsv(object):
OSWL_INDEX_FIELDS = ('master_node_uid', 'cluster_id', 'resource_type')
def get_additional_volume_keys_paths(self):
num = app.config['CSV_VOLUME_ATTACHMENTS_NUM']
return export_utils.get_enumerated_keys_paths(
consts.OSWL_RESOURCE_TYPES.volume, 'volume_attachment',
OSWL_SKELETONS['volume_attachment'], num)
def get_additional_keys_paths(self, resource_type):
# Additional key paths for resource type info
resource_additional_key_paths = [[resource_type, 'is_added'],
[resource_type, 'is_modified'],
[resource_type, 'is_removed']]
if resource_type == consts.OSWL_RESOURCE_TYPES.volume:
resource_additional_key_paths += \
self.get_additional_volume_keys_paths()
return resource_additional_key_paths
"""Returns additional key paths for resource type info."""
return [[resource_type, 'is_added'],
[resource_type, 'is_modified'],
[resource_type, 'is_removed']]
def get_resource_keys_paths(self, resource_type):
"""Gets key paths for resource type. csv key paths is combination
@@ -79,20 +69,6 @@ class OswlStatsToCsv(object):
is_removed = id_val in removed_ids
result = [is_added, is_modified, is_removed]
# Handling nested lists and tuples
if resource_type == consts.OSWL_RESOURCE_TYPES.volume:
flatten_attachments = []
skeleton = OSWL_SKELETONS['volume_attachment']
enum_length = (app.config['CSV_VOLUME_ATTACHMENTS_NUM'] *
len(skeleton))
attachment_keys_paths = export_utils.get_keys_paths(skeleton)
for attachment in resource.get('attachments', []):
flatten_attachment = export_utils.get_flatten_data(
attachment_keys_paths, attachment)
flatten_attachments.extend(flatten_attachment)
result += export_utils.align_enumerated_field_values(
flatten_attachments, enum_length)
return result
def handle_empty_version_info(self, oswl, clusters_versions):
@@ -114,8 +90,6 @@ class OswlStatsToCsv(object):
if oswl.version_info:
return
# self._add_oswl_to_clusters_versions_cache(oswl, clusters_versions)
mn_uid = oswl.master_node_uid
cluster_id = oswl.cluster_id

View File

@@ -14,6 +14,11 @@
from fuel_analytics.api.common import consts
def count(items):
return len(items) if items is not None else None
INSTALLATION_INFO_SKELETON = {
'structure': {
'allocated_nodes_num': None,
@@ -79,25 +84,65 @@ INSTALLATION_INFO_SKELETON = {
'is_customized': None,
'mode': None,
'net_provider': None,
'node_groups': [{'id': None, 'nodes': [{}]}],
'node_groups': [{'id': None}],
'nodes': [
{
'bond_interfaces': [
{'id': None, 'slaves': [{}]}
],
'bond_interfaces': count,
'nic_interfaces': count,
'error_type': None,
'group_id': None,
'id': None,
'manufacturer': None,
'nic_interfaces': [{'id': None}],
'online': None,
'os': None,
'pending_addition': None,
'pending_deletion': None,
'pending_roles': [{}],
'roles': [None],
'pending_roles': [None],
'platform_name': None,
'roles': [{}],
'status': None
'status': None,
'meta': {
'cpu': {
'real': None,
'total': None,
'spec': [
{
'frequency': None,
'model': None,
},
10  # number of items shown
]
},
'memory': {
'slots': None,
'total': None,
'maximum_capacity': None,
'devices': [
{
'frequency': None,
'type': None,
'size': None
},
10  # number of items shown
]
},
'disks': [
{
'name': None,
'removable': None,
'model': None,
'size': None
},
10  # number of items shown
],
'system': {
'product': None,
'family': None,
'version': None,
'manufacturer': None
},
'interfaces': count
}
}
],
'nodes_num': None,
@@ -116,36 +161,30 @@ INSTALLATION_INFO_SKELETON = {
'releases': [{
'deployment_scripts_path': None,
'repository_path': None,
'mode': [],
'mode': [None],
'os': None,
'version': None,
}],
'fuel_version': None,
'package_version': None,
'is_hotpluggable': None,
'groups': [None],
'licenses': [None]
}
],
'release': {'name': None, 'os': None, 'version': None},
'release': {'name': None, 'os': None},
'status': None
}
],
'clusters_num': None,
'fuel_release': {
'api': None,
'astute_sha': None,
'build_id': None,
'build_number': None,
'feature_groups': [{}],
'fuellib_sha': None,
'fuel-library_sha': None,
'fuelmain_sha': None,
'nailgun_sha': None,
'ostf_sha': None,
'fuel-ostf_sha': None,
'python-fuelclient_sha': None,
'production': None,
'feature_groups': [None],
'release': None
},
'fuel_packages': [],
'fuel_packages': [None],
'unallocated_nodes_num': None,
'user_information': {
'company': None,
@@ -200,7 +239,8 @@ OSWL_SKELETONS = {
'volume_type': None,
'size': None,
'snapshot_id': None,
'tenant_id': None
'tenant_id': None,
'attachments': count
},
'volume_attachment': {
"device": None,

View File

@@ -15,7 +15,6 @@
import collections
import copy
import six
from six.moves import range
from fuel_analytics.api.app import app
from fuel_analytics.api.resources.utils import export_utils
@@ -46,37 +45,25 @@ class StatsToCsv(object):
# Removing lists of dicts from cluster skeleton
cluster_skeleton.pop('nodes', None)
cluster_skeleton.pop('node_groups', None)
cluster_skeleton.pop('installed_plugins', None)
cluster_key_paths = export_utils.get_keys_paths(cluster_skeleton)
result_key_paths = cluster_key_paths + structure_key_paths
def enumerated_field_keys(field_name, number):
"""Adds enumerated fields columns and property
field for showing case, when values will be cut
:param field_name: field name
:param number: number of enumerated fields
:return: list of cut fact column and enumerated columns names
"""
result = [['{}_gt{}'.format(field_name, number)]]
for i in range(number):
result.append(['{}_{}'.format(field_name, i)])
return result
# Handling enumeration of manufacturers names
result_key_paths.extend(enumerated_field_keys('nodes_manufacturer',
self.MANUFACTURERS_NUM))
# Handling enumeration of platform names
result_key_paths.extend(enumerated_field_keys('nodes_platform_name',
self.PLATFORM_NAMES_NUM))
# Handling network verification check
result_key_paths.append([self.NETWORK_VERIFICATION_COLUMN])
app.logger.debug("Cluster keys paths got")
return structure_key_paths, cluster_key_paths, result_key_paths
def _get_subcluster_keys_paths(self, skeleton):
key_paths = export_utils.get_keys_paths(skeleton)
structure_key_paths = [['master_node_uid'],
['structure', 'fuel_packages']]
cluster_key_paths = [['cluster_id'], ['cluster_fuel_version']]
result_key_paths = key_paths + cluster_key_paths + structure_key_paths
return structure_key_paths, cluster_key_paths, \
key_paths, result_key_paths
def get_plugin_keys_paths(self):
app.logger.debug("Getting plugin keys paths")
structure_skeleton = copy.deepcopy(INSTALLATION_INFO_SKELETON)
@@ -84,15 +71,19 @@ class StatsToCsv(object):
plugin_skeleton = clusters[0]['installed_plugins'][0]
plugin_skeleton.pop('releases', None)
plugin_key_paths = export_utils.get_keys_paths(plugin_skeleton)
structure_key_paths = [['master_node_uid'],
['structure', 'fuel_packages']]
cluster_key_paths = [['cluster_id'], ['cluster_fuel_version']]
result_key_paths = plugin_key_paths + cluster_key_paths + \
structure_key_paths
result = self._get_subcluster_keys_paths(plugin_skeleton)
app.logger.debug("Plugin keys paths got")
return structure_key_paths, cluster_key_paths, \
plugin_key_paths, result_key_paths
return result
def get_node_keys_paths(self):
app.logger.debug("Getting node keys paths")
structure_skeleton = copy.deepcopy(INSTALLATION_INFO_SKELETON)
clusters = structure_skeleton['structure']['clusters']
node_skeleton = clusters[0]['nodes'][0]
result = self._get_subcluster_keys_paths(node_skeleton)
app.logger.debug("Node keys paths got")
return result
def build_action_logs_idx(self, action_logs):
app.logger.debug("Building action logs index started")
@@ -117,21 +108,6 @@ class StatsToCsv(object):
app.logger.debug("Getting flatten clusters info is started")
action_logs_idx = self.build_action_logs_idx(action_logs)
def extract_nodes_fields(field, nodes):
"""Extracts fields values from nested nodes dicts
:param field: field name
:param nodes: nodes data list
:return: set of extracted fields values from nodes
"""
result = set([d.get(field) for d in nodes])
return filter(lambda x: x is not None, result)
def extract_nodes_manufacturers(nodes):
return extract_nodes_fields('manufacturer', nodes)
def extract_nodes_platform_name(nodes):
return extract_nodes_fields('platform_name', nodes)
for inst_structure in inst_structures:
try:
structure = inst_structure.structure
@@ -144,19 +120,6 @@ class StatsToCsv(object):
flatten_cluster = export_utils.get_flatten_data(
cluster_keys_paths, cluster)
flatten_cluster.extend(flatten_structure)
nodes = cluster.get('nodes', [])
# Adding enumerated manufacturers
manufacturers = extract_nodes_manufacturers(nodes)
flatten_cluster += export_utils.\
align_enumerated_field_values(manufacturers,
self.MANUFACTURERS_NUM)
# Adding enumerated platforms
platform_names = extract_nodes_platform_name(nodes)
flatten_cluster += export_utils.\
align_enumerated_field_values(platform_names,
self.PLATFORM_NAMES_NUM)
# Adding network verification status
idx = export_utils.get_index(
@@ -190,7 +153,28 @@ class StatsToCsv(object):
:param inst_structures: list of installation structures
:return: list of flatten plugins info
"""
app.logger.debug("Getting flatten plugins info started")
return self._get_flatten_subcluster_data(
'installed_plugins',
structure_keys_paths,
cluster_keys_paths,
plugin_keys_paths,
inst_structures
)
def _get_flatten_subcluster_data(self, data_path, structure_keys_paths,
cluster_keys_paths, keys_paths,
inst_structures):
"""Gets flatten data form clusters from installation
structures collection
:param structure_keys_paths: list of keys paths in the
installation structure
:param cluster_keys_paths: list of keys paths in the cluster
:param keys_paths: list of keys paths in the data
:param inst_structures: list of installation structures
:return: list of flatten data
"""
app.logger.debug("Getting flatten %s info started", data_path)
for inst_structure in inst_structures:
try:
@@ -205,22 +189,42 @@ class StatsToCsv(object):
cluster.get('fuel_version')
flatten_cluster = export_utils.get_flatten_data(
cluster_keys_paths, cluster)
plugins = cluster.pop('installed_plugins', [])
for plugin in plugins:
flatten_plugin = export_utils.get_flatten_data(
plugin_keys_paths, plugin)
flatten_plugin.extend(flatten_cluster)
flatten_plugin.extend(flatten_structure)
yield flatten_plugin
data = cluster.pop(data_path, [])
for item in data:
flatten_data = export_utils.get_flatten_data(
keys_paths, item)
flatten_data.extend(flatten_cluster)
flatten_data.extend(flatten_structure)
yield flatten_data
except Exception as e:
# Generation of report should be reliable
app.logger.error("Getting flatten plugin data failed. "
app.logger.error("Getting flatten %s data failed. "
"Installation info id: %s, "
"master node uid: %s, error: %s",
data_path,
inst_structure.id,
inst_structure.master_node_uid,
six.text_type(e))
app.logger.debug("Getting flatten plugins info finished")
app.logger.debug("Getting flatten %s info finished", data_path)
def get_flatten_nodes(self, structure_keys_paths, cluster_keys_paths,
node_keys_paths, inst_structures):
"""Gets flatten plugins data form clusters from installation
structures collection
:param structure_keys_paths: list of keys paths in the
installation structure
:param cluster_keys_paths: list of keys paths in the cluster
:param node_keys_paths: list of keys paths in the node
:param inst_structures: list of installation structures
:return: list of flatten nodes info
"""
return self._get_flatten_subcluster_data(
'nodes',
structure_keys_paths,
cluster_keys_paths,
node_keys_paths,
inst_structures
)
def export_clusters(self, inst_structures, action_logs):
app.logger.info("Export clusters info into CSV started")
@@ -245,3 +249,15 @@ class StatsToCsv(object):
csv_keys_paths, flatten_plugins)
app.logger.info("Export plugins info into CSV finished")
return result
def export_nodes(self, inst_structures):
app.logger.info("Export nodes info into CSV started")
(structure_keys_paths, cluster_keys_paths,
node_keys_paths, csv_keys_paths) = self.get_node_keys_paths()
flatten_nodes = self.get_flatten_nodes(
structure_keys_paths, cluster_keys_paths,
node_keys_paths, inst_structures)
result = export_utils.flatten_data_as_csv(
csv_keys_paths, flatten_nodes)
app.logger.info("Export nodes info into CSV finished")
return result
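
A minimal sketch of driving the exporter outside the HTTP layer
(assuming inst_structures is an iterable of installation structures,
e.g. the result of get_inst_structures()):

    exporter = StatsToCsv()
    chunks = exporter.export_nodes(inst_structures)  # generator of CSV chunks
    with open('nodes.csv', 'wb') as report:
        for chunk in chunks:
            report.write(chunk)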

View File

@@ -14,13 +14,19 @@
# License for the specific language governing permissions and limitations
# under the License.
import six
from fuel_analytics.test.base import BaseTest
from fuel_analytics.api.resources.utils import export_utils
class O(object):
"""Helper object."""
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
class ExportUtilsTest(BaseTest):
def test_get_key_paths(self):
@@ -32,32 +38,36 @@ class ExportUtilsTest(BaseTest):
paths = export_utils.get_keys_paths(skeleton)
self.assertListEqual([['a', 'e'], ['a', 'g']], paths)
skeleton = [{'a': 'b', 'c': 'd'}]
paths = export_utils.get_keys_paths(skeleton)
self.assertListEqual([[]], paths)
def test_get_key_paths_for_lists(self):
skeleton = {'a': [{'b': None}, 2], 'c': [None, 2]}
actual = export_utils.get_keys_paths(skeleton)
expected = [['a', 0, 'b'], ['a', 1, 'b'], ['c', 0], ['c', 1]]
self.assertListEqual(expected, actual)
skeleton = {'h': [{'a': 'b', 'c': 'd'}, 1], 't': None}
actual = export_utils.get_keys_paths(skeleton)
self.assertListEqual([['h', 0, 'a'], ['h', 0, 'c'], ['t']], actual)
def test_get_key_paths_for_empty_lists(self):
skeleton = {'h': [], 't': None}
actual = export_utils.get_keys_paths(skeleton)
self.assertListEqual([['h'], ['t']], actual)
def test_get_flatten_data(self):
class O(object):
def __init__(self, a, c, x):
self.a = a
self.c = c
self.x = x
data = [
{'a': 'b', 'c': {'e': 2.1}},
{'a': 'ee\nxx', 'c': {'e': 3.1415}, 'x': ['z', 'zz']},
{'a': 'b', 'b': {'e': 2.1}},
{'a': 'ee\nxx', 'b': {'e': 3.1415}, 'c': ['z', 'zz']},
O('y', {'e': 44}, None),
O('yy', {'e': 45}, ['b', 'e'])
]
skeleton = {'a': None, 'b': {'e': None}, 'c': [None, 2]}
expected_flatten_data = [
['b', 2.1, None],
['ee\nxx', 3.1415, 'z zz'],
['y', 44, None],
['yy', 45, 'b e']
['b', 2.1, None, None],
['ee\nxx', 3.1415, 'z', 'zz'],
['y', 44, None, None],
['yy', 45, 'b', 'e']
]
skeleton = export_utils.get_data_skeleton(data)
key_paths = export_utils.get_keys_paths(skeleton)
for idx, expected in enumerate(expected_flatten_data):
@@ -68,6 +78,69 @@ class ExportUtilsTest(BaseTest):
actual = export_utils.get_flatten_data(key_paths, data)
self.assertListEqual(expected_flatten_data[idx], actual)
def test_get_flatten_data_for_functions(self):
skeleton = {'a': None, 'b': len, 'c': max}
data = [
O('y', [1, 2], [0, 42, -1]),
{'a': 'yy', 'b': {'e': 45}, 'c': ['z', 'e']}
]
expected_flatten_data = [
['y', 2, 42],
['yy', 1, 'z']
]
key_paths = export_utils.get_keys_paths(skeleton)
for idx, expected in enumerate(expected_flatten_data):
actual = export_utils.get_flatten_data(key_paths, data[idx])
self.assertEqual(expected, actual)
for idx, data in enumerate(data):
actual = export_utils.get_flatten_data(key_paths, data)
self.assertEqual(expected_flatten_data[idx], actual)
def test_get_flatten_data_for_list(self):
b_repeats = 1
e_repeats = 2
skeleton = {
'a': None,
'b': [
{'d': None, 'e': [{'f': None}, e_repeats]},
b_repeats
],
'c': []
}
expected_keys = [
['a'],
['b', 0, 'd'], ['b', 0, 'e', 0, 'f'], ['b', 0, 'e', 1, 'f'],
['c']
]
self.assertEqual(expected_keys, export_utils.get_keys_paths(skeleton))
data = [
O('a_val_o', [{'d': 'd_0_o', 'e': [{'f': 'f_0_o'}]}],
['c_o_0', 'c_o_1']),
{'a': 'a_val', 'b': [{'d': 'd_0', 'e': []}, {'d': 'ignored'}],
'c': 'c_val'}
]
expected_flatten_data = [
['a_val_o', 'd_0_o', 'f_0_o', None, 'c_o_0 c_o_1'],
['a_val', 'd_0', None, None, 'c_val'],
]
key_paths = export_utils.get_keys_paths(skeleton)
for idx, expected in enumerate(expected_flatten_data):
actual = export_utils.get_flatten_data(key_paths, data[idx])
self.assertEqual(expected, actual)
for idx, data in enumerate(data):
actual = export_utils.get_flatten_data(key_paths, data)
self.assertEqual(expected_flatten_data[idx], actual)
def test_get_flatten_as_csv_unicode(self):
data = [
{'a': u'b'},
@@ -90,23 +163,29 @@ class ExportUtilsTest(BaseTest):
def test_dict_construct_skeleton(self):
data = {'a': 'b'}
skeleton = export_utils.construct_skeleton(data)
self.assertDictEqual(data, skeleton)
expected = {'a': None}
actual = export_utils.construct_skeleton(data)
self.assertDictEqual(expected, actual)
data = {'a': 'b', 'x': None}
skeleton = export_utils.construct_skeleton(data)
self.assertDictEqual(data, skeleton)
expected = {'a': None, 'x': None}
actual = export_utils.construct_skeleton(data)
self.assertDictEqual(expected, actual)
def test_list_construct_skeleton(self):
data = ['a', 'b', 'c']
skeleton = export_utils.construct_skeleton(data)
self.assertListEqual([], skeleton)
actual = export_utils.construct_skeleton(data)
self.assertListEqual([], actual)
data = []
actual = export_utils.construct_skeleton(data)
self.assertListEqual([], actual)
data = [{'a': None}, {'b': 'x'}, {'a': 4, 'c': 'xx'}, {}]
skeleton = export_utils.construct_skeleton(data)
self.assertListEqual(
sorted(skeleton[0].keys()),
sorted(['a', 'b', 'c'])
actual = export_utils.construct_skeleton(data)
self.assertItemsEqual(
actual[0].keys(),
['a', 'b', 'c']
)
data = [
@@ -117,9 +196,51 @@ class ExportUtilsTest(BaseTest):
['a'],
{'p': 'q'}
]
skeleton = export_utils.construct_skeleton(data)
self.assertListEqual([[[], {'a': 'b', 'x': 'z'}], {'p': 'q'}],
skeleton)
actual = export_utils.construct_skeleton(data)
expected = [[[], {'a': None, 'x': None}], {'p': None}]
self.assertListEqual(expected, actual)
def test_construct_skeleton(self):
data = {'a': 'b', 'c': [[{'d': 'e'}], 'f']}
expected = {'a': None, 'c': [[{'d': None}]]}
actual = export_utils.construct_skeleton(data)
self.assertEqual(expected, actual)
data = {'a': {'b': []}}
expected = {'a': {'b': []}}
actual = export_utils.construct_skeleton(data)
self.assertEqual(expected, actual)
data = {'a': {'b': [{'c': 'd'}, {'e': 'f'}]}}
expected = {'a': {'b': [{'c': None, 'e': None}]}}
actual = export_utils.construct_skeleton(data)
self.assertEqual(expected, actual)
def test_get_skeleton_for_dicts(self):
data = [
{'ci': {'p': True, 'e': '@', 'n': 'n'}},
# reducing fields in nested dict
{'ci': {'p': False}},
# adding new value
{'a': 'b'},
# checking empty dict
{}
]
actual = export_utils.get_data_skeleton(data)
expected = {'a': None, 'ci': {'p': None, 'e': None, 'n': None}}
self.assertEqual(expected, actual)
def test_get_skeleton_for_lists(self):
data = [
{'c': [{'s': 'v', 'n': 2}, {'s': 'vv', 'n': 22}]},
# adding new value in the list
{'c': [{'z': 'p'}]},
# checking empty list
{'c': []},
]
actual = export_utils.get_data_skeleton(data)
expected = {'c': [{'s': None, 'n': None, 'z': None}]}
self.assertEqual(expected, actual)
def test_get_skeleton(self):
data = [
@@ -135,27 +256,10 @@ class ExportUtilsTest(BaseTest):
# adding new value
{'a': 'b'},
]
skeleton = export_utils.get_data_skeleton(data)
self.assertDictEqual(
{'a': None, 'c': [{'s': None, 'n': None, 'z': None}],
'ci': {'p': None, 'e': None, 'n': None}},
skeleton)
def test_align_enumerated_field_values(self):
# Data for checks in format (source, num, expected)
checks = [
([], 0, []),
([], 1, [False, None]),
(['a'], 1, [False, 'a']),
(['a'], 2, [False, 'a', None]),
(['a', 'b'], 2, [False, 'a', 'b']),
(['a', 'b'], 1, [True, 'a'])
]
for source, num, expected in checks:
self.assertListEqual(
expected,
export_utils.align_enumerated_field_values(source, num)
)
actual = export_utils.get_data_skeleton(data)
expected = {'a': None, 'ci': {'p': None, 'e': None, 'n': None},
'c': [{'s': None, 'n': None, 'z': None}]}
self.assertEqual(expected, actual)
def test_get_index(self):
@@ -172,21 +276,3 @@ class ExportUtilsTest(BaseTest):
]
for obj, fields, idx in checks:
self.assertTupleEqual(idx, export_utils.get_index(obj, *fields))
def test_get_enumerated_keys_paths(self):
resource_type = 'res_type'
skeleton_name = 'test_skel'
enum_num = 2
skeleton = {'id': None, 'attr': None, 'value': None}
keys_paths = export_utils.get_enumerated_keys_paths(
resource_type, skeleton_name, skeleton, enum_num)
# Checking gt field in keys paths
self.assertEqual(len(keys_paths), enum_num * len(skeleton) + 1)
self.assertEqual(keys_paths[0],
['res_type', 'test_skel_gt_{}'.format(enum_num)])
# Checking all keys paths present
for key in six.iterkeys(skeleton):
for i in six.moves.range(enum_num):
keys_path = [resource_type, skeleton_name,
six.text_type(i), key]
self.assertIn(keys_path, keys_paths)

View File

@@ -0,0 +1,125 @@
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import csv
import datetime
import six
import types
from fuel_analytics.test.api.resources.utils.inst_structure_test import \
InstStructureTest
from fuel_analytics.test.base import DbTest
from fuel_analytics.api.app import app
from fuel_analytics.api.app import db
from fuel_analytics.api.db import model
from fuel_analytics.api.resources.utils.stats_to_csv import StatsToCsv
class NodesToCsvExportTest(InstStructureTest, DbTest):
def test_get_node_keys_paths(self):
exporter = StatsToCsv()
_, _, _, csv_keys_paths = exporter.get_node_keys_paths()
self.assertTrue(['cluster_id'] in csv_keys_paths)
self.assertTrue(['cluster_fuel_version'] in csv_keys_paths)
self.assertTrue(['master_node_uid'] in csv_keys_paths)
self.assertTrue(['os'] in csv_keys_paths)
self.assertTrue(['roles', 0] in csv_keys_paths)
self.assertTrue(['pending_roles', 0] in csv_keys_paths)
self.assertTrue(['status'] in csv_keys_paths)
self.assertTrue(['online'] in csv_keys_paths)
self.assertTrue(['platform_name'] in csv_keys_paths)
self.assertTrue(['manufacturer'] in csv_keys_paths)
self.assertTrue(['error_type'] in csv_keys_paths)
def test_get_flatten_nodes(self):
installations_num = 10
inst_structures = self.get_saved_inst_structures(
installations_num=installations_num)
exporter = StatsToCsv()
structure_paths, cluster_paths, node_paths, csv_paths = \
exporter.get_node_keys_paths()
flatten_nodes = exporter.get_flatten_nodes(
structure_paths, cluster_paths, node_paths, inst_structures)
self.assertTrue(isinstance(flatten_nodes, types.GeneratorType))
pos_mn_uid = csv_paths.index(['master_node_uid'])
pos_cluster_id = csv_paths.index(['cluster_id'])
pos_status = csv_paths.index(['status'])
for flatten_node in flatten_nodes:
self.assertIsNotNone(flatten_node[pos_mn_uid])
self.assertIsNotNone(flatten_node[pos_cluster_id])
self.assertIsNotNone(flatten_node[pos_status])
self.assertEquals(len(csv_paths), len(flatten_node))
def test_export_nodes(self):
installations_num = 100
exporter = StatsToCsv()
with app.test_request_context('/?from_date=2015-02-01'):
# Creating installation structures
inst_structures = self.get_saved_inst_structures(
installations_num=installations_num)
# Filtering installation structures
result = exporter.export_nodes(inst_structures)
self.assertTrue(isinstance(result, types.GeneratorType))
output = six.StringIO(list(result))
reader = csv.reader(output)
for _ in reader:
pass
def test_fuel_release_info_in_flatten_nodes(self):
inst_fuel_version = '8.0'
cluster_fuel_version = '7.0'
packages = ['z', 'a', 'c']
inst_structures = [
model.InstallationStructure(
master_node_uid='one',
creation_date=datetime.datetime.utcnow(),
is_filtered=False,
structure={
'fuel_release': {'release': inst_fuel_version},
'fuel_packages': packages,
'clusters': [{
'id': 1, 'nodes': [],
'fuel_version': cluster_fuel_version,
'installed_plugins': [{
'name': 'plugin_a',
'version': 'plugin_version_0',
'releases': [],
'fuel_version': ['8.0', '7.0'],
'package_version': 'package_version_0'
}],
}]
}
)
]
for structure in inst_structures:
db.session.add(structure)
db.session.flush()
exporter = StatsToCsv()
structure_paths, cluster_paths, node_paths, csv_paths = \
exporter.get_node_keys_paths()
flatten_nodes = exporter.get_flatten_nodes(
structure_paths, cluster_paths, node_paths, inst_structures)
pos_fuel_version = csv_paths.index(['cluster_fuel_version'])
pos_packages = csv_paths.index(['structure', 'fuel_packages'])
for flatten_node in flatten_nodes:
self.assertEqual(cluster_fuel_version,
flatten_node[pos_fuel_version])
self.assertEqual(' '.join(packages),
flatten_node[pos_packages])

View File

@@ -114,22 +114,16 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
resource_id in modified_ids,
resource_id in removed_ids
]
# In case of CSV_VOLUME_ATTACHMENTS_NUM > 0
# additional info of volume will be extended by attachments
# info. Attachments handling is tested in the method
# test_volumes_attachments
with mock.patch.dict(app.config,
{'CSV_VOLUME_ATTACHMENTS_NUM': 0}):
added_ids = set(item['id'] for item in
resource_data.get('added', []))
modified_ids = set(item['id'] for item in
resource_data.get('removed', []))
removed_ids = set(item['id'] for item in
resource_data.get('modified', []))
added_ids = set(item['id'] for item in
resource_data.get('added', []))
modified_ids = set(item['id'] for item in
resource_data.get('removed', []))
removed_ids = set(item['id'] for item in
resource_data.get('modified', []))
actual = exporter.get_additional_resource_info(
resource, oswl.resource_type,
added_ids, modified_ids, removed_ids)
actual = exporter.get_additional_resource_info(
resource, oswl.resource_type,
added_ids, modified_ids, removed_ids)
self.assertListEqual(expected, actual)
def test_export(self):
@@ -488,37 +482,6 @@ class OswlStatsToCsvTest(OswlTest, DbTest):
for key in six.iterkeys(OSWL_SKELETONS[resource_type]):
self.assertIn(key, res_data)
def test_volumes_attachments(self):
exporter = OswlStatsToCsv()
num = 100
resource_type = consts.OSWL_RESOURCE_TYPES.volume
with app.test_request_context():
oswls_saved = self.get_saved_oswls(
num, resource_type, current_num_range=(1, 1),
removed_num_range=(0, 0))
# Saving installation structures for proper oswls filtering
self.get_saved_inst_structs(oswls_saved)
oswls = list(get_oswls(resource_type))
oswl_keys_paths, vm_keys_paths, csv_keys_paths = \
exporter.get_resource_keys_paths(resource_type)
flatten_volumes = exporter.get_flatten_resources(
resource_type, oswl_keys_paths, vm_keys_paths, oswls, {})
flatten_volumes = list(flatten_volumes)
csv_att_num = app.config['CSV_VOLUME_ATTACHMENTS_NUM']
gt_field_pos = csv_keys_paths.index([
resource_type, 'volume_attachment_gt_{}'.format(csv_att_num)])
for idx, fv in enumerate(flatten_volumes):
oswl = oswls[idx]
# Checking CSV fields alignment
self.assertEqual(len(csv_keys_paths), len(fv))
# Checking gt field calculation
volume = oswl.resource_data['current'][0]
self.assertEqual(fv[gt_field_pos],
len(volume['attachments']) > csv_att_num)
def test_oswl_invalid_data(self):
exporter = OswlStatsToCsv()
num = 10

View File

@@ -38,14 +38,6 @@ class StatsToCsvExportTest(InstStructureTest, DbTest):
def test_get_cluster_keys_paths(self):
exporter = StatsToCsv()
_, _, csv_keys_paths = exporter.get_cluster_keys_paths()
self.assertIn(['nodes_platform_name_gt3'], csv_keys_paths)
self.assertIn(['nodes_platform_name_0'], csv_keys_paths)
self.assertIn(['nodes_platform_name_1'], csv_keys_paths)
self.assertIn(['nodes_platform_name_2'], csv_keys_paths)
self.assertIn(['nodes_manufacturer_gt3'], csv_keys_paths)
self.assertIn(['nodes_manufacturer_0'], csv_keys_paths)
self.assertIn(['nodes_manufacturer_1'], csv_keys_paths)
self.assertIn(['nodes_manufacturer_2'], csv_keys_paths)
self.assertIn(['attributes', 'heat'], csv_keys_paths)
self.assertIn(['attributes', 'auto_assign_floating_ip'],
csv_keys_paths)
@@ -76,17 +68,7 @@ class StatsToCsvExportTest(InstStructureTest, DbTest):
csv_keys_paths)
self.assertIn(['vmware_attributes', 'vmware_az_nova_computes_num'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_release', 'ostf_sha'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_release', 'fuel-ostf_sha'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_release', 'python-fuelclient_sha'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_release', 'fuellib_sha'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_release', 'fuel-library_sha'],
csv_keys_paths)
self.assertIn(['structure', 'fuel_packages'], csv_keys_paths)
self.assertIn(['structure', 'fuel_packages', 0], csv_keys_paths)
self.assertNotIn(['structure', 'clusters'], csv_keys_paths)
self.assertNotIn(['installed_plugins'], csv_keys_paths)
@@ -118,13 +100,8 @@ class StatsToCsvExportTest(InstStructureTest, DbTest):
self.assertTrue(isinstance(result, types.GeneratorType))
output = six.StringIO(list(result))
reader = csv.reader(output)
columns = reader.next()
# Checking enumerated columns are present in the output
self.assertIn('nodes_manufacturer_0', columns)
self.assertIn('nodes_manufacturer_gt3', columns)
self.assertIn('nodes_platform_name_0', columns)
self.assertIn('nodes_platform_name_gt3', columns)
# Pop column names from reader
_ = reader.next()
# Checking reading result CSV
for _ in reader:
@@ -227,31 +204,6 @@ class StatsToCsvExportTest(InstStructureTest, DbTest):
for flatten_cluster in flatten_clusters[2:]:
self.assertIsNone(flatten_cluster[nv_column_pos])
def test_platform_names(self):
exporter = StatsToCsv()
inst_structures = self.generate_inst_structures(
clusters_num_range=(1, 1))
inst_structure = list(inst_structures)[0]
nodes = []
for i in six.moves.range(exporter.PLATFORM_NAMES_NUM + 1):
node = self.generate_node()
node['platform_name'] = i
# to be ensure manufacturers all the same
node['manufacturer'] = 'x'
nodes.append(node)
inst_structure.structure['clusters'][0]['nodes'] = nodes
db.session.add(inst_structure)
db.session.commit()
structure_keys_paths, cluster_keys_paths, csv_keys_paths = \
exporter.get_cluster_keys_paths()
flatten_clusters = exporter.get_flatten_clusters(
structure_keys_paths, cluster_keys_paths,
[inst_structure], [])
flatten_cluster = list(flatten_clusters)[0]
pos = csv_keys_paths.index(['nodes_platform_name_gt3'])
self.assertEqual(True, flatten_cluster[pos])
def test_vmware_attributes(self):
exporter = StatsToCsv()
inst_structures = self.generate_inst_structures(
@@ -260,56 +212,6 @@ class StatsToCsvExportTest(InstStructureTest, DbTest):
for _ in result:
pass
def test_fuel_release(self):
exporter = StatsToCsv()
# Calculating positions of the params in the flatten data
structure_keys_paths, cluster_keys_paths, csv_keys_paths = \
exporter.get_cluster_keys_paths()
ostf_pos = csv_keys_paths.index(['structure', 'fuel_release',
'ostf_sha'])
f_ostf_pos = csv_keys_paths.index(['structure', 'fuel_release',
'fuel-ostf_sha'])
f_lib_pos = csv_keys_paths.index(['structure', 'fuel_release',
'fuellib_sha'])
f_libbrary_pos = csv_keys_paths.index(['structure', 'fuel_release',
'fuel-library_sha'])
f_cli_pos = csv_keys_paths.index(['structure', 'fuel_release',
'python-fuelclient_sha'])
# Checking fuel_release structure before 2015.04
inst_structures = self.generate_inst_structures(
release_generators=('_fuel_release_gen',)
)
flatten_clusters = exporter.get_flatten_clusters(
structure_keys_paths, cluster_keys_paths,
inst_structures, [])
for flatten_cluster in flatten_clusters:
self.assertIsNotNone(flatten_cluster[ostf_pos])
self.assertIsNone(flatten_cluster[f_ostf_pos])
self.assertIsNotNone(flatten_cluster[f_lib_pos])
self.assertIsNone(flatten_cluster[f_libbrary_pos])
self.assertIsNone(flatten_cluster[f_cli_pos])
# Checking fuel_release structure after 2015.04
inst_structures = self.generate_inst_structures(
release_generators=('_fuel_release_gen_2015_04',)
)
flatten_clusters = exporter.get_flatten_clusters(
structure_keys_paths, cluster_keys_paths,
inst_structures, [])
for flatten_cluster in flatten_clusters:
self.assertIsNone(flatten_cluster[ostf_pos])
self.assertIsNotNone(flatten_cluster[f_ostf_pos])
self.assertIsNone(flatten_cluster[f_lib_pos])
self.assertIsNotNone(flatten_cluster[f_libbrary_pos])
self.assertIsNotNone(flatten_cluster[f_cli_pos])
def test_cluster_invalid_data(self):
exporter = StatsToCsv()
num = 10

View File

@@ -52,6 +52,7 @@
<ul>
<li><button class="btn-link" id="clusters">Installations info</button></li>
<li><button class="btn-link" id="plugins">Plugins</button></li>
<li><button class="btn-link" id="nodes">Nodes</button></li>
<li><button class="btn-link" id="flavor">Flavors</button></li>
<li><button class="btn-link" id="image">Images</button></li>
<li><button class="btn-link" id="keystone_user">Keystone users</button></li>