Refactor nailgun.statistics package

Two subpackages were created (one for collecting statistics on Fuel
infrastructure usage and one for collecting OSWL info) in order to
separate code dedicated to logically different functions. Utility
infrastructure (universal utility functions and custom error
definitions) is left at the statistics package level so that it can be
reused by the subpackages' code. Tests were refactored to reflect the
new separation.

Change-Id: I89509d5fcd224276c3cd6c95353cea0ebca2162e
Closes-Bug: #1439268
Artem Roma 2015-04-01 18:09:58 +03:00
parent c47ba89597
commit 4603980b81
24 changed files with 2203 additions and 1998 deletions
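
For orientation, the new package layout can be read directly off the import
paths touched in this diff. The sketch below simply collects those paths; it
is inferred from the imports shown in the hunks and is not an authoritative
file listing.

# Shared utilities and custom errors stay at the statistics package level:
from nailgun.statistics import errors
from nailgun.statistics.utils import WhiteListRule
from nailgun.statistics.utils import get_attr_value

# Fuel installation-info collection moves into the fuel_statistics subpackage:
from nailgun.statistics.fuel_statistics.installation_info \
    import InstallationInfo
from nailgun.statistics.fuel_statistics.tasks_params_white_lists \
    import task_output_white_list

# OSWL collection moves into the oswl subpackage:
from nailgun.statistics.oswl import helpers
from nailgun.statistics.oswl.collector import collect
from nailgun.statistics.oswl.resources_description \
    import resources_description
from nailgun.statistics.oswl.saver import oswl_statistics_save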

View File

@@ -11,9 +11,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
WhiteListRule = namedtuple(
'WhiteListItem', ['path', 'map_to_name', 'transform_func'])

View File

@@ -0,0 +1,13 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -18,8 +18,8 @@ from nailgun.objects import ClusterCollection
from nailgun.objects import MasterNodeSettings
from nailgun.objects import NodeCollection
from nailgun.settings import settings
from nailgun.statistics.utils import _get_attr_value
from nailgun.statistics import WhiteListRule
from nailgun.statistics.utils import get_attr_value
from nailgun.statistics.utils import WhiteListRule
from nailgun import utils
@@ -185,7 +185,7 @@ class InstallationInfo(object):
result_attrs = {}
for path, map_to_name, func in white_list:
try:
result_attrs[map_to_name] = _get_attr_value(
result_attrs[map_to_name] = get_attr_value(
path, func, attributes)
except (KeyError, TypeError):
pass

View File

@@ -0,0 +1,13 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -25,7 +25,8 @@ from nailgun.objects import ClusterCollection
from nailgun.objects import OpenStackWorkloadStatsCollection
from nailgun.settings import settings
from nailgun.statistics import errors
from nailgun.statistics.oswl_saver import oswl_statistics_save
from nailgun.statistics.oswl import helpers
from nailgun.statistics.oswl.saver import oswl_statistics_save
from nailgun.statistics import utils
@@ -55,11 +56,11 @@ def collect(resource_type):
# Collect current OSWL data and update data in DB
for cluster in operational_clusters:
try:
client_provider = utils.ClientProvider(cluster)
client_provider = helpers.ClientProvider(cluster)
proxy_for_os_api = utils.get_proxy_for_cluster(cluster)
with utils.set_proxy(proxy_for_os_api):
data = utils.get_info_from_os_resource_manager(
data = helpers.get_info_from_os_resource_manager(
client_provider, resource_type)
oswl_statistics_save(cluster.id, resource_type, data)

View File

@@ -0,0 +1,215 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinderclient import client as cinder_client
from keystoneclient import discover as keystone_discover
from keystoneclient.v2_0 import client as keystone_client_v2
from keystoneclient.v3 import client as keystone_client_v3
from novaclient import client as nova_client
from nailgun import consts
from nailgun.db import db
from nailgun.logger import logger
from nailgun import objects
from nailgun.settings import settings
from nailgun.statistics.oswl.resources_description \
import resources_description
from nailgun.statistics import utils
class ClientProvider(object):
"""Initialize clients for OpenStack components
and expose them as attributes
"""
clients_version_attr_path = {
"nova": ["client", "version"],
"cinder": ["client", "version"],
"keystone": ["version"]
}
def __init__(self, cluster):
self.cluster = cluster
self._nova = None
self._cinder = None
self._keystone = None
self._credentials = None
@property
def nova(self):
if self._nova is None:
self._nova = nova_client.Client(
settings.OPENSTACK_API_VERSION["nova"],
*self.credentials,
service_type=consts.NOVA_SERVICE_TYPE.compute
)
return self._nova
@property
def cinder(self):
if self._cinder is None:
self._cinder = cinder_client.Client(
settings.OPENSTACK_API_VERSION["cinder"],
*self.credentials
)
return self._cinder
@property
def keystone(self):
if self._keystone is None:
# kwargs are universal for the v2 and v3 versions of the
# keystone client, which differ only in the tenant/project
# keyword name they accept
auth_kwargs = {
"username": self.credentials[0],
"password": self.credentials[1],
"tenant_name": self.credentials[2],
"project_name": self.credentials[2],
"auth_url": self.credentials[3]
}
self._keystone = self._get_keystone_client(auth_kwargs)
return self._keystone
def _get_keystone_client(self, auth_creds):
"""Instantiate client based on returned from keystone
server version data.
:param auth_creds: credentials for authentication which also are
parameters for client's instance initialization
:returns: instance of keystone client of appropriate version
:raises: exception if response from server contains version other than
2.x and 3.x
"""
discover = keystone_discover.Discover(**auth_creds)
for version_data in discover.version_data():
version = version_data["version"][0]
if version <= 2:
return keystone_client_v2.Client(**auth_creds)
elif version == 3:
return keystone_client_v3.Client(**auth_creds)
raise Exception("Failed to discover keystone version "
"for auth_url {0}".format(
auth_creds.get("auth_url"))
)
@property
def credentials(self):
if self._credentials is None:
access_data = objects.Cluster.get_editable_attributes(
self.cluster
)['editable']['workloads_collector']
os_user = access_data["username"]["value"]
os_password = access_data["password"]["value"]
os_tenant = access_data["tenant"]["value"]
auth_host = utils.get_mgmt_ip_of_cluster_controller(self.cluster)
auth_url = "http://{0}:{1}/{2}/".format(
auth_host, settings.AUTH_PORT,
settings.OPENSTACK_API_VERSION["keystone"])
self._credentials = (os_user, os_password, os_tenant, auth_url)
return self._credentials
def get_info_from_os_resource_manager(client_provider, resource_name):
"""Utilize clients provided by client_provider instance to retrieve
data for resource_name, description of which is stored in
resources_description data structure.
:param client_provider: object that provides instances of openstack
clients as its attributes
:param resource_name: string that contains the name of the resource
for which info should be collected from the installation
:returns: data that stores the collected info
"""
resource_description = resources_description[resource_name]
client_name = resource_description["retrieved_from_component"]
client_inst = getattr(client_provider, client_name)
client_api_version = utils.get_nested_attr(
client_inst,
client_provider.clients_version_attr_path[client_name]
)
matched_api = \
resource_description["supported_api_versions"][client_api_version]
resource_manager_name = matched_api["resource_manager_name"]
resource_manager = getattr(client_inst, resource_manager_name)
attributes_white_list = matched_api["attributes_white_list"]
additional_display_options = \
matched_api.get("additional_display_options", {})
resource_info = _get_data_from_resource_manager(
resource_manager,
attributes_white_list,
additional_display_options
)
return resource_info
def _get_data_from_resource_manager(resource_manager, attrs_white_list_rules,
additional_display_options):
data = []
display_options = {}
display_options.update(additional_display_options)
instances_list = resource_manager.list(**display_options)
for inst in instances_list:
inst_details = {}
obj_dict = \
inst.to_dict() if hasattr(inst, "to_dict") else inst.__dict__
for rule in attrs_white_list_rules:
inst_details[rule.map_to_name] = utils.get_attr_value(
rule.path, rule.transform_func, obj_dict
)
data.append(inst_details)
return data
def delete_expired_oswl_entries():
try:
deleted_rows_count = \
objects.OpenStackWorkloadStatsCollection.clean_expired_entries()
if deleted_rows_count == 0:
logger.info("There are no expired OSWL entries in db.")
db().commit()
logger.info("Expired OSWL entries are "
"successfully cleaned from db")
except Exception as e:
logger.exception("Exception while cleaning oswls entries from "
"db. Details: {0}".format(six.text_type(e)))
finally:
db.remove()

View File

@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.statistics import WhiteListRule
from nailgun.statistics.utils import WhiteListRule
resources_description = {

View File

@@ -27,7 +27,8 @@ from nailgun.db.sqlalchemy import models
from nailgun.logger import logger
from nailgun import objects
from nailgun.settings import settings
from nailgun.statistics.installation_info import InstallationInfo
from nailgun.statistics.fuel_statistics.installation_info \
import InstallationInfo
from nailgun.statistics.utils import dithered

View File

@@ -11,145 +11,40 @@
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import os
import random
import six
from contextlib import contextmanager
from cinderclient import client as cinder_client
from keystoneclient import discover as keystone_discover
from keystoneclient.v2_0 import client as keystone_client_v2
from keystoneclient.v3 import client as keystone_client_v3
from novaclient import client as nova_client
from nailgun import consts
from nailgun.db import db
from nailgun.logger import logger
from nailgun.network import manager
from nailgun import objects
from nailgun.settings import settings
from nailgun.statistics import errors
from nailgun.statistics.oswl_resources_description import resources_description
class ClientProvider(object):
"""Initialize clients for OpenStack components
and expose them as attributes
"""
clients_version_attr_path = {
"nova": ["client", "version"],
"cinder": ["client", "version"],
"keystone": ["version"]
}
def __init__(self, cluster):
self.cluster = cluster
self._nova = None
self._cinder = None
self._keystone = None
self._credentials = None
@property
def nova(self):
if self._nova is None:
self._nova = nova_client.Client(
settings.OPENSTACK_API_VERSION["nova"],
*self.credentials,
service_type=consts.NOVA_SERVICE_TYPE.compute
)
return self._nova
@property
def cinder(self):
if self._cinder is None:
self._cinder = cinder_client.Client(
settings.OPENSTACK_API_VERSION["cinder"],
*self.credentials
)
return self._cinder
@property
def keystone(self):
if self._keystone is None:
# kwargs are universal for v2 and v3 versions of
# keystone client that are different only in accepting
# of tenant/project keyword name
auth_kwargs = {
"username": self.credentials[0],
"password": self.credentials[1],
"tenant_name": self.credentials[2],
"project_name": self.credentials[2],
"auth_url": self.credentials[3]
}
self._keystone = self._get_keystone_client(auth_kwargs)
return self._keystone
def _get_keystone_client(self, auth_creds):
"""Instantiate client based on returned from keystone
server version data.
:param auth_creds: credentials for authentication which also are
parameters for client's instance initialization
:returns: instance of keystone client of appropriate version
:raises: exception if response from server contains version other than
2.x and 3.x
"""
discover = keystone_discover.Discover(**auth_creds)
for version_data in discover.version_data():
version = version_data["version"][0]
if version <= 2:
return keystone_client_v2.Client(**auth_creds)
elif version == 3:
return keystone_client_v3.Client(**auth_creds)
raise Exception("Failed to discover keystone version "
"for auth_url {0}".format(
auth_creds.get("auth_url"))
)
@property
def credentials(self):
if self._credentials is None:
access_data = objects.Cluster.get_editable_attributes(
self.cluster
)['editable']['workloads_collector']
os_user = access_data["username"]["value"]
os_password = access_data["password"]["value"]
os_tenant = access_data["tenant"]["value"]
auth_host = _get_host_for_auth(self.cluster)
auth_url = "http://{0}:{1}/{2}/".format(
auth_host, settings.AUTH_PORT,
settings.OPENSTACK_API_VERSION["keystone"])
self._credentials = (os_user, os_password, os_tenant, auth_url)
return self._credentials
WhiteListRule = namedtuple(
'WhiteListItem', ['path', 'map_to_name', 'transform_func'])
def _get_host_for_auth(cluster):
def get_mgmt_ip_of_cluster_controller(cluster):
return manager.NetworkManager._get_ip_by_network_name(
_get_online_controller(cluster),
get_online_controller(cluster),
consts.NETWORKS.management
).ip_addr
def get_proxy_for_cluster(cluster):
proxy_host = _get_online_controller(cluster).ip
proxy_host = get_online_controller(cluster).ip
proxy_port = settings.OPENSTACK_INFO_COLLECTOR_PROXY_PORT
proxy = "http://{0}:{1}".format(proxy_host, proxy_port)
return proxy
def _get_online_controller(cluster):
def get_online_controller(cluster):
online_controllers = filter(
lambda node: ("controller" in node.roles and node.online is True),
cluster.nodes
@@ -166,72 +61,7 @@ def _get_online_controller(cluster):
return controller
def get_info_from_os_resource_manager(client_provider, resource_name):
"""Utilize clients provided by client_provider instance to retrieve
data for resource_name, description of which is stored in
resources_description data structure.
:param client_provider: objects that provides instances of openstack
clients as its attributes
:param resource_name: string that contains name of resource for which
info should be collected from installation
:returns: data that store collected info
"""
resource_description = resources_description[resource_name]
client_name = resource_description["retrieved_from_component"]
client_inst = getattr(client_provider, client_name)
client_api_version = _get_nested_attr(
client_inst,
client_provider.clients_version_attr_path[client_name]
)
matched_api = \
resource_description["supported_api_versions"][client_api_version]
resource_manager_name = matched_api["resource_manager_name"]
resource_manager = getattr(client_inst, resource_manager_name)
attributes_white_list = matched_api["attributes_white_list"]
additional_display_options = \
matched_api.get("additional_display_options", {})
resource_info = _get_data_from_resource_manager(
resource_manager,
attributes_white_list,
additional_display_options
)
return resource_info
def _get_data_from_resource_manager(resource_manager, attrs_white_list_rules,
additional_display_options):
data = []
display_options = {}
display_options.update(additional_display_options)
instances_list = resource_manager.list(**display_options)
for inst in instances_list:
inst_details = {}
obj_dict = \
inst.to_dict() if hasattr(inst, "to_dict") else inst.__dict__
for rule in attrs_white_list_rules:
inst_details[rule.map_to_name] = _get_attr_value(
rule.path, rule.transform_func, obj_dict
)
data.append(inst_details)
return data
def _get_attr_value(path, func, attrs):
def get_attr_value(path, func, attrs):
"""Gets attribute value from 'attrs' by specified
'path'. In case of nested list - list of
of found values will be returned
@@ -245,7 +75,7 @@ def _get_attr_value(path, func, attrs):
result_list = []
for cur_attr in attrs:
try:
value = _get_attr_value(path[idx:], func, cur_attr)
value = get_attr_value(path[idx:], func, cur_attr)
result_list.append(value)
except (KeyError, TypeError):
pass
@@ -257,7 +87,7 @@ def _get_attr_value(path, func, attrs):
return attrs
def _get_nested_attr(obj, attr_path):
def get_nested_attr(obj, attr_path):
# prevent from error in case of empty list and
# None object
if not all([obj, attr_path]):
@@ -270,7 +100,7 @@ def _get_nested_attr(obj, attr_path):
if len(attr_path) == 1:
return attr_value
return _get_nested_attr(attr_value, attr_path[1:])
return get_nested_attr(attr_value, attr_path[1:])
@contextmanager
@@ -309,23 +139,3 @@ def set_proxy(proxy):
def dithered(medium, interval=(0.9, 1.1)):
return random.randint(int(medium * interval[0]), int(medium * interval[1]))
def delete_expired_oswl_entries():
try:
deleted_rows_count = \
objects.OpenStackWorkloadStatsCollection.clean_expired_entries()
if deleted_rows_count == 0:
logger.info("There are no expired OSWL entries in db.")
db().commit()
logger.info("Expired OSWL entries are "
"successfully cleaned from db")
except Exception as e:
logger.exception("Exception while cleaning oswls entries from "
"db. Details: {0}".format(six.text_type(e)))
finally:
db.remove()

View File

@@ -32,7 +32,8 @@ from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.statistics.params_white_lists import task_output_white_list
from nailgun.statistics.fuel_statistics.tasks_params_white_lists \
import task_output_white_list
tasks_names_actions_groups_mapping = {

View File

@@ -22,7 +22,8 @@ from nailgun.test.base import reverse
from nailgun import objects
from nailgun.statistics.installation_info import InstallationInfo
from nailgun.statistics.fuel_statistics.installation_info \
import InstallationInfo
from nailgun.statistics.statsenderd import StatsSender

View File

@@ -23,7 +23,8 @@ from nailgun.test.base import reverse
from nailgun import consts
from nailgun import objects
from nailgun.statistics.params_white_lists import task_output_white_list
from nailgun.statistics.fuel_statistics.tasks_params_white_lists \
import task_output_white_list
from nailgun.task.helpers import TaskHelper

View File

@@ -0,0 +1,184 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock
from mock import patch
from mock import PropertyMock
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import Cluster
from nailgun.settings import settings
from nailgun.statistics import errors
from nailgun.statistics.oswl import helpers
class TestOpenStackClientProvider(BaseTestCase):
@patch("nailgun.statistics.oswl.helpers.ClientProvider.credentials",
new_callable=PropertyMock)
def test_clients_providing(self, creds_mock):
fake_credentials = (
"fake_username",
"fake_password",
"fake_tenant_name",
"fake_auth_url"
)
auth_kwargs = {
"username": fake_credentials[0],
"password": fake_credentials[1],
"tenant_name": fake_credentials[2],
"project_name": fake_credentials[2],
"auth_url": fake_credentials[3]
}
creds_mock.return_value = fake_credentials
client_provider = helpers.ClientProvider(cluster=None)
nova_client_path = ("nailgun.statistics.oswl."
"helpers.nova_client.Client")
cinder_client_path = ("nailgun.statistics.oswl."
"helpers.cinder_client.Client")
return_value_mock = Mock()
with patch(nova_client_path,
Mock(return_value=return_value_mock)) as nova_client_mock:
self.assertTrue(client_provider.nova is return_value_mock)
client_provider.nova
nova_client_mock.assert_called_once_with(
settings.OPENSTACK_API_VERSION["nova"],
*fake_credentials,
service_type=consts.NOVA_SERVICE_TYPE.compute
)
with patch(cinder_client_path,
Mock(return_value=return_value_mock)) as cinder_client_mock:
self.assertTrue(client_provider.cinder is return_value_mock)
client_provider.cinder
cinder_client_mock.assert_called_once_with(
settings.OPENSTACK_API_VERSION["cinder"],
*fake_credentials
)
with patch.object(client_provider, "_get_keystone_client",
return_value=return_value_mock) as get_kc_mock:
kc = client_provider.keystone
self.assertTrue(kc is return_value_mock)
client_provider.keystone
get_kc_mock.assert_called_once_with(auth_kwargs)
def test_fail_if_no_online_controllers(self):
self.env.create(
nodes_kwargs=[{"online": False, "roles": ["controller"]}]
)
cluster = self.env.clusters[0]
client_provider = helpers.ClientProvider(cluster)
with self.assertRaises(errors.NoOnlineControllers):
client_provider.credentials
@patch("nailgun.statistics.oswl.helpers.keystone_client_v3.Client")
@patch("nailgun.statistics.oswl.helpers.keystone_client_v2.Client")
@patch("nailgun.statistics.oswl.helpers.keystone_discover.Discover")
def test_get_keystone_client(self, kd_mock, kc_v2_mock, kc_v3_mock):
version_data_v2 = [{"version": (2, 0)}]
version_data_v3 = [{"version": (3, 0)}]
mixed_version_data = [{"version": (4, 0)}, {"version": (3, 0)}]
not_supported_version_data = [{"version": (4, 0)}]
auth_creds = {"auth_url": "fake"}
client_provider = helpers.ClientProvider(cluster=None)
discover_inst_mock = Mock()
kd_mock.return_value = discover_inst_mock
kc_v2_inst_mock = Mock()
kc_v2_mock.return_value = kc_v2_inst_mock
kc_v3_inst_mock = Mock()
kc_v3_mock.return_value = kc_v3_inst_mock
def check_returned(version_data, client_class_mock, client_inst_mock):
discover_inst_mock.version_data = Mock(return_value=version_data)
kc_client_inst = client_provider._get_keystone_client(auth_creds)
kd_mock.assert_called_with(**auth_creds)
self.assertTrue(kc_client_inst is client_inst_mock)
client_class_mock.assert_called_with(**auth_creds)
check_returned(version_data_v2, kc_v2_mock, kc_v2_inst_mock)
check_returned(version_data_v3, kc_v3_mock, kc_v3_inst_mock)
check_returned(mixed_version_data, kc_v3_mock, kc_v3_inst_mock)
fail_message = ("Failed to discover keystone version "
"for auth_url {0}"
.format(auth_creds["auth_url"]))
discover_inst_mock.version_data = \
Mock(return_value=not_supported_version_data)
self.assertRaisesRegexp(
Exception,
fail_message,
client_provider._get_keystone_client,
auth_creds
)
def test_get_auth_credentials(self):
expected_username = "test"
expected_password = "test"
expected_tenant = "test"
expected_auth_host = "0.0.0.0"
expected_auth_url = "http://{0}:{1}/{2}/".format(
expected_auth_host, settings.AUTH_PORT,
settings.OPENSTACK_API_VERSION["keystone"])
expected = (expected_username, expected_password, expected_tenant,
expected_auth_url)
cluster = self.env.create_cluster(api=False)
updated_attributes = {
"editable": {
"workloads_collector": {
"username": {"value": expected_username},
"password": {"value": expected_password},
"tenant": {"value": expected_tenant}
}
}
}
Cluster.update_attributes(cluster, updated_attributes)
get_host_for_auth_path = ("nailgun.statistics.utils."
"get_mgmt_ip_of_cluster_controller")
with patch(get_host_for_auth_path,
return_value=expected_auth_host):
client_provider = helpers.ClientProvider(cluster)
creds = client_provider.credentials
self.assertEqual(expected, creds)

View File

@@ -0,0 +1,431 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from sqlalchemy.inspection import inspect
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.db.sqlalchemy.models import cluster as cluster_model
from nailgun.db.sqlalchemy.models import plugins
from nailgun.objects import Cluster
from nailgun.objects import ReleaseCollection
from nailgun.settings import settings
from nailgun.statistics.fuel_statistics.installation_info \
import InstallationInfo
class TestInstallationInfo(BaseTestCase):
def test_release_info(self):
info = InstallationInfo()
f_info = info.fuel_release_info()
self.assertDictEqual(f_info, settings.VERSION)
def test_get_attributes_centos(self):
self.env.upload_fixtures(['openstack'])
info = InstallationInfo()
release = ReleaseCollection.filter_by(None, operating_system='CentOS')
cluster_data = self.env.create_cluster(
release_id=release[0].id
)
cluster = Cluster.get_by_uid(cluster_data['id'])
editable = cluster.attributes.editable
attr_key_list = [a[1] for a in info.attributes_white_list]
attrs_dict = info.get_attributes(editable, info.attributes_white_list)
self.assertEqual(
set(attr_key_list),
set(attrs_dict.keys())
)
def test_get_attributes_ubuntu(self):
self.env.upload_fixtures(['openstack'])
info = InstallationInfo()
release = ReleaseCollection.filter_by(None, operating_system='Ubuntu')
cluster_data = self.env.create_cluster(
release_id=release[0].id
)
cluster = Cluster.get_by_uid(cluster_data['id'])
editable = cluster.attributes.editable
attr_key_list = [a[1] for a in info.attributes_white_list]
attrs_dict = info.get_attributes(editable, info.attributes_white_list)
self.assertEqual(
# no vlan splinters for ubuntu
set(attr_key_list) - set(('vlan_splinters', 'vlan_splinters_ovs')),
set(attrs_dict.keys())
)
def test_get_empty_attributes(self):
info = InstallationInfo()
trash_attrs = {'some': 'trash', 'nested': {'n': 't'}}
result = info.get_attributes(trash_attrs, info.attributes_white_list)
self.assertDictEqual({}, result)
def test_get_attributes_exception_handled(self):
info = InstallationInfo()
variants = [
None,
{},
{'common': None},
{'common': {'libvirt_type': {}}},
{'common': {'libvirt_type': 3}},
]
for attrs in variants:
result = info.get_attributes(attrs, info.attributes_white_list)
self.assertDictEqual({}, result)
def test_clusters_info(self):
self.env.upload_fixtures(['openstack'])
info = InstallationInfo()
release = ReleaseCollection.filter_by(None, operating_system='CentOS')
nodes_params = [
{'roles': ['compute']},
{'roles': ['compute']},
{'roles': ['controller']}
]
self.env.create(
cluster_kwargs={
'release_id': release[0].id,
'mode': consts.CLUSTER_MODES.ha_full,
'net_provider': consts.CLUSTER_NET_PROVIDERS.nova_network},
nodes_kwargs=nodes_params
)
self.env.create_node({'status': consts.NODE_STATUSES.discover})
clusters_info = info.get_clusters_info()
cluster = self.env.clusters[0]
self.assertEquals(1, len(clusters_info))
cluster_info = clusters_info[0]
self.assertEquals(len(nodes_params), len(cluster_info['nodes']))
self.assertEquals(len(nodes_params), cluster_info['nodes_num'])
self.assertEquals(consts.CLUSTER_MODES.ha_full,
cluster_info['mode'])
self.assertEquals(consts.CLUSTER_NET_PROVIDERS.nova_network,
cluster_info['net_provider'])
self.assertEquals(consts.CLUSTER_STATUSES.new,
cluster_info['status'])
self.assertEquals(False,
cluster_info['is_customized'])
self.assertEquals(cluster.id,
cluster_info['id'])
self.assertEquals(cluster.fuel_version,
cluster_info['fuel_version'])
self.assertTrue('attributes' in cluster_info)
self.assertTrue('release' in cluster_info)
self.assertEquals(cluster.release.operating_system,
cluster_info['release']['os'])
self.assertEquals(cluster.release.name,
cluster_info['release']['name'])
self.assertEquals(cluster.release.version,
cluster_info['release']['version'])
self.assertEquals(1, len(cluster_info['node_groups']))
group_info = cluster_info['node_groups'][0]
group = [ng for ng in cluster.node_groups][0]
self.assertEquals(group.id,
group_info['id'])
self.assertEquals(len(nodes_params),
len(group_info['nodes']))
self.assertEquals(set([n.id for n in group.nodes]),
set(group_info['nodes']))
def test_network_configuration(self):
info = InstallationInfo()
# Checking nova network configuration
nova = consts.CLUSTER_NET_PROVIDERS.nova_network
self.env.create(cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_full,
'net_provider': nova
})
clusters_info = info.get_clusters_info()
cluster_info = clusters_info[0]
self.assertTrue('network_configuration' in cluster_info)
network_config = cluster_info['network_configuration']
for field in ('fixed_network_size', 'fixed_networks_vlan_start',
'fixed_networks_amount', 'net_manager'):
self.assertIn(field, network_config)
# Checking neutron network configuration
neutron = consts.CLUSTER_NET_PROVIDERS.neutron
self.env.create(cluster_kwargs={
'mode': consts.CLUSTER_MODES.ha_full,
'net_provider': neutron
})
clusters_info = info.get_clusters_info()
# Clusters info is unordered list, so we should find required
# cluster_info
cluster_info = filter(lambda x: x['net_provider'] == neutron,
clusters_info)[0]
self.assertTrue('network_configuration' in cluster_info)
network_config = cluster_info['network_configuration']
for field in ('segmentation_type', 'net_l23_provider'):
self.assertIn(field, network_config)
def test_nodes_info(self):
info = InstallationInfo()
self.env.create(
release_kwargs={
'operating_system': consts.RELEASE_OS.centos
},
nodes_kwargs=[
{'status': consts.NODE_STATUSES.discover,
'roles': ['controller', 'compute']},
{'roles': [],
'pending_roles': ['compute']}
]
)
self.env.make_bond_via_api(
'bond0', consts.BOND_MODES.active_backup,
['eth0', 'eth1'], node_id=self.env.nodes[0].id)
nodes_info = info.get_nodes_info(self.env.nodes)
self.assertEquals(len(self.env.nodes), len(nodes_info))
for idx, node in enumerate(self.env.nodes):
node_info = nodes_info[idx]
self.assertEquals(node_info['id'], node.id)
self.assertEquals(node_info['group_id'], node.group_id)
self.assertListEqual(node_info['roles'], node.roles)
self.assertEquals(node_info['os'], node.os_platform)
self.assertEquals(node_info['status'], node.status)
self.assertEquals(node_info['error_type'], node.error_type)
self.assertEquals(node_info['online'], node.online)
self.assertEquals(node_info['manufacturer'], node.manufacturer)
self.assertEquals(node_info['platform_name'], node.platform_name)
self.assertEquals(node_info['pending_addition'],
node.pending_addition)
self.assertEquals(node_info['pending_deletion'],
node.pending_deletion)
self.assertEquals(node_info['pending_roles'], node.pending_roles)
self.assertEqual(
node_info['nic_interfaces'],
[{'id': i.id} for i in node.nic_interfaces]
)
self.assertEqual(
node_info['bond_interfaces'],
[{'id': i.id, 'slaves': [s.id for s in i.slaves]}
for i in node.bond_interfaces]
)
def test_plugins_info(self):
info = InstallationInfo()
cluster = self.env.create_cluster(api=False)
plugin_kwargs = self.env.get_default_plugin_metadata()
plugin_obj = plugins.Plugin(**plugin_kwargs)
self.db.add(plugin_obj)
self.db.flush()
plugin_kwargs["id"] = plugin_obj.id
cluster_plugin_kwargs = {
"cluster_id": cluster.id,
"plugin_id": plugin_obj.id
}
cluster_plugin = plugins.ClusterPlugins(**cluster_plugin_kwargs)
self.db.add(cluster_plugin)
self.db.flush()
expected_attributes_names = (
"id",
"name",
"version",
"releases",
"fuel_version",
"package_version",
)
expected_info = dict(
[(key, value) for key, value in six.iteritems(plugin_kwargs)
if key in expected_attributes_names]
)
expected = [expected_info]
actual = info.get_cluster_plugins_info(cluster)
self.assertEqual(expected, actual)
def test_installation_info(self):
info = InstallationInfo()
nodes_params = [
{'roles': ['compute']},
{'roles': ['compute']},
{'roles': ['controller']}
]
self.env.create(
release_kwargs={
'operating_system': consts.RELEASE_OS.centos
},
cluster_kwargs={},
nodes_kwargs=nodes_params
)
unallocated_nodes_params = [
{'status': consts.NODE_STATUSES.discover},
{'status': consts.NODE_STATUSES.discover}
]
for unallocated_node in unallocated_nodes_params:
self.env.create_node(**unallocated_node)
info = info.get_installation_info()
self.assertEquals(1, info['clusters_num'])
self.assertEquals(len(nodes_params), info['allocated_nodes_num'])
self.assertEquals(len(unallocated_nodes_params),
info['unallocated_nodes_num'])
self.assertTrue('master_node_uid' in info)
self.assertTrue('contact_info_provided' in info['user_information'])
self.assertDictEqual(settings.VERSION, info['fuel_release'])
def test_all_cluster_data_collected(self):
self.env.create(nodes_kwargs=[{'roles': ['compute']}])
self.env.create_node(status=consts.NODE_STATUSES.discover)
# Fetching installation info struct
info = InstallationInfo()
info = info.get_installation_info()
actual_cluster = info['clusters'][0]
# Creating cluster schema
cluster_schema = {}
for column in inspect(cluster_model.Cluster).columns:
cluster_schema[six.text_type(column.name)] = None
for rel in inspect(cluster_model.Cluster).relationships:
cluster_schema[six.text_type(rel.table.name)] = None
# Removing of not required fields
remove_fields = (
'tasks', 'cluster_changes', 'nodegroups', 'pending_release_id',
'releases', 'replaced_provisioning_info', 'notifications',
'deployment_tasks', 'name', 'replaced_deployment_info',
'grouping'
)
for field in remove_fields:
cluster_schema.pop(field)
# Renaming fields for matching
rename_fields = (
('plugins', 'installed_plugins'),
('networking_configs', 'network_configuration'),
('release_id', 'release'),
)
for name_from, name_to in rename_fields:
cluster_schema.pop(name_from)
cluster_schema[name_to] = None
# If the test failed here, it means that you have added properties
# to the cluster and they are not exported into statistics.
# If you don't know what to do, please contact the fuel-stats team.
for key in six.iterkeys(cluster_schema):
self.assertIn(key, actual_cluster)
def _find_leafs_paths(self, structure, leafs_names=('value',)):
"""Finds paths to leafs
:param structure: structure for searching
:param leafs_names: leafs names
:return: list of tuples of dicts keys to leafs
"""
def _keys_paths_helper(result, keys, struct):
if isinstance(struct, dict):
for k in sorted(six.iterkeys(struct)):
if k in leafs_names:
result.append(keys)
else:
_keys_paths_helper(result, keys + (k,), struct[k])
elif isinstance(struct, (tuple, list)):
for d in struct:
_keys_paths_helper(result, keys, d)
else:
# leaf not found
pass
leafs_paths = []
_keys_paths_helper(leafs_paths, (), structure)
return self._remove_private_leafs_paths(leafs_paths)
def _remove_private_leafs_paths(self, leafs_paths):
"""Removes paths to private information
:return: leafs paths without paths to private information
"""
private_paths = (
('access', 'email'), ('access', 'password'), ('access', 'tenant'),
('access', 'user'), ('common', 'auth_key'), ('corosync', 'group'),
('corosync', 'port'), ('external_dns', 'dns_list'),
('external_mongo', 'hosts_ip'),
('external_mongo', 'mongo_db_name'),
('external_mongo', 'mongo_password'),
('external_mongo', 'mongo_user'), ('syslog', 'syslog_port'),
('syslog', 'syslog_server'), ('workloads_collector', 'password'),
('workloads_collector', 'tenant'),
('workloads_collector', 'username'), ('zabbix', 'password'),
('zabbix', 'username'),
('common', 'use_vcenter'), # removed attribute
('murano_settings', 'murano_repo_url'),
)
return filter(lambda x: x not in private_paths, leafs_paths)
def test_all_cluster_attributes_in_white_list(self):
self.env.create(nodes_kwargs=[{'roles': ['compute']}])
self.env.create_node(status=consts.NODE_STATUSES.discover)
cluster = self.env.clusters[0]
expected_paths = self._find_leafs_paths(cluster.attributes.editable)
# Removing 'value' from expected paths
actual_paths = [rule.path[:-1] for rule in
InstallationInfo.attributes_white_list]
# If the test failed here, it means that you have added cluster
# attributes and they are not added into
# InstallationInfo.attributes_white_list.
# If you don't know what should be added into the white list, please
# contact the fuel-stats team.
for path in expected_paths:
self.assertIn(path, actual_paths)
def test_all_cluster_vmware_attributes_in_white_list(self):
self.env.create(nodes_kwargs=[{'roles': ['compute']}])
self.env.create_node(status=consts.NODE_STATUSES.discover)
cluster = self.env.clusters[0]
expected_paths = self._find_leafs_paths(
cluster.vmware_attributes.editable,
leafs_names=('vsphere_cluster', 'enable'))
# Removing leaf name from expected paths
actual_paths = [rule.path[:-1] for rule in
InstallationInfo.vmware_attributes_white_list]
# If the test failed here, it means that you have added cluster vmware
# attributes and they are not added into
# InstallationInfo.vmware_attributes_white_list.
# If you don't know what should be added into the white list, please
# contact the fuel-stats team.
for path in expected_paths:
self.assertIn(path, actual_paths)
def test_white_list_unique_names(self):
names = set(rule.map_to_name for rule in
InstallationInfo.attributes_white_list)
self.assertEqual(len(InstallationInfo.attributes_white_list),
len(names))
names = set(rule.map_to_name for rule in
InstallationInfo.vmware_attributes_white_list)
self.assertEqual(len(InstallationInfo.vmware_attributes_white_list),
len(names))

View File

@@ -0,0 +1,185 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import Cluster
from nailgun.objects import OpenStackWorkloadStats
from nailgun.statistics.oswl.collector import collect as oswl_collect_once
class TestOSWLCollector(BaseTestCase):
vms_info = [{
"id": 1,
"status": "running",
}]
def collect_for_operational_cluster(self, get_info_mock):
cluster = self.env.create_cluster(
api=False,
status=consts.CLUSTER_STATUSES.operational
)
cls_id = cluster.id
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
upd_time = last.updated_time
res_data = {
'added': [{'time': upd_time.isoformat(), 'id': 1}],
'removed': [],
'modified': [],
'current': self.vms_info}
self.assertEqual(last.resource_data, res_data)
return cls_id, res_data
def update_cluster_status_and_oswl_data(self, cls_id, status):
cls = Cluster.get_by_uid(cls_id)
Cluster.update(cls, {'status': status})
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
return OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_skip_collection_for_errorful_cluster(self, get_info_mock, *_):
error_cluster = self.env.create(
api=False,
nodes_kwargs=[{"roles": ["controller"], "online": False}],
cluster_kwargs={"name": "error",
"status": consts.CLUSTER_STATUSES.operational}
)
normal_cluster = self.env.create(
api=False,
nodes_kwargs=[{"roles": ["controller"], "online": True}],
cluster_kwargs={"name": "normal",
"status": consts.CLUSTER_STATUSES.operational}
)
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last_for_error_clsr = OpenStackWorkloadStats.get_last_by(
error_cluster["id"], consts.OSWL_RESOURCE_TYPES.vm)
self.assertIsNone(last_for_error_clsr)
last_for_normal_clsr = OpenStackWorkloadStats.get_last_by(
normal_cluster["id"], consts.OSWL_RESOURCE_TYPES.vm)
self.assertIsNotNone(last_for_normal_clsr)
upd_time = last_for_normal_clsr.updated_time
res_data = {
'added': [{'time': upd_time.isoformat(), 'id': 1}],
'removed': [],
'modified': [],
'current': self.vms_info}
self.assertEqual(last_for_normal_clsr.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_clear_data_for_changed_cluster(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
last = self.update_cluster_status_and_oswl_data(
cls_id, consts.CLUSTER_STATUSES.error)
# nothing is changed while cluster is in error status
self.assertEqual(last.resource_data, res_data)
last = self.update_cluster_status_and_oswl_data(
cls_id, consts.CLUSTER_STATUSES.remove)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
res_data.update({
'removed': [removed],
'current': []})
# current data is cleared when cluster status is changed
self.assertEqual(last.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_clear_data_for_removed_cluster(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
cls = Cluster.get_by_uid(cls_id)
Cluster.delete(cls)
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
res_data.update({
'removed': [removed],
'current': []})
# current data is cleared when cluster is deleted
self.assertEqual(last.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_removed_several_times(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertItemsEqual(self.vms_info, last.resource_data['current'])
# reset cluster
get_info_mock.return_value = []
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
removed_data = [removed]
# check data is not duplicated in removed on several collects
for _ in xrange(10):
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(removed_data, last.resource_data['removed'])
# cluster is operational
# checking 'removed' is not changed
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(removed_data, last.resource_data['removed'])
# reset cluster again
# checking only id and time added to 'removed'
get_info_mock.return_value = []
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed_data.append({
'id': removed_data[0]['id'],
'time': last.updated_time.isoformat()
})
self.assertListEqual(removed_data, last.resource_data['removed'])

View File

@@ -0,0 +1,307 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock
import six
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.statistics.oswl import helpers
class TestOSWLHelpers(BaseTestCase):
components_to_mock = {
"nova": {
"servers": [
{
"id": 1,
"status": "running",
"OS-EXT-STS:power_state": 1,
"created": "date_of_creation",
"hostId": "test_host_id",
"tenant_id": "test_tenant_id",
"image": {"id": "test_image_id"},
"flavor": {"id": "test_flavor_id"},
},
],
"flavors": [
{
"id": 2,
"ram": 64,
"vcpus": 4,
"OS-FLV-EXT-DATA:ephemeral": 1,
"disk": 1,
"swap": 16,
},
],
"images": [
{
"id": 4,
"minDisk": 1,
"minRam": 64,
"OS-EXT-IMG-SIZE:size": 13000000,
"created": "some_date_of_creation",
"updated": "some_date_of_update"
},
],
"client": {"version": "v1.1"}
},
"cinder": {
"volumes": [
{
"id": 3,
"availability_zone": "test_availability_zone",
"encrypted": False,
"bootable": False,
"status": "available",
"volume_type": "test_volume",
"size": 1,
"os-vol-host-attr:host": "test-node",
"snapshot_id": None,
"attachments": "test_attachments",
"os-vol-tenant-attr:tenant_id": "test_tenant",
},
],
"client": {"version": "v1"}
},
"keystone": {
"tenants": [
{
"id": 5,
"enabled": True,
},
],
"users": [
{
"id": "test_user_id",
"enabled": True,
"tenantId": "test_tenant_id",
}
],
"version": "v2.0"
},
}
def _prepare_client_provider_mock(self):
client_provider_mock = Mock()
clients_version_attr_path = {
"nova": ["client", "version"],
"cinder": ["client", "version"],
"keystone": ["version"]
}
setattr(client_provider_mock, "clients_version_attr_path",
clients_version_attr_path)
return client_provider_mock
def _update_mock_with_complex_dict(self, root_mock, attrs_dict):
for key, value in six.iteritems(attrs_dict):
attr_name = key
attr_value = value
if isinstance(value, dict):
attr_value = Mock()
self._update_mock_with_complex_dict(
attr_value, value
)
elif isinstance(value, list):
attr_value = Mock()
to_return = []
for data in value:
attr_value_element = Mock()
attr_value_element.to_dict.return_value = data
to_return.append(attr_value_element)
attr_value.list.return_value = to_return
setattr(root_mock, attr_name, attr_value)
def test_get_oswl_info(self):
expected = {
"vm": [
{
"id": 1,
"status": "running",
"power_state": 1,
"created_at": "date_of_creation",
"image_id": "test_image_id",
"flavor_id": "test_flavor_id",
"host_id": "test_host_id",
"tenant_id": "test_tenant_id",
},
],
"flavor": [
{
"id": 2,
"ram": 64,
"vcpus": 4,
"ephemeral": 1,
"disk": 1,
"swap": 16,
},
],
"image": [
{
"id": 4,
"minDisk": 1,
"minRam": 64,
"sizeBytes": 13000000,
"created_at": "some_date_of_creation",
"updated_at": "some_date_of_update"
},
],
"volume": [
{
"id": 3,
"availability_zone": "test_availability_zone",
"encrypted_flag": False,
"bootable_flag": False,
"status": "available",
"volume_type": "test_volume",
"size": 1,
"host": "test-node",
"snapshot_id": None,
"attachments": "test_attachments",
"tenant_id": "test_tenant",
},
],
"tenant": [
{
"id": 5,
"enabled_flag": True,
},
],
"keystone_user": [
{
"id": "test_user_id",
"enabled_flag": True,
"tenant_id": "test_tenant_id",
},
],
}
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
self.components_to_mock)
for resource_name, expected_data in six.iteritems(expected):
actual = helpers.get_info_from_os_resource_manager(
client_provider_mock, resource_name
)
self.assertEqual(actual, expected_data)
def test_different_api_versions_handling_for_tenants(self):
keystone_v2_component = {
"keystone": {
"tenants": [
{
"id": 5,
"enabled": True,
},
],
"version": "v2.0"
},
}
keystone_v3_component = {
"keystone": {
"projects": [
{
"id": 5,
"enabled": True,
},
],
"version": "v3.0"
},
}
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
keystone_v2_component)
client_provider_mock.keystone.tenants.list.assert_called_once()
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
keystone_v3_component)
client_provider_mock.keystone.projects.list.assert_called_once()
def test_different_api_versions_handling_for_users(self):
keystone_v2_component = {
"keystone": {
"users": [
{
"id": "test_user_id",
"enabled": True,
"tenantId": "test_tenant_id",
}
],
"version": "v2.0"
},
}
keystone_v3_component = {
"keystone": {
"users": [
{
"id": "test_user_id",
"enabled": True,
"default_project_id": "test_tenant_id",
}
],
"version": "v3"
},
}
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
keystone_v2_component)
kc_v2_info = helpers.get_info_from_os_resource_manager(
client_provider_mock, consts.OSWL_RESOURCE_TYPES.keystone_user
)
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
keystone_v3_component)
kc_v3_info = helpers.get_info_from_os_resource_manager(
client_provider_mock, consts.OSWL_RESOURCE_TYPES.keystone_user
)
self.assertEqual(kc_v2_info, kc_v3_info)
def test_additional_display_opts_supplied(self):
expected_display_options = {"search_opts": {"all_tenants": 1}}
client_provider_mock = self._prepare_client_provider_mock()
self._update_mock_with_complex_dict(client_provider_mock,
self.components_to_mock)
helpers.get_info_from_os_resource_manager(
client_provider_mock, consts.OSWL_RESOURCE_TYPES.vm
)
client_provider_mock.nova.servers.list.assert_called_once_with(
**expected_display_options
)
helpers.get_info_from_os_resource_manager(
client_provider_mock, consts.OSWL_RESOURCE_TYPES.volume
)
client_provider_mock.cinder.volumes.list.assert_called_once_with(
**expected_display_options
)

View File

@@ -0,0 +1,100 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import OpenStackWorkloadStats
from nailgun.objects import OpenStackWorkloadStatsCollection
from nailgun.settings import settings
class TestOSWLObject(BaseTestCase):
def test_oswl_get_last_by_cluster_id_resource_type(self):
cluster_id = 1
dt = datetime.datetime.utcnow()
obj_data = {
'cluster_id': cluster_id,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'created_date': dt.date(),
'updated_time': dt.time(),
'resource_checksum': ""
}
obj = OpenStackWorkloadStats.create(obj_data)
self.assertEqual(
OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.vm),
obj
)
self.assertIsNone(
OpenStackWorkloadStats.get_last_by(
0, consts.OSWL_RESOURCE_TYPES.vm)
)
self.assertIsNone(
OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.tenant)
)
OpenStackWorkloadStats.delete(obj)
self.assertIsNone(
OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
)
def test_clean_expired_entries(self):
dt_now = datetime.datetime.utcnow()
t_delta = datetime.timedelta(days=settings.OSWL_STORING_PERIOD)
entries_to_del_cluster_ids = (1, 2)
for cluster_id in entries_to_del_cluster_ids:
obj_kwargs = {
"cluster_id": cluster_id,
"resource_type": consts.OSWL_RESOURCE_TYPES.volume,
"updated_time": dt_now.time(),
"created_date": dt_now.date() - t_delta,
"resource_checksum": ""
}
OpenStackWorkloadStats.create(obj_kwargs)
untouched_obj_kwargs = {
"cluster_id": 3,
"resource_type": consts.OSWL_RESOURCE_TYPES.vm,
"updated_time": dt_now.time(),
"created_date": dt_now.date(),
"resource_checksum": ""
}
OpenStackWorkloadStats.create(untouched_obj_kwargs)
OpenStackWorkloadStatsCollection.clean_expired_entries()
self.db.commit()
for cluster_id in entries_to_del_cluster_ids:
instance = \
OpenStackWorkloadStats.get_last_by(
cluster_id,
consts.OSWL_RESOURCE_TYPES.volume
)
self.assertIsNone(instance)
untouched_obj = OpenStackWorkloadStats.get_last_by(
untouched_obj_kwargs["cluster_id"],
consts.OSWL_RESOURCE_TYPES.vm
)
self.assertIsNotNone(untouched_obj)

View File

@@ -0,0 +1,275 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import six
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import OpenStackWorkloadStats
from nailgun.objects import OpenStackWorkloadStatsCollection
from nailgun.statistics.oswl.saver import oswl_data_checksum
from nailgun.statistics.oswl.saver import oswl_statistics_save
class TestOSWLServerInfoSaving(BaseTestCase):
vms_info = {
"id": 1,
"status": "running",
"power_state": 1,
"created_at": "dt",
"host_id": "111",
"tenant_id": "222",
"image_id": "333",
"flavor_id": "444"
}
@property
def empty_data(self):
return {
'cluster_id': 1,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'created_date': datetime.datetime.utcnow().date(),
'resource_data': {'added': [],
'removed': [],
'modified': [],
'current': []},
'resource_checksum': oswl_data_checksum([]),
'is_sent': False
}
def data_w_default_vm_info(self, time):
data = self.empty_data
data['resource_data'].update({
'added': [{'time': time.isoformat(), 'id': 1}],
'current': [self.vms_info]
})
return data
def check_overall_rec_count(self, count):
saved = OpenStackWorkloadStatsCollection.all()
self.assertEqual(saved.count(), count)
return saved
def check_data_vs_rec(self, data, rec):
data['resource_checksum'] = \
oswl_data_checksum(data['resource_data']['current'])
for k, v in six.iteritems(data):
if isinstance(v, (list, tuple)):
self.assertItemsEqual(v, getattr(rec, k))
else:
self.assertEqual(v, getattr(rec, k))
def save_data_and_check_record(self, data):
oswl_statistics_save(1, consts.OSWL_RESOURCE_TYPES.vm, data)
last = OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(last, self.check_overall_rec_count(1).first())
return last
def add_default_vm_info_and_check(self):
last = self.save_data_and_check_record([self.vms_info])
time_update = last.updated_time
data = self.data_w_default_vm_info(time_update)
self.check_data_vs_rec(data, last)
return time_update, data
def test_empty_data(self):
last = self.save_data_and_check_record([])
self.check_data_vs_rec(self.empty_data, last)
def test_added_same_info(self):
# VM is added
time_update, data = self.add_default_vm_info_and_check()
# save same info
last = self.save_data_and_check_record([self.vms_info])
# DB row was not updated
self.assertEqual(time_update, last.updated_time)
self.check_data_vs_rec(data, last)
def test_added_one_by_one(self):
# VM with id=1 is added
time_update1, data = self.add_default_vm_info_and_check()
# VM with id=2 is added
two_vms = [dict(self.vms_info), dict(self.vms_info)]
two_vms[1]['id'] = 2
last = self.save_data_and_check_record(two_vms)
time_update2 = last.updated_time
data['resource_data'].update({
'added': [{'time': time_update1.isoformat(), 'id': 1},
{'time': time_update2.isoformat(), 'id': 2}],
'current': two_vms
})
self.check_data_vs_rec(data, last)
def test_added_on_cluster_reset(self):
# VM with id=1 is added
time_update1, data = self.add_default_vm_info_and_check()
        # the same VM info (id=1) is saved again
two_vms = [self.vms_info]
self.save_data_and_check_record(two_vms)
# reset cluster
self.save_data_and_check_record([])
last = self.save_data_and_check_record(two_vms)
time_update2 = last.updated_time
time_removed2 = last.resource_data['removed'][0]['time']
data['resource_data'].update({
'added': [{'time': time_update1.isoformat(), 'id': 1},
{'time': time_update2.isoformat(), 'id': 1}],
'current': two_vms,
'removed': [dict(two_vms[0], **{'time': time_removed2})]
})
self.check_data_vs_rec(data, last)
def test_added_then_removed(self):
# VM is added
time_update, data = self.add_default_vm_info_and_check()
# VM is removed
last = self.save_data_and_check_record([])
time_update = last.updated_time
removed = dict(self.vms_info)
removed['time'] = time_update.isoformat()
data['resource_data'].update({
'removed': [removed],
'current': []
})
self.check_data_vs_rec(data, last)
def test_modified(self):
# VM is added
time_update, data = self.add_default_vm_info_and_check()
# VM power state and status are changed
vms_new = [dict(self.vms_info)]
vms_new[0]['power_state'] = 0
vms_new[0]['status'] = 'stopped'
last = self.save_data_and_check_record(vms_new)
time_update = last.updated_time
modified1 = {'power_state': self.vms_info['power_state'],
'status': self.vms_info['status'],
'time': time_update.isoformat(),
'id': self.vms_info['id']}
data['resource_data'].update({
'modified': [modified1],
'current': vms_new
})
self.check_data_vs_rec(data, last)
# VM power state is changed back
vms_new1 = [dict(vms_new[0])]
vms_new1[0]['power_state'] = 1
last = self.save_data_and_check_record(vms_new1)
time_update = last.updated_time
modified2 = {'power_state': vms_new[0]['power_state'],
'time': time_update.isoformat(),
'id': vms_new[0]['id']}
data['resource_data'].update({
'modified': [modified1, modified2],
'current': vms_new1
})
self.check_data_vs_rec(data, last)
# VM status is changed back
last = self.save_data_and_check_record([self.vms_info])
time_update = last.updated_time
modified3 = {'status': vms_new1[0]['status'],
'time': time_update.isoformat(),
'id': vms_new1[0]['id']}
data['resource_data'].update({
'modified': [modified1, modified2, modified3],
'current': [self.vms_info]
})
self.check_data_vs_rec(data, last)
def test_add_row_per_day(self):
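        # a new row is created for the current date only when the resource
        # data changes; unchanged data must not produce a new record even if
        # the existing one is dated earlier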
# VM is added
last = self.save_data_and_check_record([self.vms_info])
date_cur = last.created_date
time_update = last.updated_time
date_1st_rec = date_cur - datetime.timedelta(days=1)
# make existing record one day older
OpenStackWorkloadStats.update(last,
{'created_date': date_1st_rec})
# pass the same data
        # no new record is created and the existing one remains unchanged
self.assertEqual(last,
self.save_data_and_check_record([self.vms_info]))
# VM is removed
oswl_statistics_save(1, consts.OSWL_RESOURCE_TYPES.vm, [])
saved = self.check_overall_rec_count(2)
last = OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(last.created_date, date_cur)
for rec in saved:
if rec.created_date == date_cur:
self.assertEqual(rec, last)
# last record contains 'removed' and empty 'added'
data = self.empty_data
removed = dict(self.vms_info)
removed['time'] = last.updated_time.isoformat()
data['resource_data']['removed'] = [removed]
self.check_data_vs_rec(data, rec)
elif rec.created_date == date_1st_rec:
# first record contains 'added' and empty 'removed'
data = self.data_w_default_vm_info(time_update)
data['created_date'] = date_1st_rec
self.check_data_vs_rec(data, rec)
def test_oswl_is_sent_restored_on_changes(self):
cluster_id = 1
vm_info = {
"id": 1,
"power_state": 1,
}
oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
[vm_info])
last = OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
# Setting is_sent to True
OpenStackWorkloadStats.update(last, {'is_sent': True})
self.assertEqual(True, last.is_sent)
# Checking is_sent is not changed if data is not changed
oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
[vm_info])
last_no_change = OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(True, last_no_change.is_sent)
# Checking is_sent is changed if data is changed
vm_info["power_state"] += 1
oswl_statistics_save(cluster_id, consts.OSWL_RESOURCE_TYPES.vm,
[vm_info])
last_changed = OpenStackWorkloadStats.get_last_by(
cluster_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(False, last_changed.is_sent)

View File

@ -0,0 +1,323 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from mock import Mock
from mock import patch
import requests
import urllib3
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import Cluster
from nailgun.objects import OpenStackWorkloadStats
from nailgun.settings import settings
from nailgun.statistics.statsenderd import StatsSender
FEATURE_MIRANTIS = {'feature_groups': ['mirantis']}
FEATURE_EXPERIMENTAL = {'feature_groups': ['experimental']}
class TestStatisticsSender(BaseTestCase):
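    """Tests for StatsSender: collector URL selection based on feature
    groups, ping and send error handling, and sending of OSWL records.
    """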
def check_collector_urls(self, server):
self.assertEqual(
StatsSender().build_collector_url("COLLECTOR_ACTION_LOGS_URL"),
settings.COLLECTOR_ACTION_LOGS_URL.format(collector_server=server)
)
self.assertEqual(
StatsSender().build_collector_url("COLLECTOR_INST_INFO_URL"),
settings.COLLECTOR_INST_INFO_URL.format(collector_server=server)
)
self.assertEqual(
StatsSender().build_collector_url("COLLECTOR_OSWL_INFO_URL"),
settings.COLLECTOR_OSWL_INFO_URL.format(collector_server=server)
)
self.assertEqual(
StatsSender().build_collector_url("COLLECTOR_PING_URL"),
settings.COLLECTOR_PING_URL.format(collector_server=server)
)
@patch.dict('nailgun.settings.settings.VERSION', FEATURE_MIRANTIS)
def test_mirantis_collector_urls(self):
self.check_collector_urls(StatsSender.COLLECTOR_MIRANTIS_SERVER)
@patch.dict('nailgun.settings.settings.VERSION', FEATURE_EXPERIMENTAL)
def test_community_collector_urls(self):
self.check_collector_urls(StatsSender.COLLECTOR_COMMUNITY_SERVER)
@patch('nailgun.statistics.statsenderd.requests.get')
def test_ping_ok(self, requests_get):
requests_get.return_value = Mock(status_code=200)
sender = StatsSender()
self.assertTrue(sender.ping_collector())
requests_get.assert_called_once_with(
sender.build_collector_url("COLLECTOR_PING_URL"),
timeout=settings.COLLECTOR_RESP_TIMEOUT)
@patch('nailgun.statistics.statsenderd.requests.get')
@patch('nailgun.statistics.statsenderd.logger.error')
def test_ping_failed_on_connection_errors(self, log_error, requests_get):
except_types = (
urllib3.exceptions.DecodeError,
urllib3.exceptions.ProxyError,
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects,
requests.exceptions.HTTPError)
for except_ in except_types:
requests_get.side_effect = except_()
self.assertFalse(StatsSender().ping_collector())
log_error.assert_called_with("Collector ping failed: %s",
type(except_()).__name__)
@patch('nailgun.statistics.statsenderd.requests.get')
@patch('nailgun.statistics.statsenderd.logger.exception')
def test_ping_failed_on_exception(self, log_exception, requests_get):
requests_get.side_effect = Exception("custom")
self.assertFalse(StatsSender().ping_collector())
log_exception.assert_called_once_with(
"Collector ping failed: %s", "custom")
@patch('nailgun.statistics.statsenderd.requests.post')
def test_send_ok(self, requests_post):
requests_post.return_value = Mock(status_code=200)
sender = StatsSender()
self.assertEqual(
sender.send_data_to_url(
url=sender.build_collector_url("COLLECTOR_ACTION_LOGS_URL"),
data={}),
requests_post.return_value
)
requests_post.assert_called_once_with(
sender.build_collector_url("COLLECTOR_ACTION_LOGS_URL"),
headers={'content-type': 'application/json'},
data='{}',
timeout=settings.COLLECTOR_RESP_TIMEOUT)
@patch('nailgun.statistics.statsenderd.requests.post')
@patch('nailgun.statistics.statsenderd.logger.error')
def test_send_failed_on_connection_error(self, log_error, requests_post):
except_types = (
urllib3.exceptions.DecodeError,
urllib3.exceptions.ProxyError,
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects)
for except_ in except_types:
requests_post.side_effect = except_()
sender = StatsSender()
self.assertIsNone(
sender.send_data_to_url(
url=sender.build_collector_url(
"COLLECTOR_ACTION_LOGS_URL"),
data={})
)
log_error.assert_called_with(
"Sending data to collector failed: %s",
type(except_()).__name__)
@patch('nailgun.statistics.statsenderd.requests.post')
@patch('nailgun.statistics.statsenderd.logger.exception')
def test_send_failed_on_exception(self, log_error, requests_post):
requests_post.side_effect = Exception("custom")
sender = StatsSender()
self.assertIsNone(
sender.send_data_to_url(
url=sender.build_collector_url("COLLECTOR_ACTION_LOGS_URL"),
data={})
)
log_error.assert_called_once_with(
"Sending data to collector failed: %s", "custom")
def test_skipped_action_logs(self):
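        # action logs reported by the collector as 'skipped' must not
        # trigger a DB commit marking them as sent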
class Response(object):
status_code = 200
def json(self):
return {
'status': 'ok',
'action_logs': [{'external_id': 1, 'status': 'skipped'}]}
sender = StatsSender()
commit = 'nailgun.db.sqlalchemy.DeadlockDetectingSession.commit'
with patch.object(sender, 'send_data_to_url',
return_value=Response()):
with patch.object(sender, 'is_status_acceptable',
return_value=True):
with patch(commit) as mocked_commit:
sender.send_log_serialized([{'external_id': 1}], [1])
self.assertEqual(0, mocked_commit.call_count)
@patch('nailgun.statistics.statsenderd.time.sleep')
@patch('nailgun.statistics.statsenderd.dithered')
@patch('nailgun.db.sqlalchemy.fixman.settings.'
'STATS_ENABLE_CHECK_INTERVAL', 0)
@patch('nailgun.db.sqlalchemy.fixman.settings.'
'COLLECTOR_PING_INTERVAL', 1)
def test_send_stats_once_after_dberror(self, dithered, sleep):
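        # a DB error raised while checking must_send_stats must not break
        # subsequent send_stats_once() calls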
def fn():
# try to commit wrong data
Cluster.create(
{
"id": "500",
"release_id": "500"
}
)
self.db.commit()
ss = StatsSender()
ss.send_stats_once()
# one call with STATS_ENABLE_CHECK_INTERVAL was made (all went ok)
self.assertEqual(sleep.call_count, 1)
dithered.assert_called_with(0)
with patch.object(ss,
'must_send_stats',
fn):
ss.send_stats_once()
# one more call with COLLECTOR_PING_INTERVAL value
self.assertEqual(sleep.call_count, 2)
dithered.assert_called_with(1)
ss.send_stats_once()
# one more call was made (all went ok)
self.assertEqual(sleep.call_count, 3)
@patch('nailgun.statistics.statsenderd.StatsSender.send_data_to_url')
def test_oswl_nothing_to_send(self, send_data_to_url):
dt = datetime.datetime.utcnow()
obj_data = {
'cluster_id': 1,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'created_date': dt.date(),
'updated_time': dt.time(),
'resource_checksum': ""
}
obj = OpenStackWorkloadStats.create(obj_data)
self.assertEqual(
OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm),
obj
)
StatsSender().send_oswl_info()
        # Nothing is sent: today's records are skipped because they are
        # incomplete and may still be updated during the day.
self.assertEqual(send_data_to_url.call_count, 0)
@patch('nailgun.db.sqlalchemy.fixman.settings.OSWL_COLLECT_PERIOD', 0)
@patch('nailgun.statistics.statsenderd.StatsSender.send_data_to_url')
def test_oswl_send_todays_record(self, send_data_to_url):
dt = datetime.datetime.utcnow()
obj_data = {
'cluster_id': 1,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'created_date': dt.date(),
'updated_time': dt.time(),
'resource_checksum': ""
}
obj = OpenStackWorkloadStats.create(obj_data)
self.assertEqual(
OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm),
obj
)
StatsSender().send_oswl_info()
self.assertEqual(send_data_to_url.call_count, 1)
def check_oswl_data_send_result(self, send_data_to_url, status, is_sent):
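        # helper: emulates the collector's response with the given status and
        # checks that the record's is_sent flag ends up as expected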
        # make yesterday's record (today's records are not sent)
dt = datetime.datetime.utcnow() - datetime.timedelta(days=1)
obj_data = {
'cluster_id': 1,
'resource_type': consts.OSWL_RESOURCE_TYPES.vm,
'created_date': dt.date(),
'updated_time': dt.time(),
'resource_checksum': ""
}
obj = OpenStackWorkloadStats.create(obj_data)
self.assertEqual(
OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm),
obj
)
rec_id = obj.id
self.assertEqual(obj.is_sent, False)
# emulate the answer from requests.post()
class response(object):
status_code = 200
data = {
"status": "ok",
"text": "ok",
"oswl_stats": [{
"master_node_uid": "",
"id": rec_id,
"status": status
}]
}
def __getitem__(self, key):
return self.data[key]
@classmethod
def json(cls):
return cls.data
send_data_to_url.return_value = response
sender = StatsSender()
sender.send_oswl_info()
obj_data_sent = {'oswl_stats': [{
'id': rec_id,
'cluster_id': 1,
'created_date': dt.date().isoformat(),
'updated_time': dt.time().isoformat(),
'resource_type': 'vm',
'resource_checksum': '',
'master_node_uid': None,
'resource_data': None,
}]}
send_data_to_url.assert_called_once_with(
url=sender.build_collector_url("COLLECTOR_OSWL_INFO_URL"),
data=obj_data_sent)
obj = OpenStackWorkloadStats.get_last_by(
1, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(obj.is_sent, is_sent)
OpenStackWorkloadStats.delete(obj)
send_data_to_url.reset_mock()
@patch('nailgun.statistics.statsenderd.StatsSender.send_data_to_url')
def test_oswl_data_send_results(self, send_data_to_url):
status_vs_sent = {
"added": True,
"updated": True,
"failed": False
}
        for status, is_sent in status_vs_sent.items():
self.check_oswl_data_send_result(send_data_to_url, status, is_sent)

View File

@ -0,0 +1,128 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock
from mock import patch
import os
from nailgun.test.base import BaseTestCase
from nailgun.statistics import errors
from nailgun.statistics import utils
class TestUtilsFunctions(BaseTestCase):
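    """Tests for the shared helpers in nailgun.statistics.utils:
    set_proxy, get_attr_value, get_online_controller and get_nested_attr.
    """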
def test_set_proxy_func(self):
def check_proxy():
with utils.set_proxy(new_proxy):
self.assertEqual(os.environ.get("http_proxy"), new_proxy)
def raise_inside_context():
with utils.set_proxy(new_proxy):
raise Exception("Just an error")
expected = {"http_proxy": "test"}
new_proxy = "fake_proxy"
        # check that the old proxy value is restored after leaving the
        # context manager, both with and without an exception
with patch.dict("os.environ", expected):
check_proxy()
self.assertEqual(os.environ.get("http_proxy"),
expected["http_proxy"])
raise_inside_context()
self.assertEqual(os.environ.get("http_proxy"),
expected["http_proxy"])
        # check that the environment variable is deleted after leaving the
        # context manager, both with and without an exception
check_proxy()
self.assertNotIn("http_proxy", os.environ)
raise_inside_context()
self.assertNotIn("http_proxy", os.environ)
def test_get_attr_value(self):
attributes = {
'a': 'b',
'c': [
{'x': 'z', 'y': [{'t': 'u'}, {'v': 'w'}, {'t': 'u0'}]},
{'x': 'zz', 'y': [{'t': 'uu'}, {'v': 'ww'}]}
],
'd': {'f': 'g', 'k': [0, 1, 2]},
}
white_list = (
utils.WhiteListRule(('a',), 'map_a', None),
utils.WhiteListRule(('d', 'f'), 'map_f', None),
utils.WhiteListRule(('d', 'k'), 'map_k_len', len),
utils.WhiteListRule(('c', 'x'), 'map_x', None),
utils.WhiteListRule(('c', 'y', 't'), 'map_t', None),
)
actual = {}
for rule in white_list:
actual[rule.map_to_name] = utils.get_attr_value(
rule.path, rule.transform_func, attributes)
expected = {
'map_f': 'g',
'map_k_len': 3,
'map_a': 'b',
'map_x': ['z', 'zz'],
'map_t': [['u', 'u0'], ['uu']],
}
self.assertDictEqual(actual, expected)
def test_get_online_controller(self):
node_name = "test"
self.env.create(
nodes_kwargs=[{"online": True,
"roles": ["controller"],
"name": node_name}]
)
cluster = self.env.clusters[0]
online_controller = utils.get_online_controller(cluster)
self.assertIsNotNone(online_controller)
self.assertEqual(online_controller.name, node_name)
cluster.nodes[0].online = False
self.assertRaises(errors.NoOnlineControllers,
utils.get_online_controller,
cluster)
def test_get_nested_attr(self):
expected_attr = Mock()
intermediate_attr = Mock(spec=["expected_attr"])
containing_obj = Mock(spec=["intermediate_attr"])
intermediate_attr.expected_attr = expected_attr
containing_obj.intermediate_attr = intermediate_attr
existing_attr_path = ["intermediate_attr", "expected_attr"]
self.assertEqual(
expected_attr,
utils.get_nested_attr(containing_obj, existing_attr_path)
)
missing_attrs_pathes = [
["missing_attr", "expected_attr"],
["intermediate_attr", "missing_attr"],
]
for attr_path in missing_attrs_pathes:
self.assertIsNone(
utils.get_nested_attr(containing_obj, attr_path)
)

File diff suppressed because it is too large

View File

@ -69,8 +69,8 @@ if __name__ == "__main__":
'assassind = nailgun.assassin.assassind:run',
'receiverd = nailgun.rpc.receiverd:run',
'statsenderd = nailgun.statistics.statsenderd:run',
-            'oswl_collectord = nailgun.statistics.oswl_collector:run',
-            ('oswl_cleaner = nailgun.statistics.utils:'
+            'oswl_collectord = nailgun.statistics.oswl.collector:run',
+            ('oswl_cleaner = nailgun.statistics.oswl.helpers:'
'delete_expired_oswl_entries'),
],
'nose.plugins.0.10': [