Merge "Add more rpc data filters to tasks logging"

Jenkins 2014-12-01 16:14:58 +00:00 committed by Gerrit Code Review
commit f71d843778
12 changed files with 473 additions and 163 deletions

View File

@@ -77,7 +77,9 @@ class ActionLog(NailgunObject):
# one should take precaution as such construction overrides keys
# that are already present in additional_info
if data.get('additional_info'):
data['additional_info'].update(instance.additional_info)
add_info = dict(instance.additional_info)
add_info.update(data['additional_info'])
data['additional_info'] = add_info
return super(ActionLog, cls).update(instance, data)
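The rewritten merge keeps the stored additional_info as the base and lets the incoming update win on key conflicts; previously the stored values silently clobbered the update. A minimal standalone sketch of the difference, with hypothetical dict contents:

# Illustrative sketch only; the dict values are hypothetical.
stored = {'ended_with_status': 'running', 'parent_task_id': 1}
incoming = {'ended_with_status': 'ready'}

# Old behaviour: data['additional_info'].update(instance.additional_info)
old = dict(incoming)
old.update(stored)
assert old['ended_with_status'] == 'running'  # the update was lost

# New behaviour: copy the stored dict, then apply the incoming update.
new = dict(stored)
new.update(incoming)
assert new == {'ended_with_status': 'ready', 'parent_task_id': 1}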

View File

@@ -131,6 +131,7 @@ class Task(NailgunObject):
lambda s: s.message is not None, subtasks)))
cls.update(instance, data)
TaskHelper.update_action_log(instance)
elif any(map(lambda s: s.status in ('error',), subtasks)):
for subtask in subtasks:
@@ -150,6 +151,8 @@ class Task(NailgunObject):
), subtasks)))))
cls.update(instance, data)
TaskHelper.update_action_log(instance)
else:
subtasks_with_progress = filter(
lambda s: s.progress is not None,

View File

@@ -19,6 +19,7 @@ import copy
import datetime
import itertools
import os
import six
import traceback
from sqlalchemy import or_
@@ -348,19 +349,33 @@ class NailgunReceiver(object):
@classmethod
def _update_action_log_entry(cls, task_status, task_uuid, nodes_from_resp):
if task_status in (consts.TASK_STATUSES.ready,
consts.TASK_STATUSES.error):
al = objects.ActionLog.get_by_task_uuid(task_uuid)
try:
if task_status in (consts.TASK_STATUSES.ready,
consts.TASK_STATUSES.error):
al = objects.ActionLog.get_by_task_uuid(task_uuid)
if al:
data = {
'end_timestamp': datetime.datetime.utcnow(),
'additional_info': {
'nodes_from_resp': nodes_from_resp,
'ended_with_status': task_status
if al:
data = {
'end_timestamp': datetime.datetime.utcnow(),
'additional_info': {
'nodes_from_resp': cls.sanitize_nodes_from_resp(
nodes_from_resp),
'ended_with_status': task_status
}
}
}
objects.ActionLog.update(al, data)
objects.ActionLog.update(al, data)
except Exception as e:
logger.error("_update_action_log_entry failed: %s",
six.text_type(e))
@classmethod
def sanitize_nodes_from_resp(cls, nodes):
resp = []
if isinstance(nodes, list):
for n in nodes:
if isinstance(n, dict) and 'uid' in n:
resp.append(n['uid'])
return resp
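# Example (hypothetical payload): [{'uid': '42', 'ip': '10.20.0.3'}, 'junk']
# yields ['42']; a non-list input such as None yields [].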
@classmethod
def _generate_error_message(cls, task, error_types, names_only=False):
@@ -614,6 +629,8 @@ class NailgunReceiver(object):
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
cls._update_action_log_entry(status, task_uuid, nodes)
@classmethod
def reset_environment_resp(cls, **kwargs):
logger.info(
@@ -691,6 +708,8 @@ class NailgunReceiver(object):
data = {'status': status, 'progress': progress, 'message': message}
objects.Task.update(task, data)
cls._update_action_log_entry(status, task_uuid, nodes)
@classmethod
def _notify_inaccessible(cls, cluster_id, nodes_uids, action):
ia_nodes_db = db().query(Node.name).filter(
@@ -841,6 +860,8 @@ class NailgunReceiver(object):
objects.Task.update_verify_networks(task, status, progress,
error_msg, result)
cls._update_action_log_entry(status, task_uuid, nodes)
@classmethod
def multicast_verification_resp(cls, **kwargs):
"""Receiver for verification of multicast packages

View File

@@ -18,9 +18,10 @@ from nailgun import consts
# "*" means any key name
# "" means any value
task_output_white_list = {
consts.TASK_NAMES.provision: {
_task_output_white_list_template = {
"provision_template": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": "",
@@ -30,18 +31,6 @@ task_output_white_list = {
},
"nodes": {
"uid": "",
"interfaces": {
"*": {
"static": "",
"netmask": ""
}
},
"interfaces_extra": {
"*": {
"onboot": "",
"peerdns": ""
}
},
"ks_meta": {
"mco_enable": "",
"mlnx_iser_enabled": "",
@@ -75,8 +64,9 @@ task_output_white_list = {
}
}
},
consts.TASK_NAMES.deployment: {
"deployment_template": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": "",
@@ -124,8 +114,7 @@ task_output_white_list = {
},
"L3": {
"enable_dhcp": ""
},
"tenant": ""
}
}
}
},
@@ -135,7 +124,6 @@ task_output_white_list = {
"image_data": {
"*": {
"container": "",
"uri": "",
"format": ""
}
},
@@ -178,21 +166,10 @@ task_output_white_list = {
"status": "",
"deployment_mode": "",
"fail_if_error": "",
"puppet_manifests_source": "",
"network_scheme": {
"transformations": {
"*": "",
},
"roles": {
"*": ""
},
"interfaces": {
"*": {
"L2": {
"vlan_splinters": ""
}
}
},
"version": "",
"provider": "",
"endpoints": {
@@ -211,7 +188,6 @@ task_output_white_list = {
"min_ram": "",
"disk_format": "",
"glance_properties": "",
"img_name": "",
"public": ""
},
"fuel_version": "",
@@ -223,9 +199,6 @@ task_output_white_list = {
"uid": "",
"role": ""
},
"repo_metadata": {
"nailgun": ""
},
"kernel_params": {
"kernel": ""
},
@@ -236,7 +209,6 @@ task_output_white_list = {
"enabled": ""
}
},
"puppet_modules_source": "",
"debug": "",
"deployment_id": "",
"openstack_version_prev": ""
@@ -248,5 +220,89 @@ task_output_white_list = {
"*": {}
}
}
},
"delete_template": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": "",
"nodes": {
"id": "",
"uid": "",
"roles": ""
}
}
},
"dump_template": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": "",
"settings": {
'timestamp': "",
'lastdump': "",
'target': "",
'dump': {
'*': {
'objects': {
'type': "",
'command': "",
'path': "",
},
'hosts': {}
}
}
}
}
},
"networks_verify_template": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": "",
"nodes": {
"uid": ""
}
},
"subtasks": {
"method": "",
"respond_to": "",
"api_version": "",
"args": {
"task_uuid": ""
}
}
}
}
task_output_white_list = {
consts.TASK_NAMES.provision:
_task_output_white_list_template["provision_template"],
consts.TASK_NAMES.deployment:
_task_output_white_list_template["deployment_template"],
consts.TASK_NAMES.update:
_task_output_white_list_template["deployment_template"],
consts.TASK_NAMES.node_deletion:
_task_output_white_list_template["delete_template"],
consts.TASK_NAMES.cluster_deletion:
_task_output_white_list_template["delete_template"],
consts.TASK_NAMES.reset_environment:
_task_output_white_list_template["delete_template"],
consts.TASK_NAMES.stop_deployment:
_task_output_white_list_template["delete_template"],
consts.TASK_NAMES.verify_networks:
_task_output_white_list_template["networks_verify_template"],
consts.TASK_NAMES.check_dhcp:
_task_output_white_list_template["networks_verify_template"],
consts.TASK_NAMES.multicast_verification:
_task_output_white_list_template["networks_verify_template"],
consts.TASK_NAMES.dump:
_task_output_white_list_template["dump_template"],
}
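The templates above drive the recursive filter behind TaskHelper.sanitize_task_output (the sanitize_sub_tree call in the next file). The sketch below is an assumption about that filter's semantics, not the shipped implementation: a key survives only if the white list names it or a "*" wildcard covers it, and a leaf of "" or {} means the matched value is kept verbatim.

# Standalone sketch with assumed semantics; not the actual sanitize_sub_tree.
def sanitize_sketch(data, white_list):
    if isinstance(data, list):
        return [sanitize_sketch(d, white_list) for d in data]
    if isinstance(data, dict):
        result = {}
        for key, value in data.items():
            if key in white_list:
                sub = white_list[key]
            elif "*" in white_list:
                sub = white_list["*"]
            else:
                continue  # key is not white-listed: drop it
            # "" or {} as a white-list leaf means "keep the value as-is"
            result[key] = value if sub in ("", {}) else sanitize_sketch(value, sub)
        return result
    return data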

View File

@@ -17,6 +17,7 @@
import datetime
import os
import shutil
import six
import web
@@ -29,11 +30,13 @@ from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import ActionLog
from nailgun.settings import settings
from nailgun.statistics.params_white_lists import task_output_white_list
tasks_names_actions_groups_mapping = {
consts.TASK_NAMES.deploy: "cluster_changes",
consts.TASK_NAMES.deployment: "cluster_changes",
consts.TASK_NAMES.provision: "cluster_changes",
consts.TASK_NAMES.node_deletion: "cluster_changes",
@@ -406,3 +409,30 @@ class TaskHelper(object):
return None
white_list = task_output_white_list[al.action_name]
return sanitize_sub_tree(task_output, white_list)
@classmethod
def create_action_log(cls, task):
try:
create_kwargs = cls.prepare_action_log_kwargs(task)
return ActionLog.create(create_kwargs)
except Exception as e:
logger.error("create_action_log failed: %s", six.text_type(e))
@classmethod
def update_action_log(cls, task, al_instance=None):
try:
if not al_instance:
al_instance = ActionLog.get_by_task_uuid(task.uuid)
if al_instance:
update_data = {
"end_timestamp": datetime.datetime.utcnow(),
"additional_info": {
"ended_with_status": task.status,
"message": task.message,
"output": cls.sanitize_task_output(task.cache,
al_instance)
}
}
ActionLog.update(al_instance, update_data)
except Exception as e:
logger.error("update_action_log failed: %s", six.text_type(e))

View File

@@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import six
import traceback
from nailgun.objects.serializers.network_configuration \
@@ -45,32 +43,13 @@ class TaskManager(object):
if cluster_id:
self.cluster = db().query(Cluster).get(cluster_id)
def create_action_log(self, task_instance):
create_kwargs = TaskHelper.prepare_action_log_kwargs(task_instance)
return objects.ActionLog.create(create_kwargs)
def update_action_log(self, task, task_output, al_instance):
try:
update_data = {
"end_timestamp": datetime.datetime.utcnow(),
"additional_info": {
"ended_with_status": task.status,
"message": task.message,
"output": task_output
}
}
objects.ActionLog.update(al_instance, update_data)
except Exception as e:
logger.error("update_action_log failed: ", six.text_type(e))
def _call_silently(self, task, instance, *args, **kwargs):
# create action_log for task
al = self.create_action_log(task)
al = TaskHelper.create_action_log(task)
method = getattr(instance, kwargs.pop('method_name', 'execute'))
if task.status == TASK_STATUSES.error:
self.update_action_log(task, None, al)
TaskHelper.update_action_log(task, al)
return
try:
to_return = method(task, *args, **kwargs)
@@ -78,11 +57,7 @@ class TaskManager(object):
# update action_log instance for task
# for asynchronous task it will be not final update
# as they also are updated in rpc receiver
self.update_action_log(
task,
TaskHelper.sanitize_task_output(to_return, al),
al
)
TaskHelper.update_action_log(task, al)
return to_return
except Exception as exc:
@@ -99,7 +74,7 @@ class TaskManager(object):
'message': err}
objects.Task.update(task, data)
self.update_action_log(task, None, al)
TaskHelper.update_action_log(task, al)
def check_running_task(self, task_name):
current_tasks = db().query(Task).filter_by(
@@ -183,6 +158,7 @@ class ApplyChangesTaskManager(TaskManager):
# we should have task committed for processing in other threads
db().commit()
TaskHelper.create_action_log(supertask)
# Run validation if user didn't redefine
# provisioning and deployment information
@@ -612,8 +588,6 @@ class UpdateEnvironmentTaskManager(TaskManager):
db().refresh(task_update)
task_update.cache = deployment_message
for node in nodes_to_change:
node.status = 'deploying'
node.progress = 0

View File

@@ -46,13 +46,16 @@ from nailgun.task.helpers import TaskHelper
from nailgun.utils.zabbix import ZabbixManager
def make_astute_message(method, respond_to, args):
return {
def make_astute_message(task, method, respond_to, args):
message = {
'api_version': settings.VERSION['api'],
'method': method,
'respond_to': respond_to,
'args': args
}
message['args']['task_uuid'] = task.uuid
task.cache = message
return message
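# Note the ordering contract the callers below follow: build the message
# (which sets task.cache), db().commit() so the cached message is persisted,
# then hand it to rpc.cast(); TaskHelper.update_action_log later reads
# task.cache and sanitizes it for the action log.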
def fake_cast(queue, messages, **kwargs):
@@ -146,18 +149,19 @@ class DeploymentTask(object):
# After serialization set pending_addition to False
for node in nodes:
node.pending_addition = False
db().commit()
return make_astute_message(
rpc_message = make_astute_message(
task,
'deploy',
'deploy_resp',
{
'task_uuid': task.uuid,
'deployment_info': serialized_cluster,
'pre_deployment': pre_deployment,
'post_deployment': post_deployment
}
)
db().commit()
return rpc_message
class UpdateTask(object):
@@ -180,16 +184,17 @@ class UpdateTask(object):
# After serialization set pending_addition to False
for node in nodes:
node.pending_addition = False
db().commit()
return make_astute_message(
rpc_message = make_astute_message(
task,
'deploy',
'deploy_resp',
{
'task_uuid': task.uuid,
'deployment_info': serialized_cluster
}
)
db().commit()
return rpc_message
class ProvisionTask(object):
@@ -215,16 +220,17 @@ class ProvisionTask(object):
).get_admin_network_group_id(node.id)
TaskHelper.prepare_syslog_dir(node, admin_net_id)
db().commit()
return make_astute_message(
rpc_message = make_astute_message(
task,
'provision',
'provision_resp',
{
'task_uuid': task.uuid,
'provisioning_info': serialized_cluster
}
)
db().commit()
return rpc_message
class DeletionTask(object):
@@ -321,10 +327,10 @@ class DeletionTask(object):
db().commit()
msg_delete = make_astute_message(
task,
'remove_nodes',
respond_to,
{
'task_uuid': task.uuid,
'nodes': nodes_to_delete,
'engine': {
'url': settings.COBBLER_URL,
@@ -334,6 +340,7 @@
}
}
)
db().flush()
# only fake tasks
if USE_FAKE and nodes_to_restore:
msg_delete['args']['nodes_to_restore'] = nodes_to_restore
@@ -351,11 +358,11 @@ class StopDeploymentTask(object):
).filter(
not_(Node.status == 'ready')
).yield_per(100)
return make_astute_message(
rpc_message = make_astute_message(
task,
"stop_deploy_task",
"stop_deployment_resp",
{
"task_uuid": task.uuid,
"stop_task_uuid": stop_task.uuid,
"nodes": [
{
@@ -375,6 +382,8 @@
}
}
)
db().commit()
return rpc_message
@classmethod
def execute(cls, task, deploy_task, provision_task):
@@ -399,11 +408,11 @@ class ResetEnvironmentTask(object):
nodes_to_reset = db().query(Node).filter(
Node.cluster_id == task.cluster.id
).yield_per(100)
return make_astute_message(
rpc_message = make_astute_message(
task,
"reset_environment",
"reset_environment_resp",
{
"task_uuid": task.uuid,
"nodes": [
{
'uid': n.uid,
@@ -419,6 +428,8 @@
}
}
)
db().commit()
return rpc_message
@classmethod
def execute(cls, task):
@@ -476,14 +487,13 @@ class BaseNetworkVerification(object):
def get_message(self):
nodes = self.get_message_body()
message = make_astute_message(
self.task,
self.task.name,
'{0}_resp'.format(self.task.name),
{
'task_uuid': self.task.uuid,
'nodes': nodes
}
)
self.task.cache = message
return message
def execute(self, task=None):
@@ -776,15 +786,13 @@ class DumpTask(object):
def execute(cls, task):
logger.debug("DumpTask: task={0}".format(task.uuid))
message = make_astute_message(
task,
'dump_environment',
'dump_environment_resp',
{
'task_uuid': task.uuid,
'settings': cls.conf()
}
)
task.cache = message
db().add(task)
db().commit()
rpc.cast('naily', message)
@@ -835,5 +843,4 @@ class GenerateCapacityLogTask(object):
task.result = {'log_id': capacity_log.id}
task.status = 'ready'
task.progress = '100'
db().add(task)
db().commit()

View File

@@ -37,6 +37,8 @@ from webtest import app
import nailgun
from nailgun.api.v1.urls import urls
from nailgun import consts
from nailgun.db import db
from nailgun.db import flush
from nailgun.db import syncdb
@@ -609,6 +611,48 @@ class Environment(object):
"Nothing to reset - try creating cluster"
)
def delete_environment(self, expect_http=202):
if self.clusters:
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'obj_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(resp.status_code, expect_http)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
name=consts.TASK_NAMES.cluster_deletion
).first()
else:
raise NotImplementedError(
"Nothing to delete - try creating cluster"
)
def update_environment(self, pending_release_id=None, expect_http=202):
if self.clusters:
if not pending_release_id:
pending_release_id = self.clusters[0].release_id
self.clusters[0].pending_release_id = pending_release_id
self.db.commit()
resp = self.app.put(
reverse(
'ClusterUpdateHandler',
kwargs={'cluster_id': self.clusters[0].id}),
expect_errors=True,
headers=self.default_headers)
self.tester.assertEqual(expect_http, resp.status_code)
if not str(expect_http).startswith("2"):
return resp.body
return self.db.query(Task).filter_by(
name=consts.TASK_NAMES.update
).first()
else:
raise NotImplementedError(
"Nothing to update - try creating cluster"
)
def launch_verify_networks(self, data=None):
if self.clusters:
net_urls = {

View File

@@ -16,7 +16,6 @@
import datetime
import random
import six
import uuid
from nailgun.db.sqlalchemy.models import Attributes
@@ -881,7 +880,10 @@ class TestConsumer(BaseIntegrationTest):
# check that action_log entry was updated
# in receiver's methods' code
self.assertIsNotNone(al.end_timestamp)
self.assertIn('nodes_from_resp', six.iterkeys(al.additional_info))
self.assertIn('nodes_from_resp',
al.additional_info)
self.assertEqual(set(al.additional_info['nodes_from_resp']),
set(kwargs['node_ids']))
self.assertEqual(kwargs['task_status'],
al.additional_info.get('ended_with_status'))
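# nodes_from_resp now stores sanitized uids only, which is why the direct
# set comparison against kwargs['node_ids'] above is possible.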

View File

@@ -0,0 +1,204 @@
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.test.base import reverse
from nailgun import consts
from nailgun.statistics.params_white_lists import task_output_white_list
from nailgun.task.helpers import TaskHelper
class TestTasksLogging(BaseIntegrationTest):
def tearDown(self):
self._wait_for_threads()
super(TestTasksLogging, self).tearDown()
def check_keys_included(self, keys, data):
"""Check that only values with keys from 'keys' are present in 'data'
"""
if isinstance(data, list):
for d in data:
self.check_keys_included(keys, d)
elif isinstance(data, dict):
for k in data:
if k in keys:
self.check_keys_included(keys[k], data[k])
elif "*" in keys:
self.check_keys_included(keys["*"], data[k])
else:
self.fail("key {0} is not present in {1}".format(k, keys))
else:
self.assertIn(keys, ("", {}))
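# Example (hypothetical): with keys = {"nodes": {"uid": ""}},
# data = {"nodes": [{"uid": "1"}]} passes, while
# data = {"nodes": [{"ip": "10.0.0.1"}]} triggers self.fail(),
# since "ip" is neither listed nor matched by a "*" wildcard.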
def check_task_name_and_sanitized_data(self, pos, logger, task_name,
is_supertask=False):
"""Test task name against known value and check sanitized data doesn't
contain keys which are absent in white_list.
:param pos: position of call parameters inside logger.call_args_list,
(negative value: -1 - last call, -2 - pre-last call, etc.)
:param logger: mock object for logger method
:param task_name: expected task name
:param is_supertask: whether given task must be a supertask or not
"""
log_args = logger.call_args_list
task = log_args[pos][0][0]
self.assertEqual(task.name, task_name)
if len(log_args[pos][0]) == 2:
log_record = log_args[pos][0][1]
if task_name in task_output_white_list:
self.check_keys_included(
task_output_white_list[task_name],
TaskHelper.sanitize_task_output(task.cache, log_record))
else:
self.assertIsNone(
TaskHelper.sanitize_task_output(task.cache, log_record))
else:
# supertask
self.assertTrue(is_supertask)
self.assertEqual(task.cache, {})
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_deployment_task_logging(self, logger):
self.env.create(
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'
},
nodes_kwargs=[
{"pending_addition": True, "pending_roles": ["controller"]},
{"pending_addition": True, "pending_roles": ["cinder"]},
{"pending_addition": True, "pending_roles": ["compute"]},
]
)
supertask = self.env.launch_deployment()
self.assertEqual(len(logger.call_args_list), 4)
self.check_task_name_and_sanitized_data(
-4, logger, consts.TASK_NAMES.check_networks)
self.check_task_name_and_sanitized_data(
-3, logger, consts.TASK_NAMES.check_before_deployment)
self.check_task_name_and_sanitized_data(
-2, logger, consts.TASK_NAMES.provision)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.deployment)
self.env.wait_ready(supertask, 15)
# call for 'deploy' is added
self.assertEqual(len(logger.call_args_list), 5)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.deploy, is_supertask=True)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_delete_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"roles": ["controller"]},
{"roles": ["cinder"]},
{"roles": ["compute"]},
]
)
self.env.delete_environment()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.cluster_deletion)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_reset_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"roles": ["controller"]},
{"roles": ["cinder"]},
{"roles": ["compute"]},
]
)
self.env.reset_environment()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.reset_environment)
@fake_tasks(god_mode=True, recover_nodes=False)
@patch.object(TaskHelper, 'update_action_log')
def test_stop_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"pending_addition": True, "pending_roles": ["controller"]},
{"pending_addition": True, "pending_roles": ["cinder"]},
{"pending_addition": True, "pending_roles": ["compute"]},
]
)
self.env.launch_deployment()
self.env.stop_deployment()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.stop_deployment)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_update_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"roles": ["controller"], "status": "ready"},
{"roles": ["cinder"], "status": "ready"},
{"roles": ["compute"], "status": "ready"},
]
)
self.env.update_environment()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.update)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_dump_task_logging(self, logger):
resp = self.app.put(
reverse('LogPackageHandler'), headers=self.default_headers
)
self.assertEqual(resp.status_code, 202)
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.dump)
@fake_tasks(god_mode=True)
@patch.object(TaskHelper, 'update_action_log')
def test_verify_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"pending_addition": True, "pending_roles": ["controller"]},
{"pending_addition": True, "pending_roles": ["cinder"]},
{"pending_addition": True, "pending_roles": ["compute"]},
]
)
self.env.launch_verify_networks()
self.assertGreaterEqual(len(logger.call_args_list), 1)
self.check_task_name_and_sanitized_data(
-1, logger, consts.TASK_NAMES.verify_networks)

View File

@@ -121,6 +121,7 @@ class TestTaskManagers(BaseIntegrationTest):
self.assertIsNotNone(action_log.end_timestamp)
self.assertIn("ended_with_status", action_log.additional_info)
self.assertIn("message", action_log.additional_info)
self.assertIn("output", action_log.additional_info)
def test_check_before_deployment_with_error(self):
self.env.create(
@@ -133,25 +134,36 @@ class TestTaskManagers(BaseIntegrationTest):
action_logs = objects.ActionLogCollection.all()
self.assertEqual(action_logs.count(), 3)
for al in action_logs:
self.assertEqual(al.action_type, ACTION_TYPES.nailgun_task)
self.assertEqual(al.additional_info["parent_task_id"],
supertask.id)
self.assertIsNotNone(al.end_timestamp)
self.assertIn("ended_with_status", al.additional_info)
self.assertIn("message", al.additional_info)
if al.additional_info["operation"] == TASK_NAMES.deploy:
self.assertIsNone(al.additional_info["parent_task_id"])
self.assertEqual(al.task_uuid, supertask.uuid)
else:
self.assertIsNotNone(al.end_timestamp)
self.assertIn("ended_with_status", al.additional_info)
self.assertIn("message", al.additional_info)
self.assertIn("output", al.additional_info)
if al.additional_info["operation"] == TASK_NAMES.check_networks:
# check_networks task is not updated to "ready" status in case
# of success but left with "running" value
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.running)
elif (
al.additional_info["operation"] ==
TASK_NAMES.check_before_deployment
):
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.error)
if (
al.additional_info["operation"] ==
TASK_NAMES.check_networks
):
# check_networks task is not updated to "ready" status in
# case of success but left with "running" value
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.running)
self.assertEqual(al.additional_info["parent_task_id"],
supertask.id)
elif (
al.additional_info["operation"] ==
TASK_NAMES.check_before_deployment
):
self.assertEqual(al.additional_info["ended_with_status"],
TASK_STATUSES.error)
self.assertEqual(al.additional_info["parent_task_id"],
supertask.id)
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
@@ -341,7 +353,7 @@ class TestTaskManagers(BaseIntegrationTest):
tasks = self.db.query(Task).all()
self.assertEqual(tasks, [])
@fake_tasks()
@fake_tasks(recover_nodes=False)
def test_deletion_during_deployment(self):
self.env.create(
nodes_kwargs=[

View File

@@ -20,14 +20,12 @@ import requests
import urllib3
from nailgun.test.base import BaseTestCase
from nailgun.test.base import fake_tasks
from nailgun import consts
from nailgun.objects import Cluster
from nailgun.objects import ReleaseCollection
from nailgun.settings import settings
from nailgun.statistics.installation_info import InstallationInfo
from nailgun.statistics.params_white_lists import task_output_white_list
from nailgun.statistics.statsenderd import StatsSender
FEATURE_MIRANTIS = {'feature_groups': ['mirantis']}
@@ -344,46 +342,3 @@ class TestStatisticsSender(BaseTestCase):
)
log_error.assert_called_once_with(
"Sending data to collector failed: %s", "custom")
class TestTasksLogging(BaseTestCase):
def check_keys_included(self, keys, data):
"""Check that only values with keys from 'keys' are present in 'data'
"""
if isinstance(data, list):
for d in data:
self.check_keys_included(keys, d)
elif isinstance(data, dict):
for k in data:
if k in keys:
self.check_keys_included(keys[k], data[k])
elif "*" in keys:
self.check_keys_included(keys["*"], data[k])
else:
self.fail("key {0} is not present in {1}".format(k, keys))
else:
self.assertEqual("", keys)
@fake_tasks(god_mode=True)
@patch('nailgun.task.manager.TaskManager.update_action_log')
def test_deployment_task_logging(self, logger):
self.env.create(
nodes_kwargs=[
{"pending_addition": True, "pending_roles": ["controller"]},
{"pending_addition": True, "pending_roles": ["cinder"]},
{"pending_addition": True, "pending_roles": ["compute"]},
]
)
supertask = self.env.launch_deployment()
self.env.wait_ready(supertask, 15)
log_args = logger.call_args_list
self.assertGreaterEqual(len(log_args), 2)
provision_args = log_args[-2][0][1]
deployment_args = log_args[-1][0][1]
self.check_keys_included(
task_output_white_list[consts.TASK_NAMES.provision],
provision_args)
self.check_keys_included(
task_output_white_list[consts.TASK_NAMES.deployment],
deployment_args)