Unified task returning procedure
Change-Id: Ic15d88a4513452214b488f0b3de79a18e1e4cdaf Closes-Bug: #1427536
This commit is contained in:
parent
4813ce2b3f
commit
2de3806128
|
@ -26,6 +26,7 @@ import web
|
|||
from nailgun.api.v1.validators.base import BaseDefferedTaskValidator
|
||||
from nailgun.api.v1.validators.base import BasicValidator
|
||||
from nailgun.api.v1.validators.graph import GraphTasksValidator
|
||||
from nailgun import consts
|
||||
from nailgun.db import db
|
||||
from nailgun.errors import errors
|
||||
from nailgun.logger import logger
|
||||
|
@ -234,6 +235,15 @@ class BaseHandler(object):
|
|||
|
||||
return list(node_query)
|
||||
|
||||
def raise_task(self, task):
    """Respond with the given task, choosing the status code by task state.

    Finished tasks (ready or error) are reported with 200; anything still
    in progress is reported with 202 (accepted / scheduled).

    :param task: Task model instance to serialize into the response body.
    :raises: ``self.http`` — always raised to short-circuit the handler
        with the JSONized task as the response payload.
    """
    finished = task.status in (consts.TASK_STATUSES.ready,
                               consts.TASK_STATUSES.error)
    # 200 for a completed task, 202 for one that is still running/pending.
    raise self.http(200 if finished else 202, objects.Task.to_json(task))
|
||||
|
||||
|
||||
def content_json(func, cls, *args, **kwargs):
|
||||
json_resp = lambda data: (
|
||||
|
@ -551,7 +561,7 @@ class DeferredTaskHandler(BaseHandler):
|
|||
# let it be 500
|
||||
raise
|
||||
|
||||
raise self.http(202, self.single.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class DeploymentTasksHandler(SingleHandler):
|
||||
|
|
|
@ -90,12 +90,16 @@ class CapacityLogHandler(BaseHandler):
|
|||
"""Starts capacity data generation.
|
||||
|
||||
:returns: JSONized Task object.
|
||||
:http: * 202 (setup task created and started)
|
||||
:http: * 200 (setup task successfully executed)
|
||||
* 202 (setup task created and started)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster not found in db)
|
||||
"""
|
||||
# TODO(pkaminski): this seems to be synchronous, no task needed here
|
||||
manager = GenerateCapacityLogTaskManager()
|
||||
task = manager.execute()
|
||||
|
||||
raise self.http(202, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class CapacityLogCsvHandler(BaseHandler):
|
||||
|
|
|
@ -383,7 +383,8 @@ class LogPackageHandler(BaseHandler):
|
|||
def PUT(self):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 400 (failed to execute task)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster not found in db)
|
||||
"""
|
||||
try:
|
||||
conf = jsonutils.loads(web.data()) if web.data() else None
|
||||
|
@ -393,7 +394,8 @@ class LogPackageHandler(BaseHandler):
|
|||
logger.warn(u'DumpTask: error while execution '
|
||||
'dump environment task: {0}'.format(str(exc)))
|
||||
raise self.http(400, str(exc))
|
||||
raise self.http(202, objects.Task.to_json(task))
|
||||
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class LogPackageDefaultConfig(BaseHandler):
|
||||
|
|
|
@ -85,9 +85,12 @@ class NovaNetworkConfigurationHandler(ProviderHandler):
|
|||
@content
|
||||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 202 (network checking task created)
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (network checking task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster not found in db)
|
||||
"""
|
||||
# TODO(pkaminski): this seems to be synchronous, no task needed here
|
||||
data = jsonutils.loads(web.data())
|
||||
if data.get("networks"):
|
||||
data["networks"] = [
|
||||
|
@ -126,7 +129,7 @@ class NovaNetworkConfigurationHandler(ProviderHandler):
|
|||
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
raise self.http(200, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class NeutronNetworkConfigurationHandler(ProviderHandler):
|
||||
|
@ -149,6 +152,13 @@ class NeutronNetworkConfigurationHandler(ProviderHandler):
|
|||
|
||||
@content
|
||||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (network checking task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster not found in db)
|
||||
"""
|
||||
# TODO(pkaminski): this seems to be synchronous, no task needed here
|
||||
data = jsonutils.loads(web.data())
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
self.check_net_provider(cluster)
|
||||
|
@ -182,7 +192,7 @@ class NeutronNetworkConfigurationHandler(ProviderHandler):
|
|||
objects.Task.update(task, data)
|
||||
logger.error(traceback.format_exc())
|
||||
|
||||
raise self.http(200, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class NetworkConfigurationVerifyHandler(ProviderHandler):
|
||||
|
@ -194,13 +204,14 @@ class NetworkConfigurationVerifyHandler(ProviderHandler):
|
|||
""":IMPORTANT: this method should be rewritten to be more RESTful
|
||||
|
||||
:returns: JSONized Task object.
|
||||
:http: * 202 (network checking task failed)
|
||||
* 200 (network verification task started)
|
||||
:http: * 200 (network verification task finished/has error)
|
||||
* 202 (network verification task running)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster not found in db)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
self.check_net_provider(cluster)
|
||||
raise self.http(202, self.launch_verify(cluster))
|
||||
self.launch_verify(cluster)
|
||||
|
||||
def launch_verify(self, cluster):
|
||||
data = self.validator.validate_networks_update(web.data())
|
||||
|
@ -222,7 +233,8 @@ class NetworkConfigurationVerifyHandler(ProviderHandler):
|
|||
task = task_manager.execute(data, vlan_ids)
|
||||
except errors.CantRemoveOldVerificationTask:
|
||||
raise self.http(400, "You cannot delete running task manually")
|
||||
return objects.Task.to_json(task)
|
||||
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class NovaNetworkConfigurationVerifyHandler(NetworkConfigurationVerifyHandler):
|
||||
|
|
|
@ -54,15 +54,17 @@ class NodeHandler(SingleHandler):
|
|||
"""Deletes a node from DB and from Cobbler.
|
||||
|
||||
:return: JSON-ed deletion task
|
||||
:http: * 202 (nodes are successfully scheduled for deletion)
|
||||
* 404 (invalid node id specified)
|
||||
:http: * 200 (node has been successfully deleted)
|
||||
* 202 (node is successfully scheduled for deletion)
|
||||
* 400 (data validation failed)
|
||||
* 404 (node not found in db)
|
||||
"""
|
||||
|
||||
node = self.get_object_or_404(self.single, obj_id)
|
||||
task_manager = NodeDeletionTaskManager(cluster_id=node.cluster_id)
|
||||
task = task_manager.execute([node], mclient_remove=False)
|
||||
|
||||
raise self.http(202, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class NodeCollectionHandler(CollectionHandler):
|
||||
|
@ -100,7 +102,7 @@ class NodeCollectionHandler(CollectionHandler):
|
|||
def PUT(self):
|
||||
""":returns: Collection of JSONized Node objects.
|
||||
:http: * 200 (nodes are successfully updated)
|
||||
* 400 (invalid nodes data specified)
|
||||
* 400 (data validation failed)
|
||||
"""
|
||||
data = self.checked_data(
|
||||
self.validator.validate_collection_update
|
||||
|
@ -130,8 +132,10 @@ class NodeCollectionHandler(CollectionHandler):
|
|||
Takes (JSONed) list of node ids to delete.
|
||||
|
||||
:return: JSON-ed deletion task
|
||||
:http: * 202 (nodes are successfully scheduled for deletion)
|
||||
* 400 (invalid nodes data specified)
|
||||
:http: * 200 (nodes have been successfully deleted)
|
||||
* 202 (nodes are successfully scheduled for deletion)
|
||||
* 400 (data validation failed)
|
||||
* 404 (nodes not found in db)
|
||||
"""
|
||||
# TODO(pkaminski): web.py does not support parsing of array arguments
|
||||
# in the queryset so we specify the input as comma-separated list
|
||||
|
@ -145,7 +149,7 @@ class NodeCollectionHandler(CollectionHandler):
|
|||
task_manager = NodeDeletionTaskManager(cluster_id=nodes[0].cluster_id)
|
||||
task = task_manager.execute(nodes)
|
||||
|
||||
raise self.http(202, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
|
||||
class NodeAgentHandler(BaseHandler):
|
||||
|
@ -158,7 +162,7 @@ class NodeAgentHandler(BaseHandler):
|
|||
""":returns: node id.
|
||||
:http: * 200 (node are successfully updated)
|
||||
* 304 (node data not changed since last request)
|
||||
* 400 (invalid nodes data specified)
|
||||
* 400 (data validation failed)
|
||||
* 404 (node not found)
|
||||
"""
|
||||
nd = self.checked_data(
|
||||
|
@ -209,7 +213,7 @@ class NodeNICsHandler(BaseHandler):
|
|||
def PUT(self, node_id):
|
||||
""":returns: Collection of JSONized Node objects.
|
||||
:http: * 200 (nodes are successfully updated)
|
||||
* 400 (invalid nodes data specified)
|
||||
* 400 (data validation failed)
|
||||
"""
|
||||
interfaces_data = self.checked_data(
|
||||
self.validator.validate_structure_and_data, node_id=node_id)
|
||||
|
@ -236,7 +240,7 @@ class NodeCollectionNICsHandler(BaseHandler):
|
|||
def PUT(self):
|
||||
""":returns: Collection of JSONized Node objects.
|
||||
:http: * 200 (nodes are successfully updated)
|
||||
* 400 (invalid nodes data specified)
|
||||
* 400 (data validation failed)
|
||||
"""
|
||||
data = self.checked_data(
|
||||
self.validator.validate_collection_structure_and_data)
|
||||
|
|
|
@ -211,14 +211,15 @@ class SelectedNodesBase(NodesFilterMixin, BaseHandler):
|
|||
task_manager.__class__.__name__, traceback.format_exc())
|
||||
raise self.http(400, message=six.text_type(exc))
|
||||
|
||||
raise self.http(202, objects.Task.to_json(task))
|
||||
self.raise_task(task)
|
||||
|
||||
@content
|
||||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster or nodes not found in db)
|
||||
* 400 (failed to execute task)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
return self.handle_task(cluster)
|
||||
|
@ -237,8 +238,9 @@ class ProvisionSelectedNodes(SelectedNodesBase):
|
|||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster or nodes not found in db)
|
||||
* 400 (failed to execute task)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
|
||||
|
@ -271,8 +273,9 @@ class DeploySelectedNodes(BaseDeploySelectedNodes):
|
|||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster or nodes not found in db)
|
||||
* 400 (failed to execute task)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
return self.handle_task(cluster)
|
||||
|
@ -286,8 +289,9 @@ class DeploySelectedNodesWithTasks(BaseDeploySelectedNodes):
|
|||
def PUT(self, cluster_id):
|
||||
""":returns: JSONized Task object.
|
||||
:http: * 200 (task successfully executed)
|
||||
* 202 (task scheduled for execution)
|
||||
* 400 (data validation failed)
|
||||
* 404 (cluster or nodes not found in db)
|
||||
* 400 (failed to execute task)
|
||||
"""
|
||||
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
|
||||
data = self.checked_data(
|
||||
|
|
|
@ -694,6 +694,7 @@ class VerifyNetworksTaskManager(TaskManager):
|
|||
)
|
||||
verification_tasks = list(verification_tasks)
|
||||
|
||||
# TODO(pkaminski): this code shouldn't be required at all
|
||||
if verification_tasks:
|
||||
ver_task = verification_tasks[0]
|
||||
if ver_task.status == consts.TASK_STATUSES.running:
|
||||
|
|
|
@ -600,7 +600,6 @@ class EnvironmentManager(object):
|
|||
kwargs={'cluster_id': self.clusters[0].id}),
|
||||
headers=self.default_headers)
|
||||
|
||||
self.tester.assertEqual(202, resp.status_code)
|
||||
return self.db.query(Task).filter_by(
|
||||
uuid=resp.json_body['uuid']
|
||||
).first()
|
||||
|
@ -609,7 +608,7 @@ class EnvironmentManager(object):
|
|||
"Nothing to deploy - try creating cluster"
|
||||
)
|
||||
|
||||
def stop_deployment(self, expect_http=202):
|
||||
def stop_deployment(self):
|
||||
if self.clusters:
|
||||
resp = self.app.put(
|
||||
reverse(
|
||||
|
@ -617,9 +616,7 @@ class EnvironmentManager(object):
|
|||
kwargs={'cluster_id': self.clusters[0].id}),
|
||||
expect_errors=True,
|
||||
headers=self.default_headers)
|
||||
self.tester.assertEqual(expect_http, resp.status_code)
|
||||
if not str(expect_http).startswith("2"):
|
||||
return resp.body
|
||||
|
||||
return self.db.query(Task).filter_by(
|
||||
uuid=resp.json_body['uuid']
|
||||
).first()
|
||||
|
@ -722,7 +719,6 @@ class EnvironmentManager(object):
|
|||
nets,
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.tester.assertEqual(202, resp.status_code)
|
||||
task_uuid = resp.json_body['uuid']
|
||||
return self.db.query(Task).filter_by(uuid=task_uuid).first()
|
||||
else:
|
||||
|
|
|
@ -19,6 +19,7 @@ from hashlib import md5
|
|||
from mock import patch
|
||||
from StringIO import StringIO
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Task
|
||||
from nailgun.test.base import BaseIntegrationTest
|
||||
from nailgun.test.base import fake_tasks
|
||||
|
@ -30,10 +31,11 @@ class TestHandlers(BaseIntegrationTest):
|
|||
resp = self.app.put(
|
||||
reverse('CapacityLogHandler'),
|
||||
headers=self.default_headers)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
self.assertEqual(resp.json_body['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
capacity_task = self.db.query(Task).filter_by(
|
||||
name="capacity_log"
|
||||
name=consts.TASK_NAMES.capacity_log
|
||||
).first()
|
||||
self.env.wait_ready(capacity_task)
|
||||
|
||||
|
@ -127,7 +129,8 @@ class TestHandlers(BaseIntegrationTest):
|
|||
self.assertEqual(test_env['cluster'], 'test_name')
|
||||
self.assertEqual(test_env['nodes'], 6)
|
||||
|
||||
@fake_tasks(override_state={"progress": 100, "status": "ready"})
|
||||
@fake_tasks(override_state={"progress": 100,
|
||||
"status": consts.TASK_STATUSES.ready})
|
||||
def test_capacity_csv_log_with_unicode(self):
|
||||
self.env.create(
|
||||
cluster_kwargs={
|
||||
|
|
|
@ -20,6 +20,7 @@ from sqlalchemy.sql import not_
|
|||
|
||||
from nailgun import objects
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Cluster
|
||||
from nailgun.db.sqlalchemy.models import NetworkGroup
|
||||
from nailgun.db.sqlalchemy.models import Release
|
||||
|
@ -204,4 +205,4 @@ class TestHandlers(BaseIntegrationTest):
|
|||
|
||||
resp = self.env.nova_networks_put(cluster['id'], nets)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.assertEqual(resp.json_body['status'], 'ready')
|
||||
self.assertEqual(resp.json_body['status'], consts.TASK_STATUSES.ready)
|
||||
|
|
|
@ -19,6 +19,7 @@ from mock import patch
|
|||
from oslo.serialization import jsonutils
|
||||
from sqlalchemy.sql import not_
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Cluster
|
||||
from nailgun.db.sqlalchemy.models import NetworkGroup
|
||||
from nailgun.network.manager import NetworkManager
|
||||
|
@ -101,7 +102,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
resp = self.env.nova_networks_put(self.cluster.id, data)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
mgmt_ng = [ng for ng in self.cluster.network_groups
|
||||
|
@ -138,7 +139,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
|
||||
def test_network_group_update_changes_network(self):
|
||||
network = self.db.query(NetworkGroup).filter(
|
||||
not_(NetworkGroup.name == "fuelweb_admin")
|
||||
not_(NetworkGroup.name == consts.NETWORKS.fuelweb_admin)
|
||||
).first()
|
||||
self.assertIsNotNone(network)
|
||||
new_vlan_id = 500 # non-used vlan id
|
||||
|
@ -152,7 +153,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
|
||||
def test_update_networks_and_net_manager(self):
|
||||
network = self.db.query(NetworkGroup).filter(
|
||||
not_(NetworkGroup.name == "fuelweb_admin")
|
||||
not_(NetworkGroup.name == consts.NETWORKS.fuelweb_admin)
|
||||
).first()
|
||||
new_vlan_id = 500 # non-used vlan id
|
||||
new_net = {'networking_parameters': {'net_manager': 'VlanManager'},
|
||||
|
@ -174,7 +175,7 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(
|
||||
task['message'],
|
||||
'Invalid network ID: 500'
|
||||
|
@ -184,7 +185,9 @@ class TestNovaNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
resp = self.env.nova_networks_get(self.cluster.id)
|
||||
data = resp.json_body
|
||||
for net in data['networks']:
|
||||
if net['name'] in ('fuelweb_admin', 'public', 'fixed'):
|
||||
if net['name'] in (consts.NETWORKS.fuelweb_admin,
|
||||
consts.NETWORKS.public,
|
||||
consts.NETWORKS.fixed):
|
||||
self.assertIsNone(net['vlan_start'])
|
||||
else:
|
||||
self.assertIsNotNone(net['vlan_start'])
|
||||
|
@ -300,7 +303,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
resp = self.env.neutron_networks_put(self.cluster.id, data)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
mgmt_ng = [ng for ng in self.cluster.network_groups
|
||||
|
@ -316,7 +319,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(
|
||||
task['message'],
|
||||
"Change of 'segmentation_type' is prohibited"
|
||||
|
@ -349,7 +352,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(
|
||||
task['message'],
|
||||
"Change of 'segmentation_type' is prohibited"
|
||||
|
@ -364,7 +367,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(
|
||||
task['message'],
|
||||
'Invalid network ID: 500'
|
||||
|
@ -385,7 +388,7 @@ class TestNeutronNetworkConfigurationHandlerMultinode(BaseIntegrationTest):
|
|||
resp = self.env.neutron_networks_put(self.cluster.id, data)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
self.db.refresh(self.cluster)
|
||||
publ_ng = filter(lambda ng: ng.name == 'public',
|
||||
|
@ -480,7 +483,7 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'check_networks')
|
||||
self.assertIn("Address space intersection between networks:\n"
|
||||
|
@ -490,9 +493,9 @@ class TestAdminNetworkConfiguration(BaseIntegrationTest):
|
|||
def test_deploy_error_when_admin_cidr_match_other_network_cidr(self):
|
||||
resp = self.env.cluster_changes_put(self.cluster['id'],
|
||||
expect_errors=True)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'deploy')
|
||||
self.assertIn("Address space intersection between networks:\n"
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
from netaddr import IPAddress
|
||||
from netaddr import IPNetwork
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Cluster
|
||||
from nailgun.db.sqlalchemy.models import NetworkGroup
|
||||
from nailgun.db.sqlalchemy.models import NeutronConfig
|
||||
|
@ -52,11 +53,11 @@ class TestNetworkChecking(BaseIntegrationTest):
|
|||
def set_cluster_changes_w_error(self, cluster_id):
|
||||
resp = self.env.cluster_changes_put(cluster_id,
|
||||
expect_errors=True)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'deploy')
|
||||
self.assertEqual(task['name'], consts.TASK_NAMES.deploy)
|
||||
self.check_result_format(task, cluster_id)
|
||||
return task
|
||||
|
||||
|
@ -65,9 +66,9 @@ class TestNetworkChecking(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'check_networks')
|
||||
self.assertEqual(task['name'], consts.TASK_NAMES.check_networks)
|
||||
self.check_result_format(task, cluster_id)
|
||||
return task
|
||||
|
||||
|
@ -75,9 +76,9 @@ class TestNetworkChecking(BaseIntegrationTest):
|
|||
resp = self.env.nova_networks_put(cluster_id, nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'check_networks')
|
||||
self.assertEqual(task['name'], consts.TASK_NAMES.check_networks)
|
||||
return task
|
||||
|
||||
def update_neutron_networks_w_error(self, cluster_id, nets):
|
||||
|
@ -85,9 +86,9 @@ class TestNetworkChecking(BaseIntegrationTest):
|
|||
expect_errors=True)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'error')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.error)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'check_networks')
|
||||
self.assertEqual(task['name'], consts.TASK_NAMES.check_networks)
|
||||
self.check_result_format(task, cluster_id)
|
||||
return task
|
||||
|
||||
|
@ -95,9 +96,9 @@ class TestNetworkChecking(BaseIntegrationTest):
|
|||
resp = self.env.neutron_networks_put(cluster_id, nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
self.assertEqual(task['progress'], 100)
|
||||
self.assertEqual(task['name'], 'check_networks')
|
||||
self.assertEqual(task['name'], consts.TASK_NAMES.check_networks)
|
||||
return task
|
||||
|
||||
|
||||
|
@ -312,7 +313,7 @@ class TestNovaHandlers(TestNetworkChecking):
|
|||
"256"
|
||||
task = self.update_nova_networks_success(self.cluster.id, self.nets)
|
||||
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
def test_network_size_and_amount_not_fit_cidr(self):
|
||||
self.nets['networking_parameters']['net_manager'] = 'VlanManager'
|
||||
|
|
|
@ -67,7 +67,7 @@ class TestHandlers(BaseIntegrationTest):
|
|||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
def test_node_valid_metadata_gets_updated(self):
|
||||
new_metadata = self.env.default_metadata()
|
||||
|
|
|
@ -19,6 +19,7 @@ from netaddr import IPNetwork
|
|||
|
||||
from oslo.serialization import jsonutils
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Cluster
|
||||
from nailgun.db.sqlalchemy.models import NetworkNICAssignment
|
||||
from nailgun.test.base import BaseIntegrationTest
|
||||
|
@ -160,7 +161,7 @@ class TestNodeHandlers(BaseIntegrationTest):
|
|||
resp = self.env.nova_networks_put(cluster['id'], nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
mac = self.env.generate_random_mac()
|
||||
meta = self.env.default_metadata()
|
||||
|
@ -197,7 +198,7 @@ class TestNodeHandlers(BaseIntegrationTest):
|
|||
resp = self.env.nova_networks_put(cluster['id'], nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
mac = self.env.generate_random_mac()
|
||||
meta = self.env.default_metadata()
|
||||
|
@ -238,7 +239,7 @@ class TestNodeHandlers(BaseIntegrationTest):
|
|||
resp = self.env.neutron_networks_put(cluster['id'], nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
mac = self.env.generate_random_mac()
|
||||
meta = self.env.default_metadata()
|
||||
|
@ -275,7 +276,7 @@ class TestNodeHandlers(BaseIntegrationTest):
|
|||
resp = self.env.neutron_networks_put(cluster['id'], nets)
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
task = resp.json_body
|
||||
self.assertEqual(task['status'], 'ready')
|
||||
self.assertEqual(task['status'], consts.TASK_STATUSES.ready)
|
||||
|
||||
mac = self.env.generate_random_mac()
|
||||
meta = self.env.default_metadata()
|
||||
|
|
|
@ -45,7 +45,9 @@ class TestPutSameJson(base.BaseIntegrationTest):
|
|||
jsonutils.dumps(data),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.assertEqual(response.status_code, expected_status)
|
||||
if not isinstance(expected_status, list):
|
||||
expected_status = [expected_status]
|
||||
self.assertIn(response.status_code, expected_status)
|
||||
|
||||
def http_get(self, name, arguments):
|
||||
"""Makes a GET request to a resource with `name`.
|
||||
|
@ -100,7 +102,7 @@ class TestPutSameJson(base.BaseIntegrationTest):
|
|||
{
|
||||
'cluster_id': self.cluster.id
|
||||
},
|
||||
cluster_changes, 202
|
||||
cluster_changes, [200, 202]
|
||||
)
|
||||
|
||||
def test_cluster_attributes(self):
|
||||
|
|
|
@ -18,9 +18,7 @@ import copy
|
|||
|
||||
import unittest2
|
||||
|
||||
from nailgun.consts import BOND_MODES
|
||||
from nailgun.consts import CLUSTER_STATUSES
|
||||
from nailgun.consts import NETWORK_INTERFACE_TYPES
|
||||
from nailgun import consts
|
||||
from nailgun.test.base import BaseIntegrationTest
|
||||
from nailgun.test.base import fake_tasks
|
||||
from nailgun.test.base import reverse
|
||||
|
@ -141,7 +139,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
|||
|
||||
task = self.env.launch_verify_networks(nets)
|
||||
self.db.refresh(task)
|
||||
self.assertEqual(task.status, "error")
|
||||
self.assertEqual(task.status, consts.TASK_STATUSES.error)
|
||||
error_msg = 'At least two nodes are required to be in ' \
|
||||
'the environment for network verification.'
|
||||
self.assertEqual(task.message, error_msg)
|
||||
|
@ -150,8 +148,8 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
|||
def test_network_verify_when_env_not_ready(self):
|
||||
cluster_db = self.env.clusters[0]
|
||||
blocking_statuses = (
|
||||
CLUSTER_STATUSES.deployment,
|
||||
CLUSTER_STATUSES.update,
|
||||
consts.CLUSTER_STATUSES.deployment,
|
||||
consts.CLUSTER_STATUSES.update,
|
||||
)
|
||||
for status in blocking_statuses:
|
||||
cluster_db.status = status
|
||||
|
@ -169,7 +167,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
|||
task = self.env.launch_verify_networks(nets)
|
||||
self.db.refresh(task)
|
||||
|
||||
self.assertEqual(task.status, "error")
|
||||
self.assertEqual(task.status, consts.TASK_STATUSES.error)
|
||||
error_msg = (
|
||||
"Environment is not ready to run network verification "
|
||||
"because it is in '{0}' state.".format(status)
|
||||
|
@ -190,7 +188,7 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
|
|||
|
||||
self.env.create_task(
|
||||
name="verify_networks",
|
||||
status="running",
|
||||
status=consts.TASK_STATUSES.running,
|
||||
cluster_id=self.env.clusters[0].id
|
||||
)
|
||||
|
||||
|
@ -250,7 +248,7 @@ class TestVerifyNetworksDisabled(BaseIntegrationTest):
|
|||
"name": "eth2",
|
||||
"current_speed": None}])
|
||||
self.env.create(
|
||||
cluster_kwargs={'status': 'operational',
|
||||
cluster_kwargs={'status': consts.CLUSTER_STATUSES.operational,
|
||||
'net_provider': 'neutron',
|
||||
'net_segment_type': 'vlan'},
|
||||
nodes_kwargs=[
|
||||
|
@ -264,10 +262,11 @@ class TestVerifyNetworksDisabled(BaseIntegrationTest):
|
|||
)
|
||||
self.cluster = self.env.clusters[0]
|
||||
|
||||
@unittest2.skip('Fails randomly, bug #1427658')
|
||||
@fake_tasks()
|
||||
def test_network_verification_neutron_with_vlan_segmentation(self):
|
||||
task = self.env.launch_verify_networks()
|
||||
self.assertEqual(task.status, 'running')
|
||||
self.assertEqual(task.status, consts.TASK_STATUSES.running)
|
||||
self.env.wait_ready(task, 30)
|
||||
|
||||
|
||||
|
@ -305,7 +304,7 @@ class TestNetworkVerificationWithBonds(BaseIntegrationTest):
|
|||
for node in self.env.nodes:
|
||||
data, admin_nic, other_nic, empty_nic = self.verify_nics(node)
|
||||
self.env.make_bond_via_api("ovs-bond0",
|
||||
BOND_MODES.balance_slb,
|
||||
consts.BOND_MODES.balance_slb,
|
||||
[other_nic["name"], empty_nic["name"]],
|
||||
node["id"])
|
||||
self.verify_bonds(node)
|
||||
|
@ -333,7 +332,8 @@ class TestNetworkVerificationWithBonds(BaseIntegrationTest):
|
|||
resp = self.env.node_nics_get(node["id"])
|
||||
self.assertEqual(resp.status_code, 200)
|
||||
|
||||
bond = filter(lambda nic: nic["type"] == NETWORK_INTERFACE_TYPES.bond,
|
||||
bond = filter(lambda nic: nic["type"] ==
|
||||
consts.NETWORK_INTERFACE_TYPES.bond,
|
||||
resp.json_body)
|
||||
self.assertEqual(len(bond), 1)
|
||||
self.assertEqual(bond[0]["name"], "ovs-bond0")
|
||||
|
@ -422,7 +422,7 @@ class TestVerifyNeutronVlan(BaseIntegrationTest):
|
|||
self.env.launch_deployment()
|
||||
stop_task = self.env.stop_deployment()
|
||||
self.env.wait_ready(stop_task, 60)
|
||||
self.assertEqual(self.cluster.status, "stopped")
|
||||
self.assertEqual(self.cluster.status, consts.CLUSTER_STATUSES.stopped)
|
||||
verify_task = self.env.launch_verify_networks()
|
||||
self.env.wait_ready(verify_task, 60)
|
||||
|
||||
|
@ -446,7 +446,7 @@ class TestVerifyNeutronVlan(BaseIntegrationTest):
|
|||
|
||||
# check private VLAN range for nodes in Verify parameters
|
||||
task = self.env.launch_verify_networks()
|
||||
self.assertEqual(task.status, 'running')
|
||||
self.assertEqual(task.status, consts.TASK_STATUSES.running)
|
||||
for node in task.cache['args']['nodes']:
|
||||
for net in node['networks']:
|
||||
if net['iface'] == priv_nics[node['uid']]:
|
||||
|
|
|
@ -19,8 +19,8 @@ import shutil
|
|||
import tempfile
|
||||
import time
|
||||
|
||||
from mock import Mock
|
||||
from mock import patch
|
||||
import mock
|
||||
|
||||
from oslo.serialization import jsonutils
|
||||
|
||||
import nailgun
|
||||
|
@ -43,7 +43,7 @@ class TestLogs(BaseIntegrationTest):
|
|||
self.local_log_file = os.path.join(self.log_dir, 'nailgun.log')
|
||||
regexp = (r'^(?P<date>\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}):'
|
||||
'(?P<level>\w+):(?P<text>.+)$')
|
||||
self.patcher = patch.object(
|
||||
self.patcher = mock.patch.object(
|
||||
settings, 'LOGS', [
|
||||
{
|
||||
'id': 'nailgun',
|
||||
|
@ -350,30 +350,30 @@ class TestLogs(BaseIntegrationTest):
|
|||
f.write(self._format_log_entry(log_entry))
|
||||
f.flush()
|
||||
|
||||
@patch.dict('nailgun.task.task.settings.DUMP',
|
||||
{
|
||||
'dump': {
|
||||
'local': {
|
||||
'hosts': [],
|
||||
'objects': [],
|
||||
},
|
||||
'master': {
|
||||
'hosts': [],
|
||||
'objects': [{
|
||||
'type': 'subs',
|
||||
'path': '/var/log/remote',
|
||||
'subs': {}
|
||||
}],
|
||||
},
|
||||
'slave': {
|
||||
'hosts': [],
|
||||
'objects': [],
|
||||
}
|
||||
},
|
||||
'target': '/path/to/save',
|
||||
'lastdump': '/path/to/latest',
|
||||
'timestamp': True,
|
||||
})
|
||||
@mock.patch.dict('nailgun.task.task.settings.DUMP',
|
||||
{
|
||||
'dump': {
|
||||
'local': {
|
||||
'hosts': [],
|
||||
'objects': [],
|
||||
},
|
||||
'master': {
|
||||
'hosts': [],
|
||||
'objects': [{
|
||||
'type': 'subs',
|
||||
'path': '/var/log/remote',
|
||||
'subs': {}
|
||||
}],
|
||||
},
|
||||
'slave': {
|
||||
'hosts': [],
|
||||
'objects': [],
|
||||
}
|
||||
},
|
||||
'target': '/path/to/save',
|
||||
'lastdump': '/path/to/latest',
|
||||
'timestamp': True,
|
||||
})
|
||||
def test_snapshot_conf(self):
|
||||
self.env.create_node(
|
||||
status='ready',
|
||||
|
@ -408,9 +408,10 @@ class TestLogs(BaseIntegrationTest):
|
|||
}
|
||||
self.datadiff(DumpTask.conf(), conf)
|
||||
|
||||
@patch.dict('nailgun.task.task.settings.DUMP', {'lastdump': 'LASTDUMP'})
|
||||
@mock.patch.dict('nailgun.task.task.settings.DUMP',
|
||||
{'lastdump': 'LASTDUMP'})
|
||||
@fake_tasks(fake_rpc=False, mock_rpc=False)
|
||||
@patch('nailgun.rpc.cast')
|
||||
@mock.patch('nailgun.rpc.cast')
|
||||
def test_snapshot_cast(self, mocked_rpc):
|
||||
task = self.env.create_task(name='dump')
|
||||
DumpTask.execute(task)
|
||||
|
@ -429,10 +430,10 @@ class TestLogs(BaseIntegrationTest):
|
|||
|
||||
def test_snapshot_task_manager(self):
|
||||
tm = DumpTaskManager()
|
||||
mock = Mock(return_value=None)
|
||||
tm._call_silently = mock
|
||||
m = mock.Mock(return_value=None)
|
||||
tm._call_silently = m
|
||||
task = tm.execute()
|
||||
mock.assert_called_once_with(task, DumpTask, conf=None)
|
||||
m.assert_called_once_with(task, DumpTask, conf=None)
|
||||
|
||||
def test_snapshot_task_manager_already_running(self):
|
||||
self.env.create_task(name="dump")
|
||||
|
@ -440,31 +441,31 @@ class TestLogs(BaseIntegrationTest):
|
|||
self.assertRaises(errors.DumpRunning, tm.execute)
|
||||
|
||||
def test_log_package_handler_ok(self):
|
||||
task = jsonutils.dumps({
|
||||
task = {
|
||||
"status": "running",
|
||||
"name": "dump",
|
||||
"progress": 0,
|
||||
"message": None,
|
||||
"id": 1,
|
||||
"uuid": "00000000-0000-0000-0000-000000000000"
|
||||
})
|
||||
tm_patcher = patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
th_patcher = patch('nailgun.api.v1.handlers.logs.objects.Task')
|
||||
}
|
||||
tm_patcher = mock.patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
th_patcher = mock.patch('nailgun.api.v1.handlers.logs.objects.Task')
|
||||
tm_mocked = tm_patcher.start()
|
||||
th_mocked = th_patcher.start()
|
||||
tm_instance = tm_mocked.return_value
|
||||
tm_instance.execute.return_value = task
|
||||
th_mocked.to_json.side_effect = lambda x: x
|
||||
tm_instance.execute.return_value = mock.Mock(**task)
|
||||
th_mocked.to_json.side_effect = lambda x: task
|
||||
resp = self.app.put(
|
||||
reverse('LogPackageHandler'), "[]", headers=self.default_headers
|
||||
)
|
||||
tm_patcher.stop()
|
||||
th_patcher.stop()
|
||||
self.assertEqual(task, resp.body)
|
||||
self.assertEqual(resp.status_code, 202)
|
||||
self.assertDictEqual(task, resp.json_body)
|
||||
|
||||
def test_log_package_handler_failed(self):
|
||||
tm_patcher = patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
tm_patcher = mock.patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
tm_mocked = tm_patcher.start()
|
||||
tm_instance = tm_mocked.return_value
|
||||
|
||||
|
@ -480,7 +481,7 @@ class TestLogs(BaseIntegrationTest):
|
|||
tm_patcher.stop()
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
|
||||
@patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
@mock.patch('nailgun.api.v1.handlers.logs.DumpTaskManager')
|
||||
def test_log_package_handler_with_dump_task_manager_error(self,
|
||||
dump_manager):
|
||||
"""Test verifies that 400 status would be returned in case of errors
|
||||
|
@ -499,7 +500,7 @@ class TestLogs(BaseIntegrationTest):
|
|||
)
|
||||
self.assertEqual(resp.status_code, 400)
|
||||
|
||||
@patch('nailgun.task.task.DumpTask.conf')
|
||||
@mock.patch('nailgun.task.task.DumpTask.conf')
|
||||
def test_dump_conf_returned(self, mconf):
|
||||
mconf.return_value = {'test': 'config'}
|
||||
resp = self.app.get(
|
||||
|
@ -508,7 +509,7 @@ class TestLogs(BaseIntegrationTest):
|
|||
)
|
||||
self.assertEqual(resp.json, {'test': 'config'})
|
||||
|
||||
@patch('nailgun.task.task.rpc.cast')
|
||||
@mock.patch('nailgun.task.task.rpc.cast')
|
||||
def test_custom_conf_passed_to_execute(self, mcast):
|
||||
custom_config = {'test': 'config'}
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
# under the License.
|
||||
|
||||
|
||||
from nailgun import consts
|
||||
from nailgun.db.sqlalchemy.models import Task
|
||||
from nailgun.test.base import BaseTestCase
|
||||
from nailgun.test.base import fake_tasks
|
||||
|
@ -22,7 +23,8 @@ from nailgun.test.base import reverse
|
|||
|
||||
class TestTaskHandlers(BaseTestCase):
|
||||
|
||||
@fake_tasks(override_state={"progress": 100, "status": "ready"})
|
||||
@fake_tasks(override_state={"progress": 100,
|
||||
"status": consts.TASK_STATUSES.ready})
|
||||
def test_task_deletion(self):
|
||||
self.env.create(
|
||||
nodes_kwargs=[
|
||||
|
|
Loading…
Reference in New Issue