Fix hacking 0.10 errors in nailgun

Change-Id: I82e418d21e367ff379dda935f035dcc709c3a034
Partial-Bug: #1410810
This commit is contained in:
Maciej Kwiek 2015-09-30 16:44:52 +02:00
parent 98211d5e4d
commit 1ffc5e54e9
122 changed files with 807 additions and 794 deletions

View File

@ -28,13 +28,13 @@ from nailgun import objects
class NodeAssignmentHandler(BaseHandler):
"""Node assignment handler
"""
"""Node assignment handler"""
validator = NodeAssignmentValidator
@content
def POST(self, cluster_id):
""":returns: Empty string
:http: * 200 (nodes are successfully assigned)
* 400 (invalid nodes data specified)
* 404 (cluster/node not found in db)
@ -59,13 +59,13 @@ class NodeAssignmentHandler(BaseHandler):
class NodeUnassignmentHandler(BaseHandler):
"""Node assignment handler
"""
"""Node assignment handler"""
validator = NodeUnassignmentValidator
@content
def POST(self, cluster_id):
""":returns: Empty string
:http: * 200 (node successfully unassigned)
* 404 (cluster/node not found in db)
* 400 (invalid data specified)

View File

@ -51,10 +51,11 @@ def forbid_client_caching(handler):
def load_db_driver(handler):
"""Wrap all handlers calls in a special construction, that's call
"""Wrap all handlers calls so transaction is handled accordingly
rollback if something wrong or commit changes otherwise. Please note,
only HTTPError should be rised up from this function. All another
possible errors should be handle.
only HTTPError should be raised from this function. All other
possible errors should be handled.
"""
try:
# execute handler and commit changes if all is ok
@ -99,7 +100,9 @@ class BaseHandler(object):
@classmethod
def http(cls, status_code, msg="", err_list=None, headers=None):
"""Raise an HTTP status code, as specified. Useful for returning status
"""Raise an HTTP status code.
Useful for returning status
codes like 401 Unauthorized or 403 Forbidden.
:param status_code: the HTTP status code as an integer
@ -318,7 +321,9 @@ def content_json(func, cls, *args, **kwargs):
def content(*args, **kwargs):
"""This decorator checks Accept header received from client
"""Set content-type of response based on Accept header
This decorator checks Accept header received from client
and returns corresponding wrapper (only JSON is currently
supported). It can be used as is:
@ -369,6 +374,7 @@ class SingleHandler(BaseHandler):
@content
def GET(self, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 404 (object not found in db)
"""
@ -378,6 +384,7 @@ class SingleHandler(BaseHandler):
@content
def PUT(self, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 404 (object not found in db)
"""
@ -393,6 +400,7 @@ class SingleHandler(BaseHandler):
@content
def DELETE(self, obj_id):
""":returns: Empty string
:http: * 204 (object successfully deleted)
* 404 (object not found in db)
"""
@ -419,6 +427,7 @@ class CollectionHandler(BaseHandler):
@content
def GET(self):
""":returns: Collection of JSONized REST objects.
:http: * 200 (OK)
"""
q = self.collection.eager(None, self.eager)
@ -427,6 +436,7 @@ class CollectionHandler(BaseHandler):
@content
def POST(self):
""":returns: JSONized REST object.
:http: * 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
@ -443,9 +453,7 @@ class CollectionHandler(BaseHandler):
class DBSingletonHandler(BaseHandler):
"""Manages an object that is supposed to have only one entry in the DB
(a DB singleton).
"""
"""Manages an object that is supposed to have only one entry in the DB"""
single = None
validator = BasicValidator
@ -462,6 +470,7 @@ class DBSingletonHandler(BaseHandler):
@content
def GET(self):
"""Get singleton object from DB
:http: * 200 (OK)
* 404 (Object not found in DB)
"""
@ -472,6 +481,7 @@ class DBSingletonHandler(BaseHandler):
@content
def PUT(self):
"""Change object in DB
:http: * 200 (OK)
* 400 (Invalid data)
* 404 (Object not present in DB)
@ -487,10 +497,10 @@ class DBSingletonHandler(BaseHandler):
@content
def PATCH(self):
"""Update object
:http: * 200 (OK)
* 400 (Invalid data)
* 404 (Object not present in DB)
"""
data = self.checked_data(self.validator.validate_update)
@ -506,8 +516,7 @@ class DBSingletonHandler(BaseHandler):
# TODO(enchantner): rewrite more handlers to inherit from this
# and move more common code here
class DeferredTaskHandler(BaseHandler):
"""Abstract Deferred Task Handler
"""
"""Abstract Deferred Task Handler"""
validator = BaseDefferedTaskValidator
single = objects.Task
@ -519,6 +528,7 @@ class DeferredTaskHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 202 (task successfully executed)
* 400 (invalid object data specified)
* 404 (environment is not found)
@ -573,6 +583,7 @@ class DeploymentTasksHandler(SingleHandler):
@content
def GET(self, obj_id):
""":returns: Deployment tasks
:http: * 200 OK
* 404 (release object not found)
"""
@ -591,6 +602,7 @@ class DeploymentTasksHandler(SingleHandler):
@content
def PUT(self, obj_id):
""":returns: Deployment tasks
:http: * 200 (OK)
* 400 (invalid data specified)
* 404 (object not found in db)

View File

@ -70,8 +70,7 @@ class UnicodeWriter(object):
class CapacityLogHandler(BaseHandler):
"""Task single handler
"""
"""Task single handler"""
fields = (
"id",

View File

@ -45,8 +45,7 @@ from nailgun.task.manager import UpdateEnvironmentTaskManager
class ClusterHandler(SingleHandler):
"""Cluster single handler
"""
"""Cluster single handler"""
single = objects.Cluster
validator = ClusterValidator
@ -54,6 +53,7 @@ class ClusterHandler(SingleHandler):
@content
def DELETE(self, obj_id):
""":returns: {}
:http: * 202 (cluster deletion process launched)
* 400 (failed to execute cluster deletion process)
* 404 (cluster not found in db)
@ -73,8 +73,7 @@ class ClusterHandler(SingleHandler):
class ClusterCollectionHandler(CollectionHandler):
"""Cluster collection handler
"""
"""Cluster collection handler"""
collection = objects.ClusterCollection
validator = ClusterValidator
@ -114,8 +113,7 @@ class ClusterUpdateHandler(DeferredTaskHandler):
class ClusterAttributesHandler(BaseHandler):
"""Cluster attributes handler
"""
"""Cluster attributes handler"""
fields = (
"editable",
@ -126,6 +124,7 @@ class ClusterAttributesHandler(BaseHandler):
@content
def GET(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
@ -138,6 +137,7 @@ class ClusterAttributesHandler(BaseHandler):
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
@ -153,6 +153,7 @@ class ClusterAttributesHandler(BaseHandler):
@content
def PATCH(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
@ -186,8 +187,7 @@ class ClusterAttributesHandler(BaseHandler):
class ClusterAttributesDefaultsHandler(BaseHandler):
"""Cluster default attributes handler
"""
"""Cluster default attributes handler"""
fields = (
"editable",
@ -196,6 +196,7 @@ class ClusterAttributesDefaultsHandler(BaseHandler):
@content
def GET(self, cluster_id):
""":returns: JSONized default Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
@ -209,6 +210,7 @@ class ClusterAttributesDefaultsHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
@ -240,12 +242,12 @@ class ClusterAttributesDefaultsHandler(BaseHandler):
class ClusterGeneratedData(BaseHandler):
"""Cluster generated data
"""
"""Cluster generated data"""
@content
def GET(self, cluster_id):
""":returns: JSONized cluster generated data
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
@ -260,8 +262,7 @@ class ClusterDeploymentTasksHandler(DeploymentTasksHandler):
class VmwareAttributesHandler(BaseHandler):
"""Vmware attributes handler
"""
"""Vmware attributes handler"""
fields = (
"editable",
@ -272,6 +273,7 @@ class VmwareAttributesHandler(BaseHandler):
@content
def GET(self, cluster_id):
""":returns: JSONized Cluster vmware attributes.
:http: * 200 (OK)
* 400 (cluster doesn't accept vmware configuration)
* 404 (cluster not found in db |
@ -298,6 +300,7 @@ class VmwareAttributesHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Cluster vmware attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified |
cluster doesn't accept vmware configuration)
@ -332,12 +335,12 @@ class VmwareAttributesHandler(BaseHandler):
class VmwareAttributesDefaultsHandler(BaseHandler):
"""Vmware default attributes handler
"""
"""Vmware default attributes handler"""
@content
def GET(self, cluster_id):
""":returns: JSONized default Cluster vmware attributes.
:http: * 200 (OK)
* 400 (cluster doesn't accept vmware configuration)
* 404 (cluster not found in db)

View File

@ -193,8 +193,7 @@ def read_log(
class LogEntryCollectionHandler(BaseHandler):
"""Log entry collection handler
"""
"""Log entry collection handler"""
@content
def GET(self):
@ -377,11 +376,11 @@ class LogEntryCollectionHandler(BaseHandler):
class LogPackageHandler(BaseHandler):
"""Log package handler
"""
"""Log package handler"""
@content
def PUT(self):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 400 (data validation failed)
* 404 (cluster not found in db)
@ -403,18 +402,19 @@ class LogPackageDefaultConfig(BaseHandler):
@content
def GET(self):
"""Generates default config for snapshot
:http: * 200
"""
return DumpTask.conf()
class LogSourceCollectionHandler(BaseHandler):
"""Log source collection handler
"""
"""Log source collection handler"""
@content
def GET(self):
""":returns: Collection of log sources (from settings)
:http: * 200 (OK)
"""
return settings.LOGS
@ -424,6 +424,7 @@ class SnapshotDownloadHandler(BaseHandler):
def GET(self, snapshot_name):
""":returns: empty response
:resheader X-Accel-Redirect: snapshot_name
:http: * 200 (OK)
* 401 (Unauthorized)
@ -435,12 +436,12 @@ class SnapshotDownloadHandler(BaseHandler):
class LogSourceByNodeCollectionHandler(BaseHandler):
"""Log source by node collection handler
"""
"""Log source by node collection handler"""
@content
def GET(self, node_id):
""":returns: Collection of log sources by node (from settings)
:http: * 200 (OK)
* 404 (node not found in db)
"""

View File

@ -49,8 +49,7 @@ from nailgun.task.manager import VerifyNetworksTaskManager
class ProviderHandler(BaseHandler):
"""Base class for network configuration handlers
"""
"""Base class for network configuration handlers"""
provider = None
@ -86,6 +85,7 @@ class ProviderHandler(BaseHandler):
@content
def GET(self, cluster_id):
""":returns: JSONized network configuration for cluster.
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
@ -107,6 +107,7 @@ class ProviderHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (network checking task scheduled for execution)
* 400 (data validation failed)
@ -136,8 +137,7 @@ class ProviderHandler(BaseHandler):
class NovaNetworkConfigurationHandler(ProviderHandler):
"""Network configuration handler
"""
"""Network configuration handler"""
validator = NovaNetworkConfigurationValidator
serializer = NovaNetworkConfigurationSerializer
@ -145,8 +145,7 @@ class NovaNetworkConfigurationHandler(ProviderHandler):
class NeutronNetworkConfigurationHandler(ProviderHandler):
"""Neutron Network configuration handler
"""
"""Neutron Network configuration handler"""
validator = NeutronNetworkConfigurationValidator
serializer = NeutronNetworkConfigurationSerializer
@ -154,8 +153,7 @@ class NeutronNetworkConfigurationHandler(ProviderHandler):
class TemplateNetworkConfigurationHandler(BaseHandler):
"""Neutron Network configuration handler
"""
"""Neutron Network configuration handler"""
validator = NetworkTemplateValidator
def check_if_template_modification_locked(self, cluster):
@ -166,6 +164,7 @@ class TemplateNetworkConfigurationHandler(BaseHandler):
@content
def GET(self, cluster_id):
""":returns: network template for cluster (json format)
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
@ -175,6 +174,7 @@ class TemplateNetworkConfigurationHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: {}
:http: * 200 (OK)
* 400 (invalid object data specified)
* 403 (change of configuration is forbidden)
@ -189,6 +189,7 @@ class TemplateNetworkConfigurationHandler(BaseHandler):
def DELETE(self, cluster_id):
""":returns: {}
:http: * 204 (object successfully deleted)
* 403 (change of configuration is forbidden)
* 404 (cluster not found in db)
@ -200,8 +201,7 @@ class TemplateNetworkConfigurationHandler(BaseHandler):
class NetworkConfigurationVerifyHandler(ProviderHandler):
"""Network configuration verify handler base
"""
"""Network configuration verify handler base"""
validator = NetworkConfigurationValidator
@ -243,8 +243,7 @@ class NetworkConfigurationVerifyHandler(ProviderHandler):
class NovaNetworkConfigurationVerifyHandler(NetworkConfigurationVerifyHandler):
"""Nova-Network configuration verify handler
"""
"""Nova-Network configuration verify handler"""
validator = NovaNetworkConfigurationValidator
provider = consts.CLUSTER_NET_PROVIDERS.nova_network
@ -252,8 +251,7 @@ class NovaNetworkConfigurationVerifyHandler(NetworkConfigurationVerifyHandler):
class NeutronNetworkConfigurationVerifyHandler(
NetworkConfigurationVerifyHandler):
"""Neutron network configuration verify handler
"""
"""Neutron network configuration verify handler"""
validator = NeutronNetworkConfigurationValidator
provider = consts.CLUSTER_NET_PROVIDERS.neutron

View File

@ -23,16 +23,14 @@ from nailgun import objects
class NetworkGroupHandler(SingleHandler):
"""Network group handler
"""
"""Network group handler"""
validator = NetworkGroupValidator
single = objects.NetworkGroup
class NetworkGroupCollectionHandler(CollectionHandler):
"""Network group collection handler
"""
"""Network group collection handler"""
collection = objects.NetworkGroupCollection
validator = NetworkGroupValidator

View File

@ -74,16 +74,14 @@ class NodeHandler(SingleHandler):
class NodeCollectionHandler(CollectionHandler):
"""Node collection handler
"""
"""Node collection handler"""
validator = NodeValidator
collection = objects.NodeCollection
@content
def GET(self):
"""May receive cluster_id parameter to filter list
of nodes
"""May receive cluster_id parameter to filter list of nodes
:returns: Collection of JSONized Node objects.
:http: * 200 (OK)
@ -101,6 +99,7 @@ class NodeCollectionHandler(CollectionHandler):
@content
def PUT(self):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (data validation failed)
"""
@ -165,6 +164,7 @@ class NodeAgentHandler(BaseHandler):
@content
def PUT(self):
""":returns: node id.
:http: * 200 (node are successfully updated)
* 304 (node data not changed since last request)
* 400 (data validation failed)
@ -198,8 +198,7 @@ class NodeAgentHandler(BaseHandler):
class NodeNICsHandler(BaseHandler):
"""Node network interfaces handler
"""
"""Node network interfaces handler"""
model = NodeNICInterface
validator = NetAssignmentValidator
@ -208,6 +207,7 @@ class NodeNICsHandler(BaseHandler):
@content
def GET(self, node_id):
""":returns: Collection of JSONized Node interfaces.
:http: * 200 (OK)
* 404 (node not found in db)
"""
@ -217,6 +217,7 @@ class NodeNICsHandler(BaseHandler):
@content
def PUT(self, node_id):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (data validation failed)
"""
@ -234,8 +235,7 @@ class NodeNICsHandler(BaseHandler):
class NodeCollectionNICsHandler(BaseHandler):
"""Node collection network interfaces handler
"""
"""Node collection network interfaces handler"""
model = NetworkGroup
validator = NetAssignmentValidator
@ -244,6 +244,7 @@ class NodeCollectionNICsHandler(BaseHandler):
@content
def PUT(self):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (data validation failed)
"""
@ -266,12 +267,12 @@ class NodeCollectionNICsHandler(BaseHandler):
class NodeNICsDefaultHandler(BaseHandler):
"""Node default network interfaces handler
"""
"""Node default network interfaces handler"""
@content
def GET(self, node_id):
""":returns: Collection of default JSONized interfaces for node.
:http: * 200 (OK)
* 404 (node not found in db)
"""
@ -286,15 +287,13 @@ class NodeNICsDefaultHandler(BaseHandler):
class NodeCollectionNICsDefaultHandler(NodeNICsDefaultHandler):
"""Node collection default network interfaces handler
"""
"""Node collection default network interfaces handler"""
validator = NetAssignmentValidator
@content
def GET(self):
"""May receive cluster_id parameter to filter list
of nodes
"""May receive cluster_id parameter to filter list of nodes
:returns: Collection of JSONized Nodes interfaces.
:http: * 200 (OK)
@ -311,12 +310,12 @@ class NodeCollectionNICsDefaultHandler(NodeNICsDefaultHandler):
class NodesAllocationStatsHandler(BaseHandler):
"""Node allocation stats handler
"""
"""Node allocation stats handler"""
@content
def GET(self):
""":returns: Total and unallocated nodes count.
:http: * 200 (OK)
"""
unallocated_nodes = db().query(Node).filter_by(cluster_id=None).count()

View File

@ -32,8 +32,7 @@ Handlers dealing with node groups
class NodeGroupHandler(SingleHandler):
"""NodeGroup single handler
"""
"""NodeGroup single handler"""
single = objects.NodeGroup
validator = NodeGroupValidator
@ -48,16 +47,14 @@ class NodeGroupHandler(SingleHandler):
class NodeGroupCollectionHandler(CollectionHandler):
"""NodeGroup collection handler
"""
"""NodeGroup collection handler"""
collection = objects.NodeGroupCollection
validator = NodeGroupValidator
@content
def GET(self):
"""May receive cluster_id parameter to filter list
of groups
"""May receive cluster_id parameter to filter list of groups
:returns: Collection of JSONized Task objects.
:http: * 200 (OK)

View File

@ -29,8 +29,7 @@ from nailgun.api.v1.validators.notification import NotificationValidator
class NotificationHandler(SingleHandler):
"""Notification single handler
"""
"""Notification single handler"""
single = objects.Notification
validator = NotificationValidator
@ -44,6 +43,7 @@ class NotificationCollectionHandler(CollectionHandler):
@content
def PUT(self):
""":returns: Collection of JSONized Notification objects.
:http: * 200 (OK)
* 400 (invalid data specified for collection update)
"""

View File

@ -45,15 +45,13 @@ class NodesFilterMixin(object):
validator = NodesFilterValidator
def get_default_nodes(self, cluster):
"""Method should be overriden and
return list of nodes
"""
"""Method should be overridden and return list of nodes"""
raise NotImplementedError('Please Implement this method')
def get_nodes(self, cluster):
"""If nodes selected in filter
then returns them, else returns
default nodes.
"""If nodes selected in filter then return them
else return default nodes
"""
nodes = web.input(nodes=None).nodes
if nodes:
@ -67,13 +65,15 @@ class NodesFilterMixin(object):
class DefaultOrchestratorInfo(NodesFilterMixin, BaseHandler):
"""Base class for default orchestrator data.
"""Base class for default orchestrator data
Need to redefine serializer variable
"""
@content
def GET(self, cluster_id):
""":returns: JSONized default data which will be passed to orchestrator
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
@ -90,20 +90,17 @@ class OrchestratorInfo(BaseHandler):
"""Base class for replaced data."""
def get_orchestrator_info(self, cluster):
"""Method should return data
which will be passed to orchestrator
"""
"""Method should return data which will be passed to orchestrator"""
raise NotImplementedError('Please Implement this method')
def update_orchestrator_info(self, cluster, data):
"""Method should override data which
will be passed to orchestrator
"""
"""Method should override data which will be passed to orchestrator"""
raise NotImplementedError('Please Implement this method')
@content
def GET(self, cluster_id):
""":returns: JSONized data which will be passed to orchestrator
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
@ -113,6 +110,7 @@ class OrchestratorInfo(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized data which will be passed to orchestrator
:http: * 200 (OK)
* 400 (wrong data specified)
* 404 (cluster not found in db)
@ -128,6 +126,7 @@ class OrchestratorInfo(BaseHandler):
@content
def DELETE(self, cluster_id):
""":returns: {}
:http: * 202 (orchestrator data deletion process launched)
* 400 (failed to execute orchestrator data deletion process)
* 404 (cluster not found in db)
@ -218,6 +217,7 @@ class SelectedNodesBase(NodesFilterMixin, BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (task scheduled for execution)
* 400 (data validation failed)
@ -239,6 +239,7 @@ class ProvisionSelectedNodes(SelectedNodesBase):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (task scheduled for execution)
* 400 (data validation failed)
@ -288,6 +289,7 @@ class DeploySelectedNodes(BaseDeploySelectedNodes):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (task scheduled for execution)
* 400 (data validation failed)
@ -304,6 +306,7 @@ class DeploySelectedNodesWithTasks(BaseDeploySelectedNodes):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (task scheduled for execution)
* 400 (data validation failed)
@ -322,6 +325,7 @@ class TaskDeployGraph(BaseHandler):
def GET(self, cluster_id):
""":returns: DOT representation of deployment graph.
:http: * 200 (graph returned)
* 404 (cluster not found in db)
* 400 (failed to get graph)

View File

@ -38,6 +38,7 @@ class PluginCollectionHandler(base.CollectionHandler):
@content
def POST(self):
""":returns: JSONized REST object.
:http: * 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
@ -57,6 +58,7 @@ class PluginSyncHandler(base.BaseHandler):
@content
def POST(self):
""":returns: JSONized REST object.
:http: * 200 (plugins successfully synced)
* 404 (plugin not found in db)
* 400 (problem with parsing metadata file)

View File

@ -35,6 +35,7 @@ class FuelRegistrationForm(BaseHandler):
@content
def GET(self):
"""Returns Fuel registration form
:returns: JSON representation of registration form
:http: * 200 (OK)
"""
@ -60,6 +61,7 @@ class FuelLoginForm(BaseHandler):
@content
def GET(self):
"""Returns Fuel login form
:returns: JSON representation of login form
:http: * 200 (OK)
"""
@ -85,6 +87,7 @@ class FuelRestorePasswordForm(BaseHandler):
@content
def GET(self):
"""Returns Fuel restore password form
:returns: JSON representation of restore password form
:http: * 200 (OK)
"""

View File

@ -29,16 +29,14 @@ from nailgun.objects import ReleaseCollection
class ReleaseHandler(SingleHandler):
"""Release single handler
"""
"""Release single handler"""
single = Release
validator = ReleaseValidator
class ReleaseCollectionHandler(CollectionHandler):
"""Release collection handler
"""
"""Release collection handler"""
validator = ReleaseValidator
collection = ReleaseCollection
@ -46,6 +44,7 @@ class ReleaseCollectionHandler(CollectionHandler):
@content
def GET(self):
""":returns: Sorted releases' collection in JSON format
:http: * 200 (OK)
"""
q = sorted(self.collection.all(), reverse=True)
@ -53,8 +52,7 @@ class ReleaseCollectionHandler(CollectionHandler):
class ReleaseNetworksHandler(SingleHandler):
"""Release Handler for network metadata
"""
"""Release Handler for network metadata"""
single = Release
validator = ReleaseNetworksValidator

View File

@ -21,8 +21,7 @@ from nailgun.api.v1.handlers.base import content
class BaseRemovedInHandler(BaseHandler):
"""Removed resource base handler
"""
"""Removed resource base handler"""
@property
def fuel_version(self):
@ -43,8 +42,7 @@ class BaseRemovedInHandler(BaseHandler):
class RemovedIn51Handler(BaseRemovedInHandler):
"""Removed resource handler for Fuel 5.1
"""
"""Removed resource handler for Fuel 5.1"""
fuel_version = "5.1"

View File

@ -39,7 +39,9 @@ class RoleHandler(base.SingleHandler):
@content
def GET(self, release_id, role_name):
""":http:
"""Retrieve role
:http:
* 200 (OK)
* 404 (no such object found)
"""
@ -48,7 +50,9 @@ class RoleHandler(base.SingleHandler):
@content
def PUT(self, release_id, role_name):
""":http:
"""Update role
:http:
* 200 (OK)
* 404 (no such object found)
"""
@ -59,7 +63,9 @@ class RoleHandler(base.SingleHandler):
return RoleSerializer.serialize_from_release(release, role_name)
def DELETE(self, release_id, role_name):
""":http:
"""Remove role
:http:
* 204 (object successfully deleted)
* 400 (cannot delete object)
* 404 (no such object found)
@ -81,7 +87,9 @@ class RoleCollectionHandler(base.CollectionHandler):
@content
def POST(self, release_id):
""":http:
"""Create role for release
:http:
* 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
@ -104,9 +112,6 @@ class RoleCollectionHandler(base.CollectionHandler):
@content
def GET(self, release_id):
""":http:
* 200 (OK)
"""
release = self.get_object_or_404(objects.Release, release_id)
role_names = six.iterkeys(release.roles_metadata)
return [RoleSerializer.serialize_from_release(release, name)
@ -123,6 +128,7 @@ class ClusterRolesHandler(base.BaseHandler):
@content
def GET(self, cluster_id, role_name):
""":returns: JSON-ed metadata for the role
:http:
* 200 (OK)
* 404 (no such object found)
@ -137,6 +143,7 @@ class ClusterRolesCollectionHandler(base.BaseHandler):
@content
def GET(self, cluster_id):
""":returns: collection of JSON-ed cluster roles metadata
:http:
* 200 (OK)
* 404 (no such object found)

View File

@ -32,8 +32,7 @@ Handlers dealing with tasks
class TaskHandler(SingleHandler):
"""Task single handler
"""
"""Task single handler"""
single = objects.Task
validator = TaskValidator
@ -41,6 +40,7 @@ class TaskHandler(SingleHandler):
@content
def DELETE(self, obj_id):
""":returns: Empty string
:http: * 204 (object successfully deleted)
* 404 (object not found in db)
"""
@ -61,16 +61,14 @@ class TaskHandler(SingleHandler):
class TaskCollectionHandler(CollectionHandler):
"""Task collection handler
"""
"""Task collection handler"""
collection = objects.TaskCollection
validator = TaskValidator
@content
def GET(self):
"""May receive cluster_id parameter to filter list
of tasks
"""May receive cluster_id parameter to filter list of tasks
:returns: Collection of JSONized Task objects.
:http: * 200 (OK)

View File

@ -25,14 +25,14 @@ from nailgun import utils
class VersionHandler(BaseHandler):
"""Version info handler
"""
"""Version info handler"""
release_versions = "/etc/fuel/release_versions/*.yaml"
@content
def GET(self):
""":returns: FUEL/FUELWeb commit SHA, release version.
:http: * 200 (OK)
"""
version = settings.VERSION

View File

@ -65,6 +65,7 @@ class SpawnVmsHandler(BaseHandler):
@content
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 202 (task scheduled for execution)
* 400 (data validation failed)
@ -76,12 +77,12 @@ class SpawnVmsHandler(BaseHandler):
class NodeVMsHandler(BaseHandler):
"""Node vms handler
"""
"""Node vms handler"""
@content
def GET(self, node_id):
""":returns: JSONized node vms_conf.
:http: * 200 (OK)
* 404 (node not found in db)
"""
@ -92,6 +93,7 @@ class NodeVMsHandler(BaseHandler):
@content
def PUT(self, node_id):
""":returns: JSONized node vms_conf.
:http: * 200 (OK)
* 400 (invalid vmsdata specified)
* 404 (node not found in db)

View File

@ -300,9 +300,7 @@ _locals = locals()
def get_extensions_urls():
"""Method is used to retrieve the data about
handlers and urls from extensions and convert
them into web.py consumable format.
"""Get handlers and urls from extensions, convert them into web.py format
:returns: dict in the next format:
{'urls': (r'/url/', 'ClassName'),
@ -331,8 +329,10 @@ def get_extensions_urls():
def get_feature_groups_urls():
"""Method is used to retrieve urls depended on feature groups like
'experimental' or 'advanced' which should be enable only for this modes.
"""Method for retrieving urls dependent on feature groups
Feature groups can be 'experimental' or 'advanced' and should be
enabled only for these modes.
:returns: list of urls
"""
@ -344,9 +344,7 @@ def get_feature_groups_urls():
def get_all_urls():
"""Merges urls and handlers from core with
urls and handlers from extensions
"""
"""Merges urls and handlers from core and from extensions"""
ext_urls = get_extensions_urls()
all_urls = list(urls)
all_urls.extend(get_feature_groups_urls())

View File

@ -56,13 +56,15 @@ class AssignmentValidator(BasicValidator):
@classmethod
def check_unique_hostnames(cls, nodes, cluster_id):
hostnames = [node.hostname for node in nodes]
conflicting_hostnames = [x[0] for x in db.query(
Node.hostname).filter(
sa.and_(
conflicting_hostnames = [
x[0] for x in
db.query(
Node.hostname).filter(sa.and_(
Node.hostname.in_(hostnames),
Node.cluster_id == cluster_id,
)
).all()]
).all()
]
if conflicting_hostnames:
raise errors.AlreadyExists(
"Nodes with hostnames [{0}] already exist in cluster {1}."

View File

@ -129,7 +129,7 @@ attribute_schema = {
'file',
]
},
#'value': None, # custom validation depending on type
# 'value': None, # custom validation depending on type
'restrictions': role.RESTRICTIONS,
'weight': {
'type': 'integer',

View File

@ -122,7 +122,9 @@ class NetworkConfigurationValidator(BasicValidator):
@classmethod
def _check_for_ip_conflicts(cls, network, cluster, notation, use_gateway):
"""This method checks if any of already allocated IPs will be \
"""Will there be IP conflicts after networks update?
This method checks if any of already allocated IPs will be
out of all ip-ranges after networks update.
"""
# skip admin network
@ -208,9 +210,7 @@ class NeutronNetworkConfigurationValidator(NetworkConfigurationValidator):
@classmethod
def _check_multiple_floating_ip_ranges(cls, net_params):
"""Check that there is only one floating IP range
in the input data.
"""
"""Check that there is only one floating IP range in the input data"""
# TODO(aroma): if only one IP range is supported
# by the protocol we should get rid of the nested
# list then

View File

@ -314,6 +314,7 @@ class NodesFilterValidator(BasicValidator):
@classmethod
def validate(cls, nodes):
"""Used for filtering nodes
:param nodes: list of ids in string representation.
Example: "1,99,3,4"
@ -347,8 +348,7 @@ class DeploySelectedNodesValidator(NodesFilterValidator):
@classmethod
def validate_nodes_to_deploy(cls, data, nodes, cluster_id):
"""Check whether nodes that scheduled for deployment are
in proper state
"""Check if nodes scheduled for deployment are in proper state
:param data: raw json data, usually web.data(). Is not used here
and is needed for maintaining consistency of data validating logic

View File

@ -84,7 +84,9 @@ class NotificationValidator(BasicValidator):
@classmethod
def validate_delete(cls, data, instance):
"""There's nothing to do right now. We just have to remove a given
instance from database, without any validations.
"""There's nothing to do right now.
We just have to remove a given instance from database,
without any validations.
"""
pass

View File

@ -37,8 +37,7 @@ from nailgun.urls import urls
def build_app(db_driver=None):
"""Build app and disable debug mode in case of production
"""
"""Build app and disable debug mode in case of production"""
web.config.debug = bool(int(settings.DEVELOPMENT))
app = web.application(urls(), locals(),
autoreload=bool(int(settings.AUTO_RELOAD)))
@ -70,7 +69,9 @@ def build_middleware(app):
def run_server(func, server_address=('0.0.0.0', 8080)):
"""This function same as runsimple from web/httpserver
"""Run server
This function is the same as runsimple from web/httpserver
except removed LogMiddleware because we use
HTTPLoggerMiddleware instead
"""

View File

@ -30,8 +30,7 @@ ALLOWED_LOCKS_CHAINS = [
class Lock(object):
"""Locking table info. Includes traceback info of locking call
"""
"""Locking table info. Includes traceback info of locking call"""
@staticmethod
def _warnings_only():
@ -126,8 +125,7 @@ class LockTransitionNotAllowedError(DeadlockDetectorError):
def clean_locks():
"""Context must be cleaned when transaction ends
"""
"""Context must be cleaned when transaction ends"""
context.locks = []

View File

@ -102,7 +102,7 @@ def upgrade():
def upgrade_schema():
### commands auto generated by Alembic - please adjust! ###
# commands auto generated by Alembic - please adjust! ###
op.add_column(
'releases',
sa.Column(
@ -178,7 +178,7 @@ def upgrade_schema():
'replaced_deployment_info', JSON(), nullable=True))
op.add_column('nodes', sa.Column(
'replaced_provisioning_info', JSON(), nullable=True))
### end Alembic commands ###
# end Alembic commands ###
def upgrade_data():
@ -224,7 +224,7 @@ def downgrade():
def downgrade_schema():
### commands auto generated by Alembic - please adjust! ###
# commands auto generated by Alembic - please adjust! ###
op.drop_column('nodes', 'replaced_provisioning_info')
op.drop_column('nodes', 'replaced_deployment_info')
upgrade_enum(
@ -290,7 +290,7 @@ def downgrade_schema():
nullable=False),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
# end Alembic commands ###
def downgrade_data():

View File

@ -294,8 +294,7 @@ def upgrade_node_groups(connection):
def dump_master_node_settings(connection):
"""Generate uuid for master node installation and update
master_node_settings table by generated value
"""Update master_node_settings table with new uuid for master node
Arguments:
connection - a database connection

View File

@ -240,7 +240,7 @@ def extend_ip_addrs_model_upgrade():
op.alter_column('ip_addrs', 'vip_type',
type_=sa.String(length=50),
existing_type=sa.Enum('haproxy', 'vrouter',
name='network_vip_types'))
name='network_vip_types'))
drop_enum('network_vip_types')
@ -446,9 +446,10 @@ def upgrade_node_roles_metadata():
def migrate_volumes_into_extension_upgrade():
"""Migrate data into intermediate table, from
which specific extensions will be able to retrieve
the data. It allows us not to hardcode extension
"""Migrate data into intermediate table
Specific extensions will be able to retrieve
the data from this table. It allows us not to hardcode extension
tables in core migrations.
"""
connection = op.get_bind()

View File

@ -135,11 +135,11 @@ class Cluster(Base):
@property
def is_locked(self):
if self.status in ("new", "stopped") and not \
db().query(Node).filter_by(
cluster_id=self.id,
status="ready"
).count():
N = Node
if self.status in ("new", "stopped") and not db().query(N).filter_by(
cluster_id=self.id,
status="ready"
).count():
return False
return True

View File

@ -271,8 +271,9 @@ class NodeNICInterface(Base):
# TODO(fzhadaev): move to object
@classmethod
def offloading_modes_as_flat_dict(cls, modes):
"""Represents multilevel structure of offloading modes
as flat dictionary for easy merging.
"""Represents multilevel structure of offloading modes as flat dict
This is done to ease merging
:param modes: list of offloading modes
:return: flat dictionary {mode['name']: mode['state']}
"""

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa
# This is done because grammar definition in docstrings cause H405
import ply.lex
import ply.yacc

View File

@ -130,9 +130,7 @@ class BaseExtension(object):
@classmethod
def alembic_migrations_path(cls):
"""If extension provides database migrations,
the method should return path to alembic migrations
"""
"""Path to alembic migrations (if extension provides any)"""
return None
@abc.abstractproperty
@ -141,14 +139,14 @@ class BaseExtension(object):
@abc.abstractproperty
def version(self):
"""Version of the extension, follow semantic
versioning schema (http://semver.org/)
"""Version of the extension
Follows semantic versioning schema (http://semver.org/)
"""
@classmethod
def full_name(cls):
"""Returns extension's name and version in human readable format
"""
"""Returns extension's name and version in human readable format"""
return '{0}-{1}'.format(cls.name, cls.version)
@classmethod
@ -161,30 +159,24 @@ class BaseExtension(object):
@classmethod
def on_node_create(cls, node):
"""Callback which gets executed when node is created
"""
"""Callback which gets executed when node is created"""
@classmethod
def on_node_update(cls, node):
"""Callback which gets executed when node is updated
"""
"""Callback which gets executed when node is updated"""
@classmethod
def on_node_reset(cls, node):
"""Callback which gets executed when node is reseted
"""
"""Callback which gets executed when node is reset"""
@classmethod
def on_node_delete(cls, node):
"""Callback which gets executed when node is deleted
"""
"""Callback which gets executed when node is deleted"""
@classmethod
def on_node_collection_delete(cls, node_ids):
"""Callback which gets executed when node collection is deleted
"""
"""Callback which gets executed when node collection is deleted"""
@classmethod
def on_cluster_delete(cls, cluster):
"""Callback which gets executed when cluster is deleted
"""
"""Callback which gets executed when cluster is deleted"""

View File

@ -26,14 +26,13 @@ from nailgun import objects
class NodeDisksHandler(BaseHandler):
"""Node disks handler
"""
validator = NodeDisksValidator
@content
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node not found in db)
"""
@ -46,6 +45,7 @@ class NodeDisksHandler(BaseHandler):
@content
def PUT(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 400 (invalid disks data specified)
* 404 (node not found in db)
@ -73,12 +73,11 @@ class NodeDisksHandler(BaseHandler):
class NodeDefaultsDisksHandler(BaseHandler):
"""Node default disks handler
"""
@content
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node or its attributes not found in db)
"""
@ -93,12 +92,11 @@ class NodeDefaultsDisksHandler(BaseHandler):
class NodeVolumesInformationHandler(BaseHandler):
"""Node volumes information handler
"""
@content
def GET(self, node_id):
""":returns: JSONized volumes info for node.
:http: * 200 (OK)
* 404 (node not found in db)
"""

View File

@ -51,8 +51,6 @@ class NailgunNodeAdapter(object):
@property
def is_ubuntu(self):
"""Returns True if operating system of the node
is Ubuntu, False otherwise
"""
"""Returns True if node OS is Ubuntu, False otherwise"""
return (self.node.cluster and
self.node.cluster.release.operating_system.lower() == "ubuntu")

View File

@ -15,12 +15,12 @@
# under the License.
class StateList:
class StateList(object):
def __init__(self, *state_list):
self.state_list = state_list
self.__dict__.update(dict(zip(state_list, state_list)))
def all_exclude(self, excluded_states):
return filter(
lambda state: not state in excluded_states,
lambda state: state not in excluded_states,
self.state_list)

View File

@ -29,8 +29,7 @@ formatter = logging.Formatter(LOGFORMAT, DATEFORMAT)
def make_nailgun_logger():
"""Make logger for nailgun app writes logs to stdout
"""
"""Make logger for nailgun app writes logs to stdout"""
logger = logging.getLogger("nailgun")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
@ -40,8 +39,7 @@ def make_nailgun_logger():
def make_api_logger():
"""Make logger for REST API writes logs to the file
"""
"""Make logger for REST API writes logs to the file"""
# Circular import dependency problem
# we import logger module in settings
from nailgun.settings import settings

View File

@ -63,7 +63,7 @@ compiled_urls_actions_mapping = utils.compile_mapping_keys(
r'/network_configuration/nova_network/?$'): {
'action_name': 'nova_network',
'action_group': 'network_configuration'
},
},
r'.*/clusters/(?P<cluster_id>\d+)/network_configuration/neutron/?$': {
'action_name': 'neutron',
'action_group': 'network_configuration'
@ -72,12 +72,12 @@ compiled_urls_actions_mapping = utils.compile_mapping_keys(
r'nova_network/verify/?$'): {
'action_name': 'nova_network',
'action_group': 'network_verification'
},
},
(r'.*/clusters/(?P<cluster_id>\d+)/network_configuration/'
r'neutron/verify/?$'): {
'action_name': 'neutron',
'action_group': 'network_verification'
},
},
r'.*/clusters/(?P<cluster_id>\d+)/attributes/?$': {
'action_name': 'attributes',
'action_group': 'cluster_attributes'
@ -133,9 +133,7 @@ class ConnectionMonitorMiddleware(object):
request_body = utils.get_body_from_env(env)
def save_headers_start_response(status, headers, *args):
"""Hook for saving response headers for further
processing
"""
"""Hook for saving resp headers for further processing"""
self.status = status
return start_response(status, headers, *args)

View File

@ -36,7 +36,9 @@ def public_urls():
class CookieTokenMixin(object):
"""Mixin for getting the auth token out of request X-Auth-Token header or
"""Mixin for getting the auth token out of request
Token is taken from X-Auth-Token header or
if that doesn't exist, from the cookie.
"""
def get_auth_token(self, env):
@ -56,9 +58,7 @@ class CookieTokenMixin(object):
class SkipAuthMixin(object):
"""Mixin which skips verification of authentication tokens for public
routes in the API.
"""
"""Skips verification of authentication tokens for public routes in API."""
def __init__(self, app):
self.public_api_routes = {}
self.app = app
@ -91,8 +91,7 @@ class SkipAuthMixin(object):
class FakeAuthProtocol(CookieTokenMixin):
"""Auth protocol for fake mode.
"""
"""Auth protocol for fake mode."""
def __init__(self, app, conf):
self.app = app
@ -108,8 +107,7 @@ class NailgunKeystoneAuthMiddleware(
CookieTokenMixin,
SkipAuthMixin,
auth_token.AuthProtocol):
"""Auth middleware for keystone.
"""
"""Auth middleware for keystone."""
def __call__(self, env, start_response):
token = self.get_auth_token(env)
@ -123,6 +121,5 @@ class NailgunKeystoneAuthMiddleware(
class NailgunFakeKeystoneAuthMiddleware(SkipAuthMixin, FakeAuthProtocol):
"""Auth middleware for fake mode.
"""
"""Auth middleware for fake mode."""
pass

View File

@ -17,9 +17,7 @@ import six
def get_body_from_env(env):
"""Exctracts request body from wsgi
environment variable
"""
"""Extracts request body from wsgi environment variable"""
content_length = env.get('CONTENT_LENGTH')
body = ''
@ -39,9 +37,7 @@ def compile_mapping_keys(mapping):
def get_group_from_matcher(matcher_obj, string_to_match, group_name):
"""Returns value corresponding to given group_name if it is present in
matcher_obj
"""
"""Get value corresponding to group_name if it's present in matcher_obj"""
matched = matcher_obj.match(string_to_match)
if matched:
groups_dictionary = matched.groupdict()

View File

@ -39,8 +39,7 @@ from nailgun.task.helpers import TaskHelper
class NetworkCheck(object):
def __init__(self, task, data):
"""Collect Network Groups data
"""
"""Collect Network Groups data"""
self.cluster = task.cluster
self.task = task
self.data = data
@ -89,6 +88,7 @@ class NetworkCheck(object):
def check_untagged_intersection(self):
"""check if there are untagged networks on the same interface
(both nova-net and neutron)
"""
netw_untagged = lambda n: (n['vlan_start'] is None) \
@ -141,9 +141,7 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_network_address_spaces_intersection(self):
"""check intersection of networks address spaces for all networks
(nova-net)
"""
"""check intersection of address spaces for all networks (nova-net)"""
nets_w_cidr = filter(lambda n: n['cidr'], self.networks)
for ngs in combinations(nets_w_cidr, 2):
addrs = [netaddr.IPNetwork(ngs[0]['cidr']).cidr,
@ -194,7 +192,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_public_floating_ranges_intersection(self):
"""1. Check intersection of networks address spaces inside
"""Check public floating ranges intersection
1. Check intersection of networks address spaces inside
Public and Floating network
2. Check that Public Gateway is in Public CIDR
3. Check that Public IP ranges are in Public CIDR
@ -262,7 +262,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_vlan_ids_range_and_intersection(self):
"""1. check intersection of networks VLAN IDs ranges
"""Check vlan ids range and intersection
1. check intersection of networks VLAN IDs ranges
2. check networks VLAN ID ranges are in allowed range
(nova-net)
"""
@ -300,7 +302,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_networks_amount(self):
"""1. check number of fixed networks is one in case of FlatDHCPManager
"""Check networks count
1. check number of fixed networks is one in case of FlatDHCPManager
2. check number of fixed networks fit in fixed CIDR and size of
one fixed network
(nova-net)
@ -323,7 +327,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def neutron_check_segmentation_ids(self):
"""1. check networks VLAN IDs not in Neutron L2 private VLAN ID range
"""Check neutron segmentation ids
1. check networks VLAN IDs not in Neutron L2 private VLAN ID range
for VLAN segmentation only
2. check networks VLAN IDs should not intersect
(neutron)
@ -358,8 +364,9 @@ class NetworkCheck(object):
raise errors.NetworkCheckError(err_msg)
def neutron_check_network_address_spaces_intersection(self):
"""Check intersection between address spaces of all networks
including admin (neutron)
"""Check intersection of address spaces of all networks including admin
(Neutron)
"""
# check intersection of address ranges
# between all networks
@ -458,7 +465,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def neutron_check_l3_addresses_not_match_subnet_and_broadcast(self):
"""check virtual l3 network address ranges and gateway don't intersect
"""validate virtual l3 network address range and gateway
check virtual l3 network address ranges and gateway don't intersect
with subnetwork address and broadcast address (neutron)
"""
ext_fl = self.network_config['floating_ranges'][0]
@ -490,7 +499,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_network_classes_exclude_loopback(self):
"""1. check network address space lies inside A,B or C network class
"""Check if address space is in real world addresses space
1. check network address space lies inside A,B or C network class
address space
2. check network address space doesn't lie inside loopback
address space
@ -518,7 +529,9 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_network_addresses_not_match_subnet_and_broadcast(self):
"""check network address ranges and gateway don't intersect with
"""Network shouldn't intersect with subnetwork and broadcast
check network address ranges and gateway don't intersect with
subnetwork address and broadcast address (both neutron and nova-net)
"""
for n in self.networks:
@ -559,8 +572,7 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_bond_slaves_speeds(self):
"""check bond slaves speeds are equal
"""
"""check bond slaves speeds are equal"""
for node in self.cluster.nodes:
for bond in node.bond_interfaces:
slaves_speed = set(
@ -573,8 +585,7 @@ class NetworkCheck(object):
self.err_msgs.append(warn_msg)
def check_dns_servers_ips(self):
"""check DNS servers IPs are distinct
"""
"""check DNS servers IPs are distinct"""
ips = self.network_config['dns_nameservers']
if len(set(ips)) < len(ips):
self.err_msgs.append(
@ -585,7 +596,8 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_calculated_network_cidr(self):
"""Check calculated networks CIDRs are equal to values set by user.
"""Check calculated networks CIDRs are equal to values set by user
E.g. when user set CIDR to "10.20.30.0/16" it will be calculated as
"10.20.0.0/16". So, this helps to avoid some user errors while entering
network parameters.
@ -606,8 +618,7 @@ class NetworkCheck(object):
self.expose_error_messages()
def check_configuration(self):
"""check network configuration parameters
"""
"""check network configuration parameters"""
if self.net_provider == consts.CLUSTER_NET_PROVIDERS.neutron:
self.neutron_check_network_address_spaces_intersection()
self.neutron_check_segmentation_ids()
@ -623,8 +634,7 @@ class NetworkCheck(object):
self.check_calculated_network_cidr()
def check_interface_mapping(self):
"""check mapping of networks to NICs
"""
"""check mapping of networks to NICs"""
self.check_untagged_intersection()
self.check_bond_slaves_speeds()
return self.err_msgs

View File

@ -23,9 +23,10 @@ from nailgun import objects
def check_received_data(cached, received):
"""Check data received from net_probe (received) against data from
task (cached) for one node. Assemble connectivity errors description and
return it to the caller.
"""Check data received from net_probe (received)
Received data is checked against data from task (cached) for one node.
Assemble connectivity errors description and return it to the caller.
:param cached: data for one node from task.cache
:type cached: dict
@ -104,7 +105,8 @@ def check_received_data(cached, received):
def append_message(original, appendix):
"""Append message to output string with a delimiter.
"""Append message to output string with a delimiter
No delimiter is added if any of strings is empty.
"""
return '\n'.join(filter(None, (original, appendix)))

View File

@ -73,8 +73,9 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def build_role_to_network_group_mapping(cls, cluster, node_group_name):
"""Builds network role to network map according to template data if
template is loaded. Otherwise, empty map is returned.
"""Build network role to network map according to template data
If template is not loaded, empty map is returned.
:param cluster: Cluster instance
:type cluster: Cluster model
@ -103,7 +104,8 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_network_group_for_role(cls, network_role, net_group_mapping):
"""Returns network group to which network role is associated.
"""Returns network group to which network role is associated
If networking template is set first lookup happens in the
template. Otherwise the default network group from
the network role is returned.
@ -120,9 +122,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_node_networks_with_ips(cls, node):
"""Returns node's IP and network's data (meta, gateway) for
each network of particular node.
"""
"""Returns IP and network data (meta, gateway) for each node network"""
if not node.group_id:
return {}
@ -152,8 +152,9 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_node_endpoints(cls, node):
"""Returns a set of endpoints for particular node for the case when
template is loaded. Endpoints are taken from 'endpoints' field
"""Get a set of endpoints for node for the case when template is loaded
Endpoints are taken from 'endpoints' field
of templates for every node role.
"""
endpoints = set()
@ -169,7 +170,9 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_node_network_mapping(cls, node):
"""Returns a list of pairs (network, endpoint) for particular node
"""Get (network, endpoint) mappings for node with loaded template
Returns a list of pairs (network, endpoint) for particular node
for the case when template is loaded. Networks are aggregated for all
node roles assigned to node. Endpoints are taken from 'endpoints' field
of templates for every node role and they are mapped to networks from
@ -187,8 +190,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def get_network_name_to_endpoint_mappings(cls, cluster):
"""Returns dict of endpoint-to-network mappings for every node group
of the cluster::
"""Returns endpoint-to-network mappings for node groups in cluster
{
"node_group1": {
@ -215,9 +217,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def assign_ips_in_node_group(
cls, net_id, net_name, node_ids, ip_ranges):
"""Assigns IP addresses for nodes with IDs listed in "node_ids" in
given network.
"""
"""Assigns IP addresses for nodes in given network"""
ips_by_node_id = db().query(
models.IPAddr.ip_addr,
models.IPAddr.node
@ -254,9 +254,10 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
@classmethod
def assign_ips_for_nodes_w_template(cls, cluster, nodes):
"""Assign IPs for the case when network template is applied. IPs for
every node are allocated only for networks which are mapped to the
particular node according to the template.
"""Assign IPs for the case when network template is applied
IPs for every node are allocated only for networks which are mapped
to the particular node according to the template.
"""
network_by_group = db().query(
models.NetworkGroup.id,

View File

@ -52,8 +52,7 @@ class NovaNetworkManager70(AllocateVIPs70Mixin, NovaNetworkManager):
@classmethod
def build_role_to_network_group_mapping(cls, *_):
"""Default network role to network mapping is used always so
map building is not required.
"""Not needed due to always using default net role to network mapping
:return: Empty network role to network map
:rtype: dict
@ -62,7 +61,8 @@ class NovaNetworkManager70(AllocateVIPs70Mixin, NovaNetworkManager):
@classmethod
def get_network_group_for_role(cls, network_role, _):
"""Returns network group to which network role is associated.
"""Returns network group to which network role is associated
The default network group from the network role description is
returned.

View File

@ -57,6 +57,7 @@ class BaseTemplate(object):
class NetworkTemplate(BaseTemplate):
"""NetworkTemplate object provides string substitution
NetworkTemplate substitutes <% key %> to value
for key=value
Spaces inside <%...%> block are ignored

View File

@ -43,8 +43,7 @@ IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
class NailgunObject(object):
"""Base class for objects
"""
"""Base class for objects"""
#: Serializer class for object
serializer = BasicSerializer
@ -114,6 +113,7 @@ class NailgunObject(object):
@classmethod
def save(cls, instance=None):
"""Save current changes for instance in DB.
Current transaction will be committed
(in case of SQLAlchemy).
@ -148,8 +148,7 @@ class NailgunObject(object):
class NailgunCollection(object):
"""Base class for object collections
"""
"""Base class for object collections"""
#: Single object class
single = NailgunObject
@ -238,6 +237,7 @@ class NailgunCollection(object):
@classmethod
def filter_by(cls, iterable, **kwargs):
"""Filter given iterable by specified kwargs.
In case if iterable=None filters all object instances
:param iterable: iterable (SQLAlchemy query)
@ -265,7 +265,8 @@ class NailgunCollection(object):
@classmethod
def filter_by_not(cls, iterable, **kwargs):
"""Filter given iterable by specified kwargs with negation.
In case of `iterable` is `None` filters all object instances.
In case if `iterable` is `None` filters all object instances.
:param iterable: iterable (SQLAlchemy query)
:returns: filtered iterable (SQLAlchemy query)
@ -291,6 +292,7 @@ class NailgunCollection(object):
@classmethod
def lock_for_update(cls, iterable):
"""Use SELECT FOR UPDATE on a given iterable (query).
In case if iterable=None returns all object instances
:param iterable: iterable (SQLAlchemy query)
@ -310,6 +312,7 @@ class NailgunCollection(object):
def filter_by_list(cls, iterable, field_name, list_of_values,
order_by=()):
"""Filter given iterable by list of list_of_values.
In case if iterable=None filters all object instances
:param iterable: iterable (SQLAlchemy query)
@ -336,6 +339,7 @@ class NailgunCollection(object):
@classmethod
def filter_by_id_list(cls, iterable, uid_list):
"""Filter given iterable by list of uids.
In case if iterable=None filters all object instances
:param iterable: iterable (SQLAlchemy query)
@ -351,6 +355,7 @@ class NailgunCollection(object):
@classmethod
def eager_base(cls, iterable, options):
"""Eager load linked object instances (SQLAlchemy FKs).
In case if iterable=None applies to all object instances
:param iterable: iterable (SQLAlchemy query)
@ -365,6 +370,7 @@ class NailgunCollection(object):
@classmethod
def eager(cls, iterable, fields):
"""Eager load linked object instances (SQLAlchemy FKs).
By default joinedload will be applied to every field.
If you want to use custom eagerload method - use eager_base
In case if iterable=None applies to all object instances
@ -389,6 +395,7 @@ class NailgunCollection(object):
@classmethod
def to_list(cls, iterable=None, fields=None):
"""Serialize iterable to list of dicts
In case if iterable=None serializes all object instances
:param iterable: iterable (SQLAlchemy query)
@ -404,6 +411,7 @@ class NailgunCollection(object):
@classmethod
def to_json(cls, iterable=None, fields=None):
"""Serialize iterable to JSON
In case if iterable=None serializes all object instances
:param iterable: iterable (SQLAlchemy query)

View File

@ -46,16 +46,14 @@ from nailgun.utils import traverse
class Attributes(NailgunObject):
"""Cluster attributes object
"""
"""Cluster attributes object"""
#: SQLAlchemy model for Cluster attributes
model = models.Attributes
@classmethod
def generate_fields(cls, instance):
"""Generate field values for Cluster attributes using
generators.
"""Generate field values for Cluster attributes using generators.
:param instance: Attributes instance
:returns: None
@ -77,9 +75,10 @@ class Attributes(NailgunObject):
@classmethod
def merged_attrs(cls, instance):
"""Generates merged dict which includes generated Cluster
attributes recursively updated by new values from editable
attributes.
"""Generates merged dict of attributes
Result includes generated Cluster attributes recursively updated
by new values from editable attributes
:param instance: Attributes instance
:returns: dict of merged attributes
@ -91,8 +90,11 @@ class Attributes(NailgunObject):
@classmethod
def merged_attrs_values(cls, instance):
"""Transforms raw dict of attributes returned by :func:`merged_attrs`
into dict of facts for sending to orchestrator.
"""Transforms raw dict of attributes into dict of facts
Raw dict is taken from :func:`merged_attrs`
The result of this function is a dict of facts that will be sent to
orchestrator
:param instance: Attributes instance
:returns: dict of merged attributes
@ -116,8 +118,7 @@ class Attributes(NailgunObject):
class Cluster(NailgunObject):
"""Cluster object
"""
"""Cluster object"""
#: SQLAlchemy model for Cluster
model = models.Cluster
@ -128,8 +129,8 @@ class Cluster(NailgunObject):
@classmethod
def create(cls, data):
"""Create Cluster instance with specified parameters in DB.
This includes:
This includes:
* creating Cluster attributes and generating default values \
(see :func:`create_attributes`)
* creating NetworkGroups for Cluster
@ -198,8 +199,8 @@ class Cluster(NailgunObject):
@classmethod
def create_attributes(cls, instance):
"""Create attributes for current Cluster instance and
generate default values for them
"""Create attributes for Cluster instance, generate their values
(see :func:`Attributes.generate_fields`)
:param instance: Cluster instance
@ -297,6 +298,7 @@ class Cluster(NailgunObject):
@classmethod
def get_network_manager(cls, instance=None):
"""Get network manager for Cluster instance.
If instance is None then the default NetworkManager is returned
:param instance: Cluster instance
@ -323,6 +325,7 @@ class Cluster(NailgunObject):
@classmethod
def add_pending_changes(cls, instance, changes_type, node_id=None):
"""Add pending changes for current Cluster.
If node_id is specified then links created changes with node.
:param instance: Cluster instance
@ -369,6 +372,7 @@ class Cluster(NailgunObject):
@classmethod
def clear_pending_changes(cls, instance, node_id=None):
"""Clear pending changes for current Cluster.
If node_id is specified then only clears changes connected
to this node.
@ -392,7 +396,8 @@ class Cluster(NailgunObject):
@classmethod
def update(cls, instance, data):
"""Update Cluster object instance with specified parameters in DB.
"""Update Cluster object instance with specified parameters in DB
If "nodes" are specified in data then they will replace existing ones
(see :func:`update_nodes`)
@ -415,7 +420,8 @@ class Cluster(NailgunObject):
@classmethod
def update_nodes(cls, instance, nodes_ids):
"""Update Cluster nodes by specified node IDs.
"""Update Cluster nodes by specified node IDs
Nodes with specified IDs will replace existing ones in Cluster
:param instance: Cluster instance
@ -481,10 +487,8 @@ class Cluster(NailgunObject):
db().flush()
@classmethod
def get_ifaces_for_network_in_cluster(
cls, instance, net):
"""Method for receiving node_id:iface pairs for all nodes in
specific cluster
def get_ifaces_for_network_in_cluster(cls, instance, net):
"""Method for receiving node_id:iface pairs for all nodes in cluster
:param instance: Cluster instance
:param net: Nailgun specific network name
@ -561,8 +565,7 @@ class Cluster(NailgunObject):
@classmethod
def should_assign_public_to_all_nodes(cls, instance):
"""Determine whether Public network is to be assigned to all nodes in
this cluster.
"""Check if Public network is to be assigned to all nodes in cluster
:param instance: cluster instance
:returns: True when Public network is to be assigned to all nodes
@ -600,15 +603,16 @@ class Cluster(NailgunObject):
@classmethod
def set_primary_role(cls, instance, nodes, role_name):
"""Method for assigning primary attribute for specific role.
- verify that there is no primary attribute of specific role
assigned to cluster nodes with this role in role list
or pending role list, and this node is not marked for deletion
assigned to cluster nodes with this role in role list
or pending role list, and this node is not marked for deletion
- if there is no primary role assigned, filter nodes which have current
role in roles or pending_roles
role in roles or pending_roles
- if there is nodes with ready state - they should have higher priority
- if role was in primary_role_list - change primary attribute
for that association, same for role_list, this is required
because deployment_serializer used by cli to generate deployment info
for that association, same for role_list, this is required
because deployment_serializer used by cli to generate deployment info
:param instance: Cluster db objects
:param nodes: list of Node db objects
@ -643,8 +647,9 @@ class Cluster(NailgunObject):
@classmethod
def set_primary_roles(cls, instance, nodes):
"""Idempotent method for assignment of all primary attribute
for all roles that requires it.
"""Assignment of all primary attribute for all roles that requires it.
This method is idempotent
To mark role as primary add has_primary: true attribute to release
:param instance: Cluster db object
@ -682,9 +687,11 @@ class Cluster(NailgunObject):
@classmethod
def get_primary_node(cls, instance, role_name):
"""Get primary node for role_name. If primary node is not
found None will be returned. Pending roles and roles are
used in search.
"""Get primary node for role_name
If primary node is not found None will be returned
Pending roles and roles are used in search
:param instance: cluster db object
:type: python object
:param role_name: node role name
@ -790,9 +797,10 @@ class Cluster(NailgunObject):
@classmethod
def get_volumes_metadata(cls, instance):
"""Return proper volumes metadata for cluster and consists
with general volumes metadata from release and volumes
metadata from plugins which releated to this cluster
"""Return proper volumes metadata for cluster
Metadata consists of general volumes metadata from release
and volumes metadata from plugins which are related to this cluster
:param instance: Cluster DB instance
:returns: dict -- object with merged volumes metadata
@ -809,8 +817,7 @@ class Cluster(NailgunObject):
@classmethod
def create_vmware_attributes(cls, instance):
"""Store VmwareAttributes instance into DB.
"""
"""Store VmwareAttributes instance into DB."""
vmware_metadata = instance.release.vmware_attributes_metadata
if vmware_metadata:
return VmwareAttributes.create(
@ -844,8 +851,9 @@ class Cluster(NailgunObject):
@classmethod
def get_vmware_attributes(cls, instance):
"""Get VmwareAttributes instance from DB. Now we have
relation with cluster 1:1.
"""Get VmwareAttributes instance from DB.
Now we have relation with cluster 1:1.
"""
return db().query(models.VmwareAttributes).filter(
models.VmwareAttributes.cluster_id == instance.id
@ -853,8 +861,7 @@ class Cluster(NailgunObject):
@classmethod
def get_default_vmware_attributes(cls, instance):
"""Get metadata from release with empty value section.
"""
"""Get metadata from release with empty value section."""
editable = instance.release.vmware_attributes_metadata.get("editable")
editable = traverse(editable, AttributesGenerator, {
'cluster': instance,
@ -864,8 +871,9 @@ class Cluster(NailgunObject):
@classmethod
def update_vmware_attributes(cls, instance, data):
"""Update Vmware attributes. Actually we allways update only
value section in editable.
"""Update Vmware attributes.
Actually we always update only value section in editable.
"""
metadata = instance.vmware_attributes.editable['metadata']
value = data.get('editable', {}).get('value')
@ -882,16 +890,15 @@ class Cluster(NailgunObject):
@classmethod
def is_vmware_enabled(cls, instance):
"""Check if current cluster support vmware configuration
"""
"""Check if current cluster supports vmware configuration"""
attributes = cls.get_attributes(instance).editable
return attributes.get('common', {}).get('use_vcenter', {}).get('value')
@staticmethod
def adjust_nodes_lists_on_controller_removing(instance, nodes_to_delete,
nodes_to_deploy):
"""In case of deleting controller(s) adds other controller(s)
to nodes_to_deploy
"""Adds controllers to nodes_to_deploy if deleting other controllers
:param instance: instance of SqlAlchemy cluster
:param nodes_to_delete: list of nodes to be deleted
:param nodes_to_deploy: list of nodes to be deployed
@ -967,8 +974,7 @@ class Cluster(NailgunObject):
@classmethod
def get_assigned_roles(cls, instance):
"""Get list of all roles currently assigned to nodes
in the specified cluster
"""Get list of all roles currently assigned to nodes in cluster
:param instance: nailgun.db.sqlalchemy.models.Cluster instance
:returns: List of node roles currently assigned
@ -991,8 +997,9 @@ class Cluster(NailgunObject):
@classmethod
def is_network_modification_locked(cls, instance):
"""Checks whether network settings can be modified or
deleted with current status of cluster.
"""Checks whether network settings can be modified or deleted.
The result depends on the current status of cluster.
"""
allowed = [consts.CLUSTER_STATUSES.new,
consts.CLUSTER_STATUSES.stopped,
@ -1002,8 +1009,7 @@ class Cluster(NailgunObject):
class ClusterCollection(NailgunCollection):
"""Cluster collection
"""
"""Cluster collection"""
#: Single Cluster object class
single = Cluster

View File

@ -51,8 +51,9 @@ class MasterNodeSettings(NailgunObject):
@classmethod
def update(cls, instance, data):
"""Update MasterNodeSettings object instance with specified parameters
in DB. master_node_uid cannot be changed so it's ignored.
"""Update MasterNodeSettings instance with specified parameters in DB.
master_node_uid cannot be changed so it's ignored.
:param instance: MasterNodeSettings instance
:param data: dictionary of key-value pairs as object fields

View File

@ -40,6 +40,7 @@ class NetworkGroup(NailgunObject):
@classmethod
def create(cls, data):
"""Create NetworkGroup instance with specified parameters in DB.
Create corresponding IPAddrRange instance with IP range specified in
data or calculated from CIDR if not specified.
@ -79,6 +80,7 @@ class NetworkGroup(NailgunObject):
@classmethod
def update_meta(cls, instance, data):
"""Updates particular keys in object's meta.
Is used by NetworkManager.update_networks as
for old clusters only those data in meta is
allowed for updating
@ -104,7 +106,9 @@ class NetworkGroup(NailgunObject):
@classmethod
def _regenerate_ip_ranges_on_notation(cls, instance, data):
"""Regenerate IP-address ranges basing on 'notation' field of
"""Regenerate IP-address ranges
This method regenerates IPs based on 'notation' field of
Network group 'meta' content.
:param instance: NetworkGroup instance
@ -175,8 +179,9 @@ class NetworkGroup(NailgunObject):
@classmethod
def _delete_ips(cls, instance):
"""Network group cleanup - deletes all IPs were assigned within
the network group.
"""Network group cleanup
Deletes all IPs which were assigned within the network group.
:param instance: NetworkGroup instance
:type instance: models.NetworkGroup

View File

@ -54,8 +54,7 @@ from nailgun.network.template import NetworkTemplate
class Node(NailgunObject):
"""Node object
"""
"""Node object"""
#: SQLAlchemy model for Node
model = models.Node
@ -141,8 +140,7 @@ class Node(NailgunObject):
@classmethod
def should_have_public_with_ip(cls, instance):
"""Determine whether this node should be connected to Public network
with an IP address assigned from that network
"""Returns True if node should have IP belonging to Public network
:param instance: Node DB instance
:returns: True when node has Public network
@ -162,6 +160,7 @@ class Node(NailgunObject):
@classmethod
def should_have_public(cls, instance):
"""Determine whether this node should be connected to Public network,
no matter with or without an IP address assigned from that network
For example Neutron DVR does require Public network access on compute
@ -187,6 +186,7 @@ class Node(NailgunObject):
@classmethod
def create(cls, data):
"""Create Node instance with specified parameters in DB.
This includes:
* generating its name by MAC (if name is not specified in data)
@ -286,6 +286,7 @@ class Node(NailgunObject):
@classmethod
def hardware_info_locked(cls, instance):
"""Returns true if update of hardware information is not allowed.
It is not allowed during provision/deployment, after
successful provision/deployment and during node removal.
"""
@ -297,6 +298,7 @@ class Node(NailgunObject):
@classmethod
def update_interfaces(cls, instance, update_by_agent=False):
"""Update interfaces for Node instance using Cluster
network manager (see :func:`get_network_manager`)
:param instance: Node instance
@ -377,6 +379,7 @@ class Node(NailgunObject):
@classmethod
def update(cls, instance, data):
"""Update Node instance with specified parameters in DB.
This includes:
* adding node to Cluster (if cluster_id is not None in data) \
@ -498,7 +501,8 @@ class Node(NailgunObject):
@classmethod
def reset_to_discover(cls, instance):
"""Flush database objects which is not consistent with actual node
configuration in the event of resetting node to discover state
configuration in the event of resetting node to discover state
:param instance: Node database object
:returns: None
@ -599,6 +603,7 @@ class Node(NailgunObject):
@classmethod
def update_roles(cls, instance, new_roles):
"""Update roles for Node instance.
Logs an error if node doesn't belong to Cluster
:param instance: Node instance
@ -623,6 +628,7 @@ class Node(NailgunObject):
@classmethod
def update_pending_roles(cls, instance, new_pending_roles):
"""Update pending_roles for Node instance.
Logs an error if node doesn't belong to Cluster
:param instance: Node instance
@ -654,6 +660,7 @@ class Node(NailgunObject):
@classmethod
def update_primary_roles(cls, instance, new_primary_roles):
"""Update primary_roles for Node instance.
Logs an error if node doesn't belong to Cluster
:param instance: Node instance
@ -688,6 +695,7 @@ class Node(NailgunObject):
@classmethod
def add_into_cluster(cls, instance, cluster_id):
"""Adds Node to Cluster by its ID.
Also assigns networks by default for Node.
:param instance: Node instance
@ -724,6 +732,7 @@ class Node(NailgunObject):
@classmethod
def get_admin_physical_iface(cls, instance):
"""Returns node's physical iface.
In case if we have bonded admin iface, first
of the bonded ifaces will be returned
@ -745,6 +754,7 @@ class Node(NailgunObject):
@classmethod
def remove_from_cluster(cls, instance):
"""Remove Node from Cluster.
Also drops networks assignment for Node and clears both
roles and pending roles
@ -774,8 +784,7 @@ class Node(NailgunObject):
@classmethod
def move_roles_to_pending_roles(cls, instance):
"""Move roles to pending_roles
"""
"""Move roles to pending_roles"""
instance.pending_roles = instance.pending_roles + instance.roles
instance.roles = []
instance.primary_roles = []
@ -803,8 +812,9 @@ class Node(NailgunObject):
@classmethod
def get_kernel_params(cls, instance):
"""Return cluster kernel_params if they wasnot replaced by
custom params.
"""Get kernel params
Return cluster kernel_params if they weren't replaced by custom params.
"""
return (instance.kernel_params or
Cluster.get_default_kernel_params(instance.cluster))
@ -866,9 +876,11 @@ class Node(NailgunObject):
@classmethod
def get_unique_hostname(cls, node, cluster_id):
"""Generate default hostname 'node-{id}' if it's not used
or 'node-{uuid} otherwise. It's needed for case when user have
manually renamed any another node to 'node-{id}'.
"""Generate default hostname
Hostname is 'node-{id}' if it's not used or 'node-{uuid} otherwise.
It's needed for the case when a user has manually renamed another node
to 'node-{id}'.
"""
hostname = cls.get_slave_name(node)
if cls.get_by_hostname(hostname, cluster_id):
@ -877,8 +889,7 @@ class Node(NailgunObject):
class NodeCollection(NailgunCollection):
"""Node collection
"""
"""Node collection"""
#: Single Node object class
single = Node
@ -900,8 +911,9 @@ class NodeCollection(NailgunCollection):
@classmethod
def prepare_for_lt_6_1_deployment(cls, instances):
"""Prepare environment for deployment,
assign management, public, storage ips
"""Prepare environment for deployment
Assign management, public, storage ips
"""
# TODO(enchantner): check network manager instance for each node
@ -914,8 +926,9 @@ class NodeCollection(NailgunCollection):
@classmethod
def prepare_for_6_1_deployment(cls, instances, nst=None):
"""Prepare environment for deployment,
assign management, public, storage, private ips
"""Prepare environment for deployment
Assign management, public, storage, private ips
"""
# TODO(enchantner): check network manager instance for each node
@ -931,9 +944,7 @@ class NodeCollection(NailgunCollection):
@classmethod
def prepare_for_deployment(cls, instances):
"""Prepare environment for deployment. Assign IPs for all
networks.
"""
"""Prepare environment for deployment. Assign IPs for all networks."""
if not instances:
logger.debug("prepare_for_deployment was called with no instances")
return
@ -977,15 +988,14 @@ class NodeCollection(NailgunCollection):
@classmethod
def prepare_for_provisioning(cls, instances):
"""Prepare environment for provisioning,
assign admin IPs
"""
"""Prepare environment for provisioning, assign admin IPs"""
netmanager = Cluster.get_network_manager()
netmanager.assign_admin_ips(instances)
@classmethod
def lock_nodes(cls, instances):
"""Locking nodes instances, fetched before, but required to be locked
:param instances: list of nodes
:return: list of locked nodes
"""
@ -1014,8 +1024,7 @@ class NodeCollection(NailgunCollection):
@classmethod
def discovery_node_ids(self):
"""List of nodes ids which belong to the cluster and have
'discovery' status
"""Ids of nodes which belong to the cluster and have 'discovery' status
:returns: list of node ids
"""

View File

@ -32,8 +32,7 @@ class OpenStackWorkloadStats(NailgunObject):
@classmethod
def get_last_by(cls, cluster_id, resource_type):
"""Get last entry by cluster_id and resource type.
"""
"""Get last entry by cluster_id and resource type."""
instance = db().query(models.OpenStackWorkloadStats) \
.order_by(models.OpenStackWorkloadStats.created_date.desc()) \
.filter_by(cluster_id=cluster_id) \
@ -48,8 +47,7 @@ class OpenStackWorkloadStatsCollection(NailgunCollection):
@classmethod
def get_ready_to_send(cls):
"""Get entries which are ready to send but were not sent yet.
"""
"""Get entries which are ready to send but were not sent yet."""
last_date = datetime.datetime.utcnow().date() - \
datetime.timedelta(days=settings.OSWL_COLLECT_PERIOD)
instance = db().query(models.OpenStackWorkloadStats) \
@ -60,8 +58,7 @@ class OpenStackWorkloadStatsCollection(NailgunCollection):
@classmethod
def clean_expired_entries(cls):
"""Delete expired oswl entries from db
"""
"""Delete expired oswl entries from db"""
# CAVEAT(aroma): if settings.OSWL_COLLECT_PERIOD is 0
# then all oswl entries will be deleted from db
last_date = datetime.datetime.utcnow().date() - \
@ -73,9 +70,10 @@ class OpenStackWorkloadStatsCollection(NailgunCollection):
@classmethod
def get_last_by_resource_type(cls, resource_type):
"""Get records for given resource_type which have most recent
created_date (today or yesterday). Records (for some clusters)
which were updated earlier will not be selected.
"""Get most recently created records for given resource_type
Records (for some clusters) which were updated earlier than yesterday
will not be selected.
"""
instances = db().query(models.OpenStackWorkloadStats) \
.order_by(models.OpenStackWorkloadStats.created_date.desc()) \

View File

@ -40,7 +40,8 @@ class PluginCollection(base.NailgunCollection):
@classmethod
def all_newest(cls):
"""Returns new plugins.
"""Returns plugins in most recent versions
Example:
There are 4 plugins:
- name: plugin_name, version: 1.0.0
@ -68,6 +69,7 @@ class PluginCollection(base.NailgunCollection):
@classmethod
def get_by_uids(cls, plugin_ids):
"""Returns plugins by given ids.
:param plugin_ids: list of plugin ids
:type plugin_ids: list

View File

@ -33,8 +33,7 @@ from nailgun.settings import settings
class Release(NailgunObject):
"""Release object
"""
"""Release object"""
#: SQLAlchemy model for Release
model = models.Release
@ -72,6 +71,7 @@ class Release(NailgunObject):
@classmethod
def update_role(cls, instance, role):
"""Update existing Release instance with specified role.
Previous ones are deleted.
:param instance: a Release instance
@ -152,8 +152,7 @@ class Release(NailgunObject):
class ReleaseCollection(NailgunCollection):
"""Release collection
"""
"""Release collection"""
#: Single Release object class
single = Release

View File

@ -111,6 +111,7 @@ class NodeInterfacesSerializer(BasicSerializer):
@classmethod
def _get_env_version(cls, instance):
"""Returns environment's version.
Returns current Fuel version by default.
"""
if instance.node.cluster:

View File

@ -110,7 +110,7 @@ class Task(NailgunObject):
elif any(map(lambda s: s.status in ('error',), subtasks)):
for subtask in subtasks:
if not subtask.status in ('error', 'ready'):
if subtask.status not in ('error', 'ready'):
subtask.status = 'error'
subtask.progress = 100
subtask.message = 'Task aborted'
@ -121,7 +121,7 @@ class Task(NailgunObject):
lambda s: (s.message or ""), filter(
lambda s: (
s.status == 'error' and not
# TODO: make this check less ugly
# TODO(aroma): make this check less ugly
s.message == 'Task aborted'
), subtasks)))))

View File

@ -71,7 +71,9 @@ class DBAPI(object):
@lockutils.synchronized('dbapi_backend', 'nailgun-')
def __get_backend(self):
"""Get the actual backend. May be a module or an instance of
"""Get the actual backend
May be a module or an instance of
a class. Doesn't matter to us. We do this synchronized as it's
possible multiple greenthreads started very quickly trying to do
DB calls and eventlet can switch threads before self.__backend gets

View File

@ -17,7 +17,7 @@
# under the License.
import commands
import commands # noqa
import ConfigParser
import os
import urlparse

View File

@ -86,7 +86,8 @@ def delete_if_exists(path, remove=os.unlink):
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
"""Protect code that wants to operate on PATH atomically
Any exception will cause PATH to be removed.
:param path: File to work with

View File

@ -57,8 +57,10 @@ def set_defaults(lock_path):
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
"""Lock implementation which allows multiple locks
This implementation works around issues like
bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.

View File

@ -428,9 +428,9 @@ def setup(product_name):
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
cfg.set_defaults(
log_opts,
logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():

View File

@ -130,7 +130,7 @@ def set_time_override(override_time=None):
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
assert(utcnow.override_time is not None)
try:
for dt in utcnow.override_time:
dt += timedelta

View File

@ -30,8 +30,7 @@ from nailgun.settings import settings
class MuranoMetadataSerializerMixin(object):
def generate_test_vm_image_data(self, node):
"""Adds murano metadata to the test image
"""
"""Adds murano metadata to the test image"""
image_data = super(
MuranoMetadataSerializerMixin,
self).generate_test_vm_image_data(node)
@ -48,8 +47,7 @@ class MuranoMetadataSerializerMixin(object):
class VmwareDeploymentSerializerMixin(object):
def generate_vmware_data(self, node):
"""Extend serialize data with vmware attributes
"""
"""Extend serialize data with vmware attributes"""
vmware_data = {}
allowed_roles = [
'controller',
@ -139,7 +137,9 @@ class VmwareDeploymentSerializerMixin(object):
@staticmethod
def escape_dollar(data):
"""In order to disable variable interpolation in
"""Escape dollar symbol
In order to disable variable interpolation in
values that we write to configuration files during
deployment we must replace all $ (dollar sign) occurrences.
"""
@ -209,6 +209,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def network_ranges(cls, group_id):
"""Returns ranges for network groups
except range for public network for each node
"""
ng_db = db().query(NetworkGroup).filter_by(group_id=group_id).all()
@ -223,8 +224,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def get_ip_ranges_first_last(cls, network_group):
"""Get all ip ranges in "10.0.0.0-10.0.0.255" format
"""
"""Get all ip ranges in "10.0.0.0-10.0.0.255" format"""
return [
"{0}-{1}".format(ip_range.first, ip_range.last)
for ip_range in network_group.ip_ranges
@ -232,8 +232,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def get_addr_mask(cls, network_data, net_name, render_name):
"""Get addr for network by name
"""
"""Get addr for network by name"""
nets = filter(
lambda net: net['name'] == net_name,
network_data)
@ -266,6 +265,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def add_bridge(cls, name, provider=None):
"""Add bridge to schema
It will take global provider if it is omitted here
"""
bridge = {
@ -279,6 +279,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def add_port(cls, name, bridge, provider=None):
"""Add port to schema
Bridge name may be None, port will not be connected to any bridge then
It will take global provider if it is omitted here
Port name can be in form "XX" or "XX.YY", where XX - NIC name,
@ -298,6 +299,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def add_bond(cls, iface, parameters):
"""Add bond to schema
All required parameters should be inside parameters dict. (e.g.
bond_properties, interface_properties, provider, bridge).
bond_properties is obligatory, others are optional.
@ -319,6 +321,7 @@ class NetworkDeploymentSerializer(object):
@classmethod
def add_patch(cls, bridges, provider=None, mtu=None):
"""Add patch to schema
Patch connects two bridges listed in 'bridges'.
OVS bridge must go first in 'bridges'.
It will take global provider if it is omitted here

View File

@ -32,8 +32,7 @@ from nailgun.orchestrator.tasks_serializer import TaskSerializers
class DeploymentGraph(nx.DiGraph):
"""DirectedGraph that is used to generate configuration for speficific
orchestrators.
"""DirectedGraph used to generate configs for speficific orchestrators
In case of astute - we are working with priorities
In - mistral - we will serialize workbook from this graph
@ -116,6 +115,7 @@ class DeploymentGraph(nx.DiGraph):
def get_next_groups(self, processed_nodes):
"""Get nodes that have predecessors in processed_nodes list.
All predecessors should be taken into account, not only direct
parents
@ -152,8 +152,8 @@ class DeploymentGraph(nx.DiGraph):
return rst
def should_exclude_task(self, task):
"""Stores all conditions when task should be excluded from
execution.
"""Stores all conditions when task should be excluded from execution.
:param task: task name
"""
if self.node[task]['type'] in consts.INTERNAL_TASKS:
@ -230,6 +230,7 @@ class DeploymentGraph(nx.DiGraph):
def filter_subgraph(self, start=None, end=None, include=()):
"""Exclude tasks that is not meant to be executed
:param include: container with task names
"""
wgraph = self.find_subgraph(start=start, end=end)
@ -240,9 +241,7 @@ class DeploymentGraph(nx.DiGraph):
class AstuteGraph(object):
"""This object stores logic that required for working with astute
orchestrator.
"""
"""This object stores logic that required for working with astute"""
def __init__(self, cluster):
self.cluster = cluster
@ -280,7 +279,9 @@ class AstuteGraph(object):
return result
def assign_parallel_nodes(self, priority, nodes):
"""It is possible that same node have 2 or more roles that can be
"""Assign parallel nodes
It is possible that the same node has 2 or more roles that can be
deployed in parallel. We can not allow it. That is why priorities
will be assigned in chunks

View File

@ -66,9 +66,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
critical_roles = ['controller', 'ceph-osd', 'primary-mongo']
def serialize(self, cluster, nodes, ignore_customized=False):
"""Method generates facts which
through an orchestrator passes to puppet
"""
"""Method generates facts which are passed to puppet"""
def keyfunc(node):
return bool(node.replaced_deployment_info)
@ -154,7 +152,9 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
return None
def set_storage_parameters(self, cluster, attrs):
"""Generate pg_num as the number of OSDs across the cluster
"""Generate pg_num
pg_num is generated as the number of OSDs across the cluster
multiplied by 100, divided by Ceph replication factor, and
rounded up to the nearest power of 2.
"""
@ -193,9 +193,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
@classmethod
def node_list(cls, nodes):
"""Generate nodes list. Represents
as "nodes" parameter in facts.
"""
"""Generate nodes list. Represents as "nodes" parameter in facts."""
node_list = []
for node in nodes:
@ -220,14 +218,13 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
return filter(lambda node: node['role'] not in roles, nodes)
def set_critical_nodes(self, nodes):
"""Set behavior on nodes deployment error
during deployment process.
"""
"""Set behavior on nodes deployment error during deployment process."""
for n in nodes:
n['fail_if_error'] = n['role'] in self.critical_roles
def serialize_nodes(self, nodes):
"""Serialize node for each role.
For example if node has two roles then
in orchestrator will be passed two serialized
nodes.
@ -239,9 +236,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
return serialized_nodes
def serialize_node(self, node, role):
"""Serialize node, then it will be
merged with common attributes
"""
"""Serialize node, then it will be merged with common attributes"""
node_attrs = {
# Yes, uid really should be a string
'uid': node.uid,
@ -249,8 +244,7 @@ class DeploymentMultinodeSerializer(GraphBasedSerializer):
'status': node.status,
'role': role,
'vms_conf': node.attributes.vms_conf,
# TODO (eli): need to remove, requried
# for the fake thread only
# TODO(eli): need to remove, required for the fake thread only
'online': node.online
}
@ -344,8 +338,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
@classmethod
def node_list(cls, nodes):
"""Node list
"""
"""Node list"""
node_list = super(
DeploymentHASerializer,
cls
@ -357,8 +350,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
return node_list
def get_common_attrs(self, cluster):
"""Common attributes for all facts
"""
"""Common attributes for all facts"""
common_attrs = super(
DeploymentHASerializer,
self
@ -376,8 +368,7 @@ class DeploymentHASerializer(DeploymentMultinodeSerializer):
return common_attrs
def get_assigned_vips(self, cluster):
"""Assign and get vips for net groups
"""
"""Assign and get vips for net groups"""
return objects.Cluster.get_network_manager(cluster).\
assign_vips_for_net_groups(cluster)
@ -555,8 +546,7 @@ def get_serializer_for_cluster(cluster):
def serialize(orchestrator_graph, cluster, nodes, ignore_customized=False):
"""Serialization depends on deployment mode
"""
"""Serialization depends on deployment mode"""
objects.Cluster.set_primary_roles(cluster, nodes)
env_version = cluster.release.environment_version

View File

@ -14,8 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
#(dshulyak) temporary, this config will be moved to fuel-library
#until we will stabilize our api
# (dshulyak) temporary, this config will be moved to fuel-library
# until we will stabilize our api
DEPLOYMENT_51_60 = """
- id: deploy_start

View File

@ -55,9 +55,7 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def network_provider_node_attrs(cls, cluster, node):
"""Serialize node, then it will be
merged with common attributes
"""
"""Serialize node, then it will be merged with common attributes"""
nm = Cluster.get_network_manager(cluster)
networks = nm.get_node_networks(node)
node_attrs = {
@ -68,7 +66,9 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def mellanox_settings(cls, node_attrs, cluster, networks):
"""Serialize mellanox node attrs, then it will be
"""Mellanox settings
Serialize mellanox node attrs, then it will be
merged with common attributes, if mellanox plugin or iSER storage
enabled.
"""
@ -101,8 +101,11 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def set_mellanox_ml2_config(cls, node_attrs, nm, networks):
"""Change the yaml file to include the required configurations
"""Set config for ml2 mellanox mechanism driver
Change the yaml file to include the required configurations
for ml2 mellanox mechanism driver.
should be called only in case of mellanox SR-IOV plugin usage.
"""
# Set physical port for SR-IOV virtual functions
@ -119,7 +122,9 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def fix_iser_port(cls, node_attrs, nm, networks):
"""Change the iser port to eth_iser probed (VF on the HV) interface
"""Fix iser port
Change the iser port to eth_iser probed (VF on the HV) interface
instead of br-storage. that change is made due to RDMA
(Remote Direct Memory Access) limitation of working with physical
interfaces.
@ -170,8 +175,7 @@ class NeutronNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def neutron_attrs(cls, cluster):
"""Network configuration for Neutron
"""
"""Network configuration for Neutron"""
attrs = {}
attrs['L3'] = cls.generate_l3(cluster)
attrs['L2'] = cls.generate_l2(cluster)
@ -834,16 +838,14 @@ class NeutronNetworkDeploymentSerializer70(
@classmethod
def get_node_non_default_networks(cls, node):
"""Returns list of non-default networks assigned to node.
"""
"""Returns list of non-default networks assigned to node."""
nm = Cluster.get_network_manager(node.cluster)
return filter(lambda net: net['name'] not in consts.NETWORKS,
nm.get_node_networks(node))
@classmethod
def get_bridge_name(cls, name, suffix=0):
"""Generates linux bridge name based on network name and suffix.
"""
"""Generates linux bridge name based on network name and suffix."""
if not name.startswith('br-'):
name = 'br-' + name
if suffix:
@ -854,16 +856,16 @@ class NeutronNetworkDeploymentSerializer70(
@classmethod
def is_valid_non_default_bridge_name(cls, name):
"""Validate bridge name for non-default network.
"""
"""Validate bridge name for non-default network."""
if name in consts.DEFAULT_BRIDGES_NAMES:
return False
return bool(cls.RE_BRIDGE_NAME.match(name))
@classmethod
def get_node_non_default_bridge_mapping(cls, node):
"""Returns dict of non-default networks assigned to node with
generated bridges names.
"""Non-default networks assigned to node with generated bridges names
Returns dict
"""
mapping = {}
for net in cls.get_node_non_default_networks(node):
@ -910,8 +912,11 @@ class NeutronNetworkDeploymentSerializer70(
@classmethod
def _get_network_role_mapping(cls, node, mapping):
"""Aggregates common logic for methods 'get_network_role_mapping_to_ip'
and 'get_network_role_mapping_to_interfaces'.
"""Aggregates common logic for mapping retrieval methods
these methods are:
- 'get_network_role_mapping_to_ip'
- 'get_network_role_mapping_to_interfaces'.
"""
roles = dict()
for role in Cluster.get_network_roles(node.cluster):
@ -1122,9 +1127,7 @@ class NeutronNetworkTemplateSerializer70(
@classmethod
def _get_network_roles(cls, node):
"""Returns network roles for the specified node based
on the node's assigned roles.
"""
"""Returns network roles for the node based on the assigned roles."""
roles = {}
template = node.network_template
for node_role in node.all_roles:
@ -1138,7 +1141,8 @@ class NeutronNetworkTemplateSerializer70(
@classmethod
def generate_transformations(cls, node, *args):
"""Overrides default transformation generation.
"""Overrides default transformation generation
Transformations are taken verbatim from each role template's
transformations section.
"""
@ -1258,6 +1262,7 @@ class NeutronNetworkTemplateSerializer70(
@classmethod
def update_nodes_net_info(cls, cluster, nodes):
"""Adds information about networks to each node.
This info is deprecated in 7.0 and should be removed in later version.
"""
nm = Cluster.get_network_manager(cluster)

View File

@ -56,8 +56,7 @@ class NovaNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def novanetwork_attrs(cls, cluster):
"""Network configuration
"""
"""Network configuration"""
attrs = {'network_manager': cluster.network_config.net_manager}
# network_size is required for all managers, otherwise
@ -81,6 +80,7 @@ class NovaNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def add_vlan_interfaces(cls, node):
"""Assign fixed_interfaces and vlan_interface.
They should be equal.
"""
net_manager = Cluster.get_network_manager(node.cluster)
@ -93,8 +93,7 @@ class NovaNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def configure_interfaces(cls, node):
"""Configure interfaces
"""
"""Configure interfaces"""
network_data = node.network_data
interfaces = {}
@ -125,15 +124,16 @@ class NovaNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def __make_interface_name(cls, name, vlan):
"""Make interface name
"""
"""Make interface name"""
if name and vlan:
return '.'.join([name, str(vlan)])
return name
@classmethod
def __add_hw_interfaces(cls, interfaces, hw_interfaces):
"""Add interfaces which not represents in
"""Add hardware interfaces
Add interfaces which are not present in the
interfaces list but are present on the node
"""
for hw_interface in hw_interfaces:
@ -145,8 +145,7 @@ class NovaNetworkDeploymentSerializer(NetworkDeploymentSerializer):
@classmethod
def interfaces_list(cls, network_data):
"""Generate list of interfaces
"""
"""Generate list of interfaces"""
interfaces = {}
for network in network_data:
if_name = cls.__make_interface_name(
@ -312,9 +311,7 @@ class NovaNetworkDeploymentSerializer70(NovaNetworkDeploymentSerializer61):
@classmethod
def network_provider_node_attrs(cls, cluster, node):
"""Serialize node, then it will be
merged with common attributes
"""
"""Serialize node, then it will be merged with common attributes"""
node_attrs = super(NovaNetworkDeploymentSerializer70,
cls).network_provider_node_attrs(cluster, node)
node_attrs['network_metadata'] = cls.generate_network_metadata(cluster)

View File

@ -89,7 +89,8 @@ class BasePluginDeploymentHooksSerializer(object):
self._set_task_defaults(plugin, task), task)
def _sort_by_stage_postfix(self, tasks):
"""Sorts tasks in the correct order by task postfixes,
"""Sorts tasks by task postfixes
for example here are several tasks' stages:
stage: post_deployment/100

View File

@ -42,8 +42,7 @@ class Priority(object):
class PriorityStrategy(object):
"""Set priorities for sequence of tasks using some strategy.
"""
"""Set priorities for sequence of tasks using some strategy"""
def __init__(self):
#: priority sequence generator

View File

@ -240,6 +240,7 @@ class ProvisioningSerializer(object):
@classmethod
def interfaces_mapping_for_udev(cls, node):
"""Serialize interfaces mapping for cobbler
:param node: node model
:returns: returns string, example:
00:02:03:04:04_eth0,00:02:03:04:05_eth1

View File

@ -119,8 +119,7 @@ class ExpressionBasedTask(DeploymentHook):
class GenericNodeHook(ExpressionBasedTask):
"""Should be used for node serialization.
"""
"""Should be used for node serialization."""
hook_type = abc.abstractproperty
@ -364,7 +363,9 @@ class TaskSerializers(object):
deploy_serializers = [PuppetHook, CreateVMsOnCompute]
def __init__(self, stage_serializers=None, deploy_serializers=None):
"""Task serializers for stage (pre/post) are different from
"""TaskSerializers initializer
Task serializers for stage (pre/post) are different from
serializers used for main deployment.
This should be considered as limitation of current architecture,

View File

@ -31,8 +31,8 @@ from nailgun.settings import settings
@six.add_metaclass(abc.ABCMeta)
class PluginAdapterBase(object):
"""Implements wrapper for plugin db model to provide
logic related to configuration files.
"""Implements wrapper for plugin db model configuration files logic
1. Uploading plugin provided cluster attributes
2. Uploading tasks
3. Enabling/Disabling of plugin based on cluster attributes
@ -55,13 +55,10 @@ class PluginAdapterBase(object):
@abc.abstractmethod
def path_name(self):
"""A name which is used to create path to
plugin related scripts and repositories
"""
"""A name which is used to create path to plugin scripts and repos"""
def sync_metadata_to_db(self):
"""Sync metadata from config yaml files into DB
"""
"""Sync metadata from config yaml files into DB"""
metadata_file_path = os.path.join(
self.plugin_path, self.plugin_metadata)
@ -95,16 +92,17 @@ class PluginAdapterBase(object):
return data
def get_plugin_attributes(self, cluster):
"""Should be used for initial configuration uploading to
custom storage. Will be invoked in 2 cases:
1. Cluster is created but there was no plugins in system
on that time, so when plugin is uploaded we need to iterate
over all clusters and decide if plugin should be applied
2. Plugins is uploaded before cluster creation, in this case
we will iterate over all plugins and upload configuration for them
"""Should be used for initial configuration uploading to custom storage
In this case attributes will be added to same cluster attributes
model and stored in editable field
Will be invoked in 2 cases:
1. Cluster is created but there was no plugins in system
on that time, so when plugin is uploaded we need to iterate
over all clusters and decide if plugin should be applied
2. Plugins is uploaded before cluster creation, in this case
we will iterate over all plugins and upload configuration for them
In this case attributes will be added to same cluster attributes
model and stored in editable field
"""
config = {}
if os.path.exists(self.config_file):
@ -116,7 +114,8 @@ class PluginAdapterBase(object):
return {}
def validate_cluster_compatibility(self, cluster):
"""Validates if plugin is compatible with cluster.
"""Validates if plugin is compatible with cluster
- validates operating systems
- modes of clusters (simple or ha)
- release version
@ -134,8 +133,7 @@ class PluginAdapterBase(object):
return False
def _is_release_version_compatible(self, rel_version, plugin_rel_version):
"""Checks if release version is compatible with
plugin version.
"""Checks if release version is compatible with plugin version
:param str rel_version: release version
:param str plugin_rel_version: plugin release version
@ -147,7 +145,8 @@ class PluginAdapterBase(object):
return rel_os.startswith(plugin_os) and rel_fuel.startswith(plugin_rel)
def update_metadata(self, attributes):
"""Overwrights only default values in metadata.
"""Overwrites only default values in metadata
Plugin should be able to provide UI "native" conditions
to enable/disable plugin on UI itself
"""
@ -162,8 +161,9 @@ class PluginAdapterBase(object):
'plugin_id': self.plugin.id}
def set_cluster_tasks(self):
"""Loads plugins provided tasks from tasks config file and
sets them to instance tasks variable.
"""Load plugins provided tasks and set them to instance tasks variable
Provided tasks are loaded from tasks config file.
"""
task_yaml = os.path.join(
self.plugin_path,
@ -208,9 +208,7 @@ class PluginAdapterBase(object):
@property
def normalized_roles_metadata(self):
"""Adds a restriction for every role which blocks plugin disabling
if nodes with plugin-provided roles exist in the cluster
"""
"""Block plugin disabling if nodes with plugin-provided roles exist"""
result = {}
for role, meta in six.iteritems(self.plugin.roles_metadata):
condition = "settings:{0}.metadata.enabled == false".format(
@ -222,9 +220,7 @@ class PluginAdapterBase(object):
return result
def get_release_info(self, release):
"""Returns plugin release information which corresponds to
a provided release.
"""
"""Get plugin release information which corresponds to given release"""
os = release.operating_system.lower()
version = release.version
@ -266,26 +262,26 @@ class PluginAdapterBase(object):
class PluginAdapterV1(PluginAdapterBase):
"""Plugins attributes class for package version 1.0.0
"""
"""Plugins attributes class for package version 1.0.0"""
@property
def path_name(self):
"""Returns a name and full version, e.g. if there is
a plugin with name "plugin_name" and version is "1.0.0",
the method returns "plugin_name-1.0.0"
"""Returns a name and full version
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0.0"
"""
return self.full_name
class PluginAdapterV2(PluginAdapterBase):
"""Plugins attributes class for package version 2.0.0
"""
"""Plugins attributes class for package version 2.0.0"""
@property
def path_name(self):
"""Returns a name and major version of the plugin, e.g.
if there is a plugin with name "plugin_name" and version
"""Returns a name and major version of the plugin
e.g. if there is a plugin with name "plugin_name" and version
is "1.0.0", the method returns "plugin_name-1.0".
It's different from previous version because in previous
@ -299,8 +295,9 @@ class PluginAdapterV2(PluginAdapterBase):
@property
def _major_version(self):
"""Returns major version of plugin's version, e.g.
if plugin has 1.2.3 version, the method returns 1.2
"""Returns major version of plugin's version
e.g. if plugin has 1.2.3 version, the method returns 1.2
"""
version_tuple = StrictVersion(self.plugin.version).version
major = '.'.join(map(str, version_tuple[:2]))
@ -309,8 +306,7 @@ class PluginAdapterV2(PluginAdapterBase):
class PluginAdapterV3(PluginAdapterV2):
"""Plugin wrapper class for package version 3.0.0
"""
"""Plugin wrapper class for package version 3.0.0"""
node_roles_config_name = 'node_roles.yaml'
volumes_config_name = 'volumes.yaml'
@ -318,8 +314,7 @@ class PluginAdapterV3(PluginAdapterV2):
network_roles_config_name = 'network_roles.yaml'
def sync_metadata_to_db(self):
"""Sync metadata from all config yaml files to DB
"""
"""Sync metadata from all config yaml files to DB"""
super(PluginAdapterV3, self).sync_metadata_to_db()
data_to_update = {}

View File

@ -26,7 +26,9 @@ class PluginManager(object):
@classmethod
def process_cluster_attributes(cls, cluster, attrs):
"""Iterates through plugins attributes, creates
"""Generate Cluster-Plugins relation based on attributes
Iterates through plugins attributes, creates
or deletes Cluster <-> Plugins relation if plugin
is enabled or disabled.
@ -166,8 +168,7 @@ class PluginManager(object):
@classmethod
def get_volumes_metadata(cls, cluster):
"""Get volumes metadata for specific cluster from all
plugins which enabled for it.
"""Get volumes metadata for cluster from all plugins enabled for it
:param cluster: Cluster DB model
:returns: dict -- object with merged volumes data from plugins
@ -209,8 +210,9 @@ class PluginManager(object):
@classmethod
def sync_plugins_metadata(cls, plugin_ids=None):
"""Sync metadata for plugins by given ids. If there is not
ids all newest plugins will be synced
"""Sync metadata for plugins by given ids.
If there are no ids all newest plugins will be synced
"""
if plugin_ids:
plugins = PluginCollection.get_by_uids(plugin_ids)

View File

@ -536,8 +536,7 @@ class NailgunReceiver(object):
@classmethod
def _make_plugins_success_message(cls, plugins):
"""Makes plugins installation message
"""
"""Makes plugins installation message"""
msg = 'Plugin {0} is deployed. {1}'
return '\n'.join(
map(lambda p: msg.format(p.name, p.description), plugins))
@ -876,6 +875,7 @@ class NailgunReceiver(object):
@classmethod
def multicast_verification_resp(cls, **kwargs):
"""Receiver for verification of multicast packages
data - {1: response, 2: response}
"""
logger.info(
@ -930,6 +930,7 @@ class NailgunReceiver(object):
@classmethod
def check_dhcp_resp(cls, **kwargs):
"""Receiver method for check_dhcp task
For example of kwargs check FakeCheckingDhcpThread
"""
logger.info(

View File

@ -14,9 +14,7 @@
class StatsException(Exception):
"""Base class exception for all statistic related
custom exceptions
"""
"""Base class exception for all statistic related custom exceptions"""
pass

View File

@ -27,7 +27,8 @@ from nailgun import utils
class InstallationInfo(object):
"""Collects info about Fuel installation
Master nodes, clusters, networks, e.t.c.
Includes master nodes, clusters, networks, etc.
Used for collecting info for fuel statistics
"""

View File

@ -30,9 +30,7 @@ from nailgun.statistics import utils
class ClientProvider(object):
"""Initialize clients for OpenStack components
and expose them as attributes
"""
"""Initialize clients for OpenStack components and expose them as attrs"""
clients_version_attr_path = {
"nova": ["client", "version"],
@ -88,8 +86,7 @@ class ClientProvider(object):
return self._keystone
def _get_keystone_client(self, auth_creds):
"""Instantiate client based on returned from keystone
server version data.
"""Create client based on returned from keystone server version data.
:param auth_creds: credentials for authentication which also are
parameters for client's instance initialization
@ -145,7 +142,9 @@ class ClientProvider(object):
def get_info_from_os_resource_manager(client_provider, resource_name):
"""Utilize clients provided by client_provider instance to retrieve
"""Use OpenStack resource manager to retrieve information about resource
Utilize clients provided by client_provider instance to retrieve
data for resource_name, description of which is stored in
resources_description data structure.

View File

@ -17,8 +17,9 @@ from nailgun.statistics.utils import WhiteListRule
def volume_attachments_transform_func(attachments_list):
"""Transformation func for attachment attribute of the volume
oswl resource. Filter each element (which is dict itself) of
"""Transformation func for attachment attribute of the volume oswl resource
Filter each element (which is a dict itself) of the
attachments list.
:param attachments_list: list of dicts which keys describes attachment

View File

@ -73,6 +73,7 @@ def oswl_data_checksum(data):
def oswl_statistics_save(cluster_id, resource_type, data):
"""Save OSWL statistics data for given cluster and resource_type to DB.
DB changes are not committed here.
"""
dt = datetime.utcnow()

View File

@ -61,9 +61,10 @@ def get_online_controller(cluster):
def get_attr_value(path, func, attrs):
"""Gets attribute value from 'attrs' by specified
'path'. In case of nested list - list of
of found values will be returned
"""Gets attribute value from 'attrs' by specified path
In case of nested list - list of found values will be returned
:param path: list of keys for accessing the attribute value
:param func: if not None - will be applied to the value
:param attrs: attributes data
@ -104,9 +105,9 @@ def get_nested_attr(obj, attr_path):
@contextmanager
def set_proxy(proxy):
"""Replace http_proxy environment variable for the scope
of context execution. After exit from context old proxy value
(if any) is restored
"""Replace http_proxy environment var for the scope of context execution
After exit from context old proxy value (if any) is restored
:param proxy: - proxy url
"""

View File

@ -528,8 +528,7 @@ class FakeVerificationThread(FakeThread):
class FakeMulticastVerifications(FakeAmpqThread):
"""network verifications will be as single dispatcher method in naily
"""
"""Network verifications will be as single dispatcher method in naily"""
def ready_multicast(self):
response = {
@ -591,12 +590,10 @@ class FakeMulticastVerifications(FakeAmpqThread):
class FakeCheckingDhcpThread(FakeAmpqThread):
"""Thread to be used with test_task_managers.py
"""
"""Thread to be used with test_task_managers.py"""
def _get_message(self, mac):
"""Example of message with discovered dhcp server
"""
"""Example of message with discovered dhcp server"""
nodes = [{'uid': '90',
'status': 'ready',
'data': [{'mac': mac,

View File

@ -75,7 +75,9 @@ class TaskHelper(object):
# TODO(aroma): move it to utils module
@classmethod
def before_deployment_error(cls, task):
"""Returns True in case of check_before_deployment
"""Checks if there was an error before deployment
Returns True in case of check_before_deployment
or check_networks error and if cluster wasn't
deployed yet
"""
@ -235,6 +237,7 @@ class TaskHelper(object):
@classmethod
def nodes_to_deploy_ha(cls, cluster, nodes):
"""Get nodes for deployment for ha mode
* in case of failed controller should be redeployed
all controllers
* in case of failed non-controller should be
@ -262,9 +265,7 @@ class TaskHelper(object):
@classmethod
def __has_controller_nodes(cls, nodes):
"""Returns True if list of nodes has
at least one controller.
"""
"""Returns True if list of nodes has at least one controller."""
for node in nodes:
if 'controller' in set(node.roles + node.pending_roles):
return True
@ -282,6 +283,7 @@ class TaskHelper(object):
@classmethod
def prepare_action_log_kwargs(cls, task):
"""Prepares kwargs dict for ActionLog db model class
:param task: task instance to be processed
:returns: kwargs dict for action log creation
"""
@ -360,6 +362,7 @@ class TaskHelper(object):
@classmethod
def create_action_log(cls, task):
"""Creates action log
:param task: SqlAlchemy task object
:return: SqlAlchemy action_log object
"""

View File

@ -214,6 +214,7 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
def _execute_async(self, supertask_id, deployment_tasks=None,
nodes_to_provision_deploy=None):
"""Function for execute task in the mule
:param supertask_id: id of parent task
"""
logger.info(u"ApplyChangesTask: execute async starting for task %s",
@ -253,6 +254,7 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
def _execute_async_content(self, supertask, deployment_tasks=None,
nodes_to_provision_deploy=None):
"""Processes supertask async in mule
:param supertask: SqlAlchemy task object
"""
@ -424,6 +426,7 @@ class ApplyChangesTaskManager(TaskManager, DeploymentCheckMixin):
def check_before_deployment(self, supertask):
"""Performs checks before deployment
:param supertask: task SqlAlchemy object
"""
# checking admin intersection with untagged
@ -505,8 +508,7 @@ class SpawnVMsTaskManager(ApplyChangesTaskManager):
class ProvisioningTaskManager(TaskManager):
def execute(self, nodes_to_provision):
"""Run provisioning task on specified nodes
"""
"""Run provisioning task on specified nodes"""
# locking nodes
nodes_ids = [node.id for node in nodes_to_provision]
nodes = objects.NodeCollection.filter_by_list(

View File

@ -89,38 +89,42 @@ def fake_cast(queue, messages, **kwargs):
class DeploymentTask(object):
# LOGIC
# Use cases:
# 1. Cluster exists, node(s) added
# If we add one node to existing OpenStack cluster, other nodes may require
# updates (redeployment), but they don't require full system reinstallation.
# How to: run deployment for all nodes which system type is target.
# Run provisioning first and then deployment for nodes which are in
# discover system type.
# Q: Should we care about node status (provisioning, error, deploying)?
# A: offline - when node doesn't respond (agent doesn't run, not
# implemented); let's say user should remove this node from
# cluster before deployment.
# ready - target OS is loaded and node is Ok, we redeploy
# ready nodes only if cluster has pending changes i.e.
# network or cluster attrs were changed
# discover - in discovery mode, provisioning is required
# provisioning - at the time of task execution there should not be such
# case. If there is - previous provisioning has failed.
# Possible solution would be to try again to provision
# deploying - the same as provisioning, but stucked in previous deploy,
# solution - try to deploy. May loose some data if reprovis.
# error - recognized error in deployment or provisioning... We have to
# know where the error was. If in deployment - reprovisioning may
# not be a solution (can loose data). If in provisioning - can do
# provisioning & deployment again
# 2. New cluster, just added nodes
# Provision first, and run deploy as second
# 3. Remove some and add some another node
# Deletion task will run first and will actually remove nodes, include
# removal from DB.. however removal from DB happens when remove_nodes_resp
# is ran. It means we have to filter nodes and not to run deployment on
# those which are prepared for removal.
"""Task for applying changes to cluster
LOGIC
Use cases:
1. Cluster exists, node(s) added
If we add one node to existing OpenStack cluster, other nodes may require
updates (redeployment), but they don't require full system
reinstallation.
How to: run deployment for all nodes which system type is target.
Run provisioning first and then deployment for nodes which are in
discover system type.
Q: Should we care about node status (provisioning, error, deploying)?
A: offline - when node doesn't respond (agent doesn't run, not
implemented); let's say user should remove this node from
cluster before deployment.
ready - target OS is loaded and node is Ok, we redeploy
ready nodes only if cluster has pending changes i.e.
network or cluster attrs were changed
discover - in discovery mode, provisioning is required
provisioning - at the time of task execution there should not be such
case. If there is - previous provisioning has failed.
Possible solution would be to try again to provision
deploying - the same as provisioning, but stuck in previous deploy,
solution - try to deploy. May lose some data if reprovisioned.
error - recognized error in deployment or provisioning... We have to
know where the error was. If in deployment - reprovisioning
may not be a solution (can loose data).
If in provisioning - can do provisioning & deployment again
2. New cluster, just added nodes
Provision first, and run deploy as second
3. Remove some and add some another node
Deletion task will run first and will actually remove nodes, include
removal from DB.. however removal from DB happens when remove_nodes_resp
is ran. It means we have to filter nodes and not to run deployment on
those which are prepared for removal.
"""
@classmethod
def _get_deployment_method(cls, cluster):
@ -188,7 +192,9 @@ class DeploymentTask(object):
class UpdateNodesInfoTask(object):
"""The task is intended to be used in order to update both nodes.yaml and
"""Task for updating nodes.yaml and /etc/hosts on all slaves
The task is intended to be used in order to update both nodes.yaml and
/etc/hosts on all slaves. This task aren't going to manage node or cluster
statuses, and should be used only in one case - when we remove some node
and don't add anything new (if some new node is added, these tasks will
@ -808,9 +814,7 @@ class BaseNetworkVerification(object):
@classmethod
def enabled(cls, cluster):
"""Should be used to verify that subtask is enabled based on
cluster configuration
"""
"""Verify that subtask is enabled based on cluster configuration"""
return True
@ -886,7 +890,8 @@ class VerifyNetworksForTemplateMixin(object):
@classmethod
def get_ifaces_from_template_on_undeployed_node(cls, node, node_json):
"""Retrieves list of network interfaces on the undeployed node
from the network template.
List is retrieved from the network template.
"""
bonds = collections.defaultdict(list)
ifaces = collections.defaultdict(set)
@ -916,7 +921,8 @@ class VerifyNetworksForTemplateMixin(object):
@classmethod
def get_ifaces_from_template_on_deployed_node(cls, node, node_json):
"""Retrieves list of network interfaces on the deployed node
from the network template.
List is retrieved from the network template.
"""
ifaces = collections.defaultdict(set)
for transformation, vlan_ids in cls._get_transformations(node):
@ -949,8 +955,7 @@ class VerifyNetworksForTemplateMixin(object):
).get_ifaces_on_undeployed_node(node, node_json, has_public)
def get_ifaces_on_deployed_node(self, node, node_json, has_public):
"""Retrieves list of network interfaces on the deployed node.
"""
"""Retrieves list of network interfaces on the deployed node."""
if node.network_template:
self.get_ifaces_from_template_on_deployed_node(node, node_json)
return
@ -977,8 +982,7 @@ class VerifyNetworksTask(VerifyNetworksForTemplateMixin,
class CheckDhcpTask(VerifyNetworksForTemplateMixin,
BaseNetworkVerification):
"""Task for dhcp verification
"""
"""Task for dhcp verification"""
class MulticastVerificationTask(BaseNetworkVerification):
@ -1002,11 +1006,13 @@ class MulticastVerificationTask(BaseNetworkVerification):
@classmethod
def enabled(cls, cluster):
"""Multicast should be enabled only in case 'corosync' section
"""Checks whether task is enabled
Multicast should be enabled only in case 'corosync' section
is present in editable attributes, which is not the case if cluster
was upgraded from 5.0
"""
#TODO(dshulyak) enable it, when it will be possible to upgrade
# TODO(dshulyak) enable it, when it will be possible to upgrade
# mcagent and network checker for old envs
return False
@ -1136,8 +1142,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _is_disk_checking_required(cls, node):
"""Disk checking required in case if node is not provisioned.
"""
"""Disk checking required in case if node is not provisioned."""
if node.status in ('ready', 'deploying', 'provisioned') or \
(node.status == 'error' and node.error_type != 'provision'):
return False
@ -1201,9 +1206,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_mongo_nodes(cls, task):
"""Mongo nodes shouldn't be present in environment
if external mongo is chosen.
"""
"""Check for mongo nodes presence in env with external mongo"""
components = objects.Attributes.merged_attrs(
task.cluster.attributes).get("additional_components", None)
if (components and components["ceilometer"]["value"]
@ -1219,8 +1222,7 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_vmware_consistency(cls, task):
"""Checks vmware attributes consistency and proper values
"""
"""Checks vmware attributes consistency and proper values"""
attributes = task.cluster.attributes.editable
vmware_attributes = task.cluster.vmware_attributes
# Old(< 6.1) clusters haven't vmware support
@ -1284,8 +1286,9 @@ class CheckBeforeDeploymentTask(object):
@classmethod
def _check_deployment_graph_for_correctness(self, task):
"""Check that deployment graph hasn't not existing dependencies(
such as requires|required_for|tasks|groups)
"""Check that deployment graph doesn't have nonexistent dependencies
example dependencies are: requires|required_for|tasks|groups
"""
deployment_tasks = objects.Cluster.get_deployment_tasks(task.cluster)
graph_validator = deployment_graph.DeploymentGraphValidator(

View File

@ -340,8 +340,9 @@ class EnvironmentManager(object):
def create_nodes_w_interfaces_count(self,
nodes_count, if_count=2, **kwargs):
"""Create nodes_count nodes with if_count interfaces each.
Default random MAC is generated for each interface.
"""Create nodes_count nodes with if_count interfaces each
Default random MAC is generated for each interface
"""
nodes = []
for i in range(nodes_count):
@ -1494,6 +1495,7 @@ class BaseMasterNodeSettignsTest(BaseIntegrationTest):
class BaseValidatorTest(TestCase):
"""JSON-schema validation policy:
1) All required properties are present;
2) No additional properties allowed;
3) Item has correct type.

View File

@ -20,8 +20,9 @@ from nailgun.test.base import BaseAuthenticationIntegrationTest
class TestAuthToken(BaseAuthenticationIntegrationTest):
"""Test the authentication tokens -- using X-Auth-Token header
and the token=xxx cookie.
"""Test the authentication tokens
using X-Auth-Token header and the token=xxx cookie.
The header has priority over cookie.
"""
@ -31,8 +32,7 @@ class TestAuthToken(BaseAuthenticationIntegrationTest):
self.headers = copy.deepcopy(self.default_headers)
def test_no_token_error(self):
"""Make sure that 401 is raised when no token is provided.
"""
"""Make sure that 401 is raised when no token is provided."""
resp = self.app.get(
'/api/nodes/allocation/stats',
headers=self.default_headers,
@ -41,8 +41,7 @@ class TestAuthToken(BaseAuthenticationIntegrationTest):
self.assertEqual(401, resp.status_code)
def test_x_auth_token_header(self):
"""Check that X-Auth-Token header authentication works.
"""
"""Check that X-Auth-Token header authentication works."""
self.headers['X-Auth-Token'] = self.token
resp = self.app.get(
@ -52,8 +51,7 @@ class TestAuthToken(BaseAuthenticationIntegrationTest):
self.assertEqual(200, resp.status_code)
def test_cookie_token(self):
"""Make sure that Cookie authentication works.
"""
"""Make sure that Cookie authentication works."""
self.headers['Cookie'] = 'token=%s' % self.token
resp = self.app.get(
@ -63,9 +61,7 @@ class TestAuthToken(BaseAuthenticationIntegrationTest):
self.assertEqual(200, resp.status_code)
def test_x_auth_token_header_has_priority_over_cookie(self):
"""Make sure that X-Auth-Token header has priority over the
Cookie token.
"""
"""X-Auth-Token header has priority over the Cookie token."""
self.headers['X-Auth-Token'] = self.token
self.headers['Cookie'] = 'token=xxx'

View File

@ -60,10 +60,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEqual(201, resp.status_code)
def test_cluster_create_no_ip_addresses(self):
"""In this test we check that no error is occured
if two clusters will have same networks updated to use
full CIDR
"""
"""Two clusters having same networks updated to use full CIDR is ok"""
cluster = self.env.create_cluster(api=True)
cluster_db = self.db.query(Cluster).get(cluster["id"])
cluster2 = self.env.create_cluster(api=True,

View File

@ -397,9 +397,6 @@ class TestTaskDeployGraph(BaseGraphTasksTests):
self.assertIn('Task nonexistent is not present in graph', resp.body)
def test_single_task_from_tasks_subset(self, m_get_tasks):
"""If only pre-B and pre-A tasks will be executed,
what requirements pre-C will have?
"""
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={

View File

@ -23,8 +23,9 @@ from nailgun.test import base
class TestMongoNodes(base.BaseTestCase):
def get_custom_meta(self, ceilometer_enabled, ext_mongo_enabled):
"""This method sets values for metadata parameters:
ceilometer and ext_mongo (enabled or not).
"""This method sets values for metadata parameters
parameters are ceilometer and ext_mongo (enabled or not)
"""
attr_meta = self.env.get_default_attributes_metadata()
attr_meta['editable']['additional_components'].update({

View File

@ -84,13 +84,12 @@ class TestNetworkManager(BaseNetworkManagerTest):
consts.NETWORKS.management
)
management_net = self.db.query(NetworkGroup).\
filter(
NetworkGroup.group_id ==
objects.Cluster.get_default_group(self.env.clusters[0]).id
).filter_by(
name=consts.NETWORKS.management
).first()
management_net = self.db.query(NetworkGroup).filter(
NetworkGroup.group_id ==
objects.Cluster.get_default_group(self.env.clusters[0]).id
).filter_by(
name=consts.NETWORKS.management
).first()
assigned_ips = []
for node in self.env.nodes:
@ -314,12 +313,11 @@ class TestNetworkManager(BaseNetworkManagerTest):
node_group = self.env.create_node_group()
self.env.nodes[1].group_id = node_group.json_body['id']
self.db().flush()
mgmt_net = self.db.query(NetworkGroup).\
filter(
NetworkGroup.group_id == node_group.json_body["id"]
).filter_by(
name=consts.NETWORKS.management
).first()
mgmt_net = self.db.query(NetworkGroup).filter(
NetworkGroup.group_id == node_group.json_body["id"]
).filter_by(
name=consts.NETWORKS.management
).first()
mock_range = IPAddrRange(
first='9.9.9.1',
@ -333,12 +331,11 @@ class TestNetworkManager(BaseNetworkManagerTest):
consts.NETWORKS.management)
for n in self.env.nodes:
mgmt_net = self.db.query(NetworkGroup).\
filter(
NetworkGroup.group_id == n.group_id
).filter_by(
name=consts.NETWORKS.management
).first()
mgmt_net = self.db.query(NetworkGroup).filter(
NetworkGroup.group_id == n.group_id
).filter_by(
name=consts.NETWORKS.management
).first()
ip = self.db.query(IPAddr).\
filter_by(network=mgmt_net.id).\
filter_by(node=n.id).first()

View File

@ -53,8 +53,8 @@ class TestHandlers(BaseIntegrationTest):
resp = self.app.post(
reverse('NodeCollectionHandler'),
jsonutils.dumps({'id': node_id,
'mac': self.env.generate_random_mac(),
'status': 'discover'}),
'mac': self.env.generate_random_mac(),
'status': 'discover'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)

View File

@ -632,7 +632,7 @@ class TestNodeNICsHandlersValidation(BaseIntegrationTest):
self.node_nics_put_check_error(
"Node '{0}': '{1}' network(s) are left unassigned".format(
self.env.nodes[0]["id"], unassigned_id)
self.env.nodes[0]["id"], unassigned_id)
)
def test_assignment_change_failed_node_has_unknown_network(self):
@ -640,7 +640,7 @@ class TestNodeNICsHandlersValidation(BaseIntegrationTest):
self.node_nics_put_check_error(
"Network '1234567' doesn't exist for node {0}".format(
self.env.nodes[0]["id"])
self.env.nodes[0]["id"])
)
def test_nic_change_failed_node_has_unknown_interface(self):

View File

@ -203,8 +203,9 @@ class TestDeploymentAttributesSerialization70(
self.env._create_network_group(cluster=self.cluster_db,
name=self.custom_network['name'],
cidr=self.custom_network['cidr'],
vlan_start=
self.custom_network['vlan_start'])
vlan_start=self.custom_network[
'vlan_start'
])
self._add_plugin_network_roles()
self.env.create_node(
api=True,
@ -277,8 +278,7 @@ class TestDeploymentAttributesSerialization70(
for interface in meta['interfaces']:
changed_offloading_modes[interface['name']] = \
NetworkManager._get_modified_offloading_modes(
interface.get('offloading_modes')
)
interface.get('offloading_modes'))
for node in self.serialized_for_astute:
interfaces = node['network_scheme']['interfaces']

View File

@ -376,10 +376,10 @@ class TestPrePostHooks(BasePluginTest):
self.assertEqual(len(rsync), 1)
self.assertEqual(len(cmd_tasks), 2)
for t in plugins_tasks:
#shoud uid be a string
# should uid be a string
self.assertEqual(
sorted(t['uids']), sorted([n.uid for n in self.cluster.nodes]))
#diagnostic name is present only for plugin tasks
# diagnostic name is present only for plugin tasks
self.assertEqual(t['diagnostic_name'], self.plugin.full_name)
apt_update = [t for t in cmd_tasks
if u'apt-get update' in t['parameters']['cmd']]

View File

@ -74,8 +74,9 @@ class TestPutSameJson(base.BaseIntegrationTest):
)
def http_get(self, name, arguments):
"""Makes a GET request to a resource with `name`.
Returns a deserialized dict.
"""Makes a GET request to a resource with `name`
Returns a deserialized dict
"""
resp = self.app.get(
base.reverse(name, kwargs=arguments),

View File

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from six import add_metaclass
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
@ -62,8 +64,9 @@ class RemovedResourcesMeta(BaseTestDataProviderMeta):
methods = ('get', 'head', 'post', 'put', 'delete')
@add_metaclass(RemovedResourcesMeta)
class TestRemovedResources(BaseTestRemovedResources):
__metaclass__ = RemovedResourcesMeta
pass
class RemovedIn51HandlerMeta(BaseTestDataProviderMeta):

View File

@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -299,7 +299,9 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertEqual(task.message, '')
def test_verify_networks_with_dhcp_subtask(self):
"""Test verifies that when dhcp subtask is ready and
"""verify_networks status depends on dhcp subtask
Test verifies that when dhcp subtask is ready and
verify_networks errored - verify_networks will be in error
"""
self.env.create(
@ -492,9 +494,7 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertItemsEqual(task.result, error_nodes)
def test_verify_networks_resp_incomplete_network_data_on_first_node(self):
"""Test verifies that when network data is incomplete on first node
task would not fail and be erred as expected
"""
"""First node network data incompletion causes task fail"""
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
@ -539,8 +539,9 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertEqual(task.result, error_nodes)
def test_verify_networks_resp_without_vlans_only(self):
"""Verify that network verification without vlans passes
when there only iface without vlans configured
"""Net verification without vlans
Passes when only iface without vlans configured
"""
self.env.create(
cluster_kwargs={},
@ -576,9 +577,7 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertEqual(task.status, "ready")
def test_verify_networks_resp_without_vlans_only_erred(self):
"""Verify that network verification without vlans fails
when not all sended info received
"""
"""Net verification without vlans fails when not all info received"""
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
@ -624,8 +623,7 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertEqual(task.result, error_nodes)
def test_verify_networks_resp_partially_without_vlans(self):
"""Verify that network verification partially without vlans passes
"""
"""Verify that network verification partially without vlans passes"""
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
@ -660,8 +658,7 @@ class TestVerifyNetworks(BaseReciverTestCase):
self.assertEqual(task.status, "ready")
def test_verify_networks_with_excluded_networks(self):
"""Verify that network verification can exclude interfaces
"""
"""Verify that network verification can exclude interfaces"""
self.env.create(
cluster_kwargs={},
nodes_kwargs=[

View File

@ -35,8 +35,7 @@ class TestTasksLogging(BaseIntegrationTest):
super(TestTasksLogging, self).tearDown()
def check_keys_included(self, keys, data):
"""Check that only values with keys from 'keys' are present in 'data'
"""
"""Check that only values with keys from keys are present in data"""
if isinstance(data, list):
for d in data:
self.check_keys_included(keys, d)
@ -53,8 +52,10 @@ class TestTasksLogging(BaseIntegrationTest):
def check_task_name_and_sanitized_data(self, pos, logger, task_name,
one_parameter=False):
"""Test task name against known value and check sanitized data doesn't
contain keys which are absent in white_list.
"""Test task name against known value
Check sanitized data doesn't contain keys which are absent in
white_list
:param pos: position of call parameters inside logger.call_args_list,
(negative value: -1 - last call, -2 - pre-last call, etc.)
@ -248,8 +249,8 @@ class TestTasksLogging(BaseIntegrationTest):
consts.TASK_NAMES.check_before_deployment))
def simulate_running_deployment(self, deploy_task, progress=42):
"""To exclude race condition errors in the tests we simulate
running process of deployment
"""To exclude race condition errors in the tests we simulate deployment
:param deploy_task: deploy task object
:param progress: task progress value
"""

Some files were not shown because too many files have changed in this diff Show More