diff --git a/nailgun/nailgun/api/handlers/base.py b/nailgun/nailgun/api/handlers/base.py
index fd3c54e419..a3a25aeea2 100644
--- a/nailgun/nailgun/api/handlers/base.py
+++ b/nailgun/nailgun/api/handlers/base.py
@@ -165,15 +165,21 @@ class BaseHandler(object):
         ) as exc:
             notifier.notify("error", exc.message)
             raise self.http(400, exc.message)
+        except (
+            errors.NotAllowed,
+        ) as exc:
+            raise self.http(403, exc.message)
         except (
             errors.AlreadyExists
         ) as exc:
             raise self.http(409, exc.message)
         except (
             errors.InvalidData,
-            Exception
+            errors.NodeOffline,
         ) as exc:
-            raise self.http(400, str(exc))
+            raise self.http(400, exc.message)
+        except Exception as exc:
+            raise
         return valid_data

     def get_object_or_404(self, model, *args, **kwargs):
@@ -242,20 +248,10 @@ class SingleHandler(BaseHandler):
             obj_id
         )

-        try:
-            data = self.checked_data(
-                self.validator.validate_update,
-                instance=obj
-            )
-        except (
-            errors.InvalidData,
-            errors.NodeOffline
-        ) as exc:
-            raise self.http(400, exc.message)
-        except (
-            errors.AlreadyExists,
-        ) as exc:
-            raise self.http(409, exc.message)
+        data = self.checked_data(
+            self.validator.validate_update,
+            instance=obj
+        )

         self.single.update(obj, data)
         return self.single.to_json(obj)
@@ -283,13 +279,15 @@ class CollectionHandler(BaseHandler):

     validator = BasicValidator
     collection = None
+    eager = ()

     @content_json
     def GET(self):
         """:returns: Collection of JSONized REST objects.
         :http: * 200 (OK)
         """
-        return self.collection.to_json()
+        q = self.collection.eager(None, self.eager)
+        return self.collection.to_json(q)

     @content_json
     def POST(self):
         """:returns: JSONized REST object.
         :http: * 201 (object successfully created)
@@ -298,6 +296,7 @@ class CollectionHandler(BaseHandler):
                * 400 (invalid object data specified)
                * 409 (object with such parameters already exists)
         """
+
         data = self.checked_data()

         try:
diff --git a/nailgun/nailgun/api/handlers/node.py b/nailgun/nailgun/api/handlers/node.py
index 9e13081e5b..6f60ccfbae 100644
--- a/nailgun/nailgun/api/handlers/node.py
+++ b/nailgun/nailgun/api/handlers/node.py
@@ -19,15 +19,14 @@ Handlers dealing with nodes
 """

 from datetime import datetime
-import json
 import traceback

-from sqlalchemy.orm import joinedload
-
 import web

 from nailgun.api.handlers.base import BaseHandler
+from nailgun.api.handlers.base import CollectionHandler
 from nailgun.api.handlers.base import content_json
+from nailgun.api.handlers.base import SingleHandler
 from nailgun.api.serializers.node import NodeInterfacesSerializer
 from nailgun.api.validators.network import NetAssignmentValidator
 from nailgun.api.validators.node import NodeValidator
@@ -45,117 +44,13 @@ from nailgun.network.manager import NetworkManager
 from nailgun import notifier


-class NodeHandler(BaseHandler):
-    fields = ('id', 'name', 'meta', 'progress', 'roles', 'pending_roles',
-              'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name',
-              'pending_addition', 'pending_deletion', 'os_platform',
-              'error_type', 'online', 'cluster', 'uuid')
-    model = Node
+class NodeHandler(SingleHandler):
+
+    single = objects.Node
     validator = NodeValidator

-    @classmethod
-    def render(cls, instance, fields=None):
-        json_data = None
-        try:
-            json_data = BaseHandler.render(instance, fields=cls.fields)
-            json_data['network_data'] = NetworkManager.get_node_networks(
-                instance.id)
-        except Exception:
-            logger.error(traceback.format_exc())
-        return json_data
-
-    @content_json
-    def GET(self, node_id):
-        """:returns: JSONized Node object.
-        :http: * 200 (OK)
-               * 404 (node not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        return self.render(node)
-
-    @content_json
-    def PUT(self, node_id):
-        """:returns: JSONized Node object.
-        :http: * 200 (OK)
-               * 400 (invalid node data specified)
-               * 404 (node not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        if not node.attributes:
-            node.attributes = NodeAttributes(node_id=node.id)
-
-        data = self.checked_data(self.validator.validate_update)
-
-        network_manager = NetworkManager
-
-        old_cluster_id = node.cluster_id
-
-        if data.get("pending_roles") == [] and node.cluster:
-            objects.Cluster.clear_pending_changes(
-                node.cluster,
-                node_id=node.id
-            )
-
-        if "cluster_id" in data:
-            if data["cluster_id"] is None and node.cluster:
-                objects.Cluster.clear_pending_changes(
-                    node.cluster,
-                    node_id=node.id
-                )
-                node.roles = node.pending_roles = []
-                node.reset_name_to_default()
-            node.cluster_id = data["cluster_id"]
-            if node.cluster_id != old_cluster_id:
-                if old_cluster_id:
-                    network_manager.clear_assigned_networks(node)
-                if node.cluster_id:
-                    network_manager = node.cluster.network_manager
-                    network_manager.assign_networks_by_default(node)
-
-        regenerate_volumes = any((
-            'roles' in data and set(data['roles']) != set(node.roles),
-            'pending_roles' in data and
-            set(data['pending_roles']) != set(node.pending_roles),
-            node.cluster_id != old_cluster_id
-        ))
-
-        for key, value in data.iteritems():
-            # we don't allow to update id explicitly
-            # and updated cluster_id before all other fields
-            if key in ("id", "cluster_id"):
-                continue
-            setattr(node, key, value)
-        db().flush()
-
-        if not node.status in ('provisioning', 'deploying'
-                               ) and regenerate_volumes:
-            try:
-                node.attributes.volumes = \
-                    node.volume_manager.gen_volumes_info()
-            except Exception as exc:
-                msg = (
-                    u"Failed to generate volumes "
-                    "info for node '{0}': '{1}'"
-                ).format(
-                    node.name or data.get("mac") or data.get("id"),
-                    str(exc) or "see logs for details"
-                )
-                logger.warning(traceback.format_exc())
-                notifier.notify("error", msg, node_id=node.id)
-        return self.render(node)
-
-    def DELETE(self, node_id):
-        """:returns: Empty string
-        :http: * 204 (node successfully deleted)
-               * 404 (cluster not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        db().delete(node)
-
-        raise self.http(204)
-
-
-class NodeCollectionHandler(BaseHandler):
+class NodeCollectionHandler(CollectionHandler):
     """Node collection handler
     """

@@ -165,25 +60,16 @@ class NodeCollectionHandler(BaseHandler):
               'error_type', 'online', 'cluster', 'uuid')

     validator = NodeValidator
-
-    @classmethod
-    def render(cls, nodes, fields=None):
-        json_list = []
-        network_manager = NetworkManager
-        ips_mapped = network_manager.get_grouped_ips_by_node()
-        networks_grouped = network_manager.get_networks_grouped_by_cluster()
-        for node in nodes:
-            try:
-                json_data = BaseHandler.render(node, fields=cls.fields)
-
-                json_data['network_data'] = network_manager.\
-                    get_node_networks_optimized(
-                        node, ips_mapped.get(node.id, []),
-                        networks_grouped.get(node.cluster_id, []))
-                json_list.append(json_data)
-            except Exception:
-                logger.error(traceback.format_exc())
-        return json_list
+    collection = objects.NodeCollection
+    eager = (
+        'cluster',
+        'nic_interfaces',
+        'nic_interfaces.assigned_networks_list',
+        'bond_interfaces',
+        'bond_interfaces.assigned_networks_list',
+        'role_list',
+        'pending_role_list'
+    )

     @content_json
     def GET(self):
         """:returns: Collection of JSONized Node objects.
         Use offset and limit for pagination.
         In case of not existing offset or limit values
         404 error will be raised
@@ -194,133 +80,14 @@ class NodeCollectionHandler(BaseHandler):
         :http: * 200 (OK)
""" cluster_id = web.input(cluster_id=None).cluster_id - nodes = db().query(Node).options( - joinedload('cluster'), - joinedload('nic_interfaces'), - joinedload('nic_interfaces.assigned_networks_list'), - joinedload('bond_interfaces'), - joinedload('bond_interfaces.assigned_networks_list'), - joinedload('role_list'), - joinedload('pending_role_list')) + nodes = self.collection.eager(None, self.eager) + if cluster_id == '': - nodes = nodes.filter_by( - cluster_id=None).all() + nodes = nodes.filter_by(cluster_id=None) elif cluster_id: - nodes = nodes.filter_by( - cluster_id=cluster_id).all() - else: - nodes = nodes.all() - return self.render(nodes) + nodes = nodes.filter_by(cluster_id=cluster_id) - @content_json - def POST(self): - """:returns: JSONized Node object. - :http: * 201 (cluster successfully created) - * 400 (invalid node data specified) - * 403 (node has incorrect status) - * 409 (node with such parameters already exists) - """ - data = self.checked_data() - if data.get("status", "") != "discover": - msg = u"Node with mac '{0}' was not created, " \ - u"because request status is '{1}'."\ - .format(data[u'mac'], data.get(u'status')) - logger.warning(msg) - raise self.http( - 403, "Only bootstrap nodes are allowed to be registered." - ) - - node = Node( - #always produce unified (i.e. with lowercased letters) - #default name for nodes - name="Untitled ({0})".format(data['mac'][-5:].lower()), - timestamp=datetime.now() - ) - if "cluster_id" in data: - # FIXME(vk): this part is needed only for tests. Normally, - # nodes are created only by agent and POST requests don't contain - # cluster_id, but our integration and unit tests widely use it. - # We need to assign cluster first - cluster_id = data.pop("cluster_id") - if cluster_id: - node.cluster = objects.Cluster.get_by_uid(cluster_id) - for key, value in data.iteritems(): - if key == "id": - continue - elif key == "meta": - node.create_meta(value) - else: - setattr(node, key, value) - - db().add(node) - db().flush() - - node.attributes = NodeAttributes() - - try: - node.attributes.volumes = node.volume_manager.gen_volumes_info() - if node.cluster: - objects.Cluster.add_pending_changes( - node.cluster, - "disks", - node_id=node.id - ) - except Exception as exc: - msg = ( - u"Failed to generate volumes " - "info for node '{0}': '{1}'" - ).format( - node.name or data.get("mac") or data.get("id"), - str(exc) or "see logs for details" - ) - logger.warning(traceback.format_exc()) - notifier.notify("error", msg, node_id=node.id) - db().add(node) - db().flush() - - network_manager = NetworkManager - # Add interfaces for node from 'meta'. - if node.meta and node.meta.get('interfaces'): - network_manager.update_interfaces_info(node) - - if node.cluster_id: - network_manager = node.cluster.network_manager - network_manager.assign_networks_by_default(node) - - try: - # we use multiplier of 1024 because there are no problems here - # with unfair size calculation - ram = str(round(float( - node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM" - except Exception as exc: - logger.warning(traceback.format_exc()) - ram = "unknown RAM" - - try: - # we use multiplier of 1000 because disk vendors specify HDD size - # in terms of decimal capacity. 
-            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
-            # http://physics.nist.gov/cuu/Units/binary.html
-            hd_size = round(float(
-                sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
-            # if HDD > 100 GB we show it's size in TB
-            if hd_size > 100:
-                hd_size = str(hd_size / 1000) + " TB HDD"
-            else:
-                hd_size = str(hd_size) + " GB HDD"
-        except Exception as exc:
-            logger.warning(traceback.format_exc())
-            hd_size = "unknown HDD"
-
-        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
-        notifier.notify(
-            "discover",
-            "New node is discovered: %s CPUs / %s / %s " %
-            (cores, ram, hd_size),
-            node_id=node.id
-        )
-
-        raise self.http(201, json.dumps(NodeHandler.render(node), indent=4))
+        return self.collection.to_json(nodes)

     @content_json
     def PUT(self):
         """:returns: Collection of JSONized Node objects.
@@ -328,112 +95,42 @@ class NodeCollectionHandler(BaseHandler):
         :http: * 200 (nodes are successfully updated)
                * 400 (invalid nodes data specified)
         """
-        data = self.checked_data(self.validator.validate_collection_update)
+        data = self.checked_data(
+            self.validator.validate_collection_update
+        )

-        q = db().query(Node)
         nodes_updated = []
         for nd in data:
-            node = None
-            if nd.get("mac"):
-                node = q.filter_by(mac=nd["mac"]).first() \
-                    or self.validator.validate_existent_node_mac_update(nd)
-            else:
-                node = q.get(nd["id"])
-
-            old_cluster_id = node.cluster_id
-
-            if nd.get("pending_roles") == [] and node.cluster:
-                objects.Cluster.clear_pending_changes(
-                    node.cluster,
-                    node_id=node.id
-                )
-
-            if "cluster_id" in nd:
-                if nd["cluster_id"] is None and node.cluster:
-                    objects.Cluster.clear_pending_changes(
-                        node.cluster,
-                        node_id=node.id
+            node = self.collection.single.get_by_mac_or_uid(
+                mac=nd.get("mac"),
+                node_uid=nd.get("id")
+            )
+            if not node:
+                can_search_by_ifaces = all([
+                    nd.get("mac"),
+                    nd.get("meta"),
+                    nd["meta"].get("interfaces")
+                ])
+                if can_search_by_ifaces:
+                    node = self.collection.single.search_by_interfaces(
+                        nd["meta"]["interfaces"]
                     )
-                    node.roles = node.pending_roles = []
-                    node.reset_name_to_default()
-                node.cluster_id = nd["cluster_id"]
-            regenerate_volumes = any((
-                'roles' in nd and
-                set(nd['roles']) != set(node.roles),
-                'pending_roles' in nd and
-                set(nd['pending_roles']) != set(node.pending_roles),
-                node.cluster_id != old_cluster_id
-            ))
-
-            for key, value in nd.iteritems():
-                if key == "meta":
-                    node.update_meta(value)
-                # don't update node ID
-                elif key != "id":
-                    setattr(node, key, value)
-            db().flush()
-            db().refresh(node)
-            if not node.attributes:
-                node.attributes = NodeAttributes()
-                db().flush()
-            if not node.attributes.volumes:
-                node.attributes.volumes = \
-                    node.volume_manager.gen_volumes_info()
-                db().flush()
-            if not node.status in ('provisioning', 'deploying'):
-                variants = (
-                    "disks" in node.meta and
-                    len(node.meta["disks"]) != len(
-                        filter(
-                            lambda d: d["type"] == "disk",
-                            node.attributes.volumes
-                        )
-                    ),
-                    regenerate_volumes
+            if not node:
+                raise self.http(
+                    404,
+                    "Can't find node: {0}".format(nd)
                 )
-                if any(variants):
-                    try:
-                        node.attributes.volumes = \
-                            node.volume_manager.gen_volumes_info()
-                        if node.cluster:
-                            objects.Cluster.add_pending_changes(
-                                node.cluster,
-                                "disks",
-                                node_id=node.id
-                            )
-                    except Exception as exc:
-                        msg = (
-                            "Failed to generate volumes "
-                            "info for node '{0}': '{1}'"
-                        ).format(
-                            node.name or data.get("mac") or data.get("id"),
-                            str(exc) or "see logs for details"
-                        )
-                        logger.warning(traceback.format_exc())
-                        notifier.notify("error", msg, node_id=node.id)
-
-            db().flush()
-
-            network_manager = NetworkManager
+            self.collection.single.update(node, nd)
             nodes_updated.append(node.id)
-            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
-                if old_cluster_id:
-                    network_manager.clear_assigned_networks(node)
-                if node.cluster:
-                    network_manager = node.cluster.network_manager
-                    network_manager.assign_networks_by_default(node)

         # we need eagerload everything that is used in render
-        nodes = db().query(Node).options(
-            joinedload('cluster'),
-            joinedload('nic_interfaces'),
-            joinedload('nic_interfaces.assigned_networks_list'),
-            joinedload('bond_interfaces'),
-            joinedload('bond_interfaces.assigned_networks_list')).\
-            filter(Node.id.in_(nodes_updated)).all()
-        return self.render(nodes)
+        nodes = self.collection.get_by_id_list(
+            self.collection.eager(None, self.eager),
+            nodes_updated
+        )
+        return self.collection.to_json(nodes)


 class NodeAgentHandler(BaseHandler):
diff --git a/nailgun/nailgun/api/serializers/node.py b/nailgun/nailgun/api/serializers/node.py
index 94fdc82733..bce873ff4e 100644
--- a/nailgun/nailgun/api/serializers/node.py
+++ b/nailgun/nailgun/api/serializers/node.py
@@ -19,6 +19,30 @@ from nailgun import consts
 from nailgun.api.serializers.base import BasicSerializer


+class NodeSerializer(BasicSerializer):
+
+    fields = (
+        'id',
+        'name',
+        'meta',
+        'progress',
+        'roles',
+        'pending_roles',
+        'status',
+        'mac',
+        'fqdn',
+        'ip',
+        'manufacturer',
+        'platform_name',
+        'pending_addition',
+        'pending_deletion',
+        'os_platform',
+        'error_type',
+        'online',
+        'cluster'
+    )
+
+
 class NodeInterfacesSerializer(BasicSerializer):

     nic_fields = (
diff --git a/nailgun/nailgun/api/urls/v1.py b/nailgun/nailgun/api/urls/v1.py
index 3a3871fc1f..9c0dc04097 100644
--- a/nailgun/nailgun/api/urls/v1.py
+++ b/nailgun/nailgun/api/urls/v1.py
@@ -139,7 +139,7 @@ urls = (
     NodeCollectionHandler,
     r'/nodes/agent/?$',
     NodeAgentHandler,
-    r'/nodes/(?P<node_id>\d+)/?$',
+    r'/nodes/(?P<obj_id>\d+)/?$',
     NodeHandler,
     r'/nodes/(?P<node_id>\d+)/disks/?$',
     NodeDisksHandler,
diff --git a/nailgun/nailgun/api/validators/cluster.py b/nailgun/nailgun/api/validators/cluster.py
index 19a1a4d67a..b39985d17e 100644
--- a/nailgun/nailgun/api/validators/cluster.py
+++ b/nailgun/nailgun/api/validators/cluster.py
@@ -25,7 +25,10 @@ class ClusterValidator(BasicValidator):
     def _validate_common(cls, data):
         d = cls.validate_json(data)
         if d.get("name"):
-            if ClusterCollection.filter_by(name=d["name"]).first():
+            if ClusterCollection.filter_by(
+                query=None,
+                name=d["name"]
+            ).first():
                 raise errors.AlreadyExists(
                     "Environment with this name already exists",
                     log_message=True
diff --git a/nailgun/nailgun/api/validators/node.py b/nailgun/nailgun/api/validators/node.py
index 10e8d04264..2ad9d59384 100644
--- a/nailgun/nailgun/api/validators/node.py
+++ b/nailgun/nailgun/api/validators/node.py
@@ -17,6 +17,10 @@
 from nailgun.api.validators.base import BasicValidator
 from nailgun.api.validators.json_schema.disks import disks_simple_format_schema
 from nailgun.api.validators.json_schema.node import node_format_schema
+from nailgun import consts
+
+from nailgun import objects
+
 from nailgun.db import db
 from nailgun.db.sqlalchemy.models import Node
 from nailgun.db.sqlalchemy.models import NodeNICInterface
@@ -98,9 +102,15 @@ class MetaValidator(BasicValidator):


 class NodeValidator(BasicValidator):
     @classmethod
     def validate(cls, data):
+        # TODO(enchantner): rewrite validators to use Node object
         data = cls.validate_json(data)
         cls.validate_schema(data, node_format_schema)

+        if data.get("status", "") != "discover":
+            raise errors.NotAllowed(
+                "Only bootstrap nodes are allowed to be registered."
+            )
+
         if 'mac' not in data:
             raise errors.InvalidData(
                 "No mac address specified",
@@ -173,20 +183,66 @@ class NodeValidator(BasicValidator):
             )

     @classmethod
-    def validate_update(cls, data):
-        d = cls.validate_json(data)
-        if "status" in d and d["status"] not in Node.NODE_STATUSES:
+    def validate_update(cls, data, instance=None):
+        if isinstance(data, (str, unicode)):
+            d = cls.validate_json(data)
+        else:
+            d = data
+
+        if "status" in d and d["status"] not in consts.NODE_STATUSES:
             raise errors.InvalidData(
                 "Invalid status for node",
                 log_message=True
             )
-        if 'roles' in d and 'id' in d:
-            node = db().query(Node).get(d['id'])
+
+        if not d.get("mac") and not d.get("id") and not instance:
+            raise errors.InvalidData(
+                "Neither MAC nor ID is specified",
+                log_message=True
+            )
+
+        q = db().query(Node)
+        if "mac" in d:
+            if not d["mac"]:
+                raise errors.InvalidData(
+                    "Null MAC is specified",
+                    log_message=True
+                )
+            else:
+                existent_node = q.filter_by(mac=d["mac"]).first() \
+                    or cls.validate_existent_node_mac_update(d)
+                if not existent_node:
+                    raise errors.InvalidData(
+                        "Invalid MAC is specified",
+                        log_message=True
+                    )
+
+        if "id" in d and d["id"]:
+            existent_node = q.get(d["id"])
+            if not existent_node:
+                raise errors.InvalidData(
+                    "Invalid ID specified",
+                    log_message=True
+                )
+
+        if "roles" in d:
+            if instance:
+                node = instance
+            else:
+                node = objects.Node.get_by_mac_or_uid(
+                    mac=d.get("mac"),
+                    node_uid=d.get("id")
+                )
             cls.validate_roles(d, node)
+
         if 'meta' in d:
             d['meta'] = MetaValidator.validate_update(d['meta'])
         return d

+    @classmethod
+    def validate_delete(cls, instance):
+        pass
+
     @classmethod
     def validate_collection_update(cls, data):
         d = cls.validate_json(data)
@@ -196,38 +252,8 @@ class NodeValidator(BasicValidator):
                 log_message=True
             )

-        q = db().query(Node)
         for nd in d:
-            if not nd.get("mac") and not nd.get("id"):
-                raise errors.InvalidData(
-                    "Neither MAC nor ID is specified",
-                    log_message=True
-                )
-            if "mac" in nd and not nd["mac"]:
-                raise errors.InvalidData(
-                    "Null MAC is specified",
-                    log_message=True
-                )
-            else:
-                if nd.get("mac"):
-                    existent_node = q.filter_by(mac=nd["mac"]).first() \
-                        or cls.validate_existent_node_mac_update(nd)
-                    if not existent_node:
-                        raise errors.InvalidData(
-                            "Invalid MAC specified",
-                            log_message=True
-                        )
-                if nd.get("id"):
-                    existent_node = q.get(nd["id"])
-                    if not existent_node:
-                        raise errors.InvalidData(
-                            "Invalid ID specified",
-                            log_message=True
-                        )
-                if 'roles' in nd:
-                    cls.validate_roles(nd, existent_node)
-                if 'meta' in nd:
-                    nd['meta'] = MetaValidator.validate_update(nd['meta'])
+            cls.validate_update(nd)

         return d
diff --git a/nailgun/nailgun/consts.py b/nailgun/nailgun/consts.py
index 4f6f3dfcfe..1f4a174b5b 100644
--- a/nailgun/nailgun/consts.py
+++ b/nailgun/nailgun/consts.py
@@ -71,6 +71,21 @@ CLUSTER_NET_SEGMENT_TYPES = Enum(
     'gre'
 )

+NODE_STATUSES = Enum(
+    'ready',
+    'discover',
+    'provisioning',
+    'provisioned',
+    'deploying',
+    'error'
+)
+
+NODE_ERRORS = Enum(
+    'deploy',
+    'provision',
+    'deletion'
+)
+
 NETWORK_INTERFACE_TYPES = Enum(
     'ether',
     'bond'
diff --git a/nailgun/nailgun/db/sqlalchemy/fixman.py b/nailgun/nailgun/db/sqlalchemy/fixman.py
index 8f180e90f5..ce1ab15433 100644
--- a/nailgun/nailgun/db/sqlalchemy/fixman.py
+++ b/nailgun/nailgun/db/sqlalchemy/fixman.py
@@ -30,7 +30,7 @@ import sqlalchemy.types
 from nailgun.db import db
 from nailgun.db.sqlalchemy import models
 from nailgun.logger import logger
-from nailgun.network.manager import NetworkManager
+from nailgun import objects
 from nailgun.settings import settings
 from nailgun.utils import dict_merge

@@ -178,11 +178,9 @@ def upload_fixture(fileobj, loader=None):

             # UGLY HACK for testing
             if new_obj.__class__.__name__ == 'Node':
-                new_obj.attributes = models.NodeAttributes()
-                db().commit()
-                new_obj.attributes.volumes = \
-                    new_obj.volume_manager.gen_volumes_info()
-                NetworkManager.update_interfaces_info(new_obj)
+                objects.Node.create_attributes(new_obj)
+                objects.Node.update_volumes(new_obj)
+                objects.Node.update_interfaces(new_obj)
                 db().commit()
diff --git a/nailgun/nailgun/db/sqlalchemy/models/node.py b/nailgun/nailgun/db/sqlalchemy/models/node.py
old mode 100644
new mode 100755
index c786f1b18c..fcee5448af
--- a/nailgun/nailgun/db/sqlalchemy/models/node.py
+++ b/nailgun/nailgun/db/sqlalchemy/models/node.py
@@ -68,28 +68,15 @@ class Role(Base):

 class Node(Base):
     __tablename__ = 'nodes'
-    NODE_STATUSES = (
-        'ready',
-        'discover',
-        'provisioning',
-        'provisioned',
-        'deploying',
-        'error'
-    )
-    NODE_ERRORS = (
-        'deploy',
-        'provision',
-        'deletion'
-    )
     id = Column(Integer, primary_key=True)
     uuid = Column(String(36), nullable=False,
                   default=lambda: str(uuid.uuid4()), unique=True)
     cluster_id = Column(Integer, ForeignKey('clusters.id'))
     name = Column(Unicode(100))
     status = Column(
-        Enum(*NODE_STATUSES, name='node_status'),
+        Enum(*consts.NODE_STATUSES, name='node_status'),
         nullable=False,
-        default='discover'
+        default=consts.NODE_STATUSES.discover
     )
     meta = Column(JSON, default={})
     mac = Column(LowercaseString(17), nullable=False, unique=True)
@@ -102,7 +89,7 @@ class Node(Base):
     pending_addition = Column(Boolean, default=False)
     pending_deletion = Column(Boolean, default=False)
     changes = relationship("ClusterChanges", backref="node")
-    error_type = Column(Enum(*NODE_ERRORS, name='node_error_type'))
+    error_type = Column(Enum(*consts.NODE_ERRORS, name='node_error_type'))
     error_msg = Column(String(255))
     timestamp = Column(DateTime, nullable=False)
     online = Column(Boolean, default=True)
@@ -256,16 +243,17 @@ class Node(Base):
     def update_meta(self, data):
         # helper for basic checking meta before updation
         result = []
-        for iface in data["interfaces"]:
-            if not self._check_interface_has_required_params(iface):
-                logger.warning(
-                    "Invalid interface data: {0}. "
-                    "Interfaces are not updated.".format(iface)
-                )
-                data["interfaces"] = self.meta.get("interfaces")
-                self.meta = data
-                return
-            result.append(self._clean_iface(iface))
+        if "interfaces" in data:
+            for iface in data["interfaces"]:
+                if not self._check_interface_has_required_params(iface):
+                    logger.warning(
+                        "Invalid interface data: {0}. "
+                        "Interfaces are not updated.".format(iface)
+                    )
+                    data["interfaces"] = self.meta.get("interfaces")
+                    self.meta = data
+                    return
+                result.append(self._clean_iface(iface))

         data["interfaces"] = result
         self.meta = data
@@ -273,14 +261,15 @@ class Node(Base):
     def create_meta(self, data):
         # helper for basic checking meta before creation
         result = []
-        for iface in data["interfaces"]:
-            if not self._check_interface_has_required_params(iface):
-                logger.warning(
-                    "Invalid interface data: {0}. "
-                    "Skipping interface.".format(iface)
-                )
-                continue
-            result.append(self._clean_iface(iface))
+        if "interfaces" in data:
+            for iface in data["interfaces"]:
+                if not self._check_interface_has_required_params(iface):
+                    logger.warning(
+                        "Invalid interface data: {0}. "
" + "Skipping interface.".format(iface) + ) + continue + result.append(self._clean_iface(iface)) data["interfaces"] = result self.meta = data diff --git a/nailgun/nailgun/errors/__init__.py b/nailgun/nailgun/errors/__init__.py index d81c5ab2ef..c6e35ea3a5 100644 --- a/nailgun/nailgun/errors/__init__.py +++ b/nailgun/nailgun/errors/__init__.py @@ -25,6 +25,7 @@ default_messages = { # REST errors "CannotDelete": "Can't delete object", "CannotCreate": "Can't create object", + "NotAllowed": "Action is not allowed", "InvalidField": "Invalid field specified for object", # node discovering errors diff --git a/nailgun/nailgun/network/manager.py b/nailgun/nailgun/network/manager.py index 34745c4584..78f9a822c2 100644 --- a/nailgun/nailgun/network/manager.py +++ b/nailgun/nailgun/network/manager.py @@ -29,7 +29,7 @@ from netaddr import IPRange from sqlalchemy.orm import joinedload from sqlalchemy.sql import not_ -from nailgun import objects +from nailgun.objects import Cluster from nailgun import consts from nailgun.db import db @@ -243,7 +243,7 @@ class NetworkManager(object): :returns: None :raises: Exception """ - cluster = objects.Cluster.get_by_uid(cluster_id) + cluster = Cluster.get_by_uid(cluster_id) if not cluster: raise Exception(u"Cluster id='%s' not found" % cluster_id) @@ -362,6 +362,7 @@ class NetworkManager(object): ips = ips.filter_by(node=node_id) if network_id: ips = ips.filter_by(network=network_id) + try: admin_net_id = cls.get_admin_network_group_id() except errors.AdminNetworkNotFound: @@ -445,9 +446,13 @@ class NetworkManager(object): # Assign remaining networks to NIC #0 # as all the networks must be assigned. # But network check will not pass if we get here. - logger.warn("Cannot assign all networks appropriately for" - " node %r. Set all unassigned networks to the" - " interface %r", node.name, nics[0]['name']) + logger.warn( + u"Cannot assign all networks appropriately for" + u"node %r. 
+                u" interface %r",
+                node.name,
+                nics[0]['name']
+            )
             for ng_id in to_assign_ids:
                 nics[0].setdefault('assigned_networks', []).append(
                     {'id': ng_id, 'name': ngs_by_id[ng_id].name})
@@ -731,7 +736,9 @@ class NetworkManager(object):
         try:
             cls.__check_interfaces_correctness(node)
         except errors.InvalidInterfacesInfo as e:
-            logger.warn("Cannot update interfaces: %s" % str(e))
+            logger.warn(
+                "Cannot update interfaces: {0}".format(str(e))
+            )
             return

         for interface in node.meta["interfaces"]:
@@ -790,14 +797,14 @@ class NetworkManager(object):
         interface.node_id = node.id
         cls.__set_interface_attributes(interface, interface_attrs)
         db().add(interface)
-        db().commit()
-        node.nic_interfaces.append(interface)
+        db().flush()

     @classmethod
     def __update_existing_interface(cls, interface_id, interface_attrs):
         interface = db().query(NodeNICInterface).get(interface_id)
         cls.__set_interface_attributes(interface, interface_attrs)
-        db().commit()
+        db().add(interface)
+        db().flush()

     @classmethod
     def __set_interface_attributes(cls, interface, interface_attrs):
@@ -830,6 +837,7 @@ class NetworkManager(object):
                     mac_addresses,
                     node_name))

         map(db().delete, interfaces_to_delete)
+        db().flush()

     @classmethod
     def get_admin_ip_for_node(cls, node):
@@ -887,7 +895,7 @@ class NetworkManager(object):

     @classmethod
     def get_end_point_ip(cls, cluster_id):
-        cluster_db = objects.Cluster.get_by_uid(cluster_id)
+        cluster_db = Cluster.get_by_uid(cluster_id)
         ip = None
         if cluster_db.is_ha_mode:
             ip = cls.assign_vip(cluster_db.id, "public")
@@ -988,7 +996,7 @@ class NetworkManager(object):
         :type cluster_id: int
         :returns: None
         """
-        cluster_db = objects.Cluster.get_by_uid(cluster_id)
+        cluster_db = Cluster.get_by_uid(cluster_id)
         networks_metadata = cluster_db.release.networks_metadata
         networks_list = networks_metadata[cluster_db.net_provider]["networks"]
         used_nets = [IPNetwork(cls.get_admin_network_group().cidr)]
@@ -1078,7 +1086,7 @@ class NetworkManager(object):
                 cls.update_cidr_from_gw_mask(ng_db, ng)
             if ng_db.meta.get("notation"):
                 cls.cleanup_network_group(ng_db)
-            objects.Cluster.add_pending_changes(ng_db.cluster, 'networks')
+            Cluster.add_pending_changes(ng_db.cluster, 'networks')

     @classmethod
     def cluster_has_bonds(cls, cluster_id):
diff --git a/nailgun/nailgun/objects/__init__.py b/nailgun/nailgun/objects/__init__.py
index 70cb3dcce4..e760e0e39a 100644
--- a/nailgun/nailgun/objects/__init__.py
+++ b/nailgun/nailgun/objects/__init__.py
@@ -24,5 +24,8 @@ from nailgun.objects.cluster import Attributes
 from nailgun.objects.cluster import Cluster
 from nailgun.objects.cluster import ClusterCollection

+from nailgun.objects.node import Node
+from nailgun.objects.node import NodeCollection
+
 from nailgun.objects.task import Task
 from nailgun.objects.task import TaskCollection
diff --git a/nailgun/nailgun/objects/base.py b/nailgun/nailgun/objects/base.py
index 6304350567..3f343616df 100644
--- a/nailgun/nailgun/objects/base.py
+++ b/nailgun/nailgun/objects/base.py
@@ -16,6 +16,8 @@

 import json

+from sqlalchemy.orm import joinedload
+
 from nailgun.api.serializers.base import BasicSerializer
 from nailgun.db import db
 from nailgun.errors import errors
@@ -93,7 +95,7 @@ class NailgunCollection(object):
         ).yield_per(yield_per)

     @classmethod
-    def filter_by(cls, yield_per=100, **kwargs):
+    def filter_by(cls, query, yield_per=100, **kwargs):
         for k in kwargs.iterkeys():
             if k not in cls.single.schema["properties"]:
                 raise AttributeError(
@@ -103,14 +105,25 @@ class NailgunCollection(object):
                     )
                 )

-        return db().query(
-            cls.single.model
-        ).filter_by(
-            **kwargs
-        ).yield_per(yield_per)
+        use_query = query or cls.all(yield_per=yield_per)
+        return use_query.filter_by(**kwargs)

     @classmethod
-    def to_list(cls, fields=None, yield_per=100, query=None):
+    def get_by_id_list(cls, query, id_list, yield_per=100):
+        use_query = query or cls.all(yield_per=yield_per)
+        return use_query.filter(cls.single.model.id.in_(id_list))
+
+    @classmethod
+    def eager(cls, query, fields, yield_per=100):
+        use_query = query or cls.all(yield_per=yield_per)
+        if fields:
+            return use_query.options(
+                *[joinedload(f) for f in fields]
+            )
+        return use_query
+
+    @classmethod
+    def to_list(cls, query=None, fields=None, yield_per=100):
         use_query = query or cls.all(yield_per=yield_per)
         return map(
             lambda o: cls.single.to_dict(o, fields=fields),
@@ -118,7 +131,7 @@ class NailgunCollection(object):
         )

     @classmethod
-    def to_json(cls, fields=None, yield_per=100, query=None):
+    def to_json(cls, query=None, fields=None, yield_per=100):
         return json.dumps(
             cls.to_list(
                 fields=fields,
diff --git a/nailgun/nailgun/objects/node.py b/nailgun/nailgun/objects/node.py
new file mode 100755
index 0000000000..434380c3d4
--- /dev/null
+++ b/nailgun/nailgun/objects/node.py
@@ -0,0 +1,386 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import traceback
+
+from datetime import datetime
+
+from nailgun import consts
+
+from nailgun.api.serializers.node import NodeSerializer
+
+from nailgun import notifier
+
+from nailgun.db import db
+from nailgun.db.sqlalchemy import models
+from nailgun.errors import errors
+from nailgun.logger import logger
+from nailgun.network.manager import NetworkManager
+
+from nailgun.objects import Cluster
+from nailgun.objects import NailgunCollection
+from nailgun.objects import NailgunObject
+
+
+class Node(NailgunObject):
+
+    model = models.Node
+    serializer = NodeSerializer
+
+    schema = {
+        "$schema": "http://json-schema.org/draft-04/schema#",
+        "title": "Node",
+        "description": "Serialized Node object",
+        "type": "object",
+        "properties": {
+            "id": {"type": "number"},
+            "cluster_id": {"type": "number"},
+            "name": {"type": "string"},
+            "status": {
+                "type": "string",
+                "enum": list(consts.NODE_STATUSES)
+            },
+            "meta": {"type": "object"},
+            "mac": {"type": "string"},
+            "api": {"type": "string"},
+            "fqdn": {"type": "string"},
+            "manufacturer": {"type": "string"},
+            "platform_name": {"type": "string"},
+            "progress": {"type": "number"},
+            "os_platform": {"type": "string"},
+            "pending_addition": {"type": "boolean"},
+            "pending_deletion": {"type": "boolean"},
+            "error_type": {
+                "type": "string",
+                "enum": list(consts.NODE_ERRORS)
+            },
+            "error_msg": {"type": "string"},
+            "online": {"type": "boolean"},
+            "roles": {"type": "array"},
+            "pending_roles": {"type": "array"},
+            "agent_checksum": {"type": "string"}
+        }
+    }
+
+    @classmethod
+    def get_by_mac_or_uid(cls, mac=None, node_uid=None):
+        node = None
+        if not mac and not node_uid:
+            return node
+
+        q = db().query(cls.model)
+        if mac:
+            node = q.filter_by(mac=mac).first()
+        else:
+            node = q.get(node_uid)
+        return node
+
+    @classmethod
+    def search_by_interfaces(cls, interfaces):
+        return db().query(cls.model).join(
+            models.NodeNICInterface,
+            cls.model.nic_interfaces
+        ).filter(
+            models.NodeNICInterface.mac.in_(
+                [n["mac"] for n in interfaces]
+            )
+        ).first()
+
+    @classmethod
+    def create(cls, data):
+        if "name" not in data:
+            data["name"] = "Untitled ({0})".format(
+                data['mac'][-5:].lower()
+            )
+        data["timestamp"] = datetime.now()
+        data.pop("id", None)
+
+        #TODO(enchantner): fix this temporary hack in clients
+        if "cluster_id" not in data and "cluster" in data:
+            cluster_id = data.pop("cluster", None)
+            data["cluster_id"] = cluster_id
+
+        roles = data.pop("roles", None)
+        pending_roles = data.pop("pending_roles", None)
+
+        new_node_meta = data.pop("meta", {})
+        new_node_cluster_id = data.pop("cluster_id", None)
+        new_node = super(Node, cls).create(data)
+        new_node.create_meta(new_node_meta)
+        db().flush()
+
+        # Add interfaces for node from 'meta'.
+        if new_node.meta and new_node.meta.get('interfaces'):
+            cls.update_interfaces(new_node)
+
+        # adding node into cluster
+        if new_node_cluster_id:
+            cls.add_into_cluster(new_node, new_node_cluster_id)
+
+        # updating roles
+        if roles is not None:
+            cls.update_roles(new_node, roles)
+        if pending_roles is not None:
+            cls.update_pending_roles(new_node, pending_roles)
+
+        # creating attributes
+        cls.create_attributes(new_node)
+        cls.update_volumes(new_node)
+
+        cls.create_discover_notification(new_node)
+        return new_node
+
+    @classmethod
+    def create_attributes(cls, instance):
+        new_attributes = models.NodeAttributes()
+        instance.attributes = new_attributes
+        db().add(new_attributes)
+        db().add(instance)
+        db().flush()
+        return new_attributes
+
+    @classmethod
+    def update_interfaces(cls, instance):
+        NetworkManager.update_interfaces_info(instance)
+
+    @classmethod
+    def update_volumes(cls, instance):
+        attrs = instance.attributes
+        if not attrs:
+            attrs = cls.create_attributes(instance)
+
+        try:
+            attrs.volumes = instance.volume_manager.gen_volumes_info()
+        except Exception as exc:
+            msg = (
+                u"Failed to generate volumes "
+                u"info for node '{0}': '{1}'"
+            ).format(
+                instance.name or instance.mac or instance.id,
+                str(exc) or "see logs for details"
+            )
+            logger.warning(traceback.format_exc())
+            notifier.notify("error", msg, node_id=instance.id)
+
+        if instance.cluster_id:
+            Cluster.add_pending_changes(
+                instance.cluster,
+                "disks",
+                node_id=instance.id
+            )
+
+        db().add(attrs)
+        db().flush()
+
+    @classmethod
+    def create_discover_notification(cls, instance):
+        try:
+            # we use multiplier of 1024 because there are no problems here
+            # with unfair size calculation
+            ram = str(round(float(
+                instance.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
+        except Exception:
+            logger.warning(traceback.format_exc())
+            ram = "unknown RAM"
+
+        try:
+            # we use multiplier of 1000 because disk vendors specify HDD size
+            # in terms of decimal capacity. Sources:
+            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
+            # http://physics.nist.gov/cuu/Units/binary.html
+            hd_size = round(
+                float(
+                    sum(
+                        [d["size"] for d in instance.meta["disks"]]
+                    ) / 1000000000
+                ),
+                1
+            )
+            # if HDD > 100 GB we show its size in TB
+            if hd_size > 100:
+                hd_size = str(hd_size / 1000) + " TB HDD"
+            else:
+                hd_size = str(hd_size) + " GB HDD"
+        except Exception:
+            logger.warning(traceback.format_exc())
+            hd_size = "unknown HDD"
+
+        cores = str(instance.meta.get('cpu', {}).get('total', "unknown"))
+        notifier.notify(
+            "discover",
+            "New node is discovered: %s CPUs / %s / %s " %
+            (cores, ram, hd_size),
+            node_id=instance.id
+        )
+
+    @classmethod
+    def update(cls, instance, data):
+        data.pop("id", None)
+
+        roles = data.pop("roles", None)
+        pending_roles = data.pop("pending_roles", None)
+        new_meta = data.pop("meta", None)
+
+        #TODO(enchantner): fix this temporary hack in clients
+        if "cluster_id" not in data and "cluster" in data:
+            cluster_id = data.pop("cluster", None)
+            data["cluster_id"] = cluster_id
+
+        if new_meta:
+            instance.update_meta(new_meta)
+            # smarter check needed
+            cls.update_interfaces(instance)
+
+        new_cluster_id = instance.cluster_id
+        cluster_changed = False
+        if "cluster_id" in data:
+            new_cluster_id = data.pop("cluster_id")
+            if instance.cluster_id:
+                if new_cluster_id is None:
+                    # removing node from cluster
+                    cluster_changed = True
+                    cls.remove_from_cluster(instance)
+                elif new_cluster_id != instance.cluster_id:
+                    # changing node cluster to another
+                    # (is currently not allowed)
+                    raise errors.CannotUpdate(
+                        u"Changing cluster on the fly is not allowed"
+                    )
+            else:
+                if new_cluster_id is not None:
+                    # assigning node to cluster
+                    cluster_changed = True
+                    cls.add_into_cluster(instance, new_cluster_id)
+
+        # calculating flags
+        roles_changed = (
+            roles is not None and set(roles) != set(instance.roles)
+        )
+        pending_roles_changed = (
+            pending_roles is not None and
+            set(pending_roles) != set(instance.pending_roles)
+        )
+
+        super(Node, cls).update(instance, data)
+
+        if roles_changed:
+            cls.update_roles(instance, roles)
+        if pending_roles_changed:
+            cls.update_pending_roles(instance, pending_roles)
+
+        if any((
+            roles_changed,
+            pending_roles_changed,
+            cluster_changed
+        )) and instance.status not in (
+            consts.NODE_STATUSES.provisioning,
+            consts.NODE_STATUSES.deploying
+        ):
+            cls.update_volumes(instance)
+
+        return instance
+
+    @classmethod
+    def update_roles(cls, instance, new_roles):
+        if not instance.cluster_id:
+            logger.warning(
+                u"Attempting to assign roles to node "
+                u"'{0}' which isn't added to cluster".format(
+                    instance.name or instance.id
+                )
+            )
+            return
+
+        instance.role_list = db().query(models.Role).filter_by(
+            release_id=instance.cluster.release_id,
+        ).filter(
+            models.Role.name.in_(new_roles)
+        ).all()
+        db().flush()
+        db().refresh(instance)
+
+    @classmethod
+    def update_pending_roles(cls, instance, new_pending_roles):
+        if not instance.cluster_id:
+            logger.warning(
+                u"Attempting to assign pending roles to node "
+                u"'{0}' which isn't added to cluster".format(
+                    instance.name or instance.id
+                )
+            )
+            return
+
+        logger.debug(
+            u"Updating pending roles for node {0}: {1}".format(
+                instance.id,
+                new_pending_roles
+            )
+        )
+
+        if new_pending_roles == []:
+            instance.pending_role_list = []
+            # research why the hell we need this
+            Cluster.clear_pending_changes(
+                instance.cluster,
+                node_id=instance.id
+            )
+        else:
+            instance.pending_role_list = db().query(models.Role).filter_by(
+                release_id=instance.cluster.release_id,
+            ).filter(
+                models.Role.name.in_(new_pending_roles)
+            ).all()
+
+        db().flush()
+        db().refresh(instance)
+
+    @classmethod
+    def add_into_cluster(cls, instance, cluster_id):
+        instance.cluster_id = cluster_id
+        db().flush()
+        network_manager = instance.cluster.network_manager
+        network_manager.assign_networks_by_default(instance)
+
+    @classmethod
+    def remove_from_cluster(cls, instance):
+        Cluster.clear_pending_changes(
+            instance.cluster,
+            node_id=instance.id
+        )
+        instance.cluster_id = None
+        instance.roles = instance.pending_roles = []
+        instance.reset_name_to_default()
+        db().flush()
+        db().refresh(instance)
+        NetworkManager.clear_assigned_networks(instance)
+
+    @classmethod
+    def to_dict(cls, instance, fields=None):
+        node_dict = super(Node, cls).to_dict(instance, fields=fields)
+
+        ips_mapped = NetworkManager.get_grouped_ips_by_node()
+        networks_grouped = NetworkManager.get_networks_grouped_by_cluster()
+
+        node_dict['network_data'] = NetworkManager.get_node_networks_optimized(
+            instance,
+            ips_mapped.get(instance.id, []),
+            networks_grouped.get(instance.cluster_id, [])
+        )
+        return node_dict
+
+
+class NodeCollection(NailgunCollection):
+
+    single = Node
diff --git a/nailgun/nailgun/objects/task.py b/nailgun/nailgun/objects/task.py
index 3d77cd9785..1aa569a6de 100644
--- a/nailgun/nailgun/objects/task.py
+++ b/nailgun/nailgun/objects/task.py
@@ -77,5 +77,5 @@ class TaskCollection(NailgunCollection):
     @classmethod
     def get_by_cluster_id(cls, cluster_id):
         if cluster_id == '':
-            return cls.filter_by(cluster_id=None)
-        return cls.filter_by(cluster_id=cluster_id)
+            return cls.filter_by(None, cluster_id=None)
+        return cls.filter_by(None, cluster_id=cluster_id)
diff --git a/nailgun/nailgun/task/task.py b/nailgun/nailgun/task/task.py
index 605e09c9ea..8134e8570a 100644
--- a/nailgun/nailgun/task/task.py
+++ b/nailgun/nailgun/task/task.py
@@ -453,8 +453,10 @@ class CheckBeforeDeploymentTask(object):
             node.volume_manager.check_disk_space_for_deployment()
         except errors.NotEnoughFreeSpace:
             raise errors.NotEnoughFreeSpace(
-                u"Node '%s' has insufficient disk space" %
-                node.human_readable_name)
+                u"Node '{0}' has insufficient disk space".format(
+                    node.human_readable_name
+                )
+            )

     @classmethod
     def _check_volumes(cls, task):
diff --git a/nailgun/nailgun/test/base.py b/nailgun/nailgun/test/base.py
index 3549895225..e3c657736d 100644
--- a/nailgun/nailgun/test/base.py
+++ b/nailgun/nailgun/test/base.py
@@ -46,7 +46,6 @@ from nailgun.logger import logger

 from nailgun.db.sqlalchemy.fixman import load_fixture
 from nailgun.db.sqlalchemy.fixman import upload_fixture
-from nailgun.db.sqlalchemy.models import Node
 from nailgun.db.sqlalchemy.models import NodeAttributes
 from nailgun.db.sqlalchemy.models import NodeNICInterface
 from nailgun.db.sqlalchemy.models import Notification
@@ -55,6 +54,7 @@ from nailgun.db.sqlalchemy.models import Task

 # here come objects
 from nailgun.objects import Cluster
+from nailgun.objects import Node
 from nailgun.objects import Release

 from nailgun.app import build_app
@@ -250,7 +250,7 @@ class Environment(object):
                 return None
             self.tester.assertEquals(resp.status_code, expect_http)
             node = json.loads(resp.body)
-            node_db = self.db.query(Node).get(node['id'])
+            node_db = Node.get_by_uid(node['id'])
             if 'interfaces' not in node_data['meta'] \
                     or not node_data['meta']['interfaces']:
                 self._set_interfaces_if_not_set_in_meta(
                     node_db,
                     kwargs.get('meta', None))
             self.nodes.append(node_db)
         else:
-            node = Node()
-            node.timestamp = datetime.now()
-            if 'cluster_id' in node_data:
-                cluster_id = node_data.pop('cluster_id')
-                for cluster in self.clusters:
-                    if cluster.id == cluster_id:
-                        node.cluster = cluster
-                        break
-                else:
-                    node.cluster_id = cluster_id
-            for key, value in node_data.iteritems():
-                setattr(node, key, value)
-            node.attributes = self.create_attributes()
-            node.attributes.volumes = node.volume_manager.gen_volumes_info()
-            self.db.add(node)
-            self.db.commit()
-            if node.meta and node.meta.get('interfaces'):
-                self._create_interfaces_from_meta(node)
-
+            node = Node.create(node_data)
+            db().commit()
             self.nodes.append(node)

         return node
diff --git a/nailgun/nailgun/test/integration/test_changes_model.py b/nailgun/nailgun/test/integration/test_changes_model.py
index 2fd4cfc806..24489ff48c 100644
--- a/nailgun/nailgun/test/integration/test_changes_model.py
+++ b/nailgun/nailgun/test/integration/test_changes_model.py
@@ -190,8 +190,12 @@ class TestClusterChanges(BaseIntegrationTest):
             name="networks"
         ).all()
         self.assertEquals(len(networks_changes), 1)
+        disks_changes = self.db.query(ClusterChanges).filter_by(
+            name="disks"
+        ).all()
+        self.assertEquals(len(disks_changes), 1)
         all_changes = self.db.query(ClusterChanges).all()
-        self.assertEquals(len(all_changes), 2)
+        self.assertEquals(len(all_changes), 3)

     @fake_tasks(godmode=True)
     def test_role_unassignment_drops_changes(self):
@@ -209,7 +213,7 @@ class TestClusterChanges(BaseIntegrationTest):
         )
         self.app.put(
             reverse("NodeHandler",
-                    kwargs={"node_id": new_node["id"]}),
+                    kwargs={"obj_id": new_node["id"]}),
             json.dumps({
                 "cluster": None,
                 "pending_addition": False,
@@ -217,7 +221,6 @@ class TestClusterChanges(BaseIntegrationTest):
             }),
             headers=self.default_headers
         )
-
         all_changes = self.db.query(ClusterChanges).filter_by(
             cluster_id=self.env.clusters[0].id,
             node_id=new_node["id"]
diff --git a/nailgun/nailgun/test/integration/test_deployment_error_handling.py b/nailgun/nailgun/test/integration/test_deployment_error_handling.py
index 94b7a47a10..a1bca0978f 100644
--- a/nailgun/nailgun/test/integration/test_deployment_error_handling.py
+++ b/nailgun/nailgun/test/integration/test_deployment_error_handling.py
@@ -81,11 +81,11 @@ class TestErrors(BaseIntegrationTest):
         self.assertIsNotNone(
             self.db.query(Notification).filter_by(message=err_msg).first()
         )
-        self.assertEqual(
+        self.assertIsNotNone(
             self.db.query(Notification).filter_by(
-                node_id=self.env.nodes[2].id
-            ).first().message,
-            "Failed to deploy node 'Third': I forgot about teapot!"
+                node_id=self.env.nodes[2].id,
+                message="Failed to deploy node 'Third': I forgot about teapot!"
+            ).first()
         )
         self.env.refresh_nodes()
         self.env.refresh_clusters()
diff --git a/nailgun/nailgun/test/integration/test_node_collection_handlers.py b/nailgun/nailgun/test/integration/test_node_collection_handlers.py
index 32296c9274..fea4b78be7 100644
--- a/nailgun/nailgun/test/integration/test_node_collection_handlers.py
+++ b/nailgun/nailgun/test/integration/test_node_collection_handlers.py
@@ -207,14 +207,16 @@ class TestHandlers(BaseIntegrationTest):
             json.dumps([{'id': None,
                          'mac': node.mac,
                          'manufacturer': 'man5'}]),
-            headers=self.default_headers)
+            headers=self.default_headers
+        )
         self.assertEquals(resp.status_code, 200)

         resp = self.app.put(
             reverse('NodeCollectionHandler'),
             json.dumps([{'id': node.id,
                          'manufacturer': 'man6'}]),
-            headers=self.default_headers)
+            headers=self.default_headers
+        )
         self.assertEquals(resp.status_code, 200)

         resp = self.app.put(
@@ -335,7 +337,8 @@ class TestHandlers(BaseIntegrationTest):
         )
         node2_json = {
             "mac": self.env.generate_random_mac(),
-            "meta": self.env.default_metadata()
+            "meta": self.env.default_metadata(),
+            "status": "discover"
         }
         node2_json["meta"]["interfaces"][0]["mac"] = node1.mac
         resp = self.app.post(
@@ -424,7 +427,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         resp = self.app.post(
             reverse('NodeCollectionHandler'),
-            json.dumps({'mac': node.mac}),
+            json.dumps({'mac': node.mac, 'status': 'discover'}),
             headers=self.default_headers,
             expect_errors=True)
         self.assertEquals(409, resp.status_code)
@@ -485,7 +488,7 @@ class TestHandlers(BaseIntegrationTest):
         )[0]['id']

         self.app.delete(
-            reverse('NodeHandler', {'node_id': node_id})
+            reverse('NodeHandler', {'obj_id': node_id})
         )
         node_name_test(node_mac.lower())
diff --git a/nailgun/nailgun/test/integration/test_node_handler.py b/nailgun/nailgun/test/integration/test_node_handler.py
index e140768bf0..64d8847882 100644
--- a/nailgun/nailgun/test/integration/test_node_handler.py
+++ b/nailgun/nailgun/test/integration/test_node_handler.py
@@ -26,7 +26,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_node_get(self):
         node = self.env.create_node(api=False)
         resp = self.app.get(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             headers=self.default_headers)
         self.assertEquals(200, resp.status_code)
         response = json.loads(resp.body)
@@ -60,7 +60,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_node_deletion(self):
         node = self.env.create_node(api=False)
         resp = self.app.delete(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             "",
             headers=self.default_headers,
             expect_errors=True
         )
@@ -71,7 +71,7 @@ class TestHandlers(BaseIntegrationTest):
         new_metadata = self.env.default_metadata()
         node = self.env.create_node(api=False)
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps({'meta': new_metadata}),
             headers=self.default_headers)
         self.assertEquals(resp.status_code, 200)
@@ -87,7 +87,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         params = {'status': 'error'}
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps(params),
             headers=self.default_headers)
         self.assertEquals(resp.status_code, 200)
@@ -97,7 +97,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         for flag in flags:
             resp = self.app.put(
-                reverse('NodeHandler', kwargs={'node_id': node.id}),
+                reverse('NodeHandler', kwargs={'obj_id': node.id}),
                 json.dumps({flag: True}),
                 headers=self.default_headers
             )
@@ -113,7 +113,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_put_returns_400_if_no_body(self):
         node = self.env.create_node(api=False)
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             "",
             headers=self.default_headers,
             expect_errors=True)
@@ -123,7 +123,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         params = {'status': 'invalid_status'}
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps(params),
             headers=self.default_headers,
             expect_errors=True)
diff --git a/nailgun/nailgun/test/integration/test_rpc_consumer.py b/nailgun/nailgun/test/integration/test_rpc_consumer.py
index d3e076c8b7..4e15780654 100644
--- a/nailgun/nailgun/test/integration/test_rpc_consumer.py
+++ b/nailgun/nailgun/test/integration/test_rpc_consumer.py
@@ -318,7 +318,7 @@ class TestVerifyNetworks(BaseIntegrationTest):
                           u'absent_vlans': [100, 101, 102, 103, 104],
                           u'interface': 'eth0',
                           u'mac': node2.interfaces[0].mac,
-                          u'name': None,
+                          u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
                           u'uid': node2.id}])

     def test_verify_networks_resp_forgotten_node_error(self):
diff --git a/nailgun/nailgun/test/unit/test_handlers.py b/nailgun/nailgun/test/unit/test_handlers.py
index 356d461d43..36c641d9f3 100644
--- a/nailgun/nailgun/test/unit/test_handlers.py
+++ b/nailgun/nailgun/test/unit/test_handlers.py
@@ -26,7 +26,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_all_api_urls_404_or_405(self):
         urls = {
             'ClusterHandler': {'obj_id': 1},
-            'NodeHandler': {'node_id': 1},
+            'NodeHandler': {'obj_id': 1},
             'ReleaseHandler': {'obj_id': 1},
         }
         for handler in urls:
diff --git a/nailgun/nailgun/test/unit/test_node_deletion.py b/nailgun/nailgun/test/unit/test_node_deletion.py
index 8a33314449..6896355c95 100644
--- a/nailgun/nailgun/test/unit/test_node_deletion.py
+++ b/nailgun/nailgun/test/unit/test_node_deletion.py
@@ -48,7 +48,7 @@ class TestNodeDeletion(BaseIntegrationTest):
         resp = self.app.delete(
             reverse(
                 'NodeHandler',
-                kwargs={'node_id': node.id}),
+                kwargs={'obj_id': node.id}),
             headers=self.default_headers
         )
         self.assertEquals(204, resp.status_code)
diff --git a/nailgun/nailgun/test/unit/test_node_disks.py b/nailgun/nailgun/test/unit/test_node_disks.py
index 210fd2a83e..8374d99a5d 100644
--- a/nailgun/nailgun/test/unit/test_node_disks.py
+++ b/nailgun/nailgun/test/unit/test_node_disks.py
@@ -78,7 +78,11 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
             self.assertEqual(len(disk['volumes']), 0)

     def test_volumes_regeneration_after_roles_update(self):
-        self.create_node(roles=[], pending_roles=['compute'])
+        self.env.create(
+            nodes_kwargs=[
+                {"roles": [], "pending_roles": ['compute']}
+            ]
+        )
         node_db = self.env.nodes[0]

         original_roles_response = self.get(node_db.id)
diff --git a/nailgun/nailgun/test/unit/test_node_nic_handler.py b/nailgun/nailgun/test/unit/test_node_nic_handler.py
index 9272aa97d8..0c8c7383e7 100644
--- a/nailgun/nailgun/test/unit/test_node_nic_handler.py
+++ b/nailgun/nailgun/test/unit/test_node_nic_handler.py
@@ -116,7 +116,7 @@ class TestHandlers(BaseIntegrationTest):
         )
         self.assertEquals(resp.status_code, 200)
         resp = self.app.get(
-            reverse('NodeHandler', kwargs={'node_id': node['id']}),
+            reverse('NodeHandler', kwargs={'obj_id': node['id']}),
             headers=self.default_headers
         )
         ifaces = json.loads(resp.body)['meta']['interfaces']
diff --git a/nailgun/nailgun/volumes/manager.py b/nailgun/nailgun/volumes/manager.py
index 07791e1bb4..c1edd0aed0 100644
--- a/nailgun/nailgun/volumes/manager.py
+++ b/nailgun/nailgun/volumes/manager.py
@@ -918,8 +918,12 @@ class VolumeManager(object):
         disks_space = sum([d.size for d in self.disks])
         minimal_installation_size = self.__calc_minimal_installation_size()

-        self.__logger('Checking disks space: disks space %s, minimal size %s' %
-                      (disks_space, minimal_installation_size))
+        self.__logger(
+            'Checking disks space: disks space {0}, minimal size {1}'.format(
+                disks_space,
+                minimal_installation_size
+            )
+        )

         if disks_space < minimal_installation_size:
             raise errors.NotEnoughFreeSpace()