Initial version of Node object
Related to blueprint nailgun-objects-flow
Change-Id: I1a0fe6da827898a6deb405c40cb671476cc7443f
parent 7c16388b3c
commit df965bfcc0
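The pattern this commit introduces: REST handlers stop querying SQLAlchemy models directly and delegate to the new object layer (objects.Node / objects.NodeCollection). Below is a minimal sketch of that pattern, assuming hypothetical handler names and a shortened eager tuple for brevity; the real handlers are in the diff that follows.

from nailgun import objects
from nailgun.api.handlers.base import CollectionHandler
from nailgun.api.handlers.base import SingleHandler
from nailgun.api.validators.node import NodeValidator


class ExampleNodeHandler(SingleHandler):
    # SingleHandler resolves obj_id, validates the payload and calls single.update()
    single = objects.Node
    validator = NodeValidator


class ExampleNodeCollectionHandler(CollectionHandler):
    # CollectionHandler.GET eager-loads these relations, then serializes the
    # resulting query with collection.to_json()
    collection = objects.NodeCollection
    validator = NodeValidator
    eager = ('cluster', 'role_list', 'pending_role_list')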
@@ -165,15 +165,21 @@ class BaseHandler(object):
         ) as exc:
             notifier.notify("error", exc.message)
             raise self.http(400, exc.message)
+        except (
+            errors.NotAllowed,
+        ) as exc:
+            raise self.http(403, exc.message)
         except (
             errors.AlreadyExists
         ) as exc:
             raise self.http(409, exc.message)
         except (
             errors.InvalidData,
-            Exception
+            errors.NodeOffline,
         ) as exc:
-            raise self.http(400, str(exc))
+            raise self.http(400, exc.message)
+        except Exception as exc:
+            raise
         return valid_data

     def get_object_or_404(self, model, *args, **kwargs):
@@ -242,20 +248,10 @@ class SingleHandler(BaseHandler):
             obj_id
         )

-        try:
-            data = self.checked_data(
-                self.validator.validate_update,
-                instance=obj
-            )
-        except (
-            errors.InvalidData,
-            errors.NodeOffline
-        ) as exc:
-            raise self.http(400, exc.message)
-        except (
-            errors.AlreadyExists,
-        ) as exc:
-            raise self.http(409, exc.message)
+        data = self.checked_data(
+            self.validator.validate_update,
+            instance=obj
+        )

         self.single.update(obj, data)
         return self.single.to_json(obj)
@@ -283,13 +279,15 @@ class CollectionHandler(BaseHandler):

     validator = BasicValidator
     collection = None
+    eager = ()

     @content_json
     def GET(self):
         """:returns: Collection of JSONized REST objects.
         :http: * 200 (OK)
         """
-        return self.collection.to_json()
+        q = self.collection.eager(self.eager, None)
+        return self.collection.to_json(q)

     @content_json
     def POST(self):
@@ -298,6 +296,7 @@ class CollectionHandler(BaseHandler):
                * 400 (invalid object data specified)
                * 409 (object with such parameters already exists)
         """
+
         data = self.checked_data()

         try:
@@ -19,15 +19,14 @@ Handlers dealing with nodes
 """

 from datetime import datetime
-import json
 import traceback

-from sqlalchemy.orm import joinedload
-
 import web

 from nailgun.api.handlers.base import BaseHandler
+from nailgun.api.handlers.base import CollectionHandler
 from nailgun.api.handlers.base import content_json
+from nailgun.api.handlers.base import SingleHandler
 from nailgun.api.serializers.node import NodeInterfacesSerializer
 from nailgun.api.validators.network import NetAssignmentValidator
 from nailgun.api.validators.node import NodeValidator
@@ -45,117 +44,13 @@ from nailgun.network.manager import NetworkManager
 from nailgun import notifier


-class NodeHandler(BaseHandler):
-    fields = ('id', 'name', 'meta', 'progress', 'roles', 'pending_roles',
-              'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name',
-              'pending_addition', 'pending_deletion', 'os_platform',
-              'error_type', 'online', 'cluster', 'uuid')
-    model = Node
-    validator = NodeValidator
-
-    @classmethod
-    def render(cls, instance, fields=None):
-        json_data = None
-        try:
-            json_data = BaseHandler.render(instance, fields=cls.fields)
-            json_data['network_data'] = NetworkManager.get_node_networks(
-                instance.id)
-        except Exception:
-            logger.error(traceback.format_exc())
-        return json_data
-
-    @content_json
-    def GET(self, node_id):
-        """:returns: JSONized Node object.
-        :http: * 200 (OK)
-               * 404 (node not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        return self.render(node)
-
-    @content_json
-    def PUT(self, node_id):
-        """:returns: JSONized Node object.
-        :http: * 200 (OK)
-               * 400 (invalid node data specified)
-               * 404 (node not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        if not node.attributes:
-            node.attributes = NodeAttributes(node_id=node.id)
-
-        data = self.checked_data(self.validator.validate_update)
-
-        network_manager = NetworkManager
-
-        old_cluster_id = node.cluster_id
-
-        if data.get("pending_roles") == [] and node.cluster:
-            objects.Cluster.clear_pending_changes(
-                node.cluster,
-                node_id=node.id
-            )
-
-        if "cluster_id" in data:
-            if data["cluster_id"] is None and node.cluster:
-                objects.Cluster.clear_pending_changes(
-                    node.cluster,
-                    node_id=node.id
-                )
-                node.roles = node.pending_roles = []
-                node.reset_name_to_default()
-            node.cluster_id = data["cluster_id"]
-            if node.cluster_id != old_cluster_id:
-                if old_cluster_id:
-                    network_manager.clear_assigned_networks(node)
-                if node.cluster_id:
-                    network_manager = node.cluster.network_manager
-                    network_manager.assign_networks_by_default(node)
-
-        regenerate_volumes = any((
-            'roles' in data and set(data['roles']) != set(node.roles),
-            'pending_roles' in data and
-            set(data['pending_roles']) != set(node.pending_roles),
-            node.cluster_id != old_cluster_id
-        ))
-
-        for key, value in data.iteritems():
-            # we don't allow to update id explicitly
-            # and updated cluster_id before all other fields
-            if key in ("id", "cluster_id"):
-                continue
-            setattr(node, key, value)
-        db().flush()
-
-        if not node.status in ('provisioning', 'deploying'
-                               ) and regenerate_volumes:
-            try:
-                node.attributes.volumes = \
-                    node.volume_manager.gen_volumes_info()
-            except Exception as exc:
-                msg = (
-                    u"Failed to generate volumes "
-                    "info for node '{0}': '{1}'"
-                ).format(
-                    node.name or data.get("mac") or data.get("id"),
-                    str(exc) or "see logs for details"
-                )
-                logger.warning(traceback.format_exc())
-                notifier.notify("error", msg, node_id=node.id)
-        return self.render(node)
-
-    def DELETE(self, node_id):
-        """:returns: Empty string
-        :http: * 204 (node successfully deleted)
-               * 404 (cluster not found in db)
-        """
-        node = self.get_object_or_404(Node, node_id)
-        db().delete(node)
-
-        raise self.http(204)
-
-
-class NodeCollectionHandler(BaseHandler):
+class NodeHandler(SingleHandler):
+
+    single = objects.Node
+    validator = NodeValidator
+
+
+class NodeCollectionHandler(CollectionHandler):
     """Node collection handler
     """

@@ -165,25 +60,16 @@ class NodeCollectionHandler(BaseHandler):
               'error_type', 'online', 'cluster', 'uuid')

     validator = NodeValidator
+    collection = objects.NodeCollection
+    eager = (
+        'cluster',
+        'nic_interfaces',
+        'nic_interfaces.assigned_networks_list',
+        'bond_interfaces',
+        'bond_interfaces.assigned_networks_list',
+        'role_list',
+        'pending_role_list'
+    )

-    @classmethod
-    def render(cls, nodes, fields=None):
-        json_list = []
-        network_manager = NetworkManager
-        ips_mapped = network_manager.get_grouped_ips_by_node()
-        networks_grouped = network_manager.get_networks_grouped_by_cluster()
-        for node in nodes:
-            try:
-                json_data = BaseHandler.render(node, fields=cls.fields)
-
-                json_data['network_data'] = network_manager.\
-                    get_node_networks_optimized(
-                        node, ips_mapped.get(node.id, []),
-                        networks_grouped.get(node.cluster_id, []))
-                json_list.append(json_data)
-            except Exception:
-                logger.error(traceback.format_exc())
-        return json_list
-
     @content_json
     def GET(self):
@@ -194,133 +80,14 @@ class NodeCollectionHandler(BaseHandler):
         :http: * 200 (OK)
         """
         cluster_id = web.input(cluster_id=None).cluster_id
-        nodes = db().query(Node).options(
-            joinedload('cluster'),
-            joinedload('nic_interfaces'),
-            joinedload('nic_interfaces.assigned_networks_list'),
-            joinedload('bond_interfaces'),
-            joinedload('bond_interfaces.assigned_networks_list'),
-            joinedload('role_list'),
-            joinedload('pending_role_list'))
+        nodes = self.collection.eager(None, self.eager)
+
         if cluster_id == '':
-            nodes = nodes.filter_by(
-                cluster_id=None).all()
+            nodes = nodes.filter_by(cluster_id=None)
         elif cluster_id:
-            nodes = nodes.filter_by(
-                cluster_id=cluster_id).all()
-        else:
-            nodes = nodes.all()
-        return self.render(nodes)
-
-    @content_json
-    def POST(self):
-        """:returns: JSONized Node object.
-        :http: * 201 (cluster successfully created)
-               * 400 (invalid node data specified)
-               * 403 (node has incorrect status)
-               * 409 (node with such parameters already exists)
-        """
-        data = self.checked_data()
-        if data.get("status", "") != "discover":
-            msg = u"Node with mac '{0}' was not created, " \
-                  u"because request status is '{1}'."\
-                  .format(data[u'mac'], data.get(u'status'))
-            logger.warning(msg)
-            raise self.http(
-                403, "Only bootstrap nodes are allowed to be registered."
-            )
-
-        node = Node(
-            #always produce unified (i.e. with lowercased letters)
-            #default name for nodes
-            name="Untitled ({0})".format(data['mac'][-5:].lower()),
-            timestamp=datetime.now()
-        )
-        if "cluster_id" in data:
-            # FIXME(vk): this part is needed only for tests. Normally,
-            # nodes are created only by agent and POST requests don't contain
-            # cluster_id, but our integration and unit tests widely use it.
-            # We need to assign cluster first
-            cluster_id = data.pop("cluster_id")
-            if cluster_id:
-                node.cluster = objects.Cluster.get_by_uid(cluster_id)
-        for key, value in data.iteritems():
-            if key == "id":
-                continue
-            elif key == "meta":
-                node.create_meta(value)
-            else:
-                setattr(node, key, value)
-
-        db().add(node)
-        db().flush()
-
-        node.attributes = NodeAttributes()
-
-        try:
-            node.attributes.volumes = node.volume_manager.gen_volumes_info()
-            if node.cluster:
-                objects.Cluster.add_pending_changes(
-                    node.cluster,
-                    "disks",
-                    node_id=node.id
-                )
-        except Exception as exc:
-            msg = (
-                u"Failed to generate volumes "
-                "info for node '{0}': '{1}'"
-            ).format(
-                node.name or data.get("mac") or data.get("id"),
-                str(exc) or "see logs for details"
-            )
-            logger.warning(traceback.format_exc())
-            notifier.notify("error", msg, node_id=node.id)
-        db().add(node)
-        db().flush()
-
-        network_manager = NetworkManager
-        # Add interfaces for node from 'meta'.
-        if node.meta and node.meta.get('interfaces'):
-            network_manager.update_interfaces_info(node)
-
-        if node.cluster_id:
-            network_manager = node.cluster.network_manager
-            network_manager.assign_networks_by_default(node)
-
-        try:
-            # we use multiplier of 1024 because there are no problems here
-            # with unfair size calculation
-            ram = str(round(float(
-                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
-        except Exception as exc:
-            logger.warning(traceback.format_exc())
-            ram = "unknown RAM"
-
-        try:
-            # we use multiplier of 1000 because disk vendors specify HDD size
-            # in terms of decimal capacity. Sources:
-            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
-            # http://physics.nist.gov/cuu/Units/binary.html
-            hd_size = round(float(
-                sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
-            # if HDD > 100 GB we show it's size in TB
-            if hd_size > 100:
-                hd_size = str(hd_size / 1000) + " TB HDD"
-            else:
-                hd_size = str(hd_size) + " GB HDD"
-        except Exception as exc:
-            logger.warning(traceback.format_exc())
-            hd_size = "unknown HDD"
-
-        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
-        notifier.notify(
-            "discover",
-            "New node is discovered: %s CPUs / %s / %s " %
-            (cores, ram, hd_size),
-            node_id=node.id
-        )
-
-        raise self.http(201, json.dumps(NodeHandler.render(node), indent=4))
+
+        return self.collection.to_json(nodes)

     @content_json
     def PUT(self):
@@ -328,112 +95,42 @@ class NodeCollectionHandler(BaseHandler):
         :http: * 200 (nodes are successfully updated)
                * 400 (invalid nodes data specified)
         """
-        data = self.checked_data(self.validator.validate_collection_update)
+        data = self.checked_data(
+            self.validator.validate_collection_update
+        )

-        q = db().query(Node)
         nodes_updated = []
         for nd in data:
-            node = None
-            if nd.get("mac"):
-                node = q.filter_by(mac=nd["mac"]).first() \
-                    or self.validator.validate_existent_node_mac_update(nd)
-            else:
-                node = q.get(nd["id"])
-
-            old_cluster_id = node.cluster_id
-
-            if nd.get("pending_roles") == [] and node.cluster:
-                objects.Cluster.clear_pending_changes(
-                    node.cluster,
-                    node_id=node.id
-                )
-
-            if "cluster_id" in nd:
-                if nd["cluster_id"] is None and node.cluster:
-                    objects.Cluster.clear_pending_changes(
-                        node.cluster,
-                        node_id=node.id
-                    )
-                    node.roles = node.pending_roles = []
-                    node.reset_name_to_default()
-                node.cluster_id = nd["cluster_id"]
-
-            regenerate_volumes = any((
-                'roles' in nd and
-                set(nd['roles']) != set(node.roles),
-                'pending_roles' in nd and
-                set(nd['pending_roles']) != set(node.pending_roles),
-                node.cluster_id != old_cluster_id
-            ))
-
-            for key, value in nd.iteritems():
-                if key == "meta":
-                    node.update_meta(value)
-                # don't update node ID
-                elif key != "id":
-                    setattr(node, key, value)
-            db().flush()
-            db().refresh(node)
-            if not node.attributes:
-                node.attributes = NodeAttributes()
-                db().flush()
-            if not node.attributes.volumes:
-                node.attributes.volumes = \
-                    node.volume_manager.gen_volumes_info()
-                db().flush()
-            if not node.status in ('provisioning', 'deploying'):
-                variants = (
-                    "disks" in node.meta and
-                    len(node.meta["disks"]) != len(
-                        filter(
-                            lambda d: d["type"] == "disk",
-                            node.attributes.volumes
-                        )
-                    ),
-                    regenerate_volumes
-                )
-                if any(variants):
-                    try:
-                        node.attributes.volumes = \
-                            node.volume_manager.gen_volumes_info()
-                        if node.cluster:
-                            objects.Cluster.add_pending_changes(
-                                node.cluster,
-                                "disks",
-                                node_id=node.id
-                            )
-                    except Exception as exc:
-                        msg = (
-                            "Failed to generate volumes "
-                            "info for node '{0}': '{1}'"
-                        ).format(
-                            node.name or data.get("mac") or data.get("id"),
-                            str(exc) or "see logs for details"
-                        )
-                        logger.warning(traceback.format_exc())
-                        notifier.notify("error", msg, node_id=node.id)
-
-                    db().flush()
-
-            network_manager = NetworkManager
-
+            node = self.collection.single.get_by_mac_or_uid(
+                mac=nd.get("mac"),
+                node_uid=nd.get("id")
+            )
+            if not node:
+                can_search_by_ifaces = all([
+                    nd.get("mac"),
+                    nd.get("meta"),
+                    nd["meta"].get("interfaces")
+                ])
+                if can_search_by_ifaces:
+                    node = self.collection.single.search_by_interfaces(
+                        nd["meta"]["interfaces"]
+                    )
+
+            if not node:
+                raise self.http(
+                    404,
+                    "Can't find node: {0}".format(nd)
+                )
+
+            self.collection.single.update(node, nd)
             nodes_updated.append(node.id)
-            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
-                if old_cluster_id:
-                    network_manager.clear_assigned_networks(node)
-                if node.cluster:
-                    network_manager = node.cluster.network_manager
-                    network_manager.assign_networks_by_default(node)

         # we need eagerload everything that is used in render
-        nodes = db().query(Node).options(
-            joinedload('cluster'),
-            joinedload('nic_interfaces'),
-            joinedload('nic_interfaces.assigned_networks_list'),
-            joinedload('bond_interfaces'),
-            joinedload('bond_interfaces.assigned_networks_list')).\
-            filter(Node.id.in_(nodes_updated)).all()
-        return self.render(nodes)
+        nodes = self.collection.get_by_id_list(
+            self.collection.eager(None, self.eager),
+            nodes_updated
+        )
+        return self.collection.to_json(nodes)


 class NodeAgentHandler(BaseHandler):
@@ -19,6 +19,30 @@ from nailgun import consts
 from nailgun.api.serializers.base import BasicSerializer


+class NodeSerializer(BasicSerializer):
+
+    fields = (
+        'id',
+        'name',
+        'meta',
+        'progress',
+        'roles',
+        'pending_roles',
+        'status',
+        'mac',
+        'fqdn',
+        'ip',
+        'manufacturer',
+        'platform_name',
+        'pending_addition',
+        'pending_deletion',
+        'os_platform',
+        'error_type',
+        'online',
+        'cluster'
+    )
+
+
 class NodeInterfacesSerializer(BasicSerializer):

     nic_fields = (
@@ -139,7 +139,7 @@ urls = (
     NodeCollectionHandler,
     r'/nodes/agent/?$',
     NodeAgentHandler,
-    r'/nodes/(?P<node_id>\d+)/?$',
+    r'/nodes/(?P<obj_id>\d+)/?$',
     NodeHandler,
     r'/nodes/(?P<node_id>\d+)/disks/?$',
     NodeDisksHandler,
@@ -25,7 +25,10 @@ class ClusterValidator(BasicValidator):
     def _validate_common(cls, data):
         d = cls.validate_json(data)
         if d.get("name"):
-            if ClusterCollection.filter_by(name=d["name"]).first():
+            if ClusterCollection.filter_by(
+                query=None,
+                name=d["name"]
+            ).first():
                 raise errors.AlreadyExists(
                     "Environment with this name already exists",
                     log_message=True
@@ -17,6 +17,10 @@ from nailgun.api.validators.base import BasicValidator
 from nailgun.api.validators.json_schema.disks import disks_simple_format_schema
 from nailgun.api.validators.json_schema.node import node_format_schema

+from nailgun import consts
+
+from nailgun import objects
+
 from nailgun.db import db
 from nailgun.db.sqlalchemy.models import Node
 from nailgun.db.sqlalchemy.models import NodeNICInterface
@@ -98,9 +102,15 @@ class MetaValidator(BasicValidator):
 class NodeValidator(BasicValidator):
     @classmethod
     def validate(cls, data):
+        # TODO(enchantner): rewrite validators to use Node object
         data = cls.validate_json(data)
         cls.validate_schema(data, node_format_schema)

+        if data.get("status", "") != "discover":
+            raise errors.NotAllowed(
+                "Only bootstrap nodes are allowed to be registered."
+            )
+
         if 'mac' not in data:
             raise errors.InvalidData(
                 "No mac address specified",
@@ -173,20 +183,66 @@ class NodeValidator(BasicValidator):
             )

     @classmethod
-    def validate_update(cls, data):
-        d = cls.validate_json(data)
-        if "status" in d and d["status"] not in Node.NODE_STATUSES:
+    def validate_update(cls, data, instance=None):
+        if isinstance(data, (str, unicode)):
+            d = cls.validate_json(data)
+        else:
+            d = data
+
+        if "status" in d and d["status"] not in consts.NODE_STATUSES:
             raise errors.InvalidData(
                 "Invalid status for node",
                 log_message=True
             )
-        if 'roles' in d and 'id' in d:
-            node = db().query(Node).get(d['id'])
+
+        if not d.get("mac") and not d.get("id") and not instance:
+            raise errors.InvalidData(
+                "Neither MAC nor ID is specified",
+                log_message=True
+            )
+
+        q = db().query(Node)
+        if "mac" in d:
+            if not d["mac"]:
+                raise errors.InvalidData(
+                    "Null MAC is specified",
+                    log_message=True
+                )
+            else:
+                existent_node = q.filter_by(mac=d["mac"]).first() \
+                    or cls.validate_existent_node_mac_update(d)
+                if not existent_node:
+                    raise errors.InvalidData(
+                        "Invalid MAC is specified",
+                        log_message=True
+                    )
+
+        if "id" in d and d["id"]:
+            existent_node = q.get(d["id"])
+            if not existent_node:
+                raise errors.InvalidData(
+                    "Invalid ID specified",
+                    log_message=True
+                )
+
+        if "roles" in d:
+            if instance:
+                node = instance
+            else:
+                node = objects.Node.get_by_mac_or_uid(
+                    mac=d.get("mac"),
+                    node_uid=d.get("id")
+                )
             cls.validate_roles(d, node)

         if 'meta' in d:
             d['meta'] = MetaValidator.validate_update(d['meta'])
         return d

+    @classmethod
+    def validate_delete(cls, instance):
+        pass
+
     @classmethod
     def validate_collection_update(cls, data):
         d = cls.validate_json(data)
@@ -196,38 +252,8 @@ class NodeValidator(BasicValidator):
                 log_message=True
             )

-        q = db().query(Node)
         for nd in d:
-            if not nd.get("mac") and not nd.get("id"):
-                raise errors.InvalidData(
-                    "Neither MAC nor ID is specified",
-                    log_message=True
-                )
-            if "mac" in nd and not nd["mac"]:
-                raise errors.InvalidData(
-                    "Null MAC is specified",
-                    log_message=True
-                )
-            else:
-                if nd.get("mac"):
-                    existent_node = q.filter_by(mac=nd["mac"]).first() \
-                        or cls.validate_existent_node_mac_update(nd)
-                    if not existent_node:
-                        raise errors.InvalidData(
-                            "Invalid MAC specified",
-                            log_message=True
-                        )
-                if nd.get("id"):
-                    existent_node = q.get(nd["id"])
-                    if not existent_node:
-                        raise errors.InvalidData(
-                            "Invalid ID specified",
-                            log_message=True
-                        )
-                if 'roles' in nd:
-                    cls.validate_roles(nd, existent_node)
-                if 'meta' in nd:
-                    nd['meta'] = MetaValidator.validate_update(nd['meta'])
+            cls.validate_update(nd)
         return d


@@ -71,6 +71,21 @@ CLUSTER_NET_SEGMENT_TYPES = Enum(
     'gre'
 )

+NODE_STATUSES = Enum(
+    'ready',
+    'discover',
+    'provisioning',
+    'provisioned',
+    'deploying',
+    'error'
+)
+
+NODE_ERRORS = Enum(
+    'deploy',
+    'provision',
+    'deletion'
+)
+
 NETWORK_INTERFACE_TYPES = Enum(
     'ether',
     'bond'
@@ -30,7 +30,7 @@ import sqlalchemy.types
 from nailgun.db import db
 from nailgun.db.sqlalchemy import models
 from nailgun.logger import logger
-from nailgun.network.manager import NetworkManager
+from nailgun import objects
 from nailgun.settings import settings
 from nailgun.utils import dict_merge

@@ -178,11 +178,9 @@ def upload_fixture(fileobj, loader=None):

             # UGLY HACK for testing
             if new_obj.__class__.__name__ == 'Node':
-                new_obj.attributes = models.NodeAttributes()
-                db().commit()
-                new_obj.attributes.volumes = \
-                    new_obj.volume_manager.gen_volumes_info()
-                NetworkManager.update_interfaces_info(new_obj)
+                objects.Node.create_attributes(new_obj)
+                objects.Node.update_volumes(new_obj)
+                objects.Node.update_interfaces(new_obj)
             db().commit()


@@ -68,28 +68,15 @@ class Role(Base):

 class Node(Base):
     __tablename__ = 'nodes'
-    NODE_STATUSES = (
-        'ready',
-        'discover',
-        'provisioning',
-        'provisioned',
-        'deploying',
-        'error'
-    )
-    NODE_ERRORS = (
-        'deploy',
-        'provision',
-        'deletion'
-    )
     id = Column(Integer, primary_key=True)
     uuid = Column(String(36), nullable=False,
                   default=lambda: str(uuid.uuid4()), unique=True)
     cluster_id = Column(Integer, ForeignKey('clusters.id'))
     name = Column(Unicode(100))
     status = Column(
-        Enum(*NODE_STATUSES, name='node_status'),
+        Enum(*consts.NODE_STATUSES, name='node_status'),
         nullable=False,
-        default='discover'
+        default=consts.NODE_STATUSES.discover
     )
     meta = Column(JSON, default={})
     mac = Column(LowercaseString(17), nullable=False, unique=True)
@@ -102,7 +89,7 @@ class Node(Base):
     pending_addition = Column(Boolean, default=False)
     pending_deletion = Column(Boolean, default=False)
     changes = relationship("ClusterChanges", backref="node")
-    error_type = Column(Enum(*NODE_ERRORS, name='node_error_type'))
+    error_type = Column(Enum(*consts.NODE_ERRORS, name='node_error_type'))
     error_msg = Column(String(255))
     timestamp = Column(DateTime, nullable=False)
     online = Column(Boolean, default=True)
@@ -256,16 +243,17 @@ class Node(Base):
     def update_meta(self, data):
         # helper for basic checking meta before updation
         result = []
-        for iface in data["interfaces"]:
-            if not self._check_interface_has_required_params(iface):
-                logger.warning(
-                    "Invalid interface data: {0}. "
-                    "Interfaces are not updated.".format(iface)
-                )
-                data["interfaces"] = self.meta.get("interfaces")
-                self.meta = data
-                return
-            result.append(self._clean_iface(iface))
+        if "interfaces" in data:
+            for iface in data["interfaces"]:
+                if not self._check_interface_has_required_params(iface):
+                    logger.warning(
+                        "Invalid interface data: {0}. "
+                        "Interfaces are not updated.".format(iface)
+                    )
+                    data["interfaces"] = self.meta.get("interfaces")
+                    self.meta = data
+                    return
+                result.append(self._clean_iface(iface))

         data["interfaces"] = result
         self.meta = data
@@ -273,14 +261,15 @@ class Node(Base):
     def create_meta(self, data):
         # helper for basic checking meta before creation
         result = []
-        for iface in data["interfaces"]:
-            if not self._check_interface_has_required_params(iface):
-                logger.warning(
-                    "Invalid interface data: {0}. "
-                    "Skipping interface.".format(iface)
-                )
-                continue
-            result.append(self._clean_iface(iface))
+        if "interfaces" in data:
+            for iface in data["interfaces"]:
+                if not self._check_interface_has_required_params(iface):
+                    logger.warning(
+                        "Invalid interface data: {0}. "
+                        "Skipping interface.".format(iface)
+                    )
+                    continue
+                result.append(self._clean_iface(iface))

         data["interfaces"] = result
         self.meta = data
@@ -25,6 +25,7 @@ default_messages = {
     # REST errors
     "CannotDelete": "Can't delete object",
     "CannotCreate": "Can't create object",
+    "NotAllowed": "Action is not allowed",
     "InvalidField": "Invalid field specified for object",

     # node discovering errors
@@ -29,7 +29,7 @@ from netaddr import IPRange
 from sqlalchemy.orm import joinedload
 from sqlalchemy.sql import not_

-from nailgun import objects
+from nailgun.objects import Cluster

 from nailgun import consts
 from nailgun.db import db
@@ -243,7 +243,7 @@ class NetworkManager(object):
         :returns: None
         :raises: Exception
         """
-        cluster = objects.Cluster.get_by_uid(cluster_id)
+        cluster = Cluster.get_by_uid(cluster_id)
         if not cluster:
             raise Exception(u"Cluster id='%s' not found" % cluster_id)

@@ -362,6 +362,7 @@ class NetworkManager(object):
             ips = ips.filter_by(node=node_id)
         if network_id:
             ips = ips.filter_by(network=network_id)
+
         try:
             admin_net_id = cls.get_admin_network_group_id()
         except errors.AdminNetworkNotFound:
@@ -445,9 +446,13 @@ class NetworkManager(object):
             # Assign remaining networks to NIC #0
             # as all the networks must be assigned.
             # But network check will not pass if we get here.
-            logger.warn("Cannot assign all networks appropriately for"
-                        " node %r. Set all unassigned networks to the"
-                        " interface %r", node.name, nics[0]['name'])
+            logger.warn(
+                u"Cannot assign all networks appropriately for"
+                u"node %r. Set all unassigned networks to the"
+                u"interface %r",
+                node.name,
+                nics[0]['name']
+            )
             for ng_id in to_assign_ids:
                 nics[0].setdefault('assigned_networks', []).append(
                     {'id': ng_id, 'name': ngs_by_id[ng_id].name})
@@ -731,7 +736,9 @@ class NetworkManager(object):
         try:
             cls.__check_interfaces_correctness(node)
         except errors.InvalidInterfacesInfo as e:
-            logger.warn("Cannot update interfaces: %s" % str(e))
+            logger.warn(
+                "Cannot update interfaces: {0}".format(str(e))
+            )
             return

         for interface in node.meta["interfaces"]:
@@ -790,14 +797,14 @@ class NetworkManager(object):
         interface.node_id = node.id
         cls.__set_interface_attributes(interface, interface_attrs)
         db().add(interface)
-        db().commit()
-        node.nic_interfaces.append(interface)
+        db().flush()

     @classmethod
     def __update_existing_interface(cls, interface_id, interface_attrs):
         interface = db().query(NodeNICInterface).get(interface_id)
         cls.__set_interface_attributes(interface, interface_attrs)
-        db().commit()
+        db().add(interface)
+        db().flush()

     @classmethod
     def __set_interface_attributes(cls, interface, interface_attrs):
@@ -830,6 +837,7 @@ class NetworkManager(object):
                 mac_addresses, node_name))

         map(db().delete, interfaces_to_delete)
+        db().flush()

     @classmethod
     def get_admin_ip_for_node(cls, node):
@@ -887,7 +895,7 @@ class NetworkManager(object):

     @classmethod
     def get_end_point_ip(cls, cluster_id):
-        cluster_db = objects.Cluster.get_by_uid(cluster_id)
+        cluster_db = Cluster.get_by_uid(cluster_id)
         ip = None
         if cluster_db.is_ha_mode:
             ip = cls.assign_vip(cluster_db.id, "public")
@@ -988,7 +996,7 @@ class NetworkManager(object):
         :type cluster_id: int
         :returns: None
         """
-        cluster_db = objects.Cluster.get_by_uid(cluster_id)
+        cluster_db = Cluster.get_by_uid(cluster_id)
        networks_metadata = cluster_db.release.networks_metadata
         networks_list = networks_metadata[cluster_db.net_provider]["networks"]
         used_nets = [IPNetwork(cls.get_admin_network_group().cidr)]
@@ -1078,7 +1086,7 @@ class NetworkManager(object):
             cls.update_cidr_from_gw_mask(ng_db, ng)
             if ng_db.meta.get("notation"):
                 cls.cleanup_network_group(ng_db)
-            objects.Cluster.add_pending_changes(ng_db.cluster, 'networks')
+            Cluster.add_pending_changes(ng_db.cluster, 'networks')

     @classmethod
     def cluster_has_bonds(cls, cluster_id):
@@ -24,5 +24,8 @@ from nailgun.objects.cluster import Attributes
 from nailgun.objects.cluster import Cluster
 from nailgun.objects.cluster import ClusterCollection

+from nailgun.objects.node import Node
+from nailgun.objects.node import NodeCollection
+
 from nailgun.objects.task import Task
 from nailgun.objects.task import TaskCollection
@@ -16,6 +16,8 @@

 import json

+from sqlalchemy.orm import joinedload
+
 from nailgun.api.serializers.base import BasicSerializer
 from nailgun.db import db
 from nailgun.errors import errors
@@ -93,7 +95,7 @@ class NailgunCollection(object):
         ).yield_per(yield_per)

     @classmethod
-    def filter_by(cls, yield_per=100, **kwargs):
+    def filter_by(cls, query, yield_per=100, **kwargs):
         for k in kwargs.iterkeys():
             if k not in cls.single.schema["properties"]:
                 raise AttributeError(
@@ -103,14 +105,25 @@ class NailgunCollection(object):
                     )
                 )

-        return db().query(
-            cls.single.model
-        ).filter_by(
-            **kwargs
-        ).yield_per(yield_per)
+        use_query = query or cls.all(yield_per=yield_per)
+        return use_query.filter_by(**kwargs)

     @classmethod
-    def to_list(cls, fields=None, yield_per=100, query=None):
+    def get_by_id_list(cls, query, id_list, yield_per=100):
+        use_query = query or cls.all(yield_per=yield_per)
+        return use_query.filter(cls.single.model.id.in_(id_list))
+
+    @classmethod
+    def eager(cls, query, fields, yield_per=100):
+        use_query = query or cls.all(yield_per=yield_per)
+        if fields:
+            return use_query.options(
+                *[joinedload(f) for f in fields]
+            )
+        return use_query
+
+    @classmethod
+    def to_list(cls, query=None, fields=None, yield_per=100):
         use_query = query or cls.all(yield_per=yield_per)
         return map(
             lambda o: cls.single.to_dict(o, fields=fields),
@@ -118,7 +131,7 @@ class NailgunCollection(object):
         )

     @classmethod
-    def to_json(cls, fields=None, yield_per=100, query=None):
+    def to_json(cls, query=None, fields=None, yield_per=100):
         return json.dumps(
             cls.to_list(
                 fields=fields,
@ -0,0 +1,386 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2013 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from nailgun import consts
|
||||||
|
|
||||||
|
from nailgun.api.serializers.node import NodeSerializer
|
||||||
|
|
||||||
|
from nailgun import notifier
|
||||||
|
|
||||||
|
from nailgun.db import db
|
||||||
|
from nailgun.db.sqlalchemy import models
|
||||||
|
from nailgun.errors import errors
|
||||||
|
from nailgun.logger import logger
|
||||||
|
from nailgun.network.manager import NetworkManager
|
||||||
|
|
||||||
|
from nailgun.objects import Cluster
|
||||||
|
from nailgun.objects import NailgunCollection
|
||||||
|
from nailgun.objects import NailgunObject
|
||||||
|
|
||||||
|
|
||||||
|
class Node(NailgunObject):
|
||||||
|
|
||||||
|
model = models.Node
|
||||||
|
serializer = NodeSerializer
|
||||||
|
|
||||||
|
schema = {
|
||||||
|
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||||
|
"title": "Node",
|
||||||
|
"description": "Serialized Node object",
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"id": {"type": "number"},
|
||||||
|
"cluster_id": {"type": "number"},
|
||||||
|
"name": {"type": "string"},
|
||||||
|
"status": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": list(consts.NODE_STATUSES)
|
||||||
|
},
|
||||||
|
"meta": {"type": "object"},
|
||||||
|
"mac": {"type": "string"},
|
||||||
|
"api": {"type": "string"},
|
||||||
|
"fqdn": {"type": "string"},
|
||||||
|
"manufacturer": {"type": "string"},
|
||||||
|
"platform_name": {"type": "string"},
|
||||||
|
"progress": {"type": "number"},
|
||||||
|
"os_platform": {"type": "string"},
|
||||||
|
"pending_addition": {"type": "boolean"},
|
||||||
|
"pending_deletion": {"type": "boolean"},
|
||||||
|
"error_type": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": list(consts.NODE_ERRORS)
|
||||||
|
},
|
||||||
|
"error_msg": {"type": "string"},
|
||||||
|
"online": {"type": "boolean"},
|
||||||
|
"roles": {"type": "array"},
|
||||||
|
"pending_roles": {"type": "array"},
|
||||||
|
"agent_checksum": {"type": "string"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_by_mac_or_uid(cls, mac=None, node_uid=None):
|
||||||
|
node = None
|
||||||
|
if not mac and not node_uid:
|
||||||
|
return node
|
||||||
|
|
||||||
|
q = db().query(cls.model)
|
||||||
|
if mac:
|
||||||
|
node = q.filter_by(mac=mac).first()
|
||||||
|
else:
|
||||||
|
node = q.get(node_uid)
|
||||||
|
return node
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def search_by_interfaces(cls, interfaces):
|
||||||
|
return db().query(cls.model).join(
|
||||||
|
models.NodeNICInterface,
|
||||||
|
cls.model.nic_interfaces
|
||||||
|
).filter(
|
||||||
|
models.NodeNICInterface.mac.in_(
|
||||||
|
[n["mac"] for n in interfaces]
|
||||||
|
)
|
||||||
|
).first()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create(cls, data):
|
||||||
|
if "name" not in data:
|
||||||
|
data["name"] = "Untitled ({0})".format(
|
||||||
|
data['mac'][-5:].lower()
|
||||||
|
)
|
||||||
|
data["timestamp"] = datetime.now()
|
||||||
|
data.pop("id", None)
|
||||||
|
|
||||||
|
#TODO(enchantner): fix this temporary hack in clients
|
||||||
|
if "cluster_id" not in data and "cluster" in data:
|
||||||
|
cluster_id = data.pop("cluster", None)
|
||||||
|
data["cluster_id"] = cluster_id
|
||||||
|
|
||||||
|
roles = data.pop("roles", None)
|
||||||
|
pending_roles = data.pop("pending_roles", None)
|
||||||
|
|
||||||
|
new_node_meta = data.pop("meta", {})
|
||||||
|
new_node_cluster_id = data.pop("cluster_id", None)
|
||||||
|
new_node = super(Node, cls).create(data)
|
||||||
|
new_node.create_meta(new_node_meta)
|
||||||
|
db().flush()
|
||||||
|
|
||||||
|
# Add interfaces for node from 'meta'.
|
||||||
|
if new_node.meta and new_node.meta.get('interfaces'):
|
||||||
|
cls.update_interfaces(new_node)
|
||||||
|
|
||||||
|
# adding node into cluster
|
||||||
|
if new_node_cluster_id:
|
||||||
|
cls.add_into_cluster(new_node, new_node_cluster_id)
|
||||||
|
|
||||||
|
# updating roles
|
||||||
|
if roles is not None:
|
||||||
|
cls.update_roles(new_node, roles)
|
||||||
|
if pending_roles is not None:
|
||||||
|
cls.update_pending_roles(new_node, pending_roles)
|
||||||
|
|
||||||
|
# creating attributes
|
||||||
|
cls.create_attributes(new_node)
|
||||||
|
cls.update_volumes(new_node)
|
||||||
|
|
||||||
|
cls.create_discover_notification(new_node)
|
||||||
|
return new_node
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create_attributes(cls, instance):
|
||||||
|
new_attributes = models.NodeAttributes()
|
||||||
|
instance.attributes = new_attributes
|
||||||
|
db().add(new_attributes)
|
||||||
|
db().add(instance)
|
||||||
|
db().flush()
|
||||||
|
return new_attributes
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def update_interfaces(cls, instance):
|
||||||
|
NetworkManager.update_interfaces_info(instance)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def update_volumes(cls, instance):
|
||||||
|
attrs = instance.attributes
|
||||||
|
if not attrs:
|
||||||
|
attrs = cls.create_attributes(instance)
|
||||||
|
|
||||||
|
try:
|
||||||
|
attrs.volumes = instance.volume_manager.gen_volumes_info()
|
||||||
|
except Exception as exc:
|
||||||
|
msg = (
|
||||||
|
u"Failed to generate volumes "
|
||||||
|
u"info for node '{0}': '{1}'"
|
||||||
|
).format(
|
||||||
|
instance.name or instance.mac or instance.id,
|
||||||
|
str(exc) or "see logs for details"
|
||||||
|
)
|
||||||
|
logger.warning(traceback.format_exc())
|
||||||
|
notifier.notify("error", msg, node_id=instance.id)
|
||||||
|
|
||||||
|
if instance.cluster_id:
|
||||||
|
Cluster.add_pending_changes(
|
||||||
|
instance.cluster,
|
||||||
|
"disks",
|
||||||
|
node_id=instance.id
|
||||||
|
)
|
||||||
|
|
||||||
|
db().add(attrs)
|
||||||
|
db().flush()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def create_discover_notification(cls, instance):
|
||||||
|
try:
|
||||||
|
# we use multiplier of 1024 because there are no problems here
|
||||||
|
# with unfair size calculation
|
||||||
|
ram = str(round(float(
|
||||||
|
instance.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
|
||||||
|
except Exception:
|
||||||
|
logger.warning(traceback.format_exc())
|
||||||
|
ram = "unknown RAM"
|
||||||
|
|
||||||
|
try:
|
||||||
|
# we use multiplier of 1000 because disk vendors specify HDD size
|
||||||
|
# in terms of decimal capacity. Sources:
|
||||||
|
# http://knowledge.seagate.com/articles/en_US/FAQ/172191en
|
||||||
|
# http://physics.nist.gov/cuu/Units/binary.html
|
||||||
|
hd_size = round(
|
||||||
|
float(
|
||||||
|
sum(
|
||||||
|
[d["size"] for d in instance.meta["disks"]]
|
||||||
|
) / 1000000000
|
||||||
|
),
|
||||||
|
1
|
||||||
|
)
|
||||||
|
# if HDD > 100 GB we show it's size in TB
|
||||||
|
if hd_size > 100:
|
||||||
|
hd_size = str(hd_size / 1000) + " TB HDD"
|
||||||
|
else:
|
||||||
|
hd_size = str(hd_size) + " GB HDD"
|
||||||
|
except Exception:
|
||||||
|
logger.warning(traceback.format_exc())
|
||||||
|
hd_size = "unknown HDD"
|
||||||
|
|
||||||
|
cores = str(instance.meta.get('cpu', {}).get('total', "unknown"))
|
||||||
|
notifier.notify(
|
||||||
|
"discover",
|
||||||
|
"New node is discovered: %s CPUs / %s / %s " %
|
||||||
|
(cores, ram, hd_size),
|
||||||
|
node_id=instance.id
|
||||||
|
)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def update(cls, instance, data):
|
||||||
|
data.pop("id", None)
|
||||||
|
|
||||||
|
roles = data.pop("roles", None)
|
||||||
|
pending_roles = data.pop("pending_roles", None)
|
||||||
|
new_meta = data.pop("meta", None)
|
||||||
|
|
||||||
|
#TODO(enchantner): fix this temporary hack in clients
|
||||||
|
if "cluster_id" not in data and "cluster" in data:
|
||||||
|
cluster_id = data.pop("cluster", None)
|
||||||
|
data["cluster_id"] = cluster_id
|
||||||
|
|
||||||
|
if new_meta:
|
||||||
|
instance.update_meta(new_meta)
|
||||||
|
# smarter check needed
|
||||||
|
cls.update_interfaces(instance)
|
||||||
|
|
||||||
|
new_cluster_id = instance.cluster_id
|
||||||
|
cluster_changed = False
|
||||||
|
if "cluster_id" in data:
|
||||||
|
new_cluster_id = data.pop("cluster_id")
|
||||||
|
if instance.cluster_id:
|
||||||
|
if new_cluster_id is None:
|
||||||
|
# removing node from cluster
|
||||||
|
cluster_changed = True
|
||||||
|
cls.remove_from_cluster(instance)
|
||||||
|
elif new_cluster_id != instance.cluster_id:
|
||||||
|
# changing node cluster to another
|
||||||
|
# (is currently not allowed)
|
||||||
|
raise errors.CannotUpdate(
|
||||||
|
u"Changing cluster on the fly is not allowed"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
if new_cluster_id is not None:
|
||||||
|
                    # assigning node to cluster
                    cluster_changed = True
                    cls.add_into_cluster(instance, new_cluster_id)

        # calculating flags
        roles_changed = (
            roles is not None and set(roles) != set(instance.roles)
        )
        pending_roles_changed = (
            pending_roles is not None and
            set(pending_roles) != set(instance.pending_roles)
        )

        super(Node, cls).update(instance, data)

        if roles_changed:
            cls.update_roles(instance, roles)
        if pending_roles_changed:
            cls.update_pending_roles(instance, pending_roles)

        if any((
            roles_changed,
            pending_roles_changed,
            cluster_changed
        )) and instance.status not in (
            consts.NODE_STATUSES.provisioning,
            consts.NODE_STATUSES.deploying
        ):
            cls.update_volumes(instance)

        return instance

    @classmethod
    def update_roles(cls, instance, new_roles):
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.name or instance.id
                )
            )
            return

        instance.role_list = db().query(models.Role).filter_by(
            release_id=instance.cluster.release_id,
        ).filter(
            models.Role.name.in_(new_roles)
        ).all()
        db().flush()
        db().refresh(instance)

    @classmethod
    def update_pending_roles(cls, instance, new_pending_roles):
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.name or instance.id
                )
            )
            return

        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.id,
                new_pending_roles
            )
        )

        if new_pending_roles == []:
            instance.pending_role_list = []
            # research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
        else:
            instance.pending_role_list = db().query(models.Role).filter_by(
                release_id=instance.cluster.release_id,
            ).filter(
                models.Role.name.in_(new_pending_roles)
            ).all()

        db().flush()
        db().refresh(instance)

    @classmethod
    def add_into_cluster(cls, instance, cluster_id):
        instance.cluster_id = cluster_id
        db().flush()
        network_manager = instance.cluster.network_manager
        network_manager.assign_networks_by_default(instance)

    @classmethod
    def remove_from_cluster(cls, instance):
        Cluster.clear_pending_changes(
            instance.cluster,
            node_id=instance.id
        )
        instance.cluster_id = None
        instance.roles = instance.pending_roles = []
        instance.reset_name_to_default()
        db().flush()
        db().refresh(instance)
        NetworkManager.clear_assigned_networks(instance)

    @classmethod
    def to_dict(cls, instance, fields=None):
        node_dict = super(Node, cls).to_dict(instance, fields=fields)
        ips_mapped = NetworkManager.get_grouped_ips_by_node()
        networks_grouped = NetworkManager.get_networks_grouped_by_cluster()

        node_dict['network_data'] = NetworkManager.get_node_networks_optimized(
            instance,
            ips_mapped.get(instance.id, []),
            networks_grouped.get(instance.cluster_id, [])
        )
        return node_dict


class NodeCollection(NailgunCollection):

    single = Node
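As an illustrative sketch only (not part of the commit), this is roughly how a handler or script could drive the new Node object; the node id, cluster id and role name below are made up:

    from nailgun.objects import Node

    node = Node.get_by_uid(42)              # made-up node id
    Node.update(node, {
        "cluster_id": 1,                    # handled via add_into_cluster()
        "pending_roles": ["controller"]     # handled via update_pending_roles()
    })
    node_json = Node.to_json(node)          # includes 'network_data' from to_dict()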
@@ -77,5 +77,5 @@ class TaskCollection(NailgunCollection):
     @classmethod
     def get_by_cluster_id(cls, cluster_id):
         if cluster_id == '':
-            return cls.filter_by(cluster_id=None)
+            return cls.filter_by(None, cluster_id=None)
-        return cls.filter_by(cluster_id=cluster_id)
+        return cls.filter_by(None, cluster_id=cluster_id)
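The extra leading argument mirrors the updated NailgunCollection.filter_by signature, which now appears to take an optional base query/iterable to filter and to fall back to the whole collection when None is passed. A minimal sketch of that assumed helper (names inferred from the calls above, not copied from the source):

    @classmethod
    def filter_by(cls, iterable, **kwargs):
        # use the given query if provided, otherwise start from all objects
        use_iterable = iterable if iterable is not None else cls.all()
        return use_iterable.filter_by(**kwargs)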
@@ -453,8 +453,10 @@ class CheckBeforeDeploymentTask(object):
                     node.volume_manager.check_disk_space_for_deployment()
         except errors.NotEnoughFreeSpace:
             raise errors.NotEnoughFreeSpace(
-                u"Node '%s' has insufficient disk space" %
-                node.human_readable_name)
+                u"Node '{0}' has insufficient disk space".format(
+                    node.human_readable_name
+                )
+            )

     @classmethod
     def _check_volumes(cls, task):
@@ -46,7 +46,6 @@ from nailgun.logger import logger

 from nailgun.db.sqlalchemy.fixman import load_fixture
 from nailgun.db.sqlalchemy.fixman import upload_fixture
-from nailgun.db.sqlalchemy.models import Node
 from nailgun.db.sqlalchemy.models import NodeAttributes
 from nailgun.db.sqlalchemy.models import NodeNICInterface
 from nailgun.db.sqlalchemy.models import Notification
@@ -55,6 +54,7 @@ from nailgun.db.sqlalchemy.models import Task

 # here come objects
 from nailgun.objects import Cluster
+from nailgun.objects import Node
 from nailgun.objects import Release

 from nailgun.app import build_app
@@ -250,7 +250,7 @@ class Environment(object):
             return None
         self.tester.assertEquals(resp.status_code, expect_http)
         node = json.loads(resp.body)
-        node_db = self.db.query(Node).get(node['id'])
+        node_db = Node.get_by_uid(node['id'])
         if 'interfaces' not in node_data['meta'] \
                 or not node_data['meta']['interfaces']:
             self._set_interfaces_if_not_set_in_meta(
@@ -258,25 +258,8 @@ class Environment(object):
                 kwargs.get('meta', None))
             self.nodes.append(node_db)
         else:
-            node = Node()
-            node.timestamp = datetime.now()
-            if 'cluster_id' in node_data:
-                cluster_id = node_data.pop('cluster_id')
-                for cluster in self.clusters:
-                    if cluster.id == cluster_id:
-                        node.cluster = cluster
-                        break
-                else:
-                    node.cluster_id = cluster_id
-            for key, value in node_data.iteritems():
-                setattr(node, key, value)
-            node.attributes = self.create_attributes()
-            node.attributes.volumes = node.volume_manager.gen_volumes_info()
-            self.db.add(node)
-            self.db.commit()
-            if node.meta and node.meta.get('interfaces'):
-                self._create_interfaces_from_meta(node)
+            node = Node.create(node_data)
+            db().commit()

             self.nodes.append(node)

         return node
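A minimal sketch (illustrative values, not from the commit) of what the updated fixture path now amounts to: node creation is delegated to the Node object instead of assembling a models.Node by hand:

    node = Node.create({
        "mac": "aa:bb:cc:dd:ee:01",   # made-up MAC
        "status": "discover",
        "meta": {"interfaces": []},
    })
    db().commit()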
@@ -190,8 +190,12 @@ class TestClusterChanges(BaseIntegrationTest):
             name="networks"
         ).all()
         self.assertEquals(len(networks_changes), 1)
+        disks_changes = self.db.query(ClusterChanges).filter_by(
+            name="disks"
+        ).all()
+        self.assertEquals(len(disks_changes), 1)
         all_changes = self.db.query(ClusterChanges).all()
-        self.assertEquals(len(all_changes), 2)
+        self.assertEquals(len(all_changes), 3)

     @fake_tasks(godmode=True)
     def test_role_unassignment_drops_changes(self):
@@ -209,7 +213,7 @@ class TestClusterChanges(BaseIntegrationTest):
         )
         self.app.put(
             reverse("NodeHandler",
-                    kwargs={"node_id": new_node["id"]}),
+                    kwargs={"obj_id": new_node["id"]}),
             json.dumps({
                 "cluster": None,
                 "pending_addition": False,
@@ -217,7 +221,6 @@ class TestClusterChanges(BaseIntegrationTest):
             }),
             headers=self.default_headers
         )

         all_changes = self.db.query(ClusterChanges).filter_by(
             cluster_id=self.env.clusters[0].id,
             node_id=new_node["id"]
@@ -81,11 +81,11 @@ class TestErrors(BaseIntegrationTest):
         self.assertIsNotNone(
             self.db.query(Notification).filter_by(message=err_msg).first()
         )
-        self.assertEqual(
+        self.assertIsNotNone(
             self.db.query(Notification).filter_by(
-                node_id=self.env.nodes[2].id
-            ).first().message,
-            "Failed to deploy node 'Third': I forgot about teapot!"
+                node_id=self.env.nodes[2].id,
+                message="Failed to deploy node 'Third': I forgot about teapot!"
+            ).first()
         )
         self.env.refresh_nodes()
         self.env.refresh_clusters()
@@ -207,14 +207,16 @@ class TestHandlers(BaseIntegrationTest):
             json.dumps([{'id': None,
                          'mac': node.mac,
                          'manufacturer': 'man5'}]),
-            headers=self.default_headers)
+            headers=self.default_headers
+        )
         self.assertEquals(resp.status_code, 200)

         resp = self.app.put(
             reverse('NodeCollectionHandler'),
             json.dumps([{'id': node.id,
                          'manufacturer': 'man6'}]),
-            headers=self.default_headers)
+            headers=self.default_headers
+        )
         self.assertEquals(resp.status_code, 200)

         resp = self.app.put(
@@ -335,7 +337,8 @@ class TestHandlers(BaseIntegrationTest):
         )
         node2_json = {
             "mac": self.env.generate_random_mac(),
-            "meta": self.env.default_metadata()
+            "meta": self.env.default_metadata(),
+            "status": "discover"
         }
         node2_json["meta"]["interfaces"][0]["mac"] = node1.mac
         resp = self.app.post(
@@ -424,7 +427,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         resp = self.app.post(
             reverse('NodeCollectionHandler'),
-            json.dumps({'mac': node.mac}),
+            json.dumps({'mac': node.mac, 'status': 'discover'}),
             headers=self.default_headers,
             expect_errors=True)
         self.assertEquals(409, resp.status_code)
@@ -485,7 +488,7 @@ class TestHandlers(BaseIntegrationTest):
         )[0]['id']

         self.app.delete(
-            reverse('NodeHandler', {'node_id': node_id})
+            reverse('NodeHandler', {'obj_id': node_id})
         )

         node_name_test(node_mac.lower())
@@ -26,7 +26,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_node_get(self):
         node = self.env.create_node(api=False)
         resp = self.app.get(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             headers=self.default_headers)
         self.assertEquals(200, resp.status_code)
         response = json.loads(resp.body)
@@ -60,7 +60,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_node_deletion(self):
         node = self.env.create_node(api=False)
         resp = self.app.delete(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             "",
             headers=self.default_headers,
             expect_errors=True
@@ -71,7 +71,7 @@ class TestHandlers(BaseIntegrationTest):
         new_metadata = self.env.default_metadata()
         node = self.env.create_node(api=False)
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps({'meta': new_metadata}),
             headers=self.default_headers)
         self.assertEquals(resp.status_code, 200)
@@ -87,7 +87,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         params = {'status': 'error'}
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps(params),
             headers=self.default_headers)
         self.assertEquals(resp.status_code, 200)
@@ -97,7 +97,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         for flag in flags:
             resp = self.app.put(
-                reverse('NodeHandler', kwargs={'node_id': node.id}),
+                reverse('NodeHandler', kwargs={'obj_id': node.id}),
                 json.dumps({flag: True}),
                 headers=self.default_headers
             )
@@ -113,7 +113,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_put_returns_400_if_no_body(self):
         node = self.env.create_node(api=False)
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             "",
             headers=self.default_headers,
             expect_errors=True)
@@ -123,7 +123,7 @@ class TestHandlers(BaseIntegrationTest):
         node = self.env.create_node(api=False)
         params = {'status': 'invalid_status'}
         resp = self.app.put(
-            reverse('NodeHandler', kwargs={'node_id': node.id}),
+            reverse('NodeHandler', kwargs={'obj_id': node.id}),
             json.dumps(params),
             headers=self.default_headers,
             expect_errors=True)
@@ -318,7 +318,7 @@ class TestVerifyNetworks(BaseIntegrationTest):
                           u'absent_vlans': [100, 101, 102, 103, 104],
                           u'interface': 'eth0',
                           u'mac': node2.interfaces[0].mac,
-                          u'name': None,
+                          u'name': 'Untitled ({0})'.format(node2.mac[-5:].lower()),
                           u'uid': node2.id}])

     def test_verify_networks_resp_forgotten_node_error(self):
@@ -26,7 +26,7 @@ class TestHandlers(BaseIntegrationTest):
     def test_all_api_urls_404_or_405(self):
         urls = {
             'ClusterHandler': {'obj_id': 1},
-            'NodeHandler': {'node_id': 1},
+            'NodeHandler': {'obj_id': 1},
             'ReleaseHandler': {'obj_id': 1},
         }
         for handler in urls:
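Renaming the URL keyword from node_id to obj_id brings NodeHandler in line with the generic SingleHandler routing already used by ClusterHandler and ReleaseHandler. Roughly, the corresponding URL-map entry would look like the sketch below (a hypothetical fragment; the exact pattern in nailgun.api.urls may differ):

    urls = (
        r'/nodes/(?P<obj_id>\d+)/?$',
        'NodeHandler',
    )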
@@ -48,7 +48,7 @@ class TestNodeDeletion(BaseIntegrationTest):
         resp = self.app.delete(
             reverse(
                 'NodeHandler',
-                kwargs={'node_id': node.id}),
+                kwargs={'obj_id': node.id}),
             headers=self.default_headers
         )
         self.assertEquals(204, resp.status_code)
@@ -78,7 +78,11 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
             self.assertEqual(len(disk['volumes']), 0)

     def test_volumes_regeneration_after_roles_update(self):
-        self.create_node(roles=[], pending_roles=['compute'])
+        self.env.create(
+            nodes_kwargs=[
+                {"roles": [], "pending_roles": ['compute']}
+            ]
+        )
         node_db = self.env.nodes[0]
         original_roles_response = self.get(node_db.id)
@@ -116,7 +116,7 @@ class TestHandlers(BaseIntegrationTest):
         )
         self.assertEquals(resp.status_code, 200)
         resp = self.app.get(
-            reverse('NodeHandler', kwargs={'node_id': node['id']}),
+            reverse('NodeHandler', kwargs={'obj_id': node['id']}),
             headers=self.default_headers
         )
         ifaces = json.loads(resp.body)['meta']['interfaces']
@@ -918,8 +918,12 @@ class VolumeManager(object):
         disks_space = sum([d.size for d in self.disks])
         minimal_installation_size = self.__calc_minimal_installation_size()

-        self.__logger('Checking disks space: disks space %s, minimal size %s' %
-                      (disks_space, minimal_installation_size))
+        self.__logger(
+            'Checking disks space: disks space {0}, minimal size {1}'.format(
+                disks_space,
+                minimal_installation_size
+            )
+        )

         if disks_space < minimal_installation_size:
             raise errors.NotEnoughFreeSpace()