Cluster object

Related to blueprint nailgun-objects-flow

Change-Id: I3dd4c13c8f269cc81a97aa8a3803cd036f56e9bf
This commit is contained in:
Nikolay Markov 2014-03-19 15:37:12 +04:00
parent 94fdcceedf
commit e568fb7dec
23 changed files with 348 additions and 241 deletions

View File

@ -18,22 +18,22 @@
Handlers dealing with clusters
"""
import json
import traceback
from nailgun.api.handlers.base import BaseHandler
from nailgun.api.handlers.base import DeferredTaskHandler
from nailgun.api.handlers.base import CollectionHandler
from nailgun.api.handlers.base import SingleHandler
from nailgun import objects
from nailgun.api.handlers.base import content_json
from nailgun.api.validators.cluster import AttributesValidator
from nailgun.api.validators.cluster import ClusterValidator
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Attributes
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import Release
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.task.manager import ApplyChangesTaskManager
from nailgun.task.manager import ClusterDeletionManager
@ -42,81 +42,21 @@ from nailgun.task.manager import StopDeploymentTaskManager
from nailgun import utils
class ClusterHandler(BaseHandler):
class ClusterHandler(SingleHandler):
"""Cluster single handler
"""
fields = (
"id",
"name",
"mode",
"changes",
"status",
"grouping",
"is_customized",
"net_provider",
"net_segment_type",
"release_id"
)
model = Cluster
single = objects.Cluster
validator = ClusterValidator
@content_json
def GET(self, cluster_id):
""":returns: JSONized Cluster object.
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return self.render(cluster)
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Cluster object.
:http: * 200 (OK)
* 400 (invalid cluster data specified)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
data = self.checked_data(cluster_id=cluster_id)
network_manager = cluster.network_manager
for key, value in data.iteritems():
if key == "nodes":
# TODO(NAME): separate nodes
#for deletion and addition by set().
new_nodes = db().query(Node).filter(
Node.id.in_(value)
)
nodes_to_remove = [n for n in cluster.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in cluster.nodes]
for node in nodes_to_add:
if not node.online:
raise self.http(
400, "Can not add offline node to cluster"
)
map(cluster.nodes.remove, nodes_to_remove)
map(cluster.nodes.append, nodes_to_add)
for node in nodes_to_remove:
network_manager.clear_assigned_networks(node)
for node in nodes_to_add:
network_manager.assign_networks_by_default(node)
else:
setattr(cluster, key, value)
db().commit()
return self.render(cluster)
@content_json
def DELETE(self, cluster_id):
def DELETE(self, obj_id):
""":returns: {}
:http: * 202 (cluster deletion process launched)
* 400 (failed to execute cluster deletion process)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
cluster = self.get_object_or_404(self.single.model, obj_id)
task_manager = ClusterDeletionManager(cluster_id=cluster.id)
try:
logger.debug('Trying to execute cluster deletion task')
@ -130,90 +70,13 @@ class ClusterHandler(BaseHandler):
raise self.http(202, '{}')
class ClusterCollectionHandler(BaseHandler):
class ClusterCollectionHandler(CollectionHandler):
"""Cluster collection handler
"""
collection = objects.ClusterCollection
validator = ClusterValidator
@content_json
def GET(self):
""":returns: Collection of JSONized Cluster objects.
:http: * 200 (OK)
"""
return map(
ClusterHandler.render,
db().query(Cluster).all()
)
@content_json
def POST(self):
""":returns: JSONized Cluster object.
:http: * 201 (cluster successfully created)
* 400 (invalid cluster data specified)
* 409 (cluster with such parameters already exists)
"""
# It's used for cluster creating only.
data = self.checked_data()
cluster = Cluster()
cluster.release = db().query(Release).get(data["release"])
# TODO(NAME): use fields
for field in (
"name",
"mode",
"net_provider",
"net_segment_type",
"status"
):
if data.get(field):
setattr(cluster, field, data.get(field))
db().add(cluster)
db().commit()
attributes = Attributes(
editable=cluster.release.attributes_metadata.get("editable"),
generated=cluster.release.attributes_metadata.get("generated"),
cluster=cluster
)
attributes.generate_fields()
netmanager = cluster.network_manager
try:
netmanager.create_network_groups(cluster.id)
if cluster.net_provider == 'neutron':
netmanager.create_neutron_config(cluster)
cluster.add_pending_changes("attributes")
cluster.add_pending_changes("networks")
if 'nodes' in data and data['nodes']:
nodes = db().query(Node).filter(
Node.id.in_(data['nodes'])
).all()
map(cluster.nodes.append, nodes)
db().commit()
for node in nodes:
netmanager.assign_networks_by_default(node)
raise self.http(
201, json.dumps(ClusterHandler.render(cluster), indent=4)
)
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR,
errors.InvalidNetworkPool
) as e:
# Cluster was created in this request,
# so we don't need to use ClusterDeletionManager.
# All relations will be cascade-deleted automatically.
# TODO(NAME): investigate transactions
db().delete(cluster)
raise self.http(400, e.message)
class ClusterChangesHandler(DeferredTaskHandler):

View File

@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.serializers.base import BasicSerializer
class ClusterSerializer(BasicSerializer):
    """REST serializer for Cluster objects.

    Only the attribute names listed in ``fields`` are included in the
    serialized representation produced by the base serializer.
    """

    fields = (
        "id", "name", "mode", "changes", "status",
        "grouping", "is_customized", "net_provider",
        "net_segment_type", "release_id",
    )

View File

@ -89,7 +89,7 @@ urls = (
r'/clusters/?$',
ClusterCollectionHandler,
r'/clusters/(?P<cluster_id>\d+)/?$',
r'/clusters/(?P<obj_id>\d+)/?$',
ClusterHandler,
r'/clusters/(?P<cluster_id>\d+)/changes/?$',
ClusterChangesHandler,

View File

@ -22,9 +22,8 @@ from nailgun.errors import errors
class ClusterValidator(BasicValidator):
@classmethod
def validate(cls, data, **kwargs):
def _validate_common(cls, data):
d = cls.validate_json(data)
cluster_id = kwargs.get("cluster_id") or d.get("id")
if d.get("name"):
if db().query(Cluster).filter_by(
name=d["name"]
@ -37,18 +36,24 @@ class ClusterValidator(BasicValidator):
release = db().query(Release).get(d.get("release"))
if not release:
raise errors.InvalidData(
"Invalid release id",
"Invalid release ID",
log_message=True
)
return d
@classmethod
def validate(cls, data):
return cls._validate_common(data)
@classmethod
def validate_update(cls, data, instance):
d = cls._validate_common(data)
for k in ("net_provider", "net_segment_type"):
if k in d and getattr(instance, k) != d[k]:
raise errors.InvalidData(
u"Changing '{0}' for environment is prohibited".format(k),
log_message=True
)
if cluster_id:
cluster = db().query(Cluster).get(cluster_id)
if cluster:
for k in ("net_provider", "net_segment_type"):
if k in d and getattr(cluster, k) != d[k]:
raise errors.InvalidData(
"Change of '%s' is prohibited" % k,
log_message=True
)
return d

View File

@ -30,6 +30,47 @@ RELEASE_STATES = Enum(
'available'
)
# Allowed values for Cluster.mode (deployment mode column).
CLUSTER_MODES = Enum('multinode', 'ha_full', 'ha_compact')

# Allowed values for Cluster.status.
CLUSTER_STATUSES = Enum(
    'new', 'deployment', 'stopped', 'operational', 'error', 'remove')

# Allowed values for Cluster.net_manager (nova-network managers).
CLUSTER_NET_MANAGERS = Enum('FlatDHCPManager', 'VlanManager')

# Allowed values for Cluster.grouping.
CLUSTER_GROUPING = Enum('roles', 'hardware', 'both')

# Allowed values for Cluster.net_provider.
CLUSTER_NET_PROVIDERS = Enum('nova_network', 'neutron')

# Allowed values for Cluster.net_l23_provider.
CLUSTER_NET_L23_PROVIDERS = Enum('ovs')

# Allowed values for Cluster.net_segment_type.
CLUSTER_NET_SEGMENT_TYPES = Enum('none', 'vlan', 'gre')
NETWORK_INTERFACE_TYPES = Enum(
'ether',
'bond'

View File

@ -25,6 +25,8 @@ from sqlalchemy import Integer
from sqlalchemy import Unicode
from sqlalchemy.orm import relationship, backref
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
@ -52,57 +54,42 @@ class ClusterChanges(Base):
class Cluster(Base):
__tablename__ = 'clusters'
MODES = ('multinode', 'ha_full', 'ha_compact')
STATUSES = (
'new',
'deployment',
'stopped',
'operational',
'error',
'remove'
)
NET_MANAGERS = ('FlatDHCPManager', 'VlanManager')
GROUPING = ('roles', 'hardware', 'both')
# Neutron-related
NET_PROVIDERS = ('nova_network', 'neutron')
NET_L23_PROVIDERS = ('ovs',)
NET_SEGMENT_TYPES = ('none', 'vlan', 'gre')
id = Column(Integer, primary_key=True)
mode = Column(
Enum(*MODES, name='cluster_mode'),
Enum(*consts.CLUSTER_MODES, name='cluster_mode'),
nullable=False,
default='multinode'
default=consts.CLUSTER_MODES.multinode
)
status = Column(
Enum(*STATUSES, name='cluster_status'),
Enum(*consts.CLUSTER_STATUSES, name='cluster_status'),
nullable=False,
default='new'
default=consts.CLUSTER_STATUSES.new
)
net_provider = Column(
Enum(*NET_PROVIDERS, name='net_provider'),
Enum(*consts.CLUSTER_NET_PROVIDERS, name='net_provider'),
nullable=False,
default='nova_network'
default=consts.CLUSTER_NET_PROVIDERS.nova_network
)
net_l23_provider = Column(
Enum(*NET_L23_PROVIDERS, name='net_l23_provider'),
Enum(*consts.CLUSTER_NET_L23_PROVIDERS, name='net_l23_provider'),
nullable=False,
default='ovs'
default=consts.CLUSTER_NET_L23_PROVIDERS.ovs
)
net_segment_type = Column(
Enum(*NET_SEGMENT_TYPES,
Enum(*consts.CLUSTER_NET_SEGMENT_TYPES,
name='net_segment_type'),
nullable=False,
default='vlan'
default=consts.CLUSTER_NET_SEGMENT_TYPES.vlan
)
net_manager = Column(
Enum(*NET_MANAGERS, name='cluster_net_manager'),
Enum(*consts.CLUSTER_NET_MANAGERS, name='cluster_net_manager'),
nullable=False,
default='FlatDHCPManager'
default=consts.CLUSTER_NET_MANAGERS.FlatDHCPManager
)
grouping = Column(
Enum(*GROUPING, name='cluster_grouping'),
Enum(*consts.CLUSTER_GROUPING, name='cluster_grouping'),
nullable=False,
default='roles'
default=consts.CLUSTER_GROUPING.roles
)
name = Column(Unicode(50), unique=True, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
@ -197,7 +184,7 @@ class Cluster(Base):
if node_id:
ch.node_id = node_id
db().add(ch)
db().commit()
db().flush()
def clear_pending_changes(self, node_id=None):
chs = db().query(ClusterChanges).filter_by(
@ -206,7 +193,7 @@ class Cluster(Base):
if node_id:
chs = chs.filter_by(node_id=node_id)
map(db().delete, chs.all())
db().commit()
db().flush()
@property
def network_manager(self):
@ -249,7 +236,7 @@ class Attributes(Base):
def generate_fields(self):
self.generated = self.traverse(self.generated)
db().add(self)
db().commit()
db().flush()
@classmethod
def traverse(cls, cdict):

View File

@ -24,6 +24,7 @@ default_messages = {
# REST errors
"CannotDelete": "Can't delete object",
"CannotCreate": "Can't create object",
"InvalidField": "Invalid field specified for object",
# node discovering errors

View File

@ -103,7 +103,7 @@ class NetworkManager(object):
IPAddr.network == nw_group.id
).all()
map(db().delete, ips)
db().commit()
db().flush()
@classmethod
def assign_admin_ips(cls, node_id, num=1):
@ -377,7 +377,7 @@ class NetworkManager(object):
for nic in node.interfaces:
while nic.assigned_networks_list:
nic.assigned_networks_list.pop()
db().commit()
db().flush()
@classmethod
def get_default_networks_assignment(cls, node):
@ -464,7 +464,7 @@ class NetworkManager(object):
nics[nic['id']].assigned_networks_list = list(
db().query(NetworkGroup).filter(
NetworkGroup.id.in_(ng_ids)))
db().commit()
db().flush()
@classmethod
def get_cluster_networkgroups_by_node(cls, node):
@ -1047,10 +1047,10 @@ class NetworkManager(object):
meta=net
)
db().add(nw_group)
db().commit()
db().flush()
if net.get("notation"):
nw_group.ip_ranges.append(new_ip_range)
db().commit()
db().flush()
cls.cleanup_network_group(nw_group)
@classmethod

View File

@ -123,7 +123,7 @@ class NeutronManager(NetworkManager):
.items():
setattr(cluster.neutron_config, key, value)
db().add(cluster.neutron_config)
db().commit()
db().flush()
@classmethod
def generate_vlan_ids_list(cls, data, cluster, ng):

View File

@ -20,5 +20,8 @@ from nailgun.objects.base import NailgunCollection
from nailgun.objects.release import Release
from nailgun.objects.release import ReleaseCollection
from nailgun.objects.cluster import Cluster
from nailgun.objects.cluster import ClusterCollection
from nailgun.objects.task import Task
from nailgun.objects.task import TaskCollection

View File

@ -0,0 +1,172 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun import consts
from nailgun.api.serializers.cluster import ClusterSerializer
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
class Cluster(NailgunObject):
    """Business object wrapping the ``clusters`` SQLAlchemy model.

    Encapsulates creation/update logic that was previously spread across
    the REST handlers: attribute generation, network group creation and
    node (re)assignment.
    """

    # SQLAlchemy model this object manages.
    model = models.Cluster

    # Serializer used to produce the REST representation.
    serializer = ClusterSerializer

    # JSON schema (draft-04) describing the serialized form; enum values
    # are taken from the shared consts module so they stay in sync with
    # the DB column definitions.
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "Cluster",
        "description": "Serialized Cluster object",
        "type": "object",
        "properties": {
            "id": {"type": "number"},
            "name": {"type": "string"},
            "mode": {
                "type": "string",
                "enum": list(consts.CLUSTER_MODES)
            },
            "status": {
                "type": "string",
                "enum": list(consts.CLUSTER_STATUSES)
            },
            "net_provider": {
                "type": "string",
                "enum": list(consts.CLUSTER_NET_PROVIDERS)
            },
            "net_l23_provider": {
                "type": "string",
                "enum": list(consts.CLUSTER_NET_L23_PROVIDERS)
            },
            "net_segment_type": {
                "type": "string",
                "enum": list(consts.CLUSTER_NET_SEGMENT_TYPES)
            },
            "net_manager": {
                "type": "string",
                "enum": list(consts.CLUSTER_NET_MANAGERS)
            },
            "grouping": {
                "type": "string",
                "enum": list(consts.CLUSTER_GROUPING)
            },
            "release_id": {"type": "number"},
            "dns_nameservers": {"type": "array"},
            "replaced_deployment_info": {"type": "object"},
            "replaced_provisioning_info": {"type": "object"},
            "is_customized": {"type": "boolean"}
        }
    }

    @classmethod
    def create(cls, data):
        """Create a new cluster together with its derived objects.

        Creates the cluster row, its generated/editable ``Attributes``,
        its network groups (and neutron config for the neutron provider),
        records pending "attributes"/"networks" changes, and assigns any
        nodes listed under ``data["nodes"]``.

        :param data: dict of cluster fields; may use legacy "release"
            key instead of "release_id", and an optional "nodes" list
            of node IDs
        :returns: created Cluster model instance
        :raises: errors.CannotCreate if network resources cannot be
            allocated (the partially-created cluster is deleted)
        """
        #TODO(enchantner): fix this temporary hack in clients
        # Older clients send "release" instead of "release_id".
        if "release_id" not in data:
            release_id = data.pop("release", None)
            data["release_id"] = release_id

        # Node IDs must not reach the base create() — they are applied
        # separately via update_nodes() below.
        assign_nodes = data.pop("nodes", [])

        with db().begin(subtransactions=True):
            new_cluster = super(Cluster, cls).create(data)

            # Seed cluster attributes from the release metadata, then
            # fill in the generated values.
            attributes = models.Attributes(
                editable=new_cluster.release.attributes_metadata.get(
                    "editable"
                ),
                generated=new_cluster.release.attributes_metadata.get(
                    "generated"
                ),
                cluster=new_cluster
            )
            attributes.generate_fields()

            netmanager = new_cluster.network_manager

            try:
                netmanager.create_network_groups(new_cluster.id)
                if new_cluster.net_provider == 'neutron':
                    netmanager.create_neutron_config(new_cluster)

                new_cluster.add_pending_changes("attributes")
                new_cluster.add_pending_changes("networks")

                if assign_nodes:
                    cls.update_nodes(new_cluster, assign_nodes)
            except (
                errors.OutOfVLANs,
                errors.OutOfIPs,
                errors.NoSuitableCIDR,
                errors.InvalidNetworkPool
            ) as exc:
                # The cluster was created in this call, so related rows
                # are removed by cascade; no ClusterDeletionManager needed.
                db().delete(new_cluster)
                raise errors.CannotCreate(exc.message)

        return new_cluster

    @classmethod
    def update(cls, instance, data):
        """Update cluster fields and, optionally, its node list.

        :param instance: Cluster model instance
        :param data: dict of fields to update; an optional "nodes" key
            (list of node IDs) replaces the cluster's node set —
            ``None`` means "leave nodes untouched", ``[]`` removes all
        :returns: the updated instance
        """
        nodes = data.pop("nodes", None)
        super(Cluster, cls).update(instance, data)
        if nodes is not None:
            cls.update_nodes(instance, nodes)
        return instance

    @classmethod
    def update_nodes(cls, instance, nodes_ids):
        """Make the cluster's node set equal to ``nodes_ids``.

        Nodes not in the list are removed (and their network assignments
        cleared); new nodes are added and get default network
        assignments.

        :param instance: Cluster model instance
        :param nodes_ids: iterable of node IDs the cluster should have
        :raises: errors.NodeOffline if a node being added is offline
        """
        with db().begin(subtransactions=True):
            # TODO(NAME): separate nodes
            # for deletion and addition by set().
            new_nodes = []
            if nodes_ids:
                new_nodes = db().query(models.Node).filter(
                    models.Node.id.in_(nodes_ids)
                )

            nodes_to_remove = [n for n in instance.nodes
                               if n not in new_nodes]
            nodes_to_add = [n for n in new_nodes
                            if n not in instance.nodes]

            # Validate before mutating anything.
            for node in nodes_to_add:
                if not node.online:
                    raise errors.NodeOffline(
                        u"Cannot add offline node "
                        u"'{0}' to environment".format(node.id)
                    )

            # Py2 idiom: map() used for its side effects only.
            map(instance.nodes.remove, nodes_to_remove)
            map(instance.nodes.append, nodes_to_add)

            map(
                instance.network_manager.clear_assigned_networks,
                nodes_to_remove
            )
            map(
                instance.network_manager.assign_networks_by_default,
                nodes_to_add
            )
class ClusterCollection(NailgunCollection):
    """Collection counterpart of the Cluster object."""

    # Object class this collection operates on.
    single = Cluster

View File

@ -57,7 +57,7 @@ class TestClusterChanges(BaseIntegrationTest):
resp = self.app.get(
reverse(
'ClusterHandler',
kwargs={'cluster_id': cluster['id']}),
kwargs={'obj_id': cluster['id']}),
headers=self.default_headers
)
response = json.loads(resp.body)

View File

@ -71,7 +71,7 @@ class TestCharsetIssues(BaseIntegrationTest):
self.app.delete(
reverse(
'ClusterHandler',
kwargs={'cluster_id': cluster_id}),
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
timeout = 10

View File

@ -30,7 +30,7 @@ class TestHandlers(BaseIntegrationTest):
def delete(self, cluster_id):
return self.app.delete(
reverse('ClusterHandler', kwargs={'cluster_id': cluster_id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster_id}),
'',
headers=self.default_headers
)
@ -38,7 +38,7 @@ class TestHandlers(BaseIntegrationTest):
def test_cluster_get(self):
cluster = self.env.create_cluster(api=False)
resp = self.app.get(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
headers=self.default_headers
)
self.assertEquals(200, resp.status_code)
@ -70,7 +70,7 @@ class TestHandlers(BaseIntegrationTest):
clusters_before = len(self.db.query(Cluster).all())
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'name': updated_name}),
headers=self.default_headers
)
@ -89,7 +89,7 @@ class TestHandlers(BaseIntegrationTest):
cluster = self.env.create_cluster(api=False)
self.assertEquals(cluster.net_manager, "FlatDHCPManager")
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'net_manager': 'VlanManager'}),
headers=self.default_headers
)
@ -101,13 +101,16 @@ class TestHandlers(BaseIntegrationTest):
cluster = self.env.create_cluster(api=False)
self.assertEquals(cluster.net_provider, "nova_network")
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'net_provider': 'neutron'}),
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body, "Change of 'net_provider' is prohibited")
self.assertEquals(
resp.body,
"Changing 'net_provider' for environment is prohibited"
)
def test_cluster_update_fails_on_net_segment_type_change(self):
cluster = self.env.create_cluster(
@ -117,23 +120,26 @@ class TestHandlers(BaseIntegrationTest):
)
self.assertEquals(cluster.net_provider, "neutron")
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'net_segment_type': 'vlan'}),
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 400)
self.assertEquals(resp.body,
"Change of 'net_segment_type' is prohibited")
self.assertEquals(
resp.body,
"Changing 'net_segment_type' for environment is prohibited"
)
def test_cluster_node_list_update(self):
node1 = self.env.create_node(api=False)
node2 = self.env.create_node(api=False)
cluster = self.env.create_cluster(api=False)
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'nodes': [node1.id]}),
headers=self.default_headers
headers=self.default_headers,
expect_errors=True
)
self.assertEquals(resp.status_code, 200)
@ -142,7 +148,7 @@ class TestHandlers(BaseIntegrationTest):
self.assertEquals(nodes[0].id, node1.id)
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster.id}),
reverse('ClusterHandler', kwargs={'obj_id': cluster.id}),
json.dumps({'nodes': [node2.id]}),
headers=self.default_headers
)

View File

@ -65,7 +65,7 @@ class TestClusterHandlers(BaseIntegrationTest):
node = self.env.create_node(api=True, meta=meta, mac=mac)
cluster = self.env.create_cluster(api=True, nodes=[node['id']])
resp = self.app.put(
reverse('ClusterHandler', kwargs={'cluster_id': cluster['id']}),
reverse('ClusterHandler', kwargs={'obj_id': cluster['id']}),
json.dumps({'nodes': []}),
headers=self.default_headers
)

View File

@ -21,7 +21,6 @@ import json
from netaddr import IPRange
from nailgun.consts import OVS_BOND_MODES
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import IPAddrRange
from nailgun.db.sqlalchemy.models import NetworkGroup
@ -50,7 +49,7 @@ class OrchestratorSerializerTestBase(BaseIntegrationTest):
self.assertEquals(len(self.filter_by_role(nodes, role)), count)
def get_controllers(self, cluster_id):
return db().query(Node).\
return self.db.query(Node).\
filter_by(cluster_id=cluster_id,
pending_deletion=False).\
filter(Node.role_list.any(name='controller')).\
@ -526,7 +525,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
# Remove 'vlan_splinters' attribute and check results.
editable_attrs.pop('vlan_splinters', None)
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -545,7 +544,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
# Set 'vlan_splinters' to 'some_text' and check results.
editable_attrs['vlan_splinters'] = {'value': 'some_text'}
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -565,7 +564,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
# Set 'vlan_splinters' to 'disabled' and check results.
editable_attrs['vlan_splinters'] = {'value': 'disabled'}
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -591,7 +590,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
#value of kernel-ml should end up with vlan_splinters = off
editable_attrs['vlan_splinters'] = {'value': 'kernel_lt'}
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -617,7 +616,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
editable_attrs.setdefault(
'vlan_splinters', {'value': 'hard'}
)['value'] = 'hard'
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -647,7 +646,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
editable_attrs.setdefault(
'vlan_splinters', {'value': 'hard'}
)['value'] = 'hard'
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()
@ -682,7 +681,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
editable_attrs.setdefault(
'vlan_splinters', {'value': 'soft'}
)['value'] = 'soft'
db.refresh(cluster.attributes)
self.db.refresh(cluster.attributes)
cluster.attributes.editable = editable_attrs
self.db.commit()

View File

@ -15,7 +15,6 @@
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.orchestrator.provisioning_serializers import serialize
@ -40,7 +39,9 @@ class TestProvisioningSerializer(BaseIntegrationTest):
serialized_cluster = serialize(cluster_db, cluster_db.nodes)
for node in serialized_cluster['nodes']:
node_db = db().query(Node).filter_by(fqdn=node['hostname']).first()
node_db = self.db.query(Node).filter_by(
fqdn=node['hostname']
).first()
self.assertEquals(
node['kernel_options']['netcfg/choose_interface'],
node_db.admin_interface.mac)

View File

@ -18,7 +18,6 @@ import json
from sqlalchemy.exc import IntegrityError
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Role
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
@ -108,10 +107,10 @@ class TestRoles(BaseIntegrationTest):
release_id=release_json["id"])
added = True
try:
db().add(role)
db().commit()
self.db.add(role)
self.db.commit()
except IntegrityError:
db.rollback()
self.db.rollback()
added = False
self.assertFalse(added)

View File

@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy.models.task import Task
from nailgun.test.base import BaseIntegrationTest
@ -51,7 +50,7 @@ class TestStopDeployment(BaseIntegrationTest):
stop_task = self.env.stop_deployment()
self.env.wait_ready(stop_task, 60)
self.assertIsNone(
db().query(Task).filter_by(
self.db.query(Task).filter_by(
uuid=deploy_task_uuid
).first()
)

View File

@ -205,7 +205,7 @@ class TestTaskManagers(BaseIntegrationTest):
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
kwargs={'obj_id': self.env.clusters[0].id}),
headers=self.default_headers
)
self.assertEquals(202, resp.status_code)
@ -246,7 +246,7 @@ class TestTaskManagers(BaseIntegrationTest):
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'cluster_id': cluster_id}),
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
self.assertEquals(202, resp.status_code)
@ -291,7 +291,7 @@ class TestTaskManagers(BaseIntegrationTest):
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'cluster_id': cluster_id}),
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
timeout = 120
@ -335,7 +335,7 @@ class TestTaskManagers(BaseIntegrationTest):
resp = self.app.delete(
reverse(
'ClusterHandler',
kwargs={'cluster_id': cluster_id}),
kwargs={'obj_id': cluster_id}),
headers=self.default_headers
)
self.assertEquals(202, resp.status_code)

View File

@ -25,7 +25,7 @@ from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
def test_all_api_urls_404_or_405(self):
urls = {
'ClusterHandler': {'cluster_id': 1},
'ClusterHandler': {'obj_id': 1},
'NodeHandler': {'node_id': 1},
'ReleaseHandler': {'obj_id': 1},
}

View File

@ -25,7 +25,6 @@ from mock import patch
import nailgun
from nailgun.api.handlers.logs import read_backwards
from nailgun.db import db
from nailgun.db.sqlalchemy.models import RedHatAccount
from nailgun.db.sqlalchemy.models import Role
from nailgun.errors import errors
@ -394,7 +393,7 @@ class TestLogs(BaseIntegrationTest):
"""
def dump_task_with_bad_model(*args, **kwargs):
db().add(Role())
self.db.add(Role())
raise errors.DumpRunning()
dump_manager().execute.side_effect = dump_task_with_bad_model

View File

@ -16,7 +16,6 @@
from nailgun.errors import errors
from nailgun.test.base import BaseTestCase
from nailgun.db import db
from nailgun.db.sqlalchemy.models import Task
from nailgun.task.helpers import TaskHelper
@ -25,8 +24,8 @@ class TestUtils(BaseTestCase):
def test_get_task_by_uuid_returns_task(self):
task = Task(name='deploy')
db().add(task)
db().commit()
self.db.add(task)
self.db.commit()
task_by_uuid = TaskHelper.get_task_by_uuid(task.uuid)
self.assertEquals(task.uuid, task_by_uuid.uuid)