Remove requirement for ha_compact to have 3 controllers

Remove ha_compact constraint
Remove tests from declaring cluster mode and set ha_compact as default
Reverse order of multinode and ha_compact so that ha_compact becomes
 the default in menus

This deprecates multinode deployments, following the logic that all
 HA deployments can now support single controllers.

A further commit will be needed to remove multinode once this is
 considered stable.

Change-Id: I8a69897aecc450be4ec5c7b4d786ccc1c2fd93aa
Implements: blueprint single-controller-ha
This commit is contained in:
Andrew Woodward 2014-03-11 16:56:39 -07:00
parent 22233679c2
commit eed71d9a2f
23 changed files with 122 additions and 73 deletions

View File

@ -45,14 +45,14 @@ class TestHandlers(BaseTestCase):
expected_stdout = \
[(
"env create --name=TestEnv --release=1",
"Environment 'TestEnv' with id=1, mode=multinode and "
"Environment 'TestEnv' with id=1, mode=ha_compact and "
"network-mode=nova_network was created!\n"
), (
"--env-id=1 env set --name=NewEnv",
"Environment with id=1 was renamed to 'NewEnv'.\n"
), (
"--env-id=1 env set --mode=ha",
"Mode of environment with id=1 was set to 'ha'.\n"
"--env-id=1 env set --mode=multinode",
"Mode of environment with id=1 was set to 'multinode'.\n"
)]
for cmd, msg in expected_stdout:
@ -128,7 +128,7 @@ class TestFiles(BaseTestCase):
deployment_provision_files = {
"--env 1 deployment --default": (
"deployment_1",
"deployment_1/controller_1.yaml",
"deployment_1/primary-controller_1.yaml",
"deployment_1/compute_2.yaml",
"deployment_1/compute_3.yaml"
),

View File

@ -52,7 +52,7 @@ class Cluster(Base):
mode = Column(
Enum(*consts.CLUSTER_MODES, name='cluster_mode'),
nullable=False,
default=consts.CLUSTER_MODES.multinode
default=consts.CLUSTER_MODES.ha_compact
)
status = Column(
Enum(*consts.CLUSTER_STATUSES, name='cluster_status'),

View File

@ -4,10 +4,10 @@
fields:
state: "available"
modes_metadata:
ha_compact:
description: "This configuration Deploys OpenStack ready for high availability (HA). Controller services are prepared for HA by setting up a base MySQL/Galera, RabbitMQ and HAProxy so that additional controllers can be deployed NOW, or scaled out LATER. 3 or more controllers are required for a true HA environment."
multinode:
description: "In this configuration the OpenStack controller is deployed separately from the compute and cinder nodes. This mode assumes the presence of 1 controller node and 1 or more compute/cinder nodes. You can add more nodes to scale your cloud later."
ha_compact:
description: "This configuration requires multiple OpenStack controllers (3+) and provides high availability for all OpenStack components, including MySQL/Galera, RabbitMQ, and Cinder, as well as OpenStack API services. Select this configuration if you want to build a production-grade OpenStack cloud with 6 nodes or more."
roles:
- controller
- compute

View File

@ -440,10 +440,10 @@ class CheckBeforeDeploymentTask(object):
raise errors.NotEnoughControllers(
"Not enough controllers, %s mode requires at least 1 "
"controller" % (cluster_mode))
elif cluster_mode == 'ha_compact' and controllers_count < 3:
elif cluster_mode == 'ha_compact' and controllers_count < 1:
raise errors.NotEnoughControllers(
"Not enough controllers, %s mode requires at least 3 "
"controllers" % (cluster_mode))
"Not enough controllers, %s mode requires at least 1 "
"controller" % (cluster_mode))
@classmethod
def _check_disks(cls, task):

View File

@ -39,9 +39,6 @@ class TestHandlers(BaseIntegrationTest):
@patch('nailgun.rpc.cast')
def test_nova_deploy_cast_with_right_args(self, mocked_rpc):
self.env.create(
cluster_kwargs={
'mode': 'ha_compact'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
@ -310,7 +307,6 @@ class TestHandlers(BaseIntegrationTest):
def test_neutron_deploy_cast_with_right_args(self, mocked_rpc):
self.env.create(
cluster_kwargs={
'mode': 'ha_compact',
'net_provider': 'neutron',
'net_segment_type': 'gre'
},
@ -745,7 +741,7 @@ class TestHandlers(BaseIntegrationTest):
self.env.neutron_networks_get(self.env.clusters[0].id).body)
pub = filter(lambda ng: ng['name'] == 'public',
net_data['networks'])[0]
pub.update({'ip_ranges': [['172.16.0.10', '172.16.0.12'],
pub.update({'ip_ranges': [['172.16.0.10', '172.16.0.13'],
['172.16.0.20', '172.16.0.22']]})
resp = self.env.neutron_networks_put(self.env.clusters[0].id, net_data)
@ -761,7 +757,7 @@ class TestHandlers(BaseIntegrationTest):
n_rpc_deploy = args[1][1]['args']['deployment_info']
self.assertEquals(len(n_rpc_deploy), 5)
pub_ips = ['172.16.0.10', '172.16.0.11', '172.16.0.12',
pub_ips = ['172.16.0.11', '172.16.0.12', '172.16.0.13',
'172.16.0.20', '172.16.0.21']
for n in n_rpc_deploy:
for i, n_common_args in enumerate(n['nodes']):
@ -841,7 +837,7 @@ class TestHandlers(BaseIntegrationTest):
n_rpc_deploy = args[1][1]['args']['deployment_info']
self.assertEquals(len(n_rpc_deploy), 2)
pub_ips = ['172.16.10.10', '172.16.10.11']
pub_ips = ['172.16.10.11', '172.16.10.12']
for n in n_rpc_deploy:
for i, n_common_args in enumerate(n['nodes']):
self.assertEquals(n_common_args['public_address'], pub_ips[i])
@ -937,10 +933,12 @@ class TestHandlers(BaseIntegrationTest):
"Node '%s' has insufficient disk space" %
node_db.human_readable_name)
# TODO(awoodward): Purge multinode
def test_occurs_error_not_enough_controllers_for_multinode(self):
self.env.create(
cluster_kwargs={
'mode': 'multinode'},
'mode': 'multinode'
},
nodes_kwargs=[
{'roles': ['compute'], 'pending_addition': True}])
@ -954,8 +952,6 @@ class TestHandlers(BaseIntegrationTest):
def test_occurs_error_not_enough_controllers_for_ha(self):
self.env.create(
cluster_kwargs={
'mode': 'ha_compact'},
nodes_kwargs=[
{'roles': ['compute'], 'pending_addition': True}])
@ -965,12 +961,10 @@ class TestHandlers(BaseIntegrationTest):
self.assertEquals(
task.message,
'Not enough controllers, ha_compact '
'mode requires at least 3 controllers')
'mode requires at least 1 controller')
def test_occurs_error_not_enough_osds_for_ceph(self):
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
nodes_kwargs=[
{'roles': ['controller', 'ceph-osd'],
'pending_addition': True}])
@ -998,8 +992,6 @@ class TestHandlers(BaseIntegrationTest):
@fake_tasks(godmode=True)
def test_enough_osds_for_ceph(self):
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
nodes_kwargs=[
{'roles': ['controller', 'ceph-osd'],
'pending_addition': True}])

View File

@ -250,7 +250,6 @@ class TestHandlers(BaseIntegrationTest):
def test_cluster_generated_data_handler(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{'pending_addition': True},
{'online': False, 'status': 'ready'}])

View File

@ -0,0 +1,90 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.task.helpers import TaskHelper
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
class TestClusterScaling(BaseIntegrationTest):
    """Integration tests covering controller scaling operations.

    With single-controller HA, a cluster must deploy successfully with
    one controller and tolerate growing or shrinking the controller set.
    """

    def create_env(self, nodes_kwargs):
        # Build a cluster in the default mode with the requested nodes
        # and return its database model (not the API response dict).
        created = self.env.create(nodes_kwargs=nodes_kwargs)
        return self.db.query(Cluster).get(created['id'])

    def filter_by_role(self, nodes, role):
        # Keep only the nodes whose role set includes `role`.
        return filter(lambda node: role in node.all_roles, nodes)

    @fake_tasks()
    def test_deploy_single_controller(self):
        # A lone controller must deploy cleanly (no 3-controller minimum).
        self.create_env(
            nodes_kwargs=[
                {'roles': ['controller'], 'pending_addition': True}])

        deployment = self.env.launch_deployment()
        self.assertEquals(deployment.name, 'deploy')

        self.env.wait_ready(deployment)
        self.assertEquals(deployment.status, 'ready')

    @fake_tasks()
    def test_deploy_grow_controllers(self):
        # Start from one deployed controller and add two more.
        cluster = self.create_env(
            nodes_kwargs=[
                {'roles': ['controller']},
                {'roles': ['controller'], 'pending_addition': True},
                {'roles': ['controller'], 'pending_addition': True}])

        # Only the two new controllers need provisioning...
        pending = TaskHelper.nodes_to_provision(cluster)
        self.assertEquals(len(pending), 2)

        # ...but every controller must re-deploy (re-run puppet) for the
        # new topology.
        redeploy = TaskHelper.nodes_to_deploy(cluster)
        self.assertEquals(len(redeploy), 3)

        deployment = self.env.launch_deployment()
        self.assertEquals(deployment.name, 'deploy')

        self.env.wait_ready(deployment)
        self.assertEquals(deployment.status, 'ready')

        controllers = self.filter_by_role(cluster.nodes, 'controller')
        self.assertEquals(len(controllers), 3)

    @fake_tasks()
    def test_deploy_shrink_controllers(self):
        # Start from three controllers and remove two.
        cluster = self.create_env(
            nodes_kwargs=[
                {'roles': ['controller']},
                {'roles': ['controller'], 'pending_deletion': True},
                {'roles': ['controller'], 'pending_deletion': True}])

        # Exactly the two flagged controllers are scheduled for deletion.
        doomed = TaskHelper.nodes_to_delete(cluster)
        self.assertEquals(len(doomed), 2)

        deployment = self.env.launch_deployment()
        self.assertEquals(deployment.name, 'deploy')

        self.env.wait_ready(deployment)
        self.assertEquals(deployment.status, 'ready')

        controllers = self.filter_by_role(cluster.nodes, 'controller')
        self.assertEquals(len(controllers), 1)

View File

@ -30,7 +30,6 @@ class TestHorizonURL(BaseIntegrationTest):
@fake_tasks(godmode=True)
def test_horizon_url_ha_mode(self):
self.env.create(
cluster_kwargs={"mode": "ha_compact"},
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True},

View File

@ -30,9 +30,6 @@ class TestNetworkModels(BaseIntegrationTest):
@fake_tasks(godmode=True)
def test_cluster_locking_after_deployment(self):
self.env.create(
cluster_kwargs={
"mode": "ha_compact"
},
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True},

View File

@ -82,8 +82,6 @@ class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):
super(TestDefaultOrchestratorInfoHandlers, self).setUp()
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode'},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
@ -166,8 +164,6 @@ class TestSelectedNodesAction(BaseIntegrationTest):
def setUp(self):
super(TestSelectedNodesAction, self).setUp()
self.env.create(
cluster_kwargs={
'mode': 'ha_compact'},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},

View File

@ -27,8 +27,6 @@ from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.orchestrator.deployment_serializers \
import DeploymentHASerializer
from nailgun.orchestrator.deployment_serializers \
import DeploymentMultinodeSerializer
from nailgun.settings import settings
from nailgun.task.helpers import TaskHelper
from nailgun.test.base import BaseIntegrationTest
@ -40,7 +38,7 @@ class OrchestratorSerializerTestBase(BaseIntegrationTest):
"""Class containts helpers."""
def filter_by_role(self, nodes, role):
return filter(lambda node: node['role'] == role, nodes)
return filter(lambda node: role in node['role'], nodes)
def filter_by_uid(self, nodes, uid):
return filter(lambda node: node['uid'] == uid, nodes)
@ -57,18 +55,19 @@ class OrchestratorSerializerTestBase(BaseIntegrationTest):
@property
def serializer(self):
return DeploymentMultinodeSerializer
return DeploymentHASerializer
def serialize(self, cluster):
TaskHelper.prepare_for_deployment(cluster.nodes)
return self.serializer.serialize(cluster, cluster.nodes)
# TODO(awoodward): multinode deprecation: probably has duplicates
class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNovaOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('multinode')
self.cluster = self.create_env('ha_compact')
def create_env(self, mode, network_manager='FlatDHCPManager'):
node_args = [
@ -173,7 +172,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
node['storage_address'])
def test_vlan_manager(self):
cluster = self.create_env('multinode')
cluster = self.create_env('ha_compact')
data = {'net_manager': 'VlanManager'}
url = reverse('NovaNetworkConfigurationHandler',
kwargs={'cluster_id': cluster.id})
@ -354,11 +353,12 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
{'point': '2', 'weight': '2'}])
# TODO(awoodward): multinode deprecation: probably has duplicates
class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
def setUp(self):
super(TestNeutronOrchestratorSerializer, self).setUp()
self.cluster = self.create_env('multinode')
self.cluster = self.create_env('ha_compact')
def create_env(self, mode, segment_type='vlan'):
cluster = self.env.create(
@ -460,7 +460,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
node['storage_address'])
def test_neutron_l3_gateway(self):
cluster = self.create_env('multinode', 'gre')
cluster = self.create_env('ha_compact', 'gre')
test_gateway = "192.168.111.255"
public_ng = self.db.query(NetworkGroup).filter(
NetworkGroup.name == 'public'
@ -480,7 +480,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
)
def test_gre_segmentation(self):
cluster = self.create_env('multinode', 'gre')
cluster = self.create_env('ha_compact', 'gre')
facts = self.serializer.serialize(cluster, cluster.nodes)
for fact in facts:
@ -503,7 +503,6 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
}
cluster = self.env.create(
cluster_kwargs={
'mode': 'multinode',
'net_provider': 'neutron',
'net_segment_type': segment_type
},
@ -760,7 +759,6 @@ class TestNeutronOrchestratorSerializerBonds(OrchestratorSerializerTestBase):
def create_env(self, nodes_count=2, nic_count=3, segment_type='vlan'):
cluster = self.env.create_cluster(
mode='multinode',
net_provider='neutron',
net_segment_type=segment_type)
self.env.create_nodes_w_interfaces_count(

View File

@ -29,7 +29,6 @@ class TestProvisioningSerializer(BaseIntegrationTest):
self.env.create(
cluster_kwargs={
'mode': 'multinode',
'release_id': release.id},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},

View File

@ -146,7 +146,6 @@ class TestRoles(BaseIntegrationTest):
def test_roles_failed_to_delete_assigned(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"status": "ready", "roles": ["controller"]}
]

View File

@ -25,7 +25,6 @@ class TestStopDeployment(BaseIntegrationTest):
def setUp(self):
super(TestStopDeployment, self).setUp()
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"name": "First",
"pending_addition": True},

View File

@ -46,7 +46,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks(godmode=True)
def test_deployment_task_managers(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True},
{"pending_deletion": True, 'status': 'provisioned'},
@ -88,7 +87,6 @@ class TestTaskManagers(BaseIntegrationTest):
self, _):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{'pending_deletion': True, 'status': 'discover'}])
@ -176,7 +174,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks(godmode=True)
def test_redeployment_works(self):
self.env.create(
cluster_kwargs={"mode": "ha_compact"},
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True},
@ -235,7 +232,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_deletion_cluster_task_manager(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"status": "ready", "progress": 100},
{"roles": ["compute"], "status": "ready", "progress": 100},
@ -276,7 +272,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_deletion_during_deployment(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"status": "ready", "pending_addition": True},
]
@ -321,7 +316,6 @@ class TestTaskManagers(BaseIntegrationTest):
self.env.create(
cluster_kwargs={
"api": True,
"mode": "ha_compact"
},
nodes_kwargs=[
{"roles": ["controller"], "pending_addition": True},
@ -365,7 +359,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_node_fqdn_is_assigned(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True},
{"pending_addition": True}
@ -393,7 +386,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_no_changes_no_cry(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"status": "ready"}
]
@ -406,7 +398,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_deletion_offline_node(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"online": False, "pending_deletion": True},
{"status": "ready"}
@ -420,7 +411,6 @@ class TestTaskManagers(BaseIntegrationTest):
@fake_tasks()
def test_deletion_three_offline_nodes_and_one_online(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"online": False, "pending_deletion": True},
{"online": False, "pending_deletion": True},

View File

@ -124,7 +124,6 @@ class TestAssignmentHandlers(BaseIntegrationTest):
def test_unassignment_after_deploy(self):
cluster = self.env.create(
cluster_kwargs={},
nodes_kwargs=[{}]
)
node = self.env.nodes[0]

View File

@ -34,7 +34,6 @@ class TestNodeDeletion(BaseIntegrationTest):
@patch('nailgun.rpc.cast')
def test_node_deletion_and_attributes_clearing(self, mocked_rpc):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True},
]

View File

@ -36,7 +36,6 @@ class TestNodeDisksHandlers(BaseIntegrationTest):
if pending_roles is None:
pending_roles = []
self.env.create(
cluster_kwargs={},
nodes_kwargs=[{
'roles': roles,
'pending_roles': pending_roles,
@ -337,7 +336,6 @@ class TestVolumeManager(BaseIntegrationTest):
def create_node(self, *roles):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[{
'roles': [],
'pending_roles': roles,

View File

@ -28,8 +28,6 @@ class TestHelperUpdateClusterStatus(BaseTestCase):
def setUp(self):
super(TestHelperUpdateClusterStatus, self).setUp()
self.env.create(
cluster_kwargs={
'mode': 'ha_compact'},
nodes_kwargs=[
{'roles': ['controller']},
{'roles': ['compute']},
@ -155,7 +153,6 @@ class TestCheckBeforeDeploymentTask(BaseTestCase):
def setUp(self):
super(TestCheckBeforeDeploymentTask, self).setUp()
self.env.create(
cluster_kwargs={},
nodes_kwargs=[{'roles': ['controller']}])
self.env.create_node()

View File

@ -27,8 +27,6 @@ class TestTaskHelpers(BaseTestCase):
def create_env(self, nodes):
cluster = self.env.create(
cluster_kwargs={
'mode': 'ha_compact'},
nodes_kwargs=nodes)
cluster_db = self.db.query(Cluster).get(cluster['id'])

View File

@ -51,8 +51,8 @@
"stopped": "Stopped"
},
"mode": {
"multinode": "Multi-node",
"ha_compact": "Multi-node with HA"
"ha_compact": "Multi-node with HA",
"multinode": "Multi-node"
}
},
"network": {
@ -533,8 +533,8 @@
"removing": "移除中"
},
"mode": {
"multinode": "多节点",
"ha_compact": "HA多节点"
"ha_compact": "HA多节点",
"multinode": "多节点"
}
},
"network": {

View File

@ -112,7 +112,7 @@ define(['utils', 'deepModel'], function(utils) {
return true;
},
availableModes: function() {
return ['multinode', 'ha_compact'];
return ['ha_compact', 'multinode'];
},
fetchRelated: function(related, options) {
return this.get(related).fetch(_.extend({data: {cluster_id: this.id}}, options));

View File

@ -636,7 +636,7 @@ function(require, utils, models, simpleMessageTemplate, createClusterWizardTempl
render: function() {
this.constructor.__super__.render.call(this, {
cluster: this.model,
size: this.model.get('mode') == 'ha_compact' ? 3 : 1
size: 1
});
return this;
}