Merge "Refactor unit test of cdh plugin"

This commit is contained in:
Jenkins 2017-04-21 10:22:21 +00:00 committed by Gerrit Code Review
commit b76c4aa321
13 changed files with 872 additions and 297 deletions

View File

@ -0,0 +1,187 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from sahara.tests.unit import base
from sahara.tests.unit.plugins.cdh import utils as ctu
from sahara.utils import files as f
# Names of the Cloudera service/role configuration resource files that
# ConfigHelper._get_ng_plugin_configs() is expected to load. Each entry is a
# JSON file expected under the version-specific resource directory
# (``self.path_to_config`` in TestConfigHelper subclasses).
json_files = [
    'hdfs-service.json',
    'hdfs-namenode.json',
    'hdfs-datanode.json',
    'hdfs-secondarynamenode.json',
    'hdfs-gateway.json',
    'hdfs-journalnode.json',
    'yarn-service.json',
    'yarn-resourcemanager.json',
    'yarn-nodemanager.json',
    'yarn-jobhistory.json',
    'yarn-gateway.json',
    'oozie-service.json',
    'oozie-oozie_server.json',
    'hive-service.json',
    'hive-hivemetastore.json',
    'hive-hiveserver2.json',
    'hive-webhcat.json',
    'hue-service.json',
    'hue-hue_server.json',
    'spark-service.json',
    'spark-spark_yarn_history_server.json',
    'zookeeper-service.json',
    'zookeeper-server.json',
    'hbase-service.json',
    'hbase-master.json',
    'hbase-regionserver.json',
    'flume-service.json',
    'flume-agent.json',
    'sentry-service.json',
    'sentry-sentry_server.json',
    'solr-service.json',
    'solr-solr_server.json',
    'sqoop-service.json',
    'sqoop-sqoop_server.json',
    'ks_indexer-service.json',
    'ks_indexer-hbase_indexer.json',
    'impala-service.json',
    'impala-catalogserver.json',
    'impala-impalad.json',
    'impala-statestore.json',
    'kms-service.json',
    'kms-kms.json',
    'kafka-kafka_broker.json',
    'kafka-kafka_mirror_maker.json',
    'kafka-service.json'
]
class TestConfigHelper(base.SaharaTestCase):
    """Shared tests for the version-specific CDH ConfigHelper classes.

    This is a base test case: subclasses must assign ``self.c_h`` (a
    version-specific ConfigHelper instance) and ``self.path_to_config``
    (the version's resource directory) in their own ``setUp``.

    Every get_* accessor follows the same contract — return the config's
    default when the cluster carries no override, and the user-supplied
    value when one is set — so the repeated test bodies are factored into
    the two private helpers below.
    """

    def setUp(self):
        super(TestConfigHelper, self).setUp()
        # Both attributes are overridden by version-specific subclasses.
        self.path_to_config = None
        self.c_h = None

    def _assert_configs(self, config, getter, default):
        """Assert *getter* yields *default* with no overrides, and the
        user-provided value when *config* is set in 'general'.
        """
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertEqual(default, getter(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {config.name: 'spam'}})
        self.assertEqual('spam', getter(cluster))

    def _assert_bool_config(self, config, checker):
        """Assert *checker* is True by default and False once *config*
        is explicitly disabled in 'general'.
        """
        cluster = ctu.get_fake_cluster(cluster_configs={})
        self.assertTrue(checker(cluster))
        cluster = ctu.get_fake_cluster(
            cluster_configs={'general': {config.name: False}})
        self.assertFalse(checker(cluster))

    def test_get_ng_plugin_configs(self):
        """All configs declared in the resource JSON files are exposed."""
        actual_configs = self.c_h._get_ng_plugin_configs()
        expected_configs = []
        for json_file in json_files:
            expected_configs += json.loads(
                f.get_file_text(self.path_to_config + json_file))
        # Compare by name only; other attributes differ in representation.
        expected_names = set(i['name'] for i in expected_configs)
        actual_names = set(i.to_dict()['name'] for i in actual_configs)
        self.assertEqual(expected_names, actual_names)

    def test_get_cdh5_repo_url(self):
        self._assert_configs(self.c_h.CDH5_REPO_URL,
                             self.c_h.get_cdh5_repo_url,
                             self.c_h.CDH5_REPO_URL.default_value)

    def test_get_cdh5_key_url(self):
        self._assert_configs(self.c_h.CDH5_REPO_KEY_URL,
                             self.c_h.get_cdh5_key_url,
                             self.c_h.CDH5_REPO_KEY_URL.default_value)

    def test_get_cm5_repo_url(self):
        self._assert_configs(self.c_h.CM5_REPO_URL,
                             self.c_h.get_cm5_repo_url,
                             self.c_h.CM5_REPO_URL.default_value)

    def test_get_cm5_key_url(self):
        self._assert_configs(self.c_h.CM5_REPO_KEY_URL,
                             self.c_h.get_cm5_key_url,
                             self.c_h.CM5_REPO_KEY_URL.default_value)

    def test_is_swift_enabled(self):
        self._assert_bool_config(self.c_h.ENABLE_SWIFT,
                                 self.c_h.is_swift_enabled)

    def test_get_swift_lib_url(self):
        self._assert_configs(self.c_h.SWIFT_LIB_URL,
                             self.c_h.get_swift_lib_url,
                             self.c_h.DEFAULT_SWIFT_LIB_URL)

    def test_is_hbase_common_lib_enabled(self):
        self._assert_bool_config(self.c_h.ENABLE_HBASE_COMMON_LIB,
                                 self.c_h.is_hbase_common_lib_enabled)

    def test_get_extjs_lib_url(self):
        self._assert_configs(self.c_h.EXTJS_LIB_URL,
                             self.c_h.get_extjs_lib_url,
                             self.c_h.DEFAULT_EXTJS_LIB_URL)

    def test_get_kms_key_url(self):
        self._assert_configs(self.c_h.KMS_REPO_KEY_URL,
                             self.c_h.get_kms_key_url,
                             self.c_h.KMS_REPO_KEY_URL.default_value)

    def test_get_required_anti_affinity(self):
        self._assert_bool_config(self.c_h.REQUIRE_ANTI_AFFINITY,
                                 self.c_h.get_required_anti_affinity)

View File

@ -23,6 +23,9 @@ from sahara.tests.unit import testutils as tu
icce = ex.InvalidComponentCountException
rsme = ex.RequiredServiceMissingException
icte = ex.InvalidClusterTopology
nnce = ex.NameNodeHAConfigurationError
rmce = ex.ResourceManagerHAConfigurationError
def make_ng_dict_with_inst(counter, name, flavor,
@ -169,6 +172,94 @@ class BaseValidationTestCase(base.SaharaTestCase):
[None, {}]
]
disable_anti_affinity = {'cluster_configs': {'general': {
'Require Anti Affinity': False}}}
cases += [
[None, {'HDFS_JOURNALNODE': 3,
'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
[rsme, {'HDFS_JOURNALNODE': 3,
'ZOOKEEPER_SERVER': 0}, [], disable_anti_affinity],
[icce, {'HDFS_JOURNALNODE': 4,
'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
[icce, {'HDFS_JOURNALNODE': 2,
'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
[nnce, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['HDFS_SECONDARYNAMENODE']}],
[nnce, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['HDFS_NAMENODE']}],
[None, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['HDFS_NAMENODE', 'HDFS_SECONDARYNAMENODE']}],
[None, {'YARN_STANDBYRM': 1,
'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
[icce, {'YARN_STANDBYRM': 2,
'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
[rsme, {'YARN_STANDBYRM': 1,
'ZOOKEEPER_SERVER': 0}, [], disable_anti_affinity],
[rmce, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['YARN_RESOURCEMANAGER']}],
[rmce, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['YARN_STANDBYRM']}],
[None, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
{'anti_affinity': ['YARN_STANDBYRM', 'YARN_RESOURCEMANAGER']}],
]
cases += [
[None, {'FLUME_AGENT': 1}],
[icce, {'ZOOKEEPER_SERVER': 1, 'SENTRY_SERVER': 2}],
[None, {'ZOOKEEPER_SERVER': 1, 'SENTRY_SERVER': 1}],
[rsme, {'ZOOKEEPER_SERVER': 0, 'SENTRY_SERVER': 1}],
[None, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1}],
[rsme, {'ZOOKEEPER_SERVER': 0, 'SOLR_SERVER': 1}],
[None, {'YARN_NODEMANAGER': 1, 'YARN_JOBHISTORY': 1,
'SQOOP_SERVER': 1}],
[rsme, {'YARN_NODEMANAGER': 0, 'YARN_JOBHISTORY': 1,
'SQOOP_SERVER': 1}],
[rsme, {'YARN_NODEMANAGER': 1, 'YARN_JOBHISTORY': 0,
'SQOOP_SERVER': 1}],
# HBASE_MASTER AND HBASE_REGIONSERVER depend circularly
[None, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1,
'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
'HBASE_REGIONSERVER': 1}],
[rsme, {'ZOOKEEPER_SERVER': 0, 'SOLR_SERVER': 1,
'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
'HBASE_REGIONSERVER': 1}],
[rsme, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 0,
'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
'HBASE_REGIONSERVER': 1}],
[rsme, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1,
'HBASE_MASTER': 0, 'HBASE_INDEXER': 1}],
]
worker_with_implama = ('worker_ng', 1, ['HDFS_DATANODE',
'YARN_NODEMANAGER',
'IMPALAD'], 3)
cases += [
[None, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1,
'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
[worker_with_implama]],
[icte, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1},
[]],
[icte, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1},
[worker_with_implama]],
[rsme, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 0,
'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1,
'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
[worker_with_implama]],
[rsme, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
'HIVE_METASTORE': 0, 'HIVE_SERVER2': 1,
'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
[worker_with_implama]]
]
cases += [
[None, {'KMS': 1}],
[icce, {'KMS': 2}]
]
return cases
def test_validate_cluster_creating(self):

View File

@ -0,0 +1,25 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import config_helper
from sahara.tests.unit.plugins.cdh import base_config_helper_test as bcht
class TestConfigHelperV550(bcht.TestConfigHelper):
    """Run the shared ConfigHelper checks against the CDH 5.5.0 helper."""

    def setUp(self):
        super(TestConfigHelperV550, self).setUp()
        # Point the shared tests at the 5.5.0 resources and helper.
        self.path_to_config = 'plugins/cdh/v5_5_0/resources/'
        self.c_h = config_helper.ConfigHelperV550()

View File

@ -0,0 +1,25 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import plugin_utils as pu
from sahara.tests.unit.plugins.cdh import base_plugin_utils_test
class TestPluginUtilsV550(base_plugin_utils_test.TestPluginUtilsHigherThanV5):
    """Run the shared plugin-utils checks against the CDH 5.5.0 utils."""

    def setUp(self):
        super(TestPluginUtilsV550, self).setUp()
        # Bind the shared tests to the 5.5.0 plugin utilities.
        self.version = "v5_5_0"
        self.plug_utils = pu.PluginUtilsV550()

View File

@ -14,110 +14,11 @@
# limitations under the License.
from sahara.plugins.cdh.v5_5_0 import validation
from sahara.plugins import exceptions as ex
from sahara.tests.unit.plugins.cdh import base_validation_tests as bvt
icte = ex.InvalidClusterTopology
nnce = ex.NameNodeHAConfigurationError
icce = ex.InvalidComponentCountException
rsme = ex.RequiredServiceMissingException
rmce = ex.ResourceManagerHAConfigurationError
class ValidationTestCase(bvt.BaseValidationTestCase):
    """Cluster-validation tests for the CDH 5.5.0 plugin.

    The HA / service-topology cases previously duplicated here are the
    same cases now appended by BaseValidationTestCase._get_test_cases(),
    so keeping the override would register every common case twice.  The
    subclass therefore only binds the version-specific validator and
    inherits the case list from the base class.
    """

    def setUp(self):
        super(ValidationTestCase, self).setUp()
        self.module = validation.ValidatorV550

View File

@ -0,0 +1,25 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_7_0 import config_helper
from sahara.tests.unit.plugins.cdh import base_config_helper_test as bcht
class TestConfigHelperV570(bcht.TestConfigHelper):
    """Run the shared ConfigHelper checks against the CDH 5.7.0 helper."""

    def setUp(self):
        super(TestConfigHelperV570, self).setUp()
        # Point the shared tests at the 5.7.0 resources and helper.
        self.path_to_config = 'plugins/cdh/v5_7_0/resources/'
        self.c_h = config_helper.ConfigHelperV570()

View File

@ -0,0 +1,222 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins.cdh.v5_7_0 import deploy
from sahara.tests.unit import base
class DeployCDHV570(base.SaharaTestCase):
    """Unit tests for sahara.plugins.cdh.v5_7_0.deploy.

    Builds two MagicMock "instances" — a master carrying every
    management/service role and a worker carrying data/compute roles —
    and patches out CDH pre-install detection and the DB facade so the
    deploy module can be exercised without a real cluster.
    """

    def setUp(self):
        super(DeployCDHV570, self).setUp()
        # Mock master instance: all management and service processes.
        self.master = mock.MagicMock()
        self.master.node_group.node_processes = [
            "HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "CLOUDERA_MANAGER",
            "SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER",
            "OOZIE_SERVER", "YARN_JOBHISTORY", "HDFS_SECONDARYNAMENODE",
            "HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER",
            "HBASE_MASTER", "HBASE_REGIONSERVER", "HUE_SERVER", "KMS",
            "FLUME_AGENT", "SOLR_SERVER", "SQOOP_SERVER", "IMPALA_STATESTORE",
            "IMPALA_CATALOGSERVER", "IMPALAD", "KEY_VALUE_STORE_INDEXER",
        ]
        # Mock worker instance: data/compute processes only.
        self.worker = mock.MagicMock()
        self.worker.node_group.node_processes = [
            "HDFS_DATANODE", "HDFS_JOURNALNODE", "JOURNALNODE",
            "YARN_NODEMANAGER", "YARN_STANDBYRM",
        ]
        self.instances = [self.master, self.worker]
        self.cluster = mock.MagicMock()
        # Pretend CDH is not pre-installed on the images.
        self.is_cdh_exists = mock.patch(
            "sahara.plugins.cdh.commands.is_pre_installed_cdh",
            return_value=False)
        self.is_cdh_exists.start()
        # Keep the tests from creating a real SQLAlchemy engine.
        self._create_facade = mock.patch(
            "sahara.db.sqlalchemy.api._create_facade_lazily")
        self._create_facade.start()

    def tearDown(self):
        # Undo the patches started in setUp.
        self.is_cdh_exists.stop()
        self._create_facade.stop()
        super(DeployCDHV570, self).tearDown()

    @mock.patch("sahara.plugins.utils.get_instances")
    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test_configure_cluster(self, mock_cu, mock_get_instances):
        """configure_cluster drives the full agent/manager setup sequence."""
        mock_get_instances.return_value = self.instances
        deploy.configure_cluster(self.cluster)
        mock_cu.pu.configure_os.assert_called_once_with(self.instances)
        mock_cu.pu.install_packages.assert_called_once_with(self.instances,
                                                            deploy.PACKAGES)
        mock_cu.pu.start_cloudera_agents.assert_called_once_with(
            self.instances)
        mock_cu.pu.start_cloudera_manager.assert_called_once_with(self.cluster)
        mock_cu.update_cloudera_password.assert_called_once_with(self.cluster)
        mock_cu.await_agents.assert_called_once_with(self.cluster,
                                                     self.instances)
        mock_cu.create_mgmt_service.assert_called_once_with(self.cluster)
        mock_cu.create_services.assert_called_once_with(self.cluster)
        mock_cu.configure_services.assert_called_once_with(self.cluster)
        mock_cu.configure_instances.assert_called_once_with(self.instances,
                                                            self.cluster)
        mock_cu.deploy_configs.assert_called_once_with(self.cluster)

    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test__start_roles(self, mock_cu):
        """_start_roles starts DATANODE/NODEMANAGER per matching instance."""
        deploy._start_roles(self.cluster, self.instances)
        # Only the worker carries HDFS_DATANODE; both carry YARN_NODEMANAGER.
        mock_cu.get_service_by_role.assert_any_call('DATANODE',
                                                    instance=self.worker)
        mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
                                                    instance=self.master)
        mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
                                                    instance=self.worker)
        # One start per matched role above.
        self.assertEqual(mock_cu.start_roles.call_count, 3)

    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy._start_roles")
    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test_scale_cluster(self, mock_cu, mock__start_roles):
        """scale_cluster is a no-op without instances, full flow with them."""
        deploy.scale_cluster(self.cluster, None)
        self.assertEqual(mock_cu.call_count, 0)
        self.assertEqual(mock__start_roles.call_count, 0)
        deploy.scale_cluster(self.cluster, self.instances)
        mock_cu.pu.configure_os.assert_called_once_with(self.instances)
        mock_cu.pu.install_packages.assert_called_once_with(self.instances,
                                                            deploy.PACKAGES)
        mock_cu.pu.start_cloudera_agents.assert_called_once_with(
            self.instances)
        mock_cu.await_agents.assert_called_once_with(self.cluster,
                                                     self.instances)
        mock_cu.configure_instances.assert_called_once_with(self.instances,
                                                            self.cluster)
        mock_cu.update_configs.assert_called_once_with(self.instances)
        mock_cu.pu.configure_swift.assert_called_once_with(self.cluster,
                                                           self.instances)
        mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
        mock__start_roles.assert_called_once_with(self.cluster,
                                                  self.instances)

    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test_decommission_cluster(self, mock_cu):
        """decommission_cluster retires DN/NM roles then deletes instances."""
        deploy.decommission_cluster(self.cluster, self.instances)
        # Rebuild the role-name lists the function is expected to pass.
        dns = []
        dns_2 = []
        nms = []
        nms_2 = []
        for i in self.instances:
            if 'HDFS_DATANODE' in i.node_group.node_processes:
                dns.append(mock_cu.pu.get_role_name(i, 'DATANODE'))
                dns_2.append(mock_cu.pu.get_role_name(i, 'HDFS_GATEWAY'))
            if 'YARN_NODEMANAGER' in i.node_group.node_processes:
                nms.append(mock_cu.pu.get_role_name(i, 'NODEMANAGER'))
                nms_2.append(mock_cu.pu.get_role_name(i, 'YARN_GATEWAY'))
        mock_cu.decommission_nodes.assert_any_call(
            self.cluster, 'DATANODE', dns, dns_2)
        mock_cu.decommission_nodes.assert_any_call(
            self.cluster, 'NODEMANAGER', nms, nms_2)
        mock_cu.delete_instances.assert_called_once_with(self.cluster,
                                                         self.instances)
        mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
        mock_cu.refresh_yarn_nodes.assert_called_once_with(self.cluster)

    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test__prepare_cluster(self, mock_cu):
        """_prepare_cluster installs extjs and configures hive and sentry."""
        deploy._prepare_cluster(self.cluster)
        mock_cu.pu.install_extjs.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_hive.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_sentry.assert_called_once_with(self.cluster)

    @mock.patch("sahara.service.edp.hdfs_helper.create_hbase_common_lib")
    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test__finish_cluster_starting(self, mock_cu, mock_create_hbase):
        """_finish_cluster_starting uploads hive xml and starts the agent."""
        deploy._finish_cluster_starting(self.cluster)
        mock_cu.pu.put_hive_hdfs_xml.assert_called_once_with(self.cluster)
        self.assertTrue(mock_create_hbase.called)
        mock_cu.start_service.assert_called_once_with(
            mock_cu.get_service_by_role('AGENT', self.cluster))

    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy._finish_cluster_starting")
    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy._prepare_cluster")
    @mock.patch("sahara.plugins.cdh.v5_7_0.deploy.CU")
    def test_start_cluster(self, mock_cu, mock_prepare, mock_finish):
        """start_cluster enables NameNode/RM HA when journalnodes exist."""
        jns_count = 0
        for i in self.instances:
            if "HDFS_JOURNALNODE" in i.node_group.node_processes:
                jns_count += 1
        # Make len(CU.pu.get_jns(...)) report the journalnode count so the
        # HA branch in deploy.start_cluster is exercised.
        mock_cu.pu.get_jns.return_value.__len__.return_value = jns_count
        deploy.start_cluster(self.cluster)
        mock_prepare.assert_called_once_with(self.cluster)
        mock_cu.first_run.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_swift.assert_called_once_with(self.cluster)
        if jns_count > 0:
            mock_cu.enable_namenode_ha.assert_called_once_with(self.cluster)
            mock_cu.update_role_config.assert_any_call(
                mock_cu.pu.get_secondarynamenode(self.cluster),
                'HDFS_NAMENODE'
            )
        mock_cu.enable_resourcemanager_ha.assert_called_once_with(self.cluster)
        mock_cu.update_role_config.assert_any_call(
            mock_cu.pu.get_stdb_rm(self.cluster), 'YARN_STANDBYRM')
        mock_finish.assert_called_once_with(self.cluster)

    def test_get_open_ports(self):
        """get_open_ports returns the expected port set per node group."""
        master_ports = [
            9000,
            7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
            9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994,
            8020, 8022, 50070, 50470,
            50090, 50495,
            8030, 8031, 8032, 8033, 8088,
            8040, 8041, 8042,
            10020, 19888,
            9083,
            10000,
            8888,
            11000, 11001,
            18088,
            2181, 3181, 4181, 9010,
            60000,
            60020,
            41414,
            8038,
            8983, 8984,
            8005, 12000,
            25020, 26000,
            25010, 24000,
            21050, 21000, 23000, 25000, 28000, 22000,
            16000, 16001
        ]
        # NOTE(review): the result of this first call is discarded; the
        # assertion below calls get_open_ports again — confirm intentional.
        deploy.get_open_ports(self.master.node_group)
        # NOTE(review): assertItemsEqual is the Python 2 unittest name
        # (assertCountEqual on Python 3) — presumably provided here by the
        # testtools-based SaharaTestCase; verify before porting.
        self.assertItemsEqual(master_ports,
                              deploy.get_open_ports(self.master.node_group))
        worker_ports = [
            9000,
            50010, 1004, 50075, 1006, 50020,
            8480, 8481, 8485,
            8040, 8041, 8042,
            8030, 8031, 8032, 8033, 8088
        ]
        self.assertItemsEqual(worker_ports,
                              deploy.get_open_ports(self.worker.node_group))

View File

@ -0,0 +1,25 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_7_0 import plugin_utils as pu
from sahara.tests.unit.plugins.cdh import base_plugin_utils_test
class TestPluginUtilsV570(base_plugin_utils_test.TestPluginUtilsHigherThanV5):
    """Run the shared plugin-utils checks against the CDH 5.7.0 utils."""

    def setUp(self):
        super(TestPluginUtilsV570, self).setUp()
        # Bind the shared tests to the 5.7.0 plugin utilities.
        self.version = "v5_7_0"
        self.plug_utils = pu.PluginUtilsV570()

View File

@ -14,110 +14,11 @@
# limitations under the License.
from sahara.plugins.cdh.v5_7_0 import validation
from sahara.plugins import exceptions as ex
from sahara.tests.unit.plugins.cdh import base_validation_tests as bvt
icte = ex.InvalidClusterTopology
nnce = ex.NameNodeHAConfigurationError
icce = ex.InvalidComponentCountException
rsme = ex.RequiredServiceMissingException
rmce = ex.ResourceManagerHAConfigurationError
class ValidationTestCase(bvt.BaseValidationTestCase):
    """Cluster-validation tests for the CDH 5.7.0 plugin.

    The HA / service-topology cases previously duplicated here are the
    same cases now appended by BaseValidationTestCase._get_test_cases(),
    so keeping the override would register every common case twice.  The
    subclass therefore only binds the version-specific validator and
    inherits the case list from the base class.
    """

    def setUp(self):
        super(ValidationTestCase, self).setUp()
        self.module = validation.ValidatorV570

View File

@ -0,0 +1,25 @@
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_9_0 import config_helper
from sahara.tests.unit.plugins.cdh import base_config_helper_test as bcht
class TestConfigHelperV590(bcht.TestConfigHelper):
    """Run the shared ConfigHelper checks against the CDH 5.9.0 helper."""

    def setUp(self):
        super(TestConfigHelperV590, self).setUp()
        # Point the shared tests at the 5.9.0 resources and helper.
        self.path_to_config = 'plugins/cdh/v5_9_0/resources/'
        self.c_h = config_helper.ConfigHelperV590()

View File

@ -0,0 +1,222 @@
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from sahara.plugins.cdh.v5_9_0 import deploy
from sahara.tests.unit import base
class DeployCDHV590(base.SaharaTestCase):
    """Unit tests for sahara.plugins.cdh.v5_9_0.deploy.

    Cluster and instance objects are mocks, and the Cloudera utils object
    (``deploy.CU``) is patched per-test, so only the orchestration logic in
    the ``deploy`` module itself is exercised — no Cloudera Manager API or
    database is touched.
    """

    def setUp(self):
        super(DeployCDHV590, self).setUp()
        # Master node group carries every management/service role.
        self.master = mock.MagicMock()
        self.master.node_group.node_processes = [
            "HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "CLOUDERA_MANAGER",
            "SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER",
            "OOZIE_SERVER", "YARN_JOBHISTORY", "HDFS_SECONDARYNAMENODE",
            "HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER",
            "HBASE_MASTER", "HBASE_REGIONSERVER", "HUE_SERVER", "KMS",
            "FLUME_AGENT", "SOLR_SERVER", "SQOOP_SERVER", "IMPALA_STATESTORE",
            "IMPALA_CATALOGSERVER", "IMPALAD", "KEY_VALUE_STORE_INDEXER",
        ]
        # Worker node group carries the data-plane roles.
        self.worker = mock.MagicMock()
        self.worker.node_group.node_processes = [
            "HDFS_DATANODE", "HDFS_JOURNALNODE", "JOURNALNODE",
            "YARN_NODEMANAGER", "YARN_STANDBYRM",
        ]
        self.instances = [self.master, self.worker]
        self.cluster = mock.MagicMock()

        # Pretend CDH is not pre-installed so deploy follows the
        # package-installation path.
        self.is_cdh_exists = mock.patch(
            "sahara.plugins.cdh.commands.is_pre_installed_cdh",
            return_value=False)
        self.is_cdh_exists.start()
        # Prevent the sqlalchemy layer from creating a real DB engine.
        self._create_facade = mock.patch(
            "sahara.db.sqlalchemy.api._create_facade_lazily")
        self._create_facade.start()

    def tearDown(self):
        # Stop the module-level patches started in setUp before the base
        # class tears down the rest of the fixture.
        self.is_cdh_exists.stop()
        self._create_facade.stop()
        super(DeployCDHV590, self).tearDown()

    @mock.patch("sahara.plugins.utils.get_instances")
    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test_configure_cluster(self, mock_cu, mock_get_instances):
        """configure_cluster drives the full CM setup sequence once each."""
        mock_get_instances.return_value = self.instances

        deploy.configure_cluster(self.cluster)

        mock_cu.pu.configure_os.assert_called_once_with(self.instances)
        mock_cu.pu.install_packages.assert_called_once_with(self.instances,
                                                            deploy.PACKAGES)
        mock_cu.pu.start_cloudera_agents.assert_called_once_with(
            self.instances)
        mock_cu.pu.start_cloudera_manager.assert_called_once_with(self.cluster)
        mock_cu.update_cloudera_password.assert_called_once_with(self.cluster)
        mock_cu.await_agents.assert_called_once_with(self.cluster,
                                                     self.instances)
        mock_cu.create_mgmt_service.assert_called_once_with(self.cluster)
        mock_cu.create_services.assert_called_once_with(self.cluster)
        mock_cu.configure_services.assert_called_once_with(self.cluster)
        mock_cu.configure_instances.assert_called_once_with(self.instances,
                                                            self.cluster)
        mock_cu.deploy_configs.assert_called_once_with(self.cluster)

    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test__start_roles(self, mock_cu):
        """_start_roles resolves and starts a role per matching process."""
        deploy._start_roles(self.cluster, self.instances)

        mock_cu.get_service_by_role.assert_any_call('DATANODE',
                                                    instance=self.worker)
        mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
                                                    instance=self.master)
        mock_cu.get_service_by_role.assert_any_call('NODEMANAGER',
                                                    instance=self.worker)
        # One DATANODE (worker) + two NODEMANAGERs (master and worker).
        self.assertEqual(mock_cu.start_roles.call_count, 3)

    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy._start_roles")
    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test_scale_cluster(self, mock_cu, mock__start_roles):
        """scale_cluster is a no-op without new instances, full flow with."""
        deploy.scale_cluster(self.cluster, None)
        self.assertEqual(mock_cu.call_count, 0)
        self.assertEqual(mock__start_roles.call_count, 0)

        deploy.scale_cluster(self.cluster, self.instances)
        mock_cu.pu.configure_os.assert_called_once_with(self.instances)
        mock_cu.pu.install_packages.assert_called_once_with(self.instances,
                                                            deploy.PACKAGES)
        mock_cu.pu.start_cloudera_agents.assert_called_once_with(
            self.instances)
        mock_cu.await_agents.assert_called_once_with(self.cluster,
                                                     self.instances)
        mock_cu.configure_instances.assert_called_once_with(self.instances,
                                                            self.cluster)
        mock_cu.update_configs.assert_called_once_with(self.instances)
        mock_cu.pu.configure_swift.assert_called_once_with(self.cluster,
                                                           self.instances)
        mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
        mock__start_roles.assert_called_once_with(self.cluster,
                                                  self.instances)

    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test_decommission_cluster(self, mock_cu):
        """decommission_cluster removes DN/NM roles then refreshes nodes."""
        deploy.decommission_cluster(self.cluster, self.instances)

        # Rebuild the role-name lists exactly as deploy is expected to:
        # datanodes plus their HDFS gateways, nodemanagers plus their
        # YARN gateways.
        dns = []
        dns_2 = []
        nms = []
        nms_2 = []
        for i in self.instances:
            if 'HDFS_DATANODE' in i.node_group.node_processes:
                dns.append(mock_cu.pu.get_role_name(i, 'DATANODE'))
                dns_2.append(mock_cu.pu.get_role_name(i, 'HDFS_GATEWAY'))
            if 'YARN_NODEMANAGER' in i.node_group.node_processes:
                nms.append(mock_cu.pu.get_role_name(i, 'NODEMANAGER'))
                nms_2.append(mock_cu.pu.get_role_name(i, 'YARN_GATEWAY'))

        mock_cu.decommission_nodes.assert_any_call(
            self.cluster, 'DATANODE', dns, dns_2)
        mock_cu.decommission_nodes.assert_any_call(
            self.cluster, 'NODEMANAGER', nms, nms_2)
        mock_cu.delete_instances.assert_called_once_with(self.cluster,
                                                         self.instances)
        mock_cu.refresh_datanodes.assert_called_once_with(self.cluster)
        mock_cu.refresh_yarn_nodes.assert_called_once_with(self.cluster)

    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test__prepare_cluster(self, mock_cu):
        """_prepare_cluster installs extjs and configures Hive and Sentry."""
        deploy._prepare_cluster(self.cluster)

        mock_cu.pu.install_extjs.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_hive.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_sentry.assert_called_once_with(self.cluster)

    @mock.patch("sahara.service.edp.hdfs_helper.create_hbase_common_lib")
    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test__finish_cluster_starting(self, mock_cu, mock_create_hbase):
        """_finish_cluster_starting pushes hive config and starts Flume."""
        deploy._finish_cluster_starting(self.cluster)

        mock_cu.pu.put_hive_hdfs_xml.assert_called_once_with(self.cluster)
        self.assertTrue(mock_create_hbase.called)
        mock_cu.start_service.assert_called_once_with(
            mock_cu.get_service_by_role('AGENT', self.cluster))

    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy._finish_cluster_starting")
    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy._prepare_cluster")
    @mock.patch("sahara.plugins.cdh.v5_9_0.deploy.CU")
    def test_start_cluster(self, mock_cu, mock_prepare, mock_finish):
        """start_cluster runs first_run and enables HA when JNs exist."""
        # Make the mocked journal-node list report the fixture's real count
        # so the HA branch in start_cluster is taken iff JNs are present.
        jns_count = 0
        for i in self.instances:
            if "HDFS_JOURNALNODE" in i.node_group.node_processes:
                jns_count += 1
        mock_cu.pu.get_jns.return_value.__len__.return_value = jns_count

        deploy.start_cluster(self.cluster)

        mock_prepare.assert_called_once_with(self.cluster)
        mock_cu.first_run.assert_called_once_with(self.cluster)
        mock_cu.pu.configure_swift.assert_called_once_with(self.cluster)
        if jns_count > 0:
            mock_cu.enable_namenode_ha.assert_called_once_with(self.cluster)
            mock_cu.update_role_config.assert_any_call(
                mock_cu.pu.get_secondarynamenode(self.cluster),
                'HDFS_NAMENODE'
            )
        mock_cu.enable_resourcemanager_ha.assert_called_once_with(self.cluster)
        mock_cu.update_role_config.assert_any_call(
            mock_cu.pu.get_stdb_rm(self.cluster), 'YARN_STANDBYRM')
        mock_finish.assert_called_once_with(self.cluster)

    def test_get_open_ports(self):
        """get_open_ports yields the expected port set per node group."""
        master_ports = [
            9000,
            7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
            9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994,
            8020, 8022, 50070, 50470,
            50090, 50495,
            8030, 8031, 8032, 8033, 8088,
            8040, 8041, 8042,
            10020, 19888,
            9083,
            10000,
            8888,
            11000, 11001,
            18088,
            2181, 3181, 4181, 9010,
            60000,
            60020,
            41414,
            8038,
            8983, 8984,
            8005, 12000,
            25020, 26000,
            25010, 24000,
            21050, 21000, 23000, 25000, 28000, 22000,
            16000, 16001
        ]
        # NOTE: the original test invoked get_open_ports once more before
        # the assertion and discarded the result; the redundant call was
        # removed.
        self.assertItemsEqual(master_ports,
                              deploy.get_open_ports(self.master.node_group))

        worker_ports = [
            9000,
            50010, 1004, 50075, 1006, 50020,
            8480, 8481, 8485,
            8040, 8041, 8042,
            8030, 8031, 8032, 8033, 8088
        ]
        self.assertItemsEqual(worker_ports,
                              deploy.get_open_ports(self.worker.node_group))

View File

@ -0,0 +1,25 @@
# Copyright (c) 2015 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh.v5_9_0 import plugin_utils as pu
from sahara.tests.unit.plugins.cdh import base_plugin_utils_test
class TestPluginUtilsV590(base_plugin_utils_test.TestPluginUtilsHigherThanV5):
    """Plugin-utils test suite bound to the CDH 5.9.0 plugin.

    All test cases are inherited from the shared base class; this class
    only supplies the version string and the PluginUtils implementation
    those cases exercise.
    """

    def setUp(self):
        super(TestPluginUtilsV590, self).setUp()
        # Attributes consumed by the inherited test cases.
        self.version = "v5_9_0"
        self.plug_utils = pu.PluginUtilsV590()

View File

@ -14,110 +14,11 @@
# limitations under the License.
from sahara.plugins.cdh.v5_9_0 import validation
from sahara.plugins import exceptions as ex
from sahara.tests.unit.plugins.cdh import base_validation_tests as bvt
# Short aliases for the expected-exception classes used throughout the
# test-case tables below.
icte = ex.InvalidClusterTopology
nnce = ex.NameNodeHAConfigurationError
icce = ex.InvalidComponentCountException
rsme = ex.RequiredServiceMissingException
rmce = ex.ResourceManagerHAConfigurationError
class ValidationTestCase(bvt.BaseValidationTestCase):
def setUp(self):
    """Bind the shared validation test machinery to the 5.9.0 validator."""
    super(ValidationTestCase, self).setUp()
    # Validator class exercised by the base class's generated test cases.
    self.module = validation.ValidatorV590
def _get_test_cases(self):
    """Extend the base test-case table with 5.9.0-specific topologies.

    Each case appears to be a list of up to four elements — the expected
    exception class (``None`` for a valid topology), a dict of node-process
    counts, a list of extra node groups, and optional cluster-config
    overrides — the exact contract is defined by
    base_validation_tests.BaseValidationTestCase (TODO confirm).
    """
    cases = super(ValidationTestCase, self)._get_test_cases()

    disable_anti_affinity = {'cluster_configs': {'general': {
        'Require Anti Affinity': False}}}
    # HDFS NameNode HA (journal nodes) and YARN ResourceManager HA cases:
    # journal-node counts must be an odd number >= 3, ZooKeeper is
    # required, and anti-affinity constraints must be satisfied.
    cases += [
        [None, {'HDFS_JOURNALNODE': 3,
                'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
        [rsme, {'HDFS_JOURNALNODE': 3,
                'ZOOKEEPER_SERVER': 0}, [], disable_anti_affinity],
        [icce, {'HDFS_JOURNALNODE': 4,
                'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
        [icce, {'HDFS_JOURNALNODE': 2,
                'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
        [nnce, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['HDFS_SECONDARYNAMENODE']}],
        [nnce, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['HDFS_NAMENODE']}],
        [None, {'HDFS_JOURNALNODE': 3, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['HDFS_NAMENODE', 'HDFS_SECONDARYNAMENODE']}],
        [None, {'YARN_STANDBYRM': 1,
                'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
        [icce, {'YARN_STANDBYRM': 2,
                'ZOOKEEPER_SERVER': 1}, [], disable_anti_affinity],
        [rsme, {'YARN_STANDBYRM': 1,
                'ZOOKEEPER_SERVER': 0}, [], disable_anti_affinity],
        [rmce, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['YARN_RESOURCEMANAGER']}],
        [rmce, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['YARN_STANDBYRM']}],
        [None, {'YARN_STANDBYRM': 1, 'ZOOKEEPER_SERVER': 1}, [],
         {'anti_affinity': ['YARN_STANDBYRM', 'YARN_RESOURCEMANAGER']}],
    ]

    # Service-dependency cases: Sentry, Solr, Sqoop and the HBase indexer
    # each require their supporting services to be present.
    cases += [
        [None, {'FLUME_AGENT': 1}],
        [icce, {'ZOOKEEPER_SERVER': 1, 'SENTRY_SERVER': 2}],
        [None, {'ZOOKEEPER_SERVER': 1, 'SENTRY_SERVER': 1}],
        [rsme, {'ZOOKEEPER_SERVER': 0, 'SENTRY_SERVER': 1}],
        [None, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1}],
        [rsme, {'ZOOKEEPER_SERVER': 0, 'SOLR_SERVER': 1}],
        [None, {'YARN_NODEMANAGER': 1, 'YARN_JOBHISTORY': 1,
                'SQOOP_SERVER': 1}],
        [rsme, {'YARN_NODEMANAGER': 0, 'YARN_JOBHISTORY': 1,
                'SQOOP_SERVER': 1}],
        [rsme, {'YARN_NODEMANAGER': 1, 'YARN_JOBHISTORY': 0,
                'SQOOP_SERVER': 1}],
        # HBASE_MASTER AND HBASE_REGIONSERVER depend circularly
        [None, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1,
                'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
                'HBASE_REGIONSERVER': 1}],
        [rsme, {'ZOOKEEPER_SERVER': 0, 'SOLR_SERVER': 1,
                'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
                'HBASE_REGIONSERVER': 1}],
        [rsme, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 0,
                'HBASE_MASTER': 1, 'HBASE_INDEXER': 1,
                'HBASE_REGIONSERVER': 1}],
        [rsme, {'ZOOKEEPER_SERVER': 1, 'SOLR_SERVER': 1,
                'HBASE_MASTER': 0, 'HBASE_INDEXER': 1}],
    ]

    # Impala cases: an extra worker group running IMPALAD alongside the
    # datanode/nodemanager processes ("worker_with_implama" — name kept
    # as-is, presumably a typo for "impala").
    worker_with_implama = ('worker_ng', 1, ['HDFS_DATANODE',
                                            'YARN_NODEMANAGER',
                                            'IMPALAD'], 3)
    cases += [
        [None, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
                'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1,
                'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
         [worker_with_implama]],
        [icte, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
                'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1},
         []],
        [icte, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
                'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1},
         [worker_with_implama]],
        [rsme, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 0,
                'HIVE_METASTORE': 1, 'HIVE_SERVER2': 1,
                'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
         [worker_with_implama]],
        [rsme, {'IMPALA_CATALOGSERVER': 1, 'IMPALA_STATESTORE': 1,
                'HIVE_METASTORE': 0, 'HIVE_SERVER2': 1,
                'HDFS_DATANODE': 0, 'YARN_NODEMANAGER': 0},
         [worker_with_implama]]
    ]

    # KMS is a singleton service.
    cases += [
        [None, {'KMS': 1}],
        [icce, {'KMS': 2}]
    ]
    return cases