diff --git a/etc/sahara/sahara.conf.sample-basic b/etc/sahara/sahara.conf.sample-basic
index 28a61f46ef..b056d75491 100644
--- a/etc/sahara/sahara.conf.sample-basic
+++ b/etc/sahara/sahara.conf.sample-basic
@@ -22,6 +22,12 @@
 # with use_neutron=True)
 #use_namespaces=false

+# Use Designate for internal and external hostname resolution (boolean value)
+#use_designate=false
+
+# IP addresses of Designate nameservers. This is required if 'use_designate' is
+# True
+#nameservers =

 # Maximum length of job binary data in kilobytes that may be
 # stored or retrieved in a single operation (integer value)
diff --git a/releasenotes/notes/designate-integration-784c5f7f29546015.yaml b/releasenotes/notes/designate-integration-784c5f7f29546015.yaml
new file mode 100644
index 0000000000..3cb06fe097
--- /dev/null
+++ b/releasenotes/notes/designate-integration-784c5f7f29546015.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - Added integration with Designate for hostname resolution through DNS
+    servers.
diff --git a/sahara/conductor/objects.py b/sahara/conductor/objects.py
index 37cc7bb468..1df27a0455 100644
--- a/sahara/conductor/objects.py
+++ b/sahara/conductor/objects.py
@@ -64,6 +64,7 @@ class Cluster(object):
         use_autoconfig
         is_public
         is_protected
+        domain_name
     """

     def has_proxy_gateway(self):
@@ -87,6 +88,9 @@ class Cluster(object):
         extra = self.extra or {}
         return extra.get('heat_stack_name', self.name)

+    def use_designate_feature(self):
+        return CONF.use_designate and self.domain_name
+

 class NodeGroup(object):
     """An object representing Node Group.
@@ -152,13 +156,23 @@ class Instance(object):
         management_ip
         volumes
         storage_devices_number
+        dns_hostname
     """

     def hostname(self):
         return self.instance_name

     def fqdn(self):
-        return self.instance_name + '.' + CONF.node_domain
+        if self._use_designate_feature():
+            return self.dns_hostname
+        else:
+            return self.instance_name + '.' + CONF.node_domain
+
+    def get_ip_or_dns_name(self):
+        if self._use_designate_feature():
+            return self.dns_hostname
+        else:
+            return self.management_ip

     def remote(self):
         return remote.get_remote(self)
@@ -173,6 +187,9 @@ class Instance(object):

         return mp

+    def _use_designate_feature(self):
+        return CONF.use_designate and self.dns_hostname
+

 class ClusterTemplate(object):
     """An object representing Cluster Template.
@@ -190,6 +207,7 @@ class ClusterTemplate(object):
         node_groups - list of NodeGroup objects
         is_public
         is_protected
+        domain_name
     """

diff --git a/sahara/config.py b/sahara/config.py
index 39f61b96f8..543ea3fab0 100644
--- a/sahara/config.py
+++ b/sahara/config.py
@@ -94,12 +94,24 @@ networking_opts = [
                 "use_rootwrap=True")
 ]

+dns_opts = [
+    cfg.BoolOpt('use_designate',
+                default=False,
+                help='Use Designate for internal and external hostname '
+                     'resolution'),
+    cfg.ListOpt('nameservers',
+                default=[],
+                help="IP addresses of Designate nameservers. "
+                     "This is required if 'use_designate' is True")
+]
+
 CONF = cfg.CONF

 CONF.register_cli_opts(cli_opts)
 CONF.register_opts(networking_opts)
 CONF.register_opts(edp_opts)
 CONF.register_opts(db_opts)
+CONF.register_opts(dns_opts)

 log.register_options(CONF)
@@ -149,6 +161,7 @@ def list_opts():
         itertools.chain(cli_opts,
                         edp_opts,
                         networking_opts,
+                        dns_opts,
                         db_opts,
                         plugins_base.opts,
                         topology_helper.opts,
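Reviewer note: since `CONF.register_opts(dns_opts)` is called without an option group, both new options land in the `[DEFAULT]` section. A hypothetical sahara.conf fragment enabling the feature (the addresses are placeholders):

    [DEFAULT]
    use_designate = true
    nameservers = 203.0.113.10,203.0.113.11

`nameservers` is a ListOpt, so the comma-separated value is parsed into a Python list.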
" + "This is required if 'use_designate' is True") +] + CONF = cfg.CONF CONF.register_cli_opts(cli_opts) CONF.register_opts(networking_opts) CONF.register_opts(edp_opts) CONF.register_opts(db_opts) +CONF.register_opts(dns_opts) log.register_options(CONF) @@ -149,6 +161,7 @@ def list_opts(): itertools.chain(cli_opts, edp_opts, networking_opts, + dns_opts, db_opts, plugins_base.opts, topology_helper.opts, diff --git a/sahara/db/migration/alembic_migrations/versions/032_add_domain_name.py b/sahara/db/migration/alembic_migrations/versions/032_add_domain_name.py new file mode 100644 index 0000000000..f684e31204 --- /dev/null +++ b/sahara/db/migration/alembic_migrations/versions/032_add_domain_name.py @@ -0,0 +1,38 @@ +# Copyright 2016 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""032_add_domain_name + +Revision ID: 032 +Revises: 031 +Create Date: 2016-07-21 13:33:33.674853 + +""" + +# revision identifiers, used by Alembic. +revision = '032' +down_revision = '031' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('cluster_templates', sa.Column( + 'domain_name', sa.String(length=255), nullable=True)) + op.add_column('clusters', sa.Column( + 'domain_name', sa.String(length=255), nullable=True)) + op.add_column('instances', sa.Column( + 'dns_hostname', sa.String(length=255), nullable=True)) diff --git a/sahara/db/sqlalchemy/models.py b/sahara/db/sqlalchemy/models.py index 217ed6d670..048fcc21b2 100644 --- a/sahara/db/sqlalchemy/models.py +++ b/sahara/db/sqlalchemy/models.py @@ -83,6 +83,7 @@ class Cluster(mb.SaharaBase): shares = sa.Column(st.JsonListType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) + domain_name = sa.Column(sa.String(255)) def to_dict(self, show_progress=False): d = super(Cluster, self).to_dict() @@ -162,6 +163,7 @@ class Instance(mb.SaharaBase): management_ip = sa.Column(sa.String(45)) volumes = sa.Column(st.JsonListType()) storage_devices_number = sa.Column(sa.Integer) + dns_hostname = sa.Column(sa.String(255)) # Template objects: ClusterTemplate, NodeGroupTemplate, TemplatesRelation @@ -192,6 +194,7 @@ class ClusterTemplate(mb.SaharaBase): shares = sa.Column(st.JsonListType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) + domain_name = sa.Column(sa.String(255)) def to_dict(self): d = super(ClusterTemplate, self).to_dict() diff --git a/sahara/plugins/ambari/plugin.py b/sahara/plugins/ambari/plugin.py index 88a8e823b4..c7c08c2e76 100644 --- a/sahara/plugins/ambari/plugin.py +++ b/sahara/plugins/ambari/plugin.py @@ -98,7 +98,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase): def _set_cluster_info(self, cluster): ambari_ip = plugin_utils.get_instance( - cluster, p_common.AMBARI_SERVER).management_ip + cluster, p_common.AMBARI_SERVER).get_ip_or_dns_name() ambari_port = "8080" info = { p_common.AMBARI_SERVER: { @@ -113,53 +113,54 @@ class AmbariPluginProvider(p.ProvisioningPluginBase): for idx, namenode in enumerate(nns): info[p_common.NAMENODE][ 
"Web UI %s" % (idx + 1)] = ( - "http://%s:50070" % namenode.management_ip) + "http://%s:50070" % namenode.get_ip_or_dns_name()) rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER) info[p_common.RESOURCEMANAGER] = {} for idx, resourcemanager in enumerate(rms): info[p_common.RESOURCEMANAGER][ "Web UI %s" % (idx + 1)] = ( - "http://%s:8088" % resourcemanager.management_ip) + "http://%s:8088" % resourcemanager.get_ip_or_dns_name()) historyserver = plugin_utils.get_instance(cluster, p_common.HISTORYSERVER) if historyserver: info[p_common.HISTORYSERVER] = { - "Web UI": "http://%s:19888" % historyserver.management_ip + "Web UI": "http://%s:19888" % + historyserver.get_ip_or_dns_name() } atlserver = plugin_utils.get_instance(cluster, p_common.APP_TIMELINE_SERVER) if atlserver: info[p_common.APP_TIMELINE_SERVER] = { - "Web UI": "http://%s:8188" % atlserver.management_ip + "Web UI": "http://%s:8188" % atlserver.get_ip_or_dns_name() } oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER) if oozie: info[p_common.OOZIE_SERVER] = { - "Web UI": "http://%s:11000/oozie" % oozie.management_ip + "Web UI": "http://%s:11000/oozie" % oozie.get_ip_or_dns_name() } hbase_master = plugin_utils.get_instance(cluster, p_common.HBASE_MASTER) if hbase_master: info[p_common.HBASE_MASTER] = { - "Web UI": "http://%s:60010" % hbase_master.management_ip + "Web UI": "http://%s:60010" % hbase_master.get_ip_or_dns_name() } falcon = plugin_utils.get_instance(cluster, p_common.FALCON_SERVER) if falcon: info[p_common.FALCON_SERVER] = { - "Web UI": "http://%s:15000" % falcon.management_ip + "Web UI": "http://%s:15000" % falcon.get_ip_or_dns_name() } storm_ui = plugin_utils.get_instance(cluster, p_common.STORM_UI_SERVER) if storm_ui: info[p_common.STORM_UI_SERVER] = { - "Web UI": "http://%s:8744" % storm_ui.management_ip + "Web UI": "http://%s:8744" % storm_ui.get_ip_or_dns_name() } ranger_admin = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN) if ranger_admin: info[p_common.RANGER_ADMIN] = { - "Web UI": "http://%s:6080" % ranger_admin.management_ip, + "Web UI": "http://%s:6080" % ranger_admin.get_ip_or_dns_name(), "Username": "admin", "Password": "admin" } @@ -167,7 +168,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase): p_common.SPARK_JOBHISTORYSERVER) if spark_hs: info[p_common.SPARK_JOBHISTORYSERVER] = { - "Web UI": "http://%s:18080" % spark_hs.management_ip + "Web UI": "http://%s:18080" % spark_hs.get_ip_or_dns_name() } info.update(cluster.info.to_dict()) ctx = context.ctx() diff --git a/sahara/plugins/cdh/abstractversionhandler.py b/sahara/plugins/cdh/abstractversionhandler.py index 5b5ccd42bc..2609da3ee2 100644 --- a/sahara/plugins/cdh/abstractversionhandler.py +++ b/sahara/plugins/cdh/abstractversionhandler.py @@ -125,7 +125,7 @@ class BaseVersionHandler(AbstractVersionHandler): hue = self.cloudera_utils.pu.get_hue(cluster) if hue: info['Hue Dashboard'] = { - 'Web UI': 'http://%s:8888' % hue.management_ip + 'Web UI': 'http://%s:8888' % hue.get_ip_or_dns_name() } ctx = context.ctx() diff --git a/sahara/plugins/cdh/cloudera_utils.py b/sahara/plugins/cdh/cloudera_utils.py index 682ba66396..9093c7e8fe 100644 --- a/sahara/plugins/cdh/cloudera_utils.py +++ b/sahara/plugins/cdh/cloudera_utils.py @@ -326,7 +326,7 @@ class ClouderaUtils(object): mng = self.pu.get_manager(cluster) info = { 'Cloudera Manager': { - 'Web UI': 'http://%s:7180' % mng.management_ip, + 'Web UI': 'http://%s:7180' % mng.get_ip_or_dns_name(), 'Username': 'admin', 'Password': db_helper.get_cm_password(cluster) } diff 
diff --git a/sahara/plugins/mapr/base/base_cluster_configurer.py b/sahara/plugins/mapr/base/base_cluster_configurer.py
index f2bd6156b7..513b2c8fdf 100644
--- a/sahara/plugins/mapr/base/base_cluster_configurer.py
+++ b/sahara/plugins/mapr/base/base_cluster_configurer.py
@@ -251,7 +251,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
                 display_name = display_name_template % args
                 data = ui_info.copy()
                 data[srvc.SERVICE_UI] = (data[srvc.SERVICE_UI] %
-                                         instance.management_ip)
+                                         instance.get_ip_or_dns_name())
                 info.update({display_name: data})

         ctx = context.ctx()
diff --git a/sahara/plugins/spark/plugin.py b/sahara/plugins/spark/plugin.py
index 81b461b509..71cba78462 100644
--- a/sahara/plugins/spark/plugin.py
+++ b/sahara/plugins/spark/plugin.py
@@ -398,7 +398,7 @@ class SparkProvider(p.ProvisioningPluginBase):
                 'HDFS', 'dfs.http.address', cluster)
             port = address[address.rfind(':') + 1:]
             info['HDFS'] = {
-                'Web UI': 'http://%s:%s' % (nn.management_ip, port)
+                'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), port)
             }
             info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()

@@ -407,7 +407,8 @@ class SparkProvider(p.ProvisioningPluginBase):
                 'Spark', 'Master webui port', cluster)
             if port is not None:
                 info['Spark'] = {
-                    'Web UI': 'http://%s:%s' % (sp_master.management_ip, port)
+                    'Web UI': 'http://%s:%s' % (
+                        sp_master.get_ip_or_dns_name(), port)
                 }
         ctx = context.ctx()
         conductor.cluster_update(ctx, cluster, {'info': info})
diff --git a/sahara/plugins/storm/plugin.py b/sahara/plugins/storm/plugin.py
index 572ed92e22..e7941004ac 100644
--- a/sahara/plugins/storm/plugin.py
+++ b/sahara/plugins/storm/plugin.py
@@ -310,7 +310,8 @@ class StormProvider(p.ProvisioningPluginBase):
             port = "8080"

             info['Strom'] = {
-                'Web UI': 'http://%s:%s' % (st_master.management_ip, port)
+                'Web UI': 'http://%s:%s' % (
+                    st_master.get_ip_or_dns_name(), port)
             }
             ctx = context.ctx()
             conductor.cluster_update(ctx, cluster, {'info': info})
diff --git a/sahara/plugins/vanilla/v2_7_1/versionhandler.py b/sahara/plugins/vanilla/v2_7_1/versionhandler.py
index 3f92a3f3f3..de66a57480 100644
--- a/sahara/plugins/vanilla/v2_7_1/versionhandler.py
+++ b/sahara/plugins/vanilla/v2_7_1/versionhandler.py
@@ -107,24 +107,25 @@ class VersionHandler(avm.AbstractVersionHandler):

         if rm:
             info['YARN'] = {
-                'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
-                'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
+                'Web UI': 'http://%s:%s' % (rm.get_ip_or_dns_name(), '8088'),
+                'ResourceManager': 'http://%s:%s' % (
+                    rm.get_ip_or_dns_name(), '8032')
             }

         if nn:
             info['HDFS'] = {
-                'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
+                'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), '50070'),
                 'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
             }

         if oo:
             info['JobFlow'] = {
-                'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
+                'Oozie': 'http://%s:%s' % (oo.get_ip_or_dns_name(), '11000')
             }

         if hs:
             info['MapReduce JobHistory Server'] = {
-                'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
+                'Web UI': 'http://%s:%s' % (hs.get_ip_or_dns_name(), '19888')
             }

         ctx = context.ctx()
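Reviewer note: the plugin hunks above are mechanical; every published web-UI address now goes through `get_ip_or_dns_name()`, so the cluster info switches from IPs to names when Designate is on. With hypothetical values:

    url = 'http://%s:%s' % (rm.get_ip_or_dns_name(), '8088')
    # Designate off: 'http://198.51.100.7:8088'
    # Designate on:  'http://master-1.cluster.example.org:8088'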
""" - hosts_file = cluster_utils.generate_etc_hosts(cluster) cpo.add_provisioning_step( cluster.id, _("Configure instances"), cluster_utils.count_instances(cluster)) @@ -154,14 +154,20 @@ class Engine(object): for node_group in cluster.node_groups: for instance in node_group.instances: with context.set_current_instance_id(instance.instance_id): - tg.spawn( - "configure-instance-%s" % instance.instance_name, - self._configure_instance, instance, hosts_file) + tg.spawn("configure-instance-{}".format( + instance.instance_name), + self._configure_instance, instance, cluster + ) @cpo.event_wrapper(mark_successful_on_exit=True) - def _configure_instance(self, instance, hosts_file): - LOG.debug('Configuring instance') + def _configure_instance(self, instance, cluster): + self._configure_instance_etc_hosts(instance, cluster) + if cluster.use_designate_feature(): + self._configure_instance_resolve_conf(instance) + def _configure_instance_etc_hosts(self, instance, cluster): + LOG.debug('Configuring "/etc/hosts" of instance.') + hosts_file = cluster_utils.generate_etc_hosts(cluster) with instance.remote() as r: r.write_file_to('etc-hosts', hosts_file) r.execute_command('sudo hostname %s' % instance.fqdn()) @@ -169,6 +175,23 @@ class Engine(object): r.execute_command('sudo usermod -s /bin/bash $USER') + def _configure_instance_resolve_conf(self, instance): + LOG.debug('Setting up those name servers from sahara.conf ' + 'which are lacked in the /etc/resolv.conf.') + with instance.remote() as r: + code, curr_resolv_conf = r.execute_command('cat /etc/resolv.conf') + diff = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf) + if diff.strip(): + position = curr_resolv_conf.find('nameserver') + if position == -1: + position = 0 + new_resolv_conf = "{}\n{}{}".format( + curr_resolv_conf[:position], + diff, + curr_resolv_conf[position:]) + r.write_file_to('resolv-conf', new_resolv_conf) + r.execute_command('sudo mv resolv-conf /etc/resolv.conf') + def _generate_user_data_script(self, node_group, instance_name): script = """#!/bin/bash echo "${public_key}" >> ${user_home}/.ssh/authorized_keys\n diff --git a/sahara/service/heat/commons.py b/sahara/service/heat/commons.py index ba077d6a49..65d3bbacd4 100644 --- a/sahara/service/heat/commons.py +++ b/sahara/service/heat/commons.py @@ -14,4 +14,4 @@ # limitations under the License. HEAT_ENGINE_VERSION = 'heat.3.0' -HEAT_TEMPLATE_VERSION = '2013-05-23' +HEAT_TEMPLATE_VERSION = '2016-04-08' diff --git a/sahara/service/heat/heat_engine.py b/sahara/service/heat/heat_engine.py index 1d98387490..682775150f 100644 --- a/sahara/service/heat/heat_engine.py +++ b/sahara/service/heat/heat_engine.py @@ -153,11 +153,17 @@ class HeatEngine(e.Engine): instances = stack.get_node_group_instances(node_group) for instance in instances: nova_id = instance['physical_id'] - name = instance['name'] if nova_id not in old_ids: - instance_id = conductor.instance_add( - ctx, node_group, {"instance_id": nova_id, - "instance_name": name}) + name = instance['name'] + inst = { + "instance_id": nova_id, + "instance_name": name + } + if cluster.use_designate_feature(): + inst.update( + {"dns_hostname": + name + '.' 
diff --git a/sahara/service/heat/commons.py b/sahara/service/heat/commons.py
index ba077d6a49..65d3bbacd4 100644
--- a/sahara/service/heat/commons.py
+++ b/sahara/service/heat/commons.py
@@ -14,4 +14,4 @@
 # limitations under the License.

 HEAT_ENGINE_VERSION = 'heat.3.0'
-HEAT_TEMPLATE_VERSION = '2013-05-23'
+HEAT_TEMPLATE_VERSION = '2016-04-08'
diff --git a/sahara/service/heat/heat_engine.py b/sahara/service/heat/heat_engine.py
index 1d98387490..682775150f 100644
--- a/sahara/service/heat/heat_engine.py
+++ b/sahara/service/heat/heat_engine.py
@@ -153,11 +153,17 @@ class HeatEngine(e.Engine):
         instances = stack.get_node_group_instances(node_group)
         for instance in instances:
             nova_id = instance['physical_id']
-            name = instance['name']
             if nova_id not in old_ids:
-                instance_id = conductor.instance_add(
-                    ctx, node_group, {"instance_id": nova_id,
-                                      "instance_name": name})
+                name = instance['name']
+                inst = {
+                    "instance_id": nova_id,
+                    "instance_name": name
+                }
+                if cluster.use_designate_feature():
+                    inst.update(
+                        {"dns_hostname":
+                            name + '.' + cluster.domain_name[:-1]})
+                instance_id = conductor.instance_add(ctx, node_group, inst)
                 new_ids.append(instance_id)
         return new_ids
@@ -234,6 +240,7 @@ class HeatEngine(e.Engine):
         cluster = c_u.change_cluster_status(cluster, stages[2])

         instances = c_u.get_instances(cluster, inst_ids)
+
         volumes.mount_to_instances(instances)

         self._configure_instances(cluster)
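Reviewer note: the bump of `HEAT_TEMPLATE_VERSION` to `2016-04-08` is presumably what makes the `str_split` intrinsic, used for the PTR records below, available to the generated templates. Also, `domain_name` is stored as an absolute DNS name with a trailing dot, which `heat_engine.py` strips when deriving `dns_hostname`:

    # Hypothetical values mirroring the hunk above.
    name = 'worker-1'
    domain_name = 'cluster.example.org.'   # absolute, dot-terminated
    dns_hostname = name + '.' + domain_name[:-1]
    assert dns_hostname == 'worker-1.cluster.example.org'

Nothing shown here rejects a `domain_name` without the trailing dot; in that case `[:-1]` would silently drop the last character of the domain.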
+ } + } + } + def _serialize_instance(self, ng): resources = {} properties = {} @@ -406,6 +488,8 @@ class ClusterStack(object): } }) + resources.update(self._serialize_designate_records()) + resources.update(self._serialize_designate_reverse_records()) resources.update(self._serialize_volume(ng)) resources.update(self._serialize_wait_condition(ng)) return resources diff --git a/sahara/service/validations/cluster_template_schema.py b/sahara/service/validations/cluster_template_schema.py index 474b43e71e..beda83bb94 100644 --- a/sahara/service/validations/cluster_template_schema.py +++ b/sahara/service/validations/cluster_template_schema.py @@ -99,6 +99,9 @@ CLUSTER_TEMPLATE_SCHEMA = { }, "is_protected": { "type": ["boolean", "null"], + }, + "domain_name": { + "type": ["string", "null"], } }, "additionalProperties": False, diff --git a/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py index 7c317aef6b..3bfd3c7111 100644 --- a/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py +++ b/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py @@ -81,7 +81,9 @@ class VersionHandlerTestCase(base.SaharaTestCase): @mock.patch(plugin_utils_path + "get_hue") def test_set_cluster_info(self, get_hue, get_cloudera_manager_info, ctx, cluster_update): - get_hue.return_value.management_ip = "1.2.3.4" + hue = mock.Mock() + hue.get_ip_or_dns_name.return_value = "1.2.3.4" + get_hue.return_value = hue cluster = mock.Mock() self.vh._set_cluster_info(cluster) info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}} diff --git a/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py index 6a957e7a04..d6c493b771 100644 --- a/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py +++ b/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py @@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase): @mock.patch(plugin_utils_path + "get_hue") def test_set_cluster_info(self, get_hue, get_cloudera_manager_info, ctx, cluster_update): - get_hue.return_value.management_ip = "1.2.3.4" + hue = mock.Mock() + hue.get_ip_or_dns_name.return_value = "1.2.3.4" + get_hue.return_value = hue cluster = mock.Mock() self.vh._set_cluster_info(cluster) info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}} diff --git a/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py index 929d0d0f2c..92e0f3b882 100644 --- a/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py +++ b/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py @@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase): @mock.patch(plugin_utils_path + "get_hue") def test_set_cluster_info(self, get_hue, get_cloudera_manager_info, ctx, cluster_update): - get_hue.return_value.management_ip = "1.2.3.4" + hue = mock.Mock() + hue.get_ip_or_dns_name.return_value = "1.2.3.4" + get_hue.return_value = hue cluster = mock.Mock() self.vh._set_cluster_info(cluster) info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}} diff --git a/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py index 179de0df71..2e27779b12 100644 --- a/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py +++ b/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py @@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase): 
diff --git a/sahara/service/validations/cluster_template_schema.py b/sahara/service/validations/cluster_template_schema.py
index 474b43e71e..beda83bb94 100644
--- a/sahara/service/validations/cluster_template_schema.py
+++ b/sahara/service/validations/cluster_template_schema.py
@@ -99,6 +99,9 @@ CLUSTER_TEMPLATE_SCHEMA = {
         },
         "is_protected": {
             "type": ["boolean", "null"],
+        },
+        "domain_name": {
+            "type": ["string", "null"],
         }
     },
     "additionalProperties": False,
diff --git a/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py
index 7c317aef6b..3bfd3c7111 100644
--- a/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py
+++ b/sahara/tests/unit/plugins/cdh/v5/test_versionhandler.py
@@ -81,7 +81,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
     @mock.patch(plugin_utils_path + "get_hue")
     def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
                               ctx, cluster_update):
-        get_hue.return_value.management_ip = "1.2.3.4"
+        hue = mock.Mock()
+        hue.get_ip_or_dns_name.return_value = "1.2.3.4"
+        get_hue.return_value = hue
         cluster = mock.Mock()
         self.vh._set_cluster_info(cluster)
         info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
diff --git a/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py
index 6a957e7a04..d6c493b771 100644
--- a/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py
+++ b/sahara/tests/unit/plugins/cdh/v5_3_0/test_versionhandler.py
@@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
     @mock.patch(plugin_utils_path + "get_hue")
     def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
                               ctx, cluster_update):
-        get_hue.return_value.management_ip = "1.2.3.4"
+        hue = mock.Mock()
+        hue.get_ip_or_dns_name.return_value = "1.2.3.4"
+        get_hue.return_value = hue
         cluster = mock.Mock()
         self.vh._set_cluster_info(cluster)
         info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
diff --git a/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py
index 929d0d0f2c..92e0f3b882 100644
--- a/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py
+++ b/sahara/tests/unit/plugins/cdh/v5_4_0/test_versionhandler.py
@@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
     @mock.patch(plugin_utils_path + "get_hue")
     def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
                               ctx, cluster_update):
-        get_hue.return_value.management_ip = "1.2.3.4"
+        hue = mock.Mock()
+        hue.get_ip_or_dns_name.return_value = "1.2.3.4"
+        get_hue.return_value = hue
         cluster = mock.Mock()
         self.vh._set_cluster_info(cluster)
         info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
diff --git a/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py
index 179de0df71..2e27779b12 100644
--- a/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py
+++ b/sahara/tests/unit/plugins/cdh/v5_5_0/test_versionhandler.py
@@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
     @mock.patch(plugin_utils_path + "get_hue")
     def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
                               ctx, cluster_update):
-        get_hue.return_value.management_ip = "1.2.3.4"
+        hue = mock.Mock()
+        hue.get_ip_or_dns_name.return_value = "1.2.3.4"
+        get_hue.return_value = hue
         cluster = mock.Mock()
         self.vh._set_cluster_info(cluster)
         info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
diff --git a/sahara/tests/unit/plugins/cdh/v5_7_0/test_versionhandler.py b/sahara/tests/unit/plugins/cdh/v5_7_0/test_versionhandler.py
index 67decb070c..d11734b1a2 100644
--- a/sahara/tests/unit/plugins/cdh/v5_7_0/test_versionhandler.py
+++ b/sahara/tests/unit/plugins/cdh/v5_7_0/test_versionhandler.py
@@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
     @mock.patch(plugin_utils_path + "get_hue")
     def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
                               ctx, cluster_update):
-        get_hue.return_value.management_ip = "1.2.3.4"
+        hue = mock.Mock()
+        hue.get_ip_or_dns_name.return_value = "1.2.3.4"
+        get_hue.return_value = hue
         cluster = mock.Mock()
         self.vh._set_cluster_info(cluster)
         info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}
@mock.patch("sahara.conductor.objects.Cluster.use_designate_feature") + def test_serialize_designate_reversed_records(self, mock_use_designate): + + def _generate_reversed_ip(ip): + return { + 'list_join': [ + '.', + [ + {'str_split': ['.', ip, 3]}, + {'str_split': ['.', ip, 2]}, + {'str_split': ['.', ip, 1]}, + {'str_split': ['.', ip, 0]}, + 'in-addr.arpa.' + ] + ] + } + + ng1, ng2 = self._make_node_groups('floating') + cluster = self._make_cluster('private_net', ng1, ng2, + domain_name='domain.org.') + + mock_use_designate.return_value = False + heat_template = self._make_heat_template(cluster, ng1, ng2) + expected = {} + actual = heat_template._serialize_designate_reverse_records() + self.assertEqual(expected, actual) + + mock_use_designate.return_value = True + heat_template = self._make_heat_template(cluster, ng1, ng2) + expected = { + 'internal_designate_reverse_record': { + 'properties': { + 'domain': 'in-addr.arpa.', + 'name': _generate_reversed_ip( + {'get_attr': ['inst', 'networks', 'private', 0]}), + 'data': { + 'list_join': [ + '.', + [{'get_attr': ['inst', 'name']}, 'domain.org.']] + }, + 'type': 'PTR' + }, + 'type': 'OS::Designate::Record' + }, + 'external_designate_reverse_record': { + 'properties': { + 'domain': 'in-addr.arpa.', + 'name': _generate_reversed_ip( + {'get_attr': ['floating_ip', 'ip']}), + 'data': { + 'list_join': [ + '.', + [{'get_attr': ['inst', 'name']}, 'domain.org.']] + }, + 'type': 'PTR' + }, + 'type': 'OS::Designate::Record' + } + } + actual = heat_template._serialize_designate_reverse_records() + self.assertEqual(expected, actual) + class TestClusterTemplateWaitCondition(BaseTestClusterTemplate): def _make_heat_template(self, cluster, ng1, ng2): diff --git a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py index d5a6b91557..4e1fc2b2bf 100644 --- a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py +++ b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py @@ -156,7 +156,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase): "name": "test", 'count': 3 } - ] + ], + 'domain_name': 'domain.org.' }, ) @@ -226,7 +227,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase): ], 'anti_affinity': ['datanode'], 'description': 'my template', - 'neutron_management_network': str(uuid.uuid4()) + 'neutron_management_network': str(uuid.uuid4()), + 'domain_name': 'domain.org.' }) @mock.patch("sahara.service.validations.base.check_network_exists") @@ -244,7 +246,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase): 'node_groups': None, 'anti_affinity': None, 'description': None, - 'neutron_management_network': None + 'neutron_management_network': None, + 'domain_name': None }) def test_cluster_template_create_v_plugin_name_exists(self): diff --git a/sahara/tests/unit/service/validation/utils.py b/sahara/tests/unit/service/validation/utils.py index 2e425c0f23..471487e754 100644 --- a/sahara/tests/unit/service/validation/utils.py +++ b/sahara/tests/unit/service/validation/utils.py @@ -421,7 +421,8 @@ class ValidationTestCase(base.SaharaTestCase): 'name': 'test-cluster', 'plugin_name': 'fake', 'hadoop_version': '0.1', - 'default_image_id': '550e8400-e29b-41d4-a716-446655440000' + 'default_image_id': '550e8400-e29b-41d4-a716-446655440000', + 'domain_name': 'domain.org.' 
diff --git a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
index d5a6b91557..4e1fc2b2bf 100644
--- a/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
+++ b/sahara/tests/unit/service/validation/test_cluster_template_create_validation.py
@@ -156,7 +156,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
                         "name": "test",
                         'count': 3
                     }
-                ]
+                ],
+                'domain_name': 'domain.org.'
            },
        )
@@ -226,7 +227,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
                 ],
                 'anti_affinity': ['datanode'],
                 'description': 'my template',
-                'neutron_management_network': str(uuid.uuid4())
+                'neutron_management_network': str(uuid.uuid4()),
+                'domain_name': 'domain.org.'
             })

     @mock.patch("sahara.service.validations.base.check_network_exists")
@@ -244,7 +246,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
                 'node_groups': None,
                 'anti_affinity': None,
                 'description': None,
-                'neutron_management_network': None
+                'neutron_management_network': None,
+                'domain_name': None
             })

     def test_cluster_template_create_v_plugin_name_exists(self):
diff --git a/sahara/tests/unit/service/validation/utils.py b/sahara/tests/unit/service/validation/utils.py
index 2e425c0f23..471487e754 100644
--- a/sahara/tests/unit/service/validation/utils.py
+++ b/sahara/tests/unit/service/validation/utils.py
@@ -421,7 +421,8 @@ class ValidationTestCase(base.SaharaTestCase):
             'name': 'test-cluster',
             'plugin_name': 'fake',
             'hadoop_version': '0.1',
-            'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
+            'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
+            'domain_name': 'domain.org.'
         }
         self._assert_create_object_validation(data=data)
         data = {
diff --git a/sahara/tests/unit/utils/test_cluster.py b/sahara/tests/unit/utils/test_cluster.py
index 0d7214f4b4..08f5805783 100644
--- a/sahara/tests/unit/utils/test_cluster.py
+++ b/sahara/tests/unit/utils/test_cluster.py
@@ -94,8 +94,13 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
         cluster = self.api.cluster_get(ctx, cluster.id)
         self.assertEqual(ng_len - 1, len(cluster.node_groups))

-    def test_generate_etc_hosts(self):
+    @mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
+    @mock.patch("socket.gethostbyname")
+    @mock.patch("sahara.utils.openstack.base.url_for")
+    def test_generate_etc_hosts(self, mock_url, mock_get_host,
+                                mock_use_designate):
         cluster = self._make_sample()
+        mock_use_designate.return_value = False
         ctx = context.ctx()
         idx = 0
         for ng in cluster.node_groups:
@@ -107,12 +112,13 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
                     'internal_ip': str(idx),
                 })
         cluster = self.api.cluster_get(ctx, cluster)
-        with mock.patch("sahara.utils.openstack.base.url_for") as mock_url:
-            mock_url.side_effect = ["http://keystone.local:1234/v13",
-                                    "http://swift.local:5678/v42"]
-            with mock.patch("socket.gethostbyname") as mock_get_host:
-                mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
-                value = cluster_utils.generate_etc_hosts(cluster)
+
+        mock_url.side_effect = ["http://keystone.local:1234/v13",
+                                "http://swift.local:5678/v42"]
+        mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
+
+        value = cluster_utils.generate_etc_hosts(cluster)
+
         expected = ("127.0.0.1 localhost\n"
                     "1 1.novalocal 1\n"
                     "2 2.novalocal 2\n"
@@ -121,3 +127,35 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
                     "1.2.3.4 keystone.local\n"
                     "5.6.7.8 swift.local\n")
         self.assertEqual(expected, value)
+
+    @mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
+    @mock.patch("socket.gethostbyname")
+    @mock.patch("sahara.utils.openstack.base.url_for")
+    def test_generate_etc_hosts_with_designate(self, mock_url, mock_get_host,
+                                               mock_use_designate):
+        cluster = self._make_sample()
+        mock_use_designate.return_value = True
+        mock_url.side_effect = ["http://keystone.local:1234/v13",
+                                "http://swift.local:5678/v42"]
+        mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
+
+        value = cluster_utils.generate_etc_hosts(cluster)
+
+        expected = ("127.0.0.1 localhost\n"
+                    "1.2.3.4 keystone.local\n"
+                    "5.6.7.8 swift.local\n")
+        self.assertEqual(expected, value)
+
+    def test_generate_resolv_conf_diff(self):
+        curr_resolv_conf = "search openstacklocal\nnameserver 8.8.8.8\n"
+
+        self.override_config("nameservers", ['1.1.1.1'])
+        value = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf)
+        expected = "nameserver 1.1.1.1\n"
+        self.assertEqual(expected, value)
+
+        self.override_config("nameservers", ['1.1.1.1', '8.8.8.8', '2.2.2.2'])
+        value = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf)
+        expected = ("nameserver 1.1.1.1\n"
+                    "nameserver 2.2.2.2\n")
+        self.assertEqual(expected, value)
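Reviewer note: the expectations in `test_generate_etc_hosts_with_designate` show the intended split. With Designate enabled, `/etc/hosts` keeps only the localhost line plus the keystone/swift aliases, and resolution of cluster nodes is delegated entirely to DNS:

    127.0.0.1 localhost
    1.2.3.4 keystone.local
    5.6.7.8 swift.local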
diff --git a/sahara/utils/cluster.py b/sahara/utils/cluster.py
index 898991a0ff..8a3aa90dee 100644
--- a/sahara/utils/cluster.py
+++ b/sahara/utils/cluster.py
@@ -16,6 +16,7 @@
 import socket

 from keystoneauth1 import exceptions as keystone_ex
+from oslo_config import cfg
 from oslo_log import log as logging
 from six.moves.urllib import parse

@@ -29,6 +30,8 @@ from sahara.utils.openstack import base as auth_base

 conductor = c.API
 LOG = logging.getLogger(__name__)

+CONF = cfg.CONF
+
 # cluster status
 CLUSTER_STATUS_VALIDATING = "Validating"
 CLUSTER_STATUS_INFRAUPDATING = "InfraUpdating"
@@ -132,13 +135,7 @@ def clean_cluster_from_empty_ng(cluster):
             conductor.node_group_remove(ctx, ng)


-def generate_etc_hosts(cluster):
-    hosts = "127.0.0.1 localhost\n"
-    for node_group in cluster.node_groups:
-        for instance in node_group.instances:
-            hosts += "%s %s %s\n" % (instance.internal_ip,
-                                     instance.fqdn(),
-                                     instance.hostname())
+def _etc_hosts_for_services(hosts):
     # add alias for keystone and swift
     for service in ["identity", "object-store"]:
         try:
@@ -149,5 +146,31 @@ def generate_etc_hosts(cluster):
             LOG.debug("Endpoint not found for service: \"%s\"", service)
             continue
         hosts += "%s %s\n" % (socket.gethostbyname(hostname), hostname)
-    return hosts
+    return hosts
+
+
+def _etc_hosts_for_instances(hosts, cluster):
+    for node_group in cluster.node_groups:
+        for instance in node_group.instances:
+            hosts += "%s %s %s\n" % (instance.internal_ip,
+                                     instance.fqdn(),
+                                     instance.hostname())
+    return hosts
+
+
+def generate_etc_hosts(cluster):
+    hosts = "127.0.0.1 localhost\n"
+    if not cluster.use_designate_feature():
+        hosts = _etc_hosts_for_instances(hosts, cluster)
+    hosts = _etc_hosts_for_services(hosts)
+    return hosts
+
+
+def generate_resolv_conf_diff(curr_resolv_conf):
+    # Returns a string with the nameservers from CONF.nameservers that
+    # are missing from 'curr_resolv_conf'.
+    resolv_conf = ""
+    for ns in CONF.nameservers:
+        if ns not in curr_resolv_conf:
+            resolv_conf += "nameserver {}\n".format(ns)
+    return resolv_conf
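Reviewer note: the membership test in `generate_resolv_conf_diff()` is a plain substring check, so a configured nameserver that is a substring of an existing entry (say `1.1.1.1` against a file already containing `11.1.1.11`) would be skipped incorrectly; matching parsed `nameserver` lines would be stricter. A standalone sketch of the intended behaviour, with the config list inlined instead of read from `CONF.nameservers`:

    def generate_resolv_conf_diff(curr_resolv_conf,
                                  nameservers=('203.0.113.10', '8.8.8.8')):
        # 'nameservers' stands in for CONF.nameservers.
        resolv_conf = ""
        for ns in nameservers:
            if ns not in curr_resolv_conf:
                resolv_conf += "nameserver {}\n".format(ns)
        return resolv_conf

    current = "search openstacklocal\nnameserver 8.8.8.8\n"
    assert generate_resolv_conf_diff(current) == "nameserver 203.0.113.10\n"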