Added support for instance locality to engines
* Added support to the heat engine. * Added support to the direct engine. * Added unit tests for heat templates. Partially implements blueprint: volume-instance-locality. Change-Id: I9fde3967c46942c8adcbca31300e948b4d0e228e
This commit is contained in:
parent
3814c2796e
commit
c9def918ec
|
@ -221,7 +221,7 @@ class ClusterTemplate(object):
|
|||
for idx in range(0, ng.volumes_per_node):
|
||||
resources.update(self._serialize_volume(
|
||||
inst_name, idx, ng.volumes_size, ng.volumes_availability_zone,
|
||||
ng.volume_type))
|
||||
ng.volume_type, ng.volume_local_to_instance))
|
||||
|
||||
return resources
|
||||
|
||||
|
@ -274,7 +274,8 @@ class ClusterTemplate(object):
|
|||
}
|
||||
|
||||
def _serialize_volume(self, inst_name, volume_idx, volumes_size,
|
||||
volumes_availability_zone, volume_type):
|
||||
volumes_availability_zone, volume_type,
|
||||
volume_local_to_instance):
|
||||
volume_name = _get_volume_name(inst_name, volume_idx)
|
||||
volume_attach_name = _get_volume_attach_name(inst_name, volume_idx)
|
||||
properties = {
|
||||
|
@ -285,6 +286,10 @@ class ClusterTemplate(object):
|
|||
if volumes_availability_zone:
|
||||
properties["availability_zone"] = volumes_availability_zone
|
||||
|
||||
if volume_local_to_instance:
|
||||
properties["scheduler_hints"] = {
|
||||
"local_to_instance": {"Ref": inst_name}}
|
||||
|
||||
return {
|
||||
volume_name: {
|
||||
"Type": "OS::Cinder::Volume",
|
||||
|
|
|
@ -96,7 +96,8 @@ def _attach_volumes_to_node(node_group, instance):
|
|||
for idx in range(1, node_group.volumes_per_node + 1):
|
||||
display_name = "volume_" + instance.instance_name + "_" + str(idx)
|
||||
device = _create_attach_volume(
|
||||
ctx, instance, size, volume_type, display_name,
|
||||
ctx, instance, size, volume_type,
|
||||
node_group.volume_local_to_instance, display_name,
|
||||
node_group.volumes_availability_zone)
|
||||
devices.append(device)
|
||||
LOG.debug("Attached volume %s to instance %s" %
|
||||
|
@ -108,7 +109,8 @@ def _attach_volumes_to_node(node_group, instance):
|
|||
_mount_volume_to_node(instance, idx, devices[idx])
|
||||
|
||||
|
||||
def _create_attach_volume(ctx, instance, size, volume_type, name=None,
|
||||
def _create_attach_volume(ctx, instance, size, volume_type,
|
||||
volume_local_to_instance, name=None,
|
||||
availability_zone=None):
|
||||
if CONF.cinder.api_version == 1:
|
||||
kwargs = {'size': size, 'display_name': name}
|
||||
|
@ -119,6 +121,9 @@ def _create_attach_volume(ctx, instance, size, volume_type, name=None,
|
|||
if availability_zone is not None:
|
||||
kwargs['availability_zone'] = availability_zone
|
||||
|
||||
if volume_local_to_instance:
|
||||
kwargs['scheduler_hints'] = {'local_to_instance': instance.instance_id}
|
||||
|
||||
volume = cinder.client().volumes.create(**kwargs)
|
||||
conductor.append_volume(ctx, instance, volume.id)
|
||||
|
||||
|
|
|
@ -0,0 +1,110 @@
|
|||
{
|
||||
"AWSTemplateFormatVersion" : "2010-09-09",
|
||||
"Description" : "Data Processing Cluster by Sahara",
|
||||
|
||||
"Resources" : {
|
||||
"cluster-worker-001-port" : {
|
||||
"Type" : "OS::Neutron::Port",
|
||||
"Properties" : {
|
||||
"network_id" : "private_net",
|
||||
"replacement_policy": "AUTO"
|
||||
}
|
||||
},
|
||||
"cluster-worker-001-floating" : {
|
||||
"Type" : "OS::Neutron::FloatingIP" ,
|
||||
"Properties" : {
|
||||
"floating_network_id" : "floating",
|
||||
"port_id" : { "Ref" : "cluster-worker-001-port" }
|
||||
}
|
||||
},
|
||||
"cluster-worker-001" : {
|
||||
"Type" : "OS::Nova::Server",
|
||||
"Properties" : {
|
||||
"name" : "cluster-worker-001",
|
||||
"flavor" : "42",
|
||||
"image" : "1",
|
||||
"admin_user": "root",
|
||||
"networks" : [{ "port" : { "Ref" : "cluster-worker-001-port" }}],
|
||||
"key_name" : "user_key",
|
||||
"user_data": "line2\nline3"
|
||||
}
|
||||
},
|
||||
"cluster-worker-001-volume-0" : {
|
||||
"Type" : "OS::Cinder::Volume",
|
||||
"Properties" : {
|
||||
"name" : "cluster-worker-001-volume-0",
|
||||
"size" : "10",
|
||||
"scheduler_hints": {"local_to_instance": {"Ref": "cluster-worker-001"}},
|
||||
"volume_type" : "vol_type"
|
||||
}
|
||||
},
|
||||
"cluster-worker-001-volume-attachment-0" : {
|
||||
"Type" : "OS::Cinder::VolumeAttachment",
|
||||
"Properties" : {
|
||||
"instance_uuid" : { "Ref" : "cluster-worker-001" },
|
||||
"volume_id" : { "Ref" : "cluster-worker-001-volume-0" },
|
||||
"mountpoint" : null
|
||||
}
|
||||
},
|
||||
"cluster-worker-001-volume-1" : {
|
||||
"Type" : "OS::Cinder::Volume",
|
||||
"Properties" : {
|
||||
"name" : "cluster-worker-001-volume-1",
|
||||
"size" : "10",
|
||||
"scheduler_hints": {"local_to_instance": {"Ref": "cluster-worker-001"}},
|
||||
"volume_type": "vol_type"
|
||||
}
|
||||
},
|
||||
"cluster-worker-001-volume-attachment-1" : {
|
||||
"Type" : "OS::Cinder::VolumeAttachment",
|
||||
"Properties" : {
|
||||
"instance_uuid" : { "Ref" : "cluster-worker-001" },
|
||||
"volume_id" : { "Ref" : "cluster-worker-001-volume-1" },
|
||||
"mountpoint" : null
|
||||
}
|
||||
},
|
||||
"cluster-master-001-port" : {
|
||||
"Type" : "OS::Neutron::Port",
|
||||
"Properties" : {
|
||||
"network_id" : "private_net",
|
||||
"replacement_policy": "AUTO"
|
||||
}
|
||||
},
|
||||
"cluster-master-001-floating" : {
|
||||
"Type" : "OS::Neutron::FloatingIP" ,
|
||||
"Properties" : {
|
||||
"floating_network_id" : "floating",
|
||||
"port_id" : { "Ref" : "cluster-master-001-port" }
|
||||
}
|
||||
},
|
||||
"cluster-master-001" : {
|
||||
"Type" : "OS::Nova::Server",
|
||||
"Properties" : {
|
||||
"name" : "cluster-master-001",
|
||||
"flavor" : "42",
|
||||
"image" : "1",
|
||||
"admin_user": "root",
|
||||
"networks" : [{ "port" : { "Ref" : "cluster-master-001-port" }}],
|
||||
"key_name" : "user_key",
|
||||
"user_data": "line1\nline2"
|
||||
}
|
||||
},
|
||||
"cluster-master-001-volume-0": {
|
||||
"Type": "OS::Cinder::Volume",
|
||||
"Properties": {
|
||||
"name": "cluster-master-001-volume-0",
|
||||
"size": "10",
|
||||
"volume_type": null
|
||||
}
|
||||
},
|
||||
"cluster-master-001-volume-attachment-0": {
|
||||
"Type": "OS::Cinder::VolumeAttachment",
|
||||
"Properties": {
|
||||
"instance_uuid": {"Ref": "cluster-master-001"},
|
||||
"volume_id": {"Ref": "cluster-master-001-volume-0"},
|
||||
"mountpoint": null}
|
||||
}
|
||||
},
|
||||
|
||||
"Outputs" : {}
|
||||
}
|
|
@ -197,6 +197,43 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):
|
|||
json.loads(main_template)
|
||||
)
|
||||
|
||||
def test_load_template_with_volume_local_to_instance(self):
|
||||
"""Checks Heat cluster template with Neutron enabled.
|
||||
|
||||
Two NodeGroups used: 'master' with disabled volume_local_to_instance
|
||||
and 'worker' with enabled volume_local_to_instance.
|
||||
"""
|
||||
ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
|
||||
floating_ip_pool='floating', image_id=None,
|
||||
volumes_per_node=1, volumes_size=10, id=1,
|
||||
volume_type=None, image_username='root')
|
||||
ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
|
||||
floating_ip_pool='floating', image_id=None,
|
||||
volumes_per_node=2, volumes_size=10, id=2,
|
||||
image_username='root', volume_type='vol_type',
|
||||
volume_local_to_instance=True)
|
||||
|
||||
cluster = tu.create_cluster("cluster", "tenant1", "general",
|
||||
"1.2.1", [ng1, ng2],
|
||||
user_keypair_id='user_key',
|
||||
neutron_management_network='private_net',
|
||||
default_image_id='1', image_id=None,
|
||||
anti_affinity=[])
|
||||
heat_template = h.ClusterTemplate(cluster)
|
||||
heat_template.add_node_group_extra(ng1['id'], 1,
|
||||
get_ud_generator('line1\nline2'))
|
||||
heat_template.add_node_group_extra(ng2['id'], 1,
|
||||
get_ud_generator('line2\nline3'))
|
||||
|
||||
self.override_config("use_neutron", True)
|
||||
main_template = heat_template._get_main_template()
|
||||
|
||||
self.assertEqual(
|
||||
json.loads(f.get_file_text(
|
||||
"tests/unit/resources/"
|
||||
"test_serialize_resources_volume_local_to_instance.heat")),
|
||||
json.loads(main_template))
|
||||
|
||||
|
||||
def get_ud_generator(s):
|
||||
def generator(*args, **kwargs):
|
||||
|
|
|
@ -109,7 +109,8 @@ class TestAttachVolume(base.SaharaWithDbTestCase):
|
|||
'volume_type': None,
|
||||
'name': 'master',
|
||||
'cluster_id': '11',
|
||||
'instances': [instance1, instance2]}
|
||||
'instances': [instance1, instance2],
|
||||
'volume_local_to_instance': False}
|
||||
|
||||
cluster = r.ClusterResource({'node_groups': [ng]})
|
||||
|
||||
|
|
Loading…
Reference in New Issue