Merge "Boot from volume"

Zuul 2018-07-25 09:53:32 +00:00 committed by Gerrit Code Review
commit a138f4e5bf
12 changed files with 129 additions and 3 deletions

View File

@@ -198,6 +198,7 @@ Create a master node group template with the command:
| Flavor id | 2 |
| Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
| Id | 0f066e14-9a73-4379-bbb4-9d9347633e31 |
| Is boot from volume | False |
| Is default | False |
| Is protected | False |
| Is proxy gateway | False |
@@ -227,6 +228,42 @@ Create a worker node group template with the command:
| Flavor id | 2 |
| Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
| Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc |
| Is boot from volume | False |
| Is default | False |
| Is protected | False |
| Is proxy gateway | False |
| Is public | False |
| Name | vanilla-default-worker |
| Node processes | datanode, nodemanager |
| Plugin name | vanilla |
| Security groups | None |
| Use autoconfig | False |
| Version | <plugin_version> |
| Volumes per node | 0 |
+---------------------+--------------------------------------+
You can also create node group templates with the ``--boot-from-volume``
flag. This tells the node group to boot its instances from a volume
instead of from the image. This feature allows for easier live migrations
and improved performance.

.. sourcecode:: console
$ openstack dataprocessing node group template create \
--name vanilla-default-worker --plugin vanilla \
--plugin-version <plugin_version> --processes datanode nodemanager \
--flavor 2 --auto-security-group --floating-ip-pool <pool-id> \
--boot-from-volume
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| Auto security group | True |
| Availability zone | None |
| Flavor id | 2 |
| Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
| Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc |
| Is boot from volume | True |
| Is default | False |
| Is protected | False |
| Is proxy gateway | False |

View File

@@ -0,0 +1,3 @@
---
features:
- Added the ability to boot a Sahara cluster from volumes instead of images.

View File

@@ -50,6 +50,7 @@ NODE_GROUP_DEFAULTS = {
"volumes_availability_zone": None,
"volume_mount_prefix": "/volumes/disk",
"volume_type": None,
"boot_from_volume": False,
"floating_ip_pool": None,
"security_groups": None,
"auto_security_group": False,

View File

@@ -110,6 +110,8 @@ class NodeGroup(object):
where to spawn volumes
volume_mount_prefix
volume_type
boot_from_volume - If set to True, the base image will be converted to a
bootable volume.
floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
instances in this Node Group
security_groups - List of security groups for instances in this Node Group
@@ -231,6 +233,7 @@ class NodeGroupTemplate(object):
volumes_availability_zone
volume_mount_prefix
volume_type
boot_from_volume
floating_ip_pool
security_groups
auto_security_group

View File

@@ -0,0 +1,40 @@
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add boot_from_volumes field for node_groups and related classes
Revision ID: 034
Revises: 033
Create Date: 2018-06-06 17:36:04.749264
"""
# revision identifiers, used by Alembic.
revision = '034'
down_revision = '033'
from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column('node_group_templates',
                  sa.Column('boot_from_volume', sa.Boolean(),
                            nullable=False))
    op.add_column('node_groups',
                  sa.Column('boot_from_volume', sa.Boolean(),
                            nullable=False))
    op.add_column('templates_relations',
                  sa.Column('boot_from_volume', sa.Boolean(),
                            nullable=False))
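Operators pick up a new revision like this with Sahara's Alembic wrapper;
the config file path below is illustrative:

.. sourcecode:: console

    $ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head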

View File

@@ -117,6 +117,7 @@ class NodeGroup(mb.SaharaBase):
volumes_availability_zone = sa.Column(sa.String(255))
volume_mount_prefix = sa.Column(sa.String(80))
volume_type = sa.Column(sa.String(255))
boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
count = sa.Column(sa.Integer, nullable=False)
use_autoconfig = sa.Column(sa.Boolean(), default=True)
@@ -228,6 +229,7 @@ class NodeGroupTemplate(mb.SaharaBase):
volumes_availability_zone = sa.Column(sa.String(255))
volume_mount_prefix = sa.Column(sa.String(80))
volume_type = sa.Column(sa.String(255))
boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
floating_ip_pool = sa.Column(sa.String(36))
security_groups = sa.Column(st.JsonListType())
auto_security_group = sa.Column(sa.Boolean())
@@ -261,6 +263,7 @@ class TemplatesRelation(mb.SaharaBase):
volumes_availability_zone = sa.Column(sa.String(255))
volume_mount_prefix = sa.Column(sa.String(80))
volume_type = sa.Column(sa.String(255))
boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
count = sa.Column(sa.Integer, nullable=False)
use_autoconfig = sa.Column(sa.Boolean(), default=True)
cluster_template_id = sa.Column(sa.String(36),

View File

@@ -28,6 +28,7 @@ from sahara.utils import general as g
from sahara.utils.openstack import base as b
from sahara.utils.openstack import heat as h
from sahara.utils.openstack import neutron
from sahara.utils.openstack import nova
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -509,11 +510,20 @@ class ClusterStack(object):
         properties.update({
             "name": inst_name,
             "flavor": six.text_type(ng.flavor_id),
-            "image": ng.get_image_id(),
             "admin_user": ng.image_username,
             "user_data": userdata
         })

+        if ng.boot_from_volume:
+            resources.update(self._get_bootable_volume(ng))
+            properties["block_device_mapping"] = [
+                {"device_name": "vda",
+                 "volume_id": {"get_resource": "bootable_volume"},
+                 "delete_on_termination": "true"}]
+        else:
+            properties.update({"image": ng.get_image_id()})
resources.update({
INSTANCE_RESOURCE_NAME: {
"type": "OS::Nova::Server",
@@ -527,6 +537,20 @@ class ClusterStack(object):
         resources.update(self._serialize_wait_condition(ng))
         return resources

+    def _get_bootable_volume(self, node_group):
+        node_group_flavor = nova.get_flavor(id=node_group.flavor_id)
+        image_size = node_group_flavor.disk
+
+        return {
+            "bootable_volume": {
+                "type": "OS::Cinder::Volume",
+                "properties": {
+                    "size": image_size,
+                    "image": node_group.get_image_id()
+                }
+            }
+        }
+
     def _serialize_wait_condition(self, ng):
         if not CONF.heat_enable_wait_condition:
             return {}
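Taken together, the two paths above mean a boot-from-volume node group
serializes into a Heat resource fragment roughly like the sketch below;
the size, image id, and server properties are placeholders, and the
server resource is abbreviated:

.. sourcecode:: python

    resources = {
        "bootable_volume": {
            "type": "OS::Cinder::Volume",
            "properties": {
                # Sized to the flavor's root disk, built from the
                # node group's image.
                "size": 20,
                "image": "<image-id>",
            },
        },
        "inst": {  # INSTANCE_RESOURCE_NAME
            "type": "OS::Nova::Server",
            "properties": {
                "name": "cluster-worker-inst",
                "flavor": "2",
                "admin_user": "ubuntu",
                # No "image" property: the server boots from the Cinder
                # volume attached as vda instead.
                "block_device_mapping": [
                    {"device_name": "vda",
                     "volume_id": {"get_resource": "bootable_volume"},
                     "delete_on_termination": "true"}],
            },
        },
    }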

View File

@@ -119,6 +119,11 @@ NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({
        "type": "string",
    }})
NODE_GROUP_TEMPLATE_SCHEMA_V2["required"].append("plugin_version")

NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({
    "boot_from_volume": {
        "type": "boolean",
    }})
# For an update we do not require any fields but we want the given
# fields to be validated
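Since the v2 schema only adds a boolean type constraint, its effect is
easy to reproduce with ``jsonschema`` directly; this standalone sketch
mirrors the fragment above rather than importing Sahara:

.. sourcecode:: python

    import jsonschema

    schema = {
        "type": "object",
        "properties": {
            "boot_from_volume": {"type": "boolean"},
        },
    }

    jsonschema.validate({"boot_from_volume": True}, schema)  # accepted

    try:
        jsonschema.validate({"boot_from_volume": "yes"}, schema)
    except jsonschema.ValidationError as err:
        print(err.message)  # 'yes' is not of type 'boolean'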

View File

@@ -132,6 +132,7 @@ class ClusterTest(test_base.ConductorManagerTestCase):
ng.pop("volumes_availability_zone")
ng.pop("volume_type")
ng.pop("floating_ip_pool")
ng.pop("boot_from_volume")
ng.pop("image_username")
ng.pop("open_ports")
ng.pop("auto_security_group")

View File

@@ -458,6 +458,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase):
ng.pop("volume_type")
ng.pop("auto_security_group")
ng.pop("is_proxy_gateway")
ng.pop("boot_from_volume")
ng.pop('volume_local_to_instance')
self.assertEqual(SAMPLE_CLT["node_groups"],

View File

@@ -626,6 +626,14 @@ class SaharaMigrationsCheckers(object):
    def _check_033(self, engine, data):
        self.assertColumnExists(engine, 'clusters', 'anti_affinity_ratio')

    def _check_034(self, engine, data):
        self.assertColumnExists(engine, 'node_groups',
                                'boot_from_volume')
        self.assertColumnExists(engine, 'node_group_templates',
                                'boot_from_volume')
        self.assertColumnExists(engine, 'templates_relations',
                                'boot_from_volume')
class TestMigrationsMySQL(SaharaMigrationsCheckers,
base.BaseWalkMigrationTestCase,

View File

@@ -36,12 +36,12 @@ class BaseTestClusterTemplate(base.SaharaWithDbTestCase):
                               floating_ip_pool=floating_ip_pool, image_id=None,
                               volumes_per_node=0, volumes_size=0, id="1",
                               image_username='root', volume_type=None,
-                              auto_security_group=True)
+                              boot_from_volume=False, auto_security_group=True)
         ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                               floating_ip_pool=floating_ip_pool, image_id=None,
                               volumes_per_node=2, volumes_size=10, id="2",
                               image_username='root', volume_type=volume_type,
-                              auto_security_group=True)
+                              boot_from_volume=False, auto_security_group=True)
return ng1, ng2
def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None,