From f1722350a7fca51c36dc3181d64235d5e8ad14ce Mon Sep 17 00:00:00 2001
From: Nikita Konovalov
Date: Thu, 4 Aug 2016 14:15:22 +0300
Subject: [PATCH] Boot from volume

Adds the ability to boot a Sahara cluster from volume.

Story: #2001820
Task: #12558

Change-Id: Ie11c5e7a628c369868d3c56e803da4b9e7d15f85
---
 doc/source/user/quickstart.rst                 | 37 +++++++++++++++++
 .../boot-from-volume-e7078452fac1a4a0.yaml     |  3 ++
 sahara/conductor/manager.py                    |  1 +
 sahara/conductor/objects.py                    |  3 ++
 .../versions/034_boot_from_volume.py           | 40 +++++++++++++++++++
 sahara/db/sqlalchemy/models.py                 |  3 ++
 sahara/service/heat/templates.py               | 26 +++++++++++-
 .../validations/node_group_template_schema.py  |  5 +++
 .../unit/conductor/manager/test_clusters.py    |  1 +
 .../unit/conductor/manager/test_templates.py   |  1 +
 .../unit/db/migration/test_migrations.py       |  8 ++++
 .../tests/unit/service/heat/test_templates.py  |  4 +-
 12 files changed, 129 insertions(+), 3 deletions(-)
 create mode 100644 releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml
 create mode 100644 sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py

diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index ebe5fd2369..efe77ab269 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -210,6 +210,7 @@ Create a master node group template with the command:
     | Flavor id           | 2                                    |
     | Floating ip pool    | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
     | Id                  | 0f066e14-9a73-4379-bbb4-9d9347633e31 |
+    | Is boot from volume | False                                |
     | Is default          | False                                |
     | Is protected        | False                                |
     | Is proxy gateway    | False                                |
@@ -239,6 +240,42 @@ Create a worker node group template with the command:
     | Flavor id           | 2                                    |
     | Floating ip pool    | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
     | Id                  | 6546bf44-0590-4539-bfcb-99f8e2c11efc |
+    | Is boot from volume | False                                |
+    | Is default          | False                                |
+    | Is protected        | False                                |
+    | Is proxy gateway    | False                                |
+    | Is public           | False                                |
+    | Name                | vanilla-default-worker               |
+    | Node processes      | datanode, nodemanager                |
+    | Plugin name         | vanilla                              |
+    | Security groups     | None                                 |
+    | Use autoconfig      | False                                |
+    | Version             | <plugin_version>                     |
+    | Volumes per node    | 0                                    |
+    +---------------------+--------------------------------------+
+
+
+You can also create node group templates with the --boot-from-volume flag
+set. This flag tells the node group to boot its instances from a volume
+created from the image rather than from the image directly, which allows
+for easier live migrations and improved performance.
+
+.. sourcecode:: console
+
+    $ openstack dataprocessing node group template create \
+        --name vanilla-default-worker --plugin vanilla \
+        --plugin-version <plugin_version> --processes datanode nodemanager \
+        --flavor 2 --auto-security-group --floating-ip-pool <pool-id> \
+        --boot-from-volume
+    +---------------------+--------------------------------------+
+    | Field               | Value                                |
+    +---------------------+--------------------------------------+
+    | Auto security group | True                                 |
+    | Availability zone   | None                                 |
+    | Flavor id           | 2                                    |
+    | Floating ip pool    | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 |
+    | Id                  | 6546bf44-0590-4539-bfcb-99f8e2c11efc |
+    | Is boot from volume | True                                 |
     | Is default          | False                                |
     | Is protected        | False                                |
     | Is proxy gateway    | False                                |
diff --git a/releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml b/releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml
new file mode 100644
index 0000000000..63e2608c89
--- /dev/null
+++ b/releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Added the ability to boot a Sahara cluster from volumes instead of images.
diff --git a/sahara/conductor/manager.py b/sahara/conductor/manager.py
index 5405cb4ee0..8402900e94 100644
--- a/sahara/conductor/manager.py
+++ b/sahara/conductor/manager.py
@@ -50,6 +50,7 @@ NODE_GROUP_DEFAULTS = {
     "volumes_availability_zone": None,
     "volume_mount_prefix": "/volumes/disk",
     "volume_type": None,
+    "boot_from_volume": False,
     "floating_ip_pool": None,
     "security_groups": None,
     "auto_security_group": False,
diff --git a/sahara/conductor/objects.py b/sahara/conductor/objects.py
index b40b7f5d78..eba7ffbadf 100644
--- a/sahara/conductor/objects.py
+++ b/sahara/conductor/objects.py
@@ -110,6 +110,8 @@ class NodeGroup(object):
                                 where to spawn volumes
     volume_mount_prefix
     volume_type
+    boot_from_volume - If set to True, the base image will be converted to a
+                       bootable volume.
     floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
                        instances in this Node Group
     security_groups - List of security groups for instances in this Node Group
@@ -231,6 +233,7 @@ class NodeGroupTemplate(object):
     volumes_availability_zone
     volume_mount_prefix
     volume_type
+    boot_from_volume
     floating_ip_pool
     security_groups
     auto_security_group
diff --git a/sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py b/sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py
new file mode 100644
index 0000000000..bd1c6408c8
--- /dev/null
+++ b/sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py
@@ -0,0 +1,40 @@
+# Copyright 2016 OpenStack Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Add boot_from_volume field for node_groups and related classes
+
+Revision ID: 034
+Revises: 033
+Create Date: 2018-06-06 17:36:04.749264
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '034'
+down_revision = '033'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('node_group_templates',
+                  sa.Column('boot_from_volume', sa.Boolean(), nullable=False))
+
+    op.add_column('node_groups',
+                  sa.Column('boot_from_volume', sa.Boolean(), nullable=False))
+
+    op.add_column('templates_relations',
+                  sa.Column('boot_from_volume', sa.Boolean(), nullable=False))
diff --git a/sahara/db/sqlalchemy/models.py b/sahara/db/sqlalchemy/models.py
index 9fe9474be4..ad62e1bc38 100644
--- a/sahara/db/sqlalchemy/models.py
+++ b/sahara/db/sqlalchemy/models.py
@@ -117,6 +117,7 @@ class NodeGroup(mb.SaharaBase):
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     volume_type = sa.Column(sa.String(255))
+    boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
     count = sa.Column(sa.Integer, nullable=False)
     use_autoconfig = sa.Column(sa.Boolean(), default=True)

@@ -228,6 +229,7 @@ class NodeGroupTemplate(mb.SaharaBase):
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     volume_type = sa.Column(sa.String(255))
+    boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
     floating_ip_pool = sa.Column(sa.String(36))
     security_groups = sa.Column(st.JsonListType())
     auto_security_group = sa.Column(sa.Boolean())
@@ -261,6 +263,7 @@ class TemplatesRelation(mb.SaharaBase):
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     volume_type = sa.Column(sa.String(255))
+    boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False)
     count = sa.Column(sa.Integer, nullable=False)
     use_autoconfig = sa.Column(sa.Boolean(), default=True)
     cluster_template_id = sa.Column(sa.String(36),
diff --git a/sahara/service/heat/templates.py b/sahara/service/heat/templates.py
index dca261285b..1901bbf35c 100644
--- a/sahara/service/heat/templates.py
+++ b/sahara/service/heat/templates.py
@@ -28,6 +28,7 @@ from sahara.utils import general as g
 from sahara.utils.openstack import base as b
 from sahara.utils.openstack import heat as h
 from sahara.utils.openstack import neutron
+from sahara.utils.openstack import nova

 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -509,11 +510,20 @@
         properties.update({
             "name": inst_name,
             "flavor": six.text_type(ng.flavor_id),
-            "image": ng.get_image_id(),
             "admin_user": ng.image_username,
             "user_data": userdata
         })

+        if ng.boot_from_volume:
+            resources.update(self._get_bootable_volume(ng))
+            properties["block_device_mapping"] = [
+                {"device_name": "vda",
+                 "volume_id": {"get_resource": "bootable_volume"},
+                 "delete_on_termination": "true"}]
+
+        else:
+            properties.update({"image": ng.get_image_id()})
+
         resources.update({
             INSTANCE_RESOURCE_NAME: {
                 "type": "OS::Nova::Server",
@@ -527,6 +537,20 @@
         resources.update(self._serialize_wait_condition(ng))
         return resources

+    def _get_bootable_volume(self, node_group):
+        node_group_flavor = nova.get_flavor(id=node_group.flavor_id)
+        image_size = node_group_flavor.disk
+
+        return {
+            "bootable_volume": {
+                "type": "OS::Cinder::Volume",
+                "properties": {
+                    "size": image_size,
+                    "image": node_group.get_image_id()
+                }
+            }
+        }
+
     def _serialize_wait_condition(self, ng):
         if not CONF.heat_enable_wait_condition:
             return {}
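
The hunks above are easier to review next to the Heat document they produce.
Below is a minimal sketch, not part of the patch, of the resources emitted
for a node group with boot_from_volume set to True. The instance resource
name, flavor, image id, and volume size are hypothetical; the structure and
property names are taken from _get_bootable_volume() and the
block_device_mapping assignment above.

    # Illustrative only: assumes a flavor with a 10 GB disk and a made-up
    # image id. Heat first creates a Cinder volume from the image, then
    # boots the server from it as vda and deletes the volume together
    # with the server.
    import json

    resources = {
        "bootable_volume": {
            "type": "OS::Cinder::Volume",
            "properties": {
                "size": 10,  # mirrors the flavor's disk size
                "image": "<image-id>"
            }
        },
        "cluster-worker-001-instance": {
            "type": "OS::Nova::Server",
            "properties": {
                "flavor": "2",
                "block_device_mapping": [
                    {"device_name": "vda",
                     "volume_id": {"get_resource": "bootable_volume"},
                     "delete_on_termination": "true"}
                ]
            }
        }
    }

    print(json.dumps({"resources": resources}, indent=2))
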
diff --git a/sahara/service/validations/node_group_template_schema.py b/sahara/service/validations/node_group_template_schema.py
index 1736e7a691..b8badfeb11 100644
--- a/sahara/service/validations/node_group_template_schema.py
+++ b/sahara/service/validations/node_group_template_schema.py
@@ -119,6 +119,11 @@ NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({
         "type": "string",
     }})
 NODE_GROUP_TEMPLATE_SCHEMA_V2["required"].append("plugin_version")
+NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({
+    "boot_from_volume": {
+        "type": "boolean",
+    }})
+

 # For an update we do not require any fields but we want the given
 # fields to be validated
diff --git a/sahara/tests/unit/conductor/manager/test_clusters.py b/sahara/tests/unit/conductor/manager/test_clusters.py
index e1291ff12b..a8c908e936 100644
--- a/sahara/tests/unit/conductor/manager/test_clusters.py
+++ b/sahara/tests/unit/conductor/manager/test_clusters.py
@@ -132,6 +132,7 @@ class ClusterTest(test_base.ConductorManagerTestCase):
         ng.pop("volumes_availability_zone")
         ng.pop("volume_type")
         ng.pop("floating_ip_pool")
+        ng.pop("boot_from_volume")
         ng.pop("image_username")
         ng.pop("open_ports")
         ng.pop("auto_security_group")
diff --git a/sahara/tests/unit/conductor/manager/test_templates.py b/sahara/tests/unit/conductor/manager/test_templates.py
index c7ec46af97..09c1242e0c 100644
--- a/sahara/tests/unit/conductor/manager/test_templates.py
+++ b/sahara/tests/unit/conductor/manager/test_templates.py
@@ -458,6 +458,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase):
         ng.pop("volume_type")
         ng.pop("auto_security_group")
         ng.pop("is_proxy_gateway")
+        ng.pop("boot_from_volume")
         ng.pop('volume_local_to_instance')

         self.assertEqual(SAMPLE_CLT["node_groups"],
diff --git a/sahara/tests/unit/db/migration/test_migrations.py b/sahara/tests/unit/db/migration/test_migrations.py
index 3fd87a6079..706a047bdf 100644
--- a/sahara/tests/unit/db/migration/test_migrations.py
+++ b/sahara/tests/unit/db/migration/test_migrations.py
@@ -626,6 +626,14 @@ class SaharaMigrationsCheckers(object):
     def _check_033(self, engine, data):
         self.assertColumnExists(engine, 'clusters', 'anti_affinity_ratio')

+    def _check_034(self, engine, data):
+        self.assertColumnExists(engine, 'node_groups',
+                                'boot_from_volume')
+        self.assertColumnExists(engine, 'node_group_templates',
+                                'boot_from_volume')
+        self.assertColumnExists(engine, 'templates_relations',
+                                'boot_from_volume')
+

 class TestMigrationsMySQL(SaharaMigrationsCheckers,
                           base.BaseWalkMigrationTestCase,
diff --git a/sahara/tests/unit/service/heat/test_templates.py b/sahara/tests/unit/service/heat/test_templates.py
index 6d49decd39..3166702f2f 100644
--- a/sahara/tests/unit/service/heat/test_templates.py
+++ b/sahara/tests/unit/service/heat/test_templates.py
@@ -36,12 +36,12 @@ class BaseTestClusterTemplate(base.SaharaWithDbTestCase):
                               floating_ip_pool=floating_ip_pool, image_id=None,
                               volumes_per_node=0, volumes_size=0, id="1",
                               image_username='root', volume_type=None,
-                              auto_security_group=True)
+                              boot_from_volume=False, auto_security_group=True)
         ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                               floating_ip_pool=floating_ip_pool, image_id=None,
                               volumes_per_node=2, volumes_size=10, id="2",
                               image_username='root', volume_type=volume_type,
-                              auto_security_group=True)
+                              boot_from_volume=False, auto_security_group=True)
         return ng1, ng2

     def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None,
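
As a quick check of what the new schema property accepts, the sketch below
validates sample payloads against a reduced schema. Everything here except
the boot_from_volume entry is an illustrative stand-in: NGT_SCHEMA_SUBSET is
a made-up subset, not Sahara's actual NODE_GROUP_TEMPLATE_SCHEMA_V2, and the
jsonschema library stands in for Sahara's validation machinery.

    import jsonschema

    NGT_SCHEMA_SUBSET = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "flavor_id": {"type": "string"},
            # The one entry mirrored from the patch:
            "boot_from_volume": {"type": "boolean"},
        },
    }

    # Accepted: the flag is a real boolean.
    jsonschema.validate(
        {"name": "vanilla-default-worker", "flavor_id": "2",
         "boot_from_volume": True},
        NGT_SCHEMA_SUBSET)

    # Rejected: the string "true" is not a boolean, so this raises.
    try:
        jsonschema.validate({"boot_from_volume": "true"}, NGT_SCHEMA_SUBSET)
    except jsonschema.ValidationError as exc:
        print(exc.message)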