author      Jenkins <jenkins@review.openstack.org>      2017-06-15 21:47:51 +0000
committer   Gerrit Code Review <review@openstack.org>   2017-06-15 21:47:51 +0000
commit      9769c6c463a6526010e0bf1e2f125b0a8ff1dfd7 (patch)
tree        29393fcd9adc3a06807fefc4910f25e82474e1e4
parent      df1ef0ef1bf913ab9852ba03faa5b325c4cf218f (diff)
parent      18744ba1991a7e1599d256857727454bac1ae2d2 (diff)
Merge "Tiramisu: replication group support"
-rw-r--r--  cinder/api/openstack/api_version_request.py | 3
-rw-r--r--  cinder/api/openstack/rest_api_version_history.rst | 5
-rw-r--r--  cinder/api/v3/groups.py | 106
-rw-r--r--  cinder/api/v3/views/groups.py | 5
-rw-r--r--  cinder/db/sqlalchemy/api.py | 17
-rw-r--r--  cinder/db/sqlalchemy/migrate_repo/versions/102_add_replication_status_to_groups_table.py | 28
-rw-r--r--  cinder/db/sqlalchemy/models.py | 2
-rw-r--r--  cinder/exception.py | 5
-rw-r--r--  cinder/group/api.py | 203
-rw-r--r--  cinder/objects/base.py | 1
-rw-r--r--  cinder/objects/fields.py | 4
-rw-r--r--  cinder/objects/group.py | 6
-rw-r--r--  cinder/tests/fake_driver.py | 82
-rw-r--r--  cinder/tests/unit/api/v3/test_groups.py | 245
-rw-r--r--  cinder/tests/unit/group/test_groups_manager_replication.py | 133
-rw-r--r--  cinder/tests/unit/objects/test_objects.py | 6
-rw-r--r--  cinder/tests/unit/policy.json | 4
-rw-r--r--  cinder/tests/unit/test_db_api.py | 55
-rw-r--r--  cinder/tests/unit/test_migrations.py | 6
-rw-r--r--  cinder/tests/unit/volume/test_rpcapi.py | 26
-rw-r--r--  cinder/volume/driver.py | 49
-rw-r--r--  cinder/volume/manager.py | 372
-rw-r--r--  cinder/volume/rpcapi.py | 30
-rw-r--r--  cinder/volume/utils.py | 35
-rw-r--r--  etc/cinder/policy.json | 5
-rw-r--r--  releasenotes/notes/replication-group-7c6c8a153460ca58.yaml | 6
26 files changed, 1424 insertions(+), 15 deletions(-)
diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py
index 4d229f0..3d71906 100644
--- a/cinder/api/openstack/api_version_request.py
+++ b/cinder/api/openstack/api_version_request.py
@@ -91,6 +91,7 @@ REST_API_VERSION_HISTORY = """
    * 3.35 - Add ``volume-type`` filter to Get-Pools API.
    * 3.36 - Add metadata to volumes/summary response body.
    * 3.37 - Support sort backup by "name".
+   * 3.38 - Add replication group API (Tiramisu).
 """
 
 # The minimum and maximum versions of the API supported
@@ -98,7 +99,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v1 or /v2 endpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.37"
+_MAX_API_VERSION = "3.38"
 _LEGACY_API_VERSION1 = "1.0"
 _LEGACY_API_VERSION2 = "2.0"
 
diff --git a/cinder/api/openstack/rest_api_version_history.rst b/cinder/api/openstack/rest_api_version_history.rst
index e1045c5..987989b 100644
--- a/cinder/api/openstack/rest_api_version_history.rst
+++ b/cinder/api/openstack/rest_api_version_history.rst
@@ -325,3 +325,8 @@ user documentation.
 3.37
 ----
   Support sort backup by "name".
+
+3.38
+----
+  Added enable_replication/disable_replication/failover_replication/
+  list_replication_targets for replication groups (Tiramisu).
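
For illustration only (not part of the merged change): a minimal client-side sketch of invoking the group replication actions introduced by microversion 3.38. The endpoint path, action names, and request-body keys come from the cinder/api/v3/groups.py controller in this change; the service URL, project/group IDs, token, and backend id are placeholder assumptions.

    # Hypothetical usage sketch; CINDER_URL, PROJECT_ID, GROUP_ID, the token
    # and the backend id are placeholders, not values defined by this change.
    import requests

    CINDER_URL = "http://controller:8776/v3"   # assumed block-storage endpoint
    PROJECT_ID = "<project-uuid>"
    GROUP_ID = "<group-uuid>"
    HEADERS = {
        "X-Auth-Token": "<keystone-token>",
        # Request microversion 3.38, which adds the group replication actions.
        "OpenStack-API-Version": "volume 3.38",
        "Content-Type": "application/json",
    }

    url = "%s/%s/groups/%s/action" % (CINDER_URL, PROJECT_ID, GROUP_ID)

    # Enable replication on the group (body key matches the wsgi.action name).
    requests.post(url, json={"enable_replication": {}}, headers=HEADERS)

    # Fail over the group, optionally allowing attached volumes and selecting
    # a secondary backend.
    requests.post(url, json={"failover_replication": {
        "allow_attached_volume": True,
        "secondary_backend_id": "<backend-id>",
    }}, headers=HEADERS)

    # List replication targets; the response body contains
    # 'replication_targets'.
    print(requests.post(url, json={"list_replication_targets": {}},
                        headers=HEADERS).json())
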
diff --git a/cinder/api/v3/groups.py b/cinder/api/v3/groups.py
index b1666e6..b0e029b 100644
--- a/cinder/api/v3/groups.py
+++ b/cinder/api/v3/groups.py
@@ -34,6 +34,7 @@ LOG = logging.getLogger(__name__)
 
 GROUP_API_VERSION = '3.13'
 GROUP_CREATE_FROM_SRC_API_VERSION = '3.14'
+GROUP_REPLICATION_API_VERSION = '3.38'
 
 
 class GroupsController(wsgi.Controller):
@@ -372,6 +373,111 @@ class GroupsController(wsgi.Controller):
 
         return webob.Response(status_int=http_client.ACCEPTED)
 
+    @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION)
+    @wsgi.action("enable_replication")
+    def enable_replication(self, req, id, body):
+        """Enables replications for a group."""
+        context = req.environ['cinder.context']
+        if body:
+            if not self.is_valid_body(body, 'enable_replication'):
+                msg = _("Missing required element 'enable_replication' in "
+                        "request body.")
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        LOG.info('Enable replication group with id: %s.', id,
+                 context=context)
+
+        try:
+            group = self.group_api.get(context, id)
+            self.group_api.enable_replication(context, group)
+            # Not found exception will be handled at the wsgi level
+        except (exception.InvalidGroup, exception.InvalidGroupType,
+                exception.InvalidVolume, exception.InvalidVolumeType) as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION)
+    @wsgi.action("disable_replication")
+    def disable_replication(self, req, id, body):
+        """Disables replications for a group."""
+        context = req.environ['cinder.context']
+        if body:
+            if not self.is_valid_body(body, 'disable_replication'):
+                msg = _("Missing required element 'disable_replication' in "
+                        "request body.")
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        LOG.info('Disable replication group with id: %s.', id,
+                 context=context)
+
+        try:
+            group = self.group_api.get(context, id)
+            self.group_api.disable_replication(context, group)
+            # Not found exception will be handled at the wsgi level
+        except (exception.InvalidGroup, exception.InvalidGroupType,
+                exception.InvalidVolume, exception.InvalidVolumeType) as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION)
+    @wsgi.action("failover_replication")
+    def failover_replication(self, req, id, body):
+        """Fails over replications for a group."""
+        context = req.environ['cinder.context']
+        if body:
+            if not self.is_valid_body(body, 'failover_replication'):
+                msg = _("Missing required element 'failover_replication' in "
+                        "request body.")
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        grp_body = body['failover_replication']
+        try:
+            allow_attached = strutils.bool_from_string(
+                grp_body.get('allow_attached_volume', False),
+                strict=True)
+        except ValueError:
+            msg = (_("Invalid value '%s' for allow_attached_volume flag.")
+                   % grp_body)
+            raise exc.HTTPBadRequest(explanation=msg)
+        secondary_backend_id = grp_body.get('secondary_backend_id')
+
+        LOG.info('Failover replication group with id: %s.', id,
+                 context=context)
+
+        try:
+            group = self.group_api.get(context, id)
+            self.group_api.failover_replication(context, group, allow_attached,
+                                                secondary_backend_id)
+            # Not found exception will be handled at the wsgi level
+        except (exception.InvalidGroup, exception.InvalidGroupType,
+                exception.InvalidVolume, exception.InvalidVolumeType) as error:
+            raise exc.HTTPBadRequest(explanation=error.msg)
+
+        return webob.Response(status_int=202)
+
+    @wsgi.Controller.api_version(GROUP_REPLICATION_API_VERSION)
+    @wsgi.action("list_replication_targets")
+    def list_replication_targets(self, req, id, body):
+        """List replication targets for a group."""
+        context = req.environ['cinder.context']
+        if body:
+            if not self.is_valid_body(body, 'list_replication_targets'):
+                msg = _("Missing required element 'list_replication_targets' "
+                        "in request body.")
+                raise exc.HTTPBadRequest(explanation=msg)
+
+        LOG.info('List replication targets for group with id: %s.', id,
+                 context=context)
+
+        # Not found exception will be handled at the wsgi level
+        group = self.group_api.get(context, id)
+        replication_targets = self.group_api.list_replication_targets(
+            context, group)
+
+        return replication_targets
+
 
 def create_resource():
     return wsgi.Resource(GroupsController())
diff --git a/cinder/api/v3/views/groups.py b/cinder/api/v3/views/groups.py
index 264c779..5b20c29 100644
--- a/cinder/api/v3/views/groups.py
+++ b/cinder/api/v3/views/groups.py
@@ -71,6 +71,11 @@ class ViewBuilder(common.ViewBuilder):
         group_ref['group']['volumes'] = [volume.id
                                          for volume in group.volumes]
 
+        # Add replication_status if min version is greater than or equal
+        # to 3.38.
+        if req_version.matches("3.38", None):
+            group_ref['group']['replication_status'] = group.replication_status
+
         return group_ref
 
     def _list_view(self, func, request, groups):
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index c0d3ee1..7a08b75 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -5563,6 +5563,16 @@ def _group_snapshot_get_query(context, session=None, project_only=False):
 @apply_like_filters(model=models.Group)
 def _process_groups_filters(query, filters):
     if filters:
+        # NOTE(xyang): backend_match_level needs to be handled before
+        # is_valid_model_filters is called as it is not a column name
+        # in the db.
+        backend_match_level = filters.pop('backend_match_level', 'backend')
+        # host is a valid filter. Filter the query by host and
+        # backend_match_level first.
+        host = filters.pop('host', None)
+        if host:
+            query = query.filter(_filter_host(models.Group.host, host,
+                                              match_level=backend_match_level))
         # Ensure that filters' keys exist on the model
         if not is_valid_model_filters(models.Group, filters):
             return
@@ -5582,10 +5592,9 @@ def _process_group_snapshot_filters(query, filters):
 
 def _group_get_all(context, filters=None, marker=None, limit=None,
                    offset=None, sort_keys=None, sort_dirs=None):
-    if filters and not is_valid_model_filters(models.Group,
-                                              filters):
-        return []
-
+    # No need to call is_valid_model_filters here. It is called
+    # in _process_group_filters when _generate_paginate_query
+    # is called below.
     session = get_session()
     with session.begin():
         # Generate the paginate query
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/102_add_replication_status_to_groups_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/102_add_replication_status_to_groups_table.py
new file mode 100644
index 0000000..08f367d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/102_add_replication_status_to_groups_table.py
@@ -0,0 +1,28 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Add replication_status column to groups table
+    table = Table('groups', meta, autoload=True)
+    if not hasattr(table.c, 'replication_status'):
+        new_column = Column('replication_status', String(255), nullable=True)
+        table.create_column(new_column)
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index 4324d93..92fd418 100644
--- a/cinder/db/sqlalchemy/models.py
+++ b/cinder/db/sqlalchemy/models.py
@@ -193,6 +193,8 @@ class Group(BASE, CinderBase):
     group_snapshot_id = Column(String(36))
     source_group_id = Column(String(36))
 
+    replication_status = Column(String(255))
+
 
 class Cgsnapshot(BASE, CinderBase):
     """Represents a cgsnapshot."""
diff --git a/cinder/exception.py b/cinder/exception.py
index 86c2bfd..72b64f7 100644
--- a/cinder/exception.py
+++ b/cinder/exception.py
@@ -846,6 +846,11 @@ class ReplicationError(CinderException):
846 "error: %(reason)s") 846 "error: %(reason)s")
847 847
848 848
849class ReplicationGroupError(CinderException):
850 message = _("Group %(group_id)s replication "
851 "error: %(reason)s.")
852
853
849class ReplicationNotFound(NotFound): 854class ReplicationNotFound(NotFound):
850 message = _("Volume replication for %(volume_id)s " 855 message = _("Volume replication for %(volume_id)s "
851 "could not be found.") 856 "could not be found.")
diff --git a/cinder/group/api.py b/cinder/group/api.py
index 160204a..9dd965a 100644
--- a/cinder/group/api.py
+++ b/cinder/group/api.py
@@ -151,7 +151,8 @@ class API(base.Base):
                   'name': name,
                   'description': description,
                   'volume_type_ids': [t['id'] for t in req_volume_types],
-                  'group_type_id': req_group_type['id']}
+                  'group_type_id': req_group_type['id'],
+                  'replication_status': c_fields.ReplicationStatus.DISABLED}
         group = None
         try:
             group = objects.Group(context=context, **kwargs)
@@ -212,6 +213,7 @@ class API(base.Base):
             'source_group_id': source_group_id,
             'group_type_id': group_type_id,
             'volume_type_ids': volume_type_ids,
+            'replication_status': c_fields.ReplicationStatus.DISABLED
         }
 
         group = None
@@ -898,3 +900,202 @@ class API(base.Base):
                  'status': status}
         gsnapshot.update(field)
         gsnapshot.save()
+
+    def _check_type(self, group):
+        if not vol_utils.is_group_a_replication_group_type(group):
+            msg = _("Group %s is not a replication group type.") % group.id
+            LOG.error(msg)
+            raise exception.InvalidGroupType(reason=msg)
+
+        for vol_type in group.volume_types:
+            if not vol_utils.is_replicated_spec(vol_type.extra_specs):
+                msg = _("Volume type %s does not have 'replication_enabled' "
+                        "spec key set to '<is> True'.") % vol_type.id
+                LOG.error(msg)
+                raise exception.InvalidVolumeType(reason=msg)
+
+    # Replication group API (Tiramisu)
+    @wrap_check_policy
+    def enable_replication(self, context, group):
+        self._check_type(group)
+
+        valid_status = [c_fields.GroupStatus.AVAILABLE]
+        if group.status not in valid_status:
+            params = {'valid': valid_status,
+                      'current': group.status,
+                      'id': group.id}
+            msg = _("Group %(id)s status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot enable replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        valid_rep_status = [c_fields.ReplicationStatus.DISABLED,
+                            c_fields.ReplicationStatus.ENABLED]
+        if group.replication_status not in valid_rep_status:
+            params = {'valid': valid_rep_status,
+                      'current': group.replication_status,
+                      'id': group.id}
+            msg = _("Group %(id)s replication status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot enable replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        volumes = objects.VolumeList.get_all_by_generic_group(
+            context.elevated(), group.id)
+
+        valid_status = ['available', 'in-use']
+        for vol in volumes:
+            if vol.status not in valid_status:
+                params = {'valid': valid_status,
+                          'current': vol.status,
+                          'id': vol.id}
+                msg = _("Volume %(id)s status must be %(valid)s, "
+                        "but current status is: %(current)s. "
+                        "Cannot enable replication.") % params
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+            # replication_status could be set to enabled when volume is
+            # created and the mirror is built.
+            if vol.replication_status not in valid_rep_status:
+                params = {'valid': valid_rep_status,
+                          'current': vol.replication_status,
+                          'id': vol.id}
+                msg = _("Volume %(id)s replication status must be %(valid)s, "
+                        "but current status is: %(current)s. "
+                        "Cannot enable replication.") % params
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+
+            vol.replication_status = c_fields.ReplicationStatus.ENABLING
+            vol.save()
+
+        group.replication_status = c_fields.ReplicationStatus.ENABLING
+        group.save()
+
+        self.volume_rpcapi.enable_replication(context, group)
+
+    @wrap_check_policy
+    def disable_replication(self, context, group):
+        self._check_type(group)
+
+        valid_status = [c_fields.GroupStatus.AVAILABLE,
+                        c_fields.GroupStatus.ERROR]
+        if group.status not in valid_status:
+            params = {'valid': valid_status,
+                      'current': group.status,
+                      'id': group.id}
+            msg = _("Group %(id)s status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot disable replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
+                            c_fields.ReplicationStatus.ERROR]
+        if group.replication_status not in valid_rep_status:
+            params = {'valid': valid_rep_status,
+                      'current': group.replication_status,
+                      'id': group.id}
+            msg = _("Group %(id)s replication status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot disable replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        volumes = objects.VolumeList.get_all_by_generic_group(
+            context.elevated(), group.id)
+
+        for vol in volumes:
+            if vol.replication_status not in valid_rep_status:
+                params = {'valid': valid_rep_status,
+                          'current': vol.replication_status,
+                          'id': vol.id}
+                msg = _("Volume %(id)s replication status must be %(valid)s, "
+                        "but current status is: %(current)s. "
+                        "Cannot disable replication.") % params
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+
+            vol.replication_status = c_fields.ReplicationStatus.DISABLING
+            vol.save()
+
+        group.replication_status = c_fields.ReplicationStatus.DISABLING
+        group.save()
+
+        self.volume_rpcapi.disable_replication(context, group)
+
+    @wrap_check_policy
+    def failover_replication(self, context, group,
+                             allow_attached_volume=False,
+                             secondary_backend_id=None):
+        self._check_type(group)
+
+        valid_status = [c_fields.GroupStatus.AVAILABLE]
+        if group.status not in valid_status:
+            params = {'valid': valid_status,
+                      'current': group.status,
+                      'id': group.id}
+            msg = _("Group %(id)s status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot failover replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        valid_rep_status = [c_fields.ReplicationStatus.ENABLED,
+                            c_fields.ReplicationStatus.FAILED_OVER]
+        if group.replication_status not in valid_rep_status:
+            params = {'valid': valid_rep_status,
+                      'current': group.replication_status,
+                      'id': group.id}
+            msg = _("Group %(id)s replication status must be %(valid)s, "
+                    "but current status is: %(current)s. "
+                    "Cannot failover replication.") % params
+            LOG.error(msg)
+            raise exception.InvalidGroup(reason=msg)
+
+        volumes = objects.VolumeList.get_all_by_generic_group(
+            context.elevated(), group.id)
+
+        valid_status = ['available', 'in-use']
+        for vol in volumes:
+            if vol.status not in valid_status:
+                params = {'valid': valid_status,
+                          'current': vol.status,
+                          'id': vol.id}
+                msg = _("Volume %(id)s status must be %(valid)s, "
+                        "but current status is: %(current)s. "
+                        "Cannot failover replication.") % params
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+            if vol.status == 'in-use' and not allow_attached_volume:
+                msg = _("Volume %s is attached but allow_attached_volume flag "
+                        "is False. Cannot failover replication.") % vol.id
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+            if vol.replication_status not in valid_rep_status:
+                params = {'valid': valid_rep_status,
+                          'current': vol.replication_status,
+                          'id': vol.id}
+                msg = _("Volume %(id)s replication status must be %(valid)s, "
+                        "but current status is: %(current)s. "
+                        "Cannot failover replication.") % params
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+
+            vol.replication_status = c_fields.ReplicationStatus.FAILING_OVER
+            vol.save()
+
+        group.replication_status = c_fields.ReplicationStatus.FAILING_OVER
+        group.save()
+
+        self.volume_rpcapi.failover_replication(context, group,
+                                                allow_attached_volume,
+                                                secondary_backend_id)
+
+    @wrap_check_policy
+    def list_replication_targets(self, context, group):
+        self._check_type(group)
+
+        return self.volume_rpcapi.list_replication_targets(context, group)
diff --git a/cinder/objects/base.py b/cinder/objects/base.py
index 9afab74..92ab1ac 100644
--- a/cinder/objects/base.py
+++ b/cinder/objects/base.py
@@ -132,6 +132,7 @@ OBJ_VERSIONS.add('1.21', {'ManageableSnapshot': '1.0',
 OBJ_VERSIONS.add('1.22', {'Snapshot': '1.4'})
 OBJ_VERSIONS.add('1.23', {'VolumeAttachment': '1.2'})
 OBJ_VERSIONS.add('1.24', {'LogLevel': '1.0', 'LogLevelList': '1.0'})
+OBJ_VERSIONS.add('1.25', {'Group': '1.2'})
 
 
 class CinderObjectRegistry(base.VersionedObjectRegistry):
diff --git a/cinder/objects/fields.py b/cinder/objects/fields.py
index aa85bc0..5d95172 100644
--- a/cinder/objects/fields.py
+++ b/cinder/objects/fields.py
@@ -105,9 +105,11 @@ class ReplicationStatus(BaseCinderEnum):
     FAILING_OVER = 'failing-over'
     FAILOVER_ERROR = 'failover-error'
     FAILED_OVER = 'failed-over'
+    ENABLING = 'enabling'
+    DISABLING = 'disabling'
 
     ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER,
-           FAILED_OVER)
+           FAILED_OVER, ENABLING, DISABLING)
 
 
 class ReplicationStatusField(BaseEnumField):
diff --git a/cinder/objects/group.py b/cinder/objects/group.py
index fc14d30..4ed7e38 100644
--- a/cinder/objects/group.py
+++ b/cinder/objects/group.py
@@ -29,7 +29,8 @@ class Group(base.CinderPersistentObject, base.CinderObject,
     # Version 1.0: Initial version
     # Version 1.1: Added group_snapshots, group_snapshot_id, and
     #              source_group_id
-    VERSION = '1.1'
+    # Version 1.2: Added replication_status
+    VERSION = '1.2'
 
     OPTIONAL_FIELDS = ['volumes', 'volume_types', 'group_snapshots']
 
@@ -47,6 +48,7 @@ class Group(base.CinderPersistentObject, base.CinderObject,
         'status': c_fields.GroupStatusField(nullable=True),
         'group_snapshot_id': fields.UUIDField(nullable=True),
         'source_group_id': fields.UUIDField(nullable=True),
+        'replication_status': c_fields.ReplicationStatusField(nullable=True),
         'volumes': fields.ObjectField('VolumeList', nullable=True),
         'volume_types': fields.ObjectField('VolumeTypeList',
                                            nullable=True),
@@ -62,6 +64,8 @@ class Group(base.CinderPersistentObject, base.CinderObject,
             for key in ('group_snapshot_id', 'source_group_id',
                         'group_snapshots'):
                 primitive.pop(key, None)
+        if target_version < (1, 2):
+            primitive.pop('replication_status', None)
 
     @staticmethod
     def _from_db_object(context, group, db_group,
diff --git a/cinder/tests/fake_driver.py b/cinder/tests/fake_driver.py
index 6de5abb..929e240 100644
--- a/cinder/tests/fake_driver.py
+++ b/cinder/tests/fake_driver.py
@@ -21,6 +21,7 @@ from cinder.tests.unit.brick import fake_lvm
 from cinder import utils
 from cinder.volume import driver
 from cinder.volume.drivers import lvm
+from cinder.volume import utils as vol_utils
 from cinder.zonemanager import utils as fczm_utils
 
 
@@ -44,7 +45,20 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver):
 
     @utils.trace_method
     def create_volume(self, volume):
-        pass
+        """Creates a volume."""
+        super(FakeLoggingVolumeDriver, self).create_volume(volume)
+        model_update = {}
+        try:
+            if (volume.volume_type and volume.volume_type.extra_specs and
+                    vol_utils.is_replicated_spec(
+                        volume.volume_type.extra_specs)):
+                # Sets the new volume's replication_status to disabled
+                model_update['replication_status'] = (
+                    fields.ReplicationStatus.DISABLED)
+        except exception.VolumeTypeNotFound:
+            pass
+        if model_update:
+            return model_update
 
     @utils.trace_method
     def delete_volume(self, volume):
@@ -122,6 +136,68 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver):
     def terminate_connection(self, volume, connector, **kwargs):
         pass
 
+    # Replication Group (Tiramisu)
+    @utils.trace_method
+    def enable_replication(self, context, group, volumes):
+        """Enables replication for a group and volumes in the group."""
+        model_update = {
+            'replication_status': fields.ReplicationStatus.ENABLED}
+        volume_model_updates = []
+        for volume_ref in volumes:
+            volume_model_update = {'id': volume_ref.id}
+            volume_model_update['replication_status'] = (
+                fields.ReplicationStatus.ENABLED)
+            volume_model_updates.append(volume_model_update)
+
+        return model_update, volume_model_updates
+
+    # Replication Group (Tiramisu)
+    @utils.trace_method
+    def disable_replication(self, context, group, volumes):
+        """Disables replication for a group and volumes in the group."""
+        model_update = {
+            'replication_status': fields.ReplicationStatus.DISABLED}
+        volume_model_updates = []
+        for volume_ref in volumes:
+            volume_model_update = {'id': volume_ref.id}
+            volume_model_update['replication_status'] = (
+                fields.ReplicationStatus.DISABLED)
+            volume_model_updates.append(volume_model_update)
+
+        return model_update, volume_model_updates
+
+    # Replication Group (Tiramisu)
+    @utils.trace_method
+    def failover_replication(self, context, group, volumes,
+                             secondary_backend_id=None):
+        """Fails over replication for a group and volumes in the group."""
+        model_update = {
+            'replication_status': fields.ReplicationStatus.FAILED_OVER}
+        volume_model_updates = []
+        for volume_ref in volumes:
+            volume_model_update = {'id': volume_ref.id}
+            volume_model_update['replication_status'] = (
+                fields.ReplicationStatus.FAILED_OVER)
+            volume_model_updates.append(volume_model_update)
+
+        return model_update, volume_model_updates
+
+    # Replication Group (Tiramisu)
+    @utils.trace_method
+    def create_group(self, context, group):
+        """Creates a group."""
+        model_update = super(FakeLoggingVolumeDriver, self).create_group(
+            context, group)
+        try:
+            if vol_utils.is_group_a_replication_group_type(group):
+                # Sets the new group's replication_status to disabled
+                model_update['replication_status'] = (
+                    fields.ReplicationStatus.DISABLED)
+        except exception.GroupTypeNotFound:
+            pass
+
+        return model_update
+
     def _update_volume_stats(self):
         data = {'volume_backend_name': self.backend_name,
                 'vendor_name': 'Open Source',
@@ -138,7 +214,8 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver):
                      'filter_function': self.get_filter_function(),
                      'goodness_function': self.get_goodness_function(),
                      'consistencygroup_support': False,
-                     'replication_enabled': False}
+                     'replication_enabled': True,
+                     'group_replication_enabled': True, }
 
         data['pools'].append(fake_pool)
         self._stats = data
@@ -218,7 +295,6 @@ class FakeGateDriver(lvm.LVMVolumeDriver):
     def _update_volume_stats(self):
         super(FakeGateDriver, self)._update_volume_stats()
         self._stats["pools"][0]["consistencygroup_support"] = True
-        self._stats["pools"][0]["replication_enabled"] = True
 
     # NOTE(xyang): Consistency Group functions implemented below
     # are for testing purpose only. Data consistency cannot be
diff --git a/cinder/tests/unit/api/v3/test_groups.py b/cinder/tests/unit/api/v3/test_groups.py
index 53266aa..4ee2b74 100644
--- a/cinder/tests/unit/api/v3/test_groups.py
+++ b/cinder/tests/unit/api/v3/test_groups.py
@@ -38,6 +38,8 @@ from cinder.volume import api as volume_api
 
 GROUP_MICRO_VERSION = '3.13'
 GROUP_FROM_SRC_MICRO_VERSION = '3.14'
+GROUP_REPLICATION_MICRO_VERSION = '3.38'
+INVALID_GROUP_REPLICATION_MICRO_VERSION = '3.37'
 
 
 @ddt.ddt
@@ -75,6 +77,7 @@ class GroupsAPITestCase(test.TestCase):
                       availability_zone='az1',
                       host='fakehost',
                       status=fields.GroupStatus.CREATING,
+                      replication_status=fields.ReplicationStatus.DISABLED,
                       **kwargs):
         """Create a group object."""
         ctxt = ctxt or self.ctxt
@@ -88,6 +91,7 @@ class GroupsAPITestCase(test.TestCase):
         group.volume_type_ids = volume_type_ids
         group.host = host
         group.status = status
+        group.replication_status = replication_status
         group.update(kwargs)
         group.create()
         return group
@@ -1049,3 +1053,244 @@ class GroupsAPITestCase(test.TestCase):
         grp.destroy()
         volume.destroy()
         source_grp.destroy()
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    def test_enable_replication(self, mock_rep_grp_type, mock_rep_vol_type):
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.save()
+        body = {"enable_replication": {}}
+        response = self.controller.enable_replication(req,
+                                                      self.group3.id, body)
+
+        group = objects.Group.get_by_id(self.ctxt, self.group3.id)
+        self.assertEqual(202, response.status_int)
+        self.assertEqual(fields.GroupStatus.AVAILABLE, group.status)
+        self.assertEqual(fields.ReplicationStatus.ENABLING,
+                         group.replication_status)
+
+    @ddt.data((True, False), (False, True), (False, False))
+    @ddt.unpack
+    @mock.patch('cinder.volume.utils.is_replicated_spec')
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type')
+    def test_enable_replication_wrong_type(self, is_grp_rep_type,
+                                           is_vol_rep_type,
+                                           mock_rep_grp_type,
+                                           mock_rep_vol_type):
+        mock_rep_grp_type.return_value = is_grp_rep_type
+        mock_rep_vol_type.return_value = is_vol_rep_type
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.save()
+        body = {"enable_replication": {}}
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.enable_replication,
+                          req, self.group3.id, body)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=False)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    def test_enable_replication_wrong_group_type(self, mock_rep_grp_type,
+                                                 mock_rep_vol_type):
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.save()
+        body = {"enable_replication": {}}
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.enable_replication,
+                          req, self.group3.id, body)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.CREATING,
+               webob.exc.HTTPBadRequest),
+              (GROUP_REPLICATION_MICRO_VERSION, False,
+               fields.GroupStatus.AVAILABLE,
+               exception.GroupNotFound),
+              (INVALID_GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.AVAILABLE,
+               exception.VersionNotFoundForAPIMethod),
+              )
+    @ddt.unpack
+    def test_enable_replication_negative(self, version, not_fake,
+                                         status, exceptions,
+                                         mock_rep_grp_type, mock_rep_vol_type):
+        if not_fake:
+            group_id = self.group3.id
+        else:
+            group_id = fake.GROUP_ID
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, group_id),
+                                      version=version)
+        if not_fake:
+            self.group3.status = status
+            self.group3.save()
+        body = {"enable_replication": {}}
+        self.assertRaises(exceptions,
+                          self.controller.enable_replication,
+                          req, group_id, body)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    def test_disable_replication(self, mock_rep_grp_type, mock_rep_vol_type):
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.replication_status = fields.ReplicationStatus.ENABLED
+        self.group3.save()
+        body = {"disable_replication": {}}
+        response = self.controller.disable_replication(req,
+                                                       self.group3.id, body)
+
+        group = objects.Group.get_by_id(self.ctxt, self.group3.id)
+        self.assertEqual(202, response.status_int)
+        self.assertEqual(fields.GroupStatus.AVAILABLE, group.status)
+        self.assertEqual(fields.ReplicationStatus.DISABLING,
+                         group.replication_status)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.CREATING,
+               fields.ReplicationStatus.ENABLED,
+               webob.exc.HTTPBadRequest),
+              (GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.DISABLED,
+               webob.exc.HTTPBadRequest),
+              (GROUP_REPLICATION_MICRO_VERSION, False,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.DISABLED,
+               exception.GroupNotFound),
+              (INVALID_GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.ENABLED,
+               exception.VersionNotFoundForAPIMethod),
+              )
+    @ddt.unpack
+    def test_disable_replication_negative(self, version, not_fake,
+                                          status, rep_status, exceptions,
+                                          mock_rep_grp_type,
+                                          mock_rep_vol_type):
+        if not_fake:
+            group_id = self.group3.id
+        else:
+            group_id = fake.GROUP_ID
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, group_id),
+                                      version=version)
+        if not_fake:
+            self.group3.status = status
+            self.group3.replication_status = rep_status
+            self.group3.save()
+        body = {"disable_replication": {}}
+        self.assertRaises(exceptions,
+                          self.controller.disable_replication,
+                          req, group_id, body)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    def test_failover_replication(self, mock_rep_grp_type, mock_rep_vol_type):
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.replication_status = fields.ReplicationStatus.ENABLED
+        self.group3.save()
+        body = {"failover_replication": {}}
+        response = self.controller.failover_replication(req,
+                                                        self.group3.id, body)
+
+        group = objects.Group.get_by_id(self.ctxt, self.group3.id)
+        self.assertEqual(202, response.status_int)
+        self.assertEqual(fields.GroupStatus.AVAILABLE, group.status)
+        self.assertEqual(fields.ReplicationStatus.FAILING_OVER,
+                         group.replication_status)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.CREATING,
+               fields.ReplicationStatus.ENABLED,
+               webob.exc.HTTPBadRequest),
+              (GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.DISABLED,
+               webob.exc.HTTPBadRequest),
+              (GROUP_REPLICATION_MICRO_VERSION, False,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.DISABLED,
+               exception.GroupNotFound),
+              (INVALID_GROUP_REPLICATION_MICRO_VERSION, True,
+               fields.GroupStatus.AVAILABLE,
+               fields.ReplicationStatus.ENABLED,
+               exception.VersionNotFoundForAPIMethod),
+              )
+    @ddt.unpack
+    def test_failover_replication_negative(self, version, not_fake,
+                                           status, rep_status, exceptions,
+                                           mock_rep_grp_type,
+                                           mock_rep_vol_type):
+        if not_fake:
+            group_id = self.group3.id
+        else:
+            group_id = fake.GROUP_ID
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, group_id),
+                                      version=version)
+        if not_fake:
+            self.group3.status = status
+            self.group3.replication_status = rep_status
+            self.group3.save()
+        body = {"failover_replication": {}}
+        self.assertRaises(exceptions,
+                          self.controller.failover_replication,
+                          req, group_id, body)
+
+    @mock.patch('cinder.volume.utils.is_replicated_spec',
+                return_value=True)
+    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+                return_value=True)
+    @mock.patch('cinder.volume.rpcapi.VolumeAPI.list_replication_targets')
+    def test_list_replication_targets(self, mock_list_rep_targets,
+                                      mock_rep_grp_type, mock_rep_vol_type):
+        req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
+                                      (fake.PROJECT_ID, self.group3.id),
+                                      version=GROUP_REPLICATION_MICRO_VERSION)
+        targets = {
+            'replication_targets': [
+                {'backend_id': 'lvm_backend_1'}
+            ]
+        }
+        mock_list_rep_targets.return_value = targets
+        self.group3.status = fields.GroupStatus.AVAILABLE
+        self.group3.save()
+        body = {"list_replication_targets": {}}
+        response = self.controller.list_replication_targets(
+            req, self.group3.id, body)
+
+        self.assertIn('replication_targets', response)
+        self.assertEqual('lvm_backend_1',
+                         response['replication_targets'][0]['backend_id'])
diff --git a/cinder/tests/unit/group/test_groups_manager_replication.py b/cinder/tests/unit/group/test_groups_manager_replication.py
new file mode 100644
index 0000000..cf39d8b
--- /dev/null
+++ b/cinder/tests/unit/group/test_groups_manager_replication.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ddt
+import mock
+from oslo_config import cfg
+from oslo_utils import importutils
+
+from cinder import context
+from cinder import exception
+from cinder import objects
+from cinder.objects import fields
+from cinder import quota
+from cinder import test
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import utils as tests_utils
+from cinder.volume import api as volume_api
+from cinder.volume import configuration as conf
+from cinder.volume import driver
+from cinder.volume import utils as volutils
+
+GROUP_QUOTAS = quota.GROUP_QUOTAS
+CONF = cfg.CONF
+
+
+@ddt.ddt
+class GroupManagerTestCase(test.TestCase):
+
+    def setUp(self):
+        super(GroupManagerTestCase, self).setUp()
+        self.volume = importutils.import_object(CONF.volume_manager)
+        self.configuration = mock.Mock(conf.Configuration)
+        self.context = context.get_admin_context()
+        self.context.user_id = fake.USER_ID
+        self.project_id = fake.PROJECT3_ID
+        self.context.project_id = self.project_id
+        self.volume.driver.set_initialized()
+        self.volume.stats = {'allocated_capacity_gb': 0,
+                             'pools': {}}
+        self.volume_api = volume_api.API()
+
+    @mock.patch.object(GROUP_QUOTAS, "reserve",
+                       return_value=["RESERVATION"])
+    @mock.patch.object(GROUP_QUOTAS, "commit")
+    @mock.patch.object(GROUP_QUOTAS, "rollback")
+    @mock.patch.object(driver.VolumeDriver,
+                       "delete_group",
+                       return_value=({'status': (
+                           fields.GroupStatus.DELETED)}, []))
+    @mock.patch.object(driver.VolumeDriver,
+                       "enable_replication",
+                       return_value=(None, []))
+    @mock.patch.object(driver.VolumeDriver,
+                       "disable_replication",
+                       return_value=(None, []))
+    @mock.patch.object(driver.VolumeDriver,
+                       "failover_replication",
+                       return_value=(None, []))
+    def test_replication_group(self, fake_failover_rep, fake_disable_rep,
+                               fake_enable_rep, fake_delete_grp,
+                               fake_rollback, fake_commit, fake_reserve):
+        """Test enable, disable, and failover replication for group."""
+
+        def fake_driver_create_grp(context, group):
+            """Make sure that the pool is part of the host."""
+            self.assertIn('host', group)
+            host = group.host
+            pool = volutils.extract_host(host, level='pool')
+            self.assertEqual('fakepool', pool)
+            return {'status': fields.GroupStatus.AVAILABLE,
+                    'replication_status': fields.ReplicationStatus.DISABLING}
+
+        self.mock_object(self.volume.driver, 'create_group',
+                         fake_driver_create_grp)
+
+        group = tests_utils.create_group(
+            self.context,
+            availability_zone=CONF.storage_availability_zone,
+            volume_type_ids=[fake.VOLUME_TYPE_ID],
+            host='fakehost@fakedrv#fakepool',
+            group_type_id=fake.GROUP_TYPE_ID)
+        group = objects.Group.get_by_id(self.context, group.id)
+        self.volume.create_group(self.context, group)
+        self.assertEqual(
+            group.id,
+            objects.Group.get_by_id(context.get_admin_context(),
+                                    group.id).id)
+
+        self.volume.disable_replication(self.context, group)
+        group = objects.Group.get_by_id(
+            context.get_admin_context(), group.id)
+        self.assertEqual(fields.ReplicationStatus.DISABLED,
+                         group.replication_status)
+
+        group.replication_status = fields.ReplicationStatus.ENABLING
+        group.save()
+        self.volume.enable_replication(self.context, group)
+        group = objects.Group.get_by_id(
+            context.get_admin_context(), group.id)
+        self.assertEqual(fields.ReplicationStatus.ENABLED,
+                         group.replication_status)
+
+        group.replication_status = fields.ReplicationStatus.FAILING_OVER
+        group.save()
+        self.volume.failover_replication(self.context, group)
+        group = objects.Group.get_by_id(
+            context.get_admin_context(), group.id)
+        self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
+                         group.replication_status)
+
+        targets = self.volume.list_replication_targets(self.context, group)
+        self.assertIn('replication_targets', targets)
+
+        self.volume.delete_group(self.context, group)
+        grp = objects.Group.get_by_id(
+            context.get_admin_context(read_deleted='yes'), group.id)
+        self.assertEqual(fields.GroupStatus.DELETED, grp.status)
+        self.assertRaises(exception.NotFound,
+                          objects.Group.get_by_id,
+                          self.context,
+                          group.id)
diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py
index a5e0b22..e44022b 100644
--- a/cinder/tests/unit/objects/test_objects.py
+++ b/cinder/tests/unit/objects/test_objects.py
@@ -28,7 +28,7 @@ object_data = {
     'BackupImport': '1.4-c50f7a68bb4c400dd53dd219685b3992',
     'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'CleanupRequest': '1.0-e7c688b893e1d5537ccf65cc3eb10a28',
-    'Cluster': '1.1-cdb1572b250837933d950cc6662313b8',
+    'Cluster': '1.1-e2c533eb8cdd8d229b6c45c6cf3a9e2c',
     'ClusterList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'CGSnapshot': '1.1-3212ac2b4c2811b7134fb9ba2c49ff74',
     'CGSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@@ -43,7 +43,7 @@ object_data = {
     'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
     'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
-    'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
+    'Service': '1.4-a6727ccda6d4043f5e38e75c7c518c7f',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Snapshot': '1.4-b7aa184837ccff570b8443bfd1773017',
     'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@@ -56,7 +56,7 @@ object_data = {
     'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30',
     'GroupTypeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Group': '1.1-bd853b1d1ee05949d9ce4b33f80ac1a0',
+    'Group': '1.2-2ade6acf2e55687b980048fc3f51dad9',
     'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'GroupSnapshot': '1.0-9af3e994e889cbeae4427c3e351fa91d',
     'GroupSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
diff --git a/cinder/tests/unit/policy.json b/cinder/tests/unit/policy.json
index d944799..87b9fa0 100644
--- a/cinder/tests/unit/policy.json
+++ b/cinder/tests/unit/policy.json
@@ -136,6 +136,10 @@
136 "group:get_all_group_snapshots": "", 136 "group:get_all_group_snapshots": "",
137 "group:reset_group_snapshot_status":"", 137 "group:reset_group_snapshot_status":"",
138 "group:reset_status":"", 138 "group:reset_status":"",
139 "group:enable_replication": "",
140 "group:disable_replication": "",
141 "group:failover_replication": "",
142 "group:list_replication_targets": "",
139 143
140 "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", 144 "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
141 145
diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py
index 2fe66bd..58ab9cc 100644
--- a/cinder/tests/unit/test_db_api.py
+++ b/cinder/tests/unit/test_db_api.py
@@ -3057,3 +3057,58 @@ class DBAPIBackendTestCase(BaseTest):
         cluster += '#poolname'
         self.assertEqual(frozen,
                          db.is_backend_frozen(self.ctxt, host, cluster))
+
+
+class DBAPIGroupTestCase(BaseTest):
+    def test_group_get_all_by_host(self):
+        grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'})
+        groups = []
+        backend = 'host1@lvm'
+        for i in range(3):
+            groups.append([db.group_create(
+                self.ctxt,
+                {'host': '%(b)s%(n)d' % {'b': backend, 'n': i},
+                 'group_type_id': grp_type['id']})
+                for j in range(3)])
+
+        for i in range(3):
+            host = '%(b)s%(n)d' % {'b': backend, 'n': i}
+            filters = {'host': host, 'backend_match_level': 'backend'}
+            grps = db.group_get_all(
+                self.ctxt, filters=filters)
+            self._assertEqualListsOfObjects(groups[i], grps)
+            for grp in grps:
+                db.group_destroy(self.ctxt, grp['id'])
+
+        db.group_type_destroy(self.ctxt, grp_type['id'])
+
+    def test_group_get_all_by_host_with_pools(self):
+        grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'})
+        groups = []
+        backend = 'host1@lvm'
+        pool = '%s#pool1' % backend
+        grp_on_host_wo_pool = [db.group_create(
+            self.ctxt,
+            {'host': backend,
+             'group_type_id': grp_type['id']})
+            for j in range(3)]
+        grp_on_host_w_pool = [db.group_create(
+            self.ctxt,
+            {'host': pool,
+             'group_type_id': grp_type['id']})]
+        groups.append(grp_on_host_wo_pool + grp_on_host_w_pool)
+        # insert an additional record that doesn't belongs to the same
+        # host as 'foo' and test if it is included in the result
+        grp_foobar = db.group_create(self.ctxt,
+                                     {'host': '%sfoo' % backend,
+                                      'group_type_id': grp_type['id']})
+
+        filters = {'host': backend, 'backend_match_level': 'backend'}
+        grps = db.group_get_all(self.ctxt, filters=filters)
+        self._assertEqualListsOfObjects(groups[0], grps)
+        for grp in grps:
+            db.group_destroy(self.ctxt, grp['id'])
+
+        db.group_destroy(self.ctxt, grp_foobar['id'])
+
+        db.group_type_destroy(self.ctxt, grp_type['id'])
diff --git a/cinder/tests/unit/test_migrations.py b/cinder/tests/unit/test_migrations.py
index 3f30fc1..6b5ff4a 100644
--- a/cinder/tests/unit/test_migrations.py
+++ b/cinder/tests/unit/test_migrations.py
@@ -1238,6 +1238,12 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
             self.assertEqual(data[volume.id], volume.replication_status,
                              'id %s' % volume.id)
 
+    def _check_102(self, engine, data):
+        """Test adding replication_status to groups table."""
+        groups = db_utils.get_table(engine, 'groups')
+        self.assertIsInstance(groups.c.replication_status.type,
+                              self.VARCHAR_TYPE)
+
     def test_walk_versions(self):
         self.walk_versions(False, False)
         self.assert_each_foreign_key_is_part_of_an_index()
diff --git a/cinder/tests/unit/volume/test_rpcapi.py b/cinder/tests/unit/volume/test_rpcapi.py
index 446255a..137f2f5 100644
--- a/cinder/tests/unit/volume/test_rpcapi.py
+++ b/cinder/tests/unit/volume/test_rpcapi.py
@@ -629,3 +629,29 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
                            expected_kwargs_diff={
                                'snapshot_id': self.fake_snapshot.id},
                            version='3.13')
+
+    def test_enable_replication(self):
+        self._test_rpc_api('enable_replication', rpc_method='cast',
+                           server=self.fake_group.host,
+                           group=self.fake_group,
+                           version='3.14')
+
+    def test_disable_replication(self):
+        self._test_rpc_api('disable_replication', rpc_method='cast',
+                           server=self.fake_group.host,
+                           group=self.fake_group,
+                           version='3.14')
+
+    def test_failover_replication(self):
+        self._test_rpc_api('failover_replication', rpc_method='cast',
+                           server=self.fake_group.host,
+                           group=self.fake_group,
+                           allow_attached_volume=False,
+                           secondary_backend_id=None,
+                           version='3.14')
+
+    def test_list_replication_targets(self):
+        self._test_rpc_api('list_replication_targets', rpc_method='call',
+                           server=self.fake_group.host,
+                           group=self.fake_group,
+                           version='3.14')
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index bbc1d06..ed5e352 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -1505,6 +1505,55 @@ class BaseVD(object):
1505 method = getattr(cls, method_name) 1505 method = getattr(cls, method_name)
1506 return method.__module__ == getattr(BaseVD, method_name).__module__ 1506 return method.__module__ == getattr(BaseVD, method_name).__module__
1507 1507
1508 # Replication Group (Tiramisu)
1509 def enable_replication(self, context, group, volumes):
1510 """Enables replication for a group and volumes in the group.
1511
1512 :param group: group object
1513 :param volumes: list of volume objects in the group
1514 :returns: model_update - dict of group updates
1515 :returns: volume_model_updates - list of dicts of volume updates
1516 """
1517 raise NotImplementedError()
1518
1519 # Replication Group (Tiramisu)
1520 def disable_replication(self, context, group, volumes):
1521 """Disables replication for a group and volumes in the group.
1522
1523 :param group: group object
1524 :param volumes: list of volume objects in the group
1525 :returns: model_update - dict of group updates
1526 :returns: volume_model_updates - list of dicts of volume updates
1527 """
1528 raise NotImplementedError()
1529
1530 # Replication Group (Tiramisu)
1531 def failover_replication(self, context, group, volumes,
1532 secondary_backend_id=None):
1533 """Fails over replication for a group and volumes in the group.
1534
1535 :param group: group object
1536 :param volumes: list of volume objects in the group
1537 :param secondary_backend_id: backend_id of the secondary site
1538 :returns: model_update - dict of group updates
1539 :returns: volume_model_updates - list of dicts of volume updates
1540 """
1541 raise NotImplementedError()
1542
1543 def get_replication_error_status(self, context, groups):
1544         """Returns error info for replicated groups and their volumes.
1545
1546 :returns: group_model_updates - list of dicts of group updates
1547 if error happens. For example, a dict of a group can be as follows:
1548 {'group_id': xxxx,
1549 'replication_status': fields.ReplicationStatus.ERROR}
1550 :returns: volume_model_updates - list of dicts of volume updates
1551 if error happens. For example, a dict of a volume can be as follows:
1552 {'volume_id': xxxx,
1553 'replication_status': fields.ReplicationStatus.ERROR}
1554 """
1555 return [], []
1556
1508 @classmethod 1557 @classmethod
1509 def supports_replication_feature(cls, feature): 1558 def supports_replication_feature(cls, feature):
1510 """Check if driver class supports replication features. 1559 """Check if driver class supports replication features.
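
The BaseVD hooks above define the contract a backend opts into for Tiramisu: each call returns a (model_update, volume_model_updates) pair keyed the way the VolumeManager code below expects ('replication_status' for the group, 'id' plus 'replication_status' per volume). A hypothetical sketch, not part of this patch; a real driver would implement these on its VolumeDriver subclass:

    # Illustrative only; the class and method bodies are made up.
    from cinder.objects import fields


    class ExampleReplicationGroupMixin(object):

        def enable_replication(self, context, group, volumes):
            # Ask the backend to start replicating the group, then report
            # the resulting statuses back to the volume manager.
            model_update = {
                'replication_status': fields.ReplicationStatus.ENABLED}
            volume_model_updates = [
                {'id': vol.id,
                 'replication_status': fields.ReplicationStatus.ENABLED}
                for vol in volumes]
            return model_update, volume_model_updates

        def failover_replication(self, context, group, volumes,
                                 secondary_backend_id=None):
            # 'default' means fail back to the primary site.
            status = (fields.ReplicationStatus.ENABLED
                      if secondary_backend_id == 'default'
                      else fields.ReplicationStatus.FAILED_OVER)
            volume_model_updates = [
                {'id': vol.id, 'replication_status': status}
                for vol in volumes]
            return {'replication_status': status}, volume_model_updates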
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 40d8fbc..d650dc9 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -2172,6 +2172,49 @@ class VolumeManager(manager.CleanableManager,
2172 if self.extra_capabilities: 2172 if self.extra_capabilities:
2173 volume_stats.update(self.extra_capabilities) 2173 volume_stats.update(self.extra_capabilities)
2174 if volume_stats: 2174 if volume_stats:
2175
2176 # NOTE(xyang): If driver reports replication_status to be
2177 # 'error' in volume_stats, get model updates from driver
2178 # and update db
2179 if volume_stats.get('replication_status') == (
2180 fields.ReplicationStatus.ERROR):
2181 backend = vol_utils.extract_host(self.host, 'backend')
2182 groups = vol_utils.get_replication_groups_by_host(
2183 context, backend)
2184 group_model_updates, volume_model_updates = (
2185 self.driver.get_replication_error_status(context,
2186 groups))
2187 for grp_update in group_model_updates:
2188 try:
2189 grp_obj = objects.Group.get_by_id(
2190 context, grp_update['group_id'])
2191 grp_obj.update(grp_update)
2192 grp_obj.save()
2193 except exception.GroupNotFound:
2194 # Group may be deleted already. Log a warning
2195 # and continue.
2196 LOG.warning("Group %(grp)s not found while "
2197 "updating driver status.",
2198 {'grp': grp_update['group_id']},
2199 resource={
2200 'type': 'group',
2201 'id': grp_update['group_id']})
2202 for vol_update in volume_model_updates:
2203 try:
2204 vol_obj = objects.Volume.get_by_id(
2205 context, vol_update['volume_id'])
2206 vol_obj.update(vol_update)
2207 vol_obj.save()
2208 except exception.VolumeNotFound:
2209 # Volume may be deleted already. Log a warning
2210 # and continue.
2211 LOG.warning("Volume %(vol)s not found while "
2212 "updating driver status.",
2213 {'vol': vol_update['volume_id']},
2214 resource={
2215 'type': 'volume',
2216 'id': vol_update['volume_id']})
2217
2175 # Append volume stats with 'allocated_capacity_gb' 2218 # Append volume stats with 'allocated_capacity_gb'
2176 self._append_volume_stats(volume_stats) 2219 self._append_volume_stats(volume_stats)
2177 2220
@@ -4182,3 +4225,332 @@ class VolumeManager(manager.CleanableManager,
4182 'attached_mode') 4225 'attached_mode')
4183 self._notify_about_volume_usage(context, vref, "detach.end") 4226 self._notify_about_volume_usage(context, vref, "detach.end")
4184 return has_shared_connection 4227 return has_shared_connection
4228
4229 # Replication group API (Tiramisu)
4230 def enable_replication(self, ctxt, group):
4231 """Enable replication."""
4232 group.refresh()
4233 if group.replication_status != fields.ReplicationStatus.ENABLING:
4234 msg = _("Replication status in group %s is not "
4235 "enabling. Cannot enable replication.") % group.id
4236 LOG.error(msg)
4237 raise exception.InvalidGroup(reason=msg)
4238
4239 volumes = group.volumes
4240 for vol in volumes:
4241 vol.refresh()
4242 if vol.replication_status != fields.ReplicationStatus.ENABLING:
4243 msg = _("Replication status in volume %s is not "
4244 "enabling. Cannot enable replication.") % vol.id
4245 LOG.error(msg)
4246 raise exception.InvalidVolume(reason=msg)
4247
4248 self._notify_about_group_usage(
4249 ctxt, group, "enable_replication.start")
4250
4251 volumes_model_update = None
4252 model_update = None
4253 try:
4254 utils.require_driver_initialized(self.driver)
4255
4256 model_update, volumes_model_update = (
4257 self.driver.enable_replication(ctxt, group, volumes))
4258
4259 if volumes_model_update:
4260 for update in volumes_model_update:
4261 vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
4262 vol_obj.update(update)
4263 vol_obj.save()
4264 # If we failed to enable a volume, make sure the status
4265 # for the group is set to error as well
4266 if (update.get('replication_status') ==
4267 fields.ReplicationStatus.ERROR and
4268 model_update.get('replication_status') !=
4269 fields.ReplicationStatus.ERROR):
4270 model_update['replication_status'] = update.get(
4271 'replication_status')
4272
4273 if model_update:
4274 if (model_update.get('replication_status') ==
4275 fields.ReplicationStatus.ERROR):
4276 msg = _('Enable replication failed.')
4277 LOG.error(msg,
4278 resource={'type': 'group',
4279 'id': group.id})
4280 raise exception.VolumeDriverException(message=msg)
4281 else:
4282 group.update(model_update)
4283 group.save()
4284
4285 except exception.CinderException as ex:
4286 group.status = fields.GroupStatus.ERROR
4287 group.replication_status = fields.ReplicationStatus.ERROR
4288 group.save()
4289 # Update volume status to 'error' if driver returns
4290 # None for volumes_model_update.
4291 if not volumes_model_update:
4292 for vol in volumes:
4293 vol.status = 'error'
4294 vol.replication_status = fields.ReplicationStatus.ERROR
4295 vol.save()
4296 err_msg = _("Enable replication group failed: "
4297 "%s.") % six.text_type(ex)
4298 raise exception.ReplicationGroupError(reason=err_msg,
4299 group_id=group.id)
4300
4301 for vol in volumes:
4302 vol.replication_status = fields.ReplicationStatus.ENABLED
4303 vol.save()
4304 group.replication_status = fields.ReplicationStatus.ENABLED
4305 group.save()
4306
4307 self._notify_about_group_usage(
4308 ctxt, group, "enable_replication.end", volumes)
4309 LOG.info("Enable replication completed successfully.",
4310 resource={'type': 'group',
4311 'id': group.id})
4312
4313 # Replication group API (Tiramisu)
4314 def disable_replication(self, ctxt, group):
4315 """Disable replication."""
4316 group.refresh()
4317 if group.replication_status != fields.ReplicationStatus.DISABLING:
4318 msg = _("Replication status in group %s is not "
4319 "disabling. Cannot disable replication.") % group.id
4320 LOG.error(msg)
4321 raise exception.InvalidGroup(reason=msg)
4322
4323 volumes = group.volumes
4324 for vol in volumes:
4325 vol.refresh()
4326 if (vol.replication_status !=
4327 fields.ReplicationStatus.DISABLING):
4328 msg = _("Replication status in volume %s is not "
4329 "disabling. Cannot disable replication.") % vol.id
4330 LOG.error(msg)
4331 raise exception.InvalidVolume(reason=msg)
4332
4333 self._notify_about_group_usage(
4334 ctxt, group, "disable_replication.start")
4335
4336 volumes_model_update = None
4337 model_update = None
4338 try:
4339 utils.require_driver_initialized(self.driver)
4340
4341 model_update, volumes_model_update = (
4342 self.driver.disable_replication(ctxt, group, volumes))
4343
4344 if volumes_model_update:
4345 for update in volumes_model_update:
4346 vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
4347 vol_obj.update(update)
4348 vol_obj.save()
4349                     # If we failed to disable a volume, make sure the status
4350 # for the group is set to error as well
4351 if (update.get('replication_status') ==
4352 fields.ReplicationStatus.ERROR and
4353 model_update.get('replication_status') !=
4354 fields.ReplicationStatus.ERROR):
4355 model_update['replication_status'] = update.get(
4356 'replication_status')
4357
4358 if model_update:
4359 if (model_update.get('replication_status') ==
4360 fields.ReplicationStatus.ERROR):
4361 msg = _('Disable replication failed.')
4362 LOG.error(msg,
4363 resource={'type': 'group',
4364 'id': group.id})
4365 raise exception.VolumeDriverException(message=msg)
4366 else:
4367 group.update(model_update)
4368 group.save()
4369
4370 except exception.CinderException as ex:
4371 group.status = fields.GroupStatus.ERROR
4372 group.replication_status = fields.ReplicationStatus.ERROR
4373 group.save()
4374 # Update volume status to 'error' if driver returns
4375 # None for volumes_model_update.
4376 if not volumes_model_update:
4377 for vol in volumes:
4378 vol.status = 'error'
4379 vol.replication_status = fields.ReplicationStatus.ERROR
4380 vol.save()
4381 err_msg = _("Disable replication group failed: "
4382 "%s.") % six.text_type(ex)
4383 raise exception.ReplicationGroupError(reason=err_msg,
4384 group_id=group.id)
4385
4386 for vol in volumes:
4387 vol.replication_status = fields.ReplicationStatus.DISABLED
4388 vol.save()
4389 group.replication_status = fields.ReplicationStatus.DISABLED
4390 group.save()
4391
4392 self._notify_about_group_usage(
4393 ctxt, group, "disable_replication.end", volumes)
4394 LOG.info("Disable replication completed successfully.",
4395 resource={'type': 'group',
4396 'id': group.id})
4397
4398 # Replication group API (Tiramisu)
4399 def failover_replication(self, ctxt, group, allow_attached_volume=False,
4400 secondary_backend_id=None):
4401 """Failover replication."""
4402 group.refresh()
4403 if group.replication_status != fields.ReplicationStatus.FAILING_OVER:
4404 msg = _("Replication status in group %s is not "
4405                       "failing-over. Cannot fail over replication.") % group.id
4406 LOG.error(msg)
4407 raise exception.InvalidGroup(reason=msg)
4408
4409 volumes = group.volumes
4410 for vol in volumes:
4411 vol.refresh()
4412 if vol.status == 'in-use' and not allow_attached_volume:
4413 msg = _("Volume %s is attached but allow_attached_volume flag "
4414                       "is False. Cannot fail over replication.") % vol.id
4415 LOG.error(msg)
4416 raise exception.InvalidVolume(reason=msg)
4417 if (vol.replication_status !=
4418 fields.ReplicationStatus.FAILING_OVER):
4419 msg = _("Replication status in volume %s is not "
4420                           "failing-over. Cannot fail over replication.") % vol.id
4421 LOG.error(msg)
4422 raise exception.InvalidVolume(reason=msg)
4423
4424 self._notify_about_group_usage(
4425 ctxt, group, "failover_replication.start")
4426
4427 volumes_model_update = None
4428 model_update = None
4429 try:
4430 utils.require_driver_initialized(self.driver)
4431
4432 model_update, volumes_model_update = (
4433 self.driver.failover_replication(
4434 ctxt, group, volumes, secondary_backend_id))
4435
4436 if volumes_model_update:
4437 for update in volumes_model_update:
4438 vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
4439 vol_obj.update(update)
4440 vol_obj.save()
4441                     # If we failed to fail over a volume, make sure the status
4442 # for the group is set to error as well
4443 if (update.get('replication_status') ==
4444 fields.ReplicationStatus.ERROR and
4445 model_update.get('replication_status') !=
4446 fields.ReplicationStatus.ERROR):
4447 model_update['replication_status'] = update.get(
4448 'replication_status')
4449
4450 if model_update:
4451 if (model_update.get('replication_status') ==
4452 fields.ReplicationStatus.ERROR):
4453 msg = _('Failover replication failed.')
4454 LOG.error(msg,
4455 resource={'type': 'group',
4456 'id': group.id})
4457 raise exception.VolumeDriverException(message=msg)
4458 else:
4459 group.update(model_update)
4460 group.save()
4461
4462 except exception.CinderException as ex:
4463 group.status = fields.GroupStatus.ERROR
4464 group.replication_status = fields.ReplicationStatus.ERROR
4465 group.save()
4466 # Update volume status to 'error' if driver returns
4467 # None for volumes_model_update.
4468 if not volumes_model_update:
4469 for vol in volumes:
4470 vol.status = 'error'
4471 vol.replication_status = fields.ReplicationStatus.ERROR
4472 vol.save()
4473 err_msg = _("Failover replication group failed: "
4474 "%s.") % six.text_type(ex)
4475 raise exception.ReplicationGroupError(reason=err_msg,
4476 group_id=group.id)
4477
4478 for vol in volumes:
4479 if secondary_backend_id == "default":
4480 vol.replication_status = fields.ReplicationStatus.ENABLED
4481 else:
4482 vol.replication_status = (
4483 fields.ReplicationStatus.FAILED_OVER)
4484 vol.save()
4485 if secondary_backend_id == "default":
4486 group.replication_status = fields.ReplicationStatus.ENABLED
4487 else:
4488 group.replication_status = fields.ReplicationStatus.FAILED_OVER
4489 group.save()
4490
4491 self._notify_about_group_usage(
4492 ctxt, group, "failover_replication.end", volumes)
4493 LOG.info("Failover replication completed successfully.",
4494 resource={'type': 'group',
4495 'id': group.id})
4496
4497 def list_replication_targets(self, ctxt, group):
4498 """Provide a means to obtain replication targets for a group.
4499
4500 This method is used to find the replication_device config
4501 info. 'backend_id' is a required key in 'replication_device'.
4502
4503 Response Example for admin:
4504 {
4505 'replication_targets': [
4506 {
4507 'backend_id': 'vendor-id-1',
4508 'unique_key': 'val1',
4509 ......
4510 },
4511 {
4512 'backend_id': 'vendor-id-2',
4513 'unique_key': 'val2',
4514 ......
4515 }
4516 ]
4517 }
4518
4519 Response example for non-admin:
4520 {
4521 'replication_targets': [
4522 {
4523 'backend_id': 'vendor-id-1'
4524 },
4525 {
4526 'backend_id': 'vendor-id-2'
4527 }
4528 ]
4529 }
4530
4531 """
4532
4533 replication_targets = []
4534 try:
4535 group = objects.Group.get_by_id(ctxt, group.id)
4536 if self.configuration.replication_device:
4537 if ctxt.is_admin:
4538 for rep_dev in self.configuration.replication_device:
4539 keys = rep_dev.keys()
4540 dev = {}
4541 for k in keys:
4542 dev[k] = rep_dev[k]
4543 replication_targets.append(dev)
4544 else:
4545 for rep_dev in self.configuration.replication_device:
4546 dev = rep_dev.get('backend_id')
4547 if dev:
4548 replication_targets.append({'backend_id': dev})
4549
4550 except exception.GroupNotFound:
4551 err_msg = (_("Get replication targets failed. Group %s not "
4552 "found.") % group.id)
4553 LOG.exception(err_msg)
4554 raise exception.VolumeBackendAPIException(data=err_msg)
4555
4556 return {'replication_targets': replication_targets}
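
list_replication_targets above simply reflects the backend's replication_device configuration, with non-admin callers reduced to backend_id only. An illustration with made-up device entries:

    # Illustration only: hypothetical replication_device entries as parsed
    # from cinder.conf for this backend.
    replication_device = [
        {'backend_id': 'vendor-id-1', 'san_ip': '10.0.0.1'},
        {'backend_id': 'vendor-id-2', 'san_ip': '10.0.0.2'},
    ]

    # Admin callers receive every key of every device.
    admin_view = {'replication_targets': [dict(d) for d in replication_device]}

    # Non-admin callers only receive the backend_id of each device.
    non_admin_view = {'replication_targets': [
        {'backend_id': d['backend_id']}
        for d in replication_device if d.get('backend_id')]}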
diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py
index 081f1b3..cbeb08c 100644
--- a/cinder/volume/rpcapi.py
+++ b/cinder/volume/rpcapi.py
@@ -130,9 +130,11 @@ class VolumeAPI(rpc.RPCAPI):
130 3.12 - Adds set_log_levels and get_log_levels 130 3.12 - Adds set_log_levels and get_log_levels
131 3.13 - Add initialize_connection_snapshot, 131 3.13 - Add initialize_connection_snapshot,
132 terminate_connection_snapshot, and remove_export_snapshot. 132 terminate_connection_snapshot, and remove_export_snapshot.
133 3.14 - Adds enable_replication, disable_replication,
134 failover_replication, and list_replication_targets.
133 """ 135 """
134 136
135 RPC_API_VERSION = '3.13' 137 RPC_API_VERSION = '3.14'
136 RPC_DEFAULT_VERSION = '3.0' 138 RPC_DEFAULT_VERSION = '3.0'
137 TOPIC = constants.VOLUME_TOPIC 139 TOPIC = constants.VOLUME_TOPIC
138 BINARY = 'cinder-volume' 140 BINARY = 'cinder-volume'
@@ -459,3 +461,29 @@ class VolumeAPI(rpc.RPCAPI):
459 def get_log_levels(self, context, service, log_request): 461 def get_log_levels(self, context, service, log_request):
460 cctxt = self._get_cctxt(host=service.host, version='3.12') 462 cctxt = self._get_cctxt(host=service.host, version='3.12')
461 return cctxt.call(context, 'get_log_levels', log_request=log_request) 463 return cctxt.call(context, 'get_log_levels', log_request=log_request)
464
465 @rpc.assert_min_rpc_version('3.14')
466 def enable_replication(self, ctxt, group):
467 cctxt = self._get_cctxt(group.host, version='3.14')
468 cctxt.cast(ctxt, 'enable_replication',
469 group=group)
470
471 @rpc.assert_min_rpc_version('3.14')
472 def disable_replication(self, ctxt, group):
473 cctxt = self._get_cctxt(group.host, version='3.14')
474 cctxt.cast(ctxt, 'disable_replication',
475 group=group)
476
477 @rpc.assert_min_rpc_version('3.14')
478 def failover_replication(self, ctxt, group, allow_attached_volume=False,
479 secondary_backend_id=None):
480 cctxt = self._get_cctxt(group.host, version='3.14')
481 cctxt.cast(ctxt, 'failover_replication',
482 group=group, allow_attached_volume=allow_attached_volume,
483 secondary_backend_id=secondary_backend_id)
484
485 @rpc.assert_min_rpc_version('3.14')
486 def list_replication_targets(self, ctxt, group):
487 cctxt = self._get_cctxt(group.host, version='3.14')
488 return cctxt.call(ctxt, 'list_replication_targets',
489 group=group)
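
These casts are meant to be driven from the group API layer (cinder/group/api.py in this change), which first moves the group and its volumes into the transitional enabling/disabling/failing-over states the manager checks, then casts to the backend. A hypothetical, heavily simplified caller:

    # Hypothetical sketch; the real cinder/group/api.py additionally checks
    # group type specs, volume states and policy before doing this.
    from cinder.objects import fields
    from cinder.volume import rpcapi as volume_rpcapi


    def enable_group_replication(ctxt, group):
        # The manager refuses to act unless the group and its volumes are
        # already in the transitional 'enabling' state.
        group.replication_status = fields.ReplicationStatus.ENABLING
        group.save()
        for vol in group.volumes:
            vol.replication_status = fields.ReplicationStatus.ENABLING
            vol.save()
        volume_rpcapi.VolumeAPI().enable_replication(ctxt, group)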
diff --git a/cinder/volume/utils.py b/cinder/volume/utils.py
index 97f6dea..96f1a43 100644
--- a/cinder/volume/utils.py
+++ b/cinder/volume/utils.py
@@ -926,3 +926,38 @@ def is_group_a_cg_snapshot_type(group_or_snap):
926 ) 926 )
927 return spec == "<is> True" 927 return spec == "<is> True"
928 return False 928 return False
929
930
931def is_group_a_type(group, key):
932 if group.group_type_id is not None:
933 spec = group_types.get_group_type_specs(
934 group.group_type_id, key=key
935 )
936 return spec == "<is> True"
937 return False
938
939
940def is_group_a_non_consistent_replication_group_type(group):
941 return is_group_a_type(group, "group_replication_enabled")
942
943
944def is_group_a_consistent_replication_group_type(group):
945 return is_group_a_type(group, "consistent_group_replication_enabled")
946
947
948def is_group_a_replication_group_type(group):
949 if (is_group_a_non_consistent_replication_group_type(group) or
950 is_group_a_consistent_replication_group_type(group)):
951 return True
952 return False
953
954
955def get_replication_groups_by_host(ctxt, host):
956 groups = []
957 filters = {'host': host, 'backend_match_level': 'backend'}
958 grps = objects.GroupList.get_all(ctxt, filters=filters)
959 for grp in grps:
960 if is_group_a_replication_group_type(grp):
961 groups.append(grp)
962
963 return groups
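
The helpers above key entirely off group-type specs: a group counts as a replication group when its type carries 'group_replication_enabled' or 'consistent_group_replication_enabled' set to "<is> True". A hypothetical setup, assuming the usual cinder.volume.group_types.create() helper signature:

    # Illustration only; the type name is made up and the create() kwargs
    # are assumed to match cinder.volume.group_types.create().
    from cinder.volume import group_types

    # ctxt: an admin RequestContext.
    grp_type = group_types.create(
        ctxt, 'replicated-group-type',
        group_specs={'group_replication_enabled': '<is> True'})
    # Groups created with this type now satisfy
    # is_group_a_replication_group_type() and are returned by
    # get_replication_groups_by_host() for their backend.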
diff --git a/etc/cinder/policy.json b/etc/cinder/policy.json
index a32ad1c..9a92312 100644
--- a/etc/cinder/policy.json
+++ b/etc/cinder/policy.json
@@ -132,6 +132,11 @@
132 "group:reset_group_snapshot_status":"rule:admin_api", 132 "group:reset_group_snapshot_status":"rule:admin_api",
133 "group:reset_status":"rule:admin_api", 133 "group:reset_status":"rule:admin_api",
134 134
135 "group:enable_replication": "rule:admin_or_owner",
136 "group:disable_replication": "rule:admin_or_owner",
137 "group:failover_replication": "rule:admin_or_owner",
138 "group:list_replication_targets": "rule:admin_or_owner",
139
135 "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api", 140 "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
136 "message:delete": "rule:admin_or_owner", 141 "message:delete": "rule:admin_or_owner",
137 "message:get": "rule:admin_or_owner", 142 "message:get": "rule:admin_or_owner",
diff --git a/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml b/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml
new file mode 100644
index 0000000..e73a872
--- /dev/null
+++ b/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml
@@ -0,0 +1,6 @@
1---
2features:
3 - |
4 Introduced replication group support and added group action APIs
5 enable_replication, disable_replication, failover_replication and
6 list_replication_targets.
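
For context, microversion 3.38 exposes these as group action calls. A hypothetical client-side sketch (IDs, token and endpoint are placeholders; the body keys mirror the RPC arguments above and should be checked against the api-ref):

    # Hypothetical usage sketch only.
    import requests

    BASE = 'http://cinder.example.com/v3/PROJECT_ID'
    HEADERS = {'X-Auth-Token': 'TOKEN',
               'OpenStack-API-Version': 'volume 3.38',
               'Content-Type': 'application/json'}

    # Enable replication on a group.
    requests.post(BASE + '/groups/GROUP_ID/action',
                  json={'enable_replication': {}}, headers=HEADERS)

    # Fail the group over to a secondary backend.
    requests.post(BASE + '/groups/GROUP_ID/action',
                  json={'failover_replication': {
                      'allow_attached_volume': False,
                      'secondary_backend_id': 'vendor-id-1'}},
                  headers=HEADERS)

    # List replication targets (synchronous; returns the dict shown in
    # list_replication_targets above).
    requests.post(BASE + '/groups/GROUP_ID/action',
                  json={'list_replication_targets': {}}, headers=HEADERS)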