Add VNX replication group support

This patch adds consistent replication group capabilities for VNX
driver.

Supported operations:
* Create volume and add to replication group
* Add volume to existing replication group
* Enable replication on group
* Disable replication on group
* Fail over replication group back and forth

DocImpact
Implements: blueprint replication-cg-vnx

Change-Id: Iead8d2fd5581a70afb481239199fcbb1246aa27e
This commit is contained in:
Peter Wang 2017-07-23 00:44:45 -04:00
parent 212b045a02
commit c52323babd
18 changed files with 1161 additions and 176 deletions

View File

@ -182,3 +182,23 @@ class VNXMirrorPromotePrimaryError(VNXMirrorException):
class VNXMirrorNotFoundError(VNXMirrorException):
    """Raised when the requested mirror does not exist."""
    message = 'Mirror not found'


# Mirror-group error types used by the consistent replication group
# tests; presumably these mirror the storops exception names -- confirm
# against the storops library.
class VNXMirrorGroupNameInUseError(VNXMirrorException):
    """Raised when creating a mirror group whose name is already taken."""
    message = 'Mirror Group name already in use'


class VNXMirrorGroupNotFoundError(VNXMirrorException):
    """Raised when the requested mirror group does not exist."""
    message = 'Unable to locate the specified group'


class VNXMirrorGroupAlreadyMemberError(VNXMirrorException):
    """Raised when adding a mirror that is already in the group."""
    message = 'The mirror is already a member of a group'


class VNXMirrorGroupMirrorNotMemberError(VNXMirrorException):
    """Raised when removing a mirror that is not in the group."""
    message = 'The specified mirror is not a member of the group'


class VNXMirrorGroupAlreadyPromotedError(VNXMirrorException):
    """Raised when promoting a group with no secondary images."""
    message = 'The Consistency Group has no secondary images to promote'

View File

@ -24,10 +24,14 @@ volume: &volume_base
display_description: 'test volume'
volume_type_id:
consistencygroup_id:
group_id:
volume_attachment:
_properties: {}
volume_metadata:
_properties: {}
group:
_type: 'group'
_properties: {}
host: &host_base
_properties:
@ -69,6 +73,16 @@ cg_snapshot: &cg_snapshot_base
_uuid: cgsnapshot_id
status: 'creating'
group: &group_base
_type: 'group'
_properties: &group_base_properties
id:
_uuid: group_id
name: 'test_group'
status: 'creating'
replication_status: 'enabled'
###########################################################
# TestCommonAdapter, TestISCSIAdapter, TestFCAdapter
@ -358,7 +372,7 @@ test_terminate_connection_snapshot:
snapshot: *snapshot_base
test_setup_lun_replication:
vol1:
vol1: &volume_for_replication
_type: 'volume'
_properties:
<<: *volume_base_properties
@ -367,6 +381,15 @@ test_setup_lun_replication:
volume_type_id:
_uuid: volume_type_id
test_setup_lun_replication_in_group:
group1:
_type: 'group'
_properties:
<<: *group_base_properties
group_type_id:
_uuid: group_type_id
vol1: *volume_for_replication
test_cleanup_replication:
vol1:
_type: 'volume'
@ -406,6 +429,39 @@ test_failover_host_failback:
_uuid: volume5_id
volume_type_id:
_uuid: volume_type_id
replication_status: enabled
test_failover_host_groups:
group1:
_type: 'group'
_properties:
<<: *group_base_properties
id:
_uuid: group_id
group_type_id:
_uuid: group_type_id
replication_status: failed-over
volumes: [*volume_base, *volume_base]
vol1:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume4_id
volume_type_id:
_uuid: volume_type_id
replication_status: failed-over
vol2:
_type: 'volume'
_properties:
<<: *volume_base_properties
id:
_uuid: volume4_id
volume_type_id:
_uuid: volume_type_id
replication_status: failed-over
test_get_pool_name:
volume: *volume_base
@ -445,6 +501,25 @@ test_delete_group_snapshot:
test_delete_cgsnapshot:
###########################################################
# TestReplicationAdapter
###########################################################
test_enable_replication:
volume1: *volume_base
volume2: *volume_base
group: *group_base
test_disable_replication:
volume1: *volume_base
volume2: *volume_base
group: *group_base
test_failover_replication:
volume1: *volume_base
volume2: *volume_base
group: *group_base
###########################################################
# TestUtils
###########################################################
@ -485,6 +560,31 @@ test_get_backend_qos_specs:
volume_type_id:
_uuid: volume_type_id
test_check_type_matched_invalid:
volume:
_type: 'volume'
_properties:
<<: *volume_base_properties
volume_type_id:
_uuid: volume_type_id
group:
_type: 'group'
_properties:
id:
_uuid: group_id
group_type_id:
_uuid: group_type_id
test_check_rep_status_matched_disabled:
group:
_type: 'group'
_properties:
id:
_uuid: group_id
group_type_id:
_uuid: group_type_id
replication_status: 'disabled'
###########################################################
# TestClient
###########################################################

View File

@ -150,6 +150,13 @@ mirror_base: &mirror_base
_type: VNXMirrorImageState
value: 'SYNCHRONIZED'
mirror_group_base: &mirror_group_base
_properties: &mirror_group_base_prop
condition: 'Active'
existed: true
name: 'base_group'
role: 'Primary'
state: 'Synchronized'
###########################################################
# TestClient
@ -998,6 +1005,113 @@ test_promote_image:
_methods:
get_mirror_view: *mirror_promote_image
# Mirror group tests start
test_create_mirror_group:
vnx:
_methods:
create_mirror_group: *mirror_group_base
test_create_mirror_group_name_in_use:
vnx:
_methods:
create_mirror_group:
_raise:
VNXMirrorGroupNameInUseError: Mirror Group name already in use
get_mirror_group: *mirror_group_base
test_delete_mirror_group:
group: &group_to_delete
_methods:
delete:
vnx:
_methods:
get_mirror_group: *group_to_delete
test_delete_mirror_group_not_found:
group: &group_to_delete_not_found
_methods:
delete:
_raise:
VNXMirrorGroupNotFoundError: Unable to locate
vnx:
_methods:
get_mirror_group: *group_to_delete_not_found
test_add_mirror:
group: &group_to_add
_methods:
add_mirror:
vnx:
_methods:
get_mirror_group: *group_to_add
get_mirror_view: *mirror_base
test_add_mirror_already_added:
group: &group_to_add_added
_methods:
add_mirror:
_raise:
VNXMirrorGroupAlreadyMemberError: already a member of a group
vnx:
_methods:
get_mirror_group: *group_to_add_added
get_mirror_view: *mirror_base
test_remove_mirror:
group: &group_to_remove
_methods:
remove_mirror:
vnx:
_methods:
get_mirror_group: *group_to_remove
get_mirror_view: *mirror_base
test_remove_mirror_not_member:
group: &group_to_remove_not_member
_methods:
remove_mirror:
_raise:
VNXMirrorGroupMirrorNotMemberError: not a member of the group
vnx:
_methods:
get_mirror_group: *group_to_remove_not_member
get_mirror_view: *mirror_base
test_promote_mirror_group:
group: &group_to_promote
_methods:
promote_group:
vnx:
_methods:
get_mirror_group: *group_to_promote
test_promote_mirror_group_already_promoted:
group: &group_to_promote_already_promoted
_methods:
promote_group:
_raise:
VNXMirrorGroupAlreadyPromotedError: no secondary images to promote
vnx:
_methods:
get_mirror_group: *group_to_promote_already_promoted
test_sync_mirror_group:
group: &group_to_sync
_methods:
sync_group:
vnx:
_methods:
get_mirror_group: *group_to_sync
test_fracture_mirror_group:
group: &group_to_fracture
_methods:
fracture_group:
vnx:
_methods:
get_mirror_group: *group_to_fracture
test_get_lun_id:
test_get_lun_id_without_provider_location:
@ -1774,6 +1888,22 @@ test_setup_lun_replication:
lun_id: 222
wwn: fake_wwn
test_setup_lun_replication_in_group:
group: &group_for_enable
_methods:
add_mirror:
vnx:
_properties:
serial: 'vnx-serial'
_methods:
get_mirror_view: *mirror_base
get_mirror_group: *group_for_enable
lun:
_properties:
lun_id: 222
wwn: fake_wwn
test_cleanup_replication:
vnx:
_properties:
@ -1826,6 +1956,11 @@ test_failover_host_failback:
_properties:
lun_id: 22
test_failover_host_groups:
lun1:
_properties:
lun_id: 22
test_get_pool_name:
lun: &lun_test_get_pool_name
_properties:
@ -2017,6 +2152,32 @@ test_get_tgt_list_and_initiator_tgt_map_allow_port_only:
get_fc_port: *all_fc_ports
##########################################################
# TestReplicationAdapter
##########################################################
test_enable_replication:
vnx:
_methods:
create_mirror_group: *mirror_group_base
get_mirror_view: *mirror_base
get_mirror_group: *group_for_enable
test_disable_replication:
group: &group_for_disable
_methods:
remove_mirror:
delete:
vnx:
_methods:
get_mirror_view: *mirror_base
get_mirror_group: *group_for_disable
test_failover_replication:
lun1: *lun_base
##########################################################
# TestTaskflow
##########################################################

View File

@ -102,6 +102,7 @@ def _fake_volume_wrapper(*args, **kwargs):
'volume_metadata': 'metadata'}
if 'group' in kwargs:
expected_attrs_key['group'] = kwargs['group']
return fake_volume.fake_volume_obj(
context.get_admin_context(),
expected_attrs=[
@ -337,6 +338,7 @@ cinder_res = CinderResourceMock('mocked_cinder.yaml')
DRIVER_RES_MAPPING = {
'TestResMock': cinder_res,
'TestCommonAdapter': cinder_res,
'TestReplicationAdapter': cinder_res,
'TestISCSIAdapter': cinder_res,
'TestFCAdapter': cinder_res,
'TestUtils': cinder_res,
@ -359,6 +361,7 @@ STORAGE_RES_MAPPING = {
'TestCondition': vnx_res,
'TestClient': vnx_res,
'TestCommonAdapter': vnx_res,
'TestReplicationAdapter': vnx_res,
'TestISCSIAdapter': vnx_res,
'TestFCAdapter': vnx_res,
'TestTaskflow': vnx_res,

View File

@ -55,7 +55,6 @@ class TestCommonAdapter(test.TestCase):
@res_mock.patch_common_adapter
def test_create_volume(self, vnx_common, _ignore, mocked_input):
volume = mocked_input['volume']
volume.host.split('#')[1]
with mock.patch.object(vnx_utils, 'get_backend_qos_specs',
return_value=None):
model_update = vnx_common.create_volume(volume)
@ -1112,6 +1111,31 @@ class TestCommonAdapter(test.TestCase):
self.assertEqual(fields.ReplicationStatus.ENABLED,
rep_update['replication_status'])
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@utils.patch_group_specs({'consistent_group_replication_enabled':
                          '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_setup_lun_replication_in_group(
        self, common_adapter, mocked_res, mocked_input):
    """setup_lun_replication for a volume inside a replication-enabled
    group creates the mirror, adds the secondary image and reports the
    ENABLED replication status."""
    vol1 = mocked_input['vol1']
    group1 = mocked_input['group1']
    vol1.group = group1
    fake_mirror = utils.build_fake_mirror_view()
    fake_mirror.secondary_client.create_lun.return_value = (
        mocked_res['lun'])
    common_adapter.mirror_view = fake_mirror
    common_adapter.config.replication_device = (
        [utils.get_replication_device()])
    # 111 is the primary LUN id handed to the mirror.
    rep_update = common_adapter.setup_lun_replication(
        vol1, 111)
    fake_mirror.create_mirror.assert_called_once_with(
        'mirror_' + vol1.id, 111)
    fake_mirror.add_image.assert_called_once_with(
        'mirror_' + vol1.id, mocked_res['lun'].lun_id)
    self.assertEqual(fields.ReplicationStatus.ENABLED,
                     rep_update['replication_status'])
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
@ -1193,6 +1217,8 @@ class TestCommonAdapter(test.TestCase):
name=vol1.name)
self.assertEqual(fake_mirror.secondary_client,
common_adapter.client)
self.assertEqual(device['backend_id'],
common_adapter.active_backend_id)
self.assertEqual(device['backend_id'], backend_id)
for update in updates:
self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
@ -1205,9 +1231,9 @@ class TestCommonAdapter(test.TestCase):
common_adapter.config.replication_device = [
utils.get_replication_device()]
vol1 = mocked_input['vol1']
self.assertRaises(exception.InvalidInput,
self.assertRaises(exception.InvalidReplicationTarget,
common_adapter.failover_host,
None, [vol1], 'new_id')
None, [vol1], 'new_id', [])
@utils.patch_extra_specs({'replication_enabled': '<is> True'})
@res_mock.mock_driver_input
@ -1216,6 +1242,7 @@ class TestCommonAdapter(test.TestCase):
mocked_input):
device = utils.get_replication_device()
common_adapter.config.replication_device = [device]
common_adapter.active_backend_id = device['backend_id']
vol1 = mocked_input['vol1']
lun1 = mocked_res['lun1']
with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
@ -1238,6 +1265,49 @@ class TestCommonAdapter(test.TestCase):
self.assertEqual(fields.ReplicationStatus.ENABLED,
update['updates']['replication_status'])
@utils.patch_group_specs({'consistent_group_replication_enabled':
                          '<is> True'})
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_failover_host_groups(self, common_adapter, mocked_res,
                              mocked_input):
    """Failing back ('default') a replication group restores ENABLED
    status for the group and both member volumes."""
    device = utils.get_replication_device()
    common_adapter.config.replication_device = [device]
    # Start from a failed-over backend so 'default' means fail-back.
    common_adapter.active_backend_id = device['backend_id']
    mocked_group = mocked_input['group1']
    group1 = mock.Mock()
    group1.id = mocked_group.id
    group1.replication_status = mocked_group.replication_status
    group1.volumes = [mocked_input['vol1'], mocked_input['vol2']]
    lun1 = mocked_res['lun1']
    with mock.patch.object(common_adapter, 'build_mirror_view') as fake:
        fake_mirror = utils.build_fake_mirror_view()
        fake_mirror.secondary_client.get_lun.return_value = lun1
        fake_mirror.secondary_client.get_serial.return_value = (
            device['backend_id'])
        fake.return_value = fake_mirror
        backend_id, updates, group_update_list = (
            common_adapter.failover_host(None, [], 'default', [group1]))
        # Group-level promotion uses the group id stripped of dashes.
        fake_mirror.promote_mirror_group.assert_called_once_with(
            group1.id.replace('-', ''))
        fake_mirror.secondary_client.get_serial.assert_called_with()
        fake_mirror.secondary_client.get_lun.assert_called_with(
            name=mocked_input['vol1'].name)
        # After fail-back the adapter talks to the (new) primary again.
        self.assertEqual(fake_mirror.secondary_client,
                         common_adapter.client)
        self.assertEqual([{
            'group_id': group1.id,
            'updates': {'replication_status':
                        fields.ReplicationStatus.ENABLED}}],
            group_update_list)
        self.assertEqual(2, len(updates))
        self.assertIsNone(common_adapter.active_backend_id)
        self.assertEqual('default', backend_id)
        for update in updates:
            self.assertEqual(fields.ReplicationStatus.ENABLED,
                             update['updates']['replication_status'])
@res_mock.mock_driver_input
@res_mock.patch_common_adapter
def test_get_pool_name(self, common_adapter, mocked_res, mocked_input):

View File

@ -463,6 +463,72 @@ class TestClient(test.TestCase):
def test_promote_image(self, client, mocked):
client.promote_image('mirror_promote')
@res_mock.patch_client
def test_create_mirror_group(self, client, mocked):
    """Creating a mirror group returns a (mocked) group object."""
    group_name = 'test_mg'
    mg = client.create_mirror_group(group_name)
    self.assertIsNotNone(mg)
@res_mock.patch_client
def test_create_mirror_group_name_in_use(self, client, mocked):
    """A name clash still yields a group -- the existing one is reused."""
    group_name = 'test_mg_name_in_use'
    mg = client.create_mirror_group(group_name)
    self.assertIsNotNone(mg)
@res_mock.patch_client
def test_delete_mirror_group(self, client, mocked):
    """Deleting an existing mirror group completes without error."""
    group_name = 'delete_name'
    client.delete_mirror_group(group_name)
@res_mock.patch_client
def test_delete_mirror_group_not_found(self, client, mocked):
    """Deleting a missing group is swallowed (no exception escapes)."""
    group_name = 'group_not_found'
    client.delete_mirror_group(group_name)
@res_mock.patch_client
def test_add_mirror(self, client, mocked):
    """Adding a mirror to a group completes without error."""
    group_name = 'group_add_mirror'
    mirror_name = 'mirror_name'
    client.add_mirror(group_name, mirror_name)
@res_mock.patch_client
def test_add_mirror_already_added(self, client, mocked):
    """Adding an already-member mirror is swallowed by the client."""
    group_name = 'group_already_added'
    mirror_name = 'mirror_name'
    client.add_mirror(group_name, mirror_name)
@res_mock.patch_client
def test_remove_mirror(self, client, mocked):
    """Removing a member mirror completes without error."""
    group_name = 'group_mirror'
    mirror_name = 'mirror_name'
    client.remove_mirror(group_name, mirror_name)
@res_mock.patch_client
def test_remove_mirror_not_member(self, client, mocked):
    """Removing a non-member mirror is swallowed by the client."""
    group_name = 'group_mirror'
    mirror_name = 'mirror_name_not_member'
    client.remove_mirror(group_name, mirror_name)
@res_mock.patch_client
def test_promote_mirror_group(self, client, mocked):
    """Promoting a mirror group completes without error."""
    group_name = 'group_promote'
    client.promote_mirror_group(group_name)
@res_mock.patch_client
def test_promote_mirror_group_already_promoted(self, client, mocked):
    """Promoting an already-promoted group is swallowed by the client."""
    group_name = 'group_promote'
    client.promote_mirror_group(group_name)
@res_mock.patch_client
def test_sync_mirror_group(self, client, mocked):
    """Synchronizing a mirror group completes without error."""
    group_name = 'group_sync'
    client.sync_mirror_group(group_name)
@res_mock.patch_client
def test_fracture_mirror_group(self, client, mocked):
    """Fracturing a mirror group completes without error."""
    group_name = 'group_fracture'
    client.fracture_mirror_group(group_name)
@res_mock.mock_driver_input
@res_mock.patch_client
def test_get_lun_id(self, client, mocked, cinder_input):

View File

@ -318,3 +318,37 @@ class TestVNXMirrorView(test.TestCase):
self.primary_client.get_mirror.assert_called_once_with(
'mirror_name')
self.assertFalse(self.primary_client.fracture_image.called)
def test_create_mirror_group(self):
    """create_mirror_group is delegated to the primary client."""
    self.mirror_view.create_mirror_group('test_group')
    self.primary_client.create_mirror_group.assert_called_once_with(
        'test_group')
def test_delete_mirror_group(self):
    """delete_mirror_group is delegated to the primary client."""
    self.mirror_view.delete_mirror_group('test_group')
    self.primary_client.delete_mirror_group.assert_called_once_with(
        'test_group')
def test_add_mirror(self):
    """add_mirror is delegated to the primary client with same args."""
    self.mirror_view.add_mirror('test_group', 'test_mirror')
    self.primary_client.add_mirror.assert_called_once_with(
        'test_group', 'test_mirror')
def test_remove_mirror(self):
self.mirror_view.remove_mirror('test_group', 'test_mirror')
self.primary_client.remove_mirror('test_group', 'test_mirror')
def test_sync_mirror_group(self):
    """sync_mirror_group is delegated to the primary client."""
    self.mirror_view.sync_mirror_group('test_group')
    self.primary_client.sync_mirror_group.assert_called_once_with(
        'test_group')
def test_promote_mirror_group(self):
    """promote_mirror_group goes to the SECONDARY client (promotion
    happens on the target array)."""
    self.mirror_view.promote_mirror_group('test_group')
    self.secondary_client.promote_mirror_group.assert_called_once_with(
        'test_group')
def test_fracture_mirror_group(self):
    """fracture_mirror_group is delegated to the primary client."""
    self.mirror_view.fracture_mirror_group('test_group')
    self.primary_client.fracture_mirror_group.assert_called_once_with(
        'test_group')

View File

@ -71,3 +71,23 @@ class TestVNXDriver(test.TestCase):
_driver.terminate_connection('fake_volume', {'host': 'fake_host'})
_driver.adapter.terminate_connection.assert_called_once_with(
'fake_volume', {'host': 'fake_host'})
def test_is_consistent_group_snapshot_enabled(self):
_driver = self._get_driver('iscsi')
_driver._stats = {'consistent_group_snapshot_enabled': True}
self.assertTrue(_driver.is_consistent_group_snapshot_enabled())
_driver._stats = {'consistent_group_snapshot_enabled': False}
self.assertFalse(_driver.is_consistent_group_snapshot_enabled())
self.assertFalse(_driver.is_consistent_group_snapshot_enabled())
def test_enable_replication(self):
    """Smoke test: enable_replication accepts (ctxt, group, volumes)
    without raising; args pass through to the (mocked) adapter."""
    _driver = self._get_driver('iscsi')
    _driver.enable_replication(None, 'group', 'volumes')
def test_disable_replication(self):
    """Smoke test: disable_replication accepts (ctxt, group, volumes)
    without raising; args pass through to the (mocked) adapter."""
    _driver = self._get_driver('iscsi')
    _driver.disable_replication(None, 'group', 'volumes')
def test_failover_replication(self):
    """Smoke test: failover_replication accepts (ctxt, group, volumes,
    backend_id) without raising."""
    _driver = self._get_driver('iscsi')
    _driver.failover_replication(None, 'group', 'volumes', 'backend_id')

View File

@ -0,0 +1,96 @@
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils
class TestReplicationAdapter(test.TestCase):
    """Unit tests for the VNX replication-group adapter operations
    (enable/disable/failover replication on a consistent group)."""

    def setUp(self):
        super(TestReplicationAdapter, self).setUp()
        self.configuration = conf.Configuration(None)
        vnx_utils.init_ops(self.configuration)
        self.configuration.san_ip = '192.168.1.1'
        self.configuration.storage_vnx_authentication_type = 'global'
        self.ctxt = context.get_admin_context()

    def tearDown(self):
        super(TestReplicationAdapter, self).tearDown()

    @utils.patch_group_specs({
        'consistent_group_replication_enabled': '<is> True'})
    @res_mock.mock_driver_input
    @res_mock.patch_common_adapter
    def test_enable_replication(self, common_adapter, mocked_res,
                                mocked_input):
        """Smoke test: enabling replication on a two-volume group
        completes without raising against the mocked backend."""
        group = mocked_input['group']
        volumes = [mocked_input['volume1'],
                   mocked_input['volume2']]
        volumes[0].group = group
        volumes[1].group = group
        common_adapter.enable_replication(self.ctxt, group, volumes)

    @utils.patch_group_specs({
        'consistent_group_replication_enabled': '<is> True'})
    @res_mock.mock_driver_input
    @res_mock.patch_common_adapter
    def test_disable_replication(self, common_adapter, mocked_res,
                                 mocked_input):
        """Smoke test: disabling replication on a two-volume group
        completes without raising against the mocked backend."""
        group = mocked_input['group']
        volumes = [mocked_input['volume1'],
                   mocked_input['volume2']]
        volumes[0].group = group
        volumes[1].group = group
        common_adapter.disable_replication(self.ctxt, group, volumes)

    @utils.patch_group_specs({
        'consistent_group_replication_enabled': '<is> True'})
    @res_mock.mock_driver_input
    @res_mock.patch_common_adapter
    def test_failover_replication(self, common_adapter, mocked_res,
                                  mocked_input):
        """Failing over a group promotes the mirror group (id without
        dashes) and marks group and volumes FAILED_OVER."""
        device = utils.get_replication_device()
        common_adapter.config.replication_device = [device]
        group = mocked_input['group']
        volumes = [mocked_input['volume1'], mocked_input['volume2']]
        lun1 = mocked_res['lun1']
        volumes[0].group = group
        volumes[1].group = group
        secondary_backend_id = 'fake_serial'
        with mock.patch.object(common_adapter,
                               'build_mirror_view') as fake:
            fake_mirror = utils.build_fake_mirror_view()
            fake_mirror.secondary_client.get_lun.return_value = lun1
            fake_mirror.secondary_client.get_serial.return_value = (
                device['backend_id'])
            fake.return_value = fake_mirror
            model_update, volume_updates = common_adapter.failover_replication(
                self.ctxt, group, volumes, secondary_backend_id)
            fake_mirror.promote_mirror_group.assert_called_with(
                group.id.replace('-', ''))
            self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
                             model_update['replication_status'])
            for update in volume_updates:
                self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
                                 update['replication_status'])

View File

@ -15,6 +15,7 @@
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \
as storops_ex
@ -233,3 +234,24 @@ class TestUtils(test.TestCase):
self.assertIsNotNone(r)
self.assertEqual(100, r[common.QOS_MAX_BWS])
self.assertEqual(10, r[common.QOS_MAX_IOPS])
@ut_utils.patch_group_specs({
    'consistent_group_replication_enabled': '<is> True'})
@ut_utils.patch_extra_specs({
    'replication_enabled': '<is> False'})
@res_mock.mock_driver_input
def test_check_type_matched_invalid(self, mocked):
    """check_type_matched raises InvalidInput when a non-replicated
    volume sits in a replication-enabled group."""
    volume = mocked['volume']
    volume.group = mocked['group']
    self.assertRaises(exception.InvalidInput,
                      vnx_utils.check_type_matched,
                      volume)
@ut_utils.patch_group_specs({
    'consistent_group_replication_enabled': '<is> True'})
@res_mock.mock_driver_input
def test_check_rep_status_matched_disabled(self, mocked):
    """check_rep_status_matched raises InvalidInput for a
    replication-enabled group whose status is not 'enabled' (the mocked
    group is 'disabled')."""
    group = mocked['group']
    self.assertRaises(exception.InvalidInput,
                      vnx_utils.check_rep_status_matched,
                      group)

View File

@ -24,26 +24,28 @@ from oslo_log import log as logging
from oslo_utils import importutils
import six
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import replication
from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow
from cinder.volume.drivers.dell_emc.vnx import utils
from cinder.volume import utils as vol_utils
from cinder.zonemanager import utils as zm_utils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class CommonAdapter(object):
class CommonAdapter(replication.ReplicationAdapter):
VERSION = None
@ -219,7 +221,7 @@ class CommonAdapter(object):
"""Creates a EMC volume."""
volume_size = volume['size']
volume_name = volume['name']
utils.check_type_matched(volume)
volume_metadata = utils.get_metadata(volume)
pool = utils.get_pool_from_host(volume.host)
specs = common.ExtraSpecs.from_volume(volume)
@ -760,17 +762,6 @@ class CommonAdapter(object):
pools_stats.append(pool_stats)
return pools_stats
def append_replication_stats(self, stats):
if self.mirror_view:
stats['replication_enabled'] = True
stats['replication_count'] = 1
stats['replication_type'] = ['sync']
else:
stats['replication_enabled'] = False
stats['replication_targets'] = [
device.backend_id for device in common.ReplicationDeviceList(
self.config)]
def update_volume_stats(self):
stats = self.get_enabler_stats()
stats['pools'] = self.get_pool_stats(stats)
@ -1135,151 +1126,6 @@ class CommonAdapter(object):
self.client.detach_snapshot(smp_name)
return connection_info
def setup_lun_replication(self, volume, primary_lun_id):
"""Setup replication for LUN, this only happens in primary system."""
specs = common.ExtraSpecs.from_volume(volume)
provision = specs.provision
tier = specs.tier
rep_update = {'replication_driver_data': None,
'replication_status': fields.ReplicationStatus.DISABLED}
if specs.is_replication_enabled:
LOG.debug('Starting setup replication '
'for volume: %s.', volume.id)
lun_size = volume.size
mirror_name = utils.construct_mirror_name(volume)
pool_name = utils.get_remote_pool(self.config, volume)
emc_taskflow.create_mirror_view(
self.mirror_view, mirror_name,
primary_lun_id, pool_name,
volume.name, lun_size,
provision, tier)
LOG.info('Successfully setup replication for %s.', volume.id)
rep_update.update({'replication_status':
fields.ReplicationStatus.ENABLED})
return rep_update
def cleanup_lun_replication(self, volume):
specs = common.ExtraSpecs.from_volume(volume)
if specs.is_replication_enabled:
LOG.debug('Starting cleanup replication from volume: '
'%s.', volume.id)
mirror_name = utils.construct_mirror_name(volume)
mirror_view = self.build_mirror_view(self.config, True)
mirror_view.destroy_mirror(mirror_name, volume.name)
LOG.info(
'Successfully destroyed replication for volume: %s',
volume.id)
def build_mirror_view(self, configuration, failover=True):
"""Builds a mirror view operation class.
:param configuration: driver configuration
:param failover: True if from primary to configured array,
False if from configured array to primary.
"""
rep_devices = configuration.replication_device
if not rep_devices:
LOG.info('Replication is not configured on backend: %s.',
configuration.config_group)
return None
elif len(rep_devices) == 1:
if not self.client.is_mirror_view_enabled():
error_msg = _('Replication is configured, '
'but no MirrorView/S enabler installed on VNX.')
raise exception.InvalidInput(reason=error_msg)
rep_list = common.ReplicationDeviceList(configuration)
device = rep_list[0]
secondary_client = client.Client(
ip=device.san_ip,
username=device.san_login,
password=device.san_password,
scope=device.storage_vnx_authentication_type,
naviseccli=self.client.naviseccli,
sec_file=device.storage_vnx_security_file_dir)
if failover:
mirror_view = common.VNXMirrorView(
self.client, secondary_client)
else:
# For fail-back, we need to take care of reversed ownership.
mirror_view = common.VNXMirrorView(
secondary_client, self.client)
return mirror_view
else:
error_msg = _('VNX Cinder driver does not support '
'multiple replication targets.')
raise exception.InvalidInput(reason=error_msg)
def validate_backend_id(self, backend_id):
# Currently, VNX driver only support 1 remote device.
replication_device = common.ReplicationDeviceList(self.config)[0]
if backend_id not in (
'default', replication_device.backend_id):
raise exception.InvalidInput(
reason='Invalid backend_id specified.')
def failover_host(self, context, volumes, secondary_backend_id,
groups=None):
"""Fails over the volume back and forth.
Driver needs to update following info for failed-over volume:
1. provider_location: update serial number and lun id
2. replication_status: new status for replication-enabled volume
"""
volume_update_list = []
self.validate_backend_id(secondary_backend_id)
if secondary_backend_id != 'default':
rep_status = fields.ReplicationStatus.FAILED_OVER
mirror_view = self.build_mirror_view(self.config, True)
else:
rep_status = fields.ReplicationStatus.ENABLED
mirror_view = self.build_mirror_view(self.config, False)
def failover_volume(volume, new_status):
mirror_name = utils.construct_mirror_name(volume)
provider_location = volume.provider_location
try:
mirror_view.promote_image(mirror_name)
except storops_ex.VNXMirrorException as ex:
LOG.error(
'Failed to failover volume %(volume_id)s '
'to %(target)s: %(error)s.',
{'volume_id': volume.id,
'target': secondary_backend_id,
'error': ex})
new_status = fields.ReplicationStatus.FAILOVER_ERROR
else:
# Transfer ownership to secondary_backend_id and
# update provider_location field
secondary_client = mirror_view.secondary_client
updated = dict()
updated['system'] = secondary_client.get_serial()
updated['id'] = six.text_type(
secondary_client.get_lun(name=volume.name).lun_id)
provider_location = utils.update_provider_location(
provider_location, updated)
model_update = {'volume_id': volume.id,
'updates':
{'replication_status': new_status,
'provider_location': provider_location}}
volume_update_list.append(model_update)
for volume in volumes:
specs = common.ExtraSpecs.from_volume(volume)
if specs.is_replication_enabled:
failover_volume(volume, rep_status)
else:
# Since the array has been failed-over
# volumes without replication should be in error.
volume_update_list.append({
'volume_id': volume.id,
'updates': {'status': 'error'}})
# After failover, the secondary is now the primary,
# any sequential request will be redirected to it.
self.client = mirror_view.secondary_client
return secondary_backend_id, volume_update_list, []
def get_pool_name(self, volume):
return self.client.get_pool_name(volume.name)
@ -1293,9 +1139,13 @@ class CommonAdapter(object):
'metadata': metadata}
def create_group(self, context, group):
return self.create_consistencygroup(context, group)
rep_update = self.create_group_replication(group)
model_update = self.create_consistencygroup(context, group)
model_update.update(rep_update)
return model_update
def delete_group(self, context, group, volumes):
self.delete_group_replication(group)
return self.delete_consistencygroup(context, group, volumes)
def create_group_snapshot(self, context, group_snapshot, snapshots):
@ -1322,6 +1172,16 @@ class CommonAdapter(object):
def update_group(self, context, group,
                 add_volumes=None, remove_volumes=None):
    """Updates a group's membership.

    Volumes being added or removed must carry replication extra-specs
    matching the group, and a replication-enabled group must have its
    replication status 'enabled' before membership changes.

    Fixed: with the declared ``None`` defaults, the original
    ``add_volumes + remove_volumes`` raised TypeError; default each to
    an empty list first.
    """
    add_volumes = add_volumes or []
    remove_volumes = remove_volumes or []
    # 1. First make sure group and volumes have same
    # replication extra-specs and replications status.
    for volume in (add_volumes + remove_volumes):
        utils.check_type_matched(volume)
    # 2. Secondly, make sure replication status must be enabled for
    # replication-enabled group,
    utils.check_rep_status_matched(group)
    self.add_volumes_to_group_replication(group, add_volumes)
    self.remove_volumes_from_group_replication(group, remove_volumes)
    return self.do_update_cg(group.id,
                             add_volumes,
                             remove_volumes)

View File

@ -16,11 +16,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
from storops.lib import tasks as storops_tasks
from cinder import exception
from cinder.i18n import _
from cinder import utils as cinder_utils
@ -28,6 +23,10 @@ from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume.drivers.dell_emc.vnx import utils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
from storops.lib import tasks as storops_tasks
LOG = logging.getLogger(__name__)
@ -602,6 +601,55 @@ class Client(object):
mv = self.vnx.get_mirror_view(mirror_name)
mv.promote_image()
def create_mirror_group(self, group_name):
    """Create a mirror group, or reuse the existing one on a name clash.

    :param group_name: name of the MirrorView group to create.
    :returns: the created (or pre-existing) mirror group object.
    """
    try:
        return self.vnx.create_mirror_group(group_name)
    except storops_ex.VNXMirrorGroupNameInUseError:
        # The group already exists on the array -- hand back that one.
        return self.vnx.get_mirror_group(group_name)
def delete_mirror_group(self, group_name):
    """Delete the named mirror group; an already-deleted group is a no-op.

    :param group_name: name of the MirrorView group to delete.
    """
    group = self.vnx.get_mirror_group(group_name)
    try:
        group.delete()
    except storops_ex.VNXMirrorGroupNotFoundError:
        # Deleting twice is fine -- just record that it was gone.
        LOG.info('Mirror group %s was already deleted.', group_name)
def add_mirror(self, group_name, mirror_name):
    """Add mirror *mirror_name* to group *group_name*.

    Adding a mirror that is already a member is logged and ignored.

    :returns: the mirror group object
    """
    group = self.vnx.get_mirror_group(group_name)
    mirror = self.vnx.get_mirror_view(mirror_name)
    try:
        group.add_mirror(mirror)
    except storops_ex.VNXMirrorGroupAlreadyMemberError:
        LOG.info('Mirror %(mirror)s is already a member of %(group)s',
                 {'mirror': mirror_name, 'group': group_name})
    return group
def remove_mirror(self, group_name, mirror_name):
    """Remove mirror *mirror_name* from group *group_name*.

    Removing a mirror that is not a member is logged and ignored.
    """
    group = self.vnx.get_mirror_group(group_name)
    mirror = self.vnx.get_mirror_view(mirror_name)
    try:
        group.remove_mirror(mirror)
    except storops_ex.VNXMirrorGroupMirrorNotMemberError:
        LOG.info('Mirror %(mirror)s is not a member of %(group)s',
                 {'mirror': mirror_name, 'group': group_name})
def promote_mirror_group(self, group_name):
    """Promote the secondary images of the named mirror group.

    Promoting a group that has already been promoted is logged and
    ignored.

    :returns: the mirror group object
    """
    group = self.vnx.get_mirror_group(group_name)
    try:
        group.promote_group()
    except storops_ex.VNXMirrorGroupAlreadyPromotedError:
        LOG.info('Mirror group %s was already promoted.', group_name)
    return group
def sync_mirror_group(self, group_name):
    """Synchronize all mirrors in the named MirrorView group."""
    mg = self.vnx.get_mirror_group(group_name)
    mg.sync_group()

def fracture_mirror_group(self, group_name):
    """Fracture (split) all mirrors in the named MirrorView group."""
    mg = self.vnx.get_mirror_group(group_name)
    mg.fracture_group()
def get_pool_name(self, lun_name):
lun = self.get_lun(name=lun_name)
utils.update_res_without_poll(lun)

View File

@ -20,14 +20,14 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
storops = importutils.try_import('storops')
from cinder import exception
from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume import group_types
from cinder.volume import volume_types
storops = importutils.try_import('storops')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -126,13 +126,14 @@ class ExtraSpecs(object):
PROVISION_DEFAULT = const.PROVISION_THICK
TIER_DEFAULT = None
def __init__(self, extra_specs):
def __init__(self, extra_specs, group_specs=None):
    """Wrap volume-type extra specs and optional group-type specs.

    :param extra_specs: dict of volume-type extra specs
    :param group_specs: dict of group-type specs, or None
    """
    self.specs = extra_specs
    self._provision = self._get_provision()
    self.provision = self._provision
    self._tier = self._get_tier()
    self.tier = self._tier
    self.apply_default_values()
    # Fall back to an empty dict so spec lookups never hit None.
    self.group_specs = group_specs if group_specs else {}
def apply_default_values(self):
self.provision = (ExtraSpecs.PROVISION_DEFAULT
@ -161,6 +162,11 @@ class ExtraSpecs(object):
def is_replication_enabled(self):
    """True when extra specs carry ``replication_enabled: '<is> true'``."""
    return self.specs.get('replication_enabled', '').lower() == '<is> true'
@property
def is_group_replication_enabled(self):
    """Whether the group type requests consistent group replication."""
    flag = self.group_specs.get(
        'consistent_group_replication_enabled', '')
    return flag.lower() == '<is> true'
def _parse_to_enum(self, key, enum_class):
value = (self.specs[key]
if key in self.specs else None)
@ -183,6 +189,16 @@ class ExtraSpecs(object):
return cls(specs)
@classmethod
def from_group(cls, group):
    """Build an ExtraSpecs carrying only *group*'s group-type specs.

    :param group: group object (may be None or have no group_type_id)
    :returns: ExtraSpecs with empty volume extra specs
    """
    if group and group.group_type_id:
        specs = group_types.get_group_type_specs(group.group_type_id)
    else:
        specs = {}
    return cls(extra_specs={}, group_specs=specs)
@classmethod
def from_volume_type(cls, type):
    """Build an ExtraSpecs from a volume type dict.

    NOTE: the parameter name shadows the builtin ``type``; kept as-is
    for interface compatibility with existing callers.
    """
    return cls(type['extra_specs'])
@ -490,6 +506,7 @@ class VNXMirrorView(object):
self.primary_client.fracture_image(mirror_name)
def promote_image(self, mirror_name):
    """Promote the image on the secondary array.

    Runs against the secondary client because the secondary image is
    the one becoming primary.
    """
    self.secondary_client.promote_image(mirror_name)
def destroy_mirror(self, mirror_name, secondary_lun_name):
@ -509,3 +526,25 @@ class VNXMirrorView(object):
self.remove_image(mirror_name)
self.delete_mirror(mirror_name)
self.delete_secondary_lun(lun_name=secondary_lun_name)
def create_mirror_group(self, group_name):
    """Create a mirror group on the primary array."""
    return self.primary_client.create_mirror_group(group_name)

def delete_mirror_group(self, group_name):
    """Delete the mirror group via the primary array."""
    return self.primary_client.delete_mirror_group(group_name)

def add_mirror(self, group_name, mirror_name):
    """Add a mirror to the group via the primary array."""
    return self.primary_client.add_mirror(group_name, mirror_name)

def remove_mirror(self, group_name, mirror_name):
    """Remove a mirror from the group via the primary array."""
    return self.primary_client.remove_mirror(group_name, mirror_name)

def sync_mirror_group(self, group_name):
    """Synchronize the mirror group via the primary array."""
    return self.primary_client.sync_mirror_group(group_name)

def promote_mirror_group(self, group_name):
    """Promote the mirror group on the secondary array."""
    return self.secondary_client.promote_mirror_group(group_name)

def fracture_mirror_group(self, group_name):
    """Fracture the mirror group via the primary array."""
    return self.primary_client.fracture_mirror_group(group_name)

View File

@ -73,9 +73,10 @@ class VNXDriver(driver.ManageableVD,
10.0.0 - Extend SMP size before aync migration when cloning from an
image cache volume
10.1.0 - Add QoS support
10.2.0 - Add replication group support
"""
VERSION = '10.01.00'
VERSION = '10.02.00'
VENDOR = 'Dell EMC'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "EMC_VNX_CI"
@ -338,3 +339,20 @@ class VNXDriver(driver.ManageableVD,
"""Deletes a group_snapshot."""
return self.adapter.delete_group_snapshot(
context, group_snapshot, snapshots)
def is_consistent_group_snapshot_enabled(self):
    """Report whether the backend advertises consistent group snapshots."""
    return self._stats.get('consistent_group_snapshot_enabled')

def enable_replication(self, context, group, volumes):
    """Enable replication on a group; delegates to the adapter."""
    return self.adapter.enable_replication(context, group, volumes)

def disable_replication(self, context, group, volumes):
    """Disable replication on a group; delegates to the adapter."""
    return self.adapter.disable_replication(context, group, volumes)

def failover_replication(self, context, group, volumes,
                         secondary_backend_id):
    """Fail a replication group over to *secondary_backend_id*."""
    return self.adapter.failover_replication(
        context, group, volumes, secondary_backend_id)

def get_replication_error_status(self, context, groups):
    """Delegate the replication error-status query to the adapter."""
    return self.adapter.get_replication_error_status(context, groups)

View File

@ -0,0 +1,357 @@
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow
from cinder.volume.drivers.dell_emc.vnx import utils
storops = importutils.try_import('storops')
if storops:
from storops import exception as storops_ex
LOG = logging.getLogger(__name__)
class ReplicationAdapter(object):
    """MirrorView-based replication support for the VNX driver.

    Covers per-LUN mirrors, consistent mirror groups, and failing both
    over/back between the primary and the configured secondary array.

    NOTE(review): ``self.active_backend_id`` is read in
    ``validate_backend_id``/``failover_host`` but never initialized
    here -- presumably provided by the class this mixes into; confirm
    against the concrete adapter.
    """

    def __init__(self, client=None, config=None):
        # Client for the currently active (primary) VNX array.
        self.client = client
        # Driver configuration, including replication_device entries.
        self.config = config
        # VNXMirrorView helper; None until replication is configured.
        self.mirror_view = None

    def do_setup(self):
        """No replication-specific setup is needed at driver init."""
        pass

    def setup_lun_replication(self, volume, primary_lun_id):
        """Setup replication for LUN, this only happens in primary system.

        Creates the MirrorView for a replication-enabled volume and,
        when the volume belongs to a replication-enabled group, adds
        the new mirror to that group's mirror group.

        :param volume: the volume to replicate
        :param primary_lun_id: LUN id on the primary array
        :returns: model update with ``replication_driver_data`` and
                  ``replication_status``
        """
        specs = common.ExtraSpecs.from_volume(volume)
        provision = specs.provision
        tier = specs.tier
        rep_update = {'replication_driver_data': None,
                      'replication_status':
                          fields.ReplicationStatus.DISABLED}
        mirror_name = utils.construct_mirror_name(volume)
        if specs.is_replication_enabled:
            LOG.debug('Starting setup replication '
                      'for volume: %s.', volume.id)
            lun_size = volume.size
            pool_name = utils.get_remote_pool(self.config, volume)
            emc_taskflow.create_mirror_view(
                self.mirror_view, mirror_name,
                primary_lun_id, pool_name,
                volume.name, lun_size,
                provision, tier)
            LOG.info('Successfully setup replication for %s.', volume.id)
            rep_update.update({'replication_status':
                               fields.ReplicationStatus.ENABLED})
        group_specs = common.ExtraSpecs.from_group(volume.group)
        if volume.group and group_specs.is_group_replication_enabled:
            # If in a group, add it to group then.
            LOG.debug('Starting add volume %(volume)s to group %(group)s',
                      {'volume': volume.id, 'group': volume.group.id})
            group_name = utils.construct_group_name(volume.group)
            self.client.add_mirror(group_name, mirror_name)
        return rep_update

    def create_group_replication(self, group):
        """Create the backing mirror group for a replication-enabled group.

        :param group: the group being created
        :returns: model update with the resulting replication_status
        """
        rep_update = {'replication_status': group.replication_status}
        group_specs = common.ExtraSpecs.from_group(group)
        if group_specs.is_group_replication_enabled:
            group_name = utils.construct_group_name(group)
            self.client.create_mirror_group(group_name)
            rep_update['replication_status'] = (
                fields.ReplicationStatus.ENABLED)
        return rep_update

    def add_volumes_to_group_replication(self, group, volumes):
        """Add each volume's mirror to the group's mirror group.

        No-op unless group replication is enabled on *group*.
        """
        group_specs = common.ExtraSpecs.from_group(group)
        if group_specs.is_group_replication_enabled:
            group_name = utils.construct_group_name(group)
            for volume in volumes:
                mirror_name = utils.construct_mirror_name(volume)
                self.client.add_mirror(group_name, mirror_name)

    def delete_group_replication(self, group):
        """Delete the group's backing mirror group, if replication-enabled."""
        group_specs = common.ExtraSpecs.from_group(group)
        if group_specs.is_group_replication_enabled:
            group_name = utils.construct_group_name(group)
            self.client.delete_mirror_group(group_name)

    def remove_volumes_from_group_replication(self, group, volumes):
        """Remove each volume's mirror from the group's mirror group.

        No-op unless group replication is enabled on *group*.
        """
        group_name = utils.construct_group_name(group)
        group_specs = common.ExtraSpecs.from_group(group)
        if group_specs.is_group_replication_enabled:
            for volume in volumes:
                mirror_name = utils.construct_mirror_name(volume)
                self.client.remove_mirror(group_name, mirror_name)

    def cleanup_lun_replication(self, volume):
        """Tear down replication for *volume* before deletion.

        The mirror is removed from its group first (group membership
        must be cleared before the mirror can be destroyed), then the
        mirror and its secondary LUN are destroyed.
        """
        specs = common.ExtraSpecs.from_volume(volume)
        group_specs = common.ExtraSpecs.from_group(volume.group)
        if group_specs.is_group_replication_enabled:
            # If in a group, remove from group first.
            group_name = utils.construct_group_name(volume.group)
            mirror_name = utils.construct_mirror_name(volume)
            self.client.remove_mirror(group_name, mirror_name)
        if specs.is_replication_enabled:
            LOG.debug('Starting cleanup replication for volume: '
                      '%s.', volume.id)
            mirror_name = utils.construct_mirror_name(volume)
            mirror_view = self.build_mirror_view(self.config, True)
            mirror_view.destroy_mirror(mirror_name, volume.name)
            LOG.info(
                'Successfully destroyed replication for volume: %s',
                volume.id)

    def append_replication_stats(self, stats):
        """Merge replication capability fields into the backend stats.

        :param stats: stats dict, mutated in place
        """
        if self.mirror_view:
            stats['replication_enabled'] = True
            # Only *consistent* group replication is advertised.
            stats['group_replication_enabled'] = False
            stats['consistent_group_replication_enabled'] = True
            stats['replication_count'] = 1
            stats['replication_type'] = ['sync']
        else:
            stats['replication_enabled'] = False
        stats['replication_targets'] = [
            device.backend_id for device in common.ReplicationDeviceList(
                self.config)]

    def build_mirror_view(self, configuration, failover=True):
        """Builds a mirror view operation class.

        :param configuration: driver configuration
        :param failover: True if from primary to configured array,
                         False if from configured array to primary.
        :returns: a ``common.VNXMirrorView``, or None when replication
                  is not configured
        :raises exception.InvalidInput: if the MirrorView/S enabler is
            missing, or more than one replication target is configured
        """
        rep_devices = configuration.replication_device
        if not rep_devices:
            LOG.info('Replication is not configured on backend: %s.',
                     configuration.config_group)
            return None
        elif len(rep_devices) == 1:
            if not self.client.is_mirror_view_enabled():
                error_msg = _('Replication is configured, '
                              'but no MirrorView/S enabler installed on VNX.')
                raise exception.InvalidInput(reason=error_msg)
            rep_list = common.ReplicationDeviceList(configuration)
            device = rep_list[0]
            # Client pointing at the configured remote array.
            secondary_client = client.Client(
                ip=device.san_ip,
                username=device.san_login,
                password=device.san_password,
                scope=device.storage_vnx_authentication_type,
                naviseccli=self.client.naviseccli,
                sec_file=device.storage_vnx_security_file_dir)
            if failover:
                mirror_view = common.VNXMirrorView(
                    self.client, secondary_client)
            else:
                # For fail-back, we need to take care of reversed ownership.
                mirror_view = common.VNXMirrorView(
                    secondary_client, self.client)
            return mirror_view
        else:
            error_msg = _('VNX Cinder driver does not support '
                          'multiple replication targets.')
            raise exception.InvalidInput(reason=error_msg)

    def validate_backend_id(self, backend_id):
        """Validate the requested fail-over target.

        :raises exception.InvalidReplicationTarget: for an unknown id
        """
        # Currently, VNX driver only supports 1 remote device.
        if self.active_backend_id:
            # Already failed over: only fail-back ('default') is valid.
            if backend_id != 'default':
                raise exception.InvalidReplicationTarget(
                    reason=_('Invalid backend_id specified.'))
        elif backend_id not in (
                common.ReplicationDeviceList.get_backend_ids(self.config)):
            raise exception.InvalidReplicationTarget(
                reason=_('Invalid backend_id specified.'))

    def failover_host(self, context, volumes, secondary_backend_id, groups):
        """Fails over the volume back and forth.

        Driver needs to update following info for failed-over volume:
        1. provider_location: update serial number and lun id
        2. replication_status: new status for replication-enabled volume

        :returns: (active backend id, volume updates, group updates)
        """
        volume_update_list = []
        group_update_list = []
        self.validate_backend_id(secondary_backend_id)
        if secondary_backend_id != 'default':
            rep_status = fields.ReplicationStatus.FAILED_OVER
            mirror_view = self.build_mirror_view(self.config, True)
        else:
            rep_status = fields.ReplicationStatus.ENABLED
            mirror_view = self.build_mirror_view(self.config, False)

        def failover_volume(volume, new_status):
            # Promote one standalone mirror and append its model update;
            # on failure the volume is flagged FAILOVER_ERROR but the
            # overall host fail-over continues.
            mirror_name = utils.construct_mirror_name(volume)
            provider_location = volume.provider_location
            try:
                mirror_view.promote_image(mirror_name)
            except storops_ex.VNXMirrorException as ex:
                LOG.error(
                    'Failed to failover volume %(volume_id)s '
                    'to %(target)s: %(error)s.',
                    {'volume_id': volume.id,
                     'target': secondary_backend_id,
                     'error': ex})
                new_status = fields.ReplicationStatus.FAILOVER_ERROR
            else:
                # Transfer ownership to secondary_backend_id and
                # update provider_location field
                secondary_client = mirror_view.secondary_client
                provider_location = utils.update_remote_provider_location(
                    volume, secondary_client)
            model_update = {'volume_id': volume.id,
                            'updates':
                                {'replication_status': new_status,
                                 'provider_location': provider_location}}
            volume_update_list.append(model_update)

        # Fail over groups if needed.
        def failover_group(group):
            # Decide whether the group must move (based on direction and
            # its current replication_status), then delegate to
            # failover_replication and reshape its return values.
            is_failover_needed = False
            if (secondary_backend_id != 'default' and
                    group.replication_status ==
                    fields.ReplicationStatus.ENABLED):
                # Group is on the primary VNX, failover is needed.
                LOG.info('%(group_id)s will be failed over to secondary'
                         '%(secondary_backend_id)s.',
                         {'group_id': group.id,
                          'secondary_backend_id': secondary_backend_id})
                is_failover_needed = True
            if (secondary_backend_id == 'default' and
                    group.replication_status ==
                    fields.ReplicationStatus.FAILED_OVER):
                # Group is on the secondary VNX, failover is needed.
                LOG.info('%(group_id)s will be failed over to primary'
                         '%(secondary_backend_id)s.',
                         {'group_id': group.id,
                          'secondary_backend_id': secondary_backend_id})
                is_failover_needed = True
            if is_failover_needed:
                group_update, volume_update_list = self.failover_replication(
                    context, group, group.volumes, secondary_backend_id)
                return ({'group_id': group.id, 'updates': group_update},
                        [{'volume_id': vol_update['id'],
                          'updates': vol_update}
                         for vol_update in volume_update_list])
            return [], []

        for group in groups:
            specs = common.ExtraSpecs.from_group(group)
            if specs.is_group_replication_enabled:
                group_update, vols_in_group_update = failover_group(group)
                if group_update:
                    group_update_list.append(group_update)
                    volume_update_list.extend(vols_in_group_update)
        # Filter out the volumes in passed-in groups.
        group_ids = [group.id for group in groups]
        for volume in [volume for volume in volumes
                       if volume.group_id not in group_ids]:
            specs = common.ExtraSpecs.from_volume(volume)
            if specs.is_replication_enabled:
                failover_volume(volume, rep_status)
        # After failover, the secondary is now the primary,
        # any subsequent request will be redirected to it.
        self.client = mirror_view.secondary_client
        # Remember the current backend id.
        self.active_backend_id = (None if secondary_backend_id == 'default'
                                  else secondary_backend_id)
        return secondary_backend_id, volume_update_list, group_update_list

    def enable_replication(self, context, group, volumes):
        """Enable the group replication.

        Note: this will not interfere with the replication on individual
        LUNs.
        """
        self.create_group_replication(group)
        self.add_volumes_to_group_replication(group, volumes)
        return {}, []

    def disable_replication(self, context, group, volumes):
        """Disable the group replication.

        Note: This will not disable the replication on individual LUNs.
        """
        self.remove_volumes_from_group_replication(group, volumes)
        self.delete_group_replication(group)
        return {}, []

    def failover_replication(self, context, group, volumes,
                             secondary_backend_id):
        """Fail-over the consistent mirror group.

        Note:
        VNX supports fail over all the mirrors in a group as a whole,
        no need to handle each mirror one by one.

        :returns: (group model update, list of volume model updates)
        """
        volume_update_list = []
        group_update = {'replication_status': group.replication_status}
        if secondary_backend_id != 'default':
            mirror_view = self.build_mirror_view(self.config, True)
            rep_status = fields.ReplicationStatus.FAILED_OVER
        else:
            mirror_view = self.build_mirror_view(self.config, False)
            rep_status = fields.ReplicationStatus.ENABLED
        # Update volume provider_location
        secondary_client = mirror_view.secondary_client
        group_name = utils.construct_group_name(group)
        try:
            mirror_view.promote_mirror_group(group_name)
        except storops_ex.VNXMirrorException as ex:
            LOG.error(
                'Failed to failover group %(group_id)s '
                'to %(target)s: %(error)s.',
                {'group_id': group.id,
                 'target': secondary_backend_id,
                 'error': ex})
            rep_status = fields.ReplicationStatus.FAILOVER_ERROR
        for volume in volumes:
            volume_update = {
                'id': volume.id,
                'provider_location': utils.update_remote_provider_location(
                    volume, secondary_client),
                'replication_status': rep_status}
            volume_update_list.append(volume_update)
        group_update['replication_status'] = rep_status
        return group_update, volume_update_list

    def get_replication_error_status(self, context, groups):
        """The failover only happens manually, no need to update status."""
        return [], []

View File

@ -16,7 +16,6 @@
from oslo_log import log as logging
from oslo_utils import importutils
storops = importutils.try_import('storops')
import taskflow.engines
from taskflow.patterns import linear_flow
@ -29,6 +28,8 @@ from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import const
from cinder.volume.drivers.dell_emc.vnx import utils
storops = importutils.try_import('storops')
LOG = logging.getLogger(__name__)

View File

@ -22,7 +22,9 @@ from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import uuidutils
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import utils as vol_utils
@ -30,6 +32,7 @@ from cinder.volume import utils as vol_utils
storops = importutils.try_import('storops')
storops = importutils.try_import('storops')
LOG = logging.getLogger(__name__)
@ -103,6 +106,18 @@ def update_provider_location(provider_location, items):
return dump_provider_location(location_dict)
def update_remote_provider_location(volume, client):
    """Update volume provider_location after volume failed-over.

    Rewrites the serial number and LUN id inside provider_location so
    that they reference the array reachable through *client*.
    """
    new_items = {
        'system': client.get_serial(),
        'id': six.text_type(client.get_lun(name=volume.name).lun_id),
    }
    return update_provider_location(volume.provider_location, new_items)
def get_pool_from_host(host):
    """Extract the pool name from a 'host@backend#pool' host string."""
    return vol_utils.extract_host(host, 'pool')
@ -212,6 +227,15 @@ def construct_mirror_name(volume):
return 'mirror_' + six.text_type(volume.id)
def construct_group_name(group):
    """Constructs MirrorGroup name for volumes.

    VNX only allows for 32-character group name, so
    trim the dash(-) from group id.
    """
    return ''.join(group.id.split('-'))
def construct_tmp_cg_snap_name(cg_name):
    """Return CG snapshot name."""
    # 'tmp-snap-' prefix marks snapshots created transiently for CG ops.
    return 'tmp-snap-' + six.text_type(cg_name)
@ -259,6 +283,49 @@ def get_migration_rate(volume):
return storops.VNXMigrationRate.HIGH
def check_type_matched(volume):
    """Check volume type and group type

    This will make sure they do not conflict with each other.
    Replication must be enabled on both the volume type and the group
    type, or on neither.

    :param volume: volume to be checked
    :returns: None
    :raises: InvalidInput
    """
    # If volume is not a member of group, skip this check anyway.
    if not volume.group:
        return
    extra_specs = common.ExtraSpecs.from_volume(volume)
    group_specs = common.ExtraSpecs.from_group(volume.group)
    if not (group_specs.is_group_replication_enabled ==
            extra_specs.is_replication_enabled):
        msg = _('Replication should be enabled or disabled for both '
                'volume or group. volume replication status: %(vol_status)s, '
                'group replication status: %(group_status)s') % {
            'vol_status': extra_specs.is_replication_enabled,
            'group_status': group_specs.is_group_replication_enabled}
        raise exception.InvalidInput(reason=msg)
def check_rep_status_matched(group):
    """Check replication status for group.

    Group status must be enabled before proceeding.

    :param group: group to be checked
    :raises: InvalidInput if a replication-enabled group is not in the
        ENABLED replication status
    """
    group_specs = common.ExtraSpecs.from_group(group)
    if group_specs.is_group_replication_enabled:
        if group.replication_status != fields.ReplicationStatus.ENABLED:
            msg = _('Replication status should be %s for replication-enabled '
                    'group.') % fields.ReplicationStatus.ENABLED
            raise exception.InvalidInput(reason=msg)
    else:
        LOG.info('Replication is not enabled on group %s, skip status check.',
                 group.id)
def update_res_without_poll(res):
    """Refresh *res* from the array with storops polling suppressed."""
    no_poll = res.with_no_poll()
    with no_poll:
        res.update()

View File

@ -0,0 +1,3 @@
---
features:
- Add consistent replication group support in VNX cinder driver.