# Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
NetApp Data ONTAP cDOT multi-SVM storage driver library.

This library extends the abstract base library and completes the multi-SVM
functionality needed by the cDOT multi-SVM Manila driver. This library
variant creates Data ONTAP storage virtual machines (i.e. 'vservers')
as needed to provision shares.
"""

import re

from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import units

from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
from manila.share import share_types
from manila.share import utils as share_utils
from manila import utils

LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
DEFAULT_MTU = 1500
CLUSTER_IPSPACES = ('Cluster', 'Default')


class NetAppCmodeMultiSVMFileStorageLibrary(
        lib_base.NetAppCmodeFileStorageLibrary):

    @na_utils.trace
    def check_for_setup_error(self):

        if self._have_cluster_creds:
            if self.configuration.netapp_vserver:
                msg = ('Vserver is specified in the configuration. This is '
                       'ignored when the driver is managing share servers.')
                LOG.warning(msg)

        else:  # only have vserver creds, which is an error in multi_svm mode
            msg = _('Cluster credentials must be specified in the '
                    'configuration when the driver is managing share '
                    'servers.')
            raise exception.InvalidInput(reason=msg)

        # Ensure one or more aggregates are available.
        if not self._find_matching_aggregates():
            msg = _('No aggregates are available for provisioning shares. '
                    'Ensure that the configuration option '
                    'netapp_aggregate_name_search_pattern is set correctly.')
            raise exception.NetAppException(msg)

        (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
            check_for_setup_error())

    @na_utils.trace
    def _get_vserver(self, share_server=None, vserver_name=None,
                     backend_name=None):
        if share_server:
            backend_details = share_server.get('backend_details')
            vserver = backend_details.get(
                'vserver_name') if backend_details else None

            if not vserver:
                msg = _('Vserver name is absent in backend details. Please '
                        'check whether Vserver was created properly.')
                raise exception.VserverNotSpecified(msg)
        elif vserver_name:
            vserver = vserver_name
        else:
            msg = _('Share server or vserver name not provided')
            raise exception.InvalidInput(reason=msg)

        if backend_name:
            vserver_client = data_motion.get_client_for_backend(
                backend_name, vserver
            )
        else:
            vserver_client = self._get_api_client(vserver)

        if not vserver_client.vserver_exists(vserver):
            raise exception.VserverNotFound(vserver=vserver)

        return vserver, vserver_client

    def _get_ems_pool_info(self):
        return {
            'pools': {
                'vserver': None,
                'aggregates': self._find_matching_aggregates(),
            },
        }

    @na_utils.trace
    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        self._client.prune_deleted_nfs_export_policies()
        self._client.prune_deleted_snapshots()
        self._client.remove_unused_qos_policy_groups()

        (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
            _handle_housekeeping_tasks())

    @na_utils.trace
    def _find_matching_aggregates(self):
        """Find all aggregates that match the configured name pattern."""
        aggregate_names = self._client.list_non_root_aggregates()
        pattern = self.configuration.netapp_aggregate_name_search_pattern
        return [aggr_name for aggr_name in aggregate_names
                if re.match(pattern, aggr_name)]
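    # Illustrative example (pattern value hypothetical): with a configured
    # netapp_aggregate_name_search_pattern of '^(?!aggr0).+' and aggregates
    # ['aggr0_root', 'aggr1', 'aggr2'], _find_matching_aggregates() returns
    # ['aggr1', 'aggr2']; re.match() anchors at the start of each name.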

    @na_utils.trace
    def setup_server(self, network_info, metadata=None):
        """Creates and configures new Vserver."""
        vlan = network_info['segmentation_id']
        ports = {}
        for network_allocation in network_info['network_allocations']:
            ports[network_allocation['id']] = network_allocation['ip_address']

        nfs_config = self._default_nfs_config
        if (self.is_nfs_config_supported and metadata and
                'share_type_id' in metadata):
            extra_specs = share_types.get_share_type_extra_specs(
                metadata['share_type_id'])
            self._check_nfs_config_extra_specs_validity(extra_specs)
            nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)

        @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
        def setup_server_with_lock():
            LOG.debug('Creating server %s', network_info['server_id'])
            self._validate_network_type(network_info)

            vserver_name = self._get_vserver_name(network_info['server_id'])
            server_details = {
                'vserver_name': vserver_name,
                'ports': jsonutils.dumps(ports),
            }

            if self.is_nfs_config_supported:
                server_details['nfs_config'] = jsonutils.dumps(nfs_config)

            try:
                self._create_vserver(vserver_name, network_info, metadata,
                                     nfs_config=nfs_config)
            except Exception as e:
                e.detail_data = {'server_details': server_details}
                raise

            return server_details

        return setup_server_with_lock()
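    # For illustration only (values hypothetical), the details persisted for
    # a new share server look roughly like:
    #     {'vserver_name': 'os_fa8a1f7e...',
    #      'ports': '{"<allocation-id>": "10.0.0.2"}',
    #      'nfs_config': '{"tcp-max-xfer-size": "65536", ...}'}
    # 'ports' and 'nfs_config' are JSON-encoded with jsonutils because
    # backend_details values must be stored as strings.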

    @na_utils.trace
    def _check_nfs_config_extra_specs_validity(self, extra_specs):
        """Check if the nfs config extra_spec has valid values."""
        int_extra_specs = ['netapp:tcp_max_xfer_size',
                           'netapp:udp_max_xfer_size']
        for key in int_extra_specs:
            if key in extra_specs:
                self._check_if_extra_spec_is_positive(
                    extra_specs[key], key)

    @na_utils.trace
    def _check_if_extra_spec_is_positive(self, value, key):
        """Check that the extra_spec holds a non-negative integer value."""
        if int(value) < 0:
            args = {'value': value, 'key': key}
            msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" '
                    'used by share server setup.')
            raise exception.NetAppException(msg % args)

    @na_utils.trace
    def _get_nfs_config_provisioning_options(self, specs):
        """Return the NFS config provisioning options."""
        nfs_config = self.get_string_provisioning_options(
            specs, self.NFS_CONFIG_EXTRA_SPECS_MAP)

        # Replace options that were not set with the default values.
        for k, v in nfs_config.items():
            if v is None:
                nfs_config[k] = self._default_nfs_config[k]

        return nfs_config
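    # Illustrative sketch (key names assumed from the base library's
    # NFS_CONFIG_EXTRA_SPECS_MAP): extra specs such as
    #     {'netapp:tcp_max_xfer_size': '65536'}
    # would yield an nfs_config like
    #     {'tcp-max-xfer-size': '65536',
    #      'udp-max-xfer-size': <backend default>}
    # where the unset UDP value falls back to _default_nfs_config.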

    @na_utils.trace
    def _validate_network_type(self, network_info):
        """Raises exception if the segmentation type is incorrect."""
        if network_info['network_type'] not in SUPPORTED_NETWORK_TYPES:
            msg = _('The specified network type %s is unsupported by the '
                    'NetApp clustered Data ONTAP driver')
            raise exception.NetworkBadConfigurationException(
                reason=msg % network_info['network_type'])

    @na_utils.trace
    def _get_vserver_name(self, server_id):
        return self.configuration.netapp_vserver_name_template % server_id
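    # Sketch: assuming the default netapp_vserver_name_template of 'os_%s',
    # a server id of 'fa8a1f7e...' yields the vserver name 'os_fa8a1f7e...'.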

    @na_utils.trace
    def _create_vserver(self, vserver_name, network_info, metadata=None,
                        nfs_config=None):
        """Creates Vserver with given parameters if it doesn't exist."""

        if self._client.vserver_exists(vserver_name):
            msg = _('Vserver %s already exists.')
            raise exception.NetAppException(msg % vserver_name)

        # NOTE(dviroel): check if this vserver will be a data protection
        # server
        is_dp_destination = False
        if metadata and metadata.get('migration_destination') is True:
            is_dp_destination = True
            msg = _("Starting creation of a vserver with 'dp_destination' "
                    "subtype.")
            LOG.debug(msg)

        # NOTE(lseki): If there's already an ipspace created for the same VLAN
        # port, reuse it. It will be named after the previously created share
        # server's neutron subnet id.
        node_name = self._client.list_cluster_nodes()[0]
        port = self._get_node_data_port(node_name)
        vlan = network_info['segmentation_id']
        ipspace_name = self._client.get_ipspace_name_for_vlan_port(
            node_name, port, vlan) or self._create_ipspace(network_info)

        if is_dp_destination:
            LOG.debug('Creating a new Vserver (%s) for data protection.',
                      vserver_name)
            self._client.create_vserver_dp_destination(
                vserver_name,
                self._find_matching_aggregates(),
                ipspace_name)
            # Set up port and broadcast domain for the current ipspace
            self._create_port_and_broadcast_domain(ipspace_name, network_info)
        else:
            LOG.debug('Vserver %s does not exist, creating.', vserver_name)
            self._client.create_vserver(
                vserver_name,
                self.configuration.netapp_root_volume_aggregate,
                self.configuration.netapp_root_volume,
                self._find_matching_aggregates(),
                ipspace_name)

        vserver_client = self._get_api_client(vserver=vserver_name)

        security_services = network_info.get('security_services')
        try:
            self._setup_network_for_vserver(
                vserver_name, vserver_client, network_info, ipspace_name,
                security_services=security_services, nfs_config=nfs_config)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to configure Vserver.")
                # NOTE(dviroel): At this point, the lock was already
                # acquired by the caller of _create_vserver.
                self._delete_vserver(vserver_name,
                                     security_services=security_services,
                                     needs_lock=False)

    def _setup_network_for_vserver(self, vserver_name, vserver_client,
                                   network_info, ipspace_name,
                                   enable_nfs=True, security_services=None,
                                   nfs_config=None):
        self._create_vserver_lifs(vserver_name,
                                  vserver_client,
                                  network_info,
                                  ipspace_name)

        self._create_vserver_admin_lif(vserver_name,
                                       vserver_client,
                                       network_info,
                                       ipspace_name)

        self._create_vserver_routes(vserver_client,
                                    network_info)
        if enable_nfs:
            vserver_client.enable_nfs(
                self.configuration.netapp_enabled_share_protocols,
                nfs_config=nfs_config)

        if security_services:
            self._client.setup_security_services(security_services,
                                                 vserver_client,
                                                 vserver_name)

    def _get_valid_ipspace_name(self, network_id):
        """Get IPspace name according to network id."""
        return 'ipspace_' + network_id.replace('-', '_')
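    # Example (id value hypothetical): a neutron subnet id of
    # '51bbdc29-0e3c-4786-9161-12f8aa180983' maps to
    # 'ipspace_51bbdc29_0e3c_4786_9161_12f8aa180983'; hyphens are replaced
    # because they are not valid in ONTAP IPspace names.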

    @na_utils.trace
    def _create_ipspace(self, network_info):
        """If supported, create an IPspace for a new Vserver."""

        if not self._client.features.IPSPACES:
            return None

        if (network_info['network_allocations'][0]['network_type']
                not in SEGMENTED_NETWORK_TYPES):
            return client_cmode.DEFAULT_IPSPACE

        # NOTE(cknight): Neutron needs cDOT IP spaces because it can provide
        # overlapping IP address ranges for different subnets. That is not
        # believed to be an issue for any of Manila's other network plugins.
        ipspace_id = network_info.get('neutron_subnet_id')
        if not ipspace_id:
            return client_cmode.DEFAULT_IPSPACE

        ipspace_name = self._get_valid_ipspace_name(ipspace_id)
        self._client.create_ipspace(ipspace_name)

        return ipspace_name

    @na_utils.trace
    def _create_vserver_lifs(self, vserver_name, vserver_client, network_info,
                             ipspace_name):
        """Create Vserver data logical interfaces (LIFs)."""

        nodes = self._client.list_cluster_nodes()
        node_network_info = zip(nodes, network_info['network_allocations'])

        for node_name, network_allocation in node_network_info:
            lif_name = self._get_lif_name(node_name, network_allocation)
            self._create_lif(vserver_client, vserver_name, ipspace_name,
                             node_name, lif_name, network_allocation)

    @na_utils.trace
    def _create_vserver_admin_lif(self, vserver_name, vserver_client,
                                  network_info, ipspace_name):
        """Create Vserver admin LIF, if defined."""

        network_allocations = network_info.get('admin_network_allocations')
        if not network_allocations:
            LOG.info('No admin network defined for Vserver %s.',
                     vserver_name)
            return

        node_name = self._client.list_cluster_nodes()[0]
        network_allocation = network_allocations[0]
        lif_name = self._get_lif_name(node_name, network_allocation)

        self._create_lif(vserver_client, vserver_name, ipspace_name,
                         node_name, lif_name, network_allocation)

    @na_utils.trace
    def _create_vserver_routes(self, vserver_client, network_info):
        """Create Vserver route and set gateways."""
        route_gateways = []
        # NOTE(gouthamr): Use the gateway from the tenant subnet/s
        # for the static routes. Do not configure a route for the admin
        # subnet because fast path routing will work for incoming
        # connections and there are no requirements for outgoing
        # connections on the admin network yet.
        for net_allocation in (network_info['network_allocations']):
            if net_allocation['gateway'] not in route_gateways:
                vserver_client.create_route(net_allocation['gateway'])
                route_gateways.append(net_allocation['gateway'])

    @na_utils.trace
    def _get_node_data_port(self, node):
        port_names = self._client.list_node_data_ports(node)
        pattern = self.configuration.netapp_port_name_search_pattern
        matched_port_names = [port_name for port_name in port_names
                              if re.match(pattern, port_name)]
        if not matched_port_names:
            raise exception.NetAppException(
                _('Could not find eligible network ports on node %s on which '
                  'to create Vserver LIFs.') % node)
        return matched_port_names[0]

    def _get_lif_name(self, node_name, network_allocation):
        """Get LIF name based on template from manila.conf file."""
        lif_name_args = {
            'node': node_name,
            'net_allocation_id': network_allocation['id'],
        }
        return self.configuration.netapp_lif_name_template % lif_name_args
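    # Sketch: assuming the default netapp_lif_name_template of
    # 'os_%(net_allocation_id)s', an allocation id of 'ab12...' produces the
    # LIF name 'os_ab12...'; the template may also reference %(node)s.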

    @na_utils.trace
    def _create_lif(self, vserver_client, vserver_name, ipspace_name,
                    node_name, lif_name, network_allocation):
        """Creates LIF for Vserver."""

        port = self._get_node_data_port(node_name)
        ip_address = network_allocation['ip_address']
        netmask = utils.cidr_to_netmask(network_allocation['cidr'])
        vlan = network_allocation['segmentation_id']
        network_mtu = network_allocation.get('mtu')
        mtu = network_mtu or DEFAULT_MTU

        if not vserver_client.network_interface_exists(
                vserver_name, node_name, port, ip_address, netmask, vlan):
            self._client.create_network_interface(
                ip_address, netmask, vlan, node_name, port, vserver_name,
                lif_name, ipspace_name, mtu)

    @na_utils.trace
    def _create_port_and_broadcast_domain(self, ipspace_name, network_info):
        nodes = self._client.list_cluster_nodes()
        node_network_info = zip(nodes, network_info['network_allocations'])

        for node_name, network_allocation in node_network_info:

            port = self._get_node_data_port(node_name)
            vlan = network_allocation['segmentation_id']
            network_mtu = network_allocation.get('mtu')
            mtu = network_mtu or DEFAULT_MTU

            self._client.create_port_and_broadcast_domain(
                node_name, port, vlan, mtu, ipspace_name)

    @na_utils.trace
    def get_network_allocations_number(self):
        """Get number of network interfaces to be created."""
        return len(self._client.list_cluster_nodes())

    @na_utils.trace
    def get_admin_network_allocations_number(self, admin_network_api):
        """Get number of network allocations for creating admin LIFs."""
        return 1 if admin_network_api else 0

    @na_utils.trace
    def teardown_server(self, server_details, security_services=None):
        """Teardown share server."""
        vserver = server_details.get(
            'vserver_name') if server_details else None

        if not vserver:
            LOG.warning("Vserver not specified for share server being "
                        "deleted. Deletion of share server record will "
                        "proceed anyway.")
            return

        elif not self._client.vserver_exists(vserver):
            LOG.warning("Could not find Vserver for share server being "
                        "deleted: %s. Deletion of share server "
                        "record will proceed anyway.", vserver)
            return

        self._delete_vserver(vserver, security_services=security_services)

    @na_utils.trace
    def _delete_vserver(self, vserver, security_services=None,
                        needs_lock=True):
        """Delete a Vserver plus IPspace and security services as needed."""

        ipspace_name = self._client.get_vserver_ipspace(vserver)

        vserver_client = self._get_api_client(vserver=vserver)
        network_interfaces = vserver_client.get_network_interfaces()
        snapmirror_policies = self._client.get_snapmirror_policies(vserver)

        interfaces_on_vlans = []
        vlans = []
        for interface in network_interfaces:
            if '-' in interface['home-port']:
                interfaces_on_vlans.append(interface)
                vlans.append(interface['home-port'])
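        # The home-port of a LIF on a VLAN is reported as '<port>-<vlan-id>'
        # (e.g. 'e0d-1002', value hypothetical), which is why the hyphen is
        # used to detect VLAN ports here and to extract the VLAN id below.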

        if vlans:
            vlan_id = '-'.join(sorted(set(vlans))).split('-')[-1]
        else:
            vlan_id = None

        def _delete_vserver_without_lock():
            # NOTE(dviroel): always delete all policies before deleting the
            # vserver
            for policy in snapmirror_policies:
                vserver_client.delete_snapmirror_policy(policy)

            # NOTE(dviroel): Attempt to delete all vserver peerings
            # created by replication
            self._delete_vserver_peers(vserver)

            self._client.delete_vserver(vserver,
                                        vserver_client,
                                        security_services=security_services)
            ipspace_deleted = False
            if (ipspace_name and ipspace_name not in CLUSTER_IPSPACES
                    and not self._client.ipspace_has_data_vservers(
                        ipspace_name)):
                self._client.delete_ipspace(ipspace_name)
                ipspace_deleted = True

            if not ipspace_name or ipspace_deleted:
                # NOTE(dviroel): only delete VLANs if they are not being used
                # by any IPspaces and data vservers.
                self._delete_vserver_vlans(interfaces_on_vlans)

        @utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
        def _delete_vserver_with_lock():
            _delete_vserver_without_lock()

        if needs_lock:
            return _delete_vserver_with_lock()
        else:
            return _delete_vserver_without_lock()

    @na_utils.trace
    def _delete_vserver_vlans(self, network_interfaces_on_vlans):
        """Delete the Vserver's VLAN configuration from ports."""
        for interface in network_interfaces_on_vlans:
            try:
                home_port = interface['home-port']
                port, vlan = home_port.split('-')
                node = interface['home-node']
                self._client.delete_vlan(node, port, vlan)
            except exception.NetAppException:
                LOG.exception("Deleting Vserver VLAN failed.")

    @na_utils.trace
    def _delete_vserver_peers(self, vserver):
        vserver_peers = self._get_vserver_peers(vserver=vserver)
        for peer in vserver_peers:
            self._delete_vserver_peer(peer.get('vserver'),
                                      peer.get('peer-vserver'))

    def get_configured_ip_versions(self):
        versions = [4]
        options = self._client.get_net_options()
        if options['ipv6-enabled']:
            versions.append(6)
        return versions
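    # For example, a backend with IPv6 enabled reports [4, 6]; IPv4 support
    # is always assumed to be present.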

    @na_utils.trace
    def create_replica(self, context, replica_list, new_replica,
                       access_rules, share_snapshots, share_server=None):
        """Creates the new replica on this backend and sets up SnapMirror.

        It creates the peering between the associated vservers before
        creating the share replica and setting up the SnapMirror.
        """
        # 1. Retrieve source and destination vservers from both replicas,
        # active and new_replica
        src_vserver, dst_vserver = self._get_vservers_from_replicas(
            context, replica_list, new_replica)

        # 2. Retrieve the active replica host's client and cluster name
        src_replica = self.find_active_replica(replica_list)

        src_replica_host = share_utils.extract_host(
            src_replica['host'], level='backend_name')
        src_replica_client = data_motion.get_client_for_backend(
            src_replica_host, vserver_name=src_vserver)
        # Cluster name is needed for setting up the vserver peering
        src_replica_cluster_name = src_replica_client.get_cluster_name()

        # 3. Retrieve new replica host's client
        new_replica_host = share_utils.extract_host(
            new_replica['host'], level='backend_name')
        new_replica_client = data_motion.get_client_for_backend(
            new_replica_host, vserver_name=dst_vserver)
        new_replica_cluster_name = new_replica_client.get_cluster_name()

        if (dst_vserver != src_vserver
                and not self._get_vserver_peers(dst_vserver, src_vserver)):
            # 3.1. Request vserver peer creation from new_replica's host
            # to active replica's host
            new_replica_client.create_vserver_peer(
                dst_vserver, src_vserver,
                peer_cluster_name=src_replica_cluster_name)

            # 3.2. Accept the vserver peering using active replica host's
            # client (inter-cluster only)
            if new_replica_cluster_name != src_replica_cluster_name:
                src_replica_client.accept_vserver_peer(src_vserver,
                                                       dst_vserver)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
                create_replica(context, replica_list, new_replica,
                               access_rules, share_snapshots))

    def delete_replica(self, context, replica_list, replica, share_snapshots,
                       share_server=None):
        """Removes the replica on this backend and destroys SnapMirror.

        Removes the replica, destroys the SnapMirror, and deletes the vserver
        peering if needed.
        """
        vserver, peer_vserver = self._get_vservers_from_replicas(
            context, replica_list, replica)
        super(NetAppCmodeMultiSVMFileStorageLibrary, self).delete_replica(
            context, replica_list, replica, share_snapshots)

        # If no SnapMirror relationships remain in either direction and a
        # vserver peering exists, delete the peering.
        snapmirrors = self._get_snapmirrors(vserver, peer_vserver)
        snapmirrors_from_peer = self._get_snapmirrors(peer_vserver, vserver)
        peers = self._get_vserver_peers(peer_vserver, vserver)
        if not (snapmirrors or snapmirrors_from_peer) and peers:
            self._delete_vserver_peer(peer_vserver, vserver)

    def manage_server(self, context, share_server, identifier,
                      driver_options):
        """Manages a vserver by renaming it and returning backend_details."""
        new_vserver_name = self._get_vserver_name(share_server['id'])
        old_vserver_name = self._get_correct_vserver_old_name(identifier)
        if new_vserver_name != old_vserver_name:
            self._client.rename_vserver(old_vserver_name, new_vserver_name)

        backend_details = {
            'vserver_name': new_vserver_name,
        }

        if self.is_nfs_config_supported:
            nfs_config = self._client.get_nfs_config(
                list(self.NFS_CONFIG_EXTRA_SPECS_MAP.values()),
                new_vserver_name)
            backend_details['nfs_config'] = jsonutils.dumps(nfs_config)

        return new_vserver_name, backend_details

    def unmanage_server(self, server_details, security_services=None):
        pass

    def get_share_server_network_info(
            self, context, share_server, identifier, driver_options):
        """Returns a list of IPs for each vserver network interface."""
        vserver_name = self._get_correct_vserver_old_name(identifier)

        vserver, vserver_client = self._get_vserver(vserver_name=vserver_name)

        interfaces = vserver_client.get_network_interfaces()
        allocations = []
        for lif in interfaces:
            allocations.append(lif['address'])
        return allocations

    def _get_correct_vserver_old_name(self, identifier):

        # If the identifier does not resolve to an existing vserver, assume
        # it still needs the vserver name template applied.
        if not self._client.vserver_exists(identifier):
            return self._get_vserver_name(identifier)
        return identifier

    def _get_snapmirrors(self, vserver, peer_vserver):
        return self._client.get_snapmirrors(
            source_vserver=vserver, dest_vserver=peer_vserver)

    def _get_vservers_from_replicas(self, context, replica_list, new_replica):
        active_replica = self.find_active_replica(replica_list)

        dm_session = data_motion.DataMotionSession()
        vserver = dm_session.get_vserver_from_share(active_replica)
        peer_vserver = dm_session.get_vserver_from_share(new_replica)

        return vserver, peer_vserver

    def _get_vserver_peers(self, vserver=None, peer_vserver=None):
        return self._client.get_vserver_peers(vserver, peer_vserver)

    def _create_vserver_peer(self, context, vserver, peer_vserver):
        self._client.create_vserver_peer(vserver, peer_vserver)

    def _delete_vserver_peer(self, vserver, peer_vserver):
        self._client.delete_vserver_peer(vserver, peer_vserver)

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None, parent_share=None):
        # NOTE(dviroel): If both parent and child shares are in the same host,
        # they belong to the same cluster, and we can skip all the processing
        # below.
        if parent_share['host'] != share['host']:
            # 1. Retrieve source and destination vservers from source and
            # destination shares
            dm_session = data_motion.DataMotionSession()
            src_vserver = dm_session.get_vserver_from_share(parent_share)
            dest_vserver = dm_session.get_vserver_from_share_server(
                share_server)

            # 2. Retrieve the source share host's client and cluster name
            src_share_host = share_utils.extract_host(
                parent_share['host'], level='backend_name')
            src_share_client = data_motion.get_client_for_backend(
                src_share_host, vserver_name=src_vserver)
            # Cluster name is needed for setting up the vserver peering
            src_share_cluster_name = src_share_client.get_cluster_name()

            # 3. Retrieve new share host's client
            dest_share_host = share_utils.extract_host(
                share['host'], level='backend_name')
            dest_share_client = data_motion.get_client_for_backend(
                dest_share_host, vserver_name=dest_vserver)
            dest_share_cluster_name = dest_share_client.get_cluster_name()
            # If the source and destination shares are placed in different
            # clusters, the two vservers must be peered.
            if src_share_cluster_name != dest_share_cluster_name:
                if not self._get_vserver_peers(dest_vserver, src_vserver):
                    # 3.1. Request vserver peer creation from the
                    # destination share's host to the source share's host
                    dest_share_client.create_vserver_peer(
                        dest_vserver, src_vserver,
                        peer_cluster_name=src_share_cluster_name)

                    # 3.2. Accept the vserver peering using the source
                    # share host's client
                    src_share_client.accept_vserver_peer(src_vserver,
                                                         dest_vserver)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self)
                .create_share_from_snapshot(
                    context, share, snapshot, share_server=share_server,
                    parent_share=parent_share))

    @na_utils.trace
    def _is_share_server_compatible(self, share_server, expected_nfs_config):
        """Check if the share server has the given NFS config.

        None and the default NFS config are considered to be the same
        configuration.
        """
        nfs_config = share_server.get('backend_details', {}).get('nfs_config')
        share_server_nfs = jsonutils.loads(nfs_config) if nfs_config else None

        if share_server_nfs == expected_nfs_config:
            return True
        elif (share_server_nfs is None and
                expected_nfs_config == self._default_nfs_config):
            return True
        elif (expected_nfs_config is None and
                share_server_nfs == self._default_nfs_config):
            return True

        return False
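    # For example (values hypothetical): a server whose backend details hold
    # no 'nfs_config' is compatible with an expected config equal to
    # _default_nfs_config, and vice versa, since an unset config simply
    # means the defaults were applied.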

    def choose_share_server_compatible_with_share(self, context,
                                                  share_servers, share,
                                                  snapshot=None,
                                                  share_group=None):
        """Method that allows driver to choose share server for provided share.

        If compatible share-server is not found, method should return None.

        :param context: Current context
        :param share_servers: list with share-server models
        :param share: share model
        :param snapshot: snapshot model
        :param share_group: ShareGroup model with shares
        :returns: share-server or None
        """
        if not share_servers:
            # No share server to reuse
            return None

        nfs_config = None
        extra_specs = share_types.get_extra_specs_from_share(share)
        if self.is_nfs_config_supported:
            nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)

        provisioning_options = self._get_provisioning_options(extra_specs)
        # Get FPolicy extra specs to avoid incompatible share servers
        fpolicy_ext_to_include = provisioning_options.get(
            'fpolicy_extensions_to_include')
        fpolicy_ext_to_exclude = provisioning_options.get(
            'fpolicy_extensions_to_exclude')
        fpolicy_file_operations = provisioning_options.get(
            'fpolicy_file_operations')

        # Avoid the reuse of 'dp_destination' vservers:
        for share_server in share_servers:
            if self._check_reuse_share_server(
                    share_server, nfs_config, share=share,
                    share_group=share_group,
                    fpolicy_ext_include=fpolicy_ext_to_include,
                    fpolicy_ext_exclude=fpolicy_ext_to_exclude,
                    fpolicy_file_operations=fpolicy_file_operations):
                return share_server

        # There is no compatible share server to be reused
        return None

    @na_utils.trace
    def _check_reuse_share_server(self, share_server, nfs_config, share=None,
                                  share_group=None, fpolicy_ext_include=None,
                                  fpolicy_ext_exclude=None,
                                  fpolicy_file_operations=None):
        """Check whether the share_server can be reused or not."""
        if (share_group and share_group.get('share_server_id') !=
                share_server['id']):
            return False

        backend_name = share_utils.extract_host(share_server['host'],
                                                level='backend_name')
        vserver_name, client = self._get_vserver(share_server,
                                                 backend_name=backend_name)
        vserver_info = client.get_vserver_info(vserver_name)
        if (vserver_info.get('operational_state') != 'running'
                or vserver_info.get('state') != 'running'
                or vserver_info.get('subtype') != 'default'):
            return False

        if self.is_nfs_config_supported:
            # NOTE(felipe_rodrigues): Do not check that the share nfs_config
            # matches with the group nfs_config, because the API guarantees
            # that the share type is an element of the group types.
            return self._is_share_server_compatible(share_server, nfs_config)

        if fpolicy_ext_include or fpolicy_ext_exclude:
            fpolicies = client.get_fpolicy_policies_status()
            if len(fpolicies) >= self.FPOLICY_MAX_VSERVER_POLICIES:
                # This share server already reached its maximum number of
                # policies, so we need to check if an existing one can be
                # reused; otherwise, the server is not suitable for this
                # share.
                reusable_scope = self._find_reusable_fpolicy_scope(
                    share, client,
                    fpolicy_extensions_to_include=fpolicy_ext_include,
                    fpolicy_extensions_to_exclude=fpolicy_ext_exclude,
                    fpolicy_file_operations=fpolicy_file_operations)
                if not reusable_scope:
                    return False

        return True

    @na_utils.trace
    def choose_share_server_compatible_with_share_group(
            self, context, share_servers, share_group_ref,
            share_group_snapshot=None):
        """Choose the server compatible with the group.

        If the NFS configuration is supported, check that the group types
        agree on the NFS extra-specs values.
        """
        if not share_servers:
            # No share server to reuse
            return None

        nfs_config = None
        if self.is_nfs_config_supported:
            nfs_config = self._get_nfs_config_share_group(share_group_ref)

        # NOTE(dviroel): FPolicy extra-specs won't be conflicting, since
        # multiple policies can be created. The maximum number of policies or
        # the reusability of existing ones can only be analyzed at share
        # instance creation.
        for share_server in share_servers:
            if self._check_reuse_share_server(share_server, nfs_config):
                return share_server

        return None

    @na_utils.trace
    def _get_nfs_config_share_group(self, share_group_ref):
        """Get the NFS config of the share group.

        If the group types do not agree on the NFS config, an exception is
        raised.
        """
        nfs_config = None
        first = True
        for st in share_group_ref.get('share_types', []):
            extra_specs = share_types.get_share_type_extra_specs(
                st['share_type_id'])

            if first:
                self._check_nfs_config_extra_specs_validity(extra_specs)
                nfs_config = self._get_nfs_config_provisioning_options(
                    extra_specs)
                first = False
                continue

            type_nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)
            if nfs_config != type_nfs_config:
                msg = _("The specified share_types cannot have "
                        "conflicting values for the NFS configuration "
                        "extra-specs.")
                raise exception.InvalidInput(reason=msg)

        return nfs_config

    @na_utils.trace
    def manage_existing(self, share, driver_options, share_server=None):

        # If NFS config is supported, the share's nfs_config must be the
        # same as the server's.
        if share_server and self.is_nfs_config_supported:
            extra_specs = share_types.get_extra_specs_from_share(share)
            nfs_config = self._get_nfs_config_provisioning_options(
                extra_specs)
            if not self._is_share_server_compatible(share_server, nfs_config):
                args = {'server_id': share_server['id']}
                msg = _('Invalid NFS configuration for the server '
                        '%(server_id)s. The extra-specs must match the '
                        'NFS configuration of the server.')
                raise exception.NetAppException(msg % args)

        return (super(NetAppCmodeMultiSVMFileStorageLibrary, self).
                manage_existing(share, driver_options,
                                share_server=share_server))

    @na_utils.trace
    def share_server_migration_check_compatibility(
            self, context, source_share_server, dest_host, old_share_network,
            new_share_network, shares_request_spec):

        not_compatible = {
            'compatible': False,
            'writable': None,
            'nondisruptive': None,
            'preserve_snapshots': None,
            'migration_cancel': None,
            'migration_get_progress': None,
            'share_network_id': None
        }

        # We need cluster creds, of course
        if not self._have_cluster_creds:
            msg = _("Cluster credentials have not been configured with this "
                    "share driver. Cannot perform server migration "
                    "operation.")
            LOG.error(msg)
            return not_compatible

        # Vserver will spread across aggregates in this implementation
        if share_utils.extract_host(dest_host, level='pool') is not None:
            msg = _("Cannot perform server migration to a specific pool. "
                    "Please choose a destination host 'host@backend' as "
                    "destination.")
            LOG.error(msg)
            return not_compatible

        src_backend_name = share_utils.extract_host(
            source_share_server['host'], level='backend_name')
        src_vserver, src_client = self._get_vserver(
            source_share_server, backend_name=src_backend_name)
        dest_backend_name = share_utils.extract_host(dest_host,
                                                     level='backend_name')
        # Block migration within the same backend.
        if src_backend_name == dest_backend_name:
            msg = _("Cannot perform server migration within the same "
                    "backend. Please choose a destination host different "
                    "from the source.")
            LOG.error(msg)
            return not_compatible

        src_cluster_name = src_client.get_cluster_name()
        # NOTE(dviroel): This call is supposed to be made on the destination
        # host
        dest_cluster_name = self._client.get_cluster_name()
        # Must be in different clusters too, an SVM-DR restriction
        if src_cluster_name == dest_cluster_name:
            msg = _("Cannot perform server migration within the same "
                    "cluster. Please choose a destination host that's in "
                    "a different cluster.")
            LOG.error(msg)
            return not_compatible

        # Check for SVM DR support
        # NOTE(dviroel): These clients can only be used for non-tunneling
        # requests.
        dst_client = data_motion.get_client_for_backend(dest_backend_name,
                                                        vserver_name=None)
        if (not src_client.is_svm_dr_supported()
                or not dst_client.is_svm_dr_supported()):
            msg = _("Cannot perform server migration because at least one "
                    "of the backends doesn't support SVM DR.")
            LOG.error(msg)
            return not_compatible

        # Blocking different security services for now
        if old_share_network['id'] != new_share_network['id']:
            new_sec_services = new_share_network.get('security_services', [])
            old_sec_services = old_share_network.get('security_services', [])
            if new_sec_services or old_sec_services:
                new_sec_serv_ids = [ss['id'] for ss in new_sec_services]
                old_sec_serv_ids = [ss['id'] for ss in old_sec_services]
                if not set(new_sec_serv_ids) == set(old_sec_serv_ids):
                    msg = _("Cannot perform server migration for different "
                            "security services. Please choose a suitable "
                            "share network that matches the source security "
                            "service.")
                    LOG.error(msg)
                    return not_compatible

        pools = self._get_pools()
        # Check 'netapp_flexvol_encryption' and 'revert_to_snapshot_support'
        specs_to_validate = ('netapp_flexvol_encryption',
                             'revert_to_snapshot_support')
        for req_spec in shares_request_spec.get('shares_req_spec', []):
            extra_specs = req_spec.get('share_type', {}).get('extra_specs', {})
            for spec in specs_to_validate:
                if extra_specs.get(spec) and not pools[0][spec]:
                    msg = _("Cannot perform server migration since the "
                            "destination host doesn't support the required "
                            "extra-spec %s.") % spec
                    LOG.error(msg)
                    return not_compatible
        # TODO(dviroel): disk_type extra-spec

        # Check capacity
        server_total_size = (shares_request_spec.get('shares_size', 0) +
                             shares_request_spec.get('snapshots_size', 0))
        # NOTE(dviroel): If the backend has a 'max_over_subscription_ratio'
        # configured and greater than 1, we'll consider thin provisioning
        # enabled for all shares.
        thin_provisioning = self.configuration.max_over_subscription_ratio > 1
        if self.configuration.netapp_server_migration_check_capacity is True:
            if not self._check_capacity_compatibility(pools, thin_provisioning,
                                                      server_total_size):
                msg = _("Cannot perform server migration because destination "
                        "host doesn't have enough free space.")
                LOG.error(msg)
                return not_compatible

        compatibility = {
            'compatible': True,
            'writable': True,
            'nondisruptive': False,
            'preserve_snapshots': True,
            'share_network_id': new_share_network['id'],
            'migration_cancel': True,
            'migration_get_progress': False,
        }

        return compatibility

    def share_server_migration_start(self, context, source_share_server,
                                     dest_share_server, share_instances,
                                     snapshot_instances):
        """Start share server migration using SVM DR.

        1. Create vserver peering between source and destination
        2. Create SnapMirror
        """
        src_backend_name = share_utils.extract_host(
            source_share_server['host'], level='backend_name')
        src_vserver, src_client = self._get_vserver(
            share_server=source_share_server, backend_name=src_backend_name)
        src_cluster = src_client.get_cluster_name()

        dest_backend_name = share_utils.extract_host(
            dest_share_server['host'], level='backend_name')
        dest_vserver, dest_client = self._get_vserver(
            share_server=dest_share_server, backend_name=dest_backend_name)
        dest_cluster = dest_client.get_cluster_name()

        # 1. Check and create vserver peer if needed
        if not self._get_vserver_peers(dest_vserver, src_vserver):
            # Request vserver peer creation from destination to source
            # NOTE(dviroel): vserver peering rollback is handled by
            # '_delete_vserver' function.
            dest_client.create_vserver_peer(
                dest_vserver, src_vserver,
                peer_cluster_name=src_cluster)

            # Accept the vserver peering using the source host's client
            # (inter-cluster only)
            if dest_cluster != src_cluster:
                src_client.accept_vserver_peer(src_vserver, dest_vserver)

        # 2. Create SnapMirror
        dm_session = data_motion.DataMotionSession()
        try:
            dm_session.create_snapmirror_svm(source_share_server,
                                             dest_share_server)
        except Exception:
            # NOTE(dviroel): vserver peer delete will be handled on vserver
            # teardown
            dm_session.cancel_snapmirror_svm(source_share_server,
                                             dest_share_server)
            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id'],
            }
            msg = _('Could not initialize SnapMirror between %(src)s and '
                    '%(dest)s vservers.') % msg_args
            raise exception.NetAppException(message=msg)

        msg_args = {
            'src': source_share_server['id'],
            'dest': dest_share_server['id'],
        }
        msg = _('Starting share server migration from %(src)s to %(dest)s.')
        LOG.info(msg, msg_args)

    def _get_snapmirror_svm(self, source_share_server, dest_share_server):
        dm_session = data_motion.DataMotionSession()
        try:
            snapmirrors = dm_session.get_snapmirrors_svm(
                source_share_server, dest_share_server)
        except netapp_api.NaApiError:
            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id']
            }
            msg = _("Could not retrieve snapmirrors between source "
                    "%(src)s and destination %(dest)s vservers.") % msg_args
            LOG.exception(msg)
            raise exception.NetAppException(message=msg)

        return snapmirrors

    @na_utils.trace
    def share_server_migration_continue(self, context, source_share_server,
                                        dest_share_server, share_instances,
                                        snapshot_instances):
        """Continues a share server migration using SVM DR."""
        snapmirrors = self._get_snapmirror_svm(source_share_server,
                                               dest_share_server)
        if not snapmirrors:
            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id']
            }
            msg = _("No snapmirror relationship was found between source "
                    "%(src)s and destination %(dest)s vservers.") % msg_args
            LOG.exception(msg)
            raise exception.NetAppException(message=msg)

        snapmirror = snapmirrors[0]
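        # Sketch of the states checked below (exact values are reported by
        # the backend): a new SVM-DR relationship typically starts with
        # mirror-state 'uninitialized' and becomes 'snapmirrored' once the
        # baseline transfer completes, while relationship-status moves
        # through 'transferring' and settles on 'idle'.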
        in_progress_status = ['preparing', 'transferring', 'finalizing']
        mirror_state = snapmirror.get('mirror-state')
        status = snapmirror.get('relationship-status')
        if mirror_state != 'snapmirrored' and status in in_progress_status:
            LOG.debug("Data transfer still in progress.")
            return False
        elif mirror_state == 'snapmirrored' and status == 'idle':
            LOG.info("Source and destination vservers are now snapmirrored.")
            return True

        msg = _("Snapmirror is not ready yet. The current mirror state is "
                "'%(mirror_state)s' and relationship status is '%(status)s'.")
        msg_args = {
            'mirror_state': mirror_state,
            'status': status,
        }
        LOG.debug(msg, msg_args)
        return False

    @na_utils.trace
    def share_server_migration_complete(self, context, source_share_server,
                                        dest_share_server, share_instances,
                                        snapshot_instances, new_network_alloc):
        """Completes share server migration using SVM DR.

        1. Do a last SnapMirror update.
        2. Quiesce, abort and then break the relationship.
        3. Stop the source vserver.
        4. Configure network interfaces in the destination vserver.
        5. Start the destination vserver.
        6. Delete and release the SnapMirror.
        7. Build the list of export_locations for each share.
        8. Release all resources from the source share server.
        """
        dm_session = data_motion.DataMotionSession()
        try:
            # 1. Start an update to try to get a last minute transfer before
            # we quiesce and break
            dm_session.update_snapmirror_svm(source_share_server,
                                             dest_share_server)
        except exception.StorageCommunicationException:
            # Ignore any errors since the current source may be unreachable
            pass

        src_backend_name = share_utils.extract_host(
            source_share_server['host'], level='backend_name')
        src_vserver, src_client = self._get_vserver(
            share_server=source_share_server, backend_name=src_backend_name)

        dest_backend_name = share_utils.extract_host(
            dest_share_server['host'], level='backend_name')
        dest_vserver, dest_client = self._get_vserver(
            share_server=dest_share_server, backend_name=dest_backend_name)
        try:
            # 2. Attempt to quiesce, abort and then break SnapMirror
            dm_session.quiesce_and_break_snapmirror_svm(source_share_server,
                                                        dest_share_server)
            # NOTE(dviroel): Wait until the destination vserver is promoted
            # to subtype 'default' and state 'running' before shutting down
            # the source.
            dm_session.wait_for_vserver_state(
                dest_vserver, dest_client, subtype='default',
                state='running', operational_state='stopped',
                timeout=(self.configuration.
                         netapp_server_migration_state_change_timeout))

            # 3. Stop source vserver
            src_client.stop_vserver(src_vserver)

            # 4. Set up network configuration
            ipspace_name = dest_client.get_vserver_ipspace(dest_vserver)

            # NOTE(dviroel): Security service and NFS configuration should be
            # handled by SVM DR, so no changes will be made here.
            vlan = new_network_alloc['segmentation_id']

            @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
            def setup_network_for_destination_vserver():
                self._setup_network_for_vserver(
                    dest_vserver, dest_client, new_network_alloc, ipspace_name,
                    enable_nfs=False,
                    security_services=None)

            setup_network_for_destination_vserver()

            # 5. Start the destination.
            dest_client.start_vserver(dest_vserver)

        except Exception:
            # Try to recover the source vserver
            try:
                src_client.start_vserver(src_vserver)
            except Exception:
                LOG.warning("Unable to recover source share server after a "
                            "migration failure.")
            # Destroy any SnapMirror and set the destination vserver's
            # subtype back to 'default'
            dm_session.cancel_snapmirror_svm(source_share_server,
                                             dest_share_server)
            # Rollback resources transferred to the destination
            for instance in share_instances:
                self._delete_share(instance, dest_vserver, dest_client,
                                   remove_export=False)

            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id'],
            }
            msg = _('Could not complete the migration between %(src)s and '
                    '%(dest)s vservers.') % msg_args
            raise exception.NetAppException(message=msg)

        # 6. Delete/release the SnapMirror
        dm_session.delete_snapmirror_svm(source_share_server,
                                         dest_share_server)

        # 7. Build a dict with share/snapshot location updates.
        # NOTE(dviroel): For SVM DR, the share names aren't modified; only the
        # export_locations are updated due to network changes.
        share_updates = {}
        for instance in share_instances:
            # Get the volume to find out the associated aggregate
            # Update post-migration info that can't be replicated
            try:
                share_name = self._get_backend_share_name(instance['id'])
                volume = dest_client.get_volume(share_name)
                dest_aggregate = volume.get('aggregate')
                # Update share attributes according to share extra specs
                self._update_share_attributes_after_server_migration(
                    instance, src_client, dest_aggregate, dest_client)

            except Exception:
                msg_args = {
                    'src': source_share_server['id'],
                    'dest': dest_share_server['id'],
                }
                msg = _('Could not complete the migration between %(src)s '
                        'and %(dest)s vservers. One of the shares was not '
                        'found in the destination vserver.') % msg_args
                raise exception.NetAppException(message=msg)

            export_locations = self._create_export(
                instance, dest_share_server, dest_vserver, dest_client,
                clear_current_export_policy=False,
                ensure_share_already_exists=True)

            share_updates.update({
                instance['id']: {
                    'export_locations': export_locations,
                    'pool_name': volume.get('aggregate')
                }})

        # NOTE(dviroel): Nothing to update in snapshot instances since the
        # provider location didn't change.

        # 8. Release source share resources
        for instance in share_instances:
            self._delete_share(instance, src_vserver, src_client,
                               remove_export=True)

        # NOTE(dviroel): Source share server deletion must be triggered by
        # the manager after finishing the migration.
        LOG.info('Share server migration completed.')
        return {
            'share_updates': share_updates,
        }

    def share_server_migration_cancel(self, context, source_share_server,
                                      dest_share_server, shares, snapshots):
        """Cancel a share server migration that is using SVM DR."""

        dm_session = data_motion.DataMotionSession()
        dest_backend_name = share_utils.extract_host(
            dest_share_server['host'], level='backend_name')
        dest_vserver, dest_client = self._get_vserver(
            share_server=dest_share_server, backend_name=dest_backend_name)

        try:
            snapmirrors = self._get_snapmirror_svm(source_share_server,
                                                   dest_share_server)
            if snapmirrors:
                dm_session.cancel_snapmirror_svm(source_share_server,
                                                 dest_share_server)
            # Do a simple volume cleanup in the destination vserver
            for instance in shares:
                self._delete_share(instance, dest_vserver, dest_client,
                                   remove_export=False)

        except Exception:
            msg_args = {
                'src': source_share_server['id'],
                'dest': dest_share_server['id'],
            }
            msg = _('Unable to cancel SnapMirror relationship between '
                    '%(src)s and %(dest)s vservers.') % msg_args
            raise exception.NetAppException(message=msg)

        LOG.info('Share server migration was cancelled.')

    def share_server_migration_get_progress(self, context, src_share_server,
                                            dest_share_server, shares,
                                            snapshots):
        # TODO(dviroel): get snapmirror info to infer the progress
        return {'total_progress': 0}

    def _update_share_attributes_after_server_migration(
            self, src_share_instance, src_client, dest_aggregate,
            dest_client):
        """Updates destination share instance with share type extra specs."""
        extra_specs = share_types.get_extra_specs_from_share(
            src_share_instance)
        provisioning_options = self._get_provisioning_options(extra_specs)
        volume_name = self._get_backend_share_name(src_share_instance['id'])
        # NOTE(dviroel): Need to retrieve the current autosize attributes
        # since they aren't being updated by SVM DR.
        autosize_attrs = src_client.get_volume_autosize_attributes(
            volume_name)
        # NOTE(dviroel): In order to modify the maximum and minimum size, we
        # must convert them from kilobytes to bytes.
        for key in ('minimum-size', 'maximum-size'):
            autosize_attrs[key] = int(autosize_attrs[key]) * units.Ki
        provisioning_options['autosize_attributes'] = autosize_attrs
        # NOTE(dviroel): SVM DR already creates a copy of the snapshot
        # policies at the destination, using a different name. If we update
        # the snapshot policy on these volumes, we might end up with an error
        # if the policy does not yet exist in the destination cluster.
        # Administrators will have the opportunity to add the snapshot policy
        # after a successful migration.
        provisioning_options.pop('snapshot_policy', None)

        # Modify volume to match extra specs
        dest_client.modify_volume(dest_aggregate, volume_name,
                                  **provisioning_options)

    def validate_provisioning_options_for_share(self, provisioning_options,
                                                extra_specs=None,
                                                qos_specs=None):
        if provisioning_options.get('adaptive_qos_policy_group') is not None:
            msg = _("The extra spec 'adaptive_qos_policy_group' is not "
                    "supported by backends configured with "
                    "'driver_handles_share_server' == True mode.")
            raise exception.NetAppException(msg)

        (super(NetAppCmodeMultiSVMFileStorageLibrary, self)
            .validate_provisioning_options_for_share(provisioning_options,
                                                     extra_specs=extra_specs,
                                                     qos_specs=qos_specs))