diff --git a/contrib/ci/post_test_hook.sh b/contrib/ci/post_test_hook.sh index f2b7c0f868..0d5c248d49 100755 --- a/contrib/ci/post_test_hook.sh +++ b/contrib/ci/post_test_hook.sh @@ -161,7 +161,6 @@ if [[ "$DRIVER" == "lvm" ]]; then RUN_MANILA_CG_TESTS=False RUN_MANILA_MANAGE_TESTS=False iniset $TEMPEST_CONFIG share run_shrink_tests False - iniset $TEMPEST_CONFIG share run_migration_tests False iniset $TEMPEST_CONFIG share enable_ip_rules_for_protocols 'nfs' iniset $TEMPEST_CONFIG share enable_user_rules_for_protocols 'cifs' provide_user_rules diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 7d591a5204..aec050b17a 100755 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -306,7 +306,6 @@ function create_service_share_servers { iniset $MANILA_CONF $BE service_instance_name_or_id $vm_id iniset $MANILA_CONF $BE service_net_name_or_ip private iniset $MANILA_CONF $BE tenant_net_name_or_ip private - iniset $MANILA_CONF $BE migration_data_copy_node_ip $PUBLIC_NETWORK_GATEWAY else if is_service_enabled neutron; then if [ $created_admin_network == false ]; then @@ -316,13 +315,26 @@ function create_service_share_servers { fi iniset $MANILA_CONF $BE admin_network_id $admin_net_id iniset $MANILA_CONF $BE admin_subnet_id $admin_subnet_id - iniset $MANILA_CONF $BE migration_data_copy_node_ip $FIXED_RANGE fi fi fi done + configure_data_service_generic_driver } +function configure_data_service_generic_driver { + enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ }) + share_driver=$(iniget $MANILA_CONF ${enabled_backends[0]} share_driver) + generic_driver='manila.share.drivers.generic.GenericShareDriver' + if [[ $share_driver == $generic_driver ]]; then + driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers) + if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then + iniset $MANILA_CONF DEFAULT data_node_access_ip $PUBLIC_NETWORK_GATEWAY + else + iniset $MANILA_CONF DEFAULT data_node_access_ip $FIXED_RANGE + fi + fi +} # create_manila_service_flavor - creates flavor, that will be used by backends # with configured generic driver to boot Nova VMs with. 
function create_manila_service_flavor { @@ -614,8 +626,9 @@ function configure_samba { for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do iniset $MANILA_CONF $backend_name driver_handles_share_servers False - iniset $MANILA_CONF $backend_name lvm_share_export_ip $MANILA_SERVICE_HOST + iniset $MANILA_CONF $backend_name lvm_share_export_ip $HOST_IP done + iniset $MANILA_CONF DEFAULT data_node_access_ip $HOST_IP fi } diff --git a/etc/manila/policy.json b/etc/manila/policy.json index 9549ef8424..db28b5dbc2 100644 --- a/etc/manila/policy.json +++ b/etc/manila/policy.json @@ -32,7 +32,11 @@ "share:get_share_metadata": "rule:default", "share:delete_share_metadata": "rule:default", "share:update_share_metadata": "rule:default", - "share:migrate": "rule:admin_api", + "share:migration_start": "rule:admin_api", + "share:migration_complete": "rule:admin_api", + "share:migration_cancel": "rule:admin_api", + "share:migration_get_progress": "rule:admin_api", + "share:reset_task_state": "rule:admin_api", "share:manage": "rule:admin_api", "share:unmanage": "rule:admin_api", "share:force_delete": "rule:admin_api", diff --git a/etc/manila/rootwrap.d/share.filters b/etc/manila/rootwrap.d/share.filters index 912534b604..3b08331750 100644 --- a/etc/manila/rootwrap.d/share.filters +++ b/etc/manila/rootwrap.d/share.filters @@ -154,3 +154,9 @@ nsenter: CommandFilter, /usr/local/bin/nsenter, root lxc: CommandFilter, lxc, root # manila/share/drivers/lxd.py brctl: CommandFilter, brctl, root + +# manila/data/utils.py: 'ls', '-pA1', '--group-directories-first', '%s' +ls: CommandFilter, ls, root + +# manila/data/utils.py: 'touch', '--reference=%s', '%s' +touch: CommandFilter, touch, root diff --git a/manila/api/openstack/api_version_request.py b/manila/api/openstack/api_version_request.py index a3881fd67e..fef444350f 100644 --- a/manila/api/openstack/api_version_request.py +++ b/manila/api/openstack/api_version_request.py @@ -61,14 +61,17 @@ REST_API_VERSION_HISTORY = """ * 2.12 - Manage/unmanage snapshot API. * 2.13 - Add "cephx" auth type to allow_access * 2.14 - 'Preferred' attribute in export location metadata - + * 2.15 - Added Share migration 'migration_cancel', + 'migration_get_progress', 'migration_complete' APIs, renamed + 'migrate_share' to 'migration_start' and added notify parameter + to 'migration_start'. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # the minimum version of the API supported. _MIN_API_VERSION = "2.0" -_MAX_API_VERSION = "2.14" +_MAX_API_VERSION = "2.15" DEFAULT_API_VERSION = _MIN_API_VERSION diff --git a/manila/api/openstack/rest_api_version_history.rst b/manila/api/openstack/rest_api_version_history.rst index 7d37399622..59d2d419d0 100644 --- a/manila/api/openstack/rest_api_version_history.rst +++ b/manila/api/openstack/rest_api_version_history.rst @@ -100,3 +100,9 @@ ____ field to identify which export locations are most efficient and should be used preferentially by clients. Also, change 'uuid' field to 'id', move timestamps to detail view, and return all non-admin fields to users. + +2.15 +---- + Added Share migration 'migration_cancel', 'migration_get_progress', + 'migration_complete' APIs, renamed 'migrate_share' to 'migration_start' and + added notify parameter to 'migration_start'. 
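The 2.15 history entry above renames the experimental "migrate_share" action to "migration_start" and adds "migration_complete", "migration_cancel" and "migration_get_progress" actions plus a "notify" flag. A minimal client-side sketch of driving those actions over the v2 REST API follows; the endpoint URL, token and share ID are placeholders, and the experimental opt-in header is assumed to be required as for other experimental Manila APIs.

```python
# Hypothetical sketch: calling the renamed 2.15 migration actions directly.
# Endpoint, token and IDs are placeholders, not values from this patch.
import requests

MANILA_ENDPOINT = "http://controller:8786/v2/<tenant_id>"   # placeholder
HEADERS = {
    "X-Auth-Token": "<keystone-token>",                      # placeholder
    "X-OpenStack-Manila-API-Version": "2.15",
    # The migration APIs are still experimental, so the opt-in header is set.
    "X-OpenStack-Manila-API-Experimental": "True",
    "Content-Type": "application/json",
}


def migration_start(share_id, dest_host, force_host_copy=False, notify=True):
    """First phase: request migration of a share to the destination host."""
    body = {"migration_start": {"host": dest_host,
                                "force_host_copy": force_host_copy,
                                "notify": notify}}
    return requests.post("%s/shares/%s/action" % (MANILA_ENDPOINT, share_id),
                         json=body, headers=HEADERS)


def migration_get_progress(share_id):
    """Poll copy progress while task_state is data_copying_in_progress."""
    body = {"migration_get_progress": None}
    return requests.post("%s/shares/%s/action" % (MANILA_ENDPOINT, share_id),
                         json=body, headers=HEADERS)
```

On microversions 2.7-2.14 the equivalent request keeps the legacy "migrate_share" action key and carries no "notify" parameter, matching the legacy handlers retained in manila/api/v2/shares.py.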
diff --git a/manila/api/openstack/wsgi.py b/manila/api/openstack/wsgi.py index baf0447f8c..3ac8314b11 100644 --- a/manila/api/openstack/wsgi.py +++ b/manila/api/openstack/wsgi.py @@ -1177,6 +1177,7 @@ class AdminActionsMixin(object): body_attributes = { 'status': 'reset_status', 'replica_state': 'reset_replica_state', + 'task_state': 'reset_task_state', } valid_statuses = { @@ -1193,6 +1194,7 @@ class AdminActionsMixin(object): constants.REPLICA_STATE_OUT_OF_SYNC, constants.STATUS_ERROR, ]), + 'task_state': set(constants.TASK_STATE_STATUSES), } def _update(self, *args, **kwargs): diff --git a/manila/api/v1/shares.py b/manila/api/v1/shares.py index 8c7abdd22c..5b609d4f7b 100644 --- a/manila/api/v1/shares.py +++ b/manila/api/v1/shares.py @@ -99,7 +99,7 @@ class ShareMixin(object): return webob.Response(status_int=202) - def _migrate_share(self, req, id, body): + def _migration_start(self, req, id, body, check_notify=False): """Migrate a share to the specified host.""" context = req.environ['manila.context'] try: @@ -107,26 +107,74 @@ class ShareMixin(object): except exception.NotFound: msg = _("Share %s not found.") % id raise exc.HTTPNotFound(explanation=msg) - params = body.get('migrate_share', body.get('os-migrate_share')) + params = body.get('migration_start', + body.get('migrate_share', + body.get('os-migrate_share'))) try: host = params['host'] except KeyError: - raise exc.HTTPBadRequest(explanation=_("Must specify 'host'")) + raise exc.HTTPBadRequest(explanation=_("Must specify 'host'.")) force_host_copy = params.get('force_host_copy', False) try: force_host_copy = strutils.bool_from_string(force_host_copy, strict=True) except ValueError: - raise exc.HTTPBadRequest( - explanation=_("Bad value for 'force_host_copy'")) + msg = _("Invalid value %s for 'force_host_copy'. " + "Expecting a boolean.") % force_host_copy + raise exc.HTTPBadRequest(explanation=msg) + if check_notify: + notify = params.get('notify', True) + try: + notify = strutils.bool_from_string(notify, strict=True) + except ValueError: + msg = _("Invalid value %s for 'notify'. 
" + "Expecting a boolean.") % notify + raise exc.HTTPBadRequest(explanation=msg) + else: + # NOTE(ganso): default notify value is True + notify = True try: - self.share_api.migrate_share(context, share, host, force_host_copy) + self.share_api.migration_start(context, share, host, + force_host_copy, notify) except exception.Conflict as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return webob.Response(status_int=202) + def _migration_complete(self, req, id, body): + """Invokes 2nd phase of share migration.""" + context = req.environ['manila.context'] + try: + share = self.share_api.get(context, id) + except exception.NotFound: + msg = _("Share %s not found.") % id + raise exc.HTTPNotFound(explanation=msg) + self.share_api.migration_complete(context, share) + return webob.Response(status_int=202) + + def _migration_cancel(self, req, id, body): + """Attempts to cancel share migration.""" + context = req.environ['manila.context'] + try: + share = self.share_api.get(context, id) + except exception.NotFound: + msg = _("Share %s not found.") % id + raise exc.HTTPNotFound(explanation=msg) + self.share_api.migration_cancel(context, share) + return webob.Response(status_int=202) + + def _migration_get_progress(self, req, id, body): + """Retrieve share migration progress for a given share.""" + context = req.environ['manila.context'] + try: + share = self.share_api.get(context, id) + except exception.NotFound: + msg = _("Share %s not found.") % id + raise exc.HTTPNotFound(explanation=msg) + result = self.share_api.migration_get_progress(context, share) + return self._view_builder.migration_get_progress(result) + def index(self, req): """Returns a summary list of shares.""" return self._get_shares(req, is_detail=False) diff --git a/manila/api/v2/shares.py b/manila/api/v2/shares.py index f97731c35d..12cfd88b26 100644 --- a/manila/api/v2/shares.py +++ b/manila/api/v2/shares.py @@ -68,13 +68,45 @@ class ShareController(shares.ShareMixin, @wsgi.Controller.api_version('2.5', '2.6', experimental=True) @wsgi.action("os-migrate_share") + @wsgi.Controller.authorize("migration_start") def migrate_share_legacy(self, req, id, body): - return self._migrate_share(req, id, body) + return self._migration_start(req, id, body) - @wsgi.Controller.api_version('2.7', experimental=True) + @wsgi.Controller.api_version('2.7', '2.14', experimental=True) @wsgi.action("migrate_share") + @wsgi.Controller.authorize("migration_start") def migrate_share(self, req, id, body): - return self._migrate_share(req, id, body) + return self._migration_start(req, id, body) + + @wsgi.Controller.api_version('2.15', experimental=True) + @wsgi.action("migration_start") + @wsgi.Controller.authorize + def migration_start(self, req, id, body): + return self._migration_start(req, id, body, check_notify=True) + + @wsgi.Controller.api_version('2.15', experimental=True) + @wsgi.action("migration_complete") + @wsgi.Controller.authorize + def migration_complete(self, req, id, body): + return self._migration_complete(req, id, body) + + @wsgi.Controller.api_version('2.15', experimental=True) + @wsgi.action("migration_cancel") + @wsgi.Controller.authorize + def migration_cancel(self, req, id, body): + return self._migration_cancel(req, id, body) + + @wsgi.Controller.api_version('2.15', experimental=True) + @wsgi.action("migration_get_progress") + @wsgi.Controller.authorize + def migration_get_progress(self, req, id, body): + return self._migration_get_progress(req, id, body) + + @wsgi.Controller.api_version('2.15', experimental=True) + 
@wsgi.action("reset_task_state") + @wsgi.Controller.authorize + def reset_task_state(self, req, id, body): + return self._reset_status(req, id, body, status_attr='task_state') @wsgi.Controller.api_version('2.0', '2.6') @wsgi.action('os-allow_access') diff --git a/manila/api/views/shares.py b/manila/api/views/shares.py index f7a57dce14..f4606a9d10 100644 --- a/manila/api/views/shares.py +++ b/manila/api/views/shares.py @@ -94,6 +94,14 @@ class ViewBuilder(common.ViewBuilder): 'share_server_id') return {'share': share_dict} + def migration_get_progress(self, progress): + result = { + 'total_progress': progress['total_progress'], + 'current_file_path': progress['current_file_path'], + 'current_file_progress': progress['current_file_progress'] + } + return result + @common.ViewBuilder.versioned_method("2.2") def add_snapshot_support_field(self, share_dict, share): share_dict['snapshot_support'] = share.get('snapshot_support') diff --git a/manila/common/constants.py b/manila/common/constants.py index acd66403ab..836c1fc72c 100644 --- a/manila/common/constants.py +++ b/manila/common/constants.py @@ -32,26 +32,51 @@ STATUS_EXTENDING = 'extending' STATUS_EXTENDING_ERROR = 'extending_error' STATUS_SHRINKING = 'shrinking' STATUS_SHRINKING_ERROR = 'shrinking_error' +STATUS_MIGRATING = 'migrating' +STATUS_MIGRATING_TO = 'migrating_to' STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR = ( 'shrinking_possible_data_loss_error' ) STATUS_REPLICATION_CHANGE = 'replication_change' -STATUS_TASK_STATE_MIGRATION_STARTING = 'migration_starting' -STATUS_TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' -STATUS_TASK_STATE_MIGRATION_ERROR = 'migration_error' -STATUS_TASK_STATE_MIGRATION_SUCCESS = 'migration_success' -STATUS_TASK_STATE_MIGRATION_COMPLETING = 'migration_completing' + +TASK_STATE_MIGRATION_STARTING = 'migration_starting' +TASK_STATE_MIGRATION_IN_PROGRESS = 'migration_in_progress' +TASK_STATE_MIGRATION_COMPLETING = 'migration_completing' +TASK_STATE_MIGRATION_SUCCESS = 'migration_success' +TASK_STATE_MIGRATION_ERROR = 'migration_error' +TASK_STATE_MIGRATION_CANCELLED = 'migration_cancelled' +TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS = 'migration_driver_in_progress' +TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE = 'migration_driver_phase1_done' +TASK_STATE_DATA_COPYING_STARTING = 'data_copying_starting' +TASK_STATE_DATA_COPYING_IN_PROGRESS = 'data_copying_in_progress' +TASK_STATE_DATA_COPYING_COMPLETING = 'data_copying_completing' +TASK_STATE_DATA_COPYING_COMPLETED = 'data_copying_completed' +TASK_STATE_DATA_COPYING_CANCELLED = 'data_copying_cancelled' +TASK_STATE_DATA_COPYING_ERROR = 'data_copying_error' BUSY_TASK_STATES = ( - STATUS_TASK_STATE_MIGRATION_COMPLETING, - STATUS_TASK_STATE_MIGRATION_STARTING, - STATUS_TASK_STATE_MIGRATION_IN_PROGRESS, + TASK_STATE_MIGRATION_STARTING, + TASK_STATE_MIGRATION_IN_PROGRESS, + TASK_STATE_MIGRATION_COMPLETING, + TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + TASK_STATE_DATA_COPYING_STARTING, + TASK_STATE_DATA_COPYING_IN_PROGRESS, + TASK_STATE_DATA_COPYING_COMPLETING, + TASK_STATE_DATA_COPYING_COMPLETED, +) + +BUSY_COPYING_STATES = ( + TASK_STATE_DATA_COPYING_STARTING, + TASK_STATE_DATA_COPYING_IN_PROGRESS, + TASK_STATE_DATA_COPYING_COMPLETING, ) TRANSITIONAL_STATUSES = ( STATUS_CREATING, STATUS_DELETING, STATUS_MANAGING, STATUS_UNMANAGING, STATUS_EXTENDING, STATUS_SHRINKING, + STATUS_MIGRATING, STATUS_MIGRATING_TO, ) SUPPORTED_SHARE_PROTOCOLS = ( @@ -98,11 +123,20 @@ ACCESS_LEVELS = ( ) TASK_STATE_STATUSES = ( - 
STATUS_TASK_STATE_MIGRATION_STARTING, - STATUS_TASK_STATE_MIGRATION_ERROR, - STATUS_TASK_STATE_MIGRATION_SUCCESS, - STATUS_TASK_STATE_MIGRATION_COMPLETING, - STATUS_TASK_STATE_MIGRATION_IN_PROGRESS, + TASK_STATE_MIGRATION_STARTING, + TASK_STATE_MIGRATION_IN_PROGRESS, + TASK_STATE_MIGRATION_COMPLETING, + TASK_STATE_MIGRATION_SUCCESS, + TASK_STATE_MIGRATION_ERROR, + TASK_STATE_MIGRATION_CANCELLED, + TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE, + TASK_STATE_DATA_COPYING_STARTING, + TASK_STATE_DATA_COPYING_IN_PROGRESS, + TASK_STATE_DATA_COPYING_COMPLETING, + TASK_STATE_DATA_COPYING_COMPLETED, + TASK_STATE_DATA_COPYING_CANCELLED, + TASK_STATE_DATA_COPYING_ERROR ) REPLICA_STATE_ACTIVE = 'active' diff --git a/manila/data/helper.py b/manila/data/helper.py new file mode 100644 index 0000000000..2f4f430238 --- /dev/null +++ b/manila/data/helper.py @@ -0,0 +1,223 @@ +# Copyright (c) 2015 Hitachi Data Systems. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Helper class for Data Service operations.""" + +import os + +from oslo_config import cfg +from oslo_log import log + +from manila.common import constants +from manila import exception +from manila.i18n import _, _LW +from manila.share import rpcapi as share_rpc +from manila import utils + +LOG = log.getLogger(__name__) + +data_helper_opts = [ + cfg.IntOpt( + 'data_access_wait_access_rules_timeout', + default=180, + help="Time to wait for access rules to be allowed/denied on backends " + "when migrating a share (seconds)."), + cfg.StrOpt( + 'data_node_access_ip', + default=None, + help="The IP of the node interface connected to the admin network. 
" + "Used for allowing access to the mounting shares."), + cfg.StrOpt( + 'data_node_access_cert', + default=None, + help="The certificate installed in the data node in order to " + "allow access to certificate authentication-based shares."), + +] + +CONF = cfg.CONF +CONF.register_opts(data_helper_opts) + + +class DataServiceHelper(object): + + def __init__(self, context, db, share): + + self.db = db + self.share = share + self.context = context + self.share_rpc = share_rpc.ShareAPI() + self.wait_access_rules_timeout = ( + CONF.data_access_wait_access_rules_timeout) + + def _allow_data_access(self, access, share_instance_id, + dest_share_instance_id=None): + + values = { + 'share_id': self.share['id'], + 'access_type': access['access_type'], + 'access_level': access['access_level'], + 'access_to': access['access_to'] + } + + share_access_list = self.db.share_access_get_all_by_type_and_access( + self.context, self.share['id'], access['access_type'], + access['access_to']) + + for access in share_access_list: + self._change_data_access_to_instance( + share_instance_id, access, allow=False) + + access_ref = self.db.share_access_create(self.context, values) + + self._change_data_access_to_instance( + share_instance_id, access_ref, allow=True) + if dest_share_instance_id: + self._change_data_access_to_instance( + dest_share_instance_id, access_ref, allow=True) + + return access_ref + + def deny_access_to_data_service(self, access_ref, share_instance_id): + + self._change_data_access_to_instance( + share_instance_id, access_ref, allow=False) + + # NOTE(ganso): Cleanup methods do not throw exceptions, since the + # exceptions that should be thrown are the ones that call the cleanup + + def cleanup_data_access(self, access_ref, share_instance_id): + + try: + self.deny_access_to_data_service(access_ref, share_instance_id) + except Exception: + LOG.warning(_LW("Could not cleanup access rule of share %s."), + self.share['id']) + + def cleanup_temp_folder(self, instance_id, mount_path): + + try: + path = os.path.join(mount_path, instance_id) + if os.path.exists(path): + os.rmdir(path) + self._check_dir_not_exists(path) + except Exception: + LOG.warning(_LW("Could not cleanup instance %(instance_id)s " + "temporary folders for data copy of " + "share %(share_id)s."), { + 'instance_id': instance_id, + 'share_id': self.share['id']}) + + def cleanup_unmount_temp_folder(self, unmount_template, mount_path, + share_instance_id): + + try: + self.unmount_share_instance(unmount_template, mount_path, + share_instance_id) + except Exception: + LOG.warning(_LW("Could not unmount folder of instance" + " %(instance_id)s for data copy of " + "share %(share_id)s."), { + 'instance_id': share_instance_id, + 'share_id': self.share['id']}) + + def _change_data_access_to_instance( + self, instance_id, access_ref, allow=False): + + self.db.share_instance_update_access_status( + self.context, instance_id, constants.STATUS_OUT_OF_SYNC) + + instance = self.db.share_instance_get( + self.context, instance_id, with_share_data=True) + + if allow: + self.share_rpc.allow_access(self.context, instance, access_ref) + else: + self.share_rpc.deny_access(self.context, instance, access_ref) + + utils.wait_for_access_update( + self.context, self.db, instance, self.wait_access_rules_timeout) + + def allow_access_to_data_service(self, share, share_instance_id, + dest_share_instance_id): + + if share['share_proto'].upper() == 'GLUSTERFS': + + access_to = CONF.data_node_access_cert + access_type = 'cert' + + if not access_to: + msg = _("Data 
Node Certificate not specified. Cannot mount " + "instances for data copy of share %(share_id)s. " + "Aborting.") % {'share_id': share['id']} + raise exception.ShareDataCopyFailed(reason=msg) + + else: + + access_to = CONF.data_node_access_ip + access_type = 'ip' + + if not access_to: + msg = _("Data Node Admin Network IP not specified. Cannot " + "mount instances for data copy of share %(share_id)s. " + "Aborting.") % {'share_id': share['id']} + raise exception.ShareDataCopyFailed(reason=msg) + + access = {'access_type': access_type, + 'access_level': constants.ACCESS_LEVEL_RW, + 'access_to': access_to} + + access_ref = self._allow_data_access(access, share_instance_id, + dest_share_instance_id) + + return access_ref + + @utils.retry(exception.NotFound, 0.1, 10, 0.1) + def _check_dir_exists(self, path): + if not os.path.exists(path): + raise exception.NotFound("Folder %s could not be found." % path) + + @utils.retry(exception.Found, 0.1, 10, 0.1) + def _check_dir_not_exists(self, path): + if os.path.exists(path): + raise exception.Found("Folder %s was found." % path) + + def mount_share_instance(self, mount_template, mount_path, + share_instance_id): + + path = os.path.join(mount_path, share_instance_id) + + if not os.path.exists(path): + os.makedirs(path) + self._check_dir_exists(path) + + mount_command = mount_template % {'path': path} + + utils.execute(*(mount_command.split()), run_as_root=True) + + def unmount_share_instance(self, unmount_template, mount_path, + share_instance_id): + + path = os.path.join(mount_path, share_instance_id) + + unmount_command = unmount_template % {'path': path} + + utils.execute(*(unmount_command.split()), run_as_root=True) + + try: + if os.path.exists(path): + os.rmdir(path) + self._check_dir_not_exists(path) + except Exception: + LOG.warning(_LW("Folder %s could not be removed."), path) diff --git a/manila/data/manager.py b/manila/data/manager.py index 4339ddcd2d..c78ece0767 100644 --- a/manila/data/manager.py +++ b/manila/data/manager.py @@ -16,14 +16,32 @@ Data Service """ +import os + from oslo_config import cfg from oslo_log import log +import six +from manila.i18n import _, _LE, _LI, _LW +from manila.common import constants +from manila import context +from manila.data import helper +from manila.data import utils as data_utils +from manila import exception from manila import manager +from manila.share import rpcapi as share_rpc LOG = log.getLogger(__name__) +data_opts = [ + cfg.StrOpt( + 'migration_tmp_location', + default='/tmp/', + help="Temporary path to create and mount shares during migration."), +] + CONF = cfg.CONF +CONF.register_opts(data_opts) class DataManager(manager.Manager): @@ -33,6 +51,241 @@ class DataManager(manager.Manager): def __init__(self, service_name=None, *args, **kwargs): super(DataManager, self).__init__(*args, **kwargs) + self.busy_tasks_shares = {} def init_host(self): - pass + ctxt = context.get_admin_context() + shares = self.db.share_get_all(ctxt) + for share in shares: + if share['task_state'] in constants.BUSY_COPYING_STATES: + self.db.share_update( + ctxt, share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) + + def migration_start(self, context, ignore_list, share_id, + share_instance_id, dest_share_instance_id, + migration_info_src, migration_info_dest, notify): + + LOG.info(_LI( + "Received request to migrate share content from share instance " + "%(instance_id)s to instance %(dest_instance_id)s."), + {'instance_id': share_instance_id, + 'dest_instance_id': dest_share_instance_id}) + + 
share_ref = self.db.share_get(context, share_id) + + share_rpcapi = share_rpc.ShareAPI() + + mount_path = CONF.migration_tmp_location + + try: + copy = data_utils.Copy( + os.path.join(mount_path, share_instance_id), + os.path.join(mount_path, dest_share_instance_id), + ignore_list) + + self._copy_share_data( + context, copy, share_ref, share_instance_id, + dest_share_instance_id, migration_info_src, + migration_info_dest) + except exception.ShareDataCopyCancelled: + share_rpcapi.migration_complete( + context, share_ref, share_instance_id, dest_share_instance_id) + return + except Exception: + self.db.share_update( + context, share_id, + {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) + msg = _("Failed to copy contents from instance %(src)s to " + "instance %(dest)s.") % {'src': share_instance_id, + 'dest': dest_share_instance_id} + LOG.exception(msg) + share_rpcapi.migration_complete( + context, share_ref, share_instance_id, dest_share_instance_id) + raise exception.ShareDataCopyFailed(reason=msg) + finally: + self.busy_tasks_shares.pop(share_id) + + LOG.info(_LI( + "Completed copy operation of migrating share content from share " + "instance %(instance_id)s to instance %(dest_instance_id)s."), + {'instance_id': share_instance_id, + 'dest_instance_id': dest_share_instance_id}) + + if notify: + LOG.info(_LI( + "Notifying source backend that migrating share content from" + " share instance %(instance_id)s to instance " + "%(dest_instance_id)s completed."), + {'instance_id': share_instance_id, + 'dest_instance_id': dest_share_instance_id}) + + share_rpcapi.migration_complete( + context, share_ref, share_instance_id, dest_share_instance_id) + + def data_copy_cancel(self, context, share_id): + LOG.info(_LI("Received request to cancel share migration " + "of share %s."), share_id) + copy = self.busy_tasks_shares.get(share_id) + if copy: + copy.cancel() + else: + msg = _("Data copy for migration of share %s cannot be cancelled" + " at this moment.") % share_id + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + def data_copy_get_progress(self, context, share_id): + LOG.info(_LI("Received request to get share migration information " + "of share %s."), share_id) + copy = self.busy_tasks_shares.get(share_id) + if copy: + result = copy.get_progress() + LOG.info(_LI("Obtained following share migration information " + "of share %(share)s: %(info)s."), + {'share': share_id, + 'info': six.text_type(result)}) + return result + else: + msg = _("Migration of share %s data copy progress cannot be " + "obtained at this moment.") % share_id + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + def _copy_share_data( + self, context, copy, src_share, share_instance_id, + dest_share_instance_id, migration_info_src, migration_info_dest): + + copied = False + mount_path = CONF.migration_tmp_location + + self.db.share_update( + context, src_share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) + + helper_src = helper.DataServiceHelper(context, self.db, src_share) + helper_dest = helper_src + + access_ref_src = helper_src.allow_access_to_data_service( + src_share, share_instance_id, dest_share_instance_id) + access_ref_dest = access_ref_src + + def _call_cleanups(items): + for item in items: + if 'unmount_src' == item: + helper_src.cleanup_unmount_temp_folder( + migration_info_src['unmount'], mount_path, + share_instance_id) + elif 'temp_folder_src' == item: + helper_src.cleanup_temp_folder(share_instance_id, + mount_path) + elif 'temp_folder_dest' == item: + 
helper_dest.cleanup_temp_folder(dest_share_instance_id, + mount_path) + elif 'access_src' == item: + helper_src.cleanup_data_access(access_ref_src, + share_instance_id) + elif 'access_dest' == item: + helper_dest.cleanup_data_access(access_ref_dest, + dest_share_instance_id) + try: + helper_src.mount_share_instance( + migration_info_src['mount'], mount_path, share_instance_id) + except Exception: + msg = _("Share migration failed attempting to mount " + "share instance %s.") % share_instance_id + LOG.exception(msg) + _call_cleanups(['temp_folder_src', 'access_dest', 'access_src']) + raise exception.ShareDataCopyFailed(reason=msg) + + try: + helper_dest.mount_share_instance( + migration_info_dest['mount'], mount_path, + dest_share_instance_id) + except Exception: + msg = _("Share migration failed attempting to mount " + "share instance %s.") % dest_share_instance_id + LOG.exception(msg) + _call_cleanups(['temp_folder_dest', 'unmount_src', + 'temp_folder_src', 'access_dest', 'access_src']) + raise exception.ShareDataCopyFailed(reason=msg) + + self.busy_tasks_shares[src_share['id']] = copy + self.db.share_update( + context, src_share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}) + + try: + copy.run() + + self.db.share_update( + context, src_share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETING}) + + if copy.get_progress()['total_progress'] == 100: + copied = True + + except Exception: + LOG.exception(_LE("Failed to copy data from share instance " + "%(share_instance_id)s to " + "%(dest_share_instance_id)s."), + {'share_instance_id': share_instance_id, + 'dest_share_instance_id': dest_share_instance_id}) + + try: + helper_src.unmount_share_instance(migration_info_src['unmount'], + mount_path, share_instance_id) + except Exception: + LOG.exception(_LE("Could not unmount folder of instance" + " %s after its data copy."), share_instance_id) + + try: + helper_dest.unmount_share_instance( + migration_info_dest['unmount'], mount_path, + dest_share_instance_id) + except Exception: + LOG.exception(_LE("Could not unmount folder of instance" + " %s after its data copy."), dest_share_instance_id) + + try: + helper_src.deny_access_to_data_service( + access_ref_src, share_instance_id) + except Exception: + LOG.exception(_LE("Could not deny access to instance" + " %s after its data copy."), share_instance_id) + + try: + helper_dest.deny_access_to_data_service( + access_ref_dest, dest_share_instance_id) + except Exception: + LOG.exception(_LE("Could not deny access to instance" + " %s after its data copy."), dest_share_instance_id) + + if copy and copy.cancelled: + self.db.share_update( + context, src_share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_CANCELLED}) + LOG.warning(_LW("Copy of data from share instance " + "%(src_instance)s to share instance " + "%(dest_instance)s was cancelled."), + {'src_instance': share_instance_id, + 'dest_instance': dest_share_instance_id}) + raise exception.ShareDataCopyCancelled( + src_instance=share_instance_id, + dest_instance=dest_share_instance_id) + + elif not copied: + msg = _("Copying data from share instance %(instance_id)s " + "to %(dest_instance_id)s did not succeed.") % ( + {'instance_id': share_instance_id, + 'dest_instance_id': dest_share_instance_id}) + raise exception.ShareDataCopyFailed(reason=msg) + + self.db.share_update( + context, src_share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_COMPLETED}) + + LOG.debug("Copy of data from share instance %(src_instance)s to " + "share 
instance %(dest_instance)s was successful.", + {'src_instance': share_instance_id, + 'dest_instance': dest_share_instance_id}) diff --git a/manila/data/rpcapi.py b/manila/data/rpcapi.py index ae7e9dfcbc..7b76c5f259 100644 --- a/manila/data/rpcapi.py +++ b/manila/data/rpcapi.py @@ -29,7 +29,10 @@ class DataAPI(object): API version history: - 1.0 - Initial version. + 1.0 - Initial version, + Add migration_start(), + data_copy_cancel(), + data_copy_get_progress() """ BASE_RPC_API_VERSION = '1.0' @@ -39,3 +42,27 @@ class DataAPI(object): target = messaging.Target(topic=CONF.data_topic, version=self.BASE_RPC_API_VERSION) self.client = rpc.get_client(target, version_cap='1.0') + + def migration_start(self, context, share_id, ignore_list, + share_instance_id, dest_share_instance_id, + migration_info_src, migration_info_dest, notify): + call_context = self.client.prepare(version='1.0') + call_context.cast( + context, + 'migration_start', + share_id=share_id, + ignore_list=ignore_list, + share_instance_id=share_instance_id, + dest_share_instance_id=dest_share_instance_id, + migration_info_src=migration_info_src, + migration_info_dest=migration_info_dest, + notify=notify) + + def data_copy_cancel(self, context, share_id): + call_context = self.client.prepare(version='1.0') + call_context.call(context, 'data_copy_cancel', share_id=share_id) + + def data_copy_get_progress(self, context, share_id): + call_context = self.client.prepare(version='1.0') + return call_context.call(context, 'data_copy_get_progress', + share_id=share_id) diff --git a/manila/data/utils.py b/manila/data/utils.py new file mode 100644 index 0000000000..e284251b9e --- /dev/null +++ b/manila/data/utils.py @@ -0,0 +1,160 @@ +# Copyright 2015, Hitachi Data Systems. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +from oslo_log import log +import six + +from manila import utils + +LOG = log.getLogger(__name__) + + +class Copy(object): + + def __init__(self, src, dest, ignore_list): + self.src = src + self.dest = dest + self.total_size = 0 + self.current_size = 0 + self.files = [] + self.dirs = [] + self.current_copy = None + self.ignore_list = ignore_list + self.cancelled = False + + def get_progress(self): + + if self.current_copy is not None: + + try: + size, err = utils.execute("stat", "-c", "%s", + self.current_copy['file_path'], + run_as_root=True) + size = int(size) + except utils.processutils.ProcessExecutionError: + size = 0 + + total_progress = 0 + if self.total_size > 0: + total_progress = self.current_size * 100 / self.total_size + current_file_progress = 0 + if self.current_copy['size'] > 0: + current_file_progress = size * 100 / self.current_copy['size'] + current_file_path = self.current_copy['file_path'] + + progress = { + 'total_progress': total_progress, + 'current_file_path': current_file_path, + 'current_file_progress': current_file_progress + } + + return progress + else: + return {'total_progress': 100} + + def cancel(self): + + self.cancelled = True + + def run(self): + + self.get_total_size(self.src) + self.copy_data(self.src) + self.copy_stats(self.src) + + LOG.info(six.text_type(self.get_progress())) + + def get_total_size(self, path): + if self.cancelled: + return + out, err = utils.execute( + "ls", "-pA1", "--group-directories-first", path, + run_as_root=True) + for line in out.split('\n'): + if self.cancelled: + return + if len(line) == 0: + continue + src_item = os.path.join(path, line) + if line[-1] == '/': + if line[0:-1] in self.ignore_list: + continue + self.get_total_size(src_item) + else: + if line in self.ignore_list: + continue + size, err = utils.execute("stat", "-c", "%s", src_item, + run_as_root=True) + self.total_size += int(size) + + def copy_data(self, path): + if self.cancelled: + return + out, err = utils.execute( + "ls", "-pA1", "--group-directories-first", path, + run_as_root=True) + for line in out.split('\n'): + if self.cancelled: + return + if len(line) == 0: + continue + src_item = os.path.join(path, line) + dest_item = src_item.replace(self.src, self.dest) + if line[-1] == '/': + if line[0:-1] in self.ignore_list: + continue + utils.execute("mkdir", "-p", dest_item, run_as_root=True) + self.copy_data(src_item) + else: + if line in self.ignore_list: + continue + size, err = utils.execute("stat", "-c", "%s", src_item, + run_as_root=True) + + self.current_copy = {'file_path': dest_item, + 'size': int(size)} + + utils.execute("cp", "-P", "--preserve=all", src_item, + dest_item, run_as_root=True) + + self.current_size += int(size) + + LOG.info(six.text_type(self.get_progress())) + + def copy_stats(self, path): + if self.cancelled: + return + out, err = utils.execute( + "ls", "-pA1", "--group-directories-first", path, + run_as_root=True) + for line in out.split('\n'): + if self.cancelled: + return + if len(line) == 0: + continue + src_item = os.path.join(path, line) + dest_item = src_item.replace(self.src, self.dest) + # NOTE(ganso): Should re-apply attributes for folders. 
+ if line[-1] == '/': + if line[0:-1] in self.ignore_list: + continue + self.copy_stats(src_item) + utils.execute("chmod", "--reference=%s" % src_item, dest_item, + run_as_root=True) + utils.execute("touch", "--reference=%s" % src_item, dest_item, + run_as_root=True) + utils.execute("chown", "--reference=%s" % src_item, dest_item, + run_as_root=True) diff --git a/manila/db/sqlalchemy/models.py b/manila/db/sqlalchemy/models.py index d0c2f11098..95131656b9 100644 --- a/manila/db/sqlalchemy/models.py +++ b/manila/db/sqlalchemy/models.py @@ -264,7 +264,8 @@ class Share(BASE, ManilaBase): result = None if len(self.instances) > 0: order = (constants.STATUS_REPLICATION_CHANGE, - constants.STATUS_AVAILABLE, constants.STATUS_ERROR) + constants.STATUS_MIGRATING, constants.STATUS_AVAILABLE, + constants.STATUS_ERROR) other_statuses = ( [x['status'] for x in self.instances if x['status'] not in order and diff --git a/manila/exception.py b/manila/exception.py index 8ac415f522..22531ddc33 100644 --- a/manila/exception.py +++ b/manila/exception.py @@ -203,6 +203,12 @@ class NotFound(ManilaException): safe = True +class Found(ManilaException): + message = _("Resource was found.") + code = 302 + safe = True + + class InUse(ManilaException): message = _("Resource is in use.") @@ -236,6 +242,15 @@ class ShareMigrationFailed(ManilaException): message = _("Share migration failed: %(reason)s") +class ShareDataCopyFailed(ManilaException): + message = _("Share Data copy failed: %(reason)s") + + +class ShareDataCopyCancelled(ManilaException): + message = _("Copy of contents from share instance %(src_instance)s " + "to share instance %(dest_instance)s was cancelled.") + + class ServiceIPNotFound(ManilaException): message = _("Service IP for instance not found: %(reason)s") diff --git a/manila/scheduler/manager.py b/manila/scheduler/manager.py index b150d06e0e..42c402169d 100644 --- a/manila/scheduler/manager.py +++ b/manila/scheduler/manager.py @@ -122,14 +122,14 @@ class SchedulerManager(manager.Manager): return self.driver.get_pools(context, filters) def migrate_share_to_host(self, context, share_id, host, - force_host_copy, request_spec, + force_host_copy, notify, request_spec, filter_properties=None): """Ensure that the host exists and can accept the share.""" def _migrate_share_set_error(self, context, ex, request_spec): self._set_share_state_and_notify( 'migrate_share_to_host', - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_ERROR}, + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}, context, ex, request_spec) try: @@ -138,16 +138,16 @@ class SchedulerManager(manager.Manager): filter_properties) except exception.NoValidHost as ex: - _migrate_share_set_error(self, context, ex, request_spec) + with excutils.save_and_reraise_exception(): + _migrate_share_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) else: share_ref = db.share_get(context, share_id) try: - share_rpcapi.ShareAPI().migrate_share(context, - share_ref, tgt_host, - force_host_copy) + share_rpcapi.ShareAPI().migration_start( + context, share_ref, tgt_host, force_host_copy, notify) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_share_set_error(self, context, ex, request_spec) diff --git a/manila/scheduler/rpcapi.py b/manila/scheduler/rpcapi.py index 047ff9f63d..366f726567 100644 --- a/manila/scheduler/rpcapi.py +++ b/manila/scheduler/rpcapi.py @@ -81,16 +81,16 @@ class SchedulerAPI(object): 
filter_properties=filter_properties) def migrate_share_to_host(self, context, share_id, host, - force_host_copy=False, request_spec=None, + force_host_copy, notify, request_spec=None, filter_properties=None): call_context = self.client.prepare(version='1.4') request_spec_p = jsonutils.to_primitive(request_spec) - return call_context.cast(context, - 'migrate_share_to_host', + return call_context.call(context, 'migrate_share_to_host', share_id=share_id, host=host, force_host_copy=force_host_copy, + notify=notify, request_spec=request_spec_p, filter_properties=filter_properties) diff --git a/manila/share/api.py b/manila/share/api.py index 3d67814833..439e94e8a2 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -29,6 +29,7 @@ import six from manila.api import extensions from manila.common import constants +from manila.data import rpcapi as data_rpcapi from manila.db import base from manila import exception from manila.i18n import _ @@ -742,12 +743,10 @@ class API(base.Base): self.share_rpcapi.create_snapshot(context, share, snapshot) return snapshot - @policy.wrap_check_policy('share') - def migrate_share(self, context, share, host, force_host_copy): + def migration_start(self, context, share, host, force_host_copy, + notify=True): """Migrates share to a new host.""" - policy.check_policy(context, 'share', 'migrate') - share_instance = share.instance # NOTE(gouthamr): Ensure share does not have replicas. @@ -793,7 +792,7 @@ class API(base.Base): # is made self.update( context, share, - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_STARTING}) + {'task_state': constants.TASK_STATE_MIGRATION_STARTING}) share_type = {} share_type_id = share['share_type_id'] @@ -830,12 +829,91 @@ class API(base.Base): try: self.scheduler_rpcapi.migrate_share_to_host(context, share['id'], host, force_host_copy, - request_spec) + notify, request_spec) except Exception: - self.update( - context, share, - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_ERROR}) - raise + msg = _('Destination host %(dest_host)s did not pass validation ' + 'for migration of share %(share)s.') % { + 'dest_host': host, + 'share': share['id']} + raise exception.InvalidHost(reason=msg) + + def migration_complete(self, context, share): + + if share['task_state'] not in ( + constants.TASK_STATE_DATA_COPYING_COMPLETED, + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): + msg = _("First migration phase of share %s not completed" + " yet.") % share['id'] + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + share_instance_id = None + new_share_instance_id = None + + if share['task_state'] == ( + constants.TASK_STATE_DATA_COPYING_COMPLETED): + + for instance in share.instances: + if instance['status'] == constants.STATUS_MIGRATING: + share_instance_id = instance['id'] + if instance['status'] == constants.STATUS_MIGRATING_TO: + new_share_instance_id = instance['id'] + + if None in (share_instance_id, new_share_instance_id): + msg = _("Share instances %(instance_id)s and " + "%(new_instance_id)s in inconsistent states, cannot" + " continue share migration for share %(share_id)s" + ".") % {'instance_id': share_instance_id, + 'new_instance_id': new_share_instance_id, + 'share_id': share['id']} + raise exception.ShareMigrationFailed(reason=msg) + + share_rpc = share_rpcapi.ShareAPI() + share_rpc.migration_complete(context, share, share_instance_id, + new_share_instance_id) + + def migration_get_progress(self, context, share): + + if share['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + + 
share_rpc = share_rpcapi.ShareAPI() + return share_rpc.migration_get_progress(context, share) + + elif share['task_state'] == ( + constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): + data_rpc = data_rpcapi.DataAPI() + LOG.info(_LI("Sending request to get share migration information" + " of share %s.") % share['id']) + return data_rpc.data_copy_get_progress(context, share['id']) + + else: + msg = _("Migration of share %s data copy progress cannot be " + "obtained at this moment.") % share['id'] + LOG.error(msg) + raise exception.InvalidShare(reason=msg) + + def migration_cancel(self, context, share): + + if share['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + + share_rpc = share_rpcapi.ShareAPI() + share_rpc.migration_cancel(context, share) + + elif share['task_state'] == ( + constants.TASK_STATE_DATA_COPYING_IN_PROGRESS): + + data_rpc = data_rpcapi.DataAPI() + LOG.info(_LI("Sending request to cancel migration of " + "share %s.") % share['id']) + data_rpc.data_copy_cancel(context, share['id']) + + else: + msg = _("Data copy for migration of share %s cannot be cancelled" + " at this moment.") % share['id'] + LOG.error(msg) + raise exception.InvalidShare(reason=msg) @policy.wrap_check_policy('share') def delete_snapshot(self, context, snapshot, force=False): diff --git a/manila/share/driver.py b/manila/share/driver.py index d3d53305e3..17e17d6ed9 100644 --- a/manila/share/driver.py +++ b/manila/share/driver.py @@ -18,17 +18,14 @@ Drivers for shares. """ -import re import time from oslo_config import cfg from oslo_log import log -import six from manila import exception from manila.i18n import _, _LE from manila import network -from manila.share import utils as share_utils from manila import utils LOG = log.getLogger(__name__) @@ -73,43 +70,32 @@ share_opts = [ 'total physical capacity. A ratio of 1.0 means ' 'provisioned capacity cannot exceed the total physical ' 'capacity. A ratio lower than 1.0 is invalid.'), - cfg.StrOpt( - 'migration_tmp_location', - default='/tmp/', - help="Temporary path to create and mount shares during migration."), cfg.ListOpt( 'migration_ignore_files', default=['lost+found'], help="List of files and folders to be ignored when migrating shares. " "Items should be names (not including any path)."), - cfg.IntOpt( - 'migration_wait_access_rules_timeout', - default=90, - help="Time to wait for access rules to be allowed/denied on backends " - "when migrating shares using generic approach (seconds)."), - cfg.IntOpt( - 'migration_create_delete_share_timeout', - default=300, - help='Timeout for creating and deleting share instances ' - 'when performing share migration (seconds).'), cfg.StrOpt( - 'migration_mounting_backend_ip', - help="Backend IP in admin network to use for mounting " - "shares during migration."), + 'share_mount_template', + default='mount -vt %(proto)s %(export)s %(path)s', + help="The template for mounting shares for this backend. Must specify " + "the executable with all necessary parameters for the protocol " + "supported. 'proto' template element may not be required if " + "included in the command. 'export' and 'path' template elements " + "are required. It is advisable to separate different commands " + "per backend."), cfg.StrOpt( - 'migration_data_copy_node_ip', - help="The IP of the node responsible for copying data during " - "migration, such as the data copy service node, reachable by " - "the backend."), - cfg.StrOpt( - 'migration_protocol_mount_command', - help="The command for mounting shares for this backend. 
Must specify" - "the executable and all necessary parameters for the protocol " - "supported. It is advisable to separate protocols per backend."), + 'share_unmount_template', + default='umount -v %(path)s', + help="The template for unmounting shares for this backend. Must " + "specify the executable with all necessary parameters for the " + "protocol supported. 'path' template element is required. It is " + "advisable to separate different commands per backend."), cfg.BoolOpt( - 'migration_readonly_support', + 'migration_readonly_rules_support', default=True, - help="Specify whether read only access mode is supported in this" + deprecated_name='migration_readonly_support', + help="Specify whether read only access rule mode is supported in this " "backend."), cfg.StrOpt( "admin_network_config_group", @@ -323,24 +309,74 @@ class ShareDriver(object): {'actual': self.driver_handles_share_servers, 'allowed': driver_handles_share_servers}) - def migrate_share(self, context, share_ref, host, - dest_driver_migration_info): - """Is called to perform driver migration. + def migration_start(self, context, share_ref, share_server, host, + dest_driver_migration_info, notify): + """Is called to perform 1st phase of driver migration of a given share. Driver should implement this method if willing to perform migration in an optimized way, useful for when driver understands destination backend. :param context: The 'context.RequestContext' object for the request. :param share_ref: Reference to the share being migrated. + :param share_server: Share server model or None. :param host: Destination host and its capabilities. :param dest_driver_migration_info: Migration information provided by destination host. + :param notify: whether the migration should complete or wait for + 2nd phase call. Driver may throw exception when validating this + parameter, exception if does not support 1-phase or 2-phase approach. :returns: Boolean value indicating if driver migration succeeded. :returns: Dictionary containing a model update. """ return None, None - def get_driver_migration_info(self, context, share, share_server): + def migration_complete(self, context, share_ref, share_server, + dest_driver_migration_info): + """Is called to perform 2nd phase of driver migration of a given share. + + If driver is implementing 2-phase migration, this method should + perform tasks related to the 2nd phase of migration, thus completing + it. + :param context: The 'context.RequestContext' object for the request. + :param share_ref: Reference to the share being migrated. + :param share_server: Share server model or None. + :param dest_driver_migration_info: Migration information provided by + destination host. + :returns: Dictionary containing a model update. + """ + return None + + def migration_cancel(self, context, share_ref, share_server, + dest_driver_migration_info): + """Is called to cancel driver migration. + + If possible, driver can implement a way to cancel an in-progress + migration. + :param context: The 'context.RequestContext' object for the request. + :param share_ref: Reference to the share being migrated. + :param share_server: Share server model or None. + :param dest_driver_migration_info: Migration information provided by + destination host. + """ + raise NotImplementedError() + + def migration_get_progress(self, context, share_ref, share_server, + dest_driver_migration_info): + """Is called to get migration progress. + + If possible, driver can implement a way to return migration progress + information. 
+ :param context: The 'context.RequestContext' object for the request. + :param share_ref: Reference to the share being migrated. + :param share_server: Share server model or None. + :param dest_driver_migration_info: Migration information provided by + destination host. + :return: A dictionary with 'total_progress' field containing the + percentage value. + """ + raise NotImplementedError() + + def migration_get_driver_info(self, context, share, share_server): """Is called to provide necessary driver migration logic. :param context: The 'context.RequestContext' object for the request. @@ -350,7 +386,7 @@ class ShareDriver(object): """ return None - def get_migration_info(self, context, share, share_server): + def migration_get_info(self, context, share, share_server): """Is called to provide necessary generic migration logic. :param context: The 'context.RequestContext' object for the request. @@ -358,251 +394,39 @@ class ShareDriver(object): :param share_server: Share server model or None. :return: A dictionary with migration information. """ - mount_cmd = self._get_mount_command(context, share, share_server) + mount_template = self._get_mount_command(context, share, share_server) - umount_cmd = self._get_unmount_command(context, share, share_server) + unmount_template = self._get_unmount_command(context, share, + share_server) - access = self._get_access_rule_for_data_copy( - context, share, share_server) - return {'mount': mount_cmd, - 'umount': umount_cmd, - 'access': access} + return {'mount': mount_template, + 'unmount': unmount_template} def _get_mount_command(self, context, share_instance, share_server): """Is called to delegate mounting share logic.""" - mount_cmd = self._get_mount_command_protocol(share_instance, - share_server) - mount_ip = self._get_mount_ip(share_instance, share_server) - mount_cmd.append(mount_ip) + mount_template = self.configuration.safe_get('share_mount_template') - mount_path = self.configuration.safe_get( - 'migration_tmp_location') + share_instance['id'] - mount_cmd.append(mount_path) + mount_export = self._get_mount_export(share_instance, share_server) - return mount_cmd + format_template = {'proto': share_instance['share_proto'].lower(), + 'export': mount_export, + 'path': '%(path)s'} - def _get_mount_command_protocol(self, share_instance, share_server): - mount_cmd = self.configuration.safe_get( - 'migration_protocol_mount_command') - if mount_cmd: - return mount_cmd.split() - else: - return ['mount', '-t', share_instance['share_proto'].lower()] - - def _get_mount_ip(self, share_instance, share_server): + return mount_template % format_template + def _get_mount_export(self, share_instance, share_server): + # NOTE(ganso): If drivers want to override the export_location IP, + # they can do so using this configuration. This method can also be + # overridden if necessary. path = next((x['path'] for x in share_instance['export_locations'] if x['is_admin_only']), None) if not path: - mount_ip = self.configuration.safe_get( - 'migration_mounting_backend_ip') path = share_instance['export_locations'][0]['path'] - if mount_ip: - # NOTE(ganso): Does not currently work with hostnames and ipv6. 
- p = re.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}") - path = p.sub(mount_ip, path) return path def _get_unmount_command(self, context, share_instance, share_server): - return ['umount', - self.configuration.safe_get('migration_tmp_location') - + share_instance['id']] - - def _get_access_rule_for_data_copy( - self, context, share_instance, share_server): - """Is called to obtain access rule so data copy node can mount.""" - # Note(ganso): The current method implementation is intended to work - # with Data Copy Service approach. If Manila Node is used for copying, - # then DHSS = true drivers may need to override this method. - service_ip = self.configuration.safe_get('migration_data_copy_node_ip') - return {'access_type': 'ip', - 'access_level': 'rw', - 'access_to': service_ip} - - def copy_share_data(self, context, helper, share, share_instance, - share_server, new_share_instance, new_share_server, - migration_info_src, migration_info_dest): - """Copies share data of a given share to a new share. - - :param context: The 'context.RequestContext' object for the request. - :param helper: instance of a share migration helper. - :param share: the share to copy. - :param share_instance: current instance holding the share. - :param share_server: current share_server hosting the share. - :param new_share_instance: share instance to copy data to. - :param new_share_server: share server that hosts destination share. - :param migration_info_src: migration information (source). - :param migration_info_dest: migration information (destination). - """ - - # NOTE(ganso): This method is here because it is debatable if it can - # be overridden by a driver or not. Personally I think it should not, - # else it would be possible to lose compatibility with generic - # migration between backends, but allows the driver to use it on its - # own implementation if it wants to. - - migrated = False - - mount_path = self.configuration.safe_get('migration_tmp_location') - - src_access = migration_info_src['access'] - dest_access = migration_info_dest['access'] - - if None in (src_access['access_to'], dest_access['access_to']): - msg = _("Access rules not appropriate for mounting share instances" - " for migration of share %(share_id)s," - " source share access: %(src_ip)s, destination share" - " access: %(dest_ip)s. Aborting.") % { - 'src_ip': src_access['access_to'], - 'dest_ip': dest_access['access_to'], - 'share_id': share['id']} - raise exception.ShareMigrationFailed(reason=msg) - - # NOTE(ganso): Removing any previously conflicting access rules, which - # would cause the following access_allow to fail for one instance. - helper.deny_migration_access(None, src_access, share_instance) - helper.deny_migration_access(None, dest_access, new_share_instance) - - # NOTE(ganso): I would rather allow access to instances separately, - # but I require an access_id since it is a new access rule and - # destination manager must receive an access_id. I can either move - # this code to manager code so I can create the rule in DB manually, - # or ignore duplicate access rule errors for some specific scenarios. 
- - try: - src_access_ref = helper.allow_migration_access( - src_access, share_instance) - except Exception as e: - LOG.error(_LE("Share migration failed attempting to allow " - "access of %(access_to)s to share " - "instance %(instance_id)s.") % { - 'access_to': src_access['access_to'], - 'instance_id': share_instance['id']}) - msg = six.text_type(e) - LOG.exception(msg) - raise exception.ShareMigrationFailed(reason=msg) - - try: - dest_access_ref = helper.allow_migration_access( - dest_access, new_share_instance) - except Exception as e: - LOG.error(_LE("Share migration failed attempting to allow " - "access of %(access_to)s to share " - "instance %(instance_id)s.") % { - 'access_to': dest_access['access_to'], - 'instance_id': new_share_instance['id']}) - msg = six.text_type(e) - LOG.exception(msg) - helper.cleanup_migration_access( - src_access_ref, src_access, share_instance) - raise exception.ShareMigrationFailed(reason=msg) - - # NOTE(ganso): From here we have the possibility of not cleaning - # anything when facing an error. At this moment, we have the - # destination instance in "inactive" state, while we are performing - # operations on the source instance. I think it is best to not clean - # the instance, leave it in "inactive" state, but try to clean - # temporary access rules, mounts, folders, etc, since no additional - # harm is done. - - def _mount_for_migration(migration_info): - - try: - utils.execute(*migration_info['mount'], run_as_root=True) - except Exception: - LOG.error(_LE("Failed to mount temporary folder for " - "migration of share instance " - "%(share_instance_id)s " - "to %(new_share_instance_id)s") % { - 'share_instance_id': share_instance['id'], - 'new_share_instance_id': new_share_instance['id']}) - helper.cleanup_migration_access( - src_access_ref, src_access, share_instance) - helper.cleanup_migration_access( - dest_access_ref, dest_access, new_share_instance) - raise - - utils.execute('mkdir', '-p', - ''.join((mount_path, share_instance['id']))) - - utils.execute('mkdir', '-p', - ''.join((mount_path, new_share_instance['id']))) - - # NOTE(ganso): mkdir command sometimes returns faster than it - # actually runs, so we better sleep for 1 second. 
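The temporary mount handling being deleted from copy_share_data has an equivalent in the new data service. The snippet below is a hypothetical reconstruction of that behaviour, inferred only from the DataServiceHelper tests added later in this patch (create the per-instance directory, then run the formatted mount template as root); it is not the actual helper code, and the function name is made up for illustration:

    import os

    from manila import utils

    def mount_share_instance_sketch(mount_template, base_path, instance_id):
        # e.g. mount_template='mount %(path)s', base_path='/tmp/',
        # instance_id='fake_id' runs 'mount /tmp/fake_id' as root.
        path = ''.join((base_path, instance_id))
        if not os.path.exists(path):
            os.makedirs(path)
        command = mount_template % {'path': path}
        utils.execute(*command.split(), run_as_root=True)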
- - time.sleep(1) - - try: - _mount_for_migration(migration_info_src) - except Exception as e: - LOG.error(_LE("Share migration failed attempting to mount " - "share instance %s.") % share_instance['id']) - msg = six.text_type(e) - LOG.exception(msg) - helper.cleanup_temp_folder(share_instance, mount_path) - helper.cleanup_temp_folder(new_share_instance, mount_path) - raise exception.ShareMigrationFailed(reason=msg) - - try: - _mount_for_migration(migration_info_dest) - except Exception as e: - LOG.error(_LE("Share migration failed attempting to mount " - "share instance %s.") % new_share_instance['id']) - msg = six.text_type(e) - LOG.exception(msg) - helper.cleanup_unmount_temp_folder(share_instance, - migration_info_src) - helper.cleanup_temp_folder(share_instance, mount_path) - helper.cleanup_temp_folder(new_share_instance, mount_path) - raise exception.ShareMigrationFailed(reason=msg) - - try: - ignore_list = self.configuration.safe_get('migration_ignore_files') - copy = share_utils.Copy(mount_path + share_instance['id'], - mount_path + new_share_instance['id'], - ignore_list) - copy.run() - if copy.get_progress()['total_progress'] == 100: - migrated = True - - except Exception as e: - LOG.exception(six.text_type(e)) - LOG.error(_LE("Failed to copy files for " - "migration of share instance %(share_instance_id)s " - "to %(new_share_instance_id)s") % { - 'share_instance_id': share_instance['id'], - 'new_share_instance_id': new_share_instance['id']}) - - # NOTE(ganso): For some reason I frequently get AMQP errors after - # copying finishes, which seems like is the service taking too long to - # copy while not replying heartbeat messages, so AMQP closes the - # socket. There is no impact, it just shows a big trace and AMQP - # reconnects after, although I would like to prevent this situation - # without the use of additional threads. Suggestions welcome. - - utils.execute(*migration_info_src['umount'], run_as_root=True) - utils.execute(*migration_info_dest['umount'], run_as_root=True) - - utils.execute('rmdir', ''.join((mount_path, share_instance['id'])), - check_exit_code=False) - utils.execute('rmdir', ''.join((mount_path, new_share_instance['id'])), - check_exit_code=False) - - helper.deny_migration_access( - src_access_ref, src_access, share_instance) - helper.deny_migration_access( - dest_access_ref, dest_access, new_share_instance) - - if not migrated: - msg = ("Copying from share instance %(instance_id)s " - "to %(new_instance_id)s did not succeed." 
% { - 'instance_id': share_instance['id'], - 'new_instance_id': new_share_instance['id']}) - raise exception.ShareMigrationFailed(reason=msg) - - LOG.debug("Copying completed in migration for share %s.", share['id']) + return self.configuration.safe_get('share_unmount_template') def create_share(self, context, share, share_server=None): """Is called to create share.""" diff --git a/manila/share/drivers/huawei/v3/connection.py b/manila/share/drivers/huawei/v3/connection.py index c3343572a0..87c9fe43eb 100644 --- a/manila/share/drivers/huawei/v3/connection.py +++ b/manila/share/drivers/huawei/v3/connection.py @@ -27,6 +27,7 @@ from oslo_utils import units import six from manila.common import constants as common_constants +from manila.data import utils as data_utils from manila import exception from manila.i18n import _ from manila.i18n import _LE @@ -528,9 +529,7 @@ class V3StorageConnection(driver.HuaweiBase): src_path, dst_path) try: ignore_list = '' - copy = share_utils.Copy(src_path, - dst_path, - ignore_list) + copy = data_utils.Copy(src_path, dst_path, ignore_list) copy.run() if copy.get_progress()['total_progress'] == 100: copy_finish = True diff --git a/manila/share/manager.py b/manila/share/manager.py index fc7d26ab57..6d68012101 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -34,6 +34,7 @@ import six from manila.common import constants from manila import context +from manila.data import rpcapi as data_rpcapi from manila import exception from manila.i18n import _ from manila.i18n import _LE @@ -167,7 +168,7 @@ def add_hooks(f): class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" - RPC_API_VERSION = '1.9' + RPC_API_VERSION = '1.10' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" @@ -192,10 +193,9 @@ class ShareManager(manager.SchedulerDependentManager): context=ctxt, backend_host=self.host, config_group=self.configuration.config_group ) - self.driver = importutils.import_object( share_driver, private_storage=private_storage, - configuration=self.configuration + configuration=self.configuration, ) self.access_helper = access.ShareInstanceAccess(self.db, self.driver) @@ -558,183 +558,354 @@ class ShareManager(manager.SchedulerDependentManager): else: return None - def get_migration_info(self, ctxt, share_instance_id, share_server): + @utils.require_driver_initialized + def migration_get_info(self, context, share_instance_id): share_instance = self.db.share_instance_get( - ctxt, share_instance_id, with_share_data=True) - return self.driver.get_migration_info(ctxt, share_instance, + context, share_instance_id, with_share_data=True) + + share_server = None + if share_instance.get('share_server_id'): + share_server = self.db.share_server_get( + context, share_instance['share_server_id']) + + return self.driver.migration_get_info(context, share_instance, share_server) - def get_driver_migration_info(self, ctxt, share_instance_id, share_server): + @utils.require_driver_initialized + def migration_get_driver_info(self, context, share_instance_id): share_instance = self.db.share_instance_get( - ctxt, share_instance_id, with_share_data=True) - return self.driver.get_driver_migration_info(ctxt, share_instance, + context, share_instance_id, with_share_data=True) + + share_server = None + if share_instance.get('share_server_id'): + share_server = self.db.share_server_get( + context, share_instance['share_server_id']) + + return 
self.driver.migration_get_driver_info(context, share_instance, share_server) @utils.require_driver_initialized - def migrate_share(self, ctxt, share_id, host, force_host_copy=False): + def migration_start(self, context, share_id, host, force_host_copy, + notify=True): """Migrates a share from current host to another host.""" - LOG.debug("Entered migrate_share method for share %s.", share_id) - - # NOTE(ganso): Cinder checks if driver is initialized before doing - # anything. This might not be needed, as this code may not be reached - # if driver service is not running. If for any reason service is - # running but driver is not, the following code should fail at specific - # points, which would be effectively the same as throwing an - # exception here. - - rpcapi = share_rpcapi.ShareAPI() - share_ref = self.db.share_get(ctxt, share_id) - share_instance = self._get_share_instance(ctxt, share_ref) - moved = False - msg = None + LOG.debug("Entered migration_start method for share %s.", share_id) self.db.share_update( - ctxt, share_ref['id'], - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS}) + context, share_id, + {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}) + + rpcapi = share_rpcapi.ShareAPI() + share_ref = self.db.share_get(context, share_id) + share_instance = self._get_share_instance(context, share_ref) + moved = False + + self.db.share_instance_update(context, share_instance['id'], + {'status': constants.STATUS_MIGRATING}) if not force_host_copy: + try: - share_server = self._get_share_server(ctxt.elevated(), - share_instance) - share_server = { - 'id': share_server['id'], - 'share_network_id': share_server['share_network_id'], - 'host': share_server['host'], - 'status': share_server['status'], - 'backend_details': share_server['backend_details'], - } if share_server else share_server + dest_driver_migration_info = rpcapi.migration_get_driver_info( + context, share_instance) - dest_driver_migration_info = rpcapi.get_driver_migration_info( - ctxt, share_instance, share_server) + share_server = self._get_share_server(context.elevated(), + share_instance) LOG.debug("Calling driver migration for share %s.", share_id) - moved, model_update = self.driver.migrate_share( - ctxt, share_instance, host, dest_driver_migration_info) + self.db.share_update( + context, share_id, + {'task_state': ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) + + moved, model_update = self.driver.migration_start( + context, share_instance, share_server, host, + dest_driver_migration_info, notify) + + if moved and not notify: + self.db.share_update( + context, share_ref['id'], + {'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE}) # NOTE(ganso): Here we are allowing the driver to perform # changes even if it has not performed migration. While this - # scenario may not be valid, I am not sure if it should be + # scenario may not be valid, I do not think it should be # forcefully prevented. if model_update: - self.db.share_instance_update(ctxt, share_instance['id'], - model_update) + self.db.share_instance_update( + context, share_instance['id'], model_update) - except exception.ManilaException as e: + except Exception as e: msg = six.text_type(e) LOG.exception(msg) + LOG.warning(_LW("Driver did not migrate share %s. 
Proceeding " + "with generic migration approach.") % share_id) if not moved: try: LOG.debug("Starting generic migration " "for share %s.", share_id) - moved = self._migrate_share_generic(ctxt, share_ref, host) - except Exception as e: - msg = six.text_type(e) + self._migration_start_generic(context, share_ref, + share_instance, host, notify) + except Exception: + msg = _("Generic migration failed for share %s.") % share_id LOG.exception(msg) - LOG.error(_LE("Generic migration failed for" - " share %s.") % share_id) + self.db.share_update( + context, share_id, + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + self.db.share_instance_update( + context, share_instance['id'], + {'status': constants.STATUS_AVAILABLE}) + raise exception.ShareMigrationFailed(reason=msg) - if moved: - self.db.share_update( - ctxt, share_id, - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_SUCCESS}) - - LOG.info(_LI("Share Migration for share %s" - " completed successfully.") % share_id) - else: - self.db.share_update( - ctxt, share_id, - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_ERROR}) - raise exception.ShareMigrationFailed(reason=msg) - - def _migrate_share_generic(self, context, share, host): + def _migration_start_generic(self, context, share, share_instance, host, + notify): rpcapi = share_rpcapi.ShareAPI() - share_instance = self._get_share_instance(context, share) + helper = migration.ShareMigrationHelper(context, self.db, share) - access_rule_timeout = self.driver.configuration.safe_get( - 'migration_wait_access_rules_timeout') - - create_delete_timeout = self.driver.configuration.safe_get( - 'migration_create_delete_share_timeout') - - helper = migration.ShareMigrationHelper( - context, self.db, create_delete_timeout, - access_rule_timeout, share) - - # NOTE(ganso): We are going to save all access rules prior to removal. - # Since we may have several instances of the same share, it may be - # a good idea to limit or remove all instances/replicas' access - # so they remain unchanged as well during migration. 
+ share_server = self._get_share_server(context.elevated(), + share_instance) readonly_support = self.driver.configuration.safe_get( - 'migration_readonly_support') + 'migration_readonly_rules_support') - saved_rules = helper.change_to_read_only(readonly_support, - share_instance) + helper.change_to_read_only(share_instance, share_server, + readonly_support, self.driver) try: - new_share_instance = helper.create_instance_and_wait( - context, share, share_instance, host) + share, share_instance, host) self.db.share_instance_update( context, new_share_instance['id'], - {'status': constants.STATUS_INACTIVE} - ) + {'status': constants.STATUS_MIGRATING_TO}) + + except Exception: + msg = _("Failed to create instance on destination " + "backend during migration of share %s.") % share['id'] + LOG.exception(msg) + helper.cleanup_access_rules(share_instance, share_server, + self.driver) + raise exception.ShareMigrationFailed(reason=msg) + + ignore_list = self.driver.configuration.safe_get( + 'migration_ignore_files') + + data_rpc = data_rpcapi.DataAPI() + + try: + src_migration_info = self.driver.migration_get_info( + context, share_instance, share_server) + + dest_migration_info = rpcapi.migration_get_info( + context, new_share_instance) LOG.debug("Time to start copying in migration" " for share %s.", share['id']) - share_server = self._get_share_server(context.elevated(), - share_instance) - new_share_server = self._get_share_server(context.elevated(), - new_share_instance) - new_share_server = { - 'id': new_share_server['id'], - 'share_network_id': new_share_server['share_network_id'], - 'host': new_share_server['host'], - 'status': new_share_server['status'], - 'backend_details': new_share_server['backend_details'], - } if new_share_server else new_share_server + data_rpc.migration_start( + context, share['id'], ignore_list, share_instance['id'], + new_share_instance['id'], src_migration_info, + dest_migration_info, notify) - src_migration_info = self.driver.get_migration_info( - context, share_instance, share_server) + except Exception: + msg = _("Failed to obtain migration info from backends or" + " invoking Data Service for migration of " + "share %s.") % share['id'] + LOG.exception(msg) + helper.cleanup_new_instance(new_share_instance) + helper.cleanup_access_rules(share_instance, share_server, + self.driver) + raise exception.ShareMigrationFailed(reason=msg) - dest_migration_info = rpcapi.get_migration_info( - context, new_share_instance, new_share_server) + @utils.require_driver_initialized + def migration_complete(self, context, share_id, share_instance_id, + new_share_instance_id): - self.driver.copy_share_data(context, helper, share, share_instance, - share_server, new_share_instance, - new_share_server, src_migration_info, - dest_migration_info) + LOG.info(_LI("Received request to finish Share Migration for " + "share %s."), share_id) - except Exception as e: - LOG.exception(six.text_type(e)) - LOG.error(_LE("Share migration failed, reverting access rules for " - "share %s.") % share['id']) - helper.revert_access_rules(readonly_support, share_instance, None, - saved_rules) - raise + share_ref = self.db.share_get(context, share_id) - helper.revert_access_rules(readonly_support, share_instance, - new_share_instance, saved_rules) + if share_ref['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE): + + rpcapi = share_rpcapi.ShareAPI() + + share_instance = self._get_share_instance(context, share_ref) + + share_server = self._get_share_server(context, share_instance) + + 
try: + dest_driver_migration_info = rpcapi.migration_get_driver_info( + context, share_instance) + + model_update = self.driver.migration_complete( + context, share_instance, share_server, + dest_driver_migration_info) + if model_update: + self.db.share_instance_update( + context, share_instance['id'], model_update) + self.db.share_update( + context, share_id, + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) + except Exception: + msg = _("Driver migration completion failed for" + " share %s.") % share_id + LOG.exception(msg) + self.db.share_update( + context, share_id, + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + raise exception.ShareMigrationFailed(reason=msg) + + else: + try: + self._migration_complete( + context, share_ref, share_instance_id, + new_share_instance_id) + except Exception: + msg = _("Generic migration completion failed for" + " share %s.") % share_id + LOG.exception(msg) + self.db.share_update( + context, share_id, + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + self.db.share_instance_update( + context, share_instance_id, + {'status': constants.STATUS_AVAILABLE}) + raise exception.ShareMigrationFailed(reason=msg) + + def _migration_complete(self, context, share_ref, share_instance_id, + new_share_instance_id): + + share_instance = self.db.share_instance_get( + context, share_instance_id, with_share_data=True) + new_share_instance = self.db.share_instance_get( + context, new_share_instance_id, with_share_data=True) + + share_server = self._get_share_server(context, share_instance) + + helper = migration.ShareMigrationHelper(context, self.db, share_ref) + + task_state = share_ref['task_state'] + if task_state in (constants.TASK_STATE_DATA_COPYING_ERROR, + constants.TASK_STATE_DATA_COPYING_CANCELLED): + msg = _("Data copy of generic migration for share %s has not " + "completed successfully.") % share_ref['id'] + LOG.warning(msg) + helper.cleanup_new_instance(new_share_instance) + + helper.cleanup_access_rules(share_instance, share_server, + self.driver) + if task_state == constants.TASK_STATE_DATA_COPYING_CANCELLED: + self.db.share_instance_update( + context, share_instance_id, + {'status': constants.STATUS_AVAILABLE}) + self.db.share_update( + context, share_ref['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) + + LOG.info(_LI("Share Migration for share %s" + " was cancelled."), share_ref['id']) + return + else: + raise exception.ShareMigrationFailed(reason=msg) + + elif task_state != constants.TASK_STATE_DATA_COPYING_COMPLETED: + msg = _("Data copy for migration of share %s not completed" + " yet.") % share_ref['id'] + LOG.error(msg) + raise exception.ShareMigrationFailed(reason=msg) + + try: + helper.apply_new_access_rules(share_instance, new_share_instance) + except Exception: + msg = _("Failed to apply new access rules during migration " + "of share %s.") % share_ref['id'] + LOG.exception(msg) + helper.cleanup_new_instance(new_share_instance) + helper.cleanup_access_rules(share_instance, share_server, + self.driver) + raise exception.ShareMigrationFailed(reason=msg) self.db.share_update( - context, share['id'], - {'task_state': constants.STATUS_TASK_STATE_MIGRATION_COMPLETING}) + context, share_ref['id'], + {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}) - self.db.share_instance_update(context, new_share_instance['id'], + self.db.share_instance_update(context, new_share_instance_id, {'status': constants.STATUS_AVAILABLE}) - helper.delete_instance_and_wait(context, share_instance) + 
self.db.share_instance_update(context, share_instance_id, + {'status': constants.STATUS_INACTIVE}) - return True + helper.delete_instance_and_wait(share_instance) + + self.db.share_update( + context, share_ref['id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) + + LOG.info(_LI("Share Migration for share %s" + " completed successfully."), share_ref['id']) + + @utils.require_driver_initialized + def migration_cancel(self, context, share_id): + + share_ref = self.db.share_get(context, share_id) + + # Confirm that it is driver migration scenario + if share_ref['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + + share_server = None + if share_ref.instance.get('share_server_id'): + share_server = self.db.share_server_get( + context, share_ref.instance['share_server_id']) + + share_rpc = share_rpcapi.ShareAPI() + + driver_migration_info = share_rpc.migration_get_driver_info( + context, share_ref.instance) + + self.driver.migration_cancel( + context, share_ref.instance, share_server, + driver_migration_info) + else: + msg = _("Driver is not performing migration for" + " share %s") % share_id + raise exception.InvalidShare(reason=msg) + + @utils.require_driver_initialized + def migration_get_progress(self, context, share_id): + + share_ref = self.db.share_get(context, share_id) + + # Confirm that it is driver migration scenario + if share_ref['task_state'] == ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS): + + share_server = None + if share_ref.instance.get('share_server_id'): + share_server = self.db.share_server_get( + context, share_ref.instance['share_server_id']) + + share_rpc = share_rpcapi.ShareAPI() + + driver_migration_info = share_rpc.migration_get_driver_info( + context, share_ref.instance) + + return self.driver.migration_get_progress( + context, share_ref.instance, share_server, + driver_migration_info) + else: + msg = _("Driver is not performing migration for" + " share %s") % share_id + raise exception.InvalidShare(reason=msg) def _get_share_instance(self, context, share): if isinstance(share, six.string_types): diff --git a/manila/share/migration.py b/manila/share/migration.py index bdcfdee40e..5ae57b4928 100644 --- a/manila/share/migration.py +++ b/manila/share/migration.py @@ -16,44 +16,62 @@ import time +from oslo_config import cfg from oslo_log import log -import six from manila.common import constants from manila import exception from manila.i18n import _ -from manila.i18n import _LE from manila.i18n import _LW from manila.share import api as share_api -from manila import utils +import manila.utils as utils + LOG = log.getLogger(__name__) +migration_opts = [ + cfg.IntOpt( + 'migration_wait_access_rules_timeout', + default=180, + help="Time to wait for access rules to be allowed/denied on backends " + "when migrating shares using generic approach (seconds)."), + cfg.IntOpt( + 'migration_create_delete_share_timeout', + default=300, + help='Timeout for creating and deleting share instances ' + 'when performing share migration (seconds).'), +] + +CONF = cfg.CONF +CONF.register_opts(migration_opts) + class ShareMigrationHelper(object): - def __init__(self, context, db, create_delete_timeout, access_rule_timeout, - share): + def __init__(self, context, db, share): self.db = db self.share = share self.context = context self.api = share_api.API() - self.migration_create_delete_share_timeout = create_delete_timeout - self.migration_wait_access_rules_timeout = access_rule_timeout - def delete_instance_and_wait(self, context, share_instance): 
+ self.migration_create_delete_share_timeout = ( + CONF.migration_create_delete_share_timeout) + self.migration_wait_access_rules_timeout = ( + CONF.migration_wait_access_rules_timeout) - self.api.delete_instance(context, share_instance, True) + def delete_instance_and_wait(self, share_instance): + + self.api.delete_instance(self.context, share_instance, True) # Wait for deletion. starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout - tries = -1 + tries = 0 instance = "Something not None" - while instance: + while instance is not None: try: - instance = self.db.share_instance_get(context, + instance = self.db.share_instance_get(self.context, share_instance['id']) tries += 1 now = time.time() @@ -66,18 +84,17 @@ class ShareMigrationHelper(object): else: time.sleep(tries ** 2) - def create_instance_and_wait(self, context, share, share_instance, host): + def create_instance_and_wait(self, share, share_instance, host): - api = share_api.API() - - new_share_instance = api.create_instance( - context, share, share_instance['share_network_id'], host['host']) + new_share_instance = self.api.create_instance( + self.context, share, share_instance['share_network_id'], + host['host']) # Wait for new_share_instance to become ready starttime = time.time() deadline = starttime + self.migration_create_delete_share_timeout new_share_instance = self.db.share_instance_get( - context, new_share_instance['id'], with_share_data=True) + self.context, new_share_instance['id'], with_share_data=True) tries = 0 while new_share_instance['status'] != constants.STATUS_AVAILABLE: tries += 1 @@ -87,196 +104,112 @@ class ShareMigrationHelper(object): " (from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': host['host']} + self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) elif now > deadline: msg = _("Timeout creating new share instance " "(from %(share_id)s) on " "destination host %(host_name)s") % { 'share_id': share['id'], 'host_name': host['host']} + self.cleanup_new_instance(new_share_instance) raise exception.ShareMigrationFailed(reason=msg) else: time.sleep(tries ** 2) new_share_instance = self.db.share_instance_get( - context, new_share_instance['id'], with_share_data=True) + self.context, new_share_instance['id'], with_share_data=True) return new_share_instance - def deny_rules_and_wait(self, context, share_instance, saved_rules): + def _add_rules_and_wait(self, share_instance, access_rules): - api = share_api.API() - api.deny_access_to_instance(context, share_instance, saved_rules) - - self.wait_for_access_update(share_instance) - - def add_rules_and_wait(self, context, share_instance, access_rules, - access_level=None): - rules = [] for access in access_rules: values = { - 'share_id': share_instance['share_id'], + 'share_id': self.share['id'], 'access_type': access['access_type'], - 'access_level': access_level or access['access_level'], - 'access_to': access['access_to'], + 'access_level': access['access_level'], + 'access_to': access['access_to'] } - rules.append(self.db.share_access_create(context, values)) - self.api.allow_access_to_instance(context, share_instance, rules) - self.wait_for_access_update(share_instance) + # NOTE(ganso): Instance Access Mapping is created only on + # db.share_access_create. 
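Both delete_instance_and_wait and create_instance_and_wait above poll the database with a quadratic backoff until a deadline derived from the now config-driven timeouts. A generic sketch of that polling pattern, simplified from the helper code; the predicate and the exception type are placeholders, not Manila APIs:

    import time

    def wait_until(predicate, timeout):
        # Mirrors the helper's loop: sleep tries ** 2 seconds between polls
        # until the predicate holds or the deadline passes.
        deadline = time.time() + timeout
        tries = 0
        while not predicate():
            tries += 1
            if time.time() > deadline:
                raise RuntimeError('timed out after %s seconds' % timeout)
            time.sleep(tries ** 2)

    # e.g. wait_until(lambda: instance_is_gone(), 300) for deletion, with 300
    # coming from migration_create_delete_share_timeout.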
- def wait_for_access_update(self, share_instance): - starttime = time.time() - deadline = starttime + self.migration_wait_access_rules_timeout - tries = 0 + self.db.share_access_create(self.context, values) - while True: - instance = self.db.share_instance_get( - self.context, share_instance['id']) + self.api.allow_access_to_instance(self.context, share_instance, + access_rules) + utils.wait_for_access_update( + self.context, self.db, share_instance, + self.migration_wait_access_rules_timeout) - if instance['access_rules_status'] == constants.STATUS_ACTIVE: - break - - tries += 1 - now = time.time() - if instance['access_rules_status'] == constants.STATUS_ERROR: - msg = _("Failed to update access rules" - " on share instance %s") % share_instance['id'] - raise exception.ShareMigrationFailed(reason=msg) - elif now > deadline: - msg = _("Timeout trying to update access rules" - " on share instance %(share_id)s. Timeout " - "was %(timeout)s seconds.") % { - 'share_id': share_instance['id'], - 'timeout': self.migration_wait_access_rules_timeout} - raise exception.ShareMigrationFailed(reason=msg) - else: - time.sleep(tries ** 2) - - def allow_migration_access(self, access, share_instance): - - values = { - 'share_id': self.share['id'], - 'access_type': access['access_type'], - 'access_level': access['access_level'], - 'access_to': access['access_to'] - } - - share_access_list = self.db.share_access_get_all_by_type_and_access( - self.context, self.share['id'], access['access_type'], - access['access_to']) - - if len(share_access_list) == 0: - access_ref = self.db.share_access_create(self.context, values) - else: - access_ref = share_access_list[0] - - self.api.allow_access_to_instance( - self.context, share_instance, access_ref) - - self.wait_for_access_update(share_instance) - - return access_ref - - def deny_migration_access(self, access_ref, access, share_instance): - if access_ref: - try: - # Update status - access_ref = self.api.access_get( - self.context, access_ref['id']) - except exception.NotFound: - access_ref = None - LOG.warning(_LW("Access rule not found. 
" - "Access %(access_to)s - Share " - "%(share_id)s") % { - 'access_to': access['access_to'], - 'share_id': self.share['id']}) - else: - access_list = self.api.access_get_all(self.context, self.share) - for access_item in access_list: - if access_item['access_to'] == access['access_to']: - access_ref = access_item - break - if access_ref: - self.api.deny_access_to_instance( - self.context, share_instance, access_ref) - self.wait_for_access_update(share_instance) - - # NOTE(ganso): Cleanup methods do not throw exception, since the + # NOTE(ganso): Cleanup methods do not throw exceptions, since the # exceptions that should be thrown are the ones that call the cleanup - def cleanup_migration_access(self, access_ref, access, share_instance): + def cleanup_new_instance(self, new_instance): try: - self.deny_migration_access(access_ref, access, share_instance) - except Exception as mae: - LOG.exception(six.text_type(mae)) - LOG.error(_LE("Could not cleanup access rule of share " - "%s") % self.share['id']) - - def cleanup_temp_folder(self, instance, mount_path): + self.delete_instance_and_wait(new_instance) + except Exception: + LOG.warning(_LW("Failed to cleanup new instance during generic" + " migration for share %s."), self.share['id']) + def cleanup_access_rules(self, share_instance, share_server, driver): try: - utils.execute('rmdir', mount_path + instance['id'], - check_exit_code=False) + self.revert_access_rules(share_instance, share_server, driver) + except Exception: + LOG.warning(_LW("Failed to cleanup access rules during generic" + " migration for share %s."), self.share['id']) - except Exception as tfe: - LOG.exception(six.text_type(tfe)) - LOG.error(_LE("Could not cleanup instance %(instance_id)s " - "temporary folders for migration of " - "share %(share_id)s") % { - 'instance_id': instance['id'], - 'share_id': self.share['id']}) - - def cleanup_unmount_temp_folder(self, instance, migration_info): - - try: - utils.execute(*migration_info['umount'], run_as_root=True) - except Exception as utfe: - LOG.exception(six.text_type(utfe)) - LOG.error(_LE("Could not unmount folder of instance" - " %(instance_id)s for migration of " - "share %(share_id)s") % { - 'instance_id': instance['id'], - 'share_id': self.share['id']}) - - def change_to_read_only(self, readonly_support, share_instance): + def change_to_read_only(self, share_instance, share_server, + readonly_support, driver): # NOTE(ganso): If the share does not allow readonly mode we # should remove all access rules and prevent any access - saved_rules = self.db.share_access_get_all_for_share( - self.context, self.share['id']) + rules = self.db.share_access_get_all_for_instance( + self.context, share_instance['id']) - if len(saved_rules) > 0: - self.deny_rules_and_wait(self.context, share_instance, saved_rules) + if len(rules) > 0: if readonly_support: LOG.debug("Changing all of share %s access rules " "to read-only.", self.share['id']) - self.add_rules_and_wait(self.context, share_instance, - saved_rules, 'ro') + for rule in rules: + rule['access_level'] = 'ro' - return saved_rules + driver.update_access(self.context, share_instance, rules, + add_rules=[], delete_rules=[], + share_server=share_server) + else: - def revert_access_rules(self, readonly_support, share_instance, - new_share_instance, saved_rules): + LOG.debug("Removing all access rules for migration of " + "share %s." 
% self.share['id']) - if len(saved_rules) > 0: - if readonly_support: + driver.update_access(self.context, share_instance, [], + add_rules=[], delete_rules=rules, + share_server=share_server) - readonly_rules = self.db.share_access_get_all_for_share( - self.context, self.share['id']) + def revert_access_rules(self, share_instance, share_server, driver): - LOG.debug("Removing all of share %s read-only " - "access rules.", self.share['id']) + rules = self.db.share_access_get_all_for_instance( + self.context, share_instance['id']) - self.deny_rules_and_wait(self.context, share_instance, - readonly_rules) + if len(rules) > 0: + LOG.debug("Restoring all of share %s access rules according to " + "DB.", self.share['id']) - if new_share_instance: - self.add_rules_and_wait(self.context, new_share_instance, - saved_rules) - else: - self.add_rules_and_wait(self.context, share_instance, - saved_rules) + driver.update_access(self.context, share_instance, rules, + add_rules=[], delete_rules=[], + share_server=share_server) + + def apply_new_access_rules(self, share_instance, new_share_instance): + + rules = self.db.share_access_get_all_for_instance( + self.context, share_instance['id']) + + if len(rules) > 0: + LOG.debug("Restoring all of share %s access rules according to " + "DB.", self.share['id']) + + self._add_rules_and_wait(new_share_instance, rules) diff --git a/manila/share/rpcapi.py b/manila/share/rpcapi.py index 8d019fb8d8..6b8685bc75 100644 --- a/manila/share/rpcapi.py +++ b/manila/share/rpcapi.py @@ -31,27 +31,32 @@ class ShareAPI(object): API version history: - 1.0 - Initial version. - 1.1 - Add manage_share() and unmanage_share() methods - 1.2 - Add extend_share() method - 1.3 - Add shrink_share() method - 1.4 - Introduce Share Instances: + 1.0 - Initial version. 
+ 1.1 - Add manage_share() and unmanage_share() methods + 1.2 - Add extend_share() method + 1.3 - Add shrink_share() method + 1.4 - Introduce Share Instances: create_share() -> create_share_instance() delete_share() -> delete_share_instance() Add share_instance argument to allow_access() & deny_access() - 1.5 - Add create_consistency_group, delete_consistency_group + 1.5 - Add create_consistency_group, delete_consistency_group create_cgsnapshot, and delete_cgsnapshot methods - 1.6 - Introduce Share migration: + 1.6 - Introduce Share migration: migrate_share() get_migration_info() get_driver_migration_info() - 1.7 - Update target call API in allow/deny access methods - 1.8 - Introduce Share Replication: + 1.7 - Update target call API in allow/deny access methods + 1.8 - Introduce Share Replication: create_share_replica() delete_share_replica() promote_share_replica() update_share_replica() - 1.9 - Add manage_snapshot() and unmanage_snapshot() methods + 1.9 - Add manage_snapshot() and unmanage_snapshot() methods + 1.10 - Add migration_complete(), migration_cancel() and + migration_get_progress(), rename migrate_share() to + migration_start(), rename get_migration_info() to + migration_get_info(), rename get_driver_migration_info() to + migration_get_driver_info() """ BASE_RPC_API_VERSION = '1.0' @@ -60,7 +65,7 @@ class ShareAPI(object): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, version_cap='1.9') + self.client = rpc.get_client(target, version_cap='1.10') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, @@ -111,32 +116,32 @@ class ShareAPI(object): 'delete_share_instance', share_instance_id=share_instance['id']) - def migrate_share(self, context, share, dest_host, force_host_copy): + def migration_start(self, context, share, dest_host, force_host_copy, + notify): new_host = utils.extract_host(share['instance']['host']) call_context = self.client.prepare(server=new_host, version='1.6') host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} call_context.cast(context, - 'migrate_share', + 'migration_start', share_id=share['id'], host=host_p, - force_host_copy=force_host_copy) + force_host_copy=force_host_copy, + notify=notify) - def get_migration_info(self, context, share_instance, share_server): + def migration_get_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.6') return call_context.call(context, - 'get_migration_info', - share_instance_id=share_instance['id'], - share_server=share_server) + 'migration_get_info', + share_instance_id=share_instance['id']) - def get_driver_migration_info(self, context, share_instance, share_server): + def migration_get_driver_info(self, context, share_instance): new_host = utils.extract_host(share_instance['host']) call_context = self.client.prepare(server=new_host, version='1.6') return call_context.call(context, - 'get_driver_migration_info', - share_instance_id=share_instance['id'], - share_server=share_server) + 'migration_get_driver_info', + share_instance_id=share_instance['id']) def delete_share_server(self, context, share_server): host = utils.extract_host(share_server['host']) @@ -268,3 +273,25 @@ class ShareAPI(object): 'update_share_replica', share_replica_id=share_replica['id'], share_id=share_replica['share_id']) + + def migration_complete(self, context, share, 
share_instance_id, + new_share_instance_id): + new_host = utils.extract_host(share['host']) + call_context = self.client.prepare(server=new_host, version='1.10') + call_context.cast(context, + 'migration_complete', + share_id=share['id'], + share_instance_id=share_instance_id, + new_share_instance_id=new_share_instance_id) + + def migration_cancel(self, context, share): + new_host = utils.extract_host(share['host']) + call_context = self.client.prepare(server=new_host, version='1.10') + call_context.call(context, 'migration_cancel', share_id=share['id']) + + def migration_get_progress(self, context, share): + new_host = utils.extract_host(share['host']) + call_context = self.client.prepare(server=new_host, version='1.10') + return call_context.call(context, + 'migration_get_progress', + share_id=share['id']) diff --git a/manila/share/utils.py b/manila/share/utils.py index 9ad66875db..b96218342f 100644 --- a/manila/share/utils.py +++ b/manila/share/utils.py @@ -16,11 +16,7 @@ """Share-related Utilities and helpers.""" -import os -import shutil - from oslo_log import log -import six LOG = log.getLogger(__name__) @@ -84,97 +80,3 @@ def append_host(host, pool): new_host = "#".join([host, pool]) return new_host - - -class Copy(object): - - def __init__(self, src, dest, ignore_list): - self.src = src - self.dest = dest - self.totalSize = 0 - self.currentSize = 0 - self.files = [] - self.dirs = [] - self.currentCopy = None - self.ignoreList = ignore_list - - def get_progress(self): - - if self.currentCopy is not None: - - try: - (mode, ino, dev, nlink, uid, gid, size, atime, mtime, - ctime) = os.stat(self.currentCopy['file_path']) - - except OSError: - size = 0 - - total_progress = 0 - if self.totalSize > 0: - total_progress = self.currentSize * 100 / self.totalSize - current_file_progress = 0 - if self.currentCopy['size'] > 0: - current_file_progress = size * 100 / self.currentCopy['size'] - current_file_path = six.text_type(self.currentCopy['file_path']) - - progress = { - 'total_progress': total_progress, - 'current_file_path': current_file_path, - 'current_file_progress': current_file_progress - } - - return progress - else: - return {'total_progress': 100} - - def run(self): - - self.explore(self.src) - self.copy(self.src, self.dest) - - LOG.info((six.text_type(self.get_progress()))) - - def copy(self, src, dest): - - # Create dirs with max permissions so files can be copied - for dir_item in self.dirs: - new_dir = dir_item['name'].replace(src, dest) - os.mkdir(new_dir) - - for file_item in self.files: - - file_path = file_item['name'].replace(src, dest) - self.currentCopy = {'file_path': file_path, - 'size': file_item['attr']} - - LOG.info(six.text_type(self.get_progress())) - - shutil.copy2(file_item['name'], - file_item['name'].replace(src, dest)) - self.currentSize += file_item['attr'] - - # Set permissions to dirs - for dir_item in self.dirs: - new_dir = dir_item['name'].replace(src, dest) - shutil.copystat(dir_item['name'], new_dir) - - def explore(self, path): - - for dirpath, dirnames, filenames in os.walk(path): - - for dirname in dirnames: - if dirname not in self.ignoreList: - dir_item = os.path.join(dirpath, dirname) - (mode, ino, dev, nlink, uid, gid, size, atime, mtime, - ctime) = os.stat(dir_item) - self.dirs.append({'name': dir_item, - 'attr': mode}) - - for filename in filenames: - if filename not in self.ignoreList: - file_item = os.path.join(dirpath, filename) - (mode, ino, dev, nlink, uid, gid, size, atime, mtime, - ctime) = os.stat(file_item) - self.files.append({'name': 
file_item, - 'attr': size}) - self.totalSize += size diff --git a/manila/tests/api/v2/test_shares.py b/manila/tests/api/v2/test_shares.py index ba5d8811ee..862359c38d 100644 --- a/manila/tests/api/v2/test_shares.py +++ b/manila/tests/api/v2/test_shares.py @@ -258,95 +258,358 @@ class ShareAPITest(test.TestCase): self.assertEqual("fakenetid", create_mock.call_args[1]['share_network_id']) - @ddt.data('2.5', '2.6', '2.7') - def test_migrate_share(self, version): + @ddt.data('2.6', '2.7', '2.14', '2.15') + def test_migration_start(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], - use_admin_context=True) + use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' - req.api_version_request = api_version.APIVersionRequest(version) req.api_version_request.experimental = True - if float(version) > 2.6: + + if api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.7")): + body = {'os-migrate_share': {'host': 'fake_host'}} + method = 'migrate_share_legacy' + elif api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.15")): body = {'migrate_share': {'host': 'fake_host'}} method = 'migrate_share' else: - body = {'os-migrate_share': {'host': 'fake_host'}} - method = 'migrate_share_legacy' - self.mock_object(share_api.API, 'migrate_share') - getattr(self.controller, method)(req, share['id'], body) + body = {'migration_start': {'host': 'fake_host'}} + method = 'migration_start' - def test_migrate_share_has_replicas(self): + self.mock_object(share_api.API, 'migration_start') + response = getattr(self.controller, method)(req, share['id'], body) + self.assertEqual(202, response.status_int) + + def test_migration_start_has_replicas(self): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], use_admin_context=True) req.method = 'POST' req.headers['content-type'] = 'application/json' - req.api_version_request = api_version.APIVersionRequest('2.10') + req.api_version_request = api_version.APIVersionRequest('2.11') req.api_version_request.experimental = True body = {'migrate_share': {'host': 'fake_host'}} - self.mock_object(share_api.API, 'migrate_share', + self.mock_object(share_api.API, 'migration_start', mock.Mock(side_effect=exception.Conflict(err='err'))) self.assertRaises(webob.exc.HTTPConflict, self.controller.migrate_share, req, share['id'], body) - @ddt.data('2.5', '2.6', '2.7') - def test_migrate_share_no_share_id(self, version): + @ddt.data('2.6', '2.7', '2.14', '2.15') + def test_migration_start_no_share_id(self, version): req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id', use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True - if float(version) > 2.6: - body = {'migrate_share': {}} + + if api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.7")): + body = {'os-migrate_share': {'host': 'fake_host'}} + method = 'migrate_share_legacy' + elif api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.15")): + body = {'migrate_share': {'host': 'fake_host'}} method = 'migrate_share' else: - body = {'os-migrate_share': {}} - method = 'migrate_share_legacy' - self.mock_object(share_api.API, 'migrate_share') + body = {'migration_start': {'host': 'fake_host'}} + method = 'migration_start' + + self.mock_object(share_api.API, 'migration_start') 
self.mock_object(share_api.API, 'get', mock.Mock(side_effect=[exception.NotFound])) self.assertRaises(webob.exc.HTTPNotFound, getattr(self.controller, method), req, 'fake_id', body) - @ddt.data('2.5', '2.6', '2.7') - def test_migrate_share_no_host(self, version): + @ddt.data('2.6', '2.7', '2.14', '2.15') + def test_migration_start_no_host(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], - use_admin_context=True) + use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' - req.api_version_request = api_version.APIVersionRequest(version) req.api_version_request.experimental = True - if float(version) > 2.6: + + if api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.7")): + body = {'os-migrate_share': {}} + method = 'migrate_share_legacy' + elif api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.15")): body = {'migrate_share': {}} method = 'migrate_share' else: - body = {'os-migrate_share': {}} - method = 'migrate_share_legacy' - self.mock_object(share_api.API, 'migrate_share') + body = {'migration_start': {}} + method = 'migration_start' + + self.mock_object(share_api.API, 'migration_start') self.assertRaises(webob.exc.HTTPBadRequest, getattr(self.controller, method), req, share['id'], body) - def test_migrate_share_no_host_invalid_force_host_copy(self): + @ddt.data('2.6', '2.7', '2.14', '2.15') + def test_migration_start_invalid_force_host_copy(self, version): share = db_utils.create_share() req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], - use_admin_context=True, version='2.7') + use_admin_context=True, version=version) req.method = 'POST' req.headers['content-type'] = 'application/json' req.api_version_request.experimental = True - body = {'os-migrate_share': {'host': 'fake_host', - 'force_host_copy': 'fake'}} - self.mock_object(share_api.API, 'migrate_share') + + if api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.7")): + body = {'os-migrate_share': {'host': 'fake_host', + 'force_host_copy': 'fake'}} + method = 'migrate_share_legacy' + elif api_version.APIVersionRequest(version) < ( + api_version.APIVersionRequest("2.15")): + body = {'migrate_share': {'host': 'fake_host', + 'force_host_copy': 'fake'}} + method = 'migrate_share' + else: + body = {'migration_start': {'host': 'fake_host', + 'force_host_copy': 'fake'}} + method = 'migration_start' + + self.mock_object(share_api.API, 'migration_start') self.assertRaises(webob.exc.HTTPBadRequest, - self.controller.migrate_share, + getattr(self.controller, method), req, share['id'], body) + def test_migration_start_invalid_notify(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_start': {'host': 'fake_host', + 'notify': 'error'}} + + self.mock_object(share_api.API, 'migration_start') + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.migration_start, req, share['id'], + body) + + def test_reset_task_state(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + update = 
{'task_state': constants.TASK_STATE_MIGRATION_ERROR} + body = {'reset_task_state': update} + + self.mock_object(db, 'share_update') + + response = self.controller.reset_task_state(req, share['id'], body) + + self.assertEqual(202, response.status_int) + + db.share_update.assert_called_once_with(utils.IsAMatcher( + context.RequestContext), share['id'], update) + + def test_reset_task_state_error_body(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + update = {'error': 'error'} + body = {'reset_task_state': update} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.reset_task_state, req, share['id'], + body) + + def test_reset_task_state_error_empty(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + update = {'task_state': None} + body = {'reset_task_state': update} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.reset_task_state, req, share['id'], + body) + + def test_reset_task_state_error_invalid(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + update = {'task_state': 'error'} + body = {'reset_task_state': update} + + self.assertRaises(webob.exc.HTTPBadRequest, + self.controller.reset_task_state, req, share['id'], + body) + + def test_reset_task_state_not_found(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR} + body = {'reset_task_state': update} + + self.mock_object(db, 'share_update', + mock.Mock(side_effect=exception.NotFound())) + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.reset_task_state, req, share['id'], + body) + + db.share_update.assert_called_once_with(utils.IsAMatcher( + context.RequestContext), share['id'], update) + + def test_migration_complete(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_complete': None} + + self.mock_object(share_api.API, 'get', + mock.Mock(return_value=share)) + + self.mock_object(share_api.API, 'migration_complete') + + response = self.controller.migration_complete(req, share['id'], body) + + self.assertEqual(202, response.status_int) + + share_api.API.migration_complete.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share) + + def test_migration_complete_not_found(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + 
req.api_version_request.experimental = True + + body = {'migration_complete': None} + + self.mock_object(share_api.API, 'get', + mock.Mock(side_effect=exception.NotFound())) + self.mock_object(share_api.API, 'migration_complete') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.migration_complete, req, share['id'], + body) + + def test_migration_cancel(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_cancel': None} + + self.mock_object(share_api.API, 'get', + mock.Mock(return_value=share)) + + self.mock_object(share_api.API, 'migration_cancel') + + response = self.controller.migration_cancel(req, share['id'], body) + + self.assertEqual(202, response.status_int) + + share_api.API.migration_cancel.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share) + + def test_migration_cancel_not_found(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_cancel': None} + + self.mock_object(share_api.API, 'get', + mock.Mock(side_effect=exception.NotFound())) + self.mock_object(share_api.API, 'migration_cancel') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.migration_cancel, req, share['id'], + body) + + def test_migration_get_progress(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_get_progress': None} + expected = {'total_progress': 'fake', + 'current_file_progress': 'fake', + 'current_file_path': 'fake', + } + + self.mock_object(share_api.API, 'get', + mock.Mock(return_value=share)) + + self.mock_object(share_api.API, 'migration_get_progress', + mock.Mock(return_value=expected)) + + response = self.controller.migration_get_progress(req, share['id'], + body) + + self.assertEqual(expected, response) + + share_api.API.migration_get_progress.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), share) + + def test_migration_get_progress_not_found(self): + share = db_utils.create_share() + req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'], + use_admin_context=True, version='2.15') + req.method = 'POST' + req.headers['content-type'] = 'application/json' + req.api_version_request.experimental = True + + body = {'migration_get_progress': None} + + self.mock_object(share_api.API, 'get', + mock.Mock(side_effect=exception.NotFound())) + self.mock_object(share_api.API, 'migration_get_progress') + + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.migration_get_progress, req, + share['id'], body) + def test_share_create_from_snapshot_without_share_net_no_parent(self): shr = { "size": 100, diff --git a/manila/tests/data/test_helper.py b/manila/tests/data/test_helper.py new file mode 100644 index 0000000000..48d2b27927 --- /dev/null +++ b/manila/tests/data/test_helper.py @@ -0,0 +1,279 @@ +# Copyright 2015 Hitachi Data Systems inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import ddt +import mock + +from manila.common import constants +from manila import context +from manila.data import helper as data_copy_helper +from manila import db +from manila import exception +from manila.share import rpcapi as share_rpc +from manila import test +from manila.tests import db_utils +from manila import utils + + +@ddt.ddt +class DataServiceHelperTestCase(test.TestCase): + """Tests DataServiceHelper.""" + + def setUp(self): + super(DataServiceHelperTestCase, self).setUp() + self.share = db_utils.create_share() + self.share_instance = db_utils.create_share_instance( + share_id=self.share['id'], + status=constants.STATUS_AVAILABLE) + self.context = context.get_admin_context() + self.access = db_utils.create_access(share_id=self.share['id']) + self.helper = data_copy_helper.DataServiceHelper( + self.context, db, self.share) + + def test_allow_data_access(self): + + access_create = {'access_type': self.access['access_type'], + 'access_to': self.access['access_to'], + 'access_level': self.access['access_level'], + 'share_id': self.access['share_id']} + + # mocks + self.mock_object( + self.helper.db, 'share_access_get_all_by_type_and_access', + mock.Mock(return_value=[self.access])) + + self.mock_object(self.helper, '_change_data_access_to_instance') + + self.mock_object(self.helper.db, 'share_access_create', + mock.Mock(return_value=self.access)) + + # run + self.helper._allow_data_access( + self.access, self.share_instance['id'], self.share_instance['id']) + + # asserts + self.helper.db.share_access_get_all_by_type_and_access.\ + assert_called_once_with( + self.context, self.share['id'], self.access['access_type'], + self.access['access_to']) + + self.helper.db.share_access_create.assert_called_once_with( + self.context, access_create) + + self.helper._change_data_access_to_instance.assert_has_calls( + [mock.call(self.share_instance['id'], self.access, allow=False), + mock.call(self.share_instance['id'], self.access, allow=True), + mock.call(self.share_instance['id'], self.access, allow=True)]) + + def test_deny_access_to_data_service(self): + + # mocks + self.mock_object(self.helper, '_change_data_access_to_instance') + + # run + self.helper.deny_access_to_data_service( + self.access, self.share_instance['id']) + + # asserts + self.helper._change_data_access_to_instance.\ + assert_called_once_with( + self.share_instance['id'], self.access, allow=False) + + @ddt.data(None, Exception('fake')) + def test_cleanup_data_access(self, exc): + + # mocks + self.mock_object(self.helper, 'deny_access_to_data_service', + mock.Mock(side_effect=exc)) + + self.mock_object(data_copy_helper.LOG, 'warning') + + # run + self.helper.cleanup_data_access(self.access, self.share_instance['id']) + + # asserts + self.helper.deny_access_to_data_service.assert_called_once_with( + self.access, self.share_instance['id']) + + if exc: + self.assertTrue(data_copy_helper.LOG.warning.called) + + @ddt.data(False, True) + def 
test_cleanup_temp_folder(self, exc): + + fake_path = ''.join(('/fake_path/', self.share_instance['id'])) + + # mocks + self.mock_object(os.path, 'exists', + mock.Mock(side_effect=[True, True, exc])) + self.mock_object(os, 'rmdir') + + self.mock_object(data_copy_helper.LOG, 'warning') + + # run + self.helper.cleanup_temp_folder( + self.share_instance['id'], '/fake_path/') + + # asserts + os.rmdir.assert_called_once_with(fake_path) + os.path.exists.assert_has_calls([ + mock.call(fake_path), + mock.call(fake_path), + mock.call(fake_path) + ]) + + if exc: + self.assertTrue(data_copy_helper.LOG.warning.called) + + @ddt.data(None, Exception('fake')) + def test_cleanup_unmount_temp_folder(self, exc): + + # mocks + self.mock_object(self.helper, 'unmount_share_instance', + mock.Mock(side_effect=exc)) + self.mock_object(data_copy_helper.LOG, 'warning') + + # run + self.helper.cleanup_unmount_temp_folder( + 'unmount_template', 'fake_path', self.share_instance['id']) + + # asserts + self.helper.unmount_share_instance.assert_called_once_with( + 'unmount_template', 'fake_path', self.share_instance['id']) + + if exc: + self.assertTrue(data_copy_helper.LOG.warning.called) + + @ddt.data(True, False) + def test__change_data_access_to_instance(self, allow): + + # mocks + self.mock_object(self.helper.db, 'share_instance_update_access_status') + + self.mock_object(self.helper.db, 'share_instance_get', + mock.Mock(return_value=self.share_instance)) + + if allow: + self.mock_object(share_rpc.ShareAPI, 'allow_access') + else: + self.mock_object(share_rpc.ShareAPI, 'deny_access') + + self.mock_object(utils, 'wait_for_access_update') + + # run + self.helper._change_data_access_to_instance( + self.share_instance['id'], self.access, allow=allow) + + # asserts + self.helper.db.share_instance_update_access_status.\ + assert_called_once_with(self.context, self.share_instance['id'], + constants.STATUS_OUT_OF_SYNC) + + self.helper.db.share_instance_get.assert_called_once_with( + self.context, self.share_instance['id'], with_share_data=True) + + if allow: + share_rpc.ShareAPI.allow_access.assert_called_once_with( + self.context, self.share_instance, self.access) + else: + share_rpc.ShareAPI.deny_access.assert_called_once_with( + self.context, self.share_instance, self.access) + + utils.wait_for_access_update.assert_called_once_with( + self.context, self.helper.db, self.share_instance, + data_copy_helper.CONF.data_access_wait_access_rules_timeout) + + @ddt.data({'proto': 'GLUSTERFS', 'conf': None}, + {'proto': 'GLUSTERFS', 'conf': 'cert'}, + {'proto': 'OTHERS', 'conf': None}, + {'proto': 'OTHERS', 'conf': 'ip'}) + @ddt.unpack + def test_allow_access_to_data_service(self, proto, conf): + + share = db_utils.create_share(share_proto=proto) + + access_allow = {'access_type': conf, + 'access_to': conf, + 'access_level': constants.ACCESS_LEVEL_RW} + + data_copy_helper.CONF.set_default('data_node_access_cert', conf) + data_copy_helper.CONF.set_default('data_node_access_ip', conf) + + # mocks + self.mock_object(self.helper, '_allow_data_access', + mock.Mock(return_value=self.access)) + + # run and asserts + if conf: + result = self.helper.allow_access_to_data_service( + share, 'ins1_id', 'ins2_id') + self.assertEqual(self.access, result) + self.helper._allow_data_access.assert_called_once_with( + access_allow, 'ins1_id', 'ins2_id') + else: + self.assertRaises(exception.ShareDataCopyFailed, + self.helper.allow_access_to_data_service, share, + 'ins1_id', 'ins2_id') + + def test_mount_share_instance(self): + + fake_path = 
''.join(('/fake_path/', self.share_instance['id'])) + + # mocks + self.mock_object(utils, 'execute') + self.mock_object(os.path, 'exists', mock.Mock( + side_effect=[False, False, True])) + self.mock_object(os, 'makedirs') + + # run + self.helper.mount_share_instance( + 'mount %(path)s', '/fake_path', self.share_instance['id']) + + # asserts + utils.execute.assert_called_once_with('mount', fake_path, + run_as_root=True) + + os.makedirs.assert_called_once_with(fake_path) + os.path.exists.assert_has_calls([ + mock.call(fake_path), + mock.call(fake_path), + mock.call(fake_path) + ]) + + def test_unmount_share_instance(self): + + fake_path = ''.join(('/fake_path/', self.share_instance['id'])) + + # mocks + self.mock_object(utils, 'execute') + self.mock_object(os.path, 'exists', mock.Mock( + side_effect=[True, True, False])) + self.mock_object(os, 'rmdir') + + # run + self.helper.unmount_share_instance( + 'unmount %(path)s', '/fake_path', self.share_instance['id']) + + # asserts + utils.execute.assert_called_once_with('unmount', fake_path, + run_as_root=True) + os.rmdir.assert_called_once_with(fake_path) + os.path.exists.assert_has_calls([ + mock.call(fake_path), + mock.call(fake_path), + mock.call(fake_path) + ]) diff --git a/manila/tests/data/test_manager.py b/manila/tests/data/test_manager.py index 02716caa26..f6b6d951be 100644 --- a/manila/tests/data/test_manager.py +++ b/manila/tests/data/test_manager.py @@ -15,27 +15,405 @@ """ Tests For Data Manager """ +import ddt +import mock -from oslo_config import cfg - +from manila.common import constants from manila import context +from manila.data import helper from manila.data import manager +from manila.data import utils as data_utils +from manila import db +from manila import exception +from manila.share import rpcapi as share_rpc from manila import test - -CONF = cfg.CONF +from manila.tests import db_utils +from manila import utils +@ddt.ddt class DataManagerTestCase(test.TestCase): """Test case for data manager.""" - manager_cls = manager.DataManager - def setUp(self): super(DataManagerTestCase, self).setUp() - self.manager = self.manager_cls() - self.context = context.RequestContext('fake_user', 'fake_project') + self.manager = manager.DataManager() + self.context = context.get_admin_context() self.topic = 'fake_topic' + self.share = db_utils.create_share() + manager.CONF.set_default('migration_tmp_location', '/tmp/') def test_init(self): manager = self.manager self.assertIsNotNone(manager) + + @ddt.data(constants.TASK_STATE_DATA_COPYING_COMPLETING, + constants.TASK_STATE_DATA_COPYING_STARTING, + constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + def test_init_host(self, status): + + share = db_utils.create_share( + task_state=status) + + # mocks + self.mock_object(db, 'share_get_all', mock.Mock( + return_value=[share])) + self.mock_object(db, 'share_update') + + # run + self.manager.init_host() + + # asserts + db.share_get_all.assert_called_once_with( + utils.IsAMatcher(context.RequestContext)) + + db.share_update.assert_called_with( + utils.IsAMatcher(context.RequestContext), share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) + + @ddt.data({'notify': True, 'exc': None}, + {'notify': False, 'exc': None}, + {'notify': 'fake', + 'exc': exception.ShareDataCopyCancelled(src_instance='ins1', + dest_instance='ins2')}, + {'notify': 'fake', 'exc': Exception('fake')}) + @ddt.unpack + def test_migration_start(self, notify, exc): + + # mocks + self.mock_object(db, 'share_get', mock.Mock(return_value=self.share)) + + 
self.mock_object(data_utils, 'Copy', + mock.Mock(return_value='fake_copy')) + + self.manager.busy_tasks_shares[self.share['id']] = 'fake_copy' + + self.mock_object(self.manager, '_copy_share_data', + mock.Mock(side_effect=exc)) + + self.mock_object(share_rpc.ShareAPI, 'migration_complete') + + if exc is not None and not isinstance( + exc, exception.ShareDataCopyCancelled): + self.mock_object(db, 'share_update') + + # run + if exc is None or isinstance(exc, exception.ShareDataCopyCancelled): + self.manager.migration_start( + self.context, [], self.share['id'], + 'ins1_id', 'ins2_id', 'info_src', 'info_dest', notify) + else: + self.assertRaises( + exception.ShareDataCopyFailed, self.manager.migration_start, + self.context, [], self.share['id'], 'ins1_id', 'ins2_id', + 'info_src', 'info_dest', notify) + + db.share_update.assert_called_once_with( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR}) + + # asserts + self.assertFalse(self.manager.busy_tasks_shares.get(self.share['id'])) + + self.manager._copy_share_data.assert_called_once_with( + self.context, 'fake_copy', self.share, 'ins1_id', 'ins2_id', + 'info_src', 'info_dest') + + if notify or exc: + share_rpc.ShareAPI.migration_complete.assert_called_once_with( + self.context, self.share, 'ins1_id', 'ins2_id') + + @ddt.data({'cancelled': False, 'exc': None}, + {'cancelled': False, 'exc': Exception('fake')}, + {'cancelled': True, 'exc': None}) + @ddt.unpack + def test__copy_share_data(self, cancelled, exc): + + access = db_utils.create_access(share_id=self.share['id']) + + migration_info_src = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + migration_info_dest = {'mount': 'mount_cmd_dest', + 'unmount': 'unmount_cmd_dest'} + + get_progress = {'total_progress': 100} + + # mocks + fake_copy = mock.MagicMock(cancelled=cancelled) + + self.mock_object(db, 'share_update') + + self.mock_object(helper.DataServiceHelper, + 'allow_access_to_data_service', + mock.Mock(return_value=access)) + + self.mock_object(helper.DataServiceHelper, 'mount_share_instance') + + self.mock_object(fake_copy, 'run', mock.Mock(side_effect=exc)) + + self.mock_object(fake_copy, 'get_progress', + mock.Mock(return_value=get_progress)) + + self.mock_object(helper.DataServiceHelper, 'unmount_share_instance', + mock.Mock(side_effect=Exception('fake'))) + + self.mock_object(helper.DataServiceHelper, + 'deny_access_to_data_service', + mock.Mock(side_effect=Exception('fake'))) + + extra_updates = None + + # run + if cancelled: + self.assertRaises( + exception.ShareDataCopyCancelled, + self.manager._copy_share_data, self.context, fake_copy, + self.share, 'ins1_id', 'ins2_id', migration_info_src, + migration_info_dest) + extra_updates = [ + mock.call( + self.context, self.share['id'], + {'task_state': + constants.TASK_STATE_DATA_COPYING_COMPLETING}), + mock.call( + self.context, self.share['id'], + {'task_state': + constants.TASK_STATE_DATA_COPYING_CANCELLED}) + ] + + elif exc: + self.assertRaises( + exception.ShareDataCopyFailed, self.manager._copy_share_data, + self.context, fake_copy, self.share, 'ins1_id', + 'ins2_id', migration_info_src, migration_info_dest) + + else: + self.manager._copy_share_data( + self.context, fake_copy, self.share, 'ins1_id', + 'ins2_id', migration_info_src, migration_info_dest) + extra_updates = [ + mock.call( + self.context, self.share['id'], + {'task_state': + constants.TASK_STATE_DATA_COPYING_COMPLETING}), + mock.call( + self.context, self.share['id'], + {'task_state': + 
constants.TASK_STATE_DATA_COPYING_COMPLETED}) + ] + + # asserts + self.assertEqual( + self.manager.busy_tasks_shares[self.share['id']], fake_copy) + + update_list = [ + mock.call( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}), + mock.call( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_IN_PROGRESS}), + ] + if extra_updates: + update_list = update_list + extra_updates + + db.share_update.assert_has_calls(update_list) + + helper.DataServiceHelper.allow_access_to_data_service.\ + assert_called_once_with(self.share, 'ins1_id', 'ins2_id') + + helper.DataServiceHelper.mount_share_instance.assert_has_calls([ + mock.call(migration_info_src['mount'], '/tmp/', 'ins1_id'), + mock.call(migration_info_dest['mount'], '/tmp/', 'ins2_id')]) + + fake_copy.run.assert_called_once_with() + if exc is None: + fake_copy.get_progress.assert_called_once_with() + + helper.DataServiceHelper.unmount_share_instance.assert_has_calls([ + mock.call(migration_info_src['unmount'], '/tmp/', 'ins1_id'), + mock.call(migration_info_dest['unmount'], '/tmp/', 'ins2_id')]) + + helper.DataServiceHelper.deny_access_to_data_service.assert_has_calls([ + mock.call(access, 'ins1_id'), mock.call(access, 'ins2_id')]) + + def test__copy_share_data_exception_access(self): + + migration_info_src = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + migration_info_dest = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + + fake_copy = mock.MagicMock(cancelled=False) + + # mocks + self.mock_object(db, 'share_update') + + self.mock_object( + helper.DataServiceHelper, 'allow_access_to_data_service', + mock.Mock( + side_effect=exception.ShareDataCopyFailed(reason='fake'))) + + self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') + + # run + self.assertRaises(exception.ShareDataCopyFailed, + self.manager._copy_share_data, self.context, + fake_copy, self.share, 'ins1_id', 'ins2_id', + migration_info_src, migration_info_dest) + + # asserts + db.share_update.assert_called_once_with( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) + + helper.DataServiceHelper.allow_access_to_data_service.\ + assert_called_once_with(self.share, 'ins1_id', 'ins2_id') + + def test__copy_share_data_exception_mount_1(self): + + access = db_utils.create_access(share_id=self.share['id']) + + migration_info_src = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + migration_info_dest = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + + fake_copy = mock.MagicMock(cancelled=False) + + # mocks + self.mock_object(db, 'share_update') + + self.mock_object(helper.DataServiceHelper, + 'allow_access_to_data_service', + mock.Mock(return_value=access)) + + self.mock_object(helper.DataServiceHelper, 'mount_share_instance', + mock.Mock(side_effect=Exception('fake'))) + + self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') + self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') + + # run + self.assertRaises(exception.ShareDataCopyFailed, + self.manager._copy_share_data, self.context, + fake_copy, self.share, 'ins1_id', 'ins2_id', + migration_info_src, migration_info_dest) + + # asserts + db.share_update.assert_called_once_with( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) + + helper.DataServiceHelper.allow_access_to_data_service.\ + assert_called_once_with(self.share, 'ins1_id', 'ins2_id') + + 
helper.DataServiceHelper.mount_share_instance.assert_called_once_with( + migration_info_src['mount'], '/tmp/', 'ins1_id') + + helper.DataServiceHelper.cleanup_temp_folder.assert_called_once_with( + 'ins1_id', '/tmp/') + + helper.DataServiceHelper.cleanup_data_access.assert_has_calls([ + mock.call(access, 'ins2_id'), mock.call(access, 'ins1_id')]) + + def test__copy_share_data_exception_mount_2(self): + + access = db_utils.create_access(share_id=self.share['id']) + + migration_info_src = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + migration_info_dest = {'mount': 'mount_cmd_src', + 'unmount': 'unmount_cmd_src'} + + fake_copy = mock.MagicMock(cancelled=False) + + # mocks + self.mock_object(db, 'share_update') + + self.mock_object(helper.DataServiceHelper, + 'allow_access_to_data_service', + mock.Mock(return_value=access)) + + self.mock_object(helper.DataServiceHelper, 'mount_share_instance', + mock.Mock(side_effect=[None, Exception('fake')])) + + self.mock_object(helper.DataServiceHelper, 'cleanup_data_access') + self.mock_object(helper.DataServiceHelper, 'cleanup_temp_folder') + self.mock_object(helper.DataServiceHelper, + 'cleanup_unmount_temp_folder') + + # run + self.assertRaises(exception.ShareDataCopyFailed, + self.manager._copy_share_data, self.context, + fake_copy, self.share, 'ins1_id', 'ins2_id', + migration_info_src, migration_info_dest) + + # asserts + db.share_update.assert_called_once_with( + self.context, self.share['id'], + {'task_state': constants.TASK_STATE_DATA_COPYING_STARTING}) + + helper.DataServiceHelper.allow_access_to_data_service.\ + assert_called_once_with(self.share, 'ins1_id', 'ins2_id') + + helper.DataServiceHelper.mount_share_instance.assert_has_calls([ + mock.call(migration_info_src['mount'], '/tmp/', 'ins1_id'), + mock.call(migration_info_dest['mount'], '/tmp/', 'ins2_id')]) + + helper.DataServiceHelper.cleanup_unmount_temp_folder.\ + assert_called_once_with( + migration_info_src['unmount'], '/tmp/', 'ins1_id') + + helper.DataServiceHelper.cleanup_temp_folder.assert_has_calls([ + mock.call('ins2_id', '/tmp/'), mock.call('ins1_id', '/tmp/')]) + + helper.DataServiceHelper.cleanup_data_access.assert_has_calls([ + mock.call(access, 'ins2_id'), mock.call(access, 'ins1_id')]) + + def test_data_copy_cancel(self): + + share = db_utils.create_share() + + self.manager.busy_tasks_shares[share['id']] = data_utils.Copy + + # mocks + self.mock_object(data_utils.Copy, 'cancel') + + # run + self.manager.data_copy_cancel(self.context, share['id']) + + # asserts + data_utils.Copy.cancel.assert_called_once_with() + + def test_data_copy_cancel_not_copying(self): + + self.assertRaises(exception.InvalidShare, + self.manager.data_copy_cancel, self.context, + 'fake_id') + + def test_data_copy_get_progress(self): + + share = db_utils.create_share() + + self.manager.busy_tasks_shares[share['id']] = data_utils.Copy + + expected = 'fake_progress' + + # mocks + self.mock_object(data_utils.Copy, 'get_progress', + mock.Mock(return_value=expected)) + + # run + result = self.manager.data_copy_get_progress(self.context, share['id']) + + # asserts + self.assertEqual(expected, result) + + data_utils.Copy.get_progress.assert_called_once_with() + + def test_data_copy_get_progress_not_copying(self): + + self.assertRaises(exception.InvalidShare, + self.manager.data_copy_get_progress, self.context, + 'fake_id') diff --git a/manila/tests/data/test_rpcapi.py b/manila/tests/data/test_rpcapi.py index 52d2951993..c585e82a75 100644 --- a/manila/tests/data/test_rpcapi.py +++ 
b/manila/tests/data/test_rpcapi.py @@ -20,10 +20,13 @@ import copy import mock from oslo_config import cfg +from oslo_serialization import jsonutils +from manila.common import constants from manila import context from manila.data import rpcapi as data_rpcapi from manila import test +from manila.tests import db_utils CONF = cfg.CONF @@ -32,6 +35,12 @@ class DataRpcAPITestCase(test.TestCase): def setUp(self): super(DataRpcAPITestCase, self).setUp() + self.context = context.get_admin_context() + share = db_utils.create_share( + availability_zone=CONF.storage_availability_zone, + status=constants.STATUS_AVAILABLE + ) + self.fake_share = jsonutils.to_primitive(share) def tearDown(self): super(DataRpcAPITestCase, self).tearDown() @@ -71,3 +80,27 @@ class DataRpcAPITestCase(test.TestCase): expected_args = [ctxt, method, expected_msg] for arg, expected_arg in zip(self.fake_args, expected_args): self.assertEqual(expected_arg, arg) + + def test_migration_start(self): + self._test_data_api('migration_start', + rpc_method='cast', + version='1.0', + share_id=self.fake_share['id'], + ignore_list=[], + share_instance_id='fake_ins_id', + dest_share_instance_id='dest_fake_ins_id', + migration_info_src={}, + migration_info_dest={}, + notify=True) + + def test_data_copy_cancel(self): + self._test_data_api('data_copy_cancel', + rpc_method='call', + version='1.0', + share_id=self.fake_share['id']) + + def test_data_copy_get_progress(self): + self._test_data_api('data_copy_get_progress', + rpc_method='call', + version='1.0', + share_id=self.fake_share['id']) diff --git a/manila/tests/data/test_utils.py b/manila/tests/data/test_utils.py new file mode 100644 index 0000000000..02fab17bb0 --- /dev/null +++ b/manila/tests/data/test_utils.py @@ -0,0 +1,325 @@ +# Copyright 2015 Hitachi Data Systems inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
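+#
+# These tests exercise manila.data.utils.Copy with manila.utils.execute
+# mocked out: progress reporting (get_progress), recursive size calculation
+# (get_total_size), file copying (copy_data), permission/timestamp/ownership
+# preservation (copy_stats), cancellation handling, and the overall run().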
+ +import os + +import mock + +from manila.data import utils as data_utils +from manila import test +from manila import utils + + +class CopyClassTestCase(test.TestCase): + def setUp(self): + super(CopyClassTestCase, self).setUp() + src = '/path/fake/src' + dest = '/path/fake/dst' + ignore_list = ['item'] + self._copy = data_utils.Copy(src, dest, ignore_list) + self._copy.total_size = 10000 + self._copy.current_size = 100 + self._copy.current_copy = {'file_path': '/fake/path', 'size': 100} + + self.mock_log = self.mock_object(data_utils, 'LOG') + + def test_get_progress(self): + expected = {'total_progress': 1, + 'current_file_path': '/fake/path', + 'current_file_progress': 100} + + # mocks + self.mock_object(utils, 'execute', + mock.Mock(return_value=("100", ""))) + + # run + out = self._copy.get_progress() + + # asserts + self.assertEqual(expected, out) + + utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", + run_as_root=True) + + def test_get_progress_current_copy_none(self): + self._copy.current_copy = None + expected = {'total_progress': 100} + + # run + out = self._copy.get_progress() + + # asserts + self.assertEqual(expected, out) + + def test_get_progress_exception(self): + expected = {'total_progress': 1, + 'current_file_path': '/fake/path', + 'current_file_progress': 0} + + # mocks + self.mock_object( + utils, 'execute', + mock.Mock(side_effect=utils.processutils.ProcessExecutionError())) + + # run + out = self._copy.get_progress() + + # asserts + self.assertEqual(expected, out) + + utils.execute.assert_called_once_with("stat", "-c", "%s", "/fake/path", + run_as_root=True) + + def test_cancel(self): + self._copy.cancelled = False + + # run + self._copy.cancel() + + # asserts + self.assertEqual(self._copy.cancelled, True) + + # reset + self._copy.cancelled = False + + def test_get_total_size(self): + self._copy.total_size = 0 + + values = [("folder1/\nitem/\nfile1\nitem", ""), + ("", ""), + ("10000", "")] + + def get_output(*args, **kwargs): + return values.pop(0) + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=get_output)) + + # run + self._copy.get_total_size(self._copy.src) + + # asserts + self.assertEqual(self._copy.total_size, 10000) + + utils.execute.assert_has_calls([ + mock.call("ls", "-pA1", "--group-directories-first", + self._copy.src, run_as_root=True), + mock.call("ls", "-pA1", "--group-directories-first", + os.path.join(self._copy.src, "folder1/"), + run_as_root=True), + mock.call("stat", "-c", "%s", + os.path.join(self._copy.src, "file1"), run_as_root=True) + ]) + + def test_get_total_size_cancelled_1(self): + self._copy.total_size = 0 + self._copy.cancelled = True + + # run + self._copy.get_total_size(self._copy.src) + + # asserts + self.assertEqual(self._copy.total_size, 0) + + # reset + self._copy.total_size = 10000 + self._copy.cancelled = False + + def test_get_total_size_cancelled_2(self): + self._copy.total_size = 0 + + def ls_output(*args, **kwargs): + self._copy.cancelled = True + return "folder1/", "" + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=ls_output)) + + # run + self._copy.get_total_size(self._copy.src) + + # asserts + self.assertEqual(self._copy.total_size, 0) + utils.execute.assert_called_once_with( + "ls", "-pA1", "--group-directories-first", self._copy.src, + run_as_root=True) + + # reset + self._copy.total_size = 10000 + self._copy.cancelled = False + + def test_copy_data(self): + + values = [("folder1/\nitem/\nfile1\nitem", ""), + "", + ("", ""), + ("10000", ""), + ""] 
+ + def get_output(*args, **kwargs): + return values.pop(0) + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=get_output)) + self.mock_object(self._copy, 'get_progress') + + # run + self._copy.copy_data(self._copy.src) + + # asserts + self._copy.get_progress.assert_called_once_with() + + utils.execute.assert_has_calls([ + mock.call("ls", "-pA1", "--group-directories-first", + self._copy.src, run_as_root=True), + mock.call("mkdir", "-p", os.path.join(self._copy.dest, "folder1/"), + run_as_root=True), + mock.call("ls", "-pA1", "--group-directories-first", + os.path.join(self._copy.src, "folder1/"), + run_as_root=True), + mock.call("stat", "-c", "%s", + os.path.join(self._copy.src, "file1"), run_as_root=True), + mock.call("cp", "-P", "--preserve=all", + os.path.join(self._copy.src, "file1"), + os.path.join(self._copy.dest, "file1"), run_as_root=True) + ]) + + def test_copy_data_cancelled_1(self): + + self._copy.cancelled = True + + # run + self._copy.copy_data(self._copy.src) + + # reset + self._copy.cancelled = False + + def test_copy_data_cancelled_2(self): + + def ls_output(*args, **kwargs): + self._copy.cancelled = True + return "folder1/", "" + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=ls_output)) + + # run + self._copy.copy_data(self._copy.src) + + # asserts + utils.execute.assert_called_once_with( + "ls", "-pA1", "--group-directories-first", self._copy.src, + run_as_root=True) + + # reset + self._copy.cancelled = False + + def test_copy_stats(self): + + values = [("folder1/\nitem/\nfile1\nitem", ""), + ("", ""), + "", + "", + "", + "", + "", + ""] + + def get_output(*args, **kwargs): + return values.pop(0) + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=get_output)) + + # run + self._copy.copy_stats(self._copy.src) + + # asserts + utils.execute.assert_has_calls([ + mock.call("ls", "-pA1", "--group-directories-first", + self._copy.src, run_as_root=True), + mock.call("ls", "-pA1", "--group-directories-first", + os.path.join(self._copy.src, "folder1/"), + run_as_root=True), + mock.call( + "chmod", + "--reference=%s" % os.path.join(self._copy.src, "folder1/"), + os.path.join(self._copy.dest, "folder1/"), + run_as_root=True), + mock.call( + "touch", + "--reference=%s" % os.path.join(self._copy.src, "folder1/"), + os.path.join(self._copy.dest, "folder1/"), + run_as_root=True), + mock.call( + "chown", + "--reference=%s" % os.path.join(self._copy.src, "folder1/"), + os.path.join(self._copy.dest, "folder1/"), + run_as_root=True), + ]) + + def test_copy_stats_cancelled_1(self): + + self._copy.cancelled = True + + # run + self._copy.copy_stats(self._copy.src) + + # reset + self._copy.cancelled = False + + def test_copy_stats_cancelled_2(self): + + def ls_output(*args, **kwargs): + self._copy.cancelled = True + return "folder1/", "" + + # mocks + self.mock_object(utils, 'execute', mock.Mock( + side_effect=ls_output)) + + # run + self._copy.copy_stats(self._copy.src) + + # asserts + utils.execute.assert_called_once_with( + "ls", "-pA1", "--group-directories-first", self._copy.src, + run_as_root=True) + + # reset + self._copy.cancelled = False + + def test_run(self): + + # mocks + self.mock_object(self._copy, 'get_total_size') + self.mock_object(self._copy, 'copy_data') + self.mock_object(self._copy, 'copy_stats') + self.mock_object(self._copy, 'get_progress') + + # run + self._copy.run() + + # asserts + self.assertTrue(data_utils.LOG.info.called) + self._copy.get_total_size.assert_called_once_with(self._copy.src) + 
self._copy.copy_data.assert_called_once_with(self._copy.src) + self._copy.copy_stats.assert_called_once_with(self._copy.src) + self._copy.get_progress.assert_called_once_with() diff --git a/manila/tests/policy.json b/manila/tests/policy.json index 8339561940..a6cc950f3b 100644 --- a/manila/tests/policy.json +++ b/manila/tests/policy.json @@ -33,6 +33,10 @@ "share:unmanage": "rule:admin_api", "share:force_delete": "rule:admin_api", "share:reset_status": "rule:admin_api", + "share:migration_start": "rule:admin_api", + "share:migration_complete": "rule:admin_api", + "share:migration_cancel": "rule:admin_api", + "share:migration_get_progress": "rule:admin_api", "share_export_location:index": "rule:default", "share_export_location:show": "rule:default", diff --git a/manila/tests/scheduler/test_manager.py b/manila/tests/scheduler/test_manager.py index d65e0c8737..7aee08d0fa 100644 --- a/manila/tests/scheduler/test_manager.py +++ b/manila/tests/scheduler/test_manager.py @@ -222,13 +222,13 @@ class SchedulerManagerTestCase(test.TestCase): host = 'fake@backend#pool' self.mock_object(db, 'share_get', mock.Mock(return_value=share)) - self.mock_object(share_rpcapi.ShareAPI, 'migrate_share') + self.mock_object(share_rpcapi.ShareAPI, 'migration_start') self.mock_object(base.Scheduler, 'host_passes_filters', mock.Mock(return_value=host)) self.manager.migrate_share_to_host(self.context, share['id'], host, - False, {}, None) + False, True, {}, None) def test_migrate_share_to_host_no_valid_host(self): @@ -239,5 +239,6 @@ class SchedulerManagerTestCase(test.TestCase): base.Scheduler, 'host_passes_filters', mock.Mock(side_effect=[exception.NoValidHost('fake')])) - self.manager.migrate_share_to_host(self.context, share['id'], host, - False, {}, None) + self.assertRaises( + exception.NoValidHost, self.manager.migrate_share_to_host, + self.context, share['id'], host, False, True, {}, None) diff --git a/manila/tests/scheduler/test_rpcapi.py b/manila/tests/scheduler/test_rpcapi.py index 414602d8db..da749fc9c2 100644 --- a/manila/tests/scheduler/test_rpcapi.py +++ b/manila/tests/scheduler/test_rpcapi.py @@ -103,10 +103,11 @@ class SchedulerRpcAPITestCase(test.TestCase): def test_migrate_share_to_host(self): self._test_scheduler_api('migrate_share_to_host', - rpc_method='cast', + rpc_method='call', share_id='share_id', host='host', force_host_copy=True, + notify=True, request_spec='fake_request_spec', filter_properties='filter_properties', version='1.4') diff --git a/manila/tests/share/drivers/huawei/test_huawei_nas.py b/manila/tests/share/drivers/huawei/test_huawei_nas.py index deb0682ea0..f24fae0b6c 100644 --- a/manila/tests/share/drivers/huawei/test_huawei_nas.py +++ b/manila/tests/share/drivers/huawei/test_huawei_nas.py @@ -27,6 +27,7 @@ import mock from oslo_serialization import jsonutils from manila import context +from manila.data import utils as data_utils from manila import db from manila import exception from manila.share import configuration as conf @@ -35,7 +36,6 @@ from manila.share.drivers.huawei import huawei_nas from manila.share.drivers.huawei.v3 import connection from manila.share.drivers.huawei.v3 import helper from manila.share.drivers.huawei.v3 import smartx -from manila.share import utils as share_utils from manila import test from manila import utils @@ -1906,6 +1906,7 @@ class HuaweiShareDriverTestCase(test.TestCase): def test_create_cifsshare_from_nfssnapshot_success(self): share_type = self.fake_type_not_extra['test_with_extra'] + self.mock_object(db, 'share_type_get', 
mock.Mock(return_value=share_type)) @@ -1914,7 +1915,7 @@ class HuaweiShareDriverTestCase(test.TestCase): mock.Mock(return_value={})) self.mock_object(utils, 'execute', - mock.Mock(return_value={})) + mock.Mock(return_value=("", ""))) self.driver.plugin.helper.login() self.driver.plugin.helper.snapshot_flag = True @@ -1926,7 +1927,7 @@ class HuaweiShareDriverTestCase(test.TestCase): self.assertTrue(db.share_type_get.called) self.assertTrue(self.driver.plugin. _get_access_id.called) - self.assertEqual(4, utils.execute.call_count) + self.assertEqual(7, utils.execute.call_count) self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location) def test_create_share_from_snapshot_nonefs(self): @@ -2065,7 +2066,7 @@ class HuaweiShareDriverTestCase(test.TestCase): self.mock_object(self.driver.plugin, 'mount_share_to_host', mock.Mock(return_value={})) - self.mock_object(share_utils, + self.mock_object(data_utils, 'Copy', mock.Mock(side_effect=Exception('err'))) self.mock_object(utils, @@ -2081,7 +2082,7 @@ class HuaweiShareDriverTestCase(test.TestCase): self.assertTrue(db.share_type_get.called) self.assertEqual(2, self.driver.plugin. mount_share_to_host.call_count) - self.assertTrue(share_utils.Copy.called) + self.assertTrue(data_utils.Copy.called) self.assertEqual(2, utils.execute.call_count) def test_create_nfsshare_from_nfssnapshot_umountshare_fail(self): diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index bb5c20019f..10be17a297 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -25,12 +25,14 @@ from oslo_utils import timeutils from manila.common import constants from manila import context +from manila.data import rpcapi as data_rpc from manila import db as db_api from manila.db.sqlalchemy import models from manila import exception from manila import quota from manila import share from manila.share import api as share_api +from manila.share import rpcapi as share_rpc from manila.share import share_types from manila import test from manila.tests import db_utils @@ -837,7 +839,7 @@ class ShareAPITestCase(test.TestCase): status=constants.STATUS_AVAILABLE, user_id=self.context.user_id, project_id=self.context.project_id, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.unmanage, self.context, share) @@ -970,7 +972,7 @@ class ShareAPITestCase(test.TestCase): def test_create_snapshot_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.create_snapshot, self.context, @@ -1129,7 +1131,7 @@ class ShareAPITestCase(test.TestCase): def test_delete_share_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.delete, @@ -1562,7 +1564,7 @@ class ShareAPITestCase(test.TestCase): def test_extend_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) new_size = 123 self.assertRaises(exception.ShareBusyException, @@ -1613,7 +1615,7 @@ class 
ShareAPITestCase(test.TestCase): def test_shrink_invalid_task_state(self): share = db_utils.create_share( status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) self.assertRaises(exception.ShareBusyException, self.api.shrink, self.context, share, 123) @@ -1642,7 +1644,7 @@ class ShareAPITestCase(test.TestCase): self.context, share, new_size ) - def test_migrate_share(self): + def test_migration_start(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_AVAILABLE, @@ -1679,39 +1681,40 @@ class ShareAPITestCase(test.TestCase): mock.Mock(return_value='fake_type')) self.mock_object(utils, 'validate_service_host') - self.api.migrate_share(self.context, share, host, True) + self.api.migration_start(self.context, share, host, True, True) self.scheduler_rpcapi.migrate_share_to_host.assert_called_once_with( - self.context, share['id'], host, True, request_spec) + self.context, share['id'], host, True, True, request_spec) - def test_migrate_share_status_unavailable(self): + def test_migration_start_status_unavailable(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_ERROR) - self.assertRaises(exception.InvalidShare, self.api.migrate_share, - self.context, share, host, True) + self.assertRaises(exception.InvalidShare, self.api.migration_start, + self.context, share, host, True, True) - def test_migrate_share_task_state_invalid(self): + def test_migration_start_task_state_invalid(self): host = 'fake2@backend#pool' share = db_utils.create_share( status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS) + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS) - self.assertRaises(exception.ShareBusyException, self.api.migrate_share, - self.context, share, host, True) + self.assertRaises(exception.ShareBusyException, + self.api.migration_start, + self.context, share, host, True, True) - def test_migrate_share_with_snapshots(self): + def test_migration_start_with_snapshots(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) self.mock_object(db_api, 'share_snapshot_get_all_for_share', mock.Mock(return_value=True)) - self.assertRaises(exception.InvalidShare, self.api.migrate_share, - self.context, share, host, True) + self.assertRaises(exception.InvalidShare, self.api.migration_start, + self.context, share, host, True, True) - def test_migrate_share_has_replicas(self): + def test_migration_start_has_replicas(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, @@ -1727,12 +1730,12 @@ class ShareAPITestCase(test.TestCase): # Share was updated after adding replicas, grabbing it again. 
share = db_api.share_get(self.context, share['id']) - self.assertRaises(exception.Conflict, self.api.migrate_share, + self.assertRaises(exception.Conflict, self.api.migration_start, self.context, share, host, True) self.assertTrue(mock_log.error.called) self.assertFalse(mock_snapshot_get_call.called) - def test_migrate_share_invalid_host(self): + def test_migration_start_invalid_host(self): host = 'fake@backend#pool' share = db_utils.create_share( host='fake2@backend', status=constants.STATUS_AVAILABLE) @@ -1741,19 +1744,19 @@ class ShareAPITestCase(test.TestCase): mock.Mock(return_value=False)) self.assertRaises(exception.ServiceNotFound, - self.api.migrate_share, - self.context, share, host, True) + self.api.migration_start, + self.context, share, host, True, True) - def test_migrate_share_same_host(self): + def test_migration_start_same_host(self): host = 'fake@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) self.assertRaises(exception.InvalidHost, - self.api.migrate_share, - self.context, share, host, True) + self.api.migration_start, + self.context, share, host, True, True) - def test_migrate_share_exception(self): + def test_migration_start_exception(self): host = 'fake2@backend#pool' share = db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE) @@ -1766,9 +1769,9 @@ class ShareAPITestCase(test.TestCase): mock.Mock(side_effect=exception.ShareMigrationFailed( reason='fake'))) - self.assertRaises(exception.ShareMigrationFailed, - self.api.migrate_share, - self.context, share, host, True) + self.assertRaises(exception.InvalidHost, + self.api.migration_start, + self.context, share, host, True, True) db_api.share_update.assert_any_call( mock.ANY, share['id'], mock.ANY) @@ -1985,6 +1988,128 @@ class ShareAPITestCase(test.TestCase): self.assertTrue(mock_rpcapi_update_share_replica_call.called) self.assertIsNone(retval) + def test_migration_complete(self): + + instance1 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING) + instance2 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_MIGRATING_TO) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, + instances=[instance1, instance2]) + + self.mock_object(share_rpc.ShareAPI, 'migration_complete') + + self.api.migration_complete(self.context, share) + + share_rpc.ShareAPI.migration_complete.assert_called_once_with( + self.context, share, instance1['id'], instance2['id']) + + def test_migration_complete_task_state_invalid(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + + self.assertRaises(exception.InvalidShare, self.api.migration_complete, + self.context, share) + + def test_migration_complete_status_invalid(self): + + instance1 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_ERROR) + instance2 = db_utils.create_share_instance( + share_id='fake_id', status=constants.STATUS_ERROR) + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED, + instances=[instance1, instance2]) + + self.assertRaises(exception.ShareMigrationFailed, + self.api.migration_complete, self.context, + share) + + def test_migration_cancel(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + + self.mock_object(data_rpc.DataAPI, 'data_copy_cancel') + + 
self.api.migration_cancel(self.context, share) + + data_rpc.DataAPI.data_copy_cancel.assert_called_once_with( + self.context, share['id']) + + def test_migration_cancel_driver(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + + self.mock_object(share_rpc.ShareAPI, 'migration_cancel') + + self.api.migration_cancel(self.context, share) + + share_rpc.ShareAPI.migration_cancel.assert_called_once_with( + self.context, share) + + def test_migration_cancel_task_state_invalid(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_STARTING) + + self.assertRaises(exception.InvalidShare, self.api.migration_cancel, + self.context, share) + + def test_migration_get_progress(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_IN_PROGRESS) + + expected = 'fake_progress' + + self.mock_object(data_rpc.DataAPI, 'data_copy_get_progress', + mock.Mock(return_value=expected)) + + result = self.api.migration_get_progress(self.context, share) + + self.assertEqual(expected, result) + + data_rpc.DataAPI.data_copy_get_progress.assert_called_once_with( + self.context, share['id']) + + def test_migration_get_progress_driver(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS) + + expected = 'fake_progress' + + self.mock_object(share_rpc.ShareAPI, 'migration_get_progress', + mock.Mock(return_value=expected)) + + result = self.api.migration_get_progress(self.context, share) + + self.assertEqual(expected, result) + + share_rpc.ShareAPI.migration_get_progress.assert_called_once_with( + self.context, share) + + def test_migration_get_progress_task_state_invalid(self): + + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_STARTING) + + self.assertRaises(exception.InvalidShare, + self.api.migration_get_progress, self.context, share) + class OtherTenantsShareActionsTestCase(test.TestCase): def setUp(self): diff --git a/manila/tests/share/test_driver.py b/manila/tests/share/test_driver.py index 3b2adbd940..60b1cd59a2 100644 --- a/manila/tests/share/test_driver.py +++ b/manila/tests/share/test_driver.py @@ -20,16 +20,11 @@ import time import ddt import mock -from manila.common import constants from manila import exception from manila import network from manila.share import configuration from manila.share import driver -from manila.share import migration -from manila.share import rpcapi -from manila.share import utils as share_utils from manila import test -from manila.tests import db_utils from manila.tests import utils as test_utils from manila import utils @@ -467,341 +462,66 @@ class ShareDriverTestCase(test.TestCase): ) mock_get_admin_network_allocations_number.assert_called_once_with() - def test_migrate_share(self): + def test_migration_start(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertEqual((None, None), - share_driver.migrate_share(None, None, None, None)) + share_driver.migration_start(None, None, None, + None, None, None)) - def test_get_driver_migration_info_default(self): + def test_migration_complete(self): + + driver.CONF.set_default('driver_handles_share_servers', False) + share_driver = driver.ShareDriver(False) + + share_driver.migration_complete(None, None, None, None) + + def test_migration_cancel(self): + + 
driver.CONF.set_default('driver_handles_share_servers', False) + share_driver = driver.ShareDriver(False) + + self.assertRaises(NotImplementedError, share_driver.migration_cancel, + None, None, None, None) + + def test_migration_get_progress(self): + + driver.CONF.set_default('driver_handles_share_servers', False) + share_driver = driver.ShareDriver(False) + + self.assertRaises(NotImplementedError, + share_driver.migration_get_progress, + None, None, None, None) + + def test_migration_get_driver_info_default(self): driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) self.assertIsNone( - share_driver.get_driver_migration_info(None, None, None), None) + share_driver.migration_get_driver_info(None, None, None), None) - def test_get_migration_info_default(self): - expected = {'mount': ['mount', '-t', 'fake_proto', '/fake/fake_id', - '/tmp/fake_id'], - 'umount': ['umount', '/tmp/fake_id'], - 'access': {'access_type': 'ip', - 'access_level': 'rw', - 'access_to': None}} + @ddt.data(True, False) + def test_migration_get_info(self, admin): + + expected = {'mount': 'mount -vt fake_proto /fake/fake_id %(path)s', + 'unmount': 'umount -v %(path)s'} fake_share = {'id': 'fake_id', 'share_proto': 'fake_proto', 'export_locations': [{'path': '/fake/fake_id', - 'is_admin_only': True}]} + 'is_admin_only': admin}]} driver.CONF.set_default('driver_handles_share_servers', False) share_driver = driver.ShareDriver(False) share_driver.configuration = configuration.Configuration(None) - migration_info = share_driver.get_migration_info(None, - fake_share, - "fake_server") + migration_info = share_driver.migration_get_info( + None, fake_share, "fake_server") self.assertEqual(expected, migration_info) - def test_get_migration_info_parameters(self): - - expected = {'mount': ['fake_mount', '/200.200.200.200/fake_id', - '/tmp/fake_id'], - 'umount': ['umount', '/tmp/fake_id'], - 'access': {'access_type': 'ip', - 'access_level': 'rw', - 'access_to': '100.100.100.100'}} - - fake_share = {'id': 'fake_id', - 'share_proto': 'fake_proto', - 'export_locations': [{'path': '/5.5.5.5/fake_id', - 'is_admin_only': False}]} - - driver.CONF.set_default('driver_handles_share_servers', False) - driver.CONF.set_default('migration_protocol_mount_command', - 'fake_mount') - driver.CONF.set_default('migration_mounting_backend_ip', - '200.200.200.200') - driver.CONF.set_default('migration_data_copy_node_ip', - '100.100.100.100') - - share_driver = driver.ShareDriver(False) - share_driver.configuration = configuration.Configuration(None) - migration_info = share_driver.get_migration_info(None, - fake_share, - "fake_server") - - self.assertEqual(expected, migration_info) - - def _setup_mocks_copy_share_data(self): - - get_migration_info_value = {'mount': 'fake', - 'umount': 'fake', - 'access': - {'access_type': 'fake', - 'access_to': 'fake'}} - - self.mock_object(rpcapi.ShareAPI, 'get_migration_info', - mock.Mock(return_value=get_migration_info_value)) - - self.mock_object(driver.ShareDriver, 'get_migration_info', - mock.Mock(return_value=get_migration_info_value)) - - self.mock_object(share_utils.Copy, 'run') - self.mock_object(time, 'sleep') - - driver.CONF.set_default('driver_handles_share_servers', False) - share_driver = driver.ShareDriver( - False, configuration=configuration.Configuration(None)) - - return share_driver - - def test_copy_share_data(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = 
{'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - driver.CONF.set_default('migration_ignore_files', None) - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'allow_migration_access', - mock.Mock(return_value='fake_access_ref')) - self.mock_object(utils, 'execute') - self.mock_object(share_utils.Copy, 'get_progress', mock.Mock( - return_value={'total_progress': 100})) - - share_driver.copy_share_data('ctx', helper, fake_share, - fake_share_instance, None, - fake_share_instance, None, - local, remote) - - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance), - ('fake_access_ref', local['access'], fake_share_instance), - ('fake_access_ref', remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def test_copy_share_data_failed(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = {'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - driver.CONF.set_default('migration_ignore_files', None) - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'allow_migration_access', - mock.Mock(return_value='fake_access_ref')) - self.mock_object(utils, 'execute') - self.mock_object(share_utils.Copy, 'get_progress', mock.Mock( - return_value=None)) - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, 'ctx', helper, - fake_share, fake_share_instance, None, - fake_share_instance, None, local, remote) - - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def test_copy_share_data_local_access_exception(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = {'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - driver.CONF.set_default('migration_ignore_files', None) - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object( - migration.ShareMigrationHelper, - 
'allow_migration_access', - mock.Mock(side_effect=[ - exception.ShareMigrationFailed(reason='fake')])) - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, 'ctx', helper, - fake_share, fake_share_instance, None, - fake_share_instance, None, local, remote) - - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def test_copy_share_data_remote_access_exception(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = {'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - driver.CONF.set_default('migration_ignore_files', None) - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object( - migration.ShareMigrationHelper, - 'allow_migration_access', - mock.Mock(side_effect=[None, - exception.ShareMigrationFailed( - reason='fake')])) - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_migration_access') - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, 'ctx', helper, - fake_share, fake_share_instance, None, - fake_share_instance, None, local, remote) - - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def test_copy_share_data_mount_for_migration_exception(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = {'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - msg = ('Failed to mount temporary folder for migration of share ' - 'instance fake_id to fake_id') - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'allow_migration_access', - mock.Mock(return_value='fake_access_ref')) - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_temp_folder') - self.mock_object(utils, 'execute', mock.Mock( - side_effect=[None, None, exception.ShareMigrationFailed(msg)])) - - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, - 'ctx', helper, fake_share, - fake_share_instance, None, - fake_share_instance, None, - local, remote) - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def 
test_copy_share_data_mount_for_migration_exception2(self): - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - fake_share_instance = {'id': 'fake_id', 'host': 'fake_host'} - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': '192.168.0.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - helper = migration.ShareMigrationHelper(None, None, None, None, None) - - msg = ('Failed to mount temporary folder for migration of share ' - 'instance fake_id to fake_id') - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - - self.mock_object(migration.ShareMigrationHelper, - 'deny_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'allow_migration_access', - mock.Mock(return_value='fake_access_ref')) - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_migration_access') - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_temp_folder') - self.mock_object(migration.ShareMigrationHelper, - 'cleanup_unmount_temp_folder') - self.mock_object(utils, 'execute', mock.Mock( - side_effect=[None, None, None, - exception.ShareMigrationFailed(msg)])) - - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, - 'ctx', helper, fake_share, - fake_share_instance, None, - fake_share_instance, None, - local, remote) - args = ((None, local['access'], fake_share_instance), - (None, remote['access'], fake_share_instance)) - migration.ShareMigrationHelper.deny_migration_access.assert_has_calls( - [mock.call(*a) for a in args]) - - def test_copy_share_data_access_rule_invalid(self): - - fake_share = db_utils.create_share( - id='fakeid', status=constants.STATUS_AVAILABLE, host='fake_host') - - share_driver = self._setup_mocks_copy_share_data() - remote = {'access': {'access_to': None}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - local = {'access': {'access_to': '192.168.1.1'}, - 'mount': 'fake_mount', - 'umount': 'fake_umount'} - - driver.CONF.set_default('migration_tmp_location', '/fake/path') - - self.assertRaises(exception.ShareMigrationFailed, - share_driver.copy_share_data, 'ctx', None, - fake_share, None, None, None, None, local, remote) - def test_update_access(self): share_driver = driver.ShareDriver(True, configuration=None) self.assertRaises( diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index 0c9584e723..5e6c8c84eb 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -27,6 +27,7 @@ import six from manila.common import constants from manila import context +from manila.data import rpcapi as data_rpc from manila import db from manila.db.sqlalchemy import models from manila import exception @@ -34,7 +35,7 @@ from manila import quota from manila.share import access as share_access from manila.share import drivers_private_data from manila.share import manager -from manila.share import migration +from manila.share import migration as migration_api from manila.share import rpcapi from manila.share import share_types from manila import test @@ -179,7 +180,12 @@ class ShareManagerTestCase(test.TestCase): assert_called_once_with() @ddt.data( - "migrate_share", + "migration_get_driver_info", + "migration_get_info", + "migration_cancel", + "migration_get_progress", + "migration_complete", + "migration_start", "create_share_instance", "manage_share", 
"unmanage_share", @@ -245,7 +251,7 @@ class ShareManagerTestCase(test.TestCase): db_utils.create_share( id='fake_id_4', status=constants.STATUS_AVAILABLE, - task_state=constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS, + task_state=constants.TASK_STATE_MIGRATION_IN_PROGRESS, display_name='fake_name_4').instance, db_utils.create_share(id='fake_id_5', status=constants.STATUS_AVAILABLE, @@ -385,7 +391,7 @@ class ShareManagerTestCase(test.TestCase): utils.IsAMatcher(context.RequestContext)) manager.LOG.info.assert_any_call( mock.ANY, - {'task': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS, + {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( @@ -447,7 +453,7 @@ class ShareManagerTestCase(test.TestCase): utils.IsAMatcher(context.RequestContext)) manager.LOG.info.assert_any_call( mock.ANY, - {'task': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS, + {'task': constants.TASK_STATE_MIGRATION_IN_PROGRESS, 'id': instances[3]['id']}, ) manager.LOG.info.assert_any_call( @@ -3174,444 +3180,528 @@ class ShareManagerTestCase(test.TestCase): assert_called_once_with(mock.ANY, fake_snap['id'], {'status': constants.STATUS_ERROR}) - def test_get_migration_info(self): - share_instance = 'fake-share-instance' + def test_migration_get_info(self): + share_instance = {'share_server_id': 'fake_server_id'} + share_instance_id = 'fake_id' + share_server = 'fake_share_server' + migration_info = 'fake_info' + + # mocks + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=share_instance)) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=share_server)) + self.mock_object(self.share_manager.driver, 'migration_get_info', + mock.Mock(return_value=migration_info)) + + # run + result = self.share_manager.migration_get_info( + self.context, share_instance_id) + + # asserts + self.assertEqual(migration_info, result) + + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, share_instance_id, with_share_data=True) + + self.share_manager.driver.migration_get_info.assert_called_once_with( + self.context, share_instance, share_server) + + def test_migration_get_driver_info(self): + share_instance = {'share_server_id': 'fake_server_id'} share_instance_id = 'fake-id' share_server = 'fake-share-server' + migration_info = 'fake_info' - manager = self.share_manager - - self.mock_object(manager.db, 'share_instance_get', + # mocks + self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(return_value=share_instance)) - self.mock_object(manager.driver, 'get_migration_info') - - manager.get_migration_info(self.context, - share_instance_id, share_server) - - manager.db.share_instance_get.assert_called_once_with( - self.context, share_instance_id, with_share_data=True - ) - - manager.driver.get_migration_info.assert_called_once_with( - self.context, share_instance, share_server - ) - - def test_get_driver_migration_info(self): - share_instance = 'fake-share-instance' - share_instance_id = 'fake-id' - share_server = 'fake-share-server' - - manager = self.share_manager - - self.mock_object(manager.db, 'share_instance_get', - mock.Mock(return_value=share_instance)) - self.mock_object(manager.driver, 'get_driver_migration_info') - - manager.get_driver_migration_info(self.context, share_instance_id, - share_server) - - manager.db.share_instance_get.assert_called_once_with( - self.context, share_instance_id, with_share_data=True - ) - - 
manager.driver.get_driver_migration_info.assert_called_once_with( - self.context, share_instance, share_server - ) - - def test_migrate_share_not_moved_by_driver(self): - share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_success = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_SUCCESS - } - share_server = { - 'id': 'fake_share_server_id', - 'share_network_id': 'fake_share_network_id', - 'host': 'fake_host', - 'status': 'fake_status', - 'backend_details': {'foo': 'bar'}, - } - migration_info = 'fake-info' - - manager = self.share_manager - - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_get_share_server', + self.mock_object(self.share_manager.db, 'share_server_get', mock.Mock(return_value=share_server)) - self.mock_object(rpcapi.ShareAPI, 'get_driver_migration_info', + self.mock_object(self.share_manager.driver, + 'migration_get_driver_info', mock.Mock(return_value=migration_info)) - self.mock_object(manager.driver, - 'migrate_share', - mock.Mock(return_value=[False, None])) - self.mock_object(manager, '_migrate_share_generic', - mock.Mock(return_value=True)) - manager.migrate_share(self.context, share_id, host) + result = self.share_manager.migration_get_driver_info( + self.context, share_instance_id) - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) + # asserts + self.assertEqual(migration_info, result) - manager.driver.migrate_share.assert_called_once_with( - self.context, utils.IsAMatcher(models.ShareInstance), - host, migration_info - ) + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, share_instance_id, with_share_data=True) - manager._migrate_share_generic.assert_called_once_with( - self.context, utils.IsAMatcher(models.Share), host - ) + self.share_manager.driver.migration_get_driver_info.\ + assert_called_once_with(self.context, share_instance, share_server) - manager.db.share_update.assert_any_call( - self.context, share_id, status_success - ) + @ddt.data((True, 'fake_model_update'), exception.ManilaException()) + def test_migration_start(self, exc): - def test_migrate_share_driver_migration(self): - share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_success = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_SUCCESS - } - share_server = { - 'id': 'fake_share_server_id', - 'share_network_id': 'fake_share_network_id', - 'host': 'fake_host', - 'status': 'fake_status', - 'backend_details': {'foo': 'bar'}, - } - migration_info = 'fake-info' + server = 'fake_share_server' + instance = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_AVAILABLE, + share_server_id='fake_server_id') + share = db_utils.create_share(id='fake_id', instances=[instance]) + host = 'fake_host' + driver_migration_info = 'driver_fake_info' - manager = self.share_manager + # mocks + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=instance)) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager.db, 
'share_instance_update') + self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', + mock.Mock(return_value=driver_migration_info)) - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_get_share_server', - mock.Mock(return_value=share_server)) - self.mock_object(rpcapi.ShareAPI, 'get_driver_migration_info', - mock.Mock(return_value=migration_info)) - self.mock_object(manager.driver, - 'migrate_share', - mock.Mock(return_value=[True, None])) - self.mock_object(manager.db, 'share_instance_update') + if isinstance(exc, exception.ManilaException): + self.mock_object(self.share_manager.driver, 'migration_start', + mock.Mock(side_effect=exc)) + self.mock_object(self.share_manager, '_migration_start_generic', + mock.Mock(side_effect=Exception('fake'))) + self.mock_object(manager.LOG, 'exception') + else: + self.mock_object(self.share_manager.driver, 'migration_start', + mock.Mock(return_value=exc)) - manager.migrate_share(self.context, share_id, host) + # run + if isinstance(exc, exception.ManilaException): + self.assertRaises(exception.ShareMigrationFailed, + self.share_manager.migration_start, + self.context, 'fake_id', host, False, False) + else: + self.share_manager.migration_start( + self.context, 'fake_id', host, False, False) - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) + # asserts + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, instance['id'], with_share_data=True) + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + instance['share_server_id']) - manager.driver.migrate_share.assert_called_once_with( - self.context, utils.IsAMatcher(models.ShareInstance), - host, migration_info - ) + share_update_calls = [ + mock.call( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_IN_PROGRESS}), + mock.call( + self.context, share['id'], + {'task_state': ( + constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS)}) + ] + share_instance_update_calls = [ + mock.call(self.context, instance['id'], + {'status': constants.STATUS_MIGRATING}) + ] + if isinstance(exc, exception.ManilaException): + share_update_calls.append(mock.call( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR})) + share_instance_update_calls.append( + mock.call(self.context, instance['id'], + {'status': constants.STATUS_AVAILABLE})) + self.share_manager._migration_start_generic.\ + assert_called_once_with(self.context, share, instance, host, + False) + self.assertTrue(manager.LOG.exception.called) + else: + share_update_calls.append(mock.call( + self.context, share['id'], + {'task_state': + constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE})) + share_instance_update_calls.append( + mock.call(self.context, instance['id'], 'fake_model_update')) - manager.db.share_update.assert_any_call( - self.context, share_id, status_success - ) + self.share_manager.db.share_update.assert_has_calls(share_update_calls) + self.share_manager.db.share_instance_update.assert_has_calls( + share_instance_update_calls) + rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( + self.context, instance) + self.share_manager.driver.migration_start.assert_called_once_with( + self.context, instance, server, host, driver_migration_info, False) - def test_migrate_share_driver_migration_instance_update(self): - 
share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_success = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_SUCCESS - } - share_server = { - 'id': 'fake_share_server_id', - 'share_network_id': 'fake_share_network_id', - 'host': 'fake_host', - 'status': 'fake_status', - 'backend_details': {'foo': 'bar'}, - } - migration_info = 'fake-info' + @ddt.data(None, Exception('fake')) + def test__migration_start_generic(self, exc): + instance = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_AVAILABLE, + share_server_id='fake_server_id') + new_instance = db_utils.create_share_instance( + share_id='new_fake_id', + status=constants.STATUS_AVAILABLE) + share = db_utils.create_share(id='fake_id', instances=[instance]) + server = 'share_server' + src_migration_info = 'src_fake_info' + dest_migration_info = 'dest_fake_info' - manager = self.share_manager - - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_get_share_server', - mock.Mock(return_value=share_server)) - self.mock_object(rpcapi.ShareAPI, 'get_driver_migration_info', - mock.Mock(return_value=migration_info)) - self.mock_object(manager.driver, - 'migrate_share', - mock.Mock(return_value=[True, mock.ANY])) - self.mock_object(manager.db, 'share_instance_update') - - manager.migrate_share(self.context, share_id, host) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) - - manager.driver.migrate_share.assert_called_once_with( - self.context, utils.IsAMatcher(models.ShareInstance), - host, migration_info - ) - - manager.db.share_instance_update.assert_called_once_with( - self.context, mock.ANY, mock.ANY - ) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_success - ) - - def test_migrate_share_exception_driver(self): - share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_error = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_ERROR - } - share_server = { - 'id': 'fake_share_server_id', - 'share_network_id': 'fake_share_network_id', - 'host': 'fake_host', - 'status': 'fake_status', - 'backend_details': {'foo': 'bar'}, - } - migration_info = 'fake-info' - - manager = self.share_manager - - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_get_share_server', - mock.Mock(return_value=share_server)) - self.mock_object(rpcapi.ShareAPI, 'get_driver_migration_info', - mock.Mock(return_value=migration_info)) - self.mock_object(manager.driver, - 'migrate_share', - mock.Mock(side_effect=exception.ManilaException)) - self.mock_object(manager, '_migrate_share_generic', - mock.Mock(return_value=False)) - - self.assertRaises(exception.ShareMigrationFailed, - manager.migrate_share, - self.context, share_id, host) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) - - manager.driver.migrate_share.assert_called_once_with( - self.context, utils.IsAMatcher(models.ShareInstance), - host, migration_info - ) - - manager._migrate_share_generic.assert_called_once_with( - self.context, utils.IsAMatcher(models.Share), host - ) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_error - ) - - def 
test_migrate_share_exception_generic(self): - share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_error = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_ERROR - } - share_server = 'fake-share-server' - migration_info = 'fake-info' - - manager = self.share_manager - - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_get_share_server', - mock.Mock(return_value=share_server)) - self.mock_object(rpcapi.ShareAPI, 'get_driver_migration_info', - mock.Mock(return_value=migration_info)) - self.mock_object(manager.driver, - 'migrate_share', - mock.Mock(return_value=[False, None])) - self.mock_object(manager, - '_migrate_share_generic', - mock.Mock(side_effect=Exception)) - - self.assertRaises(exception.ShareMigrationFailed, - manager.migrate_share, - self.context, share_id, host, migration_info) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_error - ) - - def test_migrate_share_force_host_copy(self): - share = db_utils.create_share() - share_id = share['id'] - host = 'fake-host' - status_migrating = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_IN_PROGRESS - } - status_success = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_SUCCESS - } - - manager = self.share_manager - - self.mock_object(manager, 'driver') - self.mock_object(manager.db, 'share_update') - self.mock_object(manager, '_migrate_share_generic', - mock.Mock(return_value=True)) - - manager.migrate_share(self.context, share_id, host, True) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_migrating - ) - - manager._migrate_share_generic.assert_called_once_with( - self.context, utils.IsAMatcher(models.Share), host - ) - - manager.db.share_update.assert_any_call( - self.context, share_id, status_success - ) - - def test_migrate_share_generic(self): - share = db_utils.create_share() - share_id = share['id'] - host = {'host': 'fake-host'} - status_completing = { - 'task_state': constants.STATUS_TASK_STATE_MIGRATION_COMPLETING - } - status_inactive = {'status': constants.STATUS_INACTIVE} - status_available = {'status': constants.STATUS_AVAILABLE} - share_server = { - 'id': 'fake_share_server_id', - 'share_network_id': 'fake_share_network_id', - 'host': 'fake_host', - 'status': 'fake_status', - 'backend_details': {'foo': 'bar'}, - } - new_share_server = { - 'id': 'fake_share_server_id2', - 'share_network_id': 'fake_share_network_id2', - 'host': 'fake_host2', - 'status': 'fake_status2', - 'backend_details': {'foo2': 'bar2'}, - } - src_migration_info = 'fake-src-migration-info' - dest_migration_info = 'fake-dest-migration-info' - - manager = self.share_manager - - manager.create_share_instance(self.context, share.instance['id']) - share_instance = manager._get_share_instance(self.context, share) - - new_share_instance = {'id': 'fake-id', - 'status': constants.STATUS_CREATING} - - self.mock_object(manager, '_get_share_instance', - mock.Mock(return_value=share_instance)) - self.mock_object(migration.ShareMigrationHelper, + # mocks + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(self.share_manager.db, 'share_instance_update', + mock.Mock(return_value=server)) + self.mock_object(migration_api.ShareMigrationHelper, 
'change_to_read_only') - self.mock_object(migration.ShareMigrationHelper, - 'create_instance_and_wait', - mock.Mock(return_value=new_share_instance)) - self.mock_object(manager.db, 'share_instance_update') - self.mock_object( - manager, - '_get_share_server', - mock.Mock(side_effect=[share_server, new_share_server]) - ) - self.mock_object(manager.driver, 'get_migration_info', - mock.Mock(return_value=src_migration_info)) - self.mock_object(rpcapi.ShareAPI, 'get_migration_info', - mock.Mock(return_value=dest_migration_info)) - self.mock_object(manager.driver, 'copy_share_data') - self.mock_object(manager.db, 'share_update') - self.mock_object(migration.ShareMigrationHelper, - 'revert_access_rules') - self.mock_object(migration.ShareMigrationHelper, + if exc is None: + self.mock_object(migration_api.ShareMigrationHelper, + 'create_instance_and_wait', + mock.Mock(return_value=new_instance)) + self.mock_object(self.share_manager.driver, 'migration_get_info', + mock.Mock(return_value=src_migration_info)) + self.mock_object(rpcapi.ShareAPI, 'migration_get_info', + mock.Mock(return_value=dest_migration_info)) + self.mock_object(data_rpc.DataAPI, 'migration_start', + mock.Mock(side_effect=Exception('fake'))) + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_new_instance') + else: + self.mock_object(migration_api.ShareMigrationHelper, + 'create_instance_and_wait', + mock.Mock(side_effect=exc)) + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_access_rules') + + # run + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager._migration_start_generic, + self.context, share, instance, 'fake_host', False) + + # asserts + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + instance['share_server_id']) + + migration_api.ShareMigrationHelper.change_to_read_only.\ + assert_called_once_with(instance, server, True, + self.share_manager.driver) + migration_api.ShareMigrationHelper.create_instance_and_wait.\ + assert_called_once_with(share, instance, 'fake_host') + migration_api.ShareMigrationHelper.\ + cleanup_access_rules.assert_called_once_with( + instance, server, self.share_manager.driver) + if exc is None: + self.share_manager.db.share_instance_update.\ + assert_called_once_with( + self.context, new_instance['id'], + {'status': constants.STATUS_MIGRATING_TO}) + self.share_manager.driver.migration_get_info.\ + assert_called_once_with(self.context, instance, server) + rpcapi.ShareAPI.migration_get_info.assert_called_once_with( + self.context, new_instance) + data_rpc.DataAPI.migration_start.assert_called_once_with( + self.context, share['id'], ['lost+found'], instance['id'], + new_instance['id'], src_migration_info, dest_migration_info, + False) + migration_api.ShareMigrationHelper.\ + cleanup_new_instance.assert_called_once_with(new_instance) + + @ddt.data('fake_model_update', Exception('fake')) + def test_migration_complete_driver(self, exc): + server = 'fake_server' + model_update = 'fake_model_update' + instance = db_utils.create_share_instance( + share_id='fake_id', + status=constants.STATUS_AVAILABLE, + share_server_id='fake_server_id') + share = db_utils.create_share( + id='fake_id', + instances=[instance], + task_state=constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE) + + # mocks + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(return_value=instance)) + 
self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(self.share_manager.db, 'share_update') + if isinstance(exc, Exception): + self.mock_object(self.share_manager.driver, 'migration_complete', + mock.Mock(side_effect=exc)) + else: + self.mock_object(self.share_manager.driver, 'migration_complete', + mock.Mock(return_value=exc)) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', + mock.Mock(return_value='fake_info')) + self.mock_object(manager.LOG, 'exception') + + # run + if isinstance(exc, Exception): + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager.migration_complete, + self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + else: + self.share_manager.migration_complete( + self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + + # asserts + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + self.share_manager.db.share_instance_get.assert_called_once_with( + self.context, instance['id'], with_share_data=True) + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), 'fake_server_id') + self.share_manager.driver.migration_complete.assert_called_once_with( + self.context, instance, server, 'fake_info') + rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( + self.context, instance) + if isinstance(exc, Exception): + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + self.assertTrue(manager.LOG.exception.called) + else: + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}) + self.share_manager.db.share_instance_update.\ + assert_called_once_with(self.context, instance['id'], + model_update) + + @ddt.data(None, Exception('fake')) + def test_migration_complete_generic(self, exc): + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) + + # mocks + self.mock_object(self.share_manager.db, 'share_get', + mock.Mock(return_value=share)) + self.mock_object(self.share_manager, '_migration_complete', + mock.Mock(side_effect=exc)) + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(manager.LOG, 'exception') + + # run + if exc: + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager.migration_complete, + self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + else: + self.share_manager.migration_complete( + self.context, 'fake_id', 'fake_ins_id', 'new_fake_ins_id') + + # asserts + self.share_manager.db.share_get.assert_called_once_with( + self.context, share['id']) + self.share_manager._migration_complete.assert_called_once_with( + self.context, share, 'fake_ins_id', 'new_fake_ins_id') + if exc: + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_ERROR}) + self.share_manager.db.share_instance_update.\ + assert_called_once_with( + self.context, 'fake_ins_id', + {'status': constants.STATUS_AVAILABLE}) + self.assertTrue(manager.LOG.exception.called) + + @ddt.data(constants.TASK_STATE_DATA_COPYING_ERROR, + constants.TASK_STATE_DATA_COPYING_CANCELLED, + 
constants.TASK_STATE_DATA_COPYING_COMPLETED, + 'other') + def test__migration_complete_status(self, status): + + instance = db_utils.create_share_instance( + share_id='fake_id', + share_server_id='fake_server_id') + new_instance = db_utils.create_share_instance(share_id='fake_id') + share = db_utils.create_share(id='fake_id', task_state=status) + server = 'fake_server' + + # mocks + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(side_effect=[instance, new_instance])) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_new_instance') + self.mock_object(migration_api.ShareMigrationHelper, + 'cleanup_access_rules') + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(self.share_manager.db, 'share_update') + if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: + self.mock_object(migration_api.ShareMigrationHelper, + 'apply_new_access_rules', + mock.Mock(side_effect=Exception('fake'))) + self.mock_object(manager.LOG, 'exception') + + # run + if status == constants.TASK_STATE_DATA_COPYING_CANCELLED: + self.share_manager._migration_complete( + self.context, share, instance['id'], new_instance['id']) + else: + self.assertRaises( + exception.ShareMigrationFailed, + self.share_manager._migration_complete, self.context, share, + instance['id'], new_instance['id']) + + # asserts + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, instance['id'], with_share_data=True), + mock.call(self.context, new_instance['id'], with_share_data=True) + ]) + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), 'fake_server_id') + + if status != 'other': + migration_api.ShareMigrationHelper.cleanup_new_instance.\ + assert_called_once_with(new_instance) + migration_api.ShareMigrationHelper.cleanup_access_rules.\ + assert_called_once_with(instance, server, + self.share_manager.driver) + if status == constants.TASK_STATE_MIGRATION_CANCELLED: + self.share_manager.db.share_instance_update.\ + assert_called_once_with(self.context, instance['id'], + {'status': constants.STATUS_AVAILABLE}) + self.share_manager.db.share_update.assert_called_once_with( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED}) + if status == constants.TASK_STATE_DATA_COPYING_COMPLETED: + migration_api.ShareMigrationHelper.apply_new_access_rules.\ + assert_called_once_with(instance, new_instance) + self.assertTrue(manager.LOG.exception.called) + + def test__migration_complete(self): + + instance = db_utils.create_share_instance( + share_id='fake_id', + share_server_id='fake_server_id') + new_instance = db_utils.create_share_instance(share_id='fake_id') + share = db_utils.create_share( + id='fake_id', + task_state=constants.TASK_STATE_DATA_COPYING_COMPLETED) + server = 'fake_server' + + # mocks + self.mock_object(self.share_manager.db, 'share_instance_get', + mock.Mock(side_effect=[instance, new_instance])) + self.mock_object(self.share_manager.db, 'share_server_get', + mock.Mock(return_value=server)) + self.mock_object(self.share_manager.db, 'share_instance_update') + self.mock_object(self.share_manager.db, 'share_update') + self.mock_object(migration_api.ShareMigrationHelper, 'delete_instance_and_wait') + self.mock_object(migration_api.ShareMigrationHelper, + 'apply_new_access_rules') - manager._migrate_share_generic(self.context, share, host) + # run + 
self.share_manager._migration_complete( + self.context, share, instance['id'], new_instance['id']) - manager._get_share_instance.assert_called_once_with( - self.context, share - ) + # asserts + self.share_manager.db.share_instance_get.assert_has_calls([ + mock.call(self.context, instance['id'], with_share_data=True), + mock.call(self.context, new_instance['id'], with_share_data=True) + ]) + self.share_manager.db.share_server_get.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), 'fake_server_id') - manager.db.share_instance_update.assert_any_call( - self.context, new_share_instance['id'], status_inactive - ) + self.share_manager.db.share_instance_update.assert_has_calls([ + mock.call(self.context, new_instance['id'], + {'status': constants.STATUS_AVAILABLE}), + mock.call(self.context, instance['id'], + {'status': constants.STATUS_INACTIVE}) + ]) + self.share_manager.db.share_update.assert_has_calls([ + mock.call( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_COMPLETING}), + mock.call( + self.context, share['id'], + {'task_state': constants.TASK_STATE_MIGRATION_SUCCESS}), + ]) + migration_api.ShareMigrationHelper.apply_new_access_rules.\ + assert_called_once_with(instance, new_instance) + migration_api.ShareMigrationHelper.delete_instance_and_wait.\ + assert_called_once_with(instance) - manager._get_share_server.assert_any_call( - mock.ANY, share_instance - ) + def test_migration_cancel(self): - manager._get_share_server.assert_any_call( - mock.ANY, new_share_instance - ) + server = db_utils.create_share_server() + share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + share_server_id=server['id']) - manager.driver.get_migration_info.assert_called_once_with( - self.context, share_instance, share_server - ) + self.mock_object(db, 'share_get', mock.Mock(return_value=share)) - manager.driver.copy_share_data.assert_called_once_with( - self.context, mock.ANY, share, share_instance, - share_server, new_share_instance, new_share_server, - src_migration_info, dest_migration_info - ) + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=server)) - manager.db.share_update.assert_called_once_with( - self.context, share_id, status_completing - ) + self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', + mock.Mock(return_value='migration_info')) - manager.db.share_instance_update.assert_any_call( - self.context, new_share_instance['id'], status_available - ) + self.mock_object(self.share_manager.driver, 'migration_cancel') + + self.share_manager.migration_cancel(self.context, share) + + rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( + self.context, share.instance) + + self.share_manager.driver.migration_cancel.assert_called_once_with( + self.context, share.instance, server, 'migration_info') + + def test_migration_cancel_invalid(self): - def test_migrate_share_generic_exception(self): share = db_utils.create_share() - host = {'host': 'fake-host'} - manager = self.share_manager + self.mock_object(db, 'share_get', mock.Mock(return_value=share)) - manager.create_share_instance(self.context, share.instance['id']) - share_instance = manager._get_share_instance(self.context, share) + self.assertRaises( + exception.InvalidShare, self.share_manager.migration_cancel, + self.context, share) - self.mock_object(manager, '_get_share_instance', - mock.Mock(return_value=share_instance)) + def test_migration_get_progress(self): - self.mock_object(migration.ShareMigrationHelper, - 
'change_to_read_only') - self.mock_object(migration.ShareMigrationHelper, - 'create_instance_and_wait', - mock.Mock(side_effect=exception.ShareMigrationFailed( - reason='fake'))) - self.mock_object(migration.ShareMigrationHelper, - 'revert_access_rules') + server = db_utils.create_share_server() + share = db_utils.create_share( + task_state=constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS, + share_server_id=server['id']) - self.assertRaises(exception.ShareMigrationFailed, - manager._migrate_share_generic, - self.context, share, host) + expected = 'fake_progress' + + self.mock_object(db, 'share_get', mock.Mock(return_value=share)) + + self.mock_object(db, 'share_server_get', + mock.Mock(return_value=server)) + + self.mock_object(rpcapi.ShareAPI, 'migration_get_driver_info', + mock.Mock(return_value='migration_info')) + + self.mock_object(self.share_manager.driver, 'migration_get_progress', + mock.Mock(return_value=expected)) + + result = self.share_manager.migration_get_progress(self.context, share) + + self.assertEqual(expected, result) + + rpcapi.ShareAPI.migration_get_driver_info.assert_called_once_with( + self.context, share.instance) + + self.share_manager.driver.migration_get_progress.\ + assert_called_once_with( + self.context, share.instance, server, 'migration_info') + + def test_migration_get_progress_invalid(self): + + share = db_utils.create_share() + + self.mock_object(db, 'share_get', mock.Mock(return_value=share)) + + self.assertRaises( + exception.InvalidShare, self.share_manager.migration_get_progress, + self.context, share) def test_manage_snapshot_invalid_driver_mode(self): self.mock_object(self.share_manager, 'driver') diff --git a/manila/tests/share/test_migration.py b/manila/tests/share/test_migration.py index 9dc7a6ce7a..189926949f 100644 --- a/manila/tests/share/test_migration.py +++ b/manila/tests/share/test_migration.py @@ -23,7 +23,6 @@ from manila import context from manila import db from manila import exception from manila.share import api as share_api -from manila.share import driver from manila.share import migration from manila import test from manila.tests import db_utils @@ -37,70 +36,42 @@ class ShareMigrationHelperTestCase(test.TestCase): def setUp(self): super(ShareMigrationHelperTestCase, self).setUp() self.share = db_utils.create_share() + self.share_instance = db_utils.create_share_instance( + share_id=self.share['id'], + share_network_id='fake_network_id') self.context = context.get_admin_context() self.helper = migration.ShareMigrationHelper( - self.context, db, - driver.CONF.migration_create_delete_share_timeout, - driver.CONF.migration_wait_access_rules_timeout, self.share) - - def test_deny_rules_and_wait(self): - saved_rules = [db_utils.create_access(share_id=self.share['id'])] - - fake_share_instances = [ - {"id": "1", "access_rules_status": constants.STATUS_OUT_OF_SYNC}, - {"id": "1", "access_rules_status": constants.STATUS_ACTIVE}, - ] - - self.mock_object(share_api.API, 'deny_access_to_instance') - self.mock_object(db, 'share_instance_get', - mock.Mock(side_effect=fake_share_instances)) - self.mock_object(time, 'sleep') - - self.helper.deny_rules_and_wait( - self.context, self.share.instance, saved_rules) - - db.share_instance_get.assert_any_call( - self.context, self.share.instance['id']) - - def test_add_rules_and_wait(self): - - fake_access_rules = [ - {'access_type': 'fake', 'access_level': 'ro', 'access_to': 'fake'}, - {'access_type': 'f0ke', 'access_level': 'rw', 'access_to': 'f0ke'}, - ] - - self.mock_object(share_api.API, 
'allow_access_to_instance') - self.mock_object(self.helper, 'wait_for_access_update') - self.mock_object(db, 'share_access_create') - - self.helper.add_rules_and_wait(self.context, self.share.instance, - fake_access_rules) - - share_api.API.allow_access_to_instance.assert_called_once_with( - self.context, self.share.instance, mock.ANY - ) - self.helper.wait_for_access_update.assert_called_once_with( - self.share.instance - ) + self.context, db, self.share) def test_delete_instance_and_wait(self): + # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', - mock.Mock(side_effect=[self.share.instance, None])) + mock.Mock(side_effect=[self.share_instance, + exception.NotFound()])) self.mock_object(time, 'sleep') - self.helper.delete_instance_and_wait(self.context, - self.share.instance) + # run + self.helper.delete_instance_and_wait(self.share_instance) - db.share_instance_get.assert_any_call( - self.context, self.share.instance['id']) + # asserts + share_api.API.delete_instance.assert_called_once_with( + self.context, self.share_instance, True) + + db.share_instance_get.assert_has_calls([ + mock.call(self.context, self.share_instance['id']), + mock.call(self.context, self.share_instance['id'])]) + + time.sleep.assert_called_once_with(1) def test_delete_instance_and_wait_timeout(self): + # mocks self.mock_object(share_api.API, 'delete_instance') + self.mock_object(db, 'share_instance_get', - mock.Mock(side_effect=[self.share.instance, None])) + mock.Mock(side_effect=[self.share_instance, None])) self.mock_object(time, 'sleep') now = time.time() @@ -109,29 +80,40 @@ class ShareMigrationHelperTestCase(test.TestCase): self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) + # run self.assertRaises(exception.ShareMigrationFailed, self.helper.delete_instance_and_wait, - self.context, self.share.instance) + self.share_instance) + + # asserts + share_api.API.delete_instance.assert_called_once_with( + self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( - self.context, self.share.instance['id']) + self.context, self.share_instance['id']) + + time.time.assert_has_calls([mock.call(), mock.call()]) def test_delete_instance_and_wait_not_found(self): + # mocks self.mock_object(share_api.API, 'delete_instance') self.mock_object(db, 'share_instance_get', mock.Mock(side_effect=exception.NotFound)) - self.mock_object(time, 'sleep') - self.helper.delete_instance_and_wait(self.context, - self.share.instance) + # run + self.helper.delete_instance_and_wait(self.share_instance) + + # asserts + share_api.API.delete_instance.assert_called_once_with( + self.context, self.share_instance, True) db.share_instance_get.assert_called_once_with( - self.context, self.share.instance['id']) + self.context, self.share_instance['id']) def test_create_instance_and_wait(self): - host = {'host': 'fake-host'} + host = {'host': 'fake_host'} share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, @@ -140,6 +122,7 @@ class ShareMigrationHelperTestCase(test.TestCase): share_id=self.share['id'], status=constants.STATUS_AVAILABLE, share_network_id='fake_network_id') + # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) self.mock_object(db, 'share_instance_get', @@ -147,43 +130,68 @@ class ShareMigrationHelperTestCase(test.TestCase): share_instance_available])) self.mock_object(time, 'sleep') - 
self.helper.create_instance_and_wait( - self.context, self.share, share_instance_creating, host) + # run + self.helper.create_instance_and_wait(self.share, + share_instance_creating, host) - db.share_instance_get.assert_any_call( - self.context, share_instance_creating['id'], with_share_data=True) + # asserts + share_api.API.create_instance.assert_called_once_with( + self.context, self.share, self.share_instance['share_network_id'], + 'fake_host') + + db.share_instance_get.assert_has_calls([ + mock.call(self.context, share_instance_creating['id'], + with_share_data=True), + mock.call(self.context, share_instance_creating['id'], + with_share_data=True)]) + + time.sleep.assert_called_once_with(1) def test_create_instance_and_wait_status_error(self): - host = {'host': 'fake-host'} + host = {'host': 'fake_host'} share_instance_error = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_ERROR, share_network_id='fake_network_id') + # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_error)) + self.mock_object(self.helper, 'cleanup_new_instance') self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_error)) - self.mock_object(time, 'sleep') + # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, - self.context, self.share, share_instance_error, host) + self.share, self.share_instance, host) + + # asserts + share_api.API.create_instance.assert_called_once_with( + self.context, self.share, self.share_instance['share_network_id'], + 'fake_host') db.share_instance_get.assert_called_once_with( self.context, share_instance_error['id'], with_share_data=True) + self.helper.cleanup_new_instance.assert_called_once_with( + share_instance_error) + def test_create_instance_and_wait_timeout(self): - host = {'host': 'fake-host'} + host = {'host': 'fake_host'} share_instance_creating = db_utils.create_share_instance( share_id=self.share['id'], status=constants.STATUS_CREATING, share_network_id='fake_network_id') + # mocks self.mock_object(share_api.API, 'create_instance', mock.Mock(return_value=share_instance_creating)) + + self.mock_object(self.helper, 'cleanup_new_instance') + self.mock_object(db, 'share_instance_get', mock.Mock(return_value=share_instance_creating)) self.mock_object(time, 'sleep') @@ -191,244 +199,207 @@ class ShareMigrationHelperTestCase(test.TestCase): now = time.time() timeout = now + 310 - self.mock_object(time, 'time', - mock.Mock(side_effect=[now, timeout])) + self.mock_object(time, 'time', mock.Mock(side_effect=[now, timeout])) + # run self.assertRaises(exception.ShareMigrationFailed, self.helper.create_instance_and_wait, - self.context, self.share, share_instance_creating, - host) + self.share, self.share_instance, host) + + # asserts + share_api.API.create_instance.assert_called_once_with( + self.context, self.share, self.share_instance['share_network_id'], + 'fake_host') db.share_instance_get.assert_called_once_with( self.context, share_instance_creating['id'], with_share_data=True) - def test_wait_for_access_update(self): - sid = 1 - fake_share_instances = [ - {'id': sid, 'access_rules_status': constants.STATUS_OUT_OF_SYNC}, - {'id': sid, 'access_rules_status': constants.STATUS_ACTIVE}, - ] + time.time.assert_has_calls([mock.call(), mock.call()]) - self.mock_object(time, 'sleep') - self.mock_object(db, 'share_instance_get', - mock.Mock(side_effect=fake_share_instances)) + self.helper.cleanup_new_instance.assert_called_once_with( + 
share_instance_creating) - self.helper.wait_for_access_update(fake_share_instances[0]) + def test_change_to_read_only_with_ro_support(self): - db.share_instance_get.assert_has_calls( - [mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)] - ) - time.sleep.assert_called_once_with(1) + share_instance = db_utils.create_share_instance( + share_id=self.share['id'], status=constants.STATUS_AVAILABLE) - @ddt.data( - ( - {'id': '1', 'access_rules_status': constants.STATUS_ERROR}, - exception.ShareMigrationFailed - ), - ( - {'id': '1', 'access_rules_status': constants.STATUS_OUT_OF_SYNC}, - exception.ShareMigrationFailed - ), - ) - @ddt.unpack - def test_wait_for_access_update_invalid(self, fake_instance, expected_exc): - self.mock_object(time, 'sleep') - self.mock_object(db, 'share_instance_get', - mock.Mock(return_value=fake_instance)) + access = db_utils.create_access(share_id=self.share['id'], + access_to='fake_ip', + access_level='rw') - now = time.time() - timeout = now + 100 + server = db_utils.create_share_server(share_id=self.share['id']) - self.mock_object(time, 'time', - mock.Mock(side_effect=[now, timeout])) + # mocks + share_driver = mock.Mock() + self.mock_object(share_driver, 'update_access') - self.assertRaises(expected_exc, - self.helper.wait_for_access_update, fake_instance) + self.mock_object(db, 'share_access_get_all_for_instance', + mock.Mock(return_value=[access])) - def test_allow_migration_access(self): - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type', - 'access_level': constants.ACCESS_LEVEL_RW} + # run + self.helper.change_to_read_only(share_instance, server, True, + share_driver) - access_active = db_utils.create_access(share_id=self.share['id']) + # asserts + db.share_access_get_all_for_instance.assert_called_once_with( + self.context, share_instance['id']) + share_driver.update_access.assert_called_once_with( + self.context, share_instance, [access], add_rules=[], + delete_rules=[], share_server=server) - self.mock_object(self.helper, 'wait_for_access_update', - mock.Mock(return_value=access_active)) + def test_change_to_read_only_without_ro_support(self): - self.mock_object(self.helper.db, 'share_access_create', - mock.Mock(return_value=access_active)) - self.mock_object( - self.helper.db, 'share_access_get_all_by_type_and_access', - mock.Mock(return_value=[])) - self.mock_object(self.helper.api, 'allow_access_to_instance') + share_instance = db_utils.create_share_instance( + share_id=self.share['id'], status=constants.STATUS_AVAILABLE) - result = self.helper.allow_migration_access( - access, self.share.instance) + access = db_utils.create_access(share_id=self.share['id'], + access_to='fake_ip', + access_level='rw') - self.assertEqual(access_active, result) + server = db_utils.create_share_server(share_id=self.share['id']) - self.helper.wait_for_access_update.assert_called_once_with( - self.share.instance) + # mocks + share_driver = mock.Mock() + self.mock_object(share_driver, 'update_access') - def test_allow_migration_access_exists(self): - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type', - 'access_level': 'fake_level'} + self.mock_object(db, 'share_access_get_all_for_instance', + mock.Mock(return_value=[access])) - access_active = db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') + # run + self.helper.change_to_read_only(share_instance, server, False, + share_driver) - self.mock_object(self.helper.api, 'allow_access_to_instance') + # asserts + db.share_access_get_all_for_instance.assert_called_once_with( + 
self.context, share_instance['id']) + share_driver.update_access.assert_called_once_with( + self.context, share_instance, [], add_rules=[], + delete_rules=[access], share_server=server) - self.mock_object( - self.helper.db, 'share_access_get_all_by_type_and_access', - mock.Mock(return_value=[access_active])) + def test_revert_access_rules(self): - result = self.helper.allow_migration_access( - access, self.share.instance) + share_instance = db_utils.create_share_instance( + share_id=self.share['id'], status=constants.STATUS_AVAILABLE) - self.assertEqual(access_active, result) + access = db_utils.create_access(share_id=self.share['id'], + access_to='fake_ip', + access_level='rw') - def test_deny_migration_access(self): + server = db_utils.create_share_server(share_id=self.share['id']) - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type'} + # mocks + share_driver = mock.Mock() + self.mock_object(share_driver, 'update_access') - access_active = db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') + self.mock_object(db, 'share_access_get_all_for_instance', + mock.Mock(return_value=[access])) - self.mock_object(self.helper.api, 'access_get', - mock.Mock(return_value=access_active)) + # run + self.helper.revert_access_rules(share_instance, server, share_driver) - self.mock_object(self.helper.api, 'deny_access_to_instance') + # asserts + db.share_access_get_all_for_instance.assert_called_once_with( + self.context, share_instance['id']) + share_driver.update_access.assert_called_once_with( + self.context, share_instance, [access], add_rules=[], + delete_rules=[], share_server=server) - self.mock_object(self.helper, 'wait_for_access_update') + def test_apply_new_access_rules(self): - self.helper.deny_migration_access(access_active, access, - self.share.instance) + share_instance = db_utils.create_share_instance( + share_id=self.share['id'], status=constants.STATUS_AVAILABLE) + new_share_instance = db_utils.create_share_instance( + share_id=self.share['id'], status=constants.STATUS_AVAILABLE, + access_rules_status='active') - self.helper.wait_for_access_update.assert_called_once_with( - self.share.instance - ) + access = db_utils.create_access(share_id=self.share['id'], + access_to='fake_ip', + access_level='rw') - def test_deny_migration_access_not_found(self): + # mocks + self.mock_object(db, 'share_access_get_all_for_instance', + mock.Mock(return_value=[access])) + self.mock_object(self.helper, '_add_rules_and_wait') - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type'} + # run + self.helper.apply_new_access_rules(share_instance, new_share_instance) - access_active = db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') + # asserts + db.share_access_get_all_for_instance.assert_called_once_with( + self.context, share_instance['id']) + self.helper._add_rules_and_wait.assert_called_once_with( + new_share_instance, [access]) - self.mock_object(self.helper.api, 'access_get', - mock.Mock(side_effect=exception.NotFound('fake'))) + @ddt.data(None, Exception('fake')) + def test_cleanup_new_instance(self, exc): - self.helper.deny_migration_access( - access_active, access, self.share.instance) + # mocks + self.mock_object(self.helper, 'delete_instance_and_wait', + mock.Mock(side_effect=exc)) - def test_deny_migration_access_none(self): + self.mock_object(migration.LOG, 'warning') - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type'} + # run + self.helper.cleanup_new_instance(self.share_instance) - access_active = 
db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') + # asserts + self.helper.delete_instance_and_wait.assert_called_once_with( + self.share_instance) - self.mock_object(self.helper.api, 'access_get_all', - mock.Mock(return_value=[access_active])) + if exc: + migration.LOG.warning.called - self.mock_object(self.helper.api, 'deny_access_to_instance') + @ddt.data(None, Exception('fake')) + def test_cleanup_access_rules(self, exc): - self.mock_object(self.helper, 'wait_for_access_update') + # mocks + server = db_utils.create_share_server() + share_driver = mock.Mock() + self.mock_object(self.helper, 'revert_access_rules', + mock.Mock(side_effect=exc)) - self.helper.deny_migration_access(None, access, self.share.instance) + self.mock_object(migration.LOG, 'warning') - self.helper.wait_for_access_update.assert_called_once_with( - self.share.instance) + # run + self.helper.cleanup_access_rules(self.share_instance, server, + share_driver) - def test_deny_migration_access_exception(self): + # asserts + self.helper.revert_access_rules.assert_called_once_with( + self.share_instance, server, share_driver) - access = {'access_to': 'fake_ip', - 'access_type': 'fake_type'} + if exc: + migration.LOG.warning.called - access_active = db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') + def test__add_rules_and_wait(self): - self.mock_object(self.helper.api, 'access_get', - mock.Mock(return_value=access_active)) + access = db_utils.create_access(share_id=self.share['id']) - self.mock_object(self.helper.api, 'deny_access_to_instance', - mock.Mock(side_effect=[exception.NotFound('fake')])) + values = { + 'share_id': self.share['id'], + 'access_type': access['access_type'], + 'access_level': access['access_level'], + 'access_to': access['access_to'] + } - self.assertRaises(exception.NotFound, - self.helper.deny_migration_access, access_active, - access, self.share.instance) + self.helper.migration_wait_access_rules_timeout = 60 - def test_cleanup_migration_access_exception(self): + # mocks + self.mock_object(db, 'share_access_create') - self.mock_object(self.helper, 'deny_migration_access', - mock.Mock(side_effect=Exception('fake'))) + self.mock_object(share_api.API, 'allow_access_to_instance') - self.helper.cleanup_migration_access(None, None, self.share.instance) + self.mock_object(utils, 'wait_for_access_update') - def test_cleanup_temp_folder_exception(self): + # run + self.helper._add_rules_and_wait(self.share_instance, [access]) - self.mock_object(utils, 'execute', - mock.Mock(side_effect=Exception('fake'))) + # asserts + db.share_access_create.assert_called_once_with(self.context, values) - self.helper.cleanup_temp_folder(self.share.instance, None) + share_api.API.allow_access_to_instance.assert_called_once_with( + self.context, self.share_instance, [access]) - def test_cleanup_unmount_temp_folder_exception(self): - - self.mock_object(utils, 'execute', - mock.Mock(side_effect=Exception('fake'))) - - self.helper.cleanup_unmount_temp_folder(self.share.instance, None) - - def test_change_to_read_only(self): - - access_active = db_utils.create_access(share_id=self.share['id'], - access_to='fake_ip') - - self.mock_object(db, 'share_access_get_all_for_share', - mock.Mock(return_value=[access_active])) - - self.mock_object(self.helper, 'deny_rules_and_wait') - self.mock_object(self.helper, 'add_rules_and_wait') - - result = self.helper.change_to_read_only(True, self.share.instance) - - self.assertEqual([access_active], result) - - 
db.share_access_get_all_for_share.assert_called_once_with(
-            self.context, self.share['id'])
-
-        self.helper.deny_rules_and_wait.assert_called_once_with(
-            self.context, self.share.instance, [access_active])
-        self.helper.add_rules_and_wait.assert_called_once_with(
-            self.context, self.share.instance, [access_active], 'ro')
-
-    @ddt.data(None, 'new_instance')
-    def test_revert_access_rules(self, new_instance):
-
-        access_active = db_utils.create_access(share_id=self.share['id'],
-                                               access_to='fake_ip')
-
-        self.mock_object(db, 'share_access_get_all_for_share',
-                         mock.Mock(return_value=[access_active]))
-
-        self.mock_object(self.helper, 'deny_rules_and_wait')
-        self.mock_object(self.helper, 'add_rules_and_wait')
-
-        if new_instance:
-            new_instance = self.share.instance
-        self.helper.revert_access_rules(True, self.share.instance,
-                                        new_instance, [access_active])
-
-        db.share_access_get_all_for_share.assert_called_once_with(
-            self.context, self.share['id'])
-
-        self.helper.deny_rules_and_wait.assert_called_once_with(
-            self.context, self.share.instance, [access_active])
-        if new_instance:
-            self.helper.add_rules_and_wait.assert_called_once_with(
-                self.context, self.share.instance, [access_active])
+        utils.wait_for_access_update.assert_called_once_with(
+            self.context, db, self.share_instance, 60)
diff --git a/manila/tests/share/test_rpcapi.py b/manila/tests/share/test_rpcapi.py
index 9fc6f20984..cb483bcd28 100644
--- a/manila/tests/share/test_rpcapi.py
+++ b/manila/tests/share/test_rpcapi.py
@@ -238,28 +238,47 @@ class ShareRpcAPITestCase(test.TestCase):
                              cgsnapshot=self.fake_cgsnapshot,
                              host='fake_host1')
 
-    def test_migrate_share(self):
+    def test_migration_start(self):
         fake_dest_host = self.Desthost()
-        self._test_share_api('migrate_share',
+        self._test_share_api('migration_start',
                              rpc_method='cast',
                              version='1.6',
                              share=self.fake_share,
                              dest_host=fake_dest_host,
-                             force_host_copy='1')
+                             force_host_copy=True,
+                             notify=True)
 
-    def test_get_migration_info(self):
-        self._test_share_api('get_migration_info',
+    def test_migration_get_info(self):
+        self._test_share_api('migration_get_info',
                              rpc_method='call',
                              version='1.6',
-                             share_instance=self.fake_share,
-                             share_server=self.fake_share_server)
+                             share_instance=self.fake_share)
 
-    def test_get_driver_migration_info(self):
-        self._test_share_api('get_driver_migration_info',
+    def test_migration_get_driver_info(self):
+        self._test_share_api('migration_get_driver_info',
                              rpc_method='call',
                              version='1.6',
-                             share_instance=self.fake_share,
-                             share_server=self.fake_share_server)
+                             share_instance=self.fake_share)
+
+    def test_migration_complete(self):
+        self._test_share_api('migration_complete',
+                             rpc_method='cast',
+                             version='1.10',
+                             share=self.fake_share,
+                             share_instance_id='fake_ins_id',
+                             new_share_instance_id='new_fake_ins_id')
+
+    def test_migration_cancel(self):
+        self._test_share_api('migration_cancel',
+                             rpc_method='call',
+                             version='1.10',
+                             share=self.fake_share)
+
+    def test_migration_get_progress(self):
+        self._test_share_api('migration_get_progress',
+                             rpc_method='call',
+                             version='1.10',
+                             share=self.fake_share)
 
     def test_delete_share_replica(self):
         self._test_share_api('delete_share_replica',
diff --git a/manila/tests/share/test_share_utils.py b/manila/tests/share/test_share_utils.py
index 2071a5a7df..60f469fa13 100644
--- a/manila/tests/share/test_share_utils.py
+++ b/manila/tests/share/test_share_utils.py
@@ -16,11 +16,6 @@
 """Tests For miscellaneous util methods used with share."""
 
-import os
-import shutil
-
-import mock
-
 from manila.share import utils as share_utils
 from manila import test
 
@@ -145,126 +140,3 @@ class ShareUtilsTestCase(test.TestCase):
         expected = None
         self.assertEqual(expected, share_utils.append_host(host, pool))
-
-
-class CopyClassTestCase(test.TestCase):
-    def setUp(self):
-        super(CopyClassTestCase, self).setUp()
-        src = '/path/fake/src'
-        dest = '/path/fake/dst'
-        ignore_list = ['item']
-        self._copy = share_utils.Copy(src, dest, ignore_list)
-        self._copy.totalSize = 10000
-        self._copy.currentSize = 100
-        self._copy.files = [{'name': '/fileA', 'attr': 100},
-                            {'name': '/fileB', 'attr': 150},
-                            {'name': '/fileC', 'attr': 200}]
-        self._copy.dirs = [{'name': '/fakeA', 'attr': 777},
-                           {'name': '/fakeB', 'attr': 666},
-                           {'name': '/fakeC', 'attr': 767}]
-        self._copy.currentCopy = {'file_path': '/fake/path', 'size': 100}
-
-        self.stat_result = [777, 'ino', 'dev', 'nlink', 'uid',
-                            'gid', 100, 'at', 'mt', 'ct']
-
-        self.mock_log = self.mock_object(share_utils, 'LOG')
-
-    def test_get_progress(self):
-        expected = {'total_progress': 1,
-                    'current_file_path': '/fake/path',
-                    'current_file_progress': 100}
-
-        self.mock_object(os, 'stat', mock.Mock(return_value=self.stat_result))
-
-        out = self._copy.get_progress()
-
-        self.assertEqual(expected, out)
-        os.stat.assert_called_once_with('/fake/path')
-
-    def test_get_progress_current_copy_none(self):
-        self._copy.currentCopy = None
-        expected = {'total_progress': 100}
-
-        out = self._copy.get_progress()
-
-        self.assertEqual(expected, out)
-
-    def test_get_progress_os_exception(self):
-        expected = {'total_progress': 1,
-                    'current_file_path': '/fake/path',
-                    'current_file_progress': 0}
-
-        self.mock_object(os, 'stat', mock.Mock(side_effect=OSError))
-
-        out = self._copy.get_progress()
-        os.stat.assert_called_once_with('/fake/path')
-        self.assertEqual(expected, out)
-
-    def test_run(self):
-        dirpath = '/dirpath1'
-        dirnames = [('dir1', 'dir2'), ('dir3', 'dir4')]
-        filenames = [('file1.txt', 'file2.exe'), ('file3.txt', 'file4.exe')]
-        os_walk_return = [(dirpath, dirnames[0], filenames[0]),
-                          (dirpath, dirnames[1], filenames[1])]
-
-        self.mock_object(shutil, 'copy2', mock.Mock())
-        self.mock_object(shutil, 'copystat', mock.Mock())
-        self.mock_object(os, 'stat', mock.Mock(return_value=self.stat_result))
-        self.mock_object(os, 'walk', mock.Mock(return_value=os_walk_return))
-        self.mock_object(os, 'mkdir', mock.Mock())
-
-        self._copy.run()
-
-        self.assertTrue(self.mock_log.info.called)
-        os.walk.assert_called_once_with('/path/fake/src')
-        # os.stats called in explore and get_progress functions
-        self.assertEqual(16, os.stat.call_count)
-
-    def test_copy(self):
-        src = '/path/fake/src'
-        dest = '/path/fake/dst'
-
-        self.mock_object(os, 'stat', mock.Mock(return_value=self.stat_result))
-        self.mock_object(os, 'mkdir', mock.Mock())
-        self.mock_object(shutil, 'copy2', mock.Mock())
-        self.mock_object(shutil, 'copystat', mock.Mock())
-
-        self._copy.copy(src, dest)
-
-        self.assertTrue(self.mock_log.info.called)
-        # shutil.copystat should be called 3 times.
-        # Once for each entry in self._copy.dirs
-        self.assertEqual(3, shutil.copystat.call_count)
-        # os.stat should be called 3 times.
-        # Once for each entry in self._copy.files
-        self.assertEqual(3, os.stat.call_count)
-        self.assertEqual(3, os.mkdir.call_count)
-
-        args = ('/fileA', '/fileB', '/fileC')
-        os.stat.assert_has_calls([mock.call(a) for a in args])
-        args = ('/fakeA', '/fakeB', '/fakeC')
-        os.mkdir.assert_has_calls([mock.call(a) for a in args])
-
-    def test_explore(self):
-        path = '/dirpath1'
-        dirpath = '/dirpath1'
-        dirnames = [('dir1', 'dir2'), ('dir3', 'dir4')]
-        filenames = [('file1.txt', 'file2.exe'), ('file3.txt', 'file4.exe')]
-        os_walk_return = [(dirpath, dirnames[0], filenames[0]),
-                          (dirpath, dirnames[1], filenames[1])]
-
-        self.mock_object(os, 'stat', mock.Mock(return_value=self.stat_result))
-        self.mock_object(os, 'walk', mock.Mock(return_value=os_walk_return))
-
-        self._copy.explore(path)
-
-        os.walk.assert_called_once_with('/dirpath1')
-        # Function os.stat should be called 8 times.
-        # 4 times for dirname in dirnames, and 4 times for
-        # filename in filenames
-        self.assertEqual(8, os.stat.call_count)
-
-        args = ('/dirpath1/dir1', '/dirpath1/dir2', '/dirpath1/file1.txt',
-                '/dirpath1/file2.exe', '/dirpath1/dir3', '/dirpath1/dir4',
-                '/dirpath1/file3.txt', '/dirpath1/file4.exe')
-        os.stat.assert_has_calls([mock.call(a) for a in args])
diff --git a/manila/tests/test_utils.py b/manila/tests/test_utils.py
index 5e19fc71f9..085e4dce0d 100644
--- a/manila/tests/test_utils.py
+++ b/manila/tests/test_utils.py
@@ -31,6 +31,9 @@ import paramiko
 from six.moves import builtins
 
 import manila
+from manila.common import constants
+from manila import context
+from manila.db import api as db
 from manila import exception
 from manila import test
 from manila import utils
@@ -885,3 +888,57 @@ class WaitUntilTrueTestCase(test.TestCase):
         exc = exception.ManilaException
         self.assertRaises(exception.ManilaException, utils.wait_until_true,
                           fake_predicate, 1, 1, exc)
+
+
+@ddt.ddt
+class ShareMigrationHelperTestCase(test.TestCase):
+    """Tests DataMigrationHelper."""
+
+    def setUp(self):
+        super(ShareMigrationHelperTestCase, self).setUp()
+        self.context = context.get_admin_context()
+
+    def test_wait_for_access_update(self):
+        sid = 1
+        fake_share_instances = [
+            {'id': sid, 'access_rules_status': constants.STATUS_OUT_OF_SYNC},
+            {'id': sid, 'access_rules_status': constants.STATUS_ACTIVE},
+        ]
+
+        self.mock_object(time, 'sleep')
+        self.mock_object(db, 'share_instance_get',
+                         mock.Mock(side_effect=fake_share_instances))
+
+        utils.wait_for_access_update(self.context, db,
+                                     fake_share_instances[0], 1)
+
+        db.share_instance_get.assert_has_calls(
+            [mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)]
+        )
+        time.sleep.assert_called_once_with(1)
+
+    @ddt.data(
+        (
+            {'id': '1', 'access_rules_status': constants.STATUS_ERROR},
+            exception.ShareMigrationFailed
+        ),
+        (
+            {'id': '1', 'access_rules_status': constants.STATUS_OUT_OF_SYNC},
+            exception.ShareMigrationFailed
+        ),
+    )
+    @ddt.unpack
+    def test_wait_for_access_update_invalid(self, fake_instance, expected_exc):
+        self.mock_object(time, 'sleep')
+        self.mock_object(db, 'share_instance_get',
+                         mock.Mock(return_value=fake_instance))
+
+        now = time.time()
+        timeout = now + 100
+
+        self.mock_object(time, 'time',
+                         mock.Mock(side_effect=[now, timeout]))
+
+        self.assertRaises(expected_exc,
+                          utils.wait_for_access_update, self.context,
+                          db, fake_instance, 1)
diff --git a/manila/utils.py b/manila/utils.py
index 6b100ed12e..c147e73f1d 100644
--- a/manila/utils.py
+++ b/manila/utils.py
@@ -29,6 +29,7 @@ import shutil
 import socket
 import sys
 import tempfile
+import time
 
 import eventlet
 from eventlet import pools
@@ -43,6 +44,7 @@ import paramiko
 import retrying
 import six
 
+from manila.common import constants
 from manila.db import api as db_api
 from manila import exception
 from manila.i18n import _
@@ -671,3 +673,32 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
     with eventlet.timeout.Timeout(timeout, exception):
         while not predicate():
             eventlet.sleep(sleep)
+
+
+def wait_for_access_update(context, db, share_instance,
+                           migration_wait_access_rules_timeout):
+    starttime = time.time()
+    deadline = starttime + migration_wait_access_rules_timeout
+    tries = 0
+
+    while True:
+        instance = db.share_instance_get(context, share_instance['id'])
+
+        if instance['access_rules_status'] == constants.STATUS_ACTIVE:
+            break
+
+        tries += 1
+        now = time.time()
+        if instance['access_rules_status'] == constants.STATUS_ERROR:
+            msg = _("Failed to update access rules"
+                    " on share instance %s") % share_instance['id']
+            raise exception.ShareMigrationFailed(reason=msg)
+        elif now > deadline:
+            msg = _("Timeout trying to update access rules"
+                    " on share instance %(share_id)s. Timeout "
+                    "was %(timeout)s seconds.") % {
+                'share_id': share_instance['id'],
+                'timeout': migration_wait_access_rules_timeout}
+            raise exception.ShareMigrationFailed(reason=msg)
+        else:
+            time.sleep(tries ** 2)
diff --git a/manila_tempest_tests/config.py b/manila_tempest_tests/config.py
index 10da4cbf8b..5d935c26b2 100644
--- a/manila_tempest_tests/config.py
+++ b/manila_tempest_tests/config.py
@@ -36,7 +36,7 @@ ShareGroup = [
                help="The minimum api microversion is configured to be the "
                     "value of the minimum microversion supported by Manila."),
     cfg.StrOpt("max_api_microversion",
-               default="2.14",
+               default="2.15",
                help="The maximum api microversion is configured to be the "
                     "value of the latest microversion supported by Manila."),
     cfg.StrOpt("region",
@@ -183,7 +183,7 @@ ShareGroup = [
                default="100",
                help="Flavor used for client vm in scenario tests."),
     cfg.IntOpt("migration_timeout",
-               default=1200,
+               default=1500,
                help="Time to wait for share migration before "
                     "timing out (seconds)."),
     cfg.StrOpt("default_share_type_name",
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index 6b4766d629..4ce74eb83b 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -945,16 +945,19 @@ class SharesV2Client(shares_client.SharesClient):
 
     ###############
 
-    def migrate_share(self, share_id, host, version=LATEST_MICROVERSION,
-                      action_name=None):
+    def migrate_share(self, share_id, host, notify,
+                      version=LATEST_MICROVERSION, action_name=None):
         if action_name is None:
-            if utils.is_microversion_gt(version, "2.6"):
+            if utils.is_microversion_lt(version, "2.7"):
+                action_name = 'os-migrate_share'
+            elif utils.is_microversion_lt(version, "2.15"):
                 action_name = 'migrate_share'
             else:
-                action_name = 'os-migrate_share'
+                action_name = 'migration_start'
         post_body = {
             action_name: {
                 'host': host,
+                'notify': notify,
             }
         }
         body = json.dumps(post_body)
@@ -962,27 +965,72 @@
                          headers=EXPERIMENTAL, extra_headers=True,
                          version=version)
 
-    def wait_for_migration_completed(self, share_id, dest_host,
-                                     version=LATEST_MICROVERSION):
+    def migration_complete(self, share_id, version=LATEST_MICROVERSION,
+                           action_name='migration_complete'):
+        post_body = {
+            action_name: None,
+        }
+        body = json.dumps(post_body)
+        return self.post('shares/%s/action' % share_id, body,
+                         headers=EXPERIMENTAL, extra_headers=True,
+                         version=version)
+
+    def migration_cancel(self, share_id, version=LATEST_MICROVERSION,
+                         action_name='migration_cancel'):
+        post_body = {
+            action_name: None,
+        }
+        body = json.dumps(post_body)
+        return self.post('shares/%s/action' % share_id, body,
+                         headers=EXPERIMENTAL, extra_headers=True,
+                         version=version)
+
+    def migration_get_progress(self, share_id, version=LATEST_MICROVERSION,
+                               action_name='migration_get_progress'):
+        post_body = {
+            action_name: None,
+        }
+        body = json.dumps(post_body)
+        return self.post('shares/%s/action' % share_id, body,
+                         headers=EXPERIMENTAL, extra_headers=True,
+                         version=version)
+
+    def reset_task_state(
+            self, share_id, task_state, version=LATEST_MICROVERSION,
+            action_name='reset_task_state'):
+        post_body = {
+            action_name: {
+                'task_state': task_state,
+            }
+        }
+        body = json.dumps(post_body)
+        return self.post('shares/%s/action' % share_id, body,
+                         headers=EXPERIMENTAL, extra_headers=True,
+                         version=version)
+
+    def wait_for_migration_status(self, share_id, dest_host, status,
+                                  version=LATEST_MICROVERSION):
         """Waits for a share to migrate to a certain host."""
         share = self.get_share(share_id, version=version)
         migration_timeout = CONF.share.migration_timeout
         start = int(time.time())
-        while share['task_state'] != 'migration_success':
+        while share['task_state'] != status:
             time.sleep(self.build_interval)
             share = self.get_share(share_id, version=version)
-            if share['task_state'] == 'migration_success':
+            if share['task_state'] == status:
                 return share
             elif share['task_state'] == 'migration_error':
                 raise share_exceptions.ShareMigrationException(
                     share_id=share['id'], src=share['host'], dest=dest_host)
            elif int(time.time()) - start >= migration_timeout:
-                message = ('Share %(share_id)s failed to migrate from '
-                           'host %(src)s to host %(dest)s within the required '
-                           'time %(timeout)s.' % {
+                message = ('Share %(share_id)s failed to reach status '
+                           '%(status)s when migrating from host %(src)s to '
+                           'host %(dest)s within the required time '
+                           '%(timeout)s.' % {
                               'src': share['host'],
                               'dest': dest_host,
                              'share_id': share['id'],
-                              'timeout': self.build_timeout
+                              'timeout': self.build_timeout,
+                              'status': status,
                           })
                raise exceptions.TimeoutException(message)
diff --git a/manila_tempest_tests/tests/api/admin/test_admin_actions.py b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
index 9ac085a8fc..108173e6c9 100644
--- a/manila_tempest_tests/tests/api/admin/test_admin_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
@@ -29,6 +29,8 @@ class AdminActionsTest(base.BaseSharesAdminTest):
     def resource_setup(cls):
         super(AdminActionsTest, cls).resource_setup()
         cls.states = ["error", "available"]
+        cls.task_states = ["migration_starting", "data_copying_in_progress",
+                           "migration_success"]
         cls.bad_status = "error_deleting"
         cls.sh = cls.create_share()
         cls.sh_instance = (
@@ -116,3 +118,11 @@ class AdminActionsTest(base.BaseSharesAdminTest):
         # Snapshot with status 'error_deleting' should be deleted
         self.shares_v2_client.force_delete(sn["id"], s_type="snapshots")
         self.shares_v2_client.wait_for_resource_deletion(snapshot_id=sn["id"])
+
+    @test.attr(type=["gate", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_reset_share_task_state(self):
+        for task_state in self.task_states:
+            self.shares_v2_client.reset_task_state(self.sh["id"], task_state)
+            self.shares_v2_client.wait_for_share_status(
+                self.sh["id"], task_state, 'task_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py b/manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py
index 45d9b4e08a..82fcd5a101 100644
--- a/manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_admin_actions_negative.py
@@ -166,3 +166,24 @@ class AdminActionsNegativeTest(base.BaseSharesAdminTest):
         self.assertRaises(lib_exc.Forbidden,
                           self.member_shares_v2_client.get_instances_of_share,
                           self.sh['id'])
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_reset_task_state_share_not_found(self):
+        self.assertRaises(
+            lib_exc.NotFound, self.shares_v2_client.reset_task_state,
+            'fake_share', 'migration_error')
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_reset_task_state_empty(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.reset_task_state,
+            self.sh['id'], None)
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_reset_task_state_invalid_state(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.reset_task_state,
+            self.sh['id'], 'fake_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py
index 517f43d207..96f657a950 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration.py
@@ -17,6 +17,7 @@ from tempest import config  # noqa
 from tempest import test  # noqa
 
 from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
 
 CONF = config.CONF
 
@@ -39,8 +40,45 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
             raise cls.skipException("Migration tests disabled. Skipping.")
 
     @test.attr(type=["gate", ])
+    @base.skip_if_microversion_lt("2.5")
     def test_migration_empty_v2_5(self):
 
+        share, dest_pool = self._setup_migration()
+
+        old_exports = share['export_locations']
+
+        share = self.migrate_share(share['id'], dest_pool, version='2.5')
+
+        self._validate_migration_successful(dest_pool, share, old_exports,
+                                            version='2.5')
+
+    @test.attr(type=["gate", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_migration_completion_empty_v2_15(self):
+
+        share, dest_pool = self._setup_migration()
+
+        old_exports = self.shares_v2_client.list_share_export_locations(
+            share['id'], version='2.15')
+        self.assertNotEmpty(old_exports)
+        old_exports = [x['path'] for x in old_exports
+                       if x['is_admin_only'] is False]
+        self.assertNotEmpty(old_exports)
+
+        share = self.migrate_share(
+            share['id'], dest_pool, version='2.15', notify=False,
+            wait_for_status='data_copying_completed')
+
+        self._validate_migration_successful(dest_pool, share,
+                                            old_exports, '2.15', notify=False)
+
+        share = self.migration_complete(share['id'], dest_pool, version='2.15')
+
+        self._validate_migration_successful(dest_pool, share, old_exports,
+                                            version='2.15')
+
+    def _setup_migration(self):
+
         pools = self.shares_client.list_pools()['pools']
 
         if len(pools) < 2:
@@ -51,6 +89,18 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
         share = self.create_share(self.protocol)
         share = self.shares_client.get_share(share['id'])
 
+        self.shares_v2_client.create_access_rule(
+            share['id'], access_to="50.50.50.50", access_level="rw")
+
+        self.shares_v2_client.wait_for_share_status(
+            share['id'], 'active', status_attr='access_rules_status')
+
+        self.shares_v2_client.create_access_rule(
+            share['id'], access_to="51.51.51.51", access_level="ro")
+
+        self.shares_v2_client.wait_for_share_status(
+            share['id'], 'active', status_attr='access_rules_status')
+
         dest_pool = next((x for x in pools if x['name'] != share['host']),
                          None)
 
@@ -59,10 +109,30 @@ class MigrationNFSTest(base.BaseSharesAdminTest):
         dest_pool = dest_pool['name']
 
-        old_export_location = share['export_locations'][0]
+        return share, dest_pool
 
-        share = self.migrate_share(share['id'], dest_pool, version='2.5')
+    def _validate_migration_successful(self, dest_pool, share,
+                                       old_exports, version, notify=True):
+        if utils.is_microversion_lt(version, '2.9'):
+            new_exports = share['export_locations']
+            self.assertNotEmpty(new_exports)
+        else:
+            new_exports = self.shares_v2_client.list_share_export_locations(
+                share['id'], version='2.9')
+            self.assertNotEmpty(new_exports)
+            new_exports = [x['path'] for x in new_exports if
+                           x['is_admin_only'] is False]
+            self.assertNotEmpty(new_exports)
 
-        self.assertEqual(dest_pool, share['host'])
-        self.assertNotEqual(old_export_location, share['export_locations'][0])
-        self.assertEqual('migration_success', share['task_state'])
+        # Share migrated
+        if notify:
+            self.assertEqual(dest_pool, share['host'])
+            for export in old_exports:
+                self.assertFalse(export in new_exports)
+            self.assertEqual('migration_success', share['task_state'])
+        # Share not migrated yet
+        else:
+            self.assertNotEqual(dest_pool, share['host'])
+            for export in old_exports:
+                self.assertTrue(export in new_exports)
+            self.assertEqual('data_copying_completed', share['task_state'])
diff --git a/manila_tempest_tests/tests/api/admin/test_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
new file mode 100644
index 0000000000..b7d75c4fed
--- /dev/null
+++ b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
@@ -0,0 +1,97 @@
+# Copyright 2015 Hitachi Data Systems.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config  # noqa
+from tempest.lib import exceptions as lib_exc  # noqa
+from tempest import test  # noqa
+
+from manila_tempest_tests.tests.api import base
+
+CONF = config.CONF
+
+
+class MigrationNFSTest(base.BaseSharesAdminTest):
+    """Tests Share Migration.
+
+    Tests migration in multi-backend environment.
+    """
+
+    protocol = "nfs"
+
+    @classmethod
+    def resource_setup(cls):
+        super(MigrationNFSTest, cls).resource_setup()
+        if not CONF.share.run_migration_tests:
+            raise cls.skipException("Migration tests disabled. Skipping.")
+
+        cls.share = cls.create_share(cls.protocol)
+        cls.share = cls.shares_client.get_share(cls.share['id'])
+        pools = cls.shares_client.list_pools()['pools']
+
+        if len(pools) < 2:
+            raise cls.skipException("At least two different pool entries "
+                                    "are needed to run migration tests. "
+                                    "Skipping.")
+        cls.dest_pool = next((x for x in pools
+                              if x['name'] != cls.share['host']), None)
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_migration_cancel_invalid(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migration_cancel,
+            self.share['id'])
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_migration_get_progress_invalid(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migration_get_progress,
+            self.share['id'])
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.15")
+    def test_migration_complete_invalid(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migration_complete,
+            self.share['id'])
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.5")
+    def test_migrate_share_with_snapshot_v2_5(self):
+        snap = self.create_snapshot_wait_for_active(self.share['id'])
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migrate_share,
+            self.share['id'], self.dest_pool, True, version='2.5')
+        self.shares_client.delete_snapshot(snap['id'])
+        self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"])
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.5")
+    def test_migrate_share_same_host_v2_5(self):
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migrate_share,
+            self.share['id'], self.share['host'], True, version='2.5')
+
+    @test.attr(type=["gate", "negative", ])
+    @base.skip_if_microversion_lt("2.5")
+    def test_migrate_share_not_available_v2_5(self):
+        self.shares_client.reset_state(self.share['id'], 'error')
+        self.shares_client.wait_for_share_status(self.share['id'], 'error')
+        self.assertRaises(
+            lib_exc.BadRequest, self.shares_v2_client.migrate_share,
+            self.share['id'], self.dest_pool, True, version='2.5')
+        self.shares_client.reset_state(self.share['id'], 'available')
+        self.shares_client.wait_for_share_status(self.share['id'], 'available')
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index 6236cad674..0eae2add03 100644
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -343,11 +343,22 @@ class BaseSharesTest(test.BaseTestCase):
         return share
 
     @classmethod
-    def migrate_share(cls, share_id, dest_host, client=None, **kwargs):
+    def migrate_share(cls, share_id, dest_host, client=None, notify=True,
+                      wait_for_status='migration_success', **kwargs):
         client = client or cls.shares_v2_client
-        client.migrate_share(share_id, dest_host, **kwargs)
-        share = client.wait_for_migration_completed(
-            share_id, dest_host, version=kwargs.get('version'))
+        client.migrate_share(share_id, dest_host, notify, **kwargs)
+        share = client.wait_for_migration_status(
+            share_id, dest_host, wait_for_status,
+            version=kwargs.get('version'))
+        return share
+
+    @classmethod
+    def migration_complete(cls, share_id, dest_host, client=None, **kwargs):
+        client = client or cls.shares_v2_client
+        client.migration_complete(share_id, **kwargs)
+        share = client.wait_for_migration_status(
+            share_id, dest_host, 'migration_success',
+            version=kwargs.get('version'))
         return share
 
     @classmethod
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 36a10a7f3e..eb28362a5c 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -196,8 +196,9 @@ class ShareScenarioTest(manager.NetworkScenarioTest):
 
     def _migrate_share(self, share_id, dest_host, client=None):
         client = client or self.shares_admin_v2_client
-        client.migrate_share(share_id, dest_host)
-        share = client.wait_for_migration_completed(share_id, dest_host)
+        client.migrate_share(share_id, dest_host, True)
+        share = client.wait_for_migration_status(share_id, dest_host,
+                                                 'migration_success')
         return share
 
     def _create_share_type(self, name, is_public=True, **kwargs):
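Reviewer note (illustrative only, not part of the patch): a minimal sketch of the two-phase flow the tempest changes above exercise, assuming an already-authenticated SharesV2Client instance named `client` plus `share_id` and `dest_pool` values obtained elsewhere; these names are placeholders, not fixtures defined by this change.

# Sketch of driving the new two-phase migration API via the updated client.
# Assumes `client`, `share_id` and `dest_pool` are provided by the caller.

# Phase 1: start the migration with notify=False and wait for the data copy
# to finish (task_state 'data_copying_completed').
client.migrate_share(share_id, dest_pool, notify=False, version='2.15')
share = client.wait_for_migration_status(
    share_id, dest_pool, 'data_copying_completed', version='2.15')

# Progress can be polled while the copy is running.
client.migration_get_progress(share_id, version='2.15')

# Phase 2: complete the migration and wait for 'migration_success'.
client.migration_complete(share_id, version='2.15')
share = client.wait_for_migration_status(
    share_id, dest_pool, 'migration_success', version='2.15')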