Huawei: Add manage share with share type in Huawei driver

Manage a share that already exists on the array.
Previously this operation only changed the share's name;
now the share is imported with its share_type and retyped accordingly.

Implements: blueprint manage-share-on-array
Change-Id: I906916f6ae7f5625863ca9f8e3324489937263cd
This commit is contained in:
liucheng 2015-11-25 18:14:11 +08:00
parent 8aec6ac7a7
commit e2c94a599e
4 changed files with 730 additions and 11 deletions

View File

@ -32,6 +32,10 @@ ERROR_CONNECT_TO_SERVER = -403
ERROR_UNAUTHORIZED_TO_SERVER = -401
ALLOC_TYPE_THIN_FLAG = "1"
ALLOC_TYPE_THIN = "Thin"
ALLOC_TYPE_THICK = "Thick"
THIN_PROVISIONING = "true"
THICK_PROVISIONING = "false"
OPTS_CAPABILITIES = {
'dedupe': False,

View File

@ -16,6 +16,7 @@
import time
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
from manila.common import constants as common_constants
@ -28,6 +29,7 @@ from manila.share.drivers.huawei import constants
from manila.share.drivers.huawei import huawei_utils
from manila.share.drivers.huawei.v3 import helper
from manila.share.drivers.huawei.v3 import smartx
from manila.share import share_types
from manila.share import utils as share_utils
@ -474,19 +476,18 @@ class V3StorageConnection(driver.HuaweiBase):
old_export_location = share['export_locations'][0]['path']
pool_name = share_utils.extract_host(share['host'], level='pool')
share_url_type = self.helper._get_share_url_type(share_proto)
old_share_name = self.helper._get_share_name_by_export_location(
old_export_location, share_proto)
share = self.helper._get_share_by_name(old_share_name,
share_storage = self.helper._get_share_by_name(old_share_name,
share_url_type)
if not share:
if not share_storage:
err_msg = (_("Can not get share ID by share %s.")
% old_export_location)
LOG.error(err_msg)
raise exception.InvalidShare(reason=err_msg)
fs_id = share['FSID']
fs_id = share_storage['FSID']
fs = self.helper._get_fs_info_by_id(fs_id)
if not self.check_fs_status(fs['HEALTHSTATUS'],
fs['RUNNINGSTATUS']):
@ -503,12 +504,212 @@ class V3StorageConnection(driver.HuaweiBase):
% {'fs_pool': fs['POOLNAME'],
'host_pool': pool_name}))
self.helper._change_fs_name(fs_id, share_name)
share_size = int(fs['CAPACITY']) / units.Mi / 2
result = self.helper._find_all_pool_info()
poolinfo = self.helper._find_pool_info(pool_name, result)
opts = huawei_utils.get_share_extra_specs_params(
share['share_type_id'])
specs = share_types.get_share_type_extra_specs(share['share_type_id'])
if ('capabilities:thin_provisioning' not in specs.keys()
and 'thin_provisioning' not in specs.keys()):
if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG:
opts['thin_provisioning'] = constants.THIN_PROVISIONING
else:
opts['thin_provisioning'] = constants.THICK_PROVISIONING
change_opts = self.check_retype_change_opts(opts, poolinfo, fs)
LOG.info(_LI('Retyping share (%(share)s), changed options are : '
'(%(change_opts)s).'),
{'share': old_share_name, 'change_opts': change_opts})
try:
self.retype_share(change_opts, fs_id)
except Exception as err:
message = (_("Retype share error. Share: %(share)s. "
"Reason: %(reason)s.")
% {'share': old_share_name,
'reason': err})
raise exception.InvalidShare(reason=message)
share_size = int(fs['CAPACITY']) / units.Mi / 2
self.helper._change_fs_name(fs_id, share_name)
location = self._get_location_path(share_name, share_proto)
return (share_size, [location])
def check_retype_change_opts(self, opts, poolinfo, fs):
    """Compute which SmartX settings must change to retype a managed FS.

    :param opts: extra-specs options derived from the new share type
        (keys used here: 'huawei_smartpartition', 'partitionname',
        'huawei_smartcache', 'cachename', 'thin_provisioning').
        NOTE: mutated in place — 'thin_provisioning' is rewritten to an
        alloc-type string below.
    :param poolinfo: pool info; 'type' is compared against the FS
        alloc type ('Thin'/'Thick').
    :param fs: filesystem info from the array. NOTE: mutated in place —
        'ALLOCTYPE' is rewritten from the raw flag to 'Thin'/'Thick'.
    :returns: dict with keys 'partitionid', 'cacheid' and
        'dedupe&compression'; each value is None (no change) or a
        ([old...], [new...]) pair describing the transition.
    :raises exception.InvalidInput: missing/unknown partition or cache
        name, or dedupe/compression requested on a thick FS.
    :raises exception.InvalidHost: FS alloc type conflicts with the
        pool type or the new share type's provisioning.
    """
    change_opts = {
        "partitionid": None,
        "cacheid": None,
        "dedupe&compression": None,
    }

    # SmartPartition: resolve the requested partition name to an array
    # id; record a transition whenever old and new ids differ.
    old_partition_id = fs['SMARTPARTITIONID']
    old_partition_name = None
    new_partition_id = None
    new_partition_name = None
    if strutils.bool_from_string(opts['huawei_smartpartition']):
        if not opts['partitionname']:
            raise exception.InvalidInput(
                reason=_('Partition name is None, please set '
                         'huawei_smartpartition:partitionname in key.'))
        new_partition_name = opts['partitionname']
        new_partition_id = self.helper._get_partition_id_by_name(
            new_partition_name)
        if new_partition_id is None:
            raise exception.InvalidInput(
                reason=(_("Can't find partition name on the array, "
                          "partition name is: %(name)s.")
                        % {"name": new_partition_name}))

    # A truthy old id with a different new id (possibly None) means the
    # FS must be removed from the old partition and/or added to the new.
    if old_partition_id != new_partition_id:
        if old_partition_id:
            partition_info = self.helper.get_partition_info_by_id(
                old_partition_id)
            old_partition_name = partition_info['NAME']
        change_opts["partitionid"] = ([old_partition_id,
                                       old_partition_name],
                                      [new_partition_id,
                                       new_partition_name])

    # SmartCache: same resolution/transition logic as SmartPartition.
    old_cache_id = fs['SMARTCACHEID']
    old_cache_name = None
    new_cache_id = None
    new_cache_name = None
    if strutils.bool_from_string(opts['huawei_smartcache']):
        if not opts['cachename']:
            raise exception.InvalidInput(
                reason=_('Cache name is None, please set '
                         'huawei_smartcache:cachename in key.'))
        new_cache_name = opts['cachename']
        new_cache_id = self.helper._get_cache_id_by_name(
            new_cache_name)
        if new_cache_id is None:
            raise exception.InvalidInput(
                reason=(_("Can't find cache name on the array, "
                          "cache name is: %(name)s.")
                        % {"name": new_cache_name}))

    if old_cache_id != new_cache_id:
        if old_cache_id:
            cache_info = self.helper.get_cache_info_by_id(
                old_cache_id)
            old_cache_name = cache_info['NAME']
        change_opts["cacheid"] = ([old_cache_id, old_cache_name],
                                  [new_cache_id, new_cache_name])

    # SmartDedupe & SmartCompression: fall back to the driver-wide
    # capability defaults when no opts are given.
    smartx_opts = constants.OPTS_CAPABILITIES
    if opts is not None:
        smart = smartx.SmartX()
        smartx_opts = smart.get_smartx_extra_specs_opts(opts)

    # NOTE(review): fs['COMPRESSION']/fs['DEDUP'] come from the REST
    # layer (likely strings such as "false") while smartx opts look
    # boolean — if the types differ, the inequality below always
    # reports a change. Confirm value types against the helper.
    old_compression = fs['COMPRESSION']
    new_compression = smartx_opts['compression']
    old_dedupe = fs['DEDUP']
    new_dedupe = smartx_opts['dedupe']

    # Normalize the raw alloc-type flag to the 'Thin'/'Thick' labels so
    # it can be compared against the pool type and the requested type.
    if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THIN_FLAG:
        fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THIN
    else:
        fs['ALLOCTYPE'] = constants.ALLOC_TYPE_THICK

    if strutils.bool_from_string(opts['thin_provisioning']):
        opts['thin_provisioning'] = constants.ALLOC_TYPE_THIN
    else:
        opts['thin_provisioning'] = constants.ALLOC_TYPE_THICK

    if (fs['ALLOCTYPE'] != poolinfo['type']
            or fs['ALLOCTYPE'] != opts['thin_provisioning']):
        msg = (_("Manage existing share fs type and pool type "
                 "or fs type and new_share_type mismatch. "
                 "fs type is: %(fs_type)s, pool type is: "
                 "%(pool_type)s, new_share_type is: "
                 "%(new_share_type)s")
               % {"fs_type": fs['ALLOCTYPE'],
                  "pool_type": poolinfo['type'],
                  "new_share_type": opts['thin_provisioning']})
        raise exception.InvalidHost(reason=msg)
    else:
        # Dedupe/compression are only valid on thin filesystems.
        if fs['ALLOCTYPE'] == constants.ALLOC_TYPE_THICK:
            if new_compression or new_dedupe:
                raise exception.InvalidInput(
                    reason=_("Dedupe or compression cannot be set for "
                             "thick filesystem."))
        else:
            if (old_dedupe != new_dedupe
                    or old_compression != new_compression):
                change_opts["dedupe&compression"] = ([old_dedupe,
                                                      old_compression],
                                                     [new_dedupe,
                                                      new_compression])
    return change_opts
def retype_share(self, change_opts, fs_id):
    """Apply the SmartX transitions computed by check_retype_change_opts.

    :param change_opts: dict of ([old...], [new...]) transitions for
        'partitionid', 'cacheid' and 'dedupe&compression'; None values
        are skipped.
    :param fs_id: id of the filesystem being retyped.
    """
    partition_change = change_opts.get('partitionid')
    if partition_change:
        # Detach from the old partition (if any) before attaching to
        # the new one (if any).
        (old_id, old_name), (new_id, new_name) = partition_change
        if old_id:
            self.helper._remove_fs_from_partition(fs_id, old_id)
        if new_id:
            self.helper._add_fs_to_partition(fs_id, new_id)
        msg = (_("Retype FS(id: %(fs_id)s) smartpartition from "
                 "(name: %(old_name)s, id: %(old_id)s) to "
                 "(name: %(new_name)s, id: %(new_id)s) "
                 "performed successfully.")
               % {"fs_id": fs_id,
                  "old_id": old_id, "old_name": old_name,
                  "new_id": new_id, "new_name": new_name})
        LOG.info(msg)

    cache_change = change_opts.get('cacheid')
    if cache_change:
        # Same pattern for SmartCache membership.
        (old_id, old_name), (new_id, new_name) = cache_change
        if old_id:
            self.helper._remove_fs_from_cache(fs_id, old_id)
        if new_id:
            self.helper._add_fs_to_cache(fs_id, new_id)
        msg = (_("Retype FS(id: %(fs_id)s) smartcache from "
                 "(name: %(old_name)s, id: %(old_id)s) to "
                 "(name: %(new_name)s, id: %(new_id)s) "
                 "performed successfully.")
               % {"fs_id": fs_id,
                  "old_id": old_id, "old_name": old_name,
                  "new_id": new_id, "new_name": new_name})
        LOG.info(msg)

    dedupe_change = change_opts.get('dedupe&compression')
    if dedupe_change:
        (old_dedupe, old_compression), (new_dedupe,
                                        new_compression) = dedupe_change
        # Only touch the FS when something actually differs.
        if ((old_dedupe != new_dedupe)
                or (old_compression != new_compression)):
            new_smartx_opts = {"dedupe": new_dedupe,
                               "compression": new_compression}
            self.helper._change_extra_specs(fs_id, new_smartx_opts)
            msg = (_("Retype FS(id: %(fs_id)s) dedupe from %(old_dedupe)s "
                     "to %(new_dedupe)s performed successfully, "
                     "compression from "
                     "%(old_compression)s to %(new_compression)s "
                     "performed successfully.")
                   % {"fs_id": fs_id,
                      "old_dedupe": old_dedupe,
                      "new_dedupe": new_dedupe,
                      "old_compression": old_compression,
                      "new_compression": new_compression})
            LOG.info(msg)
def _get_location_path(self, share_name, share_proto):
root = self.helper._read_xml()
target_ip = root.findtext('Storage/LogicalPortIP').strip()

View File

@ -606,6 +606,10 @@ class RestHelper(object):
fs['CAPACITY'] = result['data']['CAPACITY']
fs['ALLOCTYPE'] = result['data']['ALLOCTYPE']
fs['POOLNAME'] = result['data']['PARENTNAME']
fs['COMPRESSION'] = result['data']['ENABLECOMPRESSION']
fs['DEDUP'] = result['data']['ENABLEDEDUP']
fs['SMARTPARTITIONID'] = result['data']['CACHEPARTITIONID']
fs['SMARTCACHEID'] = result['data']['SMARTCACHEPARTITIONID']
return fs
def _get_share_path(self, share_name):
@ -679,6 +683,18 @@ class RestHelper(object):
msg = _("Change filesystem name error.")
self._assert_rest_result(result, msg)
def _change_extra_specs(self, fsid, extra_specs):
    """Update dedupe/compression flags of a filesystem via REST PUT."""
    payload = jsonutils.dumps({
        "ENABLEDEDUP": extra_specs['dedupe'],
        "ENABLECOMPRESSION": extra_specs['compression'],
    })
    result = self.call("/filesystem/%s" % fsid, payload, "PUT")
    self._assert_rest_result(result, _("Change extra_specs error."))
def _get_partition_id_by_name(self, name):
url = "/cachepartition"
result = self.call(url, None, "GET")
@ -690,6 +706,14 @@ class RestHelper(object):
return item['ID']
return None
def get_partition_info_by_id(self, partitionid):
    """Return a cache partition's info dict, looked up by its id."""
    response = self.call('/cachepartition/%s' % partitionid, None, "GET")
    self._assert_rest_result(response,
                             _('Get partition by partition id error.'))
    return response['data']
def _add_fs_to_partition(self, fs_id, partition_id):
url = "/filesystem/associate/cachepartition"
data = jsonutils.dumps({"ID": partition_id,
@ -701,6 +725,17 @@ class RestHelper(object):
self._assert_rest_result(result,
_('Add filesystem to partition error.'))
def _remove_fs_from_partition(self, fs_id, partition_id):
    """Dissociate a filesystem from a SmartPartition."""
    payload = jsonutils.dumps({"ID": partition_id,
                               "ASSOCIATEOBJTYPE": 40,
                               "ASSOCIATEOBJID": fs_id,
                               "TYPE": 268})
    response = self.call("/smartPartition/removeFs", payload, "PUT")
    self._assert_rest_result(response,
                             _('Remove filesystem from partition error.'))
def _get_cache_id_by_name(self, name):
url = "/SMARTCACHEPARTITION"
result = self.call(url, None, "GET")
@ -712,6 +747,17 @@ class RestHelper(object):
return item['ID']
return None
def get_cache_info_by_id(self, cacheid):
    """Return a SmartCache partition's info dict, looked up by its id."""
    payload = jsonutils.dumps({"TYPE": "273",
                               "ID": cacheid})
    response = self.call("/SMARTCACHEPARTITION/%s" % cacheid,
                         payload, "GET")
    self._assert_rest_result(
        response, _('Get smartcache by cache id error.'))
    return response['data']
def _add_fs_to_cache(self, fs_id, cache_id):
url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE"
data = jsonutils.dumps({"ID": cache_id,
@ -721,3 +767,14 @@ class RestHelper(object):
result = self.call(url, data, "PUT")
self._assert_rest_result(result, _('Add filesystem to cache error.'))
def _remove_fs_from_cache(self, fs_id, cache_id):
    """Dissociate a filesystem from a SmartCache partition."""
    payload = jsonutils.dumps({"ID": cache_id,
                               "ASSOCIATEOBJTYPE": 40,
                               "ASSOCIATEOBJID": fs_id,
                               "TYPE": 273})
    response = self.call("/SMARTCACHEPARTITION/REMOVE_ASSOCIATE",
                         payload, "PUT")
    self._assert_rest_result(response,
                             _('Remove filesystem from cache error.'))

View File

@ -71,6 +71,16 @@ def filesystem(method, data, fs_status_flag):
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": True,
"ENABLECOMPRESSION": True}):
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif data == jsonutils.dumps({"ENABLEDEDUP": False,
"ENABLECOMPRESSION": False}):
data = """{"error":{"code":0},
"data":{"ID":"4",
"CAPACITY":"8388608"}}"""
elif method == "DELETE":
data = """{"error":{"code":0}}"""
elif method == "GET":
@ -80,14 +90,139 @@ def filesystem(method, data, fs_status_flag):
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"1",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool"}}"""
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":""}}"""
else:
data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool"}}"""
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":""}}"""
else:
data = '{"error":{"code":31755596}}'
return (data, extend_share_flag, shrink_share_flag)
def filesystem_thick(method, data, fs_status_flag):
    """Fake REST handler for filesystem id 5 (the thick-provisioned FS).

    Mimics the array's /filesystem/5 endpoint for the manage-share
    tests: PUT handles extend/shrink/rename/extra-specs requests,
    DELETE always succeeds, and GET returns FS info whose health
    status depends on fs_status_flag.

    :returns: (response_json, extend_share_flag, shrink_share_flag)
    """
    extend_share_flag = False
    shrink_share_flag = False

    if method == "PUT":
        # Capacity extend request.
        if data == """{"CAPACITY": 10485760}""":
            data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
            extend_share_flag = True
        # Capacity shrink request.
        elif data == """{"CAPACITY": 2097152}""":
            data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"2097152"}}"""
            shrink_share_flag = True
        # Rename performed by manage_existing.
        elif data == """{"NAME": "share_fake_uuid_thickfs"}""":
            data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
        # Extra-specs change: thick FS only ever gets dedupe and
        # compression disabled.
        elif data == jsonutils.dumps({"ENABLEDEDUP": False,
                                      "ENABLECOMPRESSION": False}):
            data = """{"error":{"code":0},
"data":{"ID":"5",
"CAPACITY":"8388608"}}"""
    elif method == "DELETE":
        data = """{"error":{"code":0}}"""
    elif method == "GET":
        # fs_status_flag selects a healthy (HEALTHSTATUS "1") or
        # unhealthy ("0") filesystem; ALLOCTYPE "0" marks it thick.
        if fs_status_flag:
            data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool_Thick",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":""}}"""
        else:
            data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool_Thick",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"",
"SMARTCACHEPARTITIONID":""}}"""
    else:
        # Any other verb is reported as an array-side error.
        data = '{"error":{"code":31755596}}'
    return (data, extend_share_flag, shrink_share_flag)
def filesystem_inpartition(method, data, fs_status_flag):
    """Fake REST handler for filesystem id 6 (FS already in a partition).

    Like filesystem_thick but for a filesystem that is already
    associated with a SmartPartition and SmartCache (ids "1"), used by
    the remove-from-partition retype tests.

    :returns: (response_json, extend_share_flag, shrink_share_flag)
    """
    extend_share_flag = False
    shrink_share_flag = False

    if method == "PUT":
        # Capacity extend request.
        if data == """{"CAPACITY": 10485760}""":
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
            extend_share_flag = True
        # Capacity shrink request.
        elif data == """{"CAPACITY": 2097152}""":
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"2097152"}}"""
            shrink_share_flag = True
        # Rename requests issued by manage_existing.
        elif data == """{"NAME": "share_fake_manage_uuid"}""":
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
        elif data == """{"NAME": "share_fake_uuid_inpartition"}""":
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
        # Extra-specs changes: both enable and disable variants occur.
        elif data == jsonutils.dumps({"ENABLEDEDUP": True,
                                      "ENABLECOMPRESSION": True}):
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
        elif data == jsonutils.dumps({"ENABLEDEDUP": False,
                                      "ENABLECOMPRESSION": False}):
            data = """{"error":{"code":0},
"data":{"ID":"6",
"CAPACITY":"8388608"}}"""
    elif method == "DELETE":
        data = """{"error":{"code":0}}"""
    elif method == "GET":
        # Healthy FS is thin (ALLOCTYPE "1"); unhealthy variant is
        # thick ("0"). Both carry partition/cache ids "1".
        if fs_status_flag:
            data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"1",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"1",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"1",
"SMARTCACHEPARTITIONID":"1"}}"""
        else:
            data = """{"error":{"code":0},
"data":{"HEALTHSTATUS":"0",
"RUNNINGSTATUS":"27",
"ALLOCTYPE":"0",
"CAPACITY":"8388608",
"PARENTNAME":"OpenStack_Pool",
"ENABLECOMPRESSION":"false",
"ENABLEDEDUP":"false",
"CACHEPARTITIONID":"1",
"SMARTCACHEPARTITIONID":"1"}}"""
    else:
        # Any other verb is reported as an array-side error.
        data = '{"error":{"code":31755596}}'
    return (data, extend_share_flag, shrink_share_flag)
@ -229,7 +364,15 @@ class FakeHuaweiNasHelper(helper.RestHelper):
"data":[{"ID":"1",
"FSID":"4",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid/"}]}"""
"SHAREPATH":"/share_fake_uuid/"},
{"ID":"2",
"FSID":"5",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid_thickfs/"},
{"ID":"3",
"FSID":"6",
"NAME":"test",
"SHAREPATH":"/share_fake_uuid_inpartition/"}]}"""
else:
data = """{"error":{"code":0},
"data":[{"ID":"1",
@ -365,6 +508,16 @@ class FakeHuaweiNasHelper(helper.RestHelper):
filesystem(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/filesystem/5":
data, self.extend_share_flag, self.shrink_share_flag = (
filesystem_thick(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/filesystem/6":
data, self.extend_share_flag, self.shrink_share_flag = (
filesystem_inpartition(method, data, self.fs_status_flag))
self.delete_flag = True
if url == "/cachepartition":
if self.partition_exist:
data = """{"error":{"code":0},
@ -375,6 +528,16 @@ class FakeHuaweiNasHelper(helper.RestHelper):
"data":[{"ID":"7",
"NAME":"test_partition_name_fail"}]}"""
if url == "/cachepartition/1":
if self.partition_exist:
data = """{"error":{"code":0},
"data":{"ID":"7",
"NAME":"test_partition_name"}}"""
else:
data = """{"error":{"code":0},
"data":{"ID":"7",
"NAME":"test_partition_name_fail"}}"""
if url == "/SMARTCACHEPARTITION":
if self.cache_exist:
data = """{"error":{"code":0},
@ -385,6 +548,16 @@ class FakeHuaweiNasHelper(helper.RestHelper):
"data":[{"ID":"8",
"NAME":"test_cache_name_fail"}]}"""
if url == "/SMARTCACHEPARTITION/1":
if self.cache_exist:
data = """{"error":{"code":0},
"data":{"ID":"8",
"NAME":"test_cache_name"}}"""
else:
data = """{"error":{"code":0},
"data":{"ID":"8",
"NAME":"test_cache_name_fail"}}"""
if url == "/filesystem/associate/cachepartition":
data = """{"error":{"code":0}}"""
self.add_fs_to_partition_flag = True
@ -392,6 +565,12 @@ class FakeHuaweiNasHelper(helper.RestHelper):
if url == "/SMARTCACHEPARTITION/CREATE_ASSOCIATE":
data = """{"error":{"code":0}}"""
self.add_fs_to_cache_flag = True
if url == "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE":
data = """{"error":{"code":0}}"""
if url == "/smartPartition/removeFs":
data = """{"error":{"code":0}}"""
else:
data = '{"error":{"code":31755596}}'
@ -481,6 +660,54 @@ class HuaweiShareDriverTestCase(test.TestCase):
'share_type_id': 'fake_id',
}
self.share_nfs_thickfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-thickfs',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_thickfs'},
],
'share_type_id': 'fake_id',
}
self.share_nfs_thick_thickfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-thickfs',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool_Thick',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_thickfs'},
],
'share_type_id': 'fake_id',
}
self.share_nfs_inpartition = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid-inpartition',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
'export_locations': [
{'path': '100.115.10.68:/share_fake_uuid_inpartition'},
],
'share_type_id': 'fake_id',
}
self.share_manage_nfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
@ -661,6 +888,56 @@ class HuaweiShareDriverTestCase(test.TestCase):
}
}
fake_extra_specs = {
'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:huawei_smartcache': '<is> False',
'huawei_smartcache:cachename': None,
'capabilities:huawei_smartpartition': '<is> False',
'huawei_smartpartition:partitionname': None,
'capabilities:thin_provisioning': '<is> True',
'test:test:test': 'test',
}
fake_share_type_id = 'fooid-3'
self.fake_type_fake_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
fake_extra_specs = {
'capabilities:dedupe': '<is> True',
'capabilities:compression': '<is> True',
'capabilities:huawei_smartcache': '<is> False',
'huawei_smartcache:cachename': None,
'capabilities:huawei_smartpartition': '<is> False',
'huawei_smartpartition:partitionname': None,
'capabilities:thin_provisioning': '<is> False',
'test:test:test': 'test',
}
fake_share_type_id = 'fooid-4'
self.fake_type_thin_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': 'test_with_extra',
'updated_at': None
}
}
self.share_nfs_host_not_exist = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
@ -1405,7 +1682,41 @@ class HuaweiShareDriverTestCase(test.TestCase):
elif share_proto == "CIFS":
share = self.share_manage_cifs
share_type = self.fake_type_extra['test_with_extra']
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db, 'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
share_info = self.driver.manage_existing(share,
self.driver_options)
self.assertEqual(4, share_info["size"])
self.assertEqual(path, share_info["export_locations"])
@ddt.data({"fs_alloctype": "THIN",
           "path": ["100.115.10.68:/share_fake_manage_uuid"]},
          {"fs_alloctype": "THICK",
           "path": ["100.115.10.68:/share_fake_uuid_thickfs"]})
@ddt.unpack
def test_manage_share_with_default_type(self, fs_alloctype, path):
    """Manage succeeds with a share type carrying no extra specs."""
    if fs_alloctype == "THIN":
        target_share = self.share_manage_nfs
    elif fs_alloctype == "THICK":
        target_share = self.share_nfs_thick_thickfs
    plain_type = self.fake_type_not_extra['test_with_extra']
    self.mock_object(db, 'share_type_get',
                     mock.Mock(return_value=plain_type))
    self.driver.plugin.helper.login()
    managed = self.driver.manage_existing(target_share,
                                          self.driver_options)
    self.assertEqual(4, managed["size"])
    self.assertEqual(path, managed["export_locations"])
@ddt.data({"path": ["100.115.10.68:/share_fake_uuid_inpartition"]})
@ddt.unpack
def test_manage_share_remove_from_partition(self, path):
share = self.share_nfs_inpartition
share_type = self.fake_type_fake_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
@ -1441,6 +1752,152 @@ class HuaweiShareDriverTestCase(test.TestCase):
share,
self.driver_options)
def test_manage_share_thickfs_set_dedupe_fail(self):
    """Requesting dedupe/compression on a thick FS must be rejected."""
    thick_share = self.share_nfs_thick_thickfs
    self.driver.plugin.helper.login()
    thin_type = self.fake_type_thin_extra['test_with_extra']
    self.mock_object(db, 'share_type_get',
                     mock.Mock(return_value=thin_type))
    self.driver.plugin.configuration.manila_huawei_conf_file = (
        self.fake_conf_file)
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidInput,
                      self.driver.manage_existing,
                      thick_share,
                      self.driver_options)
def test_manage_share_thickfs_not_match_thinpool_fail(self):
    """A thick FS cannot be managed into a thin pool / thin type."""
    thick_share = self.share_nfs_thickfs
    self.driver.plugin.helper.login()
    extra_type = self.fake_type_extra['test_with_extra']
    self.mock_object(db, 'share_type_get',
                     mock.Mock(return_value=extra_type))
    self.driver.plugin.configuration.manila_huawei_conf_file = (
        self.fake_conf_file)
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidHost,
                      self.driver.manage_existing,
                      thick_share,
                      self.driver_options)
@ddt.data({"flag": "old_cache_id", "exc": exception.InvalidInput},
          {"flag": "not_old_cache_id", "exc": exception.InvalidInput})
@ddt.unpack
def test_manage_share_cache_not_exist(self, flag, exc):
    """Managing fails when the requested SmartCache is absent."""
    shares_by_flag = {"old_cache_id": self.share_nfs_inpartition,
                      "not_old_cache_id": self.share_nfs}
    share = shares_by_flag.get(flag)
    self.driver.plugin.helper.cache_exist = False
    extra_type = self.fake_type_w_extra['test_with_extra']
    self.mock_object(db, 'share_type_get',
                     mock.Mock(return_value=extra_type))
    self.driver.plugin.helper.login()
    self.assertRaises(exc,
                      self.driver.manage_existing,
                      share,
                      self.share_server)
def test_manage_add_share_to_cache_fail(self):
    """check_retype_change_opts rejects names missing on the array."""
    opts = {'huawei_smartcache': 'true',
            'huawei_smartpartition': 'true',
            'cachename': 'test_cache_name_fake',
            'partitionname': 'test_partition_name_fake'}
    fs = {'SMARTCACHEID': '6',
          'SMARTPARTITIONID': None}
    poolinfo = {'type': 'Thin'}
    self.assertRaises(exception.InvalidInput,
                      self.driver.plugin.check_retype_change_opts,
                      opts, poolinfo, fs)
def test_manage_notsetcache_fail(self):
    """Enabling smartcache without a cachename must be rejected."""
    opts = {'huawei_smartcache': 'true',
            'huawei_smartpartition': 'true',
            'cachename': None,
            'partitionname': 'test_partition_name_fake'}
    fs = {'SMARTCACHEID': '6',
          'SMARTPARTITIONID': '6'}
    poolinfo = {'type': 'Thin'}
    self.assertRaises(exception.InvalidInput,
                      self.driver.plugin.check_retype_change_opts,
                      opts, poolinfo, fs)
@ddt.data({"flag": "old_partition_id", "exc": exception.InvalidInput},
          {"flag": "not_old_partition_id", "exc": exception.InvalidInput})
@ddt.unpack
def test_manage_share_partition_not_exist(self, flag, exc):
    """Managing fails when the requested SmartPartition is absent."""
    shares_by_flag = {"old_partition_id": self.share_nfs_inpartition,
                      "not_old_partition_id": self.share_nfs}
    share = shares_by_flag.get(flag)
    self.driver.plugin.helper.partition_exist = False
    extra_type = self.fake_type_w_extra['test_with_extra']
    self.mock_object(db, 'share_type_get',
                     mock.Mock(return_value=extra_type))
    self.driver.plugin.helper.login()
    self.assertRaises(exc,
                      self.driver.manage_existing,
                      share,
                      self.share_server)
def test_manage_add_share_to_partition_fail(self):
    """check_retype_change_opts rejects an unknown partition name."""
    opts = {'huawei_smartcache': 'true',
            'huawei_smartpartition': 'true',
            'cachename': 'test_cache_name_fake',
            'partitionname': 'test_partition_name_fake'}
    fs = {'SMARTCACHEID': None,
          'SMARTPARTITIONID': '6'}
    poolinfo = {'type': 'Thin'}
    self.assertRaises(exception.InvalidInput,
                      self.driver.plugin.check_retype_change_opts,
                      opts, poolinfo, fs)
def test_manage_notset_partition_fail(self):
    """Enabling smartpartition without a name must be rejected."""
    opts = {'huawei_smartcache': 'true',
            'huawei_smartpartition': 'true',
            'cachename': 'test_cache_name_fake',
            'partitionname': None}
    fs = {'SMARTCACHEID': None,
          'SMARTPARTITIONID': '6'}
    poolinfo = {'type': 'Thin'}
    self.assertRaises(exception.InvalidInput,
                      self.driver.plugin.check_retype_change_opts,
                      opts, poolinfo, fs)
@ddt.data({"share_proto": "NFS",
"export_path": "fake_ip:/share_fake_uuid"},
{"share_proto": "NFS", "export_path": "fake_ip:/"},