New Manila HDS HNAS Driver
This patch adds the new Manila HDS HNAS driver, as described in the blueprint. Change-Id: I0f9ae2a940df5415b93f6a6c5c2b0fac1cb062fd Implements: blueprint hds-hnas
This commit is contained in:
parent
2a4bd59d49
commit
5fb872fed1
|
@ -632,3 +632,11 @@ class QBRpcException(ManilaException):
|
|||
message = _("Quobyte JsonRpc call to backend raised "
|
||||
"an exception: %(result)s, Quobyte error"
|
||||
" code %(qbcode)s")
|
||||
|
||||
|
||||
class SSHInjectionThreat(ManilaException):
    """Raised when a command destined for an SSH backend contains
    characters that could be used to inject extra shell commands.
    """
    message = _("SSH command injection detected: %(command)s")
|
||||
|
||||
|
||||
class HNASBackendException(ManilaException):
    """Generic error reported by the HDS HNAS backend driver."""
    message = _("HNAS Backend Exception: %(msg)s")
|
||||
|
|
|
@ -57,6 +57,7 @@ import manila.share.drivers.glusterfs
|
|||
import manila.share.drivers.glusterfs_native
|
||||
import manila.share.drivers.hdfs.hdfs_native
|
||||
import manila.share.drivers.hds.sop
|
||||
import manila.share.drivers.hitachi.hds_hnas
|
||||
import manila.share.drivers.hp.hp_3par_driver
|
||||
import manila.share.drivers.huawei.huawei_nas
|
||||
import manila.share.drivers.ibm.gpfs
|
||||
|
@ -113,6 +114,7 @@ _global_opt_lists = [
|
|||
manila.share.drivers.glusterfs_native.glusterfs_native_manila_share_opts,
|
||||
manila.share.drivers.hdfs.hdfs_native.hdfs_native_share_opts,
|
||||
manila.share.drivers.hds.sop.hdssop_share_opts,
|
||||
manila.share.drivers.hitachi.hds_hnas.hds_hnas_opts,
|
||||
manila.share.drivers.hp.hp_3par_driver.HP3PAR_OPTS,
|
||||
manila.share.drivers.huawei.huawei_nas.huawei_opts,
|
||||
manila.share.drivers.ibm.gpfs.gpfs_share_opts,
|
||||
|
|
|
@ -0,0 +1,443 @@
|
|||
# Copyright (c) 2015 Hitachi Data Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
from oslo_utils import strutils
|
||||
import six
|
||||
|
||||
from manila.common import constants as const
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LI
|
||||
from manila.share import driver
|
||||
from manila.share.drivers.hitachi import ssh
|
||||
from manila.share import share_types
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Configuration options consumed by the HDS HNAS driver.  All of them are
# read through self.configuration.safe_get() in HDSHNASDriver.__init__,
# which validates the required ones and raises InvalidParameterValue when
# they are missing.
hds_hnas_opts = [
    cfg.StrOpt('hds_hnas_ip',
               default=None,
               help="HNAS management interface IP for communication "
                    "between Manila controller and HNAS."),
    cfg.StrOpt('hds_hnas_user',
               default=None,
               help="HNAS username Base64 String in order to perform tasks "
                    "such as create file-systems and network interfaces."),
    cfg.StrOpt('hds_hnas_password',
               default=None,
               secret=True,
               help="HNAS user password. Required only if private key is not "
                    "provided."),
    cfg.StrOpt('hds_hnas_evs_id',
               default=None,
               help="Specify which EVS this backend is assigned to."),
    cfg.StrOpt('hds_hnas_evs_ip',
               default=None,
               help="Specify IP for mounting shares."),
    cfg.StrOpt('hds_hnas_file_system_name',
               default=None,
               help="Specify file-system name for creating shares."),
    cfg.StrOpt('hds_hnas_ssh_private_key',
               default=None,
               secret=True,
               help="RSA/DSA private key value used to connect into HNAS. "
                    "Required only if password is not provided."),
    cfg.StrOpt('hds_hnas_cluster_admin_ip0',
               default=None,
               help="The IP of the clusters admin node. Only set in HNAS "
                    "multinode clusters."),
]

CONF = cfg.CONF
CONF.register_opts(hds_hnas_opts)
|
||||
|
||||
|
||||
class HDSHNASDriver(driver.ShareDriver):
|
||||
"""Manila HNAS Driver implementation.
|
||||
|
||||
1.0 - Initial Version
|
||||
"""
|
||||
|
||||
    def __init__(self, *args, **kwargs):
        """Do initialization.

        Reads and validates the backend configuration and builds the SSH
        layer used to talk to HNAS.

        :raises exception.InvalidParameterValue: if any mandatory config
            option is missing, or neither password nor private key is set.
        """

        LOG.debug("Invoking base constructor for Manila HDS HNAS Driver.")
        # First positional argument (False) fixes
        # driver_handles_share_servers = False for this driver.
        super(HDSHNASDriver, self).__init__(False, *args, **kwargs)

        LOG.debug("Setting up attributes for Manila HDS HNAS Driver.")
        self.configuration.append_config_values(hds_hnas_opts)

        LOG.debug("Reading config parameters for Manila HDS HNAS Driver.")
        self.backend_name = self.configuration.safe_get('share_backend_name')
        hnas_ip = self.configuration.safe_get('hds_hnas_ip')
        hnas_username = self.configuration.safe_get('hds_hnas_user')
        hnas_password = self.configuration.safe_get('hds_hnas_password')
        hnas_evs_id = self.configuration.safe_get('hds_hnas_evs_id')
        self.hnas_evs_ip = self.configuration.safe_get('hds_hnas_evs_ip')
        fs_name = self.configuration.safe_get('hds_hnas_file_system_name')
        ssh_private_key = self.configuration.safe_get(
            'hds_hnas_ssh_private_key')
        cluster_admin_ip0 = self.configuration.safe_get(
            'hds_hnas_cluster_admin_ip0')
        # Used to map Manila share ids to backend (HNAS) share ids for
        # managed shares; see _get_hnas_share_id().
        self.private_storage = kwargs.get('private_storage')

        if hnas_evs_id is None:
            msg = _("The config parameter hds_hnas_evs_id is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if self.hnas_evs_ip is None:
            msg = _("The config parameter hds_hnas_evs_ip is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_ip is None:
            msg = _("The config parameter hds_hnas_ip is not set.")
            raise exception.InvalidParameterValue(err=msg)

        if hnas_username is None:
            msg = _("The config parameter hds_hnas_user is not set.")
            raise exception.InvalidParameterValue(err=msg)

        # Either a password or an SSH private key is acceptable as a
        # credential; only fail when both are absent.
        if hnas_password is None and ssh_private_key is None:
            msg = _("Credentials configuration parameters missing: "
                    "you need to set hds_hnas_password or "
                    "hds_hnas_ssh_private_key.")
            raise exception.InvalidParameterValue(err=msg)

        LOG.debug("Initializing HNAS Layer.")

        self.hnas = ssh.HNASSSHBackend(hnas_ip, hnas_username, hnas_password,
                                       ssh_private_key, cluster_admin_ip0,
                                       hnas_evs_id, self.hnas_evs_ip, fs_name)
|
||||
|
||||
def allow_access(self, context, share, access, share_server=None):
|
||||
"""Allow access to a share.
|
||||
|
||||
:param context: The `context.RequestContext` object for the request
|
||||
:param share: Share to which access will be allowed.
|
||||
:param access: Information about the access that will be allowed, e.g.
|
||||
host allowed, type of access granted.
|
||||
:param share_server: Data structure with share server information.
|
||||
Not used by this driver.
|
||||
"""
|
||||
if ('nfs', 'ip') != (share['share_proto'].lower(),
|
||||
access['access_type'].lower()):
|
||||
msg = _("Only NFS protocol and IP access type currently "
|
||||
"supported.")
|
||||
raise exception.InvalidShareAccess(reason=msg)
|
||||
|
||||
LOG.debug("Sending HNAS Request to allow access to share: "
|
||||
"%(shr)s.", {'shr': (share['id'])})
|
||||
|
||||
share_id = self._get_hnas_share_id(share['id'])
|
||||
|
||||
self.hnas.allow_access(share_id, access['access_to'],
|
||||
share['share_proto'],
|
||||
access['access_level'])
|
||||
|
||||
LOG.info(_LI("Access allowed successfully to share: %(shr)s."),
|
||||
{'shr': six.text_type(share['id'])})
|
||||
|
||||
def deny_access(self, context, share, access, share_server=None):
|
||||
"""Deny access to a share.
|
||||
|
||||
:param context: The `context.RequestContext` object for the request
|
||||
:param share: Share to which access will be denied.
|
||||
:param access: Information about the access that will be denied, e.g.
|
||||
host and type of access denied.
|
||||
:param share_server: Data structure with share server information.
|
||||
Not used by this driver.
|
||||
"""
|
||||
if ('nfs', 'ip') != (share['share_proto'].lower(),
|
||||
access['access_type'].lower()):
|
||||
msg = _("Only NFS protocol and IP access type currently "
|
||||
"supported.")
|
||||
raise exception.InvalidShareAccess(reason=msg)
|
||||
|
||||
LOG.debug("Sending HNAS request to deny access to share:"
|
||||
" %(shr_id)s.",
|
||||
{'shr_id': six.text_type(share['id'])})
|
||||
|
||||
share_id = self._get_hnas_share_id(share['id'])
|
||||
|
||||
self.hnas.deny_access(share_id, access['access_to'],
|
||||
share['share_proto'], access['access_level'])
|
||||
|
||||
LOG.info(_LI("Access denied successfully to share: %(shr)s."),
|
||||
{'shr': six.text_type(share['id'])})
|
||||
|
||||
def create_share(self, context, share, share_server=None):
|
||||
"""Creates share.
|
||||
|
||||
:param context: The `context.RequestContext` object for the request
|
||||
:param share: Share that will be created.
|
||||
:param share_server: Data structure with share server information.
|
||||
Not used by this driver.
|
||||
:returns: Returns a path of EVS IP concatenate with the path
|
||||
of share in the filesystem (e.g. ['172.24.44.10:/shares/id']).
|
||||
"""
|
||||
LOG.debug("Creating share in HNAS: %(shr)s.",
|
||||
{'shr': six.text_type(share['id'])})
|
||||
|
||||
if share['share_proto'].lower() != 'nfs':
|
||||
msg = _("Only NFS protocol is currently supported.")
|
||||
raise exception.ShareBackendException(msg=msg)
|
||||
|
||||
ip = self.hnas_evs_ip
|
||||
|
||||
path = self.hnas.create_share(share['id'], share['size'],
|
||||
share['share_proto'])
|
||||
|
||||
LOG.debug("Share created successfully on path: %(ip)s:%(path)s.",
|
||||
{'ip': ip, 'path': path})
|
||||
return ip + ":" + path
|
||||
|
||||
    def delete_share(self, context, share, share_server=None):
        """Deletes share.

        :param context: The `context.RequestContext` object for the request
        :param share: Share that will be deleted.
        :param share_server: Data structure with share server information.
        Not used by this driver.
        """
        # Resolve the backend id first: a managed share may live under a
        # different name on HNAS than the Manila share id.
        share_id = self._get_hnas_share_id(share['id'])

        LOG.debug("Deleting share in HNAS: %(shr)s.",
                  {'shr': six.text_type(share['id'])})

        self.hnas.delete_share(share_id, share['share_proto'])
|
||||
|
||||
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates snapshot.

        :param context: The `context.RequestContext` object for the request
        :param snapshot: Snapshot that will be created.
        :param share_server: Data structure with share server information.
        Not used by this driver.
        """
        # Use the backend id of the parent share (may differ from the
        # Manila id for managed shares).
        share_id = self._get_hnas_share_id(snapshot['share_id'])

        LOG.debug("The snapshot of share %(ss_sid)s will be created with "
                  "id %(ss_id)s.", {'ss_sid': snapshot['share_id'],
                                    'ss_id': snapshot['id']})

        self.hnas.create_snapshot(share_id, snapshot['id'])
        LOG.info(_LI("Snapshot %(id)s successfully created."),
                 {'id': snapshot['id']})
|
||||
|
||||
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes snapshot.

        :param context: The `context.RequestContext` object for the request
        :param snapshot: Snapshot that will be deleted.
        :param share_server: Data structure with share server information.
        Not used by this driver.
        """
        # Use the backend id of the parent share (may differ from the
        # Manila id for managed shares).
        share_id = self._get_hnas_share_id(snapshot['share_id'])

        LOG.debug("The snapshot %(ss_sid)s will be deleted. The related "
                  "share ID is %(ss_id)s.",
                  {'ss_sid': snapshot['share_id'], 'ss_id': snapshot['id']})

        self.hnas.delete_snapshot(share_id, snapshot['id'])
        LOG.info(_LI("Snapshot %(id)s successfully deleted."),
                 {'id': snapshot['id']})
|
||||
|
||||
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Creates a new share from snapshot.

        :param context: The `context.RequestContext` object for the request
        :param share: Information about the new share.
        :param snapshot: Information about the snapshot that will be copied
        to new share.
        :param share_server: Data structure with share server information.
        Not used by this driver.
        :returns: Returns a path of EVS IP concatenate with the path
        of new share in the filesystem (e.g. ['172.24.44.10:/shares/id']).
        """
        LOG.debug("Creating a new share from snapshot: %(ss_id)s.",
                  {'ss_id': six.text_type(snapshot['id'])})

        ip = self.hnas_evs_ip
        # Backend clones the snapshot directory into a new virtual volume
        # and exports it, returning the export path.
        path = self.hnas.create_share_from_snapshot(share, snapshot)

        LOG.debug("Share created successfully on path: %(ip)s:%(path)s.",
                  {'ip': ip, 'path': path})
        return ip + ":" + path
|
||||
|
||||
def ensure_share(self, context, share, share_server=None):
|
||||
"""Ensure that share is exported.
|
||||
|
||||
:param context: The `context.RequestContext` object for the request
|
||||
:param share: Share that will be checked.
|
||||
:param share_server: Data structure with share server information.
|
||||
Not used by this driver.
|
||||
:returns: Returns a list of EVS IP concatenated with the path
|
||||
of share in the filesystem (e.g. ['172.24.44.10:/shares/id']).
|
||||
"""
|
||||
LOG.debug("Ensuring share in HNAS: %(shr)s.",
|
||||
{'shr': six.text_type(share['id'])})
|
||||
|
||||
if share['share_proto'].lower() != 'nfs':
|
||||
msg = _("Only NFS protocol is currently supported.")
|
||||
raise exception.ShareBackendException(msg=msg)
|
||||
|
||||
path = self.hnas.ensure_share(share['id'], share['share_proto'])
|
||||
|
||||
export = self.hnas_evs_ip + ":" + path
|
||||
export_list = [export]
|
||||
|
||||
LOG.debug("Share ensured in HNAS: %(shr)s.",
|
||||
{'shr': six.text_type(share['id'])})
|
||||
return export_list
|
||||
|
||||
    def extend_share(self, share, new_size, share_server=None):
        """Extends a share to new size.

        :param share: Share that will be extended.
        :param new_size: New size of share.
        :param share_server: Data structure with share server information.
        Not used by this driver.
        :raises exception.ShareBackendException: for non-NFS protocols.
        """
        # Resolve the backend id first (managed shares may differ).
        share_id = self._get_hnas_share_id(share['id'])

        LOG.debug("Expanding share in HNAS: %(shr_id)s.",
                  {'shr_id': six.text_type(share['id'])})

        if share['share_proto'].lower() != 'nfs':
            msg = _("Only NFS protocol is currently supported.")
            raise exception.ShareBackendException(msg=msg)

        self.hnas.extend_share(share_id, new_size, share['share_proto'])
        LOG.info(_LI("Share %(shr_id)s successfully extended to "
                     "%(shr_size)s."),
                 {'shr_id': six.text_type(share['id']),
                  'shr_size': six.text_type(new_size)})
|
||||
|
||||
# TODO(alyson): Implement in DHSS = true mode
|
||||
def get_network_allocations_number(self):
|
||||
"""Track allocations_number in DHSS = true.
|
||||
|
||||
When using the setting driver_handles_share_server = false
|
||||
does not require to track allocations_number because we do not handle
|
||||
network stuff.
|
||||
"""
|
||||
return 0
|
||||
|
||||
    def _update_share_stats(self):
        """Updates the Capability of Backend.

        Collects capacity figures from the HNAS filesystem and publishes
        them (together with static capabilities) through the base class.
        """
        LOG.debug("Updating Backend Capability Information - HDS HNAS.")

        # Backend returns (total, free) in GB; free space is computed by
        # the backend as total minus the sum of share quotas.
        total_space, free_space = self.hnas.get_stats()

        reserved = self.configuration.safe_get('reserved_share_percentage')

        data = {
            'share_backend_name': self.backend_name,
            'driver_handles_share_servers': self.driver_handles_share_servers,
            'vendor_name': 'HDS',
            'driver_version': '1.0',
            'storage_protocol': 'NFS',
            'total_capacity_gb': total_space,
            'free_capacity_gb': free_space,
            'reserved_percentage': reserved,
            'QoS_support': False,
        }

        LOG.info(_LI("HNAS Capabilities: %(data)s."),
                 {'data': six.text_type(data)})

        super(HDSHNASDriver, self)._update_share_stats(data)
|
||||
|
||||
    def manage_existing(self, share, driver_options):
        """Manages a share that exists on backend.

        :param share: Share that will be managed.
        :param driver_options: Empty dict or dict with 'volume_id' option.
        :returns: Returns a dict with size of share managed
        and its location (your path in file-system).
        :raises exception.InvalidDriverMode: when the driver handles share
            servers (manage is only supported with DHSS = False).
        :raises exception.ManageExistingShareTypeMismatch: when the share
            type requests DHSS = True.
        :raises exception.ShareBackendException: on malformed export path,
            unknown EVS IP, or unknown backend name.
        """
        if self.driver_handles_share_servers:
            msg = (_("DHSS = %s") % self.driver_handles_share_servers)
            LOG.error(_LE("Operation 'manage' for shares is supported only "
                          "when driver does not handle share servers."))
            raise exception.InvalidDriverMode(driver_mode=msg)

        driver_mode = share_types.get_share_type_extra_specs(
            share['share_type_id'],
            const.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS)

        if strutils.bool_from_string(driver_mode):
            msg = _("%(mode)s != False.") % {
                'mode': const.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS
            }
            raise exception.ManageExistingShareTypeMismatch(reason=msg)

        share_id = self._get_hnas_share_id(share['id'])

        LOG.info(_LI("Share %(shr_path)s will be managed with ID %(shr_id)s."),
                 {'shr_path': six.text_type(
                     share['export_locations'][0]['path']),
                  'shr_id': six.text_type(share_id)})

        # Expected export path format is "IP:/shares/share_id"; splitting
        # "/shares/share_id" on '/' yields ['', 'shares', '<id>'].
        old_path_info = share['export_locations'][0]['path'].split(':')
        old_path = old_path_info[1].split('/')

        if len(old_path) == 3:
            evs_ip = old_path_info[0]
            # From here on, the id embedded in the path is the backend id.
            share_id = old_path[2]
        else:
            msg = _("Incorrect path. It should have the following format: "
                    "IP:/shares/share_id.")
            raise exception.ShareBackendException(msg=msg)

        if evs_ip != self.hnas_evs_ip:
            msg = _("The EVS IP %(evs)s is not "
                    "configured.") % {'evs': six.text_type(evs_ip)}
            raise exception.ShareBackendException(msg=msg)

        if six.text_type(self.backend_name) not in share['host']:
            msg = _("The backend passed in the host parameter (%(shr)s) is "
                    "not configured.") % {'shr': share['host']}
            raise exception.ShareBackendException(msg=msg)

        # NOTE(review): HNASSSHBackend.manage_existing is declared as
        # (share_proto, share_id) but is called here with the share dict as
        # the first argument -- confirm against the backend implementation.
        output = self.hnas.manage_existing(share, share_id)
        # Persist the Manila-id -> backend-id mapping so later operations
        # (_get_hnas_share_id) find the share under its original name.
        self.private_storage.update(
            share['id'], {'hnas_id': share_id})

        return output
|
||||
|
||||
    def unmanage(self, share):
        """Unmanages a share.

        Removes the Manila-id -> backend-id mapping; the share itself is
        left untouched on the backend.

        :param share: Share that will be unmanaged.
        """
        self.private_storage.delete(share['id'])

        LOG.info(_LI("The share with current path %(shr_path)s and ID "
                     "%(shr_id)s is no longer being managed."),
                 {'shr_path': six.text_type(
                     share['export_locations'][0]['path']),
                  'shr_id': six.text_type(share['id'])})
|
||||
|
||||
def _get_hnas_share_id(self, share_id):
|
||||
hnas_id = self.private_storage.get(share_id, 'hnas_id')
|
||||
|
||||
if hnas_id is None:
|
||||
hnas_id = share_id
|
||||
return hnas_id
|
|
@ -0,0 +1,708 @@
|
|||
# Copyright (c) 2015 Hitachi Data Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log
|
||||
from oslo_utils import units
|
||||
import paramiko
|
||||
import six
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _
|
||||
from manila.i18n import _LE
|
||||
from manila.i18n import _LW
|
||||
from manila import utils as mutils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class HNASSSHBackend(object):
|
||||
    def __init__(self, hnas_ip, hnas_username, hnas_password, ssh_private_key,
                 cluster_admin_ip0, evs_id, evs_ip, fs_name):
        """Store connection parameters for the HNAS SSH layer.

        Either hnas_password or ssh_private_key must be provided (the
        driver validates this before constructing the backend).
        """
        self.ip = hnas_ip
        # HNAS management access always goes over the standard SSH port.
        self.port = 22
        self.user = hnas_username
        self.password = hnas_password
        self.priv_key = ssh_private_key
        # Admin node IP; only set on multinode clusters (see _execute).
        self.admin_ip0 = cluster_admin_ip0
        self.evs_id = six.text_type(evs_id)
        self.fs_name = fs_name
        self.evs_ip = evs_ip
        # SSH connection pool, created lazily on first _execute() call.
        self.sshpool = None
|
||||
|
||||
    def get_stats(self):
        """Get the stats from file-system.

        The available space is calculated by total space - SUM(quotas).
        :returns:
        total_fs_space = Total size from filesystem in config file.
        available_space = Free space currently on filesystem.
        """
        total_fs_space = self._get_filesystem_capacity()
        total_quota = 0
        share_list = self._get_vvol_list()

        # Sum the quota of every virtual volume; vvols without a quota
        # limit contribute nothing.
        for item in share_list:
            share_quota = self._get_share_quota(item)
            if share_quota is not None:
                total_quota += share_quota
        available_space = total_fs_space - total_quota
        LOG.debug("Available space in the file system: %(space)s.",
                  {'space': available_space})

        return total_fs_space, available_space
|
||||
|
||||
    def allow_access(self, share_id, host, share_proto, permission='rw'):
        """Allow access to the share.

        :param share_id: ID of share that access will be allowed.
        :param host: Host to which access will be allowed.
        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        :param permission: permission (e.g. 'rw', 'ro') that will be allowed.
        :raises exception.HNASBackendException: for an invalid permission.
        """
        # check if the share exists
        self.ensure_share(share_id, share_proto)
        export = self._nfs_export_list(share_id)

        # get the list that contains all the hosts allowed on the share
        host_list = export[0].export_configuration

        if permission in ('ro', 'rw'):
            # HNAS export entries have the form "host(permission)".
            host_access = host + '(' + permission + ')'
        else:
            msg = (_("Permission should be 'ro' or 'rw' instead "
                     "of %s") % permission)
            raise exception.HNASBackendException(msg=msg)

        # check if the host(s) is already allowed
        # NOTE(review): substring / prefix matching is used here, so a
        # host like "10.0.0.1" also matches entries for "10.0.0.10" --
        # confirm this is the intended behavior.
        if any(host in x for x in host_list):
            if host_access in host_list:
                LOG.debug("Host: %(host)s is already allowed.",
                          {'host': host})
            else:
                # remove all the hosts with different permissions
                host_list = [
                    x for x in host_list if not x.startswith(host)]
                # add the host with new permission
                host_list.append(host_access)
                self._update_access_rule(share_id, host_list)
        else:
            host_list.append(host_access)
            self._update_access_rule(share_id, host_list)
|
||||
|
||||
    def deny_access(self, share_id, host, share_proto, permission):
        """Deny access to the share.

        :param share_id: ID of share that access will be denied.
        :param host: Host to which access will be denied.
        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        :param permission: permission (e.g. 'rw', 'ro') that will be denied.
        :raises exception.HNASBackendException: for an invalid permission.
        """
        # check if the share exists
        self.ensure_share(share_id, share_proto)
        export = self._nfs_export_list(share_id)

        # get the list that contains all the hosts allowed on the share
        host_list = export[0].export_configuration

        if permission in ('ro', 'rw'):
            # HNAS export entries have the form "host(permission)".
            host_access = host + '(' + permission + ')'
        else:
            msg = (_("Permission should be 'ro' or 'rw' instead "
                     "of %s") % permission)
            raise exception.HNASBackendException(msg=msg)

        # check if the host(s) is already not allowed
        if host_access not in host_list:
            LOG.debug("Host: %(host)s is already not allowed.",
                      {'host': host})
        else:
            # remove the host on host_list
            host_list.remove(host_access)
            self._update_access_rule(share_id, host_list)
|
||||
|
||||
    def delete_share(self, share_id, share_proto):
        """Deletes share.

        It uses tree-delete-job-submit to format and delete virtual-volumes.
        Quota is deleted with virtual-volume.
        :param share_id: ID of share that will be deleted.
        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        """
        try:
            self.ensure_share(share_id, share_proto)
        except exception.HNASBackendException as e:
            # Best effort: still try to drop the export and vvol below,
            # since partial state may remain on the backend.
            LOG.warning(_LW("Share %s does not exist on backend anymore."),
                        share_id)
            LOG.exception(six.text_type(e))

        self._nfs_export_del(share_id)
        self._vvol_delete(share_id)

        LOG.debug("Export and share successfully deleted: %(shr)s on Manila.",
                  {'shr': share_id})
|
||||
|
||||
    def ensure_share(self, share_id, share_proto):
        """Ensure that share is exported.

        :param share_id: ID of share that will be checked.
        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        :returns: Returns a path of /shares/share_id if the export is ok.
        """
        path = '/shares/' + share_id

        # Re-mount the filesystem if it is currently unmounted.
        if not self._check_fs_mounted(self.fs_name):
            self._mount(self.fs_name)
            LOG.debug("Filesystem %(fs)s is unmounted. Mounting...",
                      {'fs': self.fs_name})
        # Each check raises through the SSH layer if the corresponding
        # backend object is missing or inconsistent.
        self._check_vvol(share_id)
        self._check_quota(share_id)
        self._check_export(share_id)
        return path
|
||||
|
||||
def create_share(self, share_id, share_size, share_proto):
|
||||
"""Creates share.
|
||||
|
||||
Creates a virtual-volume, adds a quota limit and exports it.
|
||||
:param share_id: ID of share that will be created.
|
||||
:param share_size: Size limit of share.
|
||||
:param share_proto: Storage protocol of share. Currently,
|
||||
only NFS storage protocol is supported.
|
||||
:returns: Returns a path of /shares/share_id if the export was
|
||||
created successfully.
|
||||
"""
|
||||
path = '/shares/' + share_id
|
||||
self._vvol_create(share_id, share_size)
|
||||
LOG.debug("Share created with id %(shr)s, size %(size)sG.",
|
||||
{'shr': share_id, 'size': share_size})
|
||||
try:
|
||||
# Create NFS export
|
||||
self._nfs_export_add(share_id)
|
||||
LOG.debug("NFS Export created to %(shr)s.",
|
||||
{'shr': share_id})
|
||||
return path
|
||||
except processutils.ProcessExecutionError as e:
|
||||
self._vvol_delete(share_id)
|
||||
msg = six.text_type(e)
|
||||
LOG.exception(msg)
|
||||
raise e
|
||||
|
||||
    def extend_share(self, share_id, share_size, share_proto):
        """Extends a share to new size.

        :param share_id: ID of share that will be extended.
        :param share_size: New size of share.
        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        :raises exception.HNASBackendException: when the filesystem lacks
            capacity for the requested size.
        """
        self.ensure_share(share_id, share_proto)

        total, available_space = self.get_stats()

        LOG.debug("Available space in filesystem: %(space)s.",
                  {'space': available_space})

        # NOTE(review): available_space already excludes this share's
        # current quota, so the full new size (not just the delta) must
        # fit in the remaining free space -- confirm this conservative
        # check is intended.
        if share_size < available_space:
            self._extend_quota(share_id, share_size)
        else:
            msg = (_("Failed to extend share %s.") % share_id)
            raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
    def manage_existing(self, share_proto, share_id):
        """Manages a share that exists on backend.

        :param share_proto: Storage protocol of share. Currently,
        only NFS storage protocol is supported.
        :param share_id: ID of share that will be managed.
        :returns: Returns a dict with size of share managed
        and its location (your path in file-system).
        :raises exception.HNASBackendException: when the share has no
            quota limit set on the backend.
        """
        self.ensure_share(share_id, share_proto)

        # The quota limit doubles as the share size in Manila; refuse to
        # manage shares that have none.
        share_size = self._get_share_quota(share_id)
        if share_size is None:
            msg = (_("The share %s trying to be managed does not have a "
                     "quota limit, please set it before manage.") % share_id)
            raise exception.HNASBackendException(msg=msg)

        path = six.text_type(self.evs_ip) + ':/shares/' + share_id

        return {'size': share_size, 'export_locations': [path]}
|
||||
|
||||
    def create_snapshot(self, share_id, snapshot_id):
        """Creates a snapshot of share.

        It copies the directory and all files to a new directory inside
        /snapshots/share_id/.
        :param share_id: ID of share for snapshot.
        :param snapshot_id: ID of new snapshot.
        :raises exception.HNASBackendException: when the clone job cannot
            be submitted for a non-empty source directory.
        """
        src_path = '/shares/' + share_id
        snap_path = '/snapshots/' + share_id + '/' + snapshot_id

        try:
            # '-e' makes tree-clone fail fast when the source is empty,
            # which is handled below by creating an empty snapshot dir.
            command = ['tree-clone-job-submit', '-e', '-f', self.fs_name,
                       src_path, snap_path]
            output, err = self._execute(command)
            if 'Request submitted successfully' in output:
                LOG.debug("Request for creating snapshot submitted "
                          "successfully.")
        except processutils.ProcessExecutionError as e:
            if ('Cannot find any clonable files in the source directory' in
                    e.stderr):

                LOG.warning(_LW("Source directory is empty, creating an empty "
                                "snapshot."))
                # selectfs must be serialized across greenthreads, hence
                # the locked helper.
                self._locked_selectfs('create', snap_path)
            else:
                msg = six.text_type(e)
                LOG.exception(msg)
                raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
def delete_snapshot(self, share_id, snapshot_id):
|
||||
"""Deletes snapshot.
|
||||
|
||||
It receives the share_id only to mount the path for snapshot.
|
||||
:param share_id: ID of share that snapshot was created.
|
||||
:param snapshot_id: ID of snapshot.
|
||||
"""
|
||||
path = '/snapshots/' + share_id + '/' + snapshot_id
|
||||
command = ['tree-delete-job-submit', '--confirm', '-f', self.fs_name,
|
||||
path]
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
path = '/snapshots/' + share_id
|
||||
if 'Request submitted successfully' in output:
|
||||
self._locked_selectfs('delete', path)
|
||||
|
||||
except processutils.ProcessExecutionError as e:
|
||||
if 'Source path: Cannot access' not in e.stderr:
|
||||
msg = six.text_type(e)
|
||||
LOG.exception(msg)
|
||||
raise e
|
||||
|
||||
    def create_share_from_snapshot(self, share, snapshot):
        """Creates a new share from snapshot.

        It copies everything from snapshot directory to a new vvol,
        set a quota limit for it and export.
        :param share: a dict from new share.
        :param snapshot: a dict from snapshot that will be copied to
        new share.
        :returns: Returns the path for new share.
        :raises exception.HNASBackendException: when the parent share has
            no quota, or the clone job was not accepted.
        """
        output = ''
        dst_path = '/shares/' + share['id']
        src_path = '/snapshots/' + snapshot['share_id'] + '/' + snapshot['id']

        # Before copying everything to new vvol, we need to create it,
        # because we only can transform an empty directory into a vvol.
        quota = self._get_share_quota(snapshot['share_id'])
        LOG.debug("Share size: %(quota)s.", {'quota': six.text_type(quota)})

        if quota is None:
            msg = (_("The original share %s does not have a quota limit, "
                     "please set it before creating a new "
                     "share.") % share['id'])
            raise exception.HNASBackendException(msg=msg)

        # The new share inherits the parent share's quota.
        self._vvol_create(share['id'], quota)

        try:
            # Copy the directory to new vvol
            # Syntax: tree-clone-job-submit <source-directory> <new-share>
            LOG.debug("Started share create from: %(shr)s.",
                      {'shr': six.text_type(snapshot['share_id'])})
            command = ['tree-clone-job-submit', '-f', self.fs_name,
                       src_path, dst_path]
            output, err = self._execute(command)
        except processutils.ProcessExecutionError as e:
            if ('Cannot find any clonable files in the source directory' in
                    e.stderr):
                # An empty snapshot still yields a valid (empty) share.
                LOG.warning(_LW("Source directory is empty, exporting "
                                "directory."))
                if self._nfs_export_add(share['id']):
                    return dst_path

        if 'Request submitted successfully' in output:
            # Create NFS export
            if self._nfs_export_add(share['id']):
                # Return export path
                return dst_path
        else:
            msg = (_("Share %s was not created.") % share['id'])
            raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
    def _execute(self, commands):
        """Run a command on the HNAS backend over SSH.

        Wraps *commands* in the HNAS 'ssc' console invocation (through the
        SMU when a cluster admin IP is configured), targets the configured
        EVS context, checks the arguments for SSH injection and executes
        the joined command line over a pooled SSH connection.

        :param commands: list of command arguments to run on the backend.
        :returns: tuple (stdout, stderr) of the remote command.
        :raises processutils.ProcessExecutionError: if the remote command
            exits with a non-zero status (logged before re-raising).
        """
        command = ['ssc', '127.0.0.1']
        # With a cluster admin IP set, authenticate through the SMU
        # instead of talking to the local node directly.
        if self.admin_ip0 is not None:
            command = ['ssc', '--smuauth', self.admin_ip0]

        command = command + ['console-context', '--evs', self.evs_id]
        commands = command + commands

        # Validate before joining: injection checks work per-argument.
        mutils.check_ssh_injection(commands)
        commands = ' '.join(commands)

        # Lazily create the SSH connection pool on first use.
        if not self.sshpool:
            self.sshpool = mutils.SSHPool(ip=self.ip,
                                          port=self.port,
                                          conn_timeout=None,
                                          login=self.user,
                                          password=self.password,
                                          privatekey=self.priv_key)
        with self.sshpool.item() as ssh:
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            try:
                out, err = processutils.ssh_execute(ssh, commands,
                                                    check_exit_code=True)
                LOG.debug("Command %(cmd)s result: out = %(out)s - err = "
                          "%(err)s.", {'cmd': commands,
                                       'out': out, 'err': err})
                return out, err
            except processutils.ProcessExecutionError as e:
                LOG.debug("Command %(cmd)s result: out = %(out)s - err = "
                          "%(err)s - exit = %(exit)s.", {'cmd': e.cmd,
                                                         'out': e.stdout,
                                                         'err': e.stderr,
                                                         'exit': e.exit_code})
                LOG.error(_LE("Error running SSH command."))
                raise
|
||||
|
||||
def _check_fs_mounted(self, fs_name):
|
||||
self._check_fs()
|
||||
fs_list = self._get_filesystem_list()
|
||||
for i in range(0, len(fs_list)):
|
||||
if fs_list[i].name == fs_name and fs_list[i].state == 'Mount':
|
||||
return True
|
||||
return False
|
||||
|
||||
    def _get_filesystem_list(self):
        """Parse 'filesystem-list' output into FileSystem objects.

        Only lines between the first two (presumably header) lines and the
        trailing empty line are considered. Rows containing 'NoEVS' are
        skipped. When the console wraps a long file system name onto its
        own "Filesystem <name>" line, that name (minus its trailing
        character, presumably a separator — TODO confirm) is re-attached
        to the data row that follows it.

        :returns: list of FileSystem objects visible in the current EVS.
        """
        command = ['filesystem-list']
        output, err = self._execute(command)
        items = output.split('\n')
        filesystem_list = []
        fs_name = None
        if len(items) > 2:
            # j tracks the index of the last appended FileSystem so a
            # wrapped name can be assigned to it.
            j = 0
            for i in range(2, len(items) - 1):
                if "Filesystem " in items[i] and len(items[i].split()) == 2:
                    # Name-only line: remember it for the next data row.
                    description, fs_name = items[i].split()
                    fs_name = fs_name[:len(fs_name) - 1]
                elif "NoEVS" not in items[i]:
                    # Not considering FS without EVS
                    filesystem_list.append(FileSystem(items[i]))
                    if fs_name is not None:
                        filesystem_list[j].name = fs_name
                        fs_name = None
                    j += 1
                else:
                    LOG.debug("Ignoring filesystems without EVS.")

        return filesystem_list
|
||||
|
||||
def _nfs_export_add(self, share_id):
|
||||
path = '/shares/' + share_id
|
||||
# nfs-export add -S disable -c <export-name> <file-system> <path>
|
||||
command = ['nfs-export', 'add', '-S', 'disable', '-c', '127.0.0.1',
|
||||
path, self.fs_name, path]
|
||||
output, err = self._execute(command)
|
||||
return True
|
||||
|
||||
def _nfs_export_del(self, share_id):
|
||||
path = '/shares/' + share_id
|
||||
command = ['nfs-export', 'del', path]
|
||||
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
except exception.HNASBackendException as e:
|
||||
LOG.warning(_LW("Export %s does not exist on backend anymore."),
|
||||
path)
|
||||
LOG.exception(six.text_type(e))
|
||||
|
||||
def _update_access_rule(self, share_id, host_list):
|
||||
# mount the command line
|
||||
command = ['nfs-export', 'mod', '-c']
|
||||
|
||||
if len(host_list) == 0:
|
||||
command.append('127.0.0.1')
|
||||
else:
|
||||
string_command = '"' + six.text_type(host_list[0])
|
||||
|
||||
for i in range(1, len(host_list)):
|
||||
string_command += ',' + (six.text_type(host_list[i]))
|
||||
string_command += '"'
|
||||
command.append(string_command)
|
||||
|
||||
path = '/shares/' + share_id
|
||||
command.append(path)
|
||||
output, err = self._execute(command)
|
||||
|
||||
if ("Export modified successfully" in output or
|
||||
"Export modified successfully" in err):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _nfs_export_list(self, share_id=''):
|
||||
if share_id is not '':
|
||||
share_id = '/shares/' + share_id
|
||||
command = ['nfs-export', 'list ', six.text_type(share_id)]
|
||||
output, err = self._execute(command)
|
||||
nfs_export_list = []
|
||||
|
||||
if 'No exports are currently configured' not in output:
|
||||
items = output.split('Export name')
|
||||
|
||||
if items[0][0] == '\n':
|
||||
items.pop(0)
|
||||
|
||||
for i in range(0, len(items)):
|
||||
nfs_export_list.append(Export(items[i]))
|
||||
|
||||
return nfs_export_list
|
||||
|
||||
def _mount(self, fs):
|
||||
command = ['mount', fs]
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
if 'successfully mounted' in output:
|
||||
return True
|
||||
except processutils.ProcessExecutionError as e:
|
||||
if 'file system is already mounted' in e.stderr:
|
||||
return True
|
||||
else:
|
||||
msg = six.text_type(e)
|
||||
LOG.exception(msg)
|
||||
raise e
|
||||
|
||||
def _vvol_create(self, vvol_name, vvol_quota):
|
||||
# create a virtual-volume inside directory
|
||||
if self._check_fs():
|
||||
path = '/shares/' + vvol_name
|
||||
command = ['virtual-volume', 'add', '--ensure', self.fs_name,
|
||||
vvol_name, path]
|
||||
output, err = self._execute(command)
|
||||
|
||||
# put a quota limit in virtual-volume to deny expand abuses
|
||||
self._quota_add(vvol_name, vvol_quota)
|
||||
return True
|
||||
else:
|
||||
msg = (_("Filesystem %s does not exist or it is not available "
|
||||
"in the current EVS context.") % self.fs_name)
|
||||
raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
def _quota_add(self, vvol_name, vvol_quota):
|
||||
if vvol_quota > 0:
|
||||
str_quota = six.text_type(vvol_quota) + 'G'
|
||||
command = ['quota', 'add', '--usage-limit',
|
||||
str_quota, '--usage-hard-limit',
|
||||
'yes', self.fs_name, vvol_name]
|
||||
output, err = self._execute(command)
|
||||
return True
|
||||
return False
|
||||
|
||||
def _vvol_delete(self, vvol_name):
|
||||
path = '/shares/' + vvol_name
|
||||
# Virtual-volume and quota are deleted together
|
||||
command = ['tree-delete-job-submit', '--confirm', '-f',
|
||||
self.fs_name, path]
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
return True
|
||||
except processutils.ProcessExecutionError as e:
|
||||
if 'Source path: Cannot access' in e.stderr:
|
||||
LOG.debug("Share %(shr)s does not exist.",
|
||||
{'shr': six.text_type(vvol_name)})
|
||||
else:
|
||||
msg = six.text_type(e)
|
||||
LOG.exception(msg)
|
||||
raise e
|
||||
|
||||
def _extend_quota(self, vvol_name, new_size):
|
||||
str_quota = six.text_type(new_size) + 'G'
|
||||
command = ['quota', 'mod', '--usage-limit', str_quota,
|
||||
self.fs_name, vvol_name]
|
||||
output, err = self._execute(command)
|
||||
return True
|
||||
|
||||
def _check_fs(self):
|
||||
fs_list = self._get_filesystem_list()
|
||||
fs_name_list = []
|
||||
for i in range(0, len(fs_list)):
|
||||
fs_name_list.append(fs_list[i].name)
|
||||
if fs_list[i].name == self.fs_name:
|
||||
return True
|
||||
return False
|
||||
|
||||
def _check_vvol(self, vvol_name):
|
||||
command = ['virtual-volume', 'list', '--verbose', self.fs_name,
|
||||
vvol_name]
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
return True
|
||||
except processutils.ProcessExecutionError as e:
|
||||
msg = six.text_type(e)
|
||||
LOG.exception(msg)
|
||||
msg = (_("Virtual volume %s does not exist.") % vvol_name)
|
||||
raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
def _check_quota(self, vvol_name):
|
||||
command = ['quota', 'list', '--verbose', self.fs_name, vvol_name]
|
||||
output, err = self._execute(command)
|
||||
|
||||
if 'No quotas matching specified filter criteria' not in output:
|
||||
return True
|
||||
else:
|
||||
msg = (_("Virtual volume %s does not have any quota.") % vvol_name)
|
||||
raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
def _check_export(self, vvol_name):
|
||||
export = self._nfs_export_list(vvol_name)
|
||||
if (vvol_name in export[0].export_name and
|
||||
self.fs_name in export[0].file_system_label):
|
||||
return True
|
||||
else:
|
||||
msg = (_("Export %s does not exist.") % export[0].export_name)
|
||||
raise exception.HNASBackendException(msg=msg)
|
||||
|
||||
    def _get_share_quota(self, share_id):
        """Return the quota limit of a share in GB, or None if unset.

        Parses 'quota list' output: the hard-limit row is located by the
        'Limit'/'Hard' keywords and the value and its unit are read from
        fixed space-split column positions (12 and 13) of that row —
        fragile against output-format changes.

        :param share_id: ID of the share (virtual volume) to inspect.
        :returns: quota as a float in GB (TB values are converted), or
            None when the quota is unset or no matching quota exists.
        """
        command = ['quota', 'list', self.fs_name, six.text_type(share_id)]
        output, err = self._execute(command)
        items = output.split('\n')

        for i in range(0, len(items) - 1):
            if ('Unset' not in items[i] and
                    'No quotas matching' not in items[i]):
                if 'Limit' in items[i] and 'Hard' in items[i]:
                    quota = float(items[i].split(' ')[12])

                    # If the quota is 1 or more TB, converts to GB
                    if items[i].split(' ')[13] == 'TB':
                        return quota * units.Ki

                    return quota
            else:
                # Returns None if the quota is unset
                return None
|
||||
|
||||
def _get_vvol_list(self):
|
||||
command = ['virtual-volume', 'list', self.fs_name]
|
||||
output, err = self._execute(command)
|
||||
|
||||
vvol_list = []
|
||||
items = output.split('\n')
|
||||
|
||||
for i in range(0, len(items) - 1):
|
||||
if ":" not in items[i]:
|
||||
vvol_list.append(items[i])
|
||||
|
||||
return vvol_list
|
||||
|
||||
    def _get_filesystem_capacity(self):
        """Return the configured file system's capacity in GiB.

        Parses 'filesystem-limits' output: finds the 'Current capacity'
        row, locates the token containing 'GiB' and strips the unit.

        :returns: capacity as an int in GiB; implicitly returns None when
            no 'Current capacity' row is found.
        """
        command = ['filesystem-limits', self.fs_name]
        output, err = self._execute(command)

        items = output.split('\n')

        for i in range(0, len(items) - 1):
            if 'Current capacity' in items[i]:
                fs_capacity = items[i].split(' ')

                # Gets the index of the file system capacity (EX: 20GiB)
                index = [i for i, string in enumerate(fs_capacity)
                         if 'GiB' in string]

                fs_capacity = fs_capacity[index[0]]
                fs_capacity = fs_capacity.split('GiB')[0]

                return int(fs_capacity)
|
||||
|
||||
@mutils.synchronized("hds_hnas_select_fs", external=True)
|
||||
def _locked_selectfs(self, op, path):
|
||||
if op == 'create':
|
||||
command = ['selectfs', self.fs_name, '\n',
|
||||
'ssc', '127.0.0.1', 'console-context', '--evs',
|
||||
self.evs_id, 'mkdir', '-p', path]
|
||||
output, err = self._execute(command)
|
||||
|
||||
if op == 'delete':
|
||||
command = ['selectfs', self.fs_name, '\n',
|
||||
'ssc', '127.0.0.1', 'console-context', '--evs',
|
||||
self.evs_id, 'rmdir', path]
|
||||
try:
|
||||
output, err = self._execute(command)
|
||||
except processutils.ProcessExecutionError:
|
||||
LOG.debug("Share %(path)s has more snapshots.", {'path': path})
|
||||
|
||||
|
||||
class FileSystem(object):
    """Value object parsed from one row of HNAS 'filesystem-list' output.

    Attributes are only set when *data* is non-empty and splits into at
    least 7 whitespace-separated fields, mirroring the console row layout:
    name, dev, on_span, state, evs, capacity, confined[, flag].
    """

    def __init__(self, data):
        if data:
            fields = data.split()
            if len(fields) >= 7:
                self.name = fields[0]
                self.dev = fields[1]
                self.on_span = fields[2]
                self.state = fields[3]
                self.evs = int(fields[4])
                self.capacity = int(fields[5])
                self.confined = int(fields[6])
                # Optional trailing flag column.
                self.flag = fields[7] if len(fields) == 8 else ''
|
||||
|
||||
|
||||
class Export(object):
    """Parsed representation of one 'nfs-export list' entry.

    Splits the raw console text into header fields and the export
    configuration (client access) lines. When the backing file system is
    reported as '*** not available ***' the detailed file system fields
    are absent and the remaining fields sit 7 lines earlier, which is
    compensated by the ``index`` offset.
    """

    def __init__(self, data):
        if data:
            split_data = data.split('Export configuration:\n')
            items = split_data[0].split('\n')

            self.export_name = items[0].split(':')[1].strip()
            self.export_path = items[1].split(':')[1].strip()

            if '*** not available ***' in items[2]:
                # File system details unavailable: only the info message
                # is kept and no line offset applies.
                self.file_system_info = items[2].split(':')[1].strip()
                index = 0

            else:
                self.file_system_label = items[2].split(':')[1].strip()
                self.file_system_size = items[3].split(':')[1].strip()
                self.file_system_free_space = items[4].split(':')[1].strip()
                self.file_system_state = items[5].split(':')[1]
                self.formatted = items[6].split('=')[1].strip()
                self.mounted = items[7].split('=')[1].strip()
                self.failed = items[8].split('=')[1].strip()
                self.thin_provisioned = items[9].split('=')[1].strip()
                index = 7

            self.access_snapshots = items[3 + index].split(':')[1].strip()
            self.display_snapshots = items[4 + index].split(':')[1].strip()
            self.read_caching = items[5 + index].split(':')[1].strip()
            self.disaster_recovery_setting = items[6 + index].split(':')[1]
            self.recovered = items[7 + index].split('=')[1].strip()
            self.transfer_setting = items[8 + index].split('=')[1].strip()

            # Keep only configuration lines that carry actual content
            # (at least one letter or digit).
            self.export_configuration = []
            export_config = split_data[1].split('\n')
            for i in range(0, len(export_config)):
                if any(j.isdigit() or j.isalpha() for j in export_config[i]):
                    self.export_configuration.append(export_config[i])
|
|
@ -0,0 +1,438 @@
|
|||
# Copyright (c) 2015 Hitachi Data Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from manila import context
|
||||
from manila import exception
|
||||
import manila.share.configuration
|
||||
import manila.share.driver
|
||||
from manila.share.drivers.hitachi import hds_hnas
|
||||
from manila.share.drivers.hitachi import ssh
|
||||
from manila.share import share_types
|
||||
from manila import test
|
||||
from manila.tests.db import fakes as db_fakes
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def fake_share(**kwargs):
    """Build a FakeModel share with sane defaults, overridable via kwargs."""
    defaults = {
        'id': 'fake_id',
        'size': 1,
        'share_type_id': '7450f16e-4c7f-42ab-90f1-c1cfb2a6bc70',
        'share_proto': 'nfs',
        'share_network_id': 'fake_network_id',
        'share_server_id': 'fake_server_id',
        'host': ['None'],
        'export_locations': [{'path': '172.24.44.10:/nfs/volume-00002'}],
    }
    defaults.update(kwargs)
    return db_fakes.FakeModel(defaults)
|
||||
|
||||
|
||||
@ddt.ddt
class HDSHNASTestCase(test.TestCase):
    """Unit tests for the HDS HNAS share driver (DHSS=False mode).

    The SSH backend (ssh.HNASSSHBackend) is mocked per test; setUp wires
    fake configuration values, a mocked private storage and a mocked
    driver LOG so log-call assertions can be made.
    """

    def setUp(self):
        super(HDSHNASTestCase, self).setUp()

        # Fake context/config shared by all tests.
        self._context = context.get_admin_context()
        self._execute = mock.Mock(return_value=('', ''))
        CONF.set_default('driver_handles_share_servers', False)
        CONF.hds_hnas_evs_id = '2'
        CONF.hds_hnas_evs_ip = '172.24.44.10'
        CONF.hds_hnas_ip = '172.24.44.1'
        CONF.hds_hnas_ip_port = 'hds_hnas_ip_port'
        CONF.hds_hnas_user = 'hds_hnas_user'
        CONF.hds_hnas_password = 'hds_hnas_password'
        CONF.hds_hnas_file_system = 'file_system'
        CONF.hds_hnas_ssh_private_key = 'private_key'
        CONF.hds_hnas_cluster_admin_ip0 = None
        self.const_dhss = 'driver_handles_share_servers'
        self.fake_conf = manila.share.configuration.Configuration(None)
        self._db = mock.Mock()

        # Private storage mock returning nothing for get/delete.
        self.fake_private_storage = mock.Mock()
        self.mock_object(self.fake_private_storage, 'get',
                         mock.Mock(return_value=None))
        self.mock_object(self.fake_private_storage, 'delete',
                         mock.Mock(return_value=None))

        # Mock the driver module logger so tests can assert on log calls.
        self.mock_log = self.mock_object(manila.share.drivers.hitachi.hds_hnas,
                                         'LOG')

        self._driver = hds_hnas.HDSHNASDriver(
            private_storage=self.fake_private_storage,
            configuration=self.fake_conf)

        # Fixture data used across the test methods below.
        self.server = {
            'instance_id': 'fake_instance_id',
            'ip': 'fake_ip',
            'username': 'fake_username',
            'password': 'fake_password',
            'pk_path': 'fake_pk_path',
            'backend_details': {
                'public_address': '1.2.3.4',
                'instance_id': 'fake',
            },
        }

        self.invalid_server = {
            'backend_details': {
                'ip': '1.1.1.1',
                'instance_id': 'fake',
            },
        }

        self.nfs_export_list = {'export_configuration': 'fake_export'}

        self.share = fake_share()

        # Share with a CIFS protocol, used to trigger protocol errors.
        self.invalid_share = {
            'id': 'fakeid',
            'name': 'fakename',
            'size': 1,
            'host': 'hnas',
            'share_proto': 'CIFS',
            'share_type_id': 1,
            'share_network_id': 'fake share network id',
            'share_server_id': 'fake share server id',
            'export_locations': [{'path': '172.24.44.110:'
                                          '/mnt/nfs/volume-00002'}],
        }

        self.access = {
            'id': 'fakeaccid',
            'access_type': 'ip',
            'access_to': '10.0.0.2',
            'access_level': 'fake_level',
            'state': 'active',
        }

        self.snapshot = {
            'id': 'snap_name',
            'share_id': 'fake_name',
        }

    @ddt.data('hds_hnas_evs_id', 'hds_hnas_evs_ip',
              'hds_hnas_ip', 'hds_hnas_user')
    def test_init_invalid_conf_parameters(self, attr_name):
        """Each required config option missing must fail driver init."""
        self.mock_object(manila.share.driver.ShareDriver,
                         '__init__')
        setattr(CONF, attr_name, None)

        self.assertRaises(exception.InvalidParameterValue,
                          self._driver.__init__)

    def test_init_invalid_credentials(self):
        """Neither password nor private key configured must fail init."""
        self.mock_object(manila.share.driver.ShareDriver,
                         '__init__')
        CONF.hds_hnas_password = None
        CONF.hds_hnas_ssh_private_key = None

        self.assertRaises(exception.InvalidParameterValue,
                          self._driver.__init__)

    def test_allow_access(self):
        """allow_access delegates to the SSH backend with share/rule data."""
        self.mock_object(ssh.HNASSSHBackend, 'allow_access')

        self._driver.allow_access(self._context, self.share,
                                  self.access, self.server)

        ssh.HNASSSHBackend.allow_access.assert_called_once_with('fake_id',
                                                                '10.0.0.2',
                                                                'nfs',
                                                                'fake_level')
        self.assertTrue(self.mock_log.debug.called)
        self.assertTrue(self.mock_log.info.called)

    def test_allow_access_invalid_access_type(self):
        """Only IP access type is supported."""
        access = {'access_type': 'user', 'access_to': 'fake_dest'}

        self.assertRaises(exception.InvalidShareAccess,
                          self._driver.allow_access, self._context,
                          self.share, access, self.server)

    def test_allow_access_invalid_share_protocol(self):
        """Non-NFS shares must be rejected by allow_access."""
        self.assertRaises(exception.InvalidShareAccess,
                          self._driver.allow_access, self._context,
                          self.invalid_share, self.access, self.server)

    def test_deny_access(self):
        """deny_access delegates to the SSH backend with share/rule data."""
        self.mock_object(ssh.HNASSSHBackend, 'deny_access')

        self._driver.deny_access(self._context, self.share,
                                 self.access, self.server)

        ssh.HNASSSHBackend.deny_access.assert_called_once_with('fake_id',
                                                               '10.0.0.2',
                                                               'nfs',
                                                               'fake_level')
        self.assertTrue(self.mock_log.debug.called)
        self.assertTrue(self.mock_log.info.called)

    def test_deny_access_invalid_share_protocol(self):
        """Non-NFS shares must be rejected by deny_access."""
        self.assertRaises(exception.InvalidShareAccess,
                          self._driver.deny_access, self._context,
                          self.invalid_share, self.access, self.server)

    def test_create_share(self):
        """create_share returns '<evs_ip>:<backend path>'."""
        # share server none
        path = '/' + self.share['id']

        self.mock_object(ssh.HNASSSHBackend, 'create_share',
                         mock.Mock(return_value=path))

        result = self._driver.create_share(self._context,
                                           self.share)

        ssh.HNASSSHBackend.create_share.assert_called_once_with('fake_id', 1,
                                                                'nfs')
        self.assertEqual('172.24.44.10:/fake_id', result)
        self.assertTrue(self.mock_log.debug.called)

    def test_create_share_invalid_share_protocol(self):
        """Non-NFS shares must be rejected by create_share."""
        self.assertRaises(exception.ShareBackendException,
                          self._driver.create_share,
                          self._context, self.invalid_share)
        self.assertTrue(self.mock_log.debug.called)

    def test_delete_share(self):
        """delete_share delegates to the SSH backend."""
        self.mock_object(ssh.HNASSSHBackend, 'delete_share')

        self._driver.delete_share(self._context, self.share)

        ssh.HNASSSHBackend.delete_share.assert_called_once_with('fake_id',
                                                                'nfs')
        self.assertTrue(self.mock_log.debug.called)

    def test_ensure_share(self):
        """ensure_share returns the export location list."""
        export_list = ['172.24.44.10:/shares/fake_id']
        path = '/shares/fake_id'

        self.mock_object(ssh.HNASSSHBackend, 'ensure_share',
                         mock.Mock(return_value=path))

        out = self._driver.ensure_share(self._context, self.share)

        ssh.HNASSSHBackend.ensure_share.assert_called_once_with('fake_id',
                                                                'nfs')
        self.assertTrue(self.mock_log.debug.called)
        self.assertEqual(export_list, out)

    def test_ensure_share_invalid_share_protocol(self):
        # invalid share proto
        self.assertRaises(exception.ShareBackendException,
                          self._driver.ensure_share,
                          self._context, self.invalid_share)
        self.assertTrue(self.mock_log.debug.called)

    def test_extend_share(self):
        """extend_share delegates to the SSH backend with the new size."""
        self.mock_object(ssh.HNASSSHBackend, 'extend_share')

        self._driver.extend_share(self.share, 5)

        ssh.HNASSSHBackend.extend_share.assert_called_once_with('fake_id', 5,
                                                                'nfs')
        self.assertTrue(self.mock_log.debug.called)
        self.assertTrue(self.mock_log.info.called)

    def test_extend_share_invalid_share_protocol(self):
        # invalid share with proto != nfs
        m_extend = self.mock_object(ssh.HNASSSHBackend, 'extend_share')

        self.assertRaises(exception.ShareBackendException,
                          self._driver.extend_share,
                          self.invalid_share, 5)
        self.assertFalse(m_extend.called)
        self.assertTrue(self.mock_log.debug.called)

    # TODO(alyson): Implement network tests in DHSS = true mode
    def test_get_network_allocations_number(self):
        """DHSS=False mode needs no network allocations."""
        self.assertEqual(0, self._driver.get_network_allocations_number())

    def test_create_snapshot(self):
        # tests when hnas.create_snapshot returns successfully
        self.mock_object(ssh.HNASSSHBackend, 'create_snapshot')

        self._driver.create_snapshot(self._context, self.snapshot)

        ssh.HNASSSHBackend.create_snapshot.assert_called_once_with('fake_name',
                                                                   'snap_name')
        self.assertTrue(self.mock_log.debug.called)
        self.assertTrue(self.mock_log.info.called)

    def test_delete_snapshot(self):
        # tests when hnas.delete_snapshot returns True
        self.mock_object(ssh.HNASSSHBackend, 'delete_snapshot')

        self._driver.delete_snapshot(self._context, self.snapshot)

        ssh.HNASSSHBackend.delete_snapshot.assert_called_once_with('fake_name',
                                                                   'snap_name')
        self.assertTrue(self.mock_log.debug.called)
        self.assertTrue(self.mock_log.info.called)

    def test_create_share_from_snapshot(self):
        """create_share_from_snapshot returns '<evs_ip>:<backend path>'."""
        # share server none
        path = '/' + self.share['id']

        self.mock_object(ssh.HNASSSHBackend, 'create_share_from_snapshot',
                         mock.Mock(return_value=path))

        result = self._driver.create_share_from_snapshot(self._context,
                                                         self.share,
                                                         self.snapshot)

        (ssh.HNASSSHBackend.create_share_from_snapshot.
         assert_called_with(self.share, self.snapshot))
        self.assertEqual('172.24.44.10:/fake_id', result)
        self.assertTrue(self.mock_log.debug.called)

    def test_manage_existing(self):
        """manage_existing passes through the SSH backend's result."""
        driver_op = 'fake'
        local_id = 'volume-00002'
        manage_return = {
            'size': 1,
            'export_locations': '172.24.44.10:/mnt/nfs/volume-00002',
        }

        CONF.set_default('share_backend_name', 'HDS1')
        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='False'))
        self.mock_object(ssh.HNASSSHBackend, 'manage_existing',
                         mock.Mock(return_value=manage_return))

        output = self._driver.manage_existing(self.share, driver_op)

        self.assertEqual(manage_return, output)
        ssh.HNASSSHBackend.manage_existing.assert_called_once_with(self.share,
                                                                   local_id)
        self.assertTrue(self.mock_log.info.called)

        # Undo the share_backend_name default set above.
        CONF._unset_defaults_and_overrides()

    def test_manage_share_type_dhss_true(self):
        """Share type with DHSS=True mismatches a DHSS=False driver."""
        driver_op = 'fake'

        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='True'))

        self.assertRaises(exception.ManageExistingShareTypeMismatch,
                          self._driver.manage_existing,
                          self.share, driver_op)
        share_types.get_share_type_extra_specs.assert_called_once_with(
            self.share['share_type_id'], self.const_dhss)

    def test_manage_conf_dhss_true(self):
        """DHSS=True in configuration is an invalid mode for manage."""
        driver_op = 'fake'

        CONF.set_default('driver_handles_share_servers', True)
        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='True'))

        self.assertRaises(exception.InvalidDriverMode,
                          self._driver.manage_existing,
                          self.share, driver_op)

    def test_manage_invalid_host(self):
        """A host string naming another backend must be rejected."""
        driver_op = 'fake'
        self.share_invalid_host = {
            'id': 'fake_id',
            'size': 1,
            'share_type_id': '7450f16e-4c7f-42ab-90f1-c1cfb2a6bc70',
            'share_proto': 'nfs',
            'share_network_id': 'fake_network_id',
            'share_server_id': 'fake_server_id',
            'host': 'fake@INVALID#fake_pool',
            'export_locations': [{'path': '172.24.44.10:/nfs/volume-00002'}],
        }

        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='False'))

        self.assertRaises(exception.ShareBackendException,
                          self._driver.manage_existing,
                          self.share_invalid_host, driver_op)
        share_types.get_share_type_extra_specs.assert_called_once_with(
            self.share_invalid_host['share_type_id'], self.const_dhss)

    def test_manage_invalid_path(self):
        """An export path without the expected layout must be rejected."""
        driver_op = 'fake'
        self.share_invalid_path = {
            'id': 'fake_id',
            'size': 1,
            'share_type_id': '7450f16e-4c7f-42ab-90f1-c1cfb2a6bc70',
            'share_proto': 'nfs',
            'share_network_id': 'fake_network_id',
            'share_server_id': 'fake_server_id',
            'host': 'fake@INVALID#fake_pool',
            'export_locations': [{'path': '172.24.44.10:/volume-00002'}],
        }

        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='False'))

        self.assertRaises(exception.ShareBackendException,
                          self._driver.manage_existing,
                          self.share_invalid_path, driver_op)
        share_types.get_share_type_extra_specs.assert_called_once_with(
            self.share_invalid_path['share_type_id'], self.const_dhss)

    def test_manage_invalid_evs_ip(self):
        """An export location on a foreign EVS IP must be rejected."""
        driver_op = 'fake'
        self.share_invalid_ip = {
            'id': 'fake_id',
            'size': 1,
            'share_type_id': '7450f16e-4c7f-42ab-90f1-c1cfb2a6bc70',
            'share_proto': 'nfs',
            'share_network_id': 'fake_network_id',
            'share_server_id': 'fake_server_id',
            'host': 'fake@HDS1#fake_pool',
            'export_locations': [{'path': '9.9.9.9:/nfs/volume-00002'}],
        }

        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value='False'))

        self.assertRaises(exception.ShareBackendException,
                          self._driver.manage_existing,
                          self.share_invalid_ip, driver_op)
        share_types.get_share_type_extra_specs.assert_called_once_with(
            self.share_invalid_ip['share_type_id'], self.const_dhss)

    def test_unmanage(self):
        """unmanage drops the share's entry from private storage."""
        self._driver.unmanage(self.share)

        self.assertTrue(self.mock_log.info.called)
        self.fake_private_storage.delete.assert_called_once_with(
            self.share['id'])

    def test_update_share_stats(self):
        """_update_share_stats maps backend stats into the stats dict."""
        self.mock_object(ssh.HNASSSHBackend, 'get_stats',
                         mock.Mock(return_value=[100, 30]))

        self._driver._update_share_stats()
        self.assertEqual(False,
                         self._driver._stats['driver_handles_share_servers'])
        self.assertEqual(100, self._driver._stats['total_capacity_gb'])
        self.assertEqual(30, self._driver._stats['free_capacity_gb'])
        self.assertEqual(0, self._driver._stats['reserved_percentage'])
        self.assertEqual(True, self._driver._stats['snapshot_support'])
        ssh.HNASSSHBackend.get_stats.assert_called_once_with()
        self.assertTrue(self.mock_log.info.called)
|
File diff suppressed because it is too large
Load Diff
|
@ -193,6 +193,7 @@ class GetFromPathTestCase(test.TestCase):
|
|||
self.assertEqual(['b_1'], f(input, "a/b"))
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class GenericUtilsTestCase(test.TestCase):
|
||||
def test_read_cached_file(self):
|
||||
cache_data = {"data": 1123, "mtime": 1}
|
||||
|
@ -332,6 +333,26 @@ class GenericUtilsTestCase(test.TestCase):
|
|||
self.assertFalse(utils.is_eventlet_bug105())
|
||||
fake_dns.getaddrinfo.assert_called_once_with('::1', 80)
|
||||
|
||||
@ddt.data(['ssh', '-D', 'my_name@name_of_remote_computer'],
|
||||
['echo', '"quoted arg with space"'],
|
||||
['echo', "'quoted arg with space'"])
|
||||
def test_check_ssh_injection(self, cmd):
|
||||
cmd_list = cmd
|
||||
self.assertIsNone(utils.check_ssh_injection(cmd_list))
|
||||
|
||||
@ddt.data(['ssh', 'my_name@ name_of_remote_computer'],
|
||||
['||', 'my_name@name_of_remote_computer'],
|
||||
['cmd', 'virus;ls'],
|
||||
['cmd', '"arg\"withunescaped"'],
|
||||
['cmd', 'virus;"quoted argument"'],
|
||||
['echo', '"quoted argument";rm -rf'],
|
||||
['echo', "'quoted argument `rm -rf`'"],
|
||||
['echo', '"quoted";virus;"quoted"'],
|
||||
['echo', '"quoted";virus;\'quoted\''])
|
||||
def test_check_ssh_injection_on_error0(self, cmd):
|
||||
self.assertRaises(exception.SSHInjectionThreat,
|
||||
utils.check_ssh_injection, cmd)
|
||||
|
||||
|
||||
class MonkeyPatchTestCase(test.TestCase):
|
||||
"""Unit test for utils.monkey_patch()."""
|
||||
|
|
|
@ -22,6 +22,7 @@ import errno
|
|||
import inspect
|
||||
import os
|
||||
import pyclbr
|
||||
import re
|
||||
import shutil
|
||||
import socket
|
||||
import sys
|
||||
|
@ -147,6 +148,41 @@ class SSHPool(pools.Pool):
|
|||
self.current_size -= 1
|
||||
|
||||
|
||||
def check_ssh_injection(cmd_list):
    """Reject command argument lists that look like SSH injection.

    Each argument is checked after stripping surrounding whitespace:
    a quoted argument may not contain unescaped inner quotes; an
    unquoted argument may not contain whitespace; and any argument may
    not contain an unescaped shell metacharacter.

    :param cmd_list: list of command arguments to validate.
    :raises exception.SSHInjectionThreat: when a suspicious argument is
        found.
    """
    dangerous_tokens = ('`', '$', '|', '||', ';', '&', '&&', '>', '>>',
                        '<')

    for raw_arg in cmd_list:
        stripped = raw_arg.strip()

        # Matching quotes on both ends mark a quoted argument.
        quoted_match = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$',
                                stripped)
        if quoted_match:
            # Inside quotes, any unescaped quote character is suspect.
            inner = quoted_match.group('quoted')
            if inner and (re.match('[\'"]', inner) or
                          re.search('[^\\\\][\'"]', inner)):
                raise exception.SSHInjectionThreat(command=cmd_list)
        elif len(stripped.split()) > 1:
            # Whitespace is only permitted inside quoted arguments.
            raise exception.SSHInjectionThreat(command=cmd_list)

        # Shell special operators must appear escaped or not at all.
        for token in dangerous_tokens:
            position = stripped.find(token)
            if position == -1:
                continue
            if position == 0 or stripped[position - 1] != '\\':
                raise exception.SSHInjectionThreat(command=cmd_list)
|
||||
|
||||
|
||||
class LazyPluggable(object):
|
||||
"""A pluggable backend loaded lazily based on some value."""
|
||||
|
||||
|
|
Loading…
Reference in New Issue