Merge "Add Cyborg SPDK Driver"

Zuul 2018-02-05 04:07:44 +00:00 committed by Gerrit Code Review
commit 4e9dd99c29
21 changed files with 2634 additions and 1 deletion

.idea/workspace.xml (1352 lines): diff suppressed because it is too large


@@ -15,7 +15,7 @@
"""Accelerator base exception handling. """
import collections
import json
from oslo_log import log as logging
import six
from six.moves import http_client
@@ -115,3 +115,7 @@ class InvalidParameterValue(Invalid):
class MissingParameterValue(InvalidParameterValue):
_msg_fmt = "%(err)s"
class InvalidAccelerator(InvalidParameterValue):
_msg_fmt = "%(err)s"


@@ -0,0 +1,166 @@
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Configuration support for all drivers. from openstack/cyborg"""
from oslo_config import cfg
CONF = cfg.CONF
SHARED_CONF_GROUP = 'backend_defaults'
class DefaultGroupConfiguration(object):
"""Get config options from only DEFAULT."""
def __init__(self):
# set the local conf so that __call__'s know what to use
self.local_conf = CONF
def _ensure_config_values(self, accelerator_opts):
CONF.register_opts(accelerator_opts, group=None)
def append_config_values(self, accelerator_opts):
self._ensure_config_values(accelerator_opts)
def safe_get(self, value):
"""get default group value from CONF
:param value: value.
:return: get default group value from CONF.
"""
try:
return self.__getattr__(value)
except cfg.NoSuchOptError:
return None
def __getattr__(self, value):
"""Don't use self.local_conf to avoid reentrant call to __getattr__()
:param value: value.
:return: getattr(local_conf, value).
"""
local_conf = object.__getattribute__(self, 'local_conf')
return getattr(local_conf, value)
class BackendGroupConfiguration(object):
def __init__(self, accelerator_opts, config_group=None):
"""Initialize configuration.
This takes care of grafting the implementation's config
values into the config group and shared defaults. We will try to
pull values from the specified 'config_group', but fall back to
defaults from the SHARED_CONF_GROUP.
"""
self.config_group = config_group
# set the local conf so that __call__'s know what to use
self._ensure_config_values(accelerator_opts)
self.backend_conf = CONF._get(self.config_group)
self.shared_backend_conf = CONF._get(SHARED_CONF_GROUP)
def _safe_register(self, opt, group):
try:
CONF.register_opt(opt, group=group)
except cfg.DuplicateOptError:
pass # If it's already registered ignore it
def _ensure_config_values(self, accelerator_opts):
"""Register the options in the shared group.
When we go to get a config option we will try the backend specific
group first and fall back to the shared group. We override the default
from all the config options for the backend group so we can know if it
was set or not.
"""
for opt in accelerator_opts:
self._safe_register(opt, SHARED_CONF_GROUP)
# Assuming they aren't the same groups, graft on the options into
# the backend group and override its default value.
if self.config_group != SHARED_CONF_GROUP:
self._safe_register(opt, self.config_group)
CONF.set_default(opt.name, None, group=self.config_group)
def append_config_values(self, accelerator_opts):
self._ensure_config_values(accelerator_opts)
def set_default(self, opt_name, default):
CONF.set_default(opt_name, default, group=SHARED_CONF_GROUP)
def get(self, key, default=None):
return getattr(self, key, default)
def safe_get(self, value):
"""get config_group value from CONF
:param value: value.
:return: get config_group value from CONF.
"""
try:
return self.__getattr__(value)
except cfg.NoSuchOptError:
return None
def __getattr__(self, opt_name):
"""Don't use self.X to avoid reentrant call to __getattr__()
:param opt_name: opt_name.
:return: opt_value.
"""
backend_conf = object.__getattribute__(self, 'backend_conf')
opt_value = getattr(backend_conf, opt_name)
if opt_value is None:
shared_conf = object.__getattribute__(self, 'shared_backend_conf')
opt_value = getattr(shared_conf, opt_name)
return opt_value
class Configuration(object):
def __init__(self, accelerator_opts, config_group=None):
"""Initialize configuration.
This shim allows for compatibility with the DEFAULT style of backend
configuration used by some consumers of this configuration helper, as
well as with the accelerator drivers that have all been moved over to
the config_group style.
"""
self.config_group = config_group
if config_group:
self.conf = BackendGroupConfiguration(accelerator_opts,
config_group)
else:
self.conf = DefaultGroupConfiguration()
def append_config_values(self, accelerator_opts):
self.conf.append_config_values(accelerator_opts)
def safe_get(self, value):
"""get value from CONF
:param value: value.
:return: get value from CONF.
"""
return self.conf.safe_get(value)
def __getattr__(self, value):
"""Don't use self.conf to avoid reentrant call to __getattr__()
:param value: value.
:return: getattr(conf, value).
"""
conf = object.__getattribute__(self, 'conf')
return getattr(conf, value)
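
The configuration helpers above are easiest to see in use. The following is a minimal sketch and is not part of the commit: the option name, its default and the 'example_backend' group are hypothetical, and it only assumes the cyborg.accelerator.configuration module shown in this hunk.

from oslo_config import cfg

from cyborg.accelerator import configuration

# Hypothetical driver-specific options (not defined in this commit).
example_opts = [
    cfg.StrOpt('example_device_path',
               default='/dev/example0',
               help='Placeholder device path for an example driver'),
]

# Registers the options in [backend_defaults] and in [example_backend],
# overriding the backend-group default to None so the shared default is
# used unless the deployer sets the option explicitly.
config = configuration.Configuration(example_opts,
                                     config_group='example_backend')

# Falls back from [example_backend] to [backend_defaults]; returns None
# instead of raising cfg.NoSuchOptError for unknown options.
device_path = config.safe_get('example_device_path')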


@@ -0,0 +1,116 @@
"""
SPDK NVMFDRIVER module implementation.
"""
from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
from oslo_log import log as logging
from cyborg.accelerator.common import exception
from cyborg.accelerator.drivers.spdk.util import common_fun
from cyborg.accelerator.drivers.spdk.spdk import SPDKDRIVER
from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK
LOG = logging.getLogger(__name__)
class NVMFDRIVER(SPDKDRIVER):
"""NVMFDRIVER class.
nvmf_tgt server app should be able to implement this driver.
"""
SERVER = 'nvmf'
def __init__(self, *args, **kwargs):
super(NVMFDRIVER, self).__init__(*args, **kwargs)
self.servers = common_fun.discover_servers()
self.py = common_fun.get_py_client(self.SERVER)
def discover_accelerator(self):
if common_fun.check_for_setup_error(self.py, self.SERVER):
return self.get_one_accelerator()
def get_one_accelerator(self):
acc_client = NvmfTgt(self.py)
bdevs = acc_client.get_bdevs()
# Display current blockdev list
subsystems = acc_client.get_nvmf_subsystems()
# Display nvmf subsystems
accelerator_obj = {
'server': self.SERVER,
'bdevs': bdevs,
'subsystems': subsystems
}
return accelerator_obj
def install_accelerator(self, driver_id, driver_type):
pass
def uninstall_accelerator(self, driver_id, driver_type):
pass
def accelerator_list(self):
return self.get_all_accelerators()
def get_all_accelerators(self):
accelerators = []
for server in self.servers:
py_tmp = PySPDK(server)
if py_tmp.is_alive():
accelerators.append(self.get_one_accelerator())
return accelerators
def update(self, driver_type, **kwargs):
pass
def attach_instance(self, instance_id):
pass
def detach_instance(self, instance_id):
pass
def delete_subsystem(self, nqn):
"""Delete an nvmf subsystem
:param nqn: Target nqn(ASCII).
:raise exception: Invalid
"""
if nqn != "":
acc_client = NvmfTgt(self.py)
acc_client.delete_nvmf_subsystem(nqn)
else:
raise exception.Invalid('Delete nvmf subsystem failed.')
def construct_subsystem(self,
nqn,
listen,
hosts,
serial_number,
namespaces
):
"""Add a nvmf subsystem
:param nqn: Target nqn(ASCII).
:param listen: comma-separated list of Listen
<trtype:transport_name traddr:address trsvcid:port_id>
pairs enclosed in quotes. Format:'trtype:transport0
traddr:traddr0 trsvcid:trsvcid0,trtype:transport1
traddr:traddr1 trsvcid:trsvcid1' etc.
Example: 'trtype:RDMA traddr:192.168.100.8 trsvcid:4420,
trtype:RDMA traddr:192.168.100.9 trsvcid:4420.'
:param hosts: Whitespace-separated list of host nqn list.
:param serial_number: Example: 'SPDK00000000000001'.
:param namespaces: Whitespace-separated list of namespaces.
:raise exception: Invalid
"""
if ((namespaces != '' and listen != '') and
(hosts != '' and serial_number != '')) and nqn != '':
acc_client = NvmfTgt(self.py)
acc_client.construct_nvmf_subsystem(nqn,
listen,
hosts,
serial_number,
namespaces
)
else:
raise exception.Invalid('Construct nvmf subsystem failed.')
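
A rough usage sketch for the driver above, not part of the commit: it assumes a running nvmf_tgt app (otherwise check_for_setup_error() raises), and the nqn, listen address, host nqn and serial number below are made-up values following the docstring format.

from cyborg.accelerator.drivers.spdk.nvmf.nvmf import NVMFDRIVER

driver = NVMFDRIVER()

# Report the bdevs and NVMf subsystems exposed by the running nvmf_tgt.
print(driver.discover_accelerator())

# Create a subsystem on an RDMA listener, then tear it down again.
nqn = 'nqn.2018-01.org.example:subsystem1'
driver.construct_subsystem(nqn,
                           'trtype:RDMA traddr:192.168.100.8 trsvcid:4420',
                           'nqn.2018-01.org.example:host1',
                           'SPDK00000000000001',
                           'Malloc0')
driver.delete_subsystem(nqn)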


@@ -0,0 +1,85 @@
"""
Cyborg SPDK driver modules implementation.
"""
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class SPDKDRIVER(object):
"""SPDKDRIVER
This is just a virtual SPDK driver interface.
SPDK-based app server should implement their specific drivers.
"""
@classmethod
def create(cls, server, *args, **kwargs):
for subclass in cls.__subclasses__():
if server == subclass.SERVER:
return subclass(*args, **kwargs)
raise LookupError("Could not find the driver for server %s" % server)
def __init__(self, *args, **kwargs):
super(SPDKDRIVER, self).__init__()
def discover_accelerator(self):
"""Discover a backend accelerator
:return: accelerator list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def install_accelerator(self, driver_id, driver_type):
"""install a backend accelerator
:param driver_id: driver id.
:param driver_type: driver type.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
def uninstall_accelerator(self, driver_id, driver_type):
"""uninstall a backend accelerator
:param driver_id: driver id.
:param driver_type: driver type.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
def accelerator_list(self):
"""Discover a backend accelerator list
:return: accelerator list.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
def update(self, driver_type, **kwargs):
"""update
:param driver_type: driver type.
:param kwargs: kwargs.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
def attach_instance(self, instance_id):
"""attach a backend instance
:param instance_id: instance id.
:return: instance.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
def detach_instance(self, instance_id):
"""detach a backend instance
:param instance_id: instance id.
:return: instance.
:raise: NotImplementedError.
"""
raise NotImplementedError('Subclasses must implement this method.')
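
The create() factory dispatches on each subclass's SERVER attribute, so the concrete drivers only become available once their modules have been imported. A short sketch of that dispatch, assuming the nvmf and vhost drivers added later in this commit:

from cyborg.accelerator.drivers.spdk.spdk import SPDKDRIVER
# Importing the subclasses is what populates SPDKDRIVER.__subclasses__().
from cyborg.accelerator.drivers.spdk.nvmf import nvmf    # noqa: F401
from cyborg.accelerator.drivers.spdk.vhost import vhost  # noqa: F401

driver = SPDKDRIVER.create('nvmf')    # returns an NVMFDRIVER instance
accelerators = driver.accelerator_list()

try:
    SPDKDRIVER.create('iscsi')        # no subclass declares SERVER = 'iscsi'
except LookupError as exc:
    print(exc)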


@@ -0,0 +1,216 @@
"""
Utils for SPDK driver.
"""
import glob
import os
import re
from oslo_config import cfg
from oslo_log import log as logging
from cyborg.accelerator import configuration
from cyborg.accelerator.common import exception
from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK
from cyborg.common.i18n import _
from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
from cyborg.accelerator.drivers.spdk.util.pyspdk.vhost_client import VhostTgt
LOG = logging.getLogger(__name__)
accelerator_opts = [
cfg.StrOpt('spdk_conf_file',
default='/etc/cyborg/spdk.conf',
help=_('SPDK conf file to be used for the SPDK driver')),
cfg.ListOpt('accelerator_servers',
default=['vhost', 'nvmf', 'iscsi'],
help=_('A list of accelerator servers to enable by default')),
cfg.StrOpt('spdk_dir',
default='/home/wewe/spdk',
help=_('The SPDK directory is /home/{user_name}/spdk')),
cfg.StrOpt('device_type',
default='NVMe',
help=_('Backend device type is NVMe by default')),
cfg.BoolOpt('remoteable',
default=False,
help=_('Remoteable is false by default'))
]
CONF = cfg.CONF
CONF.register_opts(accelerator_opts, group=configuration.SHARED_CONF_GROUP)
config = configuration.Configuration(accelerator_opts)
config.append_config_values(accelerator_opts)
SERVERS = config.safe_get('accelerator_servers')
SERVERS_PATTERN = re.compile("|".join(["(%s)" % s for s in SERVERS]))
SPDK_SERVER_APP_DIR = os.path.join(config.safe_get('spdk_dir'), 'app/')
def discover_servers():
"""Discover backend servers according to the CONF
:returns: server list.
"""
servers = set()
for p in glob.glob1(SPDK_SERVER_APP_DIR, "*"):
m = SERVERS_PATTERN.match(p)
if m:
servers.add(m.group())
return list(servers)
def delete_bdev(py, accelerator, name):
"""Delete a blockdev
:param py: py_client.
:param accelerator: accelerator.
:param name: Blockdev name to be deleted.
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.delete_bdev(name)
def kill_instance(py, accelerator, sig_name):
"""Send signal to instance
:param py: py_client.
:param accelerator: accelerator.
:param sig_name: signal will be sent to server.
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.kill_instance(sig_name)
def construct_aio_bdev(py, accelerator, filename, name, block_size):
"""Add a bdev with aio backend
:param py: py_client.
:param accelerator: accelerator.
:param filename: Path to device or file (ex: /dev/sda).
:param name: Block device name.
:param block_size: Block size for this bdev.
:return: name.
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.construct_aio_bdev(filename, name, block_size)
return name
def construct_error_bdev(py, accelerator, basename):
"""Add a bdev with error backend
:param py: py_client.
:param accelerator: accelerator.
:param basename: Path to device or file (ex: /dev/sda).
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.construct_error_bdev(basename)
def construct_nvme_bdev(py,
accelerator,
name,
trtype,
traddr,
adrfam,
trsvcid,
subnqn
):
"""Add a bdev with nvme backend
:param py: py_client.
:param accelerator: accelerator.
:param name: Name of the bdev.
:param trtype: NVMe-oF target trtype: e.g., rdma, pcie.
:param traddr: NVMe-oF target address: e.g., an ip address
or BDF.
:param adrfam: NVMe-oF target adrfam: e.g., ipv4, ipv6, ib,
fc, intra_host.
:param trsvcid: NVMe-oF target trsvcid: e.g., a port number.
:param subnqn: NVMe-oF target subnqn.
:return: name.
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.construct_nvme_bdev(name,
trtype,
traddr,
adrfam,
trsvcid,
subnqn
)
return name
def construct_null_bdev(py,
accelerator,
name,
total_size,
block_size
):
"""Add a bdev with null backend
:param py: py_client.
:param accelerator: accelerator.
:param name: Block device name.
:param total_size: Size of null bdev in MB (int > 0).
:param block_size: Block size for this bdev.
:return: name.
"""
acc_client = get_accelerator_client(py, accelerator)
acc_client.construct_null_bdev(name, total_size, block_size)
return name
def get_py_client(server):
"""Get the py_client instance
:param server: server.
:return: PySPDK client instance.
:raise: InvalidAccelerator.
"""
if server in SERVERS:
py = PySPDK(server)
return py
else:
msg = (_("Could not find %s accelerator") % server)
raise exception.InvalidAccelerator(msg)
def check_for_setup_error(py, server):
"""Check server's status
:param py: py_client.
:param server: server.
:return: Boolean.
:raise: AcceleratorException.
"""
if py.is_alive():
return True
else:
msg = (_("%s accelerator is down") % server)
raise exception.AcceleratorException(msg)
def get_accelerator_client(py, accelerator):
"""Get the specific client that communicates with server
:param py: py_client.
:param accelerator: accelerator.
:return: acc_client.
:raise: InvalidAccelerator.
"""
acc_client = None
if accelerator == 'vhost':
acc_client = VhostTgt(py)
return acc_client
elif accelerator == 'nvmf':
acc_client = NvmfTgt(py)
return acc_client
else:
exc_msg = (_("accelerator_client for %(accelerator)s is missing")
% {'accelerator': accelerator})
raise exception.InvalidAccelerator(exc_msg)
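
A sketch of how these helpers chain together, not part of the commit: it assumes an SPDK tree at the configured spdk_dir with the vhost or nvmf app already running, and the aio bdev path, name and block size are placeholder values.

from cyborg.accelerator.drivers.spdk.util import common_fun

for server in common_fun.discover_servers():       # e.g. ['nvmf', 'vhost']
    if server not in ('vhost', 'nvmf'):
        continue    # get_accelerator_client() only knows these two servers
    py = common_fun.get_py_client(server)           # InvalidAccelerator if unknown
    if not py.is_alive():
        continue
    common_fun.check_for_setup_error(py, server)    # raises if the app is down
    # '/dev/sda', 'aio0' and 512 are placeholder values.
    name = common_fun.construct_aio_bdev(py, server, '/dev/sda', 'aio0', 512)
    common_fun.delete_bdev(py, server, name)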


@@ -0,0 +1,119 @@
import json
class NvmfTgt(object):
def __init__(self, py):
super(NvmfTgt, self).__init__()
self.py = py
def get_rpc_methods(self):
rpc_methods = self._get_json_objs(
'get_rpc_methods', '10.0.2.15')
return rpc_methods
def get_bdevs(self):
block_devices = self._get_json_objs(
'get_bdevs', '10.0.2.15')
return block_devices
def delete_bdev(self, name):
sub_args = [name]
res = self.py.exec_rpc('delete_bdev', '10.0.2.15', sub_args=sub_args)
print(res)
def kill_instance(self, sig_name):
sub_args = [sig_name]
res = self.py.exec_rpc('kill_instance', '10.0.2.15', sub_args=sub_args)
print(res)
def construct_aio_bdev(self, filename, name, block_size):
sub_args = [filename, name, str(block_size)]
res = self.py.exec_rpc(
'construct_aio_bdev',
'10.0.2.15',
sub_args=sub_args)
print(res)
def construct_error_bdev(self, basename):
sub_args = [basename]
res = self.py.exec_rpc(
'construct_error_bdev',
'10.0.2.15',
sub_args=sub_args)
print(res)
def construct_nvme_bdev(
self,
name,
trtype,
traddr,
adrfam=None,
trsvcid=None,
subnqn=None):
sub_args = ["-b", "-t", "-a"]
sub_args.insert(1, name)
sub_args.insert(2, trtype)
sub_args.insert(3, traddr)
if adrfam is not None:
sub_args.append("-f")
sub_args.append(adrfam)
if trsvcid is not None:
sub_args.append("-s")
sub_args.append(trsvcid)
if subnqn is not None:
sub_args.append("-n")
sub_args.append(subnqn)
res = self.py.exec_rpc(
'construct_nvme_bdev',
'10.0.2.15',
sub_args=sub_args)
return res
def construct_null_bdev(self, name, total_size, block_size):
sub_args = [name, str(total_size), str(block_size)]
res = self.py.exec_rpc(
'construct_null_bdev',
'10.0.2.15',
sub_args=sub_args)
return res
def construct_malloc_bdev(self, total_size, block_size):
sub_args = [str(total_size), str(block_size)]
res = self.py.exec_rpc(
'construct_malloc_bdev',
'10.0.2.15',
sub_args=sub_args)
print(res)
def delete_nvmf_subsystem(self, nqn):
sub_args = [nqn]
res = self.py.exec_rpc(
'delete_nvmf_subsystem',
'10.0.2.15',
sub_args=sub_args)
print(res)
def construct_nvmf_subsystem(
self,
nqn,
listen,
hosts,
serial_number,
namespaces):
sub_args = [nqn, listen, hosts, serial_number, namespaces]
res = self.py.exec_rpc(
'construct_nvmf_subsystem',
'10.0.2.15',
sub_args=sub_args)
print(res)
def get_nvmf_subsystems(self):
subsystems = self._get_json_objs(
'get_nvmf_subsystems', '10.0.2.15')
return subsystems
def _get_json_objs(self, method, server_ip):
res = self.py.exec_rpc(method, server_ip)
json_obj = json.loads(res)
return json_obj
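
NvmfTgt is a thin wrapper that shells out to SPDK's rpc.py through PySPDK. A minimal sketch of driving it directly, assuming rpc.py is reachable from the working directory and an nvmf_tgt is listening on the 10.0.2.15 address hard-coded above; the malloc bdev sizes are arbitrary.

from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK

py = PySPDK('nvmf')            # name matched against process command lines
if py.is_alive():
    tgt = NvmfTgt(py)
    tgt.construct_malloc_bdev(64, 512)    # 64 MB bdev with 512-byte blocks
    print(tgt.get_bdevs())                # JSON-decoded blockdev list
    print(tgt.get_nvmf_subsystems())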


@@ -0,0 +1,82 @@
import psutil
import re
import os
import subprocess
class PySPDK(object):
def __init__(self, pname):
super(PySPDK, self).__init__()
self.pid = None
self.pname = pname
def start_server(self, spdk_dir, server_name):
if not self.is_alive():
self.init_hugepages(spdk_dir)
server_dir = os.path.join(spdk_dir, 'app/')
file_dir = self._search_file(server_dir, server_name)
print(file_dir)
os.chdir(file_dir)
p = subprocess.Popen(
'sudo ./%s' % server_name,
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out
def init_hugepages(self, spdk_dir):
huge_dir = os.path.join(spdk_dir, 'scripts/')
file_dir = self._search_file(huge_dir, 'setup.sh')
print(file_dir)
os.chdir(file_dir)
p = subprocess.Popen(
'sudo ./setup.sh',
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out
@staticmethod
def _search_file(spdk_dir, file_name):
for dirpath, dirnames, filenames in os.walk(spdk_dir):
for filename in filenames:
if filename == file_name:
return dirpath
def _get_process_id(self):
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'cmdline'])
if re.search(self.pname, str(pinfo.get('cmdline'))):
self.pid = pinfo.get('pid')
return self.pid
except psutil.NoSuchProcess:
print "NoSuchProcess:%s" % self.pname
print "NoSuchProcess:%s" % self.pname
return self.pid
def is_alive(self):
self.pid = self._get_process_id()
if self.pid:
p = psutil.Process(self.pid)
if p.is_running():
return True
return False
@staticmethod
def exec_rpc(method, server='127.0.0.1', port=5260, sub_args=None):
exec_cmd = ["./rpc.py", "-s", "-p"]
exec_cmd.insert(2, server)
exec_cmd.insert(4, str(port))
exec_cmd.insert(5, method)
if sub_args is None:
sub_args = []
exec_cmd.extend(sub_args)
p = subprocess.Popen(
exec_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = p.communicate()
return out
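
PySPDK itself only locates or launches the server process and shells out to rpc.py. A hedged sketch of direct use: the spdk_dir path is only the configured default placeholder, and exec_rpc() expects rpc.py in the current working directory (note the hard-coded './rpc.py').

from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK

py = PySPDK('vhost')                  # matched against process command lines
if not py.is_alive():
    # Runs scripts/setup.sh for hugepages, then 'sudo ./vhost' from app/vhost.
    py.start_server('/home/wewe/spdk', 'vhost')

# Equivalent to: ./rpc.py -s 127.0.0.1 -p 5260 get_rpc_methods
out = py.exec_rpc('get_rpc_methods', '127.0.0.1', 5260)
print(out)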


@@ -0,0 +1,121 @@
import json
class VhostTgt(object):
def __init__(self, py):
super(VhostTgt, self).__init__()
self.py = py
def get_rpc_methods(self):
rpc_methods = self._get_json_objs('get_rpc_methods', '127.0.0.1')
return rpc_methods
def get_scsi_devices(self):
scsi_devices = self._get_json_objs(
'get_scsi_devices', '127.0.0.1')
return scsi_devices
def get_luns(self):
luns = self._get_json_objs('get_luns', '127.0.0.1')
return luns
def get_interfaces(self):
interfaces = self._get_json_objs(
'get_interfaces', '127.0.0.1')
return interfaces
def add_ip_address(self, ifc_index, ip_addr):
sub_args = [ifc_index, ip_addr]
res = self.py.exec_rpc(
'add_ip_address',
'127.0.0.1',
sub_args=sub_args)
return res
def delete_ip_address(self, ifc_index, ip_addr):
sub_args = [ifc_index, ip_addr]
res = self.py.exec_rpc(
'delete_ip_address',
'127.0.0.1',
sub_args=sub_args)
return res
def get_bdevs(self):
block_devices = self._get_json_objs(
'get_bdevs', '127.0.0.1')
return block_devices
def delete_bdev(self, name):
sub_args = [name]
res = self.py.exec_rpc('delete_bdev', '127.0.0.1', sub_args=sub_args)
print(res)
def kill_instance(self, sig_name):
sub_args = [sig_name]
res = self.py.exec_rpc('kill_instance', '127.0.0.1', sub_args=sub_args)
print(res)
def construct_aio_bdev(self, filename, name, block_size):
sub_args = [filename, name, str(block_size)]
res = self.py.exec_rpc(
'construct_aio_bdev',
'127.0.0.1',
sub_args=sub_args)
print(res)
def construct_error_bdev(self, basename):
sub_args = [basename]
res = self.py.exec_rpc(
'construct_error_bdev',
'127.0.0.1',
sub_args=sub_args)
print(res)
def construct_nvme_bdev(
self,
name,
trtype,
traddr,
adrfam=None,
trsvcid=None,
subnqn=None):
sub_args = ["-b", "-t", "-a"]
sub_args.insert(1, name)
sub_args.insert(2, trtype)
sub_args.insert(3, traddr)
if adrfam is not None:
sub_args.append("-f")
sub_args.append(adrfam)
if trsvcid is not None:
sub_args.append("-s")
sub_args.append(trsvcid)
if subnqn is not None:
sub_args.append("-n")
sub_args.append(subnqn)
res = self.py.exec_rpc(
'construct_nvme_bdev',
'127.0.0.1',
sub_args=sub_args)
return res
def construct_null_bdev(self, name, total_size, block_size):
sub_args = [name, str(total_size), str(block_size)]
res = self.py.exec_rpc(
'construct_null_bdev',
'127.0.0.1',
sub_args=sub_args)
return res
def construct_malloc_bdev(self, total_size, block_size):
sub_args = [str(total_size), str(block_size)]
res = self.py.exec_rpc(
'construct_malloc_bdev',
'127.0.0.1',
sub_args=sub_args)
print(res)
def _get_json_objs(self, method, server_ip):
res = self.py.exec_rpc(method, server_ip)
json_obj = json.loads(res)
return json_obj


@@ -0,0 +1,95 @@
"""
SPDK VHOSTDRIVER module implementation.
"""
from cyborg.accelerator.drivers.spdk.util.pyspdk.vhost_client import VhostTgt
from oslo_log import log as logging
from cyborg.accelerator.drivers.spdk.util import common_fun
from cyborg.accelerator.drivers.spdk.spdk import SPDKDRIVER
from cyborg.accelerator.drivers.spdk.util.pyspdk.py_spdk import PySPDK
LOG = logging.getLogger(__name__)
class VHOSTDRIVER(SPDKDRIVER):
"""VHOSTDRIVER class.
vhost server app should be able to implement this driver.
"""
SERVER = 'vhost'
def __init__(self, *args, **kwargs):
super(VHOSTDRIVER, self).__init__(*args, **kwargs)
self.servers = common_fun.discover_servers()
self.py = common_fun.get_py_client(self.SERVER)
def discover_accelerator(self):
if common_fun.check_for_setup_error(self.py, self.SERVER):
return self.get_one_accelerator()
def get_one_accelerator(self):
acc_client = VhostTgt(self.py)
bdevs = acc_client.get_bdevs()
# Display current blockdev list
scsi_devices = acc_client.get_scsi_devices()
# Display SCSI devices
luns = acc_client.get_luns()
# Display active LUNs
interfaces = acc_client.get_interfaces()
# Display current interface list
accelerator_obj = {
'server': self.SERVER,
'bdevs': bdevs,
'scsi_devices': scsi_devices,
'luns': luns,
'interfaces': interfaces
}
return accelerator_obj
def install_accelerator(self, driver_id, driver_type):
pass
def uninstall_accelerator(self, driver_id, driver_type):
pass
def accelerator_list(self):
return self.get_all_accelerators()
def get_all_accelerators(self):
accelerators = []
for server in self.servers:
py_tmp = PySPDK(server)
if py_tmp.is_alive():
accelerators.append(self.get_one_accelerator())
return accelerators
def update(self, driver_type, **kwargs):
pass
def attach_instance(self, instance_id):
pass
def detach_instance(self, instance_id):
pass
def add_ip_address(self, ifc_index, ip_addr):
"""Add IP address
:param ifc_index: ifc index of the nic device.
:param ip_addr: ip address will be added.
:return: ip_address
"""
acc_client = VhostTgt(self.py)
return acc_client.add_ip_address(ifc_index, ip_addr)
def delete_ip_address(self, ifc_index, ip_addr):
"""Delete IP address
:param ifc_index: ifc index of the nic device.
:param ip_addr: ip address will be deleted.
:return: ip_address
"""
acc_client = VhostTgt(self.py)
return acc_client.delete_ip_address(ifc_index, ip_addr)
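
Mirroring the NVMf sketch, a short illustration of the vhost driver's extra IP-management calls, not part of the commit: a running vhost app is assumed, and the interface index and address are made-up values, passed as strings because they go straight into the rpc.py argument list.

from cyborg.accelerator.drivers.spdk.vhost.vhost import VHOSTDRIVER

driver = VHOSTDRIVER()
print(driver.discover_accelerator())   # bdevs, scsi_devices, luns, interfaces

# Placeholder interface index and address.
driver.add_ip_address('1', '192.168.1.10')
driver.delete_ip_address('1', '192.168.1.10')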


@@ -0,0 +1,131 @@
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyborg.tests import base
import mock
from cyborg.accelerator.drivers.spdk.nvmf.nvmf import NVMFDRIVER
from cyborg.accelerator.drivers.spdk.util import common_fun
from cyborg.accelerator.drivers.spdk.util.pyspdk.nvmf_client import NvmfTgt
class TestNVMFDRIVER(base.TestCase):
def setUp(self):
super(TestNVMFDRIVER, self).setUp()
self.nvmf_driver = NVMFDRIVER()
def tearDown(self):
super(TestNVMFDRIVER, self).tearDown()
self.nvmf_driver = None
@mock.patch.object(NVMFDRIVER, 'get_one_accelerator')
def test_discover_accelerator(self, mock_get_one_accelerator):
expect_accelerator = {
'server': 'nvmf',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'subsystems': [{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
}
alive = mock.Mock(return_value=False)
self.nvmf_driver.py.is_alive = alive
check_error = mock.Mock(return_value=False)
common_fun.check_for_setup_error = check_error
self.assertFalse(
mock_get_one_accelerator.called,
"Failed to discover_accelerator if py not alive."
)
alive = mock.Mock(return_value=True)
self.nvmf_driver.py.is_alive = alive
check_error = mock.Mock(return_value=True)
common_fun.check_for_setup_error = check_error
acce_client = NvmfTgt(self.nvmf_driver.py)
bdevs_fake = [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}]
bdev_list = mock.Mock(return_value=bdevs_fake)
acce_client.get_bdevs = bdev_list
subsystems_fake = [{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
subsystem_list = mock.Mock(return_value=subsystems_fake)
acce_client.get_nvmf_subsystems = subsystem_list
accelerator_fake = {
'server': self.nvmf_driver.SERVER,
'bdevs': acce_client.get_bdevs(),
'subsystems': acce_client.get_nvmf_subsystems()
}
success_send = mock.Mock(return_value=accelerator_fake)
self.nvmf_driver.get_one_accelerator = success_send
accelerator = self.nvmf_driver.discover_accelerator()
self.assertEqual(accelerator, expect_accelerator)
def test_accelerator_list(self):
expect_accelerators = [{
'server': 'nvmf',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'subsystems':
[{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
},
{
'server': 'nvmf_tgt',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'subsystems':
[{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
}
]
success_send = mock.Mock(return_value=expect_accelerators)
self.nvmf_driver.get_all_accelerators = success_send
self.assertEqual(self.nvmf_driver.accelerator_list(),
expect_accelerators)
def test_install_accelerator(self):
pass
def test_uninstall_accelerator(self):
pass
def test_update(self):
pass
def test_attach_instance(self):
pass
def test_detach_instance(self):
pass
def test_delete_subsystem(self):
pass
def test_construct_subsystem(self):
pass


@@ -0,0 +1,144 @@
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyborg.tests import base
import mock
from cyborg.accelerator.drivers.spdk.vhost.vhost import VHOSTDRIVER
from cyborg.accelerator.drivers.spdk.util import common_fun
from cyborg.accelerator.drivers.spdk.util.pyspdk.vhost_client import VhostTgt
class TestVHOSTDRIVER(base.TestCase):
def setUp(self):
super(TestVHOSTDRIVER, self).setUp()
self.vhost_driver = VHOSTDRIVER()
def tearDown(self):
super(TestVHOSTDRIVER, self).tearDown()
self.vhost_driver = None
@mock.patch.object(VHOSTDRIVER, 'get_one_accelerator')
def test_discover_accelerator(self, mock_get_one_accelerator):
expect_accelerator = {
'server': 'vhost',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'scsi_devices': [],
'luns': [{"claimed": True,
"name": "Malloc0"}],
'interfaces': [{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
}
alive = mock.Mock(return_value=False)
self.vhost_driver.py.is_alive = alive
check_error = mock.Mock(return_value=False)
common_fun.check_for_setup_error = check_error
self.assertFalse(
mock_get_one_accelerator.called,
"Failed to discover_accelerator if py not alive."
)
alive = mock.Mock(return_value=True)
self.vhost_driver.py.is_alive = alive
check_error = mock.Mock(return_value=True)
common_fun.check_for_setup_error = check_error
acce_client = VhostTgt(self.vhost_driver.py)
bdevs_fake = [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}]
bdev_list = mock.Mock(return_value=bdevs_fake)
acce_client.get_bdevs = bdev_list
scsi_devices_fake = []
scsi_device_list = mock.Mock(return_value=scsi_devices_fake)
acce_client.get_scsi_devices = scsi_device_list
luns_fake = [{"claimed": True,
"name": "Malloc0"}]
lun_list = mock.Mock(return_value=luns_fake)
acce_client.get_luns = lun_list
interfaces_fake = \
[{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
interface_list = mock.Mock(return_value=interfaces_fake)
acce_client.get_interfaces = interface_list
accelerator_fake = {
'server': self.vhost_driver.SERVER,
'bdevs': acce_client.get_bdevs(),
'scsi_devices': acce_client.get_scsi_devices(),
'luns': acce_client.get_luns(),
'interfaces': acce_client.get_interfaces()
}
success_send = mock.Mock(return_value=accelerator_fake)
self.vhost_driver.get_one_accelerator = success_send
accelerator = self.vhost_driver.discover_accelerator()
self.assertEqual(accelerator, expect_accelerator)
def test_accelerator_list(self):
expect_accelerators = [{
'server': 'vhost',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'scsi_devices': [],
'luns': [{"claimed": True,
"name": "Malloc0"}],
'interfaces': [{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
},
{
'server': 'vhost_tgt',
'bdevs': [{"num_blocks": 131072,
"name": "nvme1",
"block_size": 512
}],
'scsi_devices': [],
'luns': [{"claimed": True,
"name": "Malloc0"}],
'interfaces': [{"core": 0,
"nqn": "nqn.2018-01.org.nvmexpress.discovery",
"hosts": []
}]
}
]
success_send = mock.Mock(return_value=expect_accelerators)
self.vhost_driver.get_all_accelerators = success_send
self.assertEqual(self.vhost_driver.accelerator_list(),
expect_accelerators)
def test_install_accelerator(self):
pass
def test_uninstall_accelerator(self):
pass
def test_update(self):
pass
def test_attach_instance(self):
pass
def test_detach_instance(self):
pass
def test_delete_ip_address(self):
pass
def test_add_ip_address(self):
pass


@@ -23,3 +23,5 @@ alembic>=0.8.10 # MIT
stevedore>=1.20.0 # Apache-2.0
keystonemiddleware>=4.17.0 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
psutil>=3.2.2 # BSD
mock>=2.0.0 # BSD