Remove Virtual Storage Array (VSA) code

* Remove core vsa code (api, manager, drivers)
* Remove nova-vsa executable
* Remove OpenStack Compute API v2 vsa extension
* Remove vsa scheduler
* Remove vsa db api methods
* Remove Zadara volume driver
* Do not migrate out any existing data
* Fixes bug 954490

Change-Id: Idab3d60796d5edbc23ef9f0887fcc1af558c6215
This commit is contained in:
Brian Waldon 2012-03-14 17:24:07 -07:00 committed by Chris Behrens
parent 30b8e35e80
commit a3bab242db
28 changed files with 2 additions and 4656 deletions

View File

@ -70,7 +70,7 @@ if __name__ == '__main__':
LOG.exception(_('Failed to load %s') % mod.__name__)
for binary in ['nova-compute', 'nova-volume',
'nova-network', 'nova-scheduler', 'nova-vsa', 'nova-cert']:
'nova-network', 'nova-scheduler', 'nova-cert']:
try:
servers.append(service.Service.create(binary=binary))
except (Exception, SystemExit):

View File

@ -87,7 +87,6 @@ from nova import quota
from nova import rpc
from nova import utils
from nova import version
from nova import vsa
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.compute import instance_types
@ -1116,478 +1115,6 @@ class VersionCommands(object):
self.list()
class VsaCommands(object):
    """Methods for dealing with VSAs"""

    def __init__(self, *args, **kwargs):
        self.manager = manager.AuthManager()
        self.vsa_api = vsa.API()
        self.context = context.get_admin_context()

        # printf-style column layouts shared by the _print_* helpers below.
        self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
            "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
            "%(az)-10s %(time)-10s"
        self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
            "%(stat)-10s %(att)-20s %(time)s"
        self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
            "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
        self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
            "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
            "%(stat)-10s %(host)-15s %(time)s"

    def _print_vsa_header(self):
        """Print the column header row for the VSA table."""
        print self._format_str_vsa %\
            dict(id=_('ID'),
                 vsa_id=_('vsa_id'),
                 name=_('displayName'),
                 type=_('vc_type'),
                 vcs=_('vc_cnt'),
                 drives=_('drive_cnt'),
                 stat=_('status'),
                 az=_('AZ'),
                 time=_('createTime'))

    def _print_vsa(self, vsa):
        """Print a single VSA row using the VSA column layout."""
        print self._format_str_vsa %\
            dict(id=vsa['id'],
                 vsa_id=vsa['name'],
                 name=vsa['display_name'],
                 type=vsa['vsa_instance_type'].get('name', None),
                 vcs=vsa['vc_count'],
                 drives=vsa['vol_count'],
                 stat=vsa['status'],
                 az=vsa['availability_zone'],
                 time=str(vsa['created_at']))

    def _print_volume_header(self):
        """Print the header for the per-VSA volumes sub-table."""
        print _(' === Volumes ===')
        print self._format_str_volume %\
            dict(id=_('ID'),
                 name=_('name'),
                 size=_('size'),
                 stat=_('status'),
                 att=_('attachment'),
                 time=_('createTime'))

    def _print_volume(self, vol):
        """Print a single volume row."""
        print self._format_str_volume %\
            dict(id=vol['id'],
                 name=vol['display_name'] or vol['name'],
                 size=vol['size'],
                 stat=vol['status'],
                 att=vol['attach_status'],
                 time=str(vol['created_at']))

    def _print_drive_header(self):
        """Print the header for the per-VSA drives sub-table."""
        print _(' === Drives ===')
        print self._format_str_drive %\
            dict(id=_('ID'),
                 name=_('name'),
                 size=_('size'),
                 stat=_('status'),
                 host=_('host'),
                 type=_('type'),
                 tname=_('typeName'),
                 time=_('createTime'))

    def _print_drive(self, drive):
        """Print a single drive row; drive type name may be absent."""
        if drive['volume_type_id'] is not None and drive.get('volume_type'):
            drive_type_name = drive['volume_type'].get('name')
        else:
            drive_type_name = ''

        print self._format_str_drive %\
            dict(id=drive['id'],
                 name=drive['display_name'],
                 size=drive['size'],
                 stat=drive['status'],
                 host=drive['host'],
                 type=drive['volume_type_id'],
                 tname=drive_type_name,
                 time=str(drive['created_at']))

    def _print_instance_header(self):
        """Print the header for the per-VSA VC instances sub-table."""
        print _(' === Instances ===')
        print self._format_str_instance %\
            dict(id=_('ID'),
                 name=_('name'),
                 dname=_('disp_name'),
                 image=_('image'),
                 type=_('type'),
                 fl_ip=_('floating_IP'),
                 fx_ip=_('fixed_IP'),
                 stat=_('status'),
                 host=_('host'),
                 time=_('createTime'))

    def _print_instance(self, vc):
        """Print one VC instance row, preferring its floating IP."""
        fixed_addr = None
        floating_addr = None
        if vc['fixed_ips']:
            fixed = vc['fixed_ips'][0]
            fixed_addr = fixed['address']
            if fixed['floating_ips']:
                floating_addr = fixed['floating_ips'][0]['address']
        floating_addr = floating_addr or fixed_addr

        print self._format_str_instance %\
            dict(id=vc['id'],
                 name=ec2utils.id_to_ec2_id(vc['id']),
                 dname=vc['display_name'],
                 image=('ami-%08x' % int(vc['image_ref'])),
                 type=vc['instance_type']['name'],
                 fl_ip=floating_addr,
                 fx_ip=fixed_addr,
                 stat=vc['vm_state'],
                 host=vc['host'],
                 time=str(vc['created_at']))

    def _list(self, context, vsas, print_drives=False,
              print_volumes=False, print_instances=False):
        """Print the given VSAs, optionally with their sub-resources."""
        if vsas:
            self._print_vsa_header()

        for vsa in vsas:
            self._print_vsa(vsa)
            vsa_id = vsa.get('id')

            if print_instances:
                instances = self.vsa_api.get_all_vsa_instances(context, vsa_id)
                if instances:
                    print
                    self._print_instance_header()
                    for instance in instances:
                        self._print_instance(instance)
                    print

            if print_drives:
                drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
                if drives:
                    self._print_drive_header()
                    for drive in drives:
                        self._print_drive(drive)
                    print

            if print_volumes:
                volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id)
                if volumes:
                    self._print_volume_header()
                    for volume in volumes:
                        self._print_volume(volume)
                    print

    @args('--storage', dest='storage',
          metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]",
          help='Initial storage allocation for VSA')
    @args('--name', dest='name', metavar="<name>", help='VSA name')
    @args('--description', dest='description', metavar="<description>",
          help='VSA description')
    @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
    @args('--instance_type', dest='instance_type_name', metavar="<name>",
          help='Instance type name')
    @args('--image', dest='image_name', metavar="<name>", help='Image name')
    @args('--shared', dest='shared', action="store_true", default=False,
          help='Use shared drives')
    @args('--az', dest='az', metavar="<zone:host>", help='Availability zone')
    @args('--user', dest="user_id", metavar='<User name>',
          help='User name')
    @args('--project', dest="project_id", metavar='<Project name>',
          help='Project name')
    def create(self, storage='[]', name=None, description=None, vc_count=1,
               instance_type_name=None, image_name=None, shared=None,
               az=None, user_id=None, project_id=None):
        """Create a VSA."""
        if project_id is None:
            try:
                # EC2 access keys have the form "access:project".
                project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
            except Exception as exc:
                # NOTE(review): '"%(exc)s" % exc' applies a mapping-style
                # format to an exception object and will itself raise
                # TypeError; '% locals()' was presumably intended -- confirm.
                print _("Failed to retrieve project id: %(exc)s") % exc
                raise

        if user_id is None:
            try:
                project = self.manager.get_project(project_id)
                user_id = project.project_manager_id
            except Exception as exc:
                # NOTE(review): same suspicious '% exc' as above.
                print _("Failed to retrieve user info: %(exc)s") % exc
                raise

        is_admin = self.manager.is_admin(user_id)
        ctxt = context.RequestContext(user_id, project_id, is_admin=is_admin)
        if not is_admin and \
           not self.manager.is_project_member(user_id, project_id):
            msg = _("%(user_id)s must be an admin or a "
                    "member of %(project_id)s")
            logging.warn(msg % locals())
            raise ValueError(msg % locals())

        # Sanity check for storage string
        storage_list = []
        if storage is not None:
            try:
                # Safe literal parsing of the storage allocation list.
                storage_list = ast.literal_eval(storage)
            except Exception:
                print _("Invalid string format %s") % storage
                raise

            for node in storage_list:
                if ('drive_name' not in node) or ('num_drives' not in node):
                    # NOTE(review): the trailing comma prints a tuple instead
                    # of interpolating str(node) into the message.
                    print (_("Invalid string format for element %s. " \
                             "Expecting keys 'drive_name' & 'num_drives'"),
                           str(node))
                    raise KeyError

        if instance_type_name == '':
            instance_type_name = None
        instance_type = instance_types.get_instance_type_by_name(
            instance_type_name)

        if image_name == '':
            image_name = None

        if shared in [None, False, "--full_drives"]:
            shared = False
        elif shared in [True, "--shared"]:
            shared = True
        else:
            # NOTE(review): the quote/backslash placement embeds literal '"'
            # characters in this error message -- looks unintentional.
            raise ValueError(_('Shared parameter should be set either to "\
                "--shared or --full_drives'))

        values = {
            'display_name': name,
            'display_description': description,
            'vc_count': int(vc_count),
            'instance_type': instance_type,
            'image_name': image_name,
            'availability_zone': az,
            'storage': storage_list,
            'shared': shared,
            }

        result = self.vsa_api.create(ctxt, **values)
        self._list(ctxt, [result])

    @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
    @args('--name', dest='name', metavar="<name>", help='VSA name')
    @args('--description', dest='description', metavar="<description>",
          help='VSA description')
    @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs')
    def update(self, vsa_id, name=None, description=None, vc_count=None):
        """Updates name/description of vsa and number of VCs."""
        values = {}
        if name is not None:
            values['display_name'] = name
        if description is not None:
            values['display_description'] = description
        if vc_count is not None:
            values['vc_count'] = int(vc_count)

        vsa_id = ec2utils.ec2_id_to_id(vsa_id)
        result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values)
        self._list(self.context, [result])

    @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID')
    def delete(self, vsa_id):
        """Delete a VSA."""
        vsa_id = ec2utils.ec2_id_to_id(vsa_id)
        self.vsa_api.delete(self.context, vsa_id)

    @args('--id', dest='vsa_id', metavar="<vsa_id>",
          help='VSA ID (optional)')
    @args('--all', dest='all', action="store_true", default=False,
          help='Show all available details')
    @args('--drives', dest='drives', action="store_true",
          help='Include drive-level details')
    @args('--volumes', dest='volumes', action="store_true",
          help='Include volume-level details')
    @args('--instances', dest='instances', action="store_true",
          help='Include instance-level details')
    def list(self, vsa_id=None, all=False,
             drives=False, volumes=False, instances=False):
        """Describe all available VSAs (or particular one)."""
        vsas = []
        if vsa_id is not None:
            internal_id = ec2utils.ec2_id_to_id(vsa_id)
            vsa = self.vsa_api.get(self.context, internal_id)
            vsas.append(vsa)
        else:
            vsas = self.vsa_api.get_all(self.context)

        # --all implies every per-VSA detail section.
        if all:
            drives = volumes = instances = True

        self._list(self.context, vsas, drives, volumes, instances)

    def update_capabilities(self):
        """Forces updates capabilities on all nova-volume nodes."""
        rpc.fanout_cast(context.get_admin_context(),
                        FLAGS.volume_topic,
                        {"method": "notification",
                         "args": {"event": "startup"}})
class VsaDriveTypeCommands(object):
    """Methods for dealing with VSA drive types"""

    def __init__(self, *args, **kwargs):
        super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
        self.context = context.get_admin_context()
        # Default generated name: <type>_<size>GB_<rpm>RPM.
        self._drive_type_template = '%s_%sGB_%sRPM'

    def _list(self, drives):
        """Print a table of drive types; *drives* maps name -> volume type."""
        format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
        if len(drives):
            print format_str %\
                (_('ID'),
                 _('name'),
                 _('type'),
                 _('size_gb'),
                 _('rpm'),
                 _('capabilities'),
                 _('visible'),
                 _('createTime'))

        for name, vol_type in drives.iteritems():
            # Drive characteristics live in the volume type's extra_specs.
            drive = vol_type.get('extra_specs')
            print format_str %\
                (str(vol_type['id']),
                 drive['drive_name'],
                 drive['drive_type'],
                 drive['drive_size'],
                 drive['drive_rpm'],
                 drive.get('capabilities', ''),
                 str(drive.get('visible', '')),
                 str(vol_type['created_at']))

    @args('--type', dest='type', metavar="<type>",
          help='Drive type (SATA, SAS, SSD, etc.)')
    @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
    @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
    @args('--capabilities', dest='capabilities', default=None,
          metavar="<string>", help='Different capabilities')
    @args('--hide', dest='hide', action="store_true", default=False,
          help='Show or hide drive')
    @args('--name', dest='name', metavar="<name>", help='Drive name')
    def create(self, type, size_gb, rpm, capabilities=None,
               hide=False, name=None):
        """Create drive type."""

        # Normalize the various truthy CLI spellings of --hide.
        hide = True if hide in [True, "True", "--hide", "hide"] else False

        if name is None:
            name = self._drive_type_template % (type, size_gb, rpm)

        extra_specs = {'type': 'vsa_drive',
                       'drive_name': name,
                       'drive_type': type,
                       'drive_size': size_gb,
                       'drive_rpm': rpm,
                       'visible': True,
                       }
        if hide:
            extra_specs['visible'] = False

        if capabilities is not None and capabilities != '':
            extra_specs['capabilities'] = capabilities

        try:
            volume_types.create(self.context, name, extra_specs)
            result = volume_types.get_volume_type_by_name(self.context, name)
            self._list({name: result})
        except exception.VolumeTypeExists:
            print
            print "Volume Type Exists"
            print "Please ensure volume_type name is unique."
            print "Currently defined volume types:"
            print
            self.list()

    @args('--name', dest='name', metavar="<name>", help='Drive name')
    def delete(self, name):
        """Marks volume types as deleted"""
        try:
            volume_types.destroy(self.context, name)
        except exception.InvalidVolumeType:
            print "Valid volume type name is required"
            sys.exit(1)
        except exception.DBError, e:
            print "DB Error: %s" % e
            sys.exit(2)
        except Exception:
            sys.exit(3)
        else:
            print "%s deleted" % name

    @args('--all', dest='all', action="store_true", default=False,
          help='Show all drives (including invisible)')
    @args('--name', dest='name', metavar="<name>",
          help='Show only specified drive')
    def list(self, all=False, name=None):
        """Describe all available VSA drive types (or particular one)."""
        # NOTE(review): this normalization maps the store_true default (False)
        # and the literal strings to False, everything else to True -- i.e.
        # any other value behaves like --all. Confirm against CLI plumbing.
        all = False if all in ["--all", False, "False"] else True

        search_opts = {'extra_specs': {'type': 'vsa_drive'}}
        if name is not None:
            search_opts['extra_specs']['name'] = name

        if not all:
            search_opts['extra_specs']['visible'] = '1'

        drives = volume_types.get_all_types(self.context,
                                            search_opts=search_opts)
        self._list(drives)

    @args('--name', dest='name', metavar="<name>", help='Drive name')
    @args('--type', dest='type', metavar="<type>",
          help='Drive type (SATA, SAS, SSD, etc.)')
    @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB')
    @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM')
    @args('--capabilities', dest='capabilities', default=None,
          metavar="<string>", help='Different capabilities')
    @args('--visible', dest='visible',
          metavar="<show|hide>", help='Show or hide drive')
    def update(self, name, type=None, size_gb=None, rpm=None,
               capabilities=None, visible=None):
        """Update drive type."""
        volume_type = volume_types.get_volume_type_by_name(self.context, name)

        # Only the supplied fields are written back to extra_specs.
        extra_specs = {'type': 'vsa_drive'}
        if type:
            extra_specs['drive_type'] = type
        if size_gb:
            extra_specs['drive_size'] = size_gb
        if rpm:
            extra_specs['drive_rpm'] = rpm
        if capabilities:
            extra_specs['capabilities'] = capabilities
        if visible is not None:
            if visible in ["show", True, "True"]:
                extra_specs['visible'] = True
            elif visible in ["hide", False, "False"]:
                extra_specs['visible'] = False
            else:
                raise ValueError(_('visible parameter should be set to '\
                                   'show or hide'))

        db.api.volume_type_extra_specs_update_or_create(self.context,
                                                        volume_type['id'],
                                                        extra_specs)
        result = volume_types.get_volume_type_by_name(self.context, name)
        self._list({name: result})
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@ -2067,7 +1594,6 @@ CATEGORIES = [
('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
('drive', VsaDriveTypeCommands),
('export', ExportCommands),
('fixed', FixedIpCommands),
('flavor', InstanceTypeCommands),
@ -2085,7 +1611,6 @@ CATEGORIES = [
('vm', VmCommands),
('volume', VolumeCommands),
('vpn', VpnCommands),
('vsa', VsaCommands),
('logs', GetLogCommands)]

View File

@ -1,49 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Starter script for Nova VSA."""

# Monkey-patch the stdlib for cooperative (green-thread) I/O before any
# other module is imported.
import eventlet
eventlet.monkey_patch()

import os
import sys

# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
    sys.path.insert(0, possible_topdir)

from nova import flags
from nova import log as logging
from nova import service
from nova import utils

if __name__ == '__main__':
    # Parse flags/config, set up logging, then run the nova-vsa service
    # until it is stopped.
    utils.default_flagfile()
    flags.FLAGS(sys.argv)
    logging.setup()
    utils.monkey_patch()
    server = service.Service.create(binary='nova-vsa')
    service.serve(server)
    service.wait()

View File

@ -57,8 +57,6 @@
# default_project="openstack"
###### (StrOpt) availability zone to use when user doesn't specify one
# default_schedule_zone=<None>
###### (StrOpt) default instance type for VSA instances
# default_vsa_instance_type="m1.small"
###### (StrOpt) the internal ip of the ec2 api server
# ec2_dmz_host="$my_ip"
###### (StrOpt) the ip of the ec2 api server
@ -121,8 +119,6 @@
# logfile=<None>
###### (StrOpt) Default file mode used when creating log files
# logfile_mode="0644"
###### (IntOpt) maximum VCs in a VSA
# max_vcs_in_vsa=32
###### (ListOpt) Memcached servers or None for in process cache.
# memcached_servers=<None>
###### (StrOpt) the ip for the metadata api server
@ -241,8 +237,6 @@
# use_stderr=true
###### (BoolOpt) Use syslog for logging.
# use-syslog=false
###### (StrOpt) the VC image ID (for a VC image that exists in Glance)
# vc_image_name="vc_image"
###### (BoolOpt) Print more verbose output
# verbose=false
###### (StrOpt) The full class name of the volume API class to use
@ -255,12 +249,6 @@
# vpn_image_id="0"
###### (StrOpt) Suffix to add to project name for vpn key and secgroups
# vpn_key_suffix="-vpn"
###### (StrOpt) full class name for the Manager for VSA
# vsa_manager="nova.vsa.manager.VsaManager"
###### (IntOpt) default partition size for shared capacity
# vsa_part_size_gb=100
###### (StrOpt) the topic that nova-vsa service listens on
# vsa_topic="vsa"
###### (IntOpt) Number of seconds zombie instances are cleaned up.
# zombie_instance_updated_at_window=172800
@ -322,8 +310,6 @@
# snapshot_name_template="snapshot-%08x"
###### (StrOpt) Template string to be used to generate instance names
# volume_name_template="volume-%08x"
###### (StrOpt) Template string to be used to generate VSA names
# vsa_name_template="vsa-%08x"
######### defined in nova.crypto #########
@ -1020,28 +1006,6 @@
###### (BoolOpt) Allow overcommitting vcpus on isolated hosts
# skip_isolated_core_check=true
######### defined in nova.scheduler.vsa #########
###### (IntOpt) The percentage range for capacity comparison
# drive_type_approx_capacity_percent=10
###### (StrOpt) EC2 access key used by VSA for accessing nova
# vsa_ec2_access_key=<None>
###### (StrOpt) User ID used by VSA for accessing nova
# vsa_ec2_user_id=<None>
###### (BoolOpt) Ask scheduler to create multiple volumes in one call
# vsa_multi_vol_creation=true
###### (BoolOpt) Allow selection of same host for multiple drives
# vsa_select_unique_drives=true
###### (IntOpt) The number of unique hosts per storage allocation
# vsa_unique_hosts_per_alloc=10
###### (StrOpt) Name of volume type associated with FE VSA volumes
# vsa_volume_type_name="VSA volume type"
######### defined in nova.vsa.manager #########
###### (StrOpt) Driver to use for controlling VSAs
# vsa_driver="nova.vsa.connection.get_connection"
######### defined in nova.volume.driver #########
###### (StrOpt) iscsi target user-land tool to use

View File

@ -1,701 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The virtul storage array extension"""
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.contrib import volumes
from nova.api.openstack import extensions
from nova.api.openstack.compute import servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
from nova import network
from nova import exception
from nova import flags
from nova import log as logging
from nova.vsa import api as vsa_api
from nova import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute',
'virtual_storage_arrays')
def _vsa_view(context, vsa, details=False, instances=None):
    """Map keys for vsa summary/detailed view."""
    view = {
        'id': vsa.get('id'),
        'name': vsa.get('name'),
        'displayName': vsa.get('display_name'),
        'displayDescription': vsa.get('display_description'),
        'createTime': vsa.get('created_at'),
        'status': vsa.get('status'),
    }

    # Prefer the joined instance-type record; fall back to the raw id.
    if 'vsa_instance_type' in vsa:
        view['vcType'] = vsa['vsa_instance_type'].get('name', None)
    else:
        view['vcType'] = vsa['instance_type_id']

    view['vcCount'] = vsa.get('vc_count')
    view['driveCount'] = vsa.get('vol_count')

    # Pick an address for the VSA: the first floating IP found wins
    # outright; otherwise the first fixed address seen is kept.
    view['ipAddress'] = None
    for instance in instances:
        fixed_addr = None
        floating_addr = None
        if instance['fixed_ips']:
            first_fixed = instance['fixed_ips'][0]
            fixed_addr = first_fixed['address']
            if first_fixed['floating_ips']:
                floating_addr = first_fixed['floating_ips'][0]['address']

        if floating_addr:
            view['ipAddress'] = floating_addr
            break
        view['ipAddress'] = view['ipAddress'] or fixed_addr

    return view
def make_vsa(elem):
    """Declare every attribute serialized on a <vsa> XML element."""
    for attr in ('id', 'name', 'displayName', 'displayDescription',
                 'createTime', 'status', 'vcType', 'vcCount',
                 'driveCount', 'ipAddress'):
        elem.set(attr)
class VsaTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single VSA response."""

    def construct(self):
        root = xmlutil.TemplateElement('vsa', selector='vsa')
        make_vsa(root)
        return xmlutil.MasterTemplate(root, 1)
class VsaSetTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of VSAs (<vsaSet>)."""

    def construct(self):
        root = xmlutil.TemplateElement('vsaSet')
        elem = xmlutil.SubTemplateElement(root, 'vsa', selector='vsaSet')
        make_vsa(elem)
        return xmlutil.MasterTemplate(root, 1)
class VsaController(object):
    """The Virtual Storage Array API controller for the OpenStack API."""

    def __init__(self):
        self.vsa_api = vsa_api.API()
        self.compute_api = compute.API()
        self.network_api = network.API()
        super(VsaController, self).__init__()

    def _get_instances_by_vsa_id(self, context, id):
        # VC instances are linked to their VSA via instance metadata.
        return self.compute_api.get_all(context,
                search_opts={'metadata': dict(vsa_id=str(id))})

    def _items(self, req, details):
        """Return summary or detailed list of VSAs."""
        context = req.environ['nova.context']
        authorize(context)
        vsas = self.vsa_api.get_all(context)
        limited_list = common.limited(vsas, req)

        vsa_list = []
        for vsa in limited_list:
            instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
            vsa_list.append(_vsa_view(context, vsa, details, instances))
        return {'vsaSet': vsa_list}

    @wsgi.serializers(xml=VsaSetTemplate)
    def index(self, req):
        """Return a short list of VSAs."""
        return self._items(req, details=False)

    @wsgi.serializers(xml=VsaSetTemplate)
    def detail(self, req):
        """Return a detailed list of VSAs."""
        return self._items(req, details=True)

    @wsgi.serializers(xml=VsaTemplate)
    def show(self, req, id):
        """Return data about the given VSA."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            vsa = self.vsa_api.get(context, vsa_id=id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
        return {'vsa': _vsa_view(context, vsa, True, instances)}

    @wsgi.serializers(xml=VsaTemplate)
    def create(self, req, body):
        """Create a new VSA."""
        context = req.environ['nova.context']
        authorize(context)

        if not body or 'vsa' not in body:
            LOG.debug(_("No body provided"), context=context)
            raise exc.HTTPUnprocessableEntity()

        vsa = body['vsa']

        display_name = vsa.get('displayName')
        vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type)
        try:
            instance_type = instance_types.get_instance_type_by_name(vc_type)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"),
                  locals(), context=context)

        _vsa_placement = vsa.get('placement', {})

        args = dict(display_name=display_name,
                    display_description=vsa.get('displayDescription'),
                    instance_type=instance_type,
                    storage=vsa.get('storage'),
                    shared=vsa.get('shared'),
                    availability_zone=_vsa_placement.get('AvailabilityZone'))

        vsa = self.vsa_api.create(context, **args)

        instances = self._get_instances_by_vsa_id(context, vsa.get('id'))
        return {'vsa': _vsa_view(context, vsa, True, instances)}

    def delete(self, req, id):
        """Delete a VSA."""
        context = req.environ['nova.context']
        authorize(context)

        LOG.audit(_("Delete VSA with id: %s"), id, context=context)

        try:
            self.vsa_api.delete(context, vsa_id=id)
        except exception.NotFound:
            raise exc.HTTPNotFound()

    def associate_address(self, req, id, body):
        """ /zadr-vsa/{vsa_id}/associate_address
        auto or manually associate an IP to VSA
        """
        context = req.environ['nova.context']
        authorize(context)

        # FIXME(comstud): Seems like this always assigns 'auto' right
        # now despite what the docstring says this should support.
        if body is None:
            ip = 'auto'
        else:
            ip = body.get('ipAddress', 'auto')

        LOG.audit(_("Associate address %(ip)s to VSA %(id)s"),
                  locals(), context=context)

        try:
            instances = self._get_instances_by_vsa_id(context, id)
            if instances is None or len(instances) == 0:
                raise exc.HTTPNotFound()

            for instance in instances:
                self.network_api.allocate_for_instance(context, instance,
                                                       vpn=False)
            # Placeholder
            return
        except exception.NotFound:
            raise exc.HTTPNotFound()

    def disassociate_address(self, req, id, body):
        """ /zadr-vsa/{vsa_id}/disassociate_address
        auto or manually associate an IP to VSA
        """
        context = req.environ['nova.context']
        authorize(context)

        if body is None:
            ip = 'auto'
        else:
            ip = body.get('ipAddress', 'auto')

        LOG.audit(_("Disassociate address from VSA %(id)s"),
                  locals(), context=context)
        # Placeholder
def make_volume(elem):
    """Extend the generic volume element with VSA-specific attributes."""
    # Reuse the standard volume serialization...
    volumes.make_volume(elem)
    # ...then declare the extra fields VSA views add.
    for extra in ('name', 'vsaId'):
        elem.set(extra)
class VsaVolumeDriveController(volumes.VolumeController):
    """The base class for VSA volumes & drives.

    A child resource of the VSA object. Allows operations with
    volumes and drives created to/from particular VSA
    """

    def __init__(self):
        # NOTE(review): subclasses are expected to set self.direction
        # ('from_vsa_id'/'to_vsa_id'), self.objects (plural body key) and
        # self.object (singular body key) before calling up -- see
        # VsaVolumeController / VsaDriveController.
        self.volume_api = volume.API()
        self.vsa_api = vsa_api.API()
        super(VsaVolumeDriveController, self).__init__()

    def _translation(self, context, vol, vsa_id, details):
        """Map a volume record to its summary or detailed API view."""
        if details:
            translation = volumes._translate_volume_detail_view
        else:
            translation = volumes._translate_volume_summary_view

        d = translation(context, vol)
        d['vsaId'] = vsa_id
        d['name'] = vol['name']
        return d

    def _check_volume_ownership(self, context, vsa_id, id):
        """Raise NotFound/Invalid unless volume *id* belongs to *vsa_id*."""
        obj = self.object
        try:
            volume_ref = self.volume_api.get(context, id)
        except exception.NotFound:
            LOG.error(_("%(obj)s with ID %(id)s not found"), locals())
            raise

        # Ownership is recorded in volume metadata under self.direction.
        own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref,
                                                               self.direction)
        if own_vsa_id != vsa_id:
            LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"
                        " and not to VSA %(vsa_id)s."), locals())
            raise exception.Invalid()

    def _items(self, req, vsa_id, details):
        """Return summary or detailed list of volumes for particular VSA."""
        context = req.environ['nova.context']
        authorize(context)

        vols = self.volume_api.get_all(context,
                search_opts={'metadata': {self.direction: str(vsa_id)}})
        limited_list = common.limited(vols, req)

        res = [self._translation(context, vol, vsa_id, details)
               for vol in limited_list]

        return {self.objects: res}

    def index(self, req, vsa_id):
        """Return a short list of volumes created from particular VSA."""
        LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals())
        return self._items(req, vsa_id, details=False)

    def detail(self, req, vsa_id):
        """Return a detailed list of volumes created from particular VSA."""
        LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals())
        return self._items(req, vsa_id, details=True)

    def create(self, req, vsa_id, body):
        """Create a new volume from VSA."""
        LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals())

        context = req.environ['nova.context']
        authorize(context)

        if not body:
            raise exc.HTTPUnprocessableEntity()

        vol = body[self.object]
        size = vol['size']
        LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"),
                  locals(), context=context)
        try:
            # create is supported for volumes only (drives created through VSA)
            volume_type = self.vsa_api.get_vsa_volume_type(context)
        except exception.NotFound:
            raise exc.HTTPNotFound()

        new_volume = self.volume_api.create(context,
                                            size,
                                            vol.get('displayName'),
                                            vol.get('displayDescription'),
                                            None,
                                            volume_type=volume_type,
                                            metadata=dict(
                                                from_vsa_id=str(vsa_id)))

        return {self.object: self._translation(context, new_volume,
                                               vsa_id, True)}

    def update(self, req, vsa_id, id, body):
        """Update a volume."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()

        vol = body[self.object]

        # Map of API field name -> DB column name; only supplied keys
        # are written back.
        updatable_fields = [{'displayName': 'display_name'},
                            {'displayDescription': 'display_description'},
                            {'status': 'status'},
                            {'providerLocation': 'provider_location'},
                            {'providerAuth': 'provider_auth'}]
        changes = {}
        for field in updatable_fields:
            key = field.keys()[0]
            val = field[key]
            if key in vol:
                changes[val] = vol[key]

        obj = self.object
        LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
                  locals(), context=context)

        try:
            volume = self.volume_api.get(context, id)
            self.volume_api.update(context, volume, fields=changes)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        return webob.Response(status_int=202)

    def delete(self, req, vsa_id, id):
        """Delete a volume."""
        context = req.environ['nova.context']
        authorize(context)

        LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals())

        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()

        return super(VsaVolumeDriveController, self).delete(req, id)

    def show(self, req, vsa_id, id):
        """Return data about the given volume."""
        context = req.environ['nova.context']
        authorize(context)

        LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals())

        try:
            self._check_volume_ownership(context, vsa_id, id)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.Invalid:
            raise exc.HTTPBadRequest()

        return super(VsaVolumeDriveController, self).show(req, id)
class VsaVolumeTemplate(xmlutil.TemplateBuilder):
    """XML template for a single VSA volume response."""

    def construct(self):
        root = xmlutil.TemplateElement('volume', selector='volume')
        make_volume(root)
        return xmlutil.MasterTemplate(root, 1)
class VsaVolumesTemplate(xmlutil.TemplateBuilder):
    """XML template for a list of VSA volumes."""

    def construct(self):
        root = xmlutil.TemplateElement('volumes')
        elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
        make_volume(elem)
        return xmlutil.MasterTemplate(root, 1)
class VsaVolumeController(VsaVolumeDriveController):
    """The VSA volume API controller for the OpenStack API.

    A child resource of the VSA object. Allows operations with volumes created
    by particular VSA
    """

    def __init__(self):
        # Configure the shared base controller for the "volumes created
        # from a VSA" flavour of this resource.
        self.direction = 'from_vsa_id'
        self.objects = 'volumes'
        self.object = 'volume'
        super(VsaVolumeController, self).__init__()

    @wsgi.serializers(xml=VsaVolumesTemplate)
    def index(self, req, vsa_id):
        return super(VsaVolumeController, self).index(req, vsa_id)

    @wsgi.serializers(xml=VsaVolumesTemplate)
    def detail(self, req, vsa_id):
        return super(VsaVolumeController, self).detail(req, vsa_id)

    @wsgi.serializers(xml=VsaVolumeTemplate)
    def create(self, req, vsa_id, body):
        return super(VsaVolumeController, self).create(req, vsa_id, body)

    @wsgi.serializers(xml=VsaVolumeTemplate)
    def update(self, req, vsa_id, id, body):
        return super(VsaVolumeController, self).update(req, vsa_id, id, body)

    @wsgi.serializers(xml=VsaVolumeTemplate)
    def show(self, req, vsa_id, id):
        return super(VsaVolumeController, self).show(req, vsa_id, id)
class VsaDriveTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single VSA drive.

    Drives share the volume attribute set, hence make_volume().
    """

    def construct(self):
        drive_elem = xmlutil.TemplateElement('drive', selector='drive')
        make_volume(drive_elem)
        return xmlutil.MasterTemplate(drive_elem, 1)
class VsaDrivesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of VSA drives."""

    def construct(self):
        list_root = xmlutil.TemplateElement('drives')
        entry = xmlutil.SubTemplateElement(list_root, 'drive',
                                           selector='drives')
        make_volume(entry)
        return xmlutil.MasterTemplate(list_root, 1)
class VsaDriveController(VsaVolumeDriveController):
    """The VSA Drive API controller for the OpenStack API.

    A child resource of the VSA object. Allows operations with drives created
    for particular VSA
    """

    def __init__(self):
        # 'to_vsa_id' — presumably the metadata key tagging back-end
        # drives allocated to this VSA; verify against the parent class.
        self.direction = 'to_vsa_id'
        self.objects = 'drives'
        self.object = 'drive'
        super(VsaDriveController, self).__init__()

    # Drives are managed through the VSA APIs only, so mutation through
    # this resource is rejected outright.
    def create(self, req, vsa_id, body):
        """Create a new drive for VSA. Should be done through VSA APIs"""
        raise exc.HTTPBadRequest()

    def update(self, req, vsa_id, id, body):
        """Update a drive. Should be done through VSA APIs"""
        raise exc.HTTPBadRequest()

    def delete(self, req, vsa_id, id):
        """Delete a volume. Should be done through VSA APIs"""
        raise exc.HTTPBadRequest()

    # Read-only access delegates to the parent, adding XML serializers.
    @wsgi.serializers(xml=VsaDrivesTemplate)
    def index(self, req, vsa_id):
        return super(VsaDriveController, self).index(req, vsa_id)

    @wsgi.serializers(xml=VsaDrivesTemplate)
    def detail(self, req, vsa_id):
        return super(VsaDriveController, self).detail(req, vsa_id)

    @wsgi.serializers(xml=VsaDriveTemplate)
    def show(self, req, vsa_id, id):
        return super(VsaDriveController, self).show(req, id)
def make_vpool(elem):
    """Declare the vPool attributes and driveIds sub-elements on `elem`."""
    vpool_attrs = ('id', 'vsaId', 'name', 'displayName',
                   'displayDescription', 'driveCount', 'protection',
                   'stripeSize', 'stripeWidth', 'createTime', 'status')
    for attr in vpool_attrs:
        elem.set(attr)
    drive_ids = xmlutil.SubTemplateElement(elem, 'driveIds')
    drive_id = xmlutil.SubTemplateElement(drive_ids, 'driveId',
                                          selector='driveIds')
    drive_id.text = xmlutil.Selector()
class VsaVPoolTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single vPool."""

    def construct(self):
        vpool_elem = xmlutil.TemplateElement('vpool', selector='vpool')
        make_vpool(vpool_elem)
        return xmlutil.MasterTemplate(vpool_elem, 1)
class VsaVPoolsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of vPools."""

    def construct(self):
        list_root = xmlutil.TemplateElement('vpools')
        entry = xmlutil.SubTemplateElement(list_root, 'vpool',
                                           selector='vpools')
        make_vpool(entry)
        return xmlutil.MasterTemplate(list_root, 1)
class VsaVPoolController(object):
    """The vPool VSA API controller for the OpenStack API.

    vPools are not implemented: index always returns an empty list and
    every other operation is rejected with 400.
    """

    def __init__(self):
        self.vsa_api = vsa_api.API()
        super(VsaVPoolController, self).__init__()

    @wsgi.serializers(xml=VsaVPoolsTemplate)
    def index(self, req, vsa_id):
        """Return a short list of vpools created from particular VSA."""
        return {'vpools': []}

    def create(self, req, vsa_id, body):
        """Create a new vPool for VSA."""
        raise exc.HTTPBadRequest()

    def update(self, req, vsa_id, id, body):
        """Update vPool parameters."""
        raise exc.HTTPBadRequest()

    def delete(self, req, vsa_id, id):
        """Delete a vPool."""
        raise exc.HTTPBadRequest()

    def show(self, req, vsa_id, id):
        """Return data about the given vPool."""
        raise exc.HTTPBadRequest()
class VsaVCController(servers.Controller):
    """The VSA Virtual Controller API controller for the OpenStack API.

    Exposes the compute instances (virtual controllers) belonging to a
    particular VSA as a read-only child resource of the VSA object.
    """

    def __init__(self):
        self.vsa_api = vsa_api.API()
        self.compute_api = compute.API()
        self.vsa_id = None  # VP-TODO(vladimir.p): temporary ugly hack
        super(VsaVCController, self).__init__()

    def _get_servers(self, req, is_detail):
        """Returns a list of servers, taking into account any search
        options specified.
        """
        if self.vsa_id is None:
            # BUG FIX: the parent's result was previously computed and
            # discarded (missing `return`), so the code fell through and
            # searched with metadata vsa_id='None'. Return it instead.
            return super(VsaVCController, self)._get_servers(req, is_detail)
        context = req.environ['nova.context']
        # VC instances are tagged with their VSA id in instance metadata.
        search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))}
        instance_list = self.compute_api.get_all(
                context, search_opts=search_opts)
        limited_list = self._limit_items(instance_list, req)
        servers = [self._build_view(req, inst, is_detail)['server']
                for inst in limited_list]
        return dict(servers=servers)

    @wsgi.serializers(xml=servers.MinimalServersTemplate)
    def index(self, req, vsa_id):
        """Return list of instances for particular VSA."""
        LOG.audit(_("Index instances for VSA %s"), vsa_id)
        self.vsa_id = vsa_id  # VP-TODO(vladimir.p): temporary ugly hack
        result = super(VsaVCController, self).detail(req)
        self.vsa_id = None
        return result

    def create(self, req, vsa_id, body):
        """Create a new instance for VSA."""
        raise exc.HTTPBadRequest()

    def update(self, req, vsa_id, id, body):
        """Update VSA instance."""
        raise exc.HTTPBadRequest()

    def delete(self, req, vsa_id, id):
        """Delete VSA instance."""
        raise exc.HTTPBadRequest()

    @wsgi.serializers(xml=servers.ServerTemplate)
    def show(self, req, vsa_id, id):
        """Return data about the given instance."""
        return super(VsaVCController, self).show(req, id)
class Virtual_storage_arrays(extensions.ExtensionDescriptor):
    """Virtual Storage Arrays support"""

    name = "VSAs"
    alias = "zadr-vsa"
    namespace = "http://docs.openstack.org/compute/ext/vsa/api/v1.1"
    updated = "2011-08-25T00:00:00+00:00"

    def get_resources(self):
        """Register the VSA resource and its child resources.

        Children (volumes, drives, vpools, instances) are mounted under
        the 'zadr-vsa' collection via the `parent` mapping.
        """
        resources = []
        # Top-level VSA resource with capacity/address member actions.
        res = extensions.ResourceExtension(
                        'zadr-vsa',
                        VsaController(),
                        collection_actions={'detail': 'GET'},
                        member_actions={'add_capacity': 'POST',
                                        'remove_capacity': 'POST',
                                        'associate_address': 'POST',
                                        'disassociate_address': 'POST'})
        resources.append(res)
        # FE volumes created by a VSA.
        res = extensions.ResourceExtension('volumes',
                            VsaVolumeController(),
                            collection_actions={'detail': 'GET'},
                            parent=dict(
                                member_name='vsa',
                                collection_name='zadr-vsa'))
        resources.append(res)
        # BE drives allocated to a VSA (read-only here).
        res = extensions.ResourceExtension('drives',
                            VsaDriveController(),
                            collection_actions={'detail': 'GET'},
                            parent=dict(
                                member_name='vsa',
                                collection_name='zadr-vsa'))
        resources.append(res)
        # vPools (stubbed out — see VsaVPoolController).
        res = extensions.ResourceExtension('vpools',
                            VsaVPoolController(),
                            parent=dict(
                                member_name='vsa',
                                collection_name='zadr-vsa'))
        resources.append(res)
        # Virtual controller instances of a VSA (read-only).
        res = extensions.ResourceExtension('instances',
                            VsaVCController(),
                            parent=dict(
                                member_name='vsa',
                                collection_name='zadr-vsa'))
        resources.append(res)
        return resources

View File

@ -65,9 +65,6 @@ db_opts = [
cfg.StrOpt('snapshot_name_template',
default='snapshot-%08x',
help='Template string to be used to generate snapshot names'),
cfg.StrOpt('vsa_name_template',
default='vsa-%08x',
help='Template string to be used to generate VSA names'),
]
FLAGS = flags.FLAGS
@ -1571,39 +1568,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
####################
def vsa_create(context, values):
    """Creates Virtual Storage Array record.

    :param values: dict of column values for the new record.
    :returns: the created VSA model object (delegates to the backend IMPL).
    """
    return IMPL.vsa_create(context, values)
def vsa_update(context, vsa_id, values):
    """Updates Virtual Storage Array record.

    :param values: dict of column values to apply.
    :returns: the updated VSA model object (delegates to the backend IMPL).
    """
    return IMPL.vsa_update(context, vsa_id, values)
def vsa_destroy(context, vsa_id):
    """Deletes Virtual Storage Array record.

    Delegates to the backend IMPL (soft-delete in the sqlalchemy layer).
    """
    return IMPL.vsa_destroy(context, vsa_id)
def vsa_get(context, vsa_id):
    """Get Virtual Storage Array record by ID.

    :raises: VirtualStorageArrayNotFound (in the sqlalchemy backend) when
        no matching record exists.
    """
    return IMPL.vsa_get(context, vsa_id)
def vsa_get_all(context):
    """Get all Virtual Storage Array records.

    Delegates to the backend IMPL (admin-only in the sqlalchemy layer).
    """
    return IMPL.vsa_get_all(context)
def vsa_get_all_by_project(context, project_id):
    """Get all Virtual Storage Array records by project ID."""
    return IMPL.vsa_get_all_by_project(context, project_id)
###################

View File

@ -3851,88 +3851,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
####################
def _vsa_get_query(context, session=None, project_only=False):
    """Base query for VSA records, eagerly loading the instance type.

    :param project_only: when True, restrict results to the context's
        project (handled by model_query).
    """
    return model_query(context, models.VirtualStorageArray, session=session,
                       project_only=project_only).\
                       options(joinedload('vsa_instance_type'))
@require_admin_context
def vsa_create(context, values):
    """
    Creates Virtual Storage Array record.

    Any failure while saving is re-raised as exception.DBError.
    """
    try:
        vsa_ref = models.VirtualStorageArray()
        vsa_ref.update(values)
        vsa_ref.save()
    except Exception, e:
        # Wrap all persistence errors in the generic DB error type.
        raise exception.DBError(e)
    return vsa_ref
@require_admin_context
def vsa_update(context, vsa_id, values):
    """
    Updates Virtual Storage Array record.

    Fetch-and-update inside a single transaction so the row cannot be
    deleted between the read and the write.
    """
    session = get_session()
    with session.begin():
        vsa_ref = vsa_get(context, vsa_id, session=session)
        vsa_ref.update(values)
        vsa_ref.save(session=session)
    return vsa_ref
@require_admin_context
def vsa_destroy(context, vsa_id):
    """
    Deletes Virtual Storage Array record.

    Soft-delete: marks the row deleted rather than removing it.
    `updated_at` is pinned to its current value via literal_column so the
    delete does not bump the modification timestamp.
    """
    session = get_session()
    with session.begin():
        session.query(models.VirtualStorageArray).\
                filter_by(id=vsa_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
@require_context
def vsa_get(context, vsa_id, session=None):
    """
    Get Virtual Storage Array record by ID.

    Non-admin callers only see records of their own project
    (project_only=True).

    :raises: exception.VirtualStorageArrayNotFound if no row matches.
    """
    result = _vsa_get_query(context, session=session, project_only=True).\
                filter_by(id=vsa_id).\
                first()
    if not result:
        raise exception.VirtualStorageArrayNotFound(id=vsa_id)
    return result
@require_admin_context
def vsa_get_all(context):
    """
    Get all Virtual Storage Array records.
    """
    return _vsa_get_query(context).all()
@require_context
def vsa_get_all_by_project(context, project_id):
    """
    Get all Virtual Storage Array records by project ID.

    Raises if the context is not allowed to read this project's data.
    """
    authorize_project_context(context, project_id)
    return _vsa_get_query(context).filter_by(project_id=project_id).all()
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
result = model_query(context, models.S3Image, read_deleted="yes").\

View File

@ -300,32 +300,6 @@ class InstanceInfoCache(BASE, NovaBase):
primaryjoin=instance_id == Instance.uuid)
class VirtualStorageArray(BASE, NovaBase):
    """
    Represents a virtual storage array supplying block storage to instances.
    """
    __tablename__ = 'virtual_storage_arrays'

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        # Synthetic name from the configurable template, e.g.
        # 'vsa-00000001' with the default 'vsa-%08x'.
        return FLAGS.vsa_name_template % self.id

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

    # Ownership and placement.
    project_id = Column(String(255))
    availability_zone = Column(String(255))

    # Instance type and image used to boot the VSA's virtual controllers.
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
    image_ref = Column(String(255))
    vc_count = Column(Integer, default=0)   # number of requested VC instances
    vol_count = Column(Integer, default=0)  # total number of BE volumes
    status = Column(String(255))
class InstanceActions(BASE, NovaBase):
"""Represents a guest VM's actions and results"""
__tablename__ = "instance_actions"
@ -357,13 +331,6 @@ class InstanceTypes(BASE, NovaBase):
'InstanceTypes.id, '
'InstanceTypes.deleted == False)')
vsas = relationship(VirtualStorageArray,
backref=backref('vsa_instance_type', uselist=False),
foreign_keys=id,
primaryjoin='and_('
'VirtualStorageArray.instance_type_id == '
'InstanceTypes.id, InstanceTypes.deleted == False)')
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
@ -1047,7 +1014,6 @@ def register_models():
SMFlavors,
SMVolume,
User,
VirtualStorageArray,
Volume,
VolumeMetadata,
VolumeTypeExtraSpecs,

View File

@ -906,18 +906,6 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class VSANovaAccessParamNotFound(Invalid):
    """Raised when required Nova access parameters were not supplied."""
    message = _("Nova access parameters were not specified.")
class VirtualStorageArrayNotFound(NotFound):
    """Lookup of a VSA by numeric id failed."""
    message = _("Virtual Storage Array %(id)d could not be found.")
class VirtualStorageArrayNotFoundByName(NotFound):
    """Lookup of a VSA by name failed."""
    message = _("Virtual Storage Array %(name)s could not be found.")
class CannotResizeToSameSize(NovaException):
    """Resize requested to the instance's current flavor."""
    message = _("When resizing, instances must change size!")

View File

@ -185,9 +185,6 @@ global_opts = [
cfg.StrOpt('network_topic',
default='network',
help='the topic network nodes listen on'),
cfg.StrOpt('vsa_topic',
default='vsa',
help='the topic that nova-vsa service listens on'),
cfg.StrOpt('rabbit_host',
default='localhost',
help='the RabbitMQ host'),
@ -351,21 +348,6 @@ global_opts = [
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
cfg.StrOpt('vsa_manager',
default='nova.vsa.manager.VsaManager',
help='full class name for the Manager for VSA'),
cfg.StrOpt('vc_image_name',
default='vc_image',
help='the VC image ID (for a VC image that exists in Glance)'),
cfg.StrOpt('default_vsa_instance_type',
default='m1.small',
help='default instance type for VSA instances'),
cfg.IntOpt('max_vcs_in_vsa',
default=32,
help='maxinum VCs in a VSA'),
cfg.IntOpt('vsa_part_size_gb',
default=100,
help='default partition size for shared capacity'),
cfg.StrOpt('firewall_driver',
default='nova.virt.firewall.IptablesFirewallDriver',
help='Firewall driver (defaults to iptables)'),

View File

@ -42,7 +42,4 @@ filterlist = [
# nova/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
# nova/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
filters.CommandFilter("/sbin/iscsiadm", "root"),
# nova/volume/driver.py:'/var/lib/zadara/bin/zadara_sncfg', *
# sudoers does not allow zadara_sncfg yet
]

View File

@ -1,532 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
VSA Simple Scheduler
"""
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import utils
from nova.scheduler import driver
from nova.scheduler import simple
from nova.volume import volume_types
from nova.vsa import api as vsa_api
LOG = logging.getLogger(__name__)
vsa_scheduler_opts = [
cfg.IntOpt('drive_type_approx_capacity_percent',
default=10,
help='The percentage range for capacity comparison'),
cfg.IntOpt('vsa_unique_hosts_per_alloc',
default=10,
help='The number of unique hosts per storage allocation'),
cfg.BoolOpt('vsa_select_unique_drives',
default=True,
help='Allow selection of same host for multiple drives'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vsa_scheduler_opts)
def BYTES_TO_GB(bytes):
    """Convert a byte count to whole gigabytes (floor).

    For Python ints, floor-division by 2**30 is identical to the
    original arithmetic right shift by 30.
    """
    return bytes // (1 << 30)
def GB_TO_BYTES(gb):
    """Convert gigabytes to bytes (multiplication by 2**30)."""
    return gb * (1 << 30)
class VsaScheduler(simple.SimpleScheduler):
"""Implements Scheduler for volume placement."""
def __init__(self, *args, **kwargs):
    """Initialize and tell all volume hosts the scheduler has started."""
    super(VsaScheduler, self).__init__(*args, **kwargs)
    self._notify_all_volume_hosts("startup")
def _notify_all_volume_hosts(self, event):
    """Fanout an RPC 'notification' message to every volume host."""
    rpc.fanout_cast(context.get_admin_context(),
             FLAGS.volume_topic,
             {"method": "notification",
              "args": {"event": event}})
def _qosgrp_match(self, drive_type, qos_values):
    """Return True if a host QoS group satisfies the requested drive type.

    Matches the drive-type name case-insensitively and the capacity within
    +/- FLAGS.drive_type_approx_capacity_percent percent of the request.
    All comparisons in compare_list must pass for a match.
    """

    def _compare_names(str1, str2):
        return str1.lower() == str2.lower()

    def _compare_sizes_approxim(cap_capacity, size):
        cap_capacity = BYTES_TO_GB(int(cap_capacity))
        size = int(size)
        size_perc = size * FLAGS.drive_type_approx_capacity_percent / 100
        return (cap_capacity >= size - size_perc and
                cap_capacity <= size + size_perc)

    # Add more entries for additional comparisons
    compare_list = [{'cap1': 'DriveType',
                     'cap2': 'type',
                     'cmp_func': _compare_names},
                    {'cap1': 'DriveCapacity',
                     'cap2': 'size',
                     'cmp_func': _compare_sizes_approxim}]

    for cap in compare_list:
        if (cap['cap1'] in qos_values.keys() and
            cap['cap2'] in drive_type.keys() and
            cap['cmp_func'] is not None and
            cap['cmp_func'](qos_values[cap['cap1']],
                            drive_type[cap['cap2']])):
            pass
        else:
            # A missing key or a failed comparison rejects the group.
            return False
    return True
def _get_service_states(self):
    """Return the host manager's per-host service capability map."""
    return self.host_manager.service_states
def _filter_hosts(self, topic, request_spec, host_list=None):
    """Return (hostname, capability_dict) pairs that can serve the drive.

    A host qualifies if any of its QoS groups for `topic` matches the
    requested drive type and still has available capacity.
    """
    LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())

    drive_type = request_spec['drive_type']
    LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])

    if host_list is None:
        host_list = self._get_service_states().iteritems()

    filtered_hosts = []     # returns list of (hostname, capability_dict)
    for host, host_dict in host_list:
        for service_name, service_dict in host_dict.iteritems():
            if service_name != topic:
                continue

            gos_info = service_dict.get('drive_qos_info', {})
            for qosgrp, qos_values in gos_info.iteritems():
                if self._qosgrp_match(drive_type, qos_values):
                    if qos_values['AvailableCapacity'] > 0:
                        filtered_hosts.append((host, gos_info))
                    else:
                        LOG.debug(_("Host %s has no free capacity. Skip"),
                                    host)
                    # First matching group decides for this host.
                    break

    host_names = [item[0] for item in filtered_hosts]
    LOG.debug(_("Filter hosts: %s"), host_names)
    return filtered_hosts
def _allowed_to_use_host(self, host, selected_hosts, unique):
    """A host is usable unless uniqueness is requested and it is taken."""
    already_selected = host in [item[0] for item in selected_hosts]
    return not (unique and already_selected)
def _add_hostcap_to_list(self, selected_hosts, host, cap):
    """Append (host, cap) to selected_hosts unless host is already there."""
    known_hosts = [item[0] for item in selected_hosts]
    if host not in known_hosts:
        selected_hosts.append((host, cap))
def host_selection_algorithm(self, request_spec, all_hosts,
                            selected_hosts, unique):
    """Must override this method for VSA scheduler to work.

    Subclasses return a (host, qos_capability) tuple chosen from
    all_hosts; see VsaSchedulerLeastUsedHost and
    VsaSchedulerMostAvailCapacity.
    """
    raise NotImplementedError(_("Must implement host selection mechanism"))
def _select_hosts(self, request_spec, all_hosts, selected_hosts=None):
    """Pick a host for one drive, honoring the uniqueness limits.

    Once FLAGS.vsa_unique_hosts_per_alloc hosts are in use, reuse of an
    already-selected host is attempted first; otherwise selection falls
    back to the full host list.

    :raises: exception.NoValidHost when no host can be chosen.
    """
    if selected_hosts is None:
        selected_hosts = []
    host = None
    if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc:
        # try to select from already selected hosts only
        LOG.debug(_("Maximum number of hosts selected (%d)"),
                    len(selected_hosts))
        unique = False
        (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                        selected_hosts,
                                                        selected_hosts,
                                                        unique)

        LOG.debug(_("Selected excessive host %(host)s"), locals())
    else:
        unique = FLAGS.vsa_select_unique_drives

    if host is None:
        # if we've not tried yet (# of sel hosts < max) - unique=True
        # or failed to select from selected_hosts - unique=False
        # select from all hosts
        (host, qos_cap) = self.host_selection_algorithm(request_spec,
                                                        all_hosts,
                                                        selected_hosts,
                                                        unique)
    if host is None:
        raise exception.NoValidHost(reason="")

    return (host, qos_cap)
def _provision_volume(self, context, vol, vsa_id, availability_zone):
    """Create a DB record for one BE volume and cast creation to its host.

    The volume is tagged with metadata to_vsa_id so it can later be
    associated back to the owning VSA.
    """

    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    now = utils.utcnow()
    options = {
        'size': vol['size'],
        'user_id': context.user_id,
        'project_id': context.project_id,
        'snapshot_id': None,
        'availability_zone': availability_zone,
        'status': "creating",
        'attach_status': "detached",
        'display_name': vol['name'],
        'display_description': vol['description'],
        'volume_type_id': vol['volume_type_id'],
        'metadata': dict(to_vsa_id=vsa_id),
        }

    size = vol['size']
    host = vol['host']
    name = vol['name']
    LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "
                "host %(host)s"), locals())

    volume_ref = db.volume_create(context.elevated(), options)
    driver.cast_to_volume_host(context, vol['host'],
            'create_volume', volume_id=volume_ref['id'],
            snapshot_id=None)
def _check_host_enforcement(self, context, availability_zone):
    """Return the forced host if the AZ is of the admin 'zone:host' form.

    :returns: the host name when an admin pinned a host via the
        availability zone, otherwise None.
    :raises: exception.WillNotSchedule if the pinned host is disabled
        or its nova-volume service is down.
    """
    if (availability_zone
        and ':' in availability_zone
        and context.is_admin):
        zone, _x, host = availability_zone.partition(':')
        service = db.service_get_by_args(context.elevated(), host,
                                         'nova-volume')
        if service['disabled'] or not utils.service_is_up(service):
            raise exception.WillNotSchedule(host=host)

        return host
    else:
        return None
def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
    """Mutate each volume dict in volume_params with 'host'/'capabilities'.

    With forced_host set, every volume is pinned there and no capacity
    accounting happens. Otherwise hosts are chosen per drive type; the
    candidate host list is recomputed only when the volume type changes
    from the previous volume (volumes are expected to be grouped by type).
    """

    prev_volume_type_id = None
    request_spec = {}
    selected_hosts = []

    LOG.debug(_("volume_params %(volume_params)s") % locals())

    i = 1
    for vol in volume_params:
        name = vol['name']
        LOG.debug(_("%(i)d: Volume %(name)s"), locals())
        i += 1

        if forced_host:
            vol['host'] = forced_host
            vol['capabilities'] = None
            continue

        volume_type_id = vol['volume_type_id']
        request_spec['size'] = vol['size']

        if (prev_volume_type_id is None or
            prev_volume_type_id != volume_type_id):
            # generate list of hosts for this drive type
            volume_type = volume_types.get_volume_type(context,
                                                        volume_type_id)
            drive_type = {
                'name': volume_type['extra_specs'].get('drive_name'),
                'type': volume_type['extra_specs'].get('drive_type'),
                'size': int(volume_type['extra_specs'].get('drive_size')),
                'rpm': volume_type['extra_specs'].get('drive_rpm'),
                }
            request_spec['drive_type'] = drive_type

            all_hosts = self._filter_hosts("volume", request_spec)
            prev_volume_type_id = volume_type_id

        (host, qos_cap) = self._select_hosts(request_spec,
                                             all_hosts, selected_hosts)
        vol['host'] = host
        vol['capabilities'] = qos_cap
        # Reserve the capacity locally (-1 = consume).
        self._consume_resource(qos_cap, vol['size'], -1)
def schedule_create_volumes(self, context, request_spec,
                            availability_zone=None, *_args, **_kwargs):
    """Picks hosts for hosting multiple volumes.

    On any failure the VSA is marked FAILED and locally reserved
    capacity is released (+1) before re-raising.
    """
    num_volumes = request_spec.get('num_volumes')
    LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
            locals())

    vsa_id = request_spec.get('vsa_id')
    volume_params = request_spec.get('volumes')

    host = self._check_host_enforcement(context, availability_zone)

    try:
        self._print_capabilities_info()

        self._assign_hosts_to_volumes(context, volume_params, host)

        for vol in volume_params:
            self._provision_volume(context, vol, vsa_id, availability_zone)
    except Exception:
        LOG.exception(_("Error creating volumes"))
        if vsa_id:
            db.vsa_update(context, vsa_id,
                            dict(status=vsa_api.VsaState.FAILED))

        for vol in volume_params:
            if 'capabilities' in vol:
                self._consume_resource(vol['capabilities'],
                                        vol['size'], 1)
        raise

    return None
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
    """Picks the best host based on requested drive type capability."""
    volume_ref = db.volume_get(context, volume_id)

    host = self._check_host_enforcement(context,
                                        volume_ref['availability_zone'])
    if host:
        # Admin pinned a host — no capability-based selection needed.
        driver.cast_to_volume_host(context, host, 'create_volume',
                volume_id=volume_id, **_kwargs)
        return None

    volume_type_id = volume_ref['volume_type_id']
    if volume_type_id:
        volume_type = volume_types.get_volume_type(context, volume_type_id)

    # NOTE(review): this delegates to the simple scheduler both when the
    # volume has no type AND when is_vsa_volume() is true, yet logs
    # "Non-VSA volume". Presumably FE 'vsa_volume' types (as opposed to
    # BE drives) are meant to bypass drive-type scheduling — confirm
    # against volume_types.is_vsa_volume before relying on this.
    if (volume_type_id is None or
        volume_types.is_vsa_volume(volume_type_id, volume_type)):

        LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
        return super(VsaScheduler, self).schedule_create_volume(context,
                volume_id, *_args, **_kwargs)

    self._print_capabilities_info()

    drive_type = {
        'name': volume_type['extra_specs'].get('drive_name'),
        'type': volume_type['extra_specs'].get('drive_type'),
        'size': int(volume_type['extra_specs'].get('drive_size')),
        'rpm': volume_type['extra_specs'].get('drive_rpm'),
        }

    LOG.debug(_("Spawning volume %(volume_id)s with drive type "
                "%(drive_type)s"), locals())

    request_spec = {'size': volume_ref['size'],
                    'drive_type': drive_type}
    hosts = self._filter_hosts("volume", request_spec)

    try:
        (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
    except Exception:
        LOG.exception(_("Error creating volume"))
        if volume_ref['to_vsa_id']:
            db.vsa_update(context, volume_ref['to_vsa_id'],
                            dict(status=vsa_api.VsaState.FAILED))
        raise

    if host:
        driver.cast_to_volume_host(context, host, 'create_volume',
                volume_id=volume_id, **_kwargs)
def _consume_full_drive(self, qos_values, direction):
    """Move one drive between the free and occupied pools (direction -1
    consumes a free drive, +1 releases one)."""
    full_drive = qos_values['FullDrive']
    full_drive['NumFreeDrives'] += direction
    full_drive['NumOccupiedDrives'] -= direction
def _consume_partition(self, qos_values, size, direction):
    """Account for one partition of `size` bytes (direction -1 consumes,
    +1 releases), splitting/merging whole drives as partitions run out.
    """

    if qos_values['PartitionDrive']['PartitionSize'] != 0:
        partition_size = qos_values['PartitionDrive']['PartitionSize']
    else:
        # Unsized partitioning: treat the request itself as the unit.
        partition_size = size
    part_per_drive = qos_values['DriveCapacity'] / partition_size

    if (direction == -1 and
        qos_values['PartitionDrive']['NumFreePartitions'] == 0):
        # No free partitions left: break a full drive into partitions.
        self._consume_full_drive(qos_values, direction)
        qos_values['PartitionDrive']['NumFreePartitions'] += part_per_drive

    qos_values['PartitionDrive']['NumFreePartitions'] += direction
    qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction

    if (direction == 1 and
        qos_values['PartitionDrive']['NumFreePartitions'] >=
            part_per_drive):
        # Enough free partitions to reassemble a whole free drive.
        self._consume_full_drive(qos_values, direction)
        qos_values['PartitionDrive']['NumFreePartitions'] -= part_per_drive
def _consume_resource(self, qos_values, size, direction):
    """Adjust local capacity accounting for a volume of `size` GB.

    size == 0 means a full-drive allocation; otherwise a partition.
    direction is -1 to consume, +1 to release. A None qos_values (e.g.
    forced-host placement) is a no-op.
    """
    if qos_values is None:
        LOG.debug(_("No capability selected for volume of size %(size)s"),
                    locals())
        return

    if size == 0:   # full drive match
        qos_values['AvailableCapacity'] += (direction *
                                            qos_values['DriveCapacity'])
        self._consume_full_drive(qos_values, direction)
    else:
        qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
        self._consume_partition(qos_values, GB_TO_BYTES(size), direction)
    return
def _print_capabilities_info(self):
    """Log per-host drive QoS capacity (total/used/free) for debugging."""
    host_list = self._get_service_states().iteritems()
    for host, host_dict in host_list:
        for service_name, service_dict in host_dict.iteritems():
            if service_name != "volume":
                continue

            LOG.info(_("Host %s:"), host)

            gos_info = service_dict.get('drive_qos_info', {})
            for qosgrp, qos_values in gos_info.iteritems():
                total = qos_values['TotalDrives']
                used = qos_values['FullDrive']['NumOccupiedDrives']
                free = qos_values['FullDrive']['NumFreeDrives']
                avail = BYTES_TO_GB(qos_values['AvailableCapacity'])

                LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "
                            "used %(used)2s, free %(free)2s. Available "
                            "capacity %(avail)-5s"), locals())
class VsaSchedulerLeastUsedHost(VsaScheduler):
    """
    Implements VSA scheduler to select the host with least used capacity
    of particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                selected_hosts, unique):
        """Return (host, qos_capability) of the usable host with the
        least total used capacity that can still fit the request.

        size == 0 requests a whole free drive; otherwise a partition of
        `size` with at least one free partition or free drive.
        """
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        min_used = 0

        for (host, capabilities) in all_hosts:

            has_enough_capacity = False
            used_capacity = 0
            for qosgrp, qos_values in capabilities.iteritems():

                # Used capacity is summed across ALL groups on the host,
                # not just the matching one.
                used_capacity = (used_capacity + qos_values['TotalCapacity'] -
                                 qos_values['AvailableCapacity'])

                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:   # full drive match
                        if qos_values['FullDrive']['NumFreeDrives'] > 0:
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break
                    else:
                        _fp = qos_values['PartitionDrive'][
                                            'NumFreePartitions']
                        _fd = qos_values['FullDrive']['NumFreeDrives']
                        if (qos_values['AvailableCapacity'] >= size and
                            (_fp > 0 or _fd > 0)):
                            has_enough_capacity = True
                            matched_qos = qos_values
                        else:
                            break

            if (has_enough_capacity and
                self._allowed_to_use_host(host, selected_hosts, unique) and
                (best_host is None or used_capacity < min_used)):

                min_used = used_capacity
                best_host = host
                best_qoscap = matched_qos
                best_cap = capabilities

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            min_used = BYTES_TO_GB(min_used)
            LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "
                        "(used capacity %(min_used)s)"), locals())
        return (best_host, best_qoscap)
class VsaSchedulerMostAvailCapacity(VsaScheduler):
    """
    Implements VSA scheduler to select the host with most available capacity
    of one particular type.
    """

    def __init__(self, *args, **kwargs):
        super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs)

    def host_selection_algorithm(self, request_spec, all_hosts,
                                selected_hosts, unique):
        """Return (host, qos_capability) of the usable host with the most
        availability for the matching QoS group.

        Availability is free drives when size == 0, otherwise available
        capacity in bytes.
        """
        size = request_spec['size']
        drive_type = request_spec['drive_type']
        best_host = None
        best_qoscap = None
        best_cap = None
        max_avail = 0

        for (host, capabilities) in all_hosts:
            for qosgrp, qos_values in capabilities.iteritems():
                if self._qosgrp_match(drive_type, qos_values):
                    # we found required qosgroup

                    if size == 0:   # full drive match
                        available = qos_values['FullDrive']['NumFreeDrives']
                    else:
                        available = qos_values['AvailableCapacity']

                    if (available > max_avail and
                        self._allowed_to_use_host(host, selected_hosts,
                                                  unique)):
                        max_avail = available
                        best_host = host
                        best_qoscap = qos_values
                        best_cap = capabilities
                    break   # go to the next host

        if best_host:
            self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
            type_str = "drives" if size == 0 else "bytes"
            LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "
                        "(available %(max_avail)s %(type_str)s)"), locals())

        return (best_host, best_qoscap)

View File

@ -1,637 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import virtual_storage_arrays as \
vsa_ext
import nova.db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import api as volume_api
from nova.vsa import api as vsa_api
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
last_param = {}
def _get_default_vsa_param():
return {
'display_name': 'Test_VSA_name',
'display_description': 'Test_VSA_description',
'vc_count': 1,
'instance_type': 'm1.small',
'instance_type_id': 5,
'image_name': None,
'availability_zone': None,
'storage': [],
'shared': False,
}
def stub_vsa_create(self, context, **param):
    """Fake vsa_api.API.create: record the params and echo them back."""
    global last_param
    LOG.debug(_("_create: param=%s"), param)
    param['id'] = 123
    param['name'] = 'Test name'
    param['instance_type_id'] = 5
    last_param = param
    return param
def stub_vsa_delete(self, context, vsa_id):
    """Fake vsa_api.API.delete: succeed only for the canned id '123'."""
    global last_param
    last_param = dict(vsa_id=vsa_id)

    LOG.debug(_("_delete: %s"), locals())
    if vsa_id != '123':
        raise exception.NotFound
def stub_vsa_get(self, context, vsa_id):
    """Fake vsa_api.API.get: return defaults for the canned id '123'."""
    global last_param
    last_param = dict(vsa_id=vsa_id)

    LOG.debug(_("_get: %s"), locals())
    if vsa_id != '123':
        raise exception.NotFound

    param = _get_default_vsa_param()
    param['id'] = vsa_id
    return param
def stub_vsa_get_all(self, context):
    """Fake vsa_api.API.get_all: return a single default VSA."""
    LOG.debug(_("_get_all: %s"), locals())
    param = _get_default_vsa_param()
    param['id'] = 123
    return [param]
class VSAApiTest(test.TestCase):
def setUp(self):
super(VSAApiTest, self).setUp()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
self.stubs.Set(vsa_api.API, "create", stub_vsa_create)
self.stubs.Set(vsa_api.API, "delete", stub_vsa_delete)
self.stubs.Set(vsa_api.API, "get", stub_vsa_get)
self.stubs.Set(vsa_api.API, "get_all", stub_vsa_get_all)
def test_vsa_create(self):
global last_param
last_param = {}
vsa = {"displayName": "VSA Test Name",
"displayDescription": "VSA Test Desc"}
body = dict(vsa=vsa)
req = webob.Request.blank('/v2/fake/zadr-vsa')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
# Compare if parameters were correctly passed to stub
self.assertEqual(last_param['display_name'], "VSA Test Name")
self.assertEqual(last_param['display_description'], "VSA Test Desc")
resp_dict = json.loads(resp.body)
self.assertTrue('vsa' in resp_dict)
self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName'])
self.assertEqual(resp_dict['vsa']['displayDescription'],
vsa['displayDescription'])
def test_vsa_create_no_body(self):
req = webob.Request.blank('/v2/fake/zadr-vsa')
req.method = 'POST'
req.body = json.dumps({})
req.headers['content-type'] = 'application/json'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 422)
def test_vsa_delete(self):
global last_param
last_param = {}
vsa_id = 123
req = webob.Request.blank('/v2/fake/zadr-vsa/%d' % vsa_id)
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 200)
self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
def test_vsa_delete_invalid_id(self):
global last_param
last_param = {}
vsa_id = 234
req = webob.Request.blank('/v2/fake/zadr-vsa/%d' % vsa_id)
req.method = 'DELETE'
resp = req.get_response(fakes.wsgi_app())
self.assertEqual(resp.status_int, 404)
self.assertEqual(str(last_param['vsa_id']), str(vsa_id))
def test_vsa_show(self):
    """GET on a known VSA id returns 200 and the serialized record."""
    global last_param
    last_param = {}
    vsa_id = 123
    req = webob.Request.blank('/v2/fake/zadr-vsa/%d' % vsa_id)
    req.method = 'GET'
    resp = req.get_response(fakes.wsgi_app())
    self.assertEqual(200, resp.status_int)
    self.assertEqual(str(vsa_id), str(last_param['vsa_id']))
    returned = json.loads(resp.body)
    self.assertTrue('vsa' in returned)
    # The stub echoes the id back as a string.
    self.assertEqual(str(vsa_id), returned['vsa']['id'])
def test_vsa_show_invalid_id(self):
    """GET on an unknown VSA id yields 404 but still hits the stub."""
    global last_param
    last_param = {}
    vsa_id = 234
    req = webob.Request.blank('/v2/fake/zadr-vsa/%d' % vsa_id)
    req.method = 'GET'
    resp = req.get_response(fakes.wsgi_app())
    self.assertEqual(404, resp.status_int)
    self.assertEqual(str(vsa_id), str(last_param['vsa_id']))
def test_vsa_index(self):
    """Listing the collection returns exactly one VSA with id 123."""
    req = webob.Request.blank('/v2/fake/zadr-vsa')
    req.method = 'GET'
    resp = req.get_response(fakes.wsgi_app())
    self.assertEqual(200, resp.status_int)
    listing = json.loads(resp.body)
    self.assertTrue('vsaSet' in listing)
    vsas = listing['vsaSet']
    self.assertEqual(1, len(vsas))
    self.assertEqual(123, vsas[0]['id'])
def test_vsa_detail(self):
    """The detail view mirrors the index: one VSA with id 123."""
    req = webob.Request.blank('/v2/fake/zadr-vsa/detail')
    req.method = 'GET'
    resp = req.get_response(fakes.wsgi_app())
    self.assertEqual(200, resp.status_int)
    listing = json.loads(resp.body)
    self.assertTrue('vsaSet' in listing)
    vsas = listing['vsaSet']
    self.assertEqual(1, len(vsas))
    self.assertEqual(123, vsas[0]['id'])
def stub_get_vsa_volume_type(self, context):
    """Canned record standing in for vsa_api.API.get_vsa_volume_type."""
    return dict(id=1,
                name='VSA volume type',
                extra_specs={'type': 'vsa_volume'})
def return_vsa(context, vsa_id):
    """db.vsa_get stand-in: echo the requested id back as a minimal record."""
    return dict(id=vsa_id)
class VSAVolumeApiTest(test.TestCase):
    """Tests for the volume sub-resource of a VSA.

    The suite is parameterized on the sub-resource name: by default it
    exercises "volume"/"volumes"; VSADriveApiTest re-runs every test with
    "drive"/"drives".  Non-volume runs expect 400 from volume-only verbs.
    """
    def setUp(self, test_obj=None, test_objs=None):
        """Stub auth, networking, db.vsa_get and the volume API."""
        super(VSAVolumeApiTest, self).setUp()
        fakes.FakeAuthManager.reset_fake_data()
        fakes.FakeAuthDatabase.data = {}
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_auth(self.stubs)
        self.stubs.Set(nova.db, 'vsa_get', return_vsa)
        self.stubs.Set(vsa_api.API, "get_vsa_volume_type",
                       stub_get_vsa_volume_type)
        self.stubs.Set(volume_api.API, "update", fakes.stub_volume_update)
        self.stubs.Set(volume_api.API, "delete", fakes.stub_volume_delete)
        self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get)
        self.stubs.Set(volume_api.API, "get_all", fakes.stub_volume_get_all)
        # Sub-resource names used to build URLs and request bodies.
        self.test_obj = test_obj if test_obj else "volume"
        self.test_objs = test_objs if test_objs else "volumes"
    def test_vsa_volume_create(self):
        """Create succeeds (200) for volumes, is rejected (400) for drives."""
        self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
        vol = {"size": 100,
               "displayName": "VSA Volume Test Name",
               "displayDescription": "VSA Volume Test Desc"}
        body = {self.test_obj: vol}
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s' % self.test_objs)
        req.method = 'POST'
        req.body = json.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 200)
            resp_dict = json.loads(resp.body)
            self.assertTrue(self.test_obj in resp_dict)
            self.assertEqual(resp_dict[self.test_obj]['size'],
                             vol['size'])
            self.assertEqual(resp_dict[self.test_obj]['displayName'],
                             vol['displayName'])
            self.assertEqual(resp_dict[self.test_obj]['displayDescription'],
                             vol['displayDescription'])
        else:
            self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_create_no_body(self):
        """Empty body: 422 for volumes, 400 for drives."""
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s' % self.test_objs)
        req.method = 'POST'
        req.body = json.dumps({})
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 422)
        else:
            self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_index(self):
        """Listing the sub-resource collection succeeds."""
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s' % self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_vsa_volume_detail(self):
        """The detail listing also succeeds."""
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/detail' %
                                  self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_vsa_volume_show(self):
        """Show succeeds for an object that belongs to VSA 123."""
        # ids 234/345 are flagged by the volume stub as belonging to VSA 123
        # (from_vsa_id for volumes, to_vsa_id for drives).
        obj_num = 234 if self.test_objs == "volumes" else 345
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/%s' %
                                  (self.test_objs, obj_num))
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 200)
    def test_vsa_volume_show_no_vsa_assignment(self):
        """Show under the wrong VSA id is a 400."""
        req = webob.Request.blank('/v2/fake/zadr-vsa/4/%s/333' %
                                  self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_show_no_volume(self):
        """Show of a missing volume is a 404."""
        self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/333' %
                                  self.test_objs)
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 404)
    def test_vsa_volume_update(self):
        """Update is accepted (202) for volumes, rejected (400) for drives."""
        obj_num = 234 if self.test_objs == "volumes" else 345
        update = {"status": "available",
                  "displayName": "Test Display name"}
        body = {self.test_obj: update}
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/%s' %
                                  (self.test_objs, obj_num))
        req.method = 'PUT'
        req.body = json.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 202)
        else:
            self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_delete(self):
        """Delete is accepted (202) for volumes, rejected (400) for drives."""
        obj_num = 234 if self.test_objs == "volumes" else 345
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/%s' %
                                  (self.test_objs, obj_num))
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 202)
        else:
            self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_delete_no_vsa_assignment(self):
        """Delete under the wrong VSA id is a 400."""
        req = webob.Request.blank('/v2/fake/zadr-vsa/4/%s/333' %
                                  self.test_objs)
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        self.assertEqual(resp.status_int, 400)
    def test_vsa_volume_delete_no_volume(self):
        """Delete of a missing volume is a 404 (400 for drives)."""
        self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound)
        req = webob.Request.blank('/v2/fake/zadr-vsa/123/%s/333' %
                                  self.test_objs)
        req.method = 'DELETE'
        resp = req.get_response(fakes.wsgi_app())
        if self.test_obj == "volume":
            self.assertEqual(resp.status_int, 404)
        else:
            self.assertEqual(resp.status_int, 400)
class VSADriveApiTest(VSAVolumeApiTest):
    """Re-run the whole VSA volume API suite against the drive resource."""
    def setUp(self):
        super(VSADriveApiTest, self).setUp(test_objs="drives",
                                           test_obj="drive")
class SerializerTestCommon(test.TestCase):
    """Shared assertion helpers for the XML serializer tests."""
    def _verify_attrs(self, obj, tree, attrs):
        """Each named key of *obj* must appear, stringified, on the node."""
        for name in attrs:
            expected = str(obj[name])
            self.assertEqual(expected, tree.get(name))
class VsaSerializerTest(SerializerTestCommon):
    """XML serialization of single-VSA and VSA-collection responses."""
    def test_serialize_show_create(self):
        """A single VSA dict round-trips into a <vsa> element's attributes."""
        serializer = vsa_ext.VsaTemplate()
        exemplar = dict(
            id='vsa_id',
            name='vsa_name',
            displayName='vsa_display_name',
            displayDescription='vsa_display_desc',
            createTime=datetime.datetime.now(),
            status='active',
            vcType='vsa_instance_type',
            vcCount=24,
            driveCount=48,
            ipAddress='10.11.12.13')
        text = serializer.serialize(dict(vsa=exemplar))
        print text
        tree = etree.fromstring(text)
        self.assertEqual('vsa', tree.tag)
        self._verify_attrs(exemplar, tree, exemplar.keys())
    def test_serialize_index_detail(self):
        """A list of VSAs serializes to <vsaSet> with one <vsa> child each."""
        serializer = vsa_ext.VsaSetTemplate()
        exemplar = [dict(
                id='vsa1_id',
                name='vsa1_name',
                displayName='vsa1_display_name',
                displayDescription='vsa1_display_desc',
                createTime=datetime.datetime.now(),
                status='active',
                vcType='vsa1_instance_type',
                vcCount=24,
                driveCount=48,
                ipAddress='10.11.12.13'),
            dict(
                id='vsa2_id',
                name='vsa2_name',
                displayName='vsa2_display_name',
                displayDescription='vsa2_display_desc',
                createTime=datetime.datetime.now(),
                status='active',
                vcType='vsa2_instance_type',
                vcCount=42,
                driveCount=84,
                ipAddress='11.12.13.14')]
        text = serializer.serialize(dict(vsaSet=exemplar))
        print text
        tree = etree.fromstring(text)
        self.assertEqual('vsaSet', tree.tag)
        self.assertEqual(len(exemplar), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('vsa', child.tag)
            self._verify_attrs(exemplar[idx], child, exemplar[idx].keys())
class VsaVolumeSerializerTest(SerializerTestCommon):
    """XML serialization of VSA volume responses.

    The class attributes parameterize the suite so VsaDriveSerializerTest
    can reuse it for drives by swapping templates and element names.
    """
    show_serializer = vsa_ext.VsaVolumeTemplate
    index_serializer = vsa_ext.VsaVolumesTemplate
    object = 'volume'
    objects = 'volumes'
    def _verify_voldrive(self, vol, tree):
        """Check one serialized volume/drive element against its source dict."""
        self.assertEqual(self.object, tree.tag)
        self._verify_attrs(vol, tree, ('id', 'status', 'size',
                                       'availabilityZone', 'createdAt',
                                       'displayName', 'displayDescription',
                                       'volumeType', 'vsaId', 'name'))
        # Only two child kinds are allowed: the attachment list and metadata.
        for child in tree:
            self.assertTrue(child.tag in ('attachments', 'metadata'))
            if child.tag == 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual('attachment', child[0].tag)
                self._verify_attrs(vol['attachments'][0], child[0],
                                   ('id', 'volumeId', 'serverId', 'device'))
            elif child.tag == 'metadata':
                # Every metadata key must appear exactly once, no extras.
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertTrue(gr_child.tag in not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.tag]),
                                     gr_child.text)
                    not_seen.remove(gr_child.tag)
                self.assertEqual(0, len(not_seen))
    def test_show_create_serializer(self):
        """A single volume dict serializes into one verified element."""
        serializer = self.show_serializer()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availabilityZone='vol_availability',
            createdAt=datetime.datetime.now(),
            attachments=[dict(
                    id='vol_id',
                    volumeId='vol_id',
                    serverId='instance_uuid',
                    device='/foo')],
            displayName='vol_name',
            displayDescription='vol_desc',
            volumeType='vol_type',
            metadata=dict(
                foo='bar',
                baz='quux',
                ),
            vsaId='vol_vsa_id',
            name='vol_vsa_name',
            )
        text = serializer.serialize({self.object: raw_volume})
        print text
        tree = etree.fromstring(text)
        self._verify_voldrive(raw_volume, tree)
    def test_index_detail_serializer(self):
        """A volume list serializes to a container with one child per volume."""
        serializer = self.index_serializer()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=datetime.datetime.now(),
                attachments=[dict(
                        id='vol1_id',
                        volumeId='vol1_id',
                        serverId='instance_uuid',
                        device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                vsaId='vol1_vsa_id',
                name='vol1_vsa_name',
                ),
            dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=datetime.datetime.now(),
                attachments=[dict(
                        id='vol2_id',
                        volumeId='vol2_id',
                        serverId='instance_uuid',
                        device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                vsaId='vol2_vsa_id',
                name='vol2_vsa_name',
                )]
        text = serializer.serialize({self.objects: raw_volumes})
        print text
        tree = etree.fromstring(text)
        self.assertEqual(self.objects, tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_voldrive(raw_volumes[idx], child)
class VsaDriveSerializerTest(VsaVolumeSerializerTest):
    """Run the volume serializer suite over the drive templates instead."""
    object = 'drive'
    objects = 'drives'
    show_serializer = vsa_ext.VsaDriveTemplate
    index_serializer = vsa_ext.VsaDrivesTemplate
class VsaVPoolSerializerTest(SerializerTestCommon):
    """XML serialization of virtual-pool (vpool) responses."""
    def _verify_vpool(self, vpool, tree):
        """Check one serialized vpool element, including its driveIds list."""
        self._verify_attrs(vpool, tree, ('id', 'vsaId', 'name', 'displayName',
                                         'displayDescription', 'driveCount',
                                         'protection', 'stripeSize',
                                         'stripeWidth', 'createTime',
                                         'status'))
        # The only child element is the <driveIds> list, one <driveId> each.
        self.assertEqual(1, len(tree))
        self.assertEqual('driveIds', tree[0].tag)
        self.assertEqual(len(vpool['driveIds']), len(tree[0]))
        for idx, gr_child in enumerate(tree[0]):
            self.assertEqual('driveId', gr_child.tag)
            self.assertEqual(str(vpool['driveIds'][idx]), gr_child.text)
    def test_vpool_create_show_serializer(self):
        """A single vpool dict serializes into one verified element."""
        serializer = vsa_ext.VsaVPoolTemplate()
        exemplar = dict(
            id='vpool_id',
            vsaId='vpool_vsa_id',
            name='vpool_vsa_name',
            displayName='vpool_display_name',
            displayDescription='vpool_display_desc',
            driveCount=24,
            driveIds=['drive1', 'drive2', 'drive3'],
            protection='protected',
            stripeSize=1024,
            stripeWidth=2048,
            createTime=datetime.datetime.now(),
            status='available')
        text = serializer.serialize(dict(vpool=exemplar))
        print text
        tree = etree.fromstring(text)
        self._verify_vpool(exemplar, tree)
    def test_vpool_index_serializer(self):
        """A vpool list serializes to <vpools> with one child per pool."""
        serializer = vsa_ext.VsaVPoolsTemplate()
        exemplar = [dict(
                id='vpool1_id',
                vsaId='vpool1_vsa_id',
                name='vpool1_vsa_name',
                displayName='vpool1_display_name',
                displayDescription='vpool1_display_desc',
                driveCount=24,
                driveIds=['drive1', 'drive2', 'drive3'],
                protection='protected',
                stripeSize=1024,
                stripeWidth=2048,
                createTime=datetime.datetime.now(),
                status='available'),
            dict(
                id='vpool2_id',
                vsaId='vpool2_vsa_id',
                name='vpool2_vsa_name',
                displayName='vpool2_display_name',
                displayDescription='vpool2_display_desc',
                driveCount=42,
                driveIds=['drive4', 'drive5', 'drive6'],
                protection='protected',
                stripeSize=512,
                stripeWidth=256,
                createTime=datetime.datetime.now(),
                status='available')]
        text = serializer.serialize(dict(vpools=exemplar))
        print text
        tree = etree.fromstring(text)
        self.assertEqual('vpools', tree.tag)
        self.assertEqual(len(exemplar), len(tree))
        for idx, child in enumerate(tree):
            self._verify_vpool(exemplar[idx], child)

View File

@ -182,7 +182,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"ServerStartStop",
"SimpleTenantUsage",
"Users",
"VSAs",
"VirtualInterfaces",
"Volumes",
"VolumeTypes",

View File

@ -645,14 +645,7 @@ def stub_volume_delete(self, context, *args, **param):
def stub_volume_get(self, context, volume_id):
vol = stub_volume(volume_id)
if volume_id == '234':
meta = {'key': 'from_vsa_id', 'value': '123'}
vol['volume_metadata'].append(meta)
if volume_id == '345':
meta = {'key': 'to_vsa_id', 'value': '123'}
vol['volume_metadata'].append(meta)
return vol
return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):

View File

@ -1,626 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova.scheduler import vsa as vsa_sched
from nova.tests.scheduler import test_scheduler
from nova import utils
from nova.volume import volume_types
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
scheduled_volumes = []
scheduled_volume = {}
global_volume = {}
def fake_rpc_cast(*args, **kwargs):
    """No-op replacement for rpc.cast so tests never touch the message bus."""
    return None
class FakeVsaLeastUsedScheduler(vsa_sched.VsaSchedulerLeastUsedHost):
    """Least-used-host scheduler under test; nothing overridden yet."""
    pass
class FakeVsaMostAvailCapacityScheduler(vsa_sched.VsaSchedulerMostAvailCapacity):
    """Most-available-capacity scheduler under test; nothing overridden yet."""
    pass
class VsaSchedulerTestCase(test_scheduler.SchedulerTestCase):
    """Exercise the VSA least-used-host scheduler against fabricated
    per-host drive capability reports.

    The helpers build a synthetic ``service_states`` mapping (one entry
    per host, each carrying ``drive_qos_info`` per drive type) and stub
    out the db/rpc layers; the test methods then check which hosts the
    scheduler picks.  Results are communicated through the module-level
    ``scheduled_volumes`` / ``scheduled_volume`` / ``global_volume``
    globals written by the fakes.
    """
    driver_cls = FakeVsaLeastUsedScheduler
    def setUp(self):
        """Stub rpc, driver internals and db calls with local fakes."""
        super(VsaSchedulerTestCase, self).setUp()
        self.host_num = 10
        self.drive_type_num = 5
        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
        self.stubs.Set(self.driver,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(self.driver,
                       '_provision_volume', self._fake_provision_volume)
        self.stubs.Set(db, 'vsa_update', self._fake_vsa_update)
        self.stubs.Set(db, 'volume_get', self._fake_volume_get)
        self.stubs.Set(db, 'volume_update', self._fake_volume_update)
        # Volume types created by tests; destroyed again in tearDown.
        self.created_types_lst = []
    def tearDown(self):
        """Remove any volume types the test created."""
        for name in self.created_types_lst:
            volume_types.destroy(self.context.elevated(), name)
        super(VsaSchedulerTestCase, self).tearDown()
    def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
        """Build a request_spec of ``num_vols`` volumes of drive type
        ``drive_ix`` (size 0 means whole-drive allocation)."""
        volume_params = []
        for i in range(num_vols):
            name = 'name_' + str(i)
            try:
                volume_types.create(self.context.elevated(), name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
                self.created_types_lst.append(name)
            except exception.VolumeTypeExists:
                # type is already created
                pass
            volume_type = volume_types.get_volume_type_by_name(self.context,
                                                               name)
            volume = {'size': size,
                      'snapshot_id': None,
                      'name': 'vol_' + str(i),
                      'description': None,
                      'volume_type_id': volume_type['id']}
            volume_params.append(volume)
        return {'num_volumes': len(volume_params),
                'vsa_id': 123,
                'volumes': volume_params}
    def _generate_default_service_states(self):
        """Fabricate the per-host capability report the scheduler consumes.

        Host i reports, for each drive type j, 2*(init_num_drives+i) total
        drives with i of them occupied; hosts in exclude_host_list are
        omitted entirely.
        """
        service_states = {}
        for i in range(self.host_num):
            host = {}
            hostname = 'host_' + str(i)
            if hostname in self.exclude_host_list:
                continue
            host['volume'] = {'timestamp': utils.utcnow(),
                              'drive_qos_info': {}}
            for j in range(self.drive_type_start_ix,
                           self.drive_type_start_ix + self.drive_type_num):
                dtype = {}
                dtype['Name'] = 'name_' + str(j)
                dtype['DriveType'] = 'type_' + str(j)
                dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
                dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
                dtype['TotalCapacity'] = (dtype['TotalDrives'] *
                                          dtype['DriveCapacity'])
                dtype['AvailableCapacity'] = ((dtype['TotalDrives'] - i) *
                                              dtype['DriveCapacity'])
                dtype['DriveRpm'] = 7200
                dtype['DifCapable'] = 0
                dtype['SedCapable'] = 0
                dtype['PartitionDrive'] = {
                            'PartitionSize': 0,
                            'NumOccupiedPartitions': 0,
                            'NumFreePartitions': 0}
                dtype['FullDrive'] = {
                            'NumFreeDrives': dtype['TotalDrives'] - i,
                            'NumOccupiedDrives': i}
                host['volume']['drive_qos_info'][dtype['Name']] = dtype
            service_states[hostname] = host
        return service_states
    def _print_service_states(self):
        """Debug aid: log per-host drive usage from self.service_states."""
        for host, host_val in self.service_states.iteritems():
            LOG.info(_("Host %s"), host)
            total_used = 0
            total_available = 0
            qos = host_val['volume']['drive_qos_info']
            for k, d in qos.iteritems():
                LOG.info("\t%s: type %s: drives (used %2d, total %2d) "
                         "size %3d, total %4d, used %4d, avail %d",
                         k,
                         d['DriveType'],
                         d['FullDrive']['NumOccupiedDrives'],
                         d['TotalDrives'],
                         vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
                         vsa_sched.BYTES_TO_GB(d['TotalCapacity'] -
                                               d['AvailableCapacity']),
                         vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
                total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] -
                                                    d['AvailableCapacity'])
                total_available += vsa_sched.BYTES_TO_GB(
                                                    d['AvailableCapacity'])
            LOG.info("Host %s: used %d, avail %d",
                     host, total_used, total_available)
    def _set_service_states(self, host_num,
                            drive_type_start_ix, drive_type_num,
                            init_num_drives=10,
                            exclude_host_list=[]):
        """Record topology parameters and regenerate self.service_states."""
        self.host_num = host_num
        self.drive_type_start_ix = drive_type_start_ix
        self.drive_type_num = drive_type_num
        self.exclude_host_list = exclude_host_list
        self.init_num_drives = init_num_drives
        self.service_states = self._generate_default_service_states()
    def _get_service_states(self):
        return self.service_states
    def _fake_get_service_states(self):
        # Indirection so tests can swap self.service_states at will.
        return self._get_service_states()
    def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
        """Record what the scheduler asked to provision (no real work)."""
        global scheduled_volumes
        scheduled_volumes.append(dict(vol=vol,
                                      vsa_id=vsa_id,
                                      az=availability_zone))
        name = vol['name']
        host = vol['host']
        LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
                  locals())
        LOG.debug(_("\t vol=%(vol)s"), locals())
    def _fake_vsa_update(self, context, vsa_id, values):
        LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "
                    "values=%(values)s"), locals())
    def _fake_volume_create(self, context, options):
        """db.volume_create fake: stash the options in global_volume."""
        LOG.debug(_("Test: Volume create: %s"), options)
        options['id'] = 123
        global global_volume
        global_volume = options
        return options
    def _fake_volume_get(self, context, volume_id):
        """db.volume_get fake backed by the global_volume dict."""
        LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
        global global_volume
        global_volume['id'] = volume_id
        global_volume['availability_zone'] = None
        return global_volume
    def _fake_volume_update(self, context, volume_id, values):
        """db.volume_update fake: capture the chosen host."""
        LOG.debug(_("Test: Volume update request: id=%(volume_id)s "
                    "values=%(values)s"), locals())
        global scheduled_volume
        scheduled_volume = {'id': volume_id, 'host': values['host']}
    def _fake_service_get_by_args(self, context, host, binary):
        return {'host': 'fake_host', 'disabled': False}
    def _fake_service_is_up_True(self, service):
        return True
    def _fake_service_is_up_False(self, service):
        return False
    def test_vsa_sched_create_volumes_simple(self):
        """Least-used picks the least-occupied eligible hosts in order."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
        # Each chosen host must have exactly one drive moved free->occupied.
        cur = self._get_service_states()
        for host in ['host_0', 'host_2', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
    def test_vsa_sched_no_drive_type(self):
        """Requesting an unreported drive type finds no valid host."""
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
        self.assertRaises(exception.NoValidHost,
                          self.driver.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)
    def test_vsa_sched_no_enough_drives(self):
        """When capacity runs out mid-request, partial claims are rolled back."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=3,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=0)
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
        self.assertRaises(exception.NoValidHost,
                          self.driver.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)
        # check that everything was returned back
        cur = self._get_service_states()
        for k, v in prev.iteritems():
            self.assertEqual(prev[k]['volume']['drive_qos_info'],
                             cur[k]['volume']['drive_qos_info'])
    def test_vsa_sched_wrong_topic(self):
        """Capability reports under a non-volume topic are ignored."""
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1)
        states = self._get_service_states()
        new_states = {}
        new_states['host_0'] = {'compute': states['host_0']['volume']}
        self.service_states = new_states
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
        self.assertRaises(exception.NoValidHost,
                          self.driver.schedule_create_volumes,
                          self.context,
                          request_spec,
                          availability_zone=None)
    def test_vsa_sched_provision_volume(self):
        """The real _provision_volume path creates the db volume record."""
        global global_volume
        global_volume = {}
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
        # Drop the _provision_volume stub so the genuine code path runs.
        self.stubs.UnsetAll()
        self.stubs.Set(self.driver,
                       '_get_service_states', self._fake_get_service_states)
        self.stubs.Set(db, 'volume_create', self._fake_volume_create)
        self.stubs.Set(db, 'volume_update', self._fake_volume_update)
        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self.assertEqual(request_spec['volumes'][0]['name'],
                         global_volume['display_name'])
    def test_vsa_sched_no_free_drives(self):
        """Once all drives are claimed, further requests raise NoValidHost."""
        self._set_service_states(host_num=1,
                                 drive_type_start_ix=0,
                                 drive_type_num=1,
                                 init_num_drives=1)
        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        cur = self._get_service_states()
        cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0']
        self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1)
        new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0)
        # Consume the last free drive with a second scheduling pass.
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self._print_service_states()
        self.assertRaises(exception.NoValidHost,
                          self.driver.schedule_create_volumes,
                          self.context,
                          new_request,
                          availability_zone=None)
    def test_vsa_sched_forced_host(self):
        """'nova:host' AZ pins placement, subject to service liveness."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10)
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
        # No such service registered yet.
        self.assertRaises(exception.HostBinaryNotFound,
                          self.driver.schedule_create_volumes,
                          self.context.elevated(),
                          request_spec,
                          availability_zone="nova:host_5")
        self.stubs.Set(db,
                       'service_get_by_args', self._fake_service_get_by_args)
        self.stubs.Set(utils,
                       'service_is_up', self._fake_service_is_up_False)
        # Service exists but is down.
        self.assertRaises(exception.WillNotSchedule,
                          self.driver.schedule_create_volumes,
                          self.context.elevated(),
                          request_spec,
                          availability_zone="nova:host_5")
        self.stubs.Set(utils,
                       'service_is_up', self._fake_service_is_up_True)
        self.driver.schedule_create_volumes(self.context.elevated(),
                                            request_spec,
                                            availability_zone="nova:host_5")
        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5')
    def test_vsa_sched_create_volumes_partition(self):
        """Non-zero size requests consume drive partitions, not whole drives."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)
    def test_vsa_sched_create_single_volume_az(self):
        """A volume whose AZ names a live host lands on that host."""
        global scheduled_volume
        scheduled_volume = {}
        def _fake_volume_get_az(context, volume_id):
            LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals())
            return {'id': volume_id, 'availability_zone': 'nova:host_3'}
        self.stubs.Set(db, 'volume_get', _fake_volume_get_az)
        self.stubs.Set(db, 'service_get_by_args',
                       self._fake_service_get_by_args)
        self.stubs.Set(utils,
                       'service_is_up', self._fake_service_is_up_True)
        self.driver.schedule_create_volume(self.context.elevated(),
                                           123, availability_zone=None)
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_3')
    def test_vsa_sched_create_single_non_vsa_volume(self):
        """A volume without a VSA volume type cannot be scheduled here."""
        global scheduled_volume
        scheduled_volume = {}
        global global_volume
        global_volume = {}
        global_volume['volume_type_id'] = None
        self.assertRaises(exception.NoValidHost,
                          self.driver.schedule_create_volume,
                          self.context,
                          123,
                          availability_zone=None)
    def test_vsa_sched_create_single_volume(self):
        """Least-used places a single VSA volume on the emptiest host."""
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()
        global global_volume
        global_volume = {}
        drive_ix = 2
        name = 'name_' + str(drive_ix)
        volume_types.create(self.context.elevated(), name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
        self.created_types_lst.append(name)
        volume_type = volume_types.get_volume_type_by_name(self.context, name)
        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0
        self.driver.schedule_create_volume(self.context,
                                           123, availability_zone=None)
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_2')
class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
    """Re-run the scheduler suite with the most-available-capacity driver.

    Only the placement-expectation tests are overridden: this policy
    prefers the hosts with the MOST free capacity, so the expected hosts
    are the mirror image of the least-used cases.
    """
    driver_cls = FakeVsaMostAvailCapacityScheduler
    def test_vsa_sched_create_single_volume(self):
        """Single volume lands on the fullest-capacity host (host_9)."""
        global scheduled_volume
        scheduled_volume = {}
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_0', 'host_1'])
        prev = self._generate_default_service_states()
        global global_volume
        global_volume = {}
        drive_ix = 2
        name = 'name_' + str(drive_ix)
        volume_types.create(self.context.elevated(), name,
                            extra_specs={'type': 'vsa_drive',
                                         'drive_name': name,
                                         'drive_type': 'type_' + str(drive_ix),
                                         'drive_size': 1 + 100 * (drive_ix)})
        self.created_types_lst.append(name)
        volume_type = volume_types.get_volume_type_by_name(self.context, name)
        global_volume['volume_type_id'] = volume_type['id']
        global_volume['size'] = 0
        self.driver.schedule_create_volume(self.context,
                                           123, availability_zone=None)
        self.assertEqual(scheduled_volume['id'], 123)
        self.assertEqual(scheduled_volume['host'], 'host_9')
    def test_vsa_sched_create_volumes_simple(self):
        """Three volumes spread across the highest-capacity hosts 9, 8, 7."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=10,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=10,
                                 exclude_host_list=['host_1', 'host_3'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
        self._print_service_states()
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7')
        cur = self._get_service_states()
        for host in ['host_9', 'host_8', 'host_7']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
    def test_vsa_sched_create_volumes_partition(self):
        """Partitioned (sized) volumes go to hosts 4, 3, 1 under this policy."""
        global scheduled_volumes
        scheduled_volumes = []
        self._set_service_states(host_num=5,
                                 drive_type_start_ix=0,
                                 drive_type_num=5,
                                 init_num_drives=1,
                                 exclude_host_list=['host_0', 'host_2'])
        prev = self._generate_default_service_states()
        request_spec = self._get_vol_creation_request(num_vols=3,
                                                      drive_ix=3,
                                                      size=50)
        self.driver.schedule_create_volumes(self.context,
                                            request_spec,
                                            availability_zone=None)
        self.assertEqual(len(scheduled_volumes), 3)
        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4')
        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3')
        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1')
        cur = self._get_service_states()
        for host in ['host_1', 'host_3', 'host_4']:
            cur_dtype = cur[host]['volume']['drive_qos_info']['name_3']
            prev_dtype = prev[host]['volume']['drive_qos_info']['name_3']
            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumOccupiedPartitions'], 0)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumOccupiedPartitions'], 1)
            self.assertEqual(cur_dtype['PartitionDrive']
                                      ['NumFreePartitions'], 5)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['NumFreePartitions'], 0)
            self.assertEqual(prev_dtype['PartitionDrive']
                                       ['PartitionSize'], 0)

View File

@ -1,171 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from xml.etree import ElementTree
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import vsa
from nova.volume import volume_types
from nova.vsa import utils as vsa_utils
import nova.image.fake
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class VsaTestCase(test.TestCase):
# Unit tests for the VSA API: create/update/delete, storage parameter
# validation, and VC user-data generation.
def setUp(self):
super(VsaTestCase, self).setUp()
self.vsa_api = vsa.api.API()
# Raise quotas so multi-volume VSA creation does not hit limits.
self.flags(quota_volumes=100, quota_gigabytes=10000)
self.context = context.get_admin_context()
# Register one BE drive type that the storage tests reference by name.
volume_types.create(self.context,
'SATA_500_7200',
extra_specs={'type': 'vsa_drive',
'drive_name': 'SATA_500_7200',
'drive_type': 'SATA',
'drive_size': '500',
'drive_rpm': '7200'})
# Glance stub: any image resolves to id 1, except the sentinel
# 'wrong_image_name' which simulates a missing VC image.
def fake_show_by_name(meh, context, name):
if name == 'wrong_image_name':
LOG.debug(_("Test: Emulate wrong VSA name. Raise"))
raise exception.ImageNotFound
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.image.fake._FakeImageService,
'show_by_name',
fake_show_by_name)
def test_vsa_create_delete_defaults(self):
param = {'display_name': 'VSA name test'}
vsa_ref = self.vsa_api.create(self.context, **param)
self.assertEqual(vsa_ref['display_name'], param['display_name'])
self.vsa_api.delete(self.context, vsa_ref['id'])
def test_vsa_create_delete_check_in_db(self):
# Create/delete must be reflected in get_all() counts.
vsa_list1 = self.vsa_api.get_all(self.context)
vsa_ref = self.vsa_api.create(self.context)
vsa_list2 = self.vsa_api.get_all(self.context)
self.assertEqual(len(vsa_list2), len(vsa_list1) + 1)
self.vsa_api.delete(self.context, vsa_ref['id'])
vsa_list3 = self.vsa_api.get_all(self.context)
self.assertEqual(len(vsa_list3), len(vsa_list2) - 1)
def test_vsa_create_delete_high_vc_count(self):
# Requesting more VCs than max_vcs_in_vsa is clamped to the maximum.
param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
vsa_ref = self.vsa_api.create(self.context, **param)
self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
self.vsa_api.delete(self.context, vsa_ref['id'])
def test_vsa_create_wrong_image_name(self):
param = {'image_name': 'wrong_image_name'}
self.assertRaises(exception.ImageNotFound,
self.vsa_api.create, self.context, **param)
def test_vsa_create_db_error(self):
# DB-layer failure during vsa_create must propagate to the caller.
def fake_vsa_create(context, options):
LOG.debug(_("Test: Emulate DB error. Raise"))
raise exception.Error
self.stubs.Set(nova.db, 'vsa_create', fake_vsa_create)
self.assertRaises(exception.Error,
self.vsa_api.create, self.context)
def test_vsa_create_wrong_storage_params(self):
# Invalid storage specs must fail AND leave no partial VSA behind.
vsa_list1 = self.vsa_api.get_all(self.context)
param = {'storage': [{'stub': 1}]}
self.assertRaises(exception.InvalidVolumeType,
self.vsa_api.create, self.context, **param)
vsa_list2 = self.vsa_api.get_all(self.context)
self.assertEqual(len(vsa_list2), len(vsa_list1))
param = {'storage': [{'drive_name': 'wrong name'}]}
self.assertRaises(exception.InvalidVolumeType,
self.vsa_api.create, self.context, **param)
def test_vsa_create_with_storage(self, multi_vol_creation=True):
"""Test creation of VSA with BE storage"""
self.flags(vsa_multi_vol_creation=multi_vol_creation)
# Dedicated storage: one volume per requested drive.
param = {'storage': [{'drive_name': 'SATA_500_7200',
'num_drives': 3}]}
vsa_ref = self.vsa_api.create(self.context, **param)
self.assertEqual(vsa_ref['vol_count'], 3)
self.vsa_api.delete(self.context, vsa_ref['id'])
# Shared storage: drives are split into partitions, so the volume
# count is total capacity / vsa_part_size_gb (15 here).
param = {'storage': [{'drive_name': 'SATA_500_7200',
'num_drives': 3}],
'shared': True}
vsa_ref = self.vsa_api.create(self.context, **param)
self.assertEqual(vsa_ref['vol_count'], 15)
self.vsa_api.delete(self.context, vsa_ref['id'])
def test_vsa_create_with_storage_single_volumes(self):
# Same scenario but with one-by-one volume creation.
self.test_vsa_create_with_storage(multi_vol_creation=False)
def test_vsa_update(self):
vsa_ref = self.vsa_api.create(self.context)
# Over-limit vc_count updates are clamped, in-range values applied.
param = {'vc_count': FLAGS.max_vcs_in_vsa + 1}
vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa)
param = {'vc_count': 2}
vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param)
self.assertEqual(vsa_ref['vc_count'], 2)
self.vsa_api.delete(self.context, vsa_ref['id'])
def test_vsa_generate_user_data(self):
self.flags(vsa_multi_vol_creation=False)
param = {'display_name': 'VSA name test',
'display_description': 'VSA desc test',
'vc_count': 2,
'storage': [{'drive_name': 'SATA_500_7200',
'num_drives': 3}]}
vsa_ref = self.vsa_api.create(self.context, **param)
volumes = self.vsa_api.get_all_vsa_drives(self.context,
vsa_ref['id'])
# User data is base64-encoded XML describing the VSA to the VC image.
user_data = vsa_utils.generate_user_data(vsa_ref, volumes)
user_data = base64.b64decode(user_data)
LOG.debug(_("Test: user_data = %s"), user_data)
elem = ElementTree.fromstring(user_data)
self.assertEqual(elem.findtext('name'),
param['display_name'])
self.assertEqual(elem.findtext('description'),
param['display_description'])
self.assertEqual(elem.findtext('vc_count'),
str(param['vc_count']))
self.vsa_api.delete(self.context, vsa_ref['id'])

View File

@ -1,133 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import flags
from nova.vsa import api as vsa_api
from nova import volume
from nova import context
from nova import test
from nova import log as logging
import nova.image.fake
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class VsaVolumesTestCase(test.TestCase):
    """Tests for front-end (FE) volumes associated with a VSA."""

    def setUp(self):
        super(VsaVolumesTestCase, self).setUp()
        self.vsa_api = vsa_api.API()
        self.volume_api = volume.API()
        self.context = context.get_admin_context()
        self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context)

        def fake_show_by_name(meh, context, name):
            # Minimal glance stub: any image name resolves to image id 1.
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.image.fake._FakeImageService,
                       'show_by_name',
                       fake_show_by_name)

        param = {'display_name': 'VSA name test'}
        vsa_ref = self.vsa_api.create(self.context, **param)
        self.vsa_id = vsa_ref['id']

    def tearDown(self):
        # Remove the VSA created in setUp (also force-deletes its volumes).
        if self.vsa_id:
            self.vsa_api.delete(self.context, self.vsa_id)
        super(VsaVolumesTestCase, self).tearDown()

    def _default_volume_param(self):
        """Return creation kwargs for an FE volume bound to this VSA."""
        return {
            'size': 1,
            'snapshot': None,
            'name': 'Test volume name',
            'description': 'Test volume desc name',
            'volume_type': self.default_vol_type,
            'metadata': {'from_vsa_id': self.vsa_id}
            }

    def _get_all_volumes_by_vsa(self):
        """List all volumes whose metadata ties them to this VSA."""
        search_opts = {'metadata': {"from_vsa_id": str(self.vsa_id)}}
        return self.volume_api.get_all(self.context, search_opts=search_opts)

    def test_vsa_volume_create_delete(self):
        """Check if volume properly created and deleted."""
        volume_param = self._default_volume_param()
        volume_ref = self.volume_api.create(self.context, **volume_param)

        self.assertEqual(volume_ref['display_name'],
                         volume_param['name'])
        self.assertEqual(volume_ref['display_description'],
                         volume_param['description'])
        self.assertEqual(volume_ref['size'],
                         volume_param['size'])
        self.assertEqual(volume_ref['status'],
                         'creating')

        vols2 = self._get_all_volumes_by_vsa()
        self.assertEqual(1, len(vols2))
        volume_ref = vols2[0]

        self.assertEqual(volume_ref['display_name'],
                         volume_param['name'])
        self.assertEqual(volume_ref['display_description'],
                         volume_param['description'])
        self.assertEqual(volume_ref['size'],
                         volume_param['size'])
        self.assertEqual(volume_ref['status'],
                         'creating')

        # A volume must be 'available' before the API accepts deletion.
        self.volume_api.update(self.context, volume_ref,
                               {'status': 'available', 'host': 'fake'})
        volume_ref = self.volume_api.get(self.context, volume_ref['id'])
        self.volume_api.delete(self.context, volume_ref)

        vols3 = self._get_all_volumes_by_vsa()
        # BUG FIX: the original re-asserted len(vols2) here, checking the
        # stale pre-delete list instead of the freshly fetched vols3.
        self.assertEqual(1, len(vols3))
        volume_ref = vols3[0]
        self.assertEqual(volume_ref['status'],
                         'deleting')

    def test_vsa_volume_delete_nonavail_volume(self):
        """Check volume deletion in different states."""
        volume_param = self._default_volume_param()
        volume_ref = self.volume_api.create(self.context, **volume_param)

        # An in-use volume may not be deleted through the regular API.
        self.volume_api.update(self.context, volume_ref,
                               {'status': 'in-use', 'host': 'fake'})
        volume_ref = self.volume_api.get(self.context, volume_ref['id'])
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.delete,
                          self.context, volume_ref)

    def test_vsa_volume_delete_vsa_with_volumes(self):
        """Check volume deletion in different states."""
        vols1 = self._get_all_volumes_by_vsa()
        for _i in range(3):
            self.volume_api.create(self.context,
                                   **self._default_volume_param())

        vols2 = self._get_all_volumes_by_vsa()
        self.assertEqual(len(vols1) + 3, len(vols2))

        # Deleting the VSA force-deletes every volume attached to it.
        self.vsa_api.delete(self.context, self.vsa_id)
        vols3 = self._get_all_volumes_by_vsa()
        self.assertEqual(len(vols1), len(vols3))

View File

@ -20,9 +20,7 @@ Drivers for volumes.
"""
import os
import time
from xml.etree import ElementTree
from nova import exception
from nova import flags
@ -30,7 +28,6 @@ from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova.volume import iscsi
from nova.volume import volume_types
LOG = logging.getLogger(__name__)
@ -697,324 +694,5 @@ class LoggingVolumeDriver(VolumeDriver):
return matches
class ZadaraBEDriver(ISCSIDriver):
"""Performs actions to configure Zadara BE module."""
# Dispatch convention used throughout: non-VSA volumes fall through to the
# plain ISCSIDriver implementation; FE "vsa_volume" objects are no-ops on
# the back end; only BE "vsa_drive" objects are handled here via the
# external /var/lib/zadara/bin/zadara_sncfg tool.
def _is_vsa_volume(self, volume):
return volume_types.is_vsa_volume(volume['volume_type_id'])
def _is_vsa_drive(self, volume):
return volume_types.is_vsa_drive(volume['volume_type_id'])
def _not_vsa_volume_or_drive(self, volume):
"""Returns True if volume is not VSA BE volume."""
if not volume_types.is_vsa_object(volume['volume_type_id']):
LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
return True
else:
return False
def check_for_setup_error(self):
"""No setup necessary for Zadara BE."""
pass
""" Volume Driver methods """
def create_volume(self, volume):
"""Creates BE volume."""
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).create_volume(volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s creation - do nothing"),
volume['name'])
return
# Size 0 is a sentinel meaning "use the whole partition/drive".
if int(volume['size']) == 0:
sizestr = '0' # indicates full-partition
else:
sizestr = '%s' % (int(volume['size']) << 30) # size in bytes
# Set the qos-str to default type sas
qosstr = 'SAS_1000'
volume_type = volume_types.get_volume_type(None,
volume['volume_type_id'])
if volume_type is not None:
qosstr = '_'.join([volume_type['extra_specs']['drive_type'],
volume_type['extra_specs']['drive_size']])
# The owning VSA id travels in the volume metadata rows as 'to_vsa_id'.
vsa_id = None
for i in volume.get('volume_metadata'):
if i['key'] == 'to_vsa_id':
vsa_id = i['value']
break
try:
self._execute('/var/lib/zadara/bin/zadara_sncfg',
'create_qospart',
'--qos', qosstr,
'--pname', volume['name'],
'--psize', sizestr,
'--vsaid', vsa_id,
run_as_root=True,
check_exit_code=0)
except exception.ProcessExecutionError:
LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
raise
LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
def delete_volume(self, volume):
"""Deletes BE volume."""
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).delete_volume(volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"),
volume['name'])
return
try:
self._execute('/var/lib/zadara/bin/zadara_sncfg',
'delete_partition',
'--pname', volume['name'],
run_as_root=True,
check_exit_code=0)
except exception.ProcessExecutionError:
# NOTE(review): deletion failures are swallowed (best-effort),
# unlike create_volume which re-raises.
LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
return
# NOTE(review): "suceeded" typo lives in the log string; left as-is
# since runtime strings must not change in a doc-only pass.
LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name'])
def _discover_volume(self, context, volume):
"""Discover volume on a remote host."""
iscsi_properties = self._get_iscsi_properties(volume)
if not iscsi_properties['target_discovered']:
self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
"node.session.auth.authmethod",
iscsi_properties['auth_method'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.username",
iscsi_properties['auth_username'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.password",
iscsi_properties['auth_password'])
self._run_iscsiadm(iscsi_properties, ("--login", ))
self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn'],
iscsi_properties['target_lun']))
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
while not os.path.exists(mount_device):
if tries >= FLAGS.num_iscsi_scan_tries:
raise exception.Error(_("iSCSI device not found at %s") %
(mount_device))
LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
"Will rescan & retry. Try number: %(tries)s") %
locals())
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(iscsi_properties, ("--rescan", ))
tries = tries + 1
if not os.path.exists(mount_device):
# Quadratic back-off between rescans.
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(mount_device)s "
"(after %(tries)s rescans)") %
locals())
return mount_device
def local_path(self, volume):
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).local_path(volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s local path call - call discover"),
volume['name'])
# NOTE(vish): Copied discover from iscsi_driver since it is used
# but this should probably be refactored into a common
# area because it is used in libvirt driver.
return self._discover_volume(None, volume)
# BE drives have no local path.
raise exception.Error(_("local_path not supported"))
def ensure_export(self, context, volume):
"""ensure BE export for a volume"""
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).ensure_export(context, volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"),
volume['name'])
return
try:
iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
except exception.NotFound:
LOG.info(_("Skipping ensure_export. No iscsi_target " +
"provisioned for volume: %d"), volume['id'])
return
try:
ret = self._common_be_export(context, volume, iscsi_target)
except exception.ProcessExecutionError:
return
return ret
def create_export(self, context, volume):
"""create BE export for a volume"""
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).create_export(context, volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s create export - do nothing"),
volume['name'])
return
self._ensure_iscsi_targets(context, volume['host'])
iscsi_target = self.db.volume_allocate_iscsi_target(context,
volume['id'],
volume['host'])
try:
ret = self._common_be_export(context, volume, iscsi_target)
except Exception:
raise exception.ProcessExecutionError
return ret
def remove_export(self, context, volume):
"""Removes BE export for a volume."""
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).remove_export(context, volume)
if self._is_vsa_volume(volume):
LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"),
volume['name'])
return
try:
iscsi_target = self.db.volume_get_iscsi_target_num(context,
volume['id'])
except exception.NotFound:
LOG.info(_("Skipping remove_export. No iscsi_target " +
"provisioned for volume: %d"), volume['id'])
return
try:
self._execute('/var/lib/zadara/bin/zadara_sncfg',
'remove_export',
'--pname', volume['name'],
'--tid', iscsi_target,
run_as_root=True,
check_exit_code=0)
except exception.ProcessExecutionError:
LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
return
def create_snapshot(self, snapshot):
"""Nothing required for snapshot"""
# NOTE(review): BUG — 'volume' is undefined in this method (parameter
# is 'snapshot'); reaching this branch raises NameError. Both lines
# below should reference 'snapshot' instead.
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).create_snapshot(volume)
pass
def delete_snapshot(self, snapshot):
"""Nothing required to delete a snapshot"""
# NOTE(review): same NameError as create_snapshot — 'volume' is
# undefined here; should be 'snapshot'.
if self._not_vsa_volume_or_drive(volume):
return super(ZadaraBEDriver, self).delete_snapshot(volume)
pass
""" Internal BE Volume methods """
def _common_be_export(self, context, volume, iscsi_target):
"""
Common logic that asks zadara_sncfg to setup iSCSI target/lun for
this volume
"""
(out, err) = self._execute('/var/lib/zadara/bin/zadara_sncfg',
'create_export',
'--pname', volume['name'],
'--tid', iscsi_target,
run_as_root=True,
check_exit_code=0)
# zadara_sncfg replies with XML; extract the storage node's IP and
# IQN to build the provider_location for the DB model.
result_xml = ElementTree.fromstring(out)
response_node = result_xml.find("Sn")
if response_node is None:
msg = "Malformed response from zadara_sncfg"
raise exception.Error(msg)
sn_ip = response_node.findtext("SnIp")
sn_iqn = response_node.findtext("IqnName")
model_update = {}
model_update['provider_location'] = _iscsi_location(
sn_ip, iscsi_target, sn_iqn)
return model_update
def _get_qosgroup_summary(self):
"""gets the list of qosgroups from Zadara BE"""
try:
(out, err) = self._execute('/var/lib/zadara/bin/zadara_sncfg',
'get_qosgroups_xml',
run_as_root=True,
check_exit_code=0)
except exception.ProcessExecutionError:
LOG.debug(_("Failed to retrieve QoS info"))
return {}
qos_groups = {}
result_xml = ElementTree.fromstring(out)
for element in result_xml.findall('QosGroup'):
qos_group = {}
# get the name of the group.
# If we cannot find it, forget this element
group_name = element.findtext("Name")
if not group_name:
continue
# loop through all child nodes & fill up attributes of this group
for child in element.getchildren():
# two types of elements - property of qos-group & sub property
# classify them accordingly
if child.text:
qos_group[child.tag] = (int(child.text)
if child.text.isdigit()
else child.text)
else:
subelement = {}
for subchild in child.getchildren():
subelement[subchild.tag] = (int(subchild.text)
if subchild.text.isdigit()
else subchild.text)
qos_group[child.tag] = subelement
# Now add this group to the master qos_groups
qos_groups[group_name] = qos_group
return qos_groups
def get_volume_stats(self, refresh=False):
"""Return the current state of the volume service. If 'refresh' is
True, run the update first."""
# NOTE(review): 'refresh' is accepted but ignored — the qosgroup
# summary is re-fetched on every call.
drive_info = self._get_qosgroup_summary()
return {'drive_qos_info': drive_info}
def _iscsi_location(ip, target, iqn, lun=None):
    """Build a provider_location string: "<ip>:<port>,<target> <iqn> <lun>"."""
    fields = (ip, FLAGS.iscsi_port, target, iqn, lun)
    return "%s:%s,%s %s %s" % fields

View File

@ -136,36 +136,15 @@ class VolumeManager(manager.SchedulerDependentManager):
with utils.save_and_reraise_exception():
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
self._notify_vsa(context, volume_ref, 'error')
now = utils.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
self._notify_vsa(context, volume_ref, 'available')
self._reset_stats()
return volume_id
def _notify_vsa(self, context, volume_ref, status):
# Notify the owning VSA manager (over the vsa RPC topic) that one of
# its BE drive volumes reached a new status. No-op for untyped volumes
# or volumes that are not VSA drives.
if volume_ref['volume_type_id'] is None:
return
if volume_types.is_vsa_drive(volume_ref['volume_type_id']):
# The owning VSA id is carried in the metadata rows as 'to_vsa_id'.
vsa_id = None
for i in volume_ref.get('volume_metadata'):
if i['key'] == 'to_vsa_id':
vsa_id = int(i['value'])
break
if vsa_id:
rpc.cast(context,
FLAGS.vsa_topic,
{"method": "vsa_volume_created",
"args": {"vol_id": volume_ref['id'],
"vsa_id": vsa_id,
"status": status}})
def delete_volume(self, context, volume_id):
"""Deletes and unexports volume."""
context = context.elevated()

View File

@ -123,24 +123,3 @@ def is_key_value_present(volume_type_id, key, value, volume_type=None):
return False
else:
return True
def is_vsa_drive(volume_type_id, volume_type=None):
    """Return True if the volume type marks a VSA back-end drive."""
    result = is_key_value_present(volume_type_id, 'type',
                                  'vsa_drive', volume_type)
    return result
def is_vsa_volume(volume_type_id, volume_type=None):
    """Return True if the volume type marks a VSA front-end volume."""
    result = is_key_value_present(volume_type_id, 'type',
                                  'vsa_volume', volume_type)
    return result
def is_vsa_object(volume_type_id):
    """Return True if the type id refers to a VSA drive or a VSA volume."""
    if volume_type_id is None:
        return False
    # Fetch the type once and reuse it for both checks.
    vol_type = get_volume_type(context.get_admin_context(), volume_type_id)
    return (is_vsa_drive(volume_type_id, vol_type)
            or is_vsa_volume(volume_type_id, vol_type))

View File

@ -1,16 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,412 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to Virtual Storage Arrays (VSAs).
Experimental code. Requires special VSA image.
For assistance and guidelines pls contact
Zadara Storage Inc & OpenStack community
"""
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import volume
from nova.compute import instance_types
from nova.db import base
from nova.volume import volume_types
class VsaState:
    """Lifecycle states recorded in a VSA's status field."""

    # Not ready yet: the VSA is still being created.
    CREATING = 'creating'
    # All BE volumes were created; VCs are being launched.
    LAUNCHING = 'launching'
    # Fully created and ready for use.
    CREATED = 'created'
    # Only some BE drives were allocated.
    PARTIAL = 'partial'
    # Some BE storage allocations failed.
    FAILED = 'failed'
    # The deletion procedure has started.
    DELETING = 'deleting'
# Module-level configuration for the VSA subsystem, registered with the
# global FLAGS object at import time.
vsa_opts = [
cfg.StrOpt('vsa_ec2_access_key',
default=None,
help='EC2 access key used by VSA for accessing nova'),
cfg.StrOpt('vsa_ec2_user_id',
default=None,
help='User ID used by VSA for accessing nova'),
cfg.BoolOpt('vsa_multi_vol_creation',
default=True,
help='Ask scheduler to create multiple volumes in one call'),
cfg.StrOpt('vsa_volume_type_name',
default='VSA volume type',
help='Name of volume type associated with FE VSA volumes'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(vsa_opts)
LOG = logging.getLogger(__name__)
class API(base.Base):
"""API for interacting with the VSA manager."""
def __init__(self, compute_api=None, volume_api=None, **kwargs):
self.compute_api = compute_api or compute.API()
self.volume_api = volume_api or volume.API()
super(API, self).__init__(**kwargs)
def _check_volume_type_correctness(self, vol_type):
if (vol_type.get('extra_specs') is None or
vol_type['extra_specs'].get('type') != 'vsa_drive' or
vol_type['extra_specs'].get('drive_type') is None or
vol_type['extra_specs'].get('drive_size') is None):
msg = _("invalid drive data")
raise exception.InvalidVolumeType(reason=msg)
def _get_default_vsa_instance_type(self):
return instance_types.get_instance_type_by_name(
FLAGS.default_vsa_instance_type)
def _check_storage_parameters(self, context, vsa_name, storage,
shared, first_index=0):
"""
Translates storage array of disks to the list of volumes
:param storage: List of dictionaries with following keys:
disk_name, num_disks, size
:param shared: Specifies if storage is dedicated or shared.
For shared storage disks split into partitions
"""
volume_params = []
for node in storage:
name = node.get('drive_name', None)
num_disks = node.get('num_drives', 1)
if name is None:
msg = _("drive_name not defined")
raise exception.InvalidVolumeType(reason=msg)
try:
vol_type = volume_types.get_volume_type_by_name(context, name)
except exception.NotFound:
msg = _("invalid drive type name %s")
raise exception.InvalidVolumeType(reason=msg % name)
self._check_volume_type_correctness(vol_type)
# if size field present - override disk size specified in DB
size = int(node.get('size',
vol_type['extra_specs'].get('drive_size')))
if shared:
part_size = FLAGS.vsa_part_size_gb
total_capacity = num_disks * size
num_volumes = total_capacity / part_size
size = part_size
else:
num_volumes = num_disks
size = 0 # special handling for full drives
for i in range(num_volumes):
volume_name = "drive-%03d" % first_index
first_index += 1
volume_desc = 'BE volume for VSA %s type %s' % (vsa_name, name)
volume = {
'size': size,
'name': volume_name,
'description': volume_desc,
'volume_type_id': vol_type['id'],
}
volume_params.append(volume)
return volume_params
def create(self, context, display_name='', display_description='',
vc_count=1, instance_type=None, image_name=None,
availability_zone=None, storage=[], shared=None):
"""Provision VSA instance with compute instances and volumes
:param storage: List of dictionaries with following keys:
disk_name, num_disks, size
:param shared: Specifies if storage is dedicated or shared.
For shared storage disks split into partitions
"""
LOG.info(_("*** Experimental VSA code ***"))
if vc_count > FLAGS.max_vcs_in_vsa:
LOG.warning(_("Requested number of VCs (%d) is too high."
" Setting to default"), vc_count)
vc_count = FLAGS.max_vcs_in_vsa
if instance_type is None:
instance_type = self._get_default_vsa_instance_type()
if availability_zone is None:
availability_zone = FLAGS.storage_availability_zone
if storage is None:
storage = []
if not shared or shared == 'False':
shared = False
else:
shared = True
# check if image is ready before starting any work
if image_name is None:
image_name = FLAGS.vc_image_name
image_service = self.compute_api.image_service
vc_image = image_service.show_by_name(context, image_name)
vc_image_href = vc_image['id']
options = {
'display_name': display_name,
'display_description': display_description,
'project_id': context.project_id,
'availability_zone': availability_zone,
'instance_type_id': instance_type['id'],
'image_ref': vc_image_href,
'vc_count': vc_count,
'status': VsaState.CREATING,
}
LOG.info(_("Creating VSA: %s") % options)
# create DB entry for VSA instance
vsa_ref = self.db.vsa_create(context, options)
vsa_id = vsa_ref['id']
vsa_name = vsa_ref['name']
# check storage parameters
try:
volume_params = self._check_storage_parameters(context, vsa_name,
storage, shared)
except exception.InvalidVolumeType:
self.db.vsa_destroy(context, vsa_id)
raise
# after creating DB entry, re-check and set some defaults
updates = {}
if (not hasattr(vsa_ref, 'display_name') or
vsa_ref.display_name is None or
vsa_ref.display_name == ''):
updates['display_name'] = display_name = vsa_name
updates['vol_count'] = len(volume_params)
vsa_ref = self.update(context, vsa_id, **updates)
# create volumes
if FLAGS.vsa_multi_vol_creation:
if len(volume_params) > 0:
request_spec = {
'num_volumes': len(volume_params),
'vsa_id': str(vsa_id),
'volumes': volume_params,
}
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "create_volumes",
"args": {"topic": FLAGS.volume_topic,
"request_spec": request_spec,
"availability_zone": availability_zone}})
else:
# create BE volumes one-by-one
for vol in volume_params:
try:
vol_name = vol['name']
vol_size = vol['size']
vol_type_id = vol['volume_type_id']
LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "
"volume %(vol_name)s, %(vol_size)d GB, "
"type %(vol_type_id)s"), locals())
vol_type = volume_types.get_volume_type(context,
vol['volume_type_id'])
vol_ref = self.volume_api.create(context,
vol_size,
vol_name,
vol['description'],
None,
volume_type=vol_type,
metadata=dict(to_vsa_id=str(vsa_id)),
availability_zone=availability_zone)
except Exception:
self.update_vsa_status(context, vsa_id,
status=VsaState.PARTIAL)
raise
if len(volume_params) == 0:
# No BE volumes - ask VSA manager to start VCs
rpc.cast(context,
FLAGS.vsa_topic,
{"method": "create_vsa",
"args": {"vsa_id": str(vsa_id)}})
return vsa_ref
def update_vsa_status(self, context, vsa_id, status):
updates = dict(status=status)
LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"),
locals())
return self.update(context, vsa_id, **updates)
def update(self, context, vsa_id, **kwargs):
"""Updates the VSA instance in the datastore.
:param context: The security context
:param vsa_id: ID of the VSA instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: None
"""
LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
updatable_fields = ['status', 'vc_count', 'vol_count',
'display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
vc_count = kwargs.get('vc_count', None)
if vc_count is not None:
# VP-TODO(vladimir.p):
# This request may want to update number of VCs
# Get number of current VCs and add/delete VCs appropriately
vsa = self.get(context, vsa_id)
vc_count = int(vc_count)
if vc_count > FLAGS.max_vcs_in_vsa:
LOG.warning(_("Requested number of VCs (%d) is too high."
" Setting to default"), vc_count)
vc_count = FLAGS.max_vcs_in_vsa
if vsa['vc_count'] != vc_count:
self.update_num_vcs(context, vsa, vc_count)
changes['vc_count'] = vc_count
return self.db.vsa_update(context, vsa_id, changes)
def update_num_vcs(self, context, vsa, vc_count):
vsa_name = vsa['name']
old_vc_count = int(vsa['vc_count'])
if vc_count > old_vc_count:
add_cnt = vc_count - old_vc_count
LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."),
locals())
# VP-TODO(vladimir.p): actual code for adding new VCs
elif vc_count < old_vc_count:
del_cnt = old_vc_count - vc_count
LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."),
locals())
# VP-TODO(vladimir.p): actual code for deleting extra VCs
def _force_volume_delete(self, ctxt, volume):
"""Delete a volume, bypassing the check that it must be available."""
host = volume['host']
if not host:
# Deleting volume from database and skipping rpc.
self.db.volume_destroy(ctxt, volume['id'])
return
rpc.cast(ctxt,
self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
{"method": "delete_volume",
"args": {"volume_id": volume['id']}})
def delete_vsa_volumes(self, context, vsa_id, direction,
force_delete=True):
if direction == "FE":
volumes = self.get_all_vsa_volumes(context, vsa_id)
else:
volumes = self.get_all_vsa_drives(context, vsa_id)
for volume in volumes:
try:
vol_name = volume['name']
LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "
"volume %(vol_name)s"), locals())
self.volume_api.delete(context, volume)
except exception.InvalidVolume:
LOG.info(_("Unable to delete volume %s"), volume['name'])
if force_delete:
LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "
"%(direction)s volume %(vol_name)s"), locals())
self._force_volume_delete(context, volume)
def delete(self, context, vsa_id):
    """Terminate a VSA instance."""
    LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id)

    # Tear down FrontEnd volumes first, then BackEnd drives.
    for direction in ("FE", "BE"):
        self.delete_vsa_volumes(context, vsa_id, direction,
                                force_delete=True)

    # Remove every VC instance tagged with this VSA id.
    search_opts = {'metadata': dict(vsa_id=str(vsa_id))}
    for instance in self.compute_api.get_all(context,
                                             search_opts=search_opts):
        LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"),
                  {'vsa_id': vsa_id, 'name': instance['name']})
        self.compute_api.delete(context, instance['id'])

    # Finally remove the VSA record itself.
    self.db.vsa_destroy(context, vsa_id)
def get(self, context, vsa_id):
    """Return the database record for a single VSA."""
    return self.db.vsa_get(context, vsa_id)
def get_all(self, context):
    """List every VSA (admin context) or only the project's VSAs."""
    if not context.is_admin:
        return self.db.vsa_get_all_by_project(context, context.project_id)
    return self.db.vsa_get_all(context)
def get_vsa_volume_type(self, context):
    """Fetch the dedicated VSA volume type, creating it on first use."""
    name = FLAGS.vsa_volume_type_name
    try:
        return volume_types.get_volume_type_by_name(context, name)
    except exception.NotFound:
        # First caller creates the type, then re-reads it.
        volume_types.create(context, name,
                            extra_specs=dict(type='vsa_volume'))
        return volume_types.get_volume_type_by_name(context, name)
def get_all_vsa_instances(self, context, vsa_id):
    """Return all VC instances whose metadata ties them to this VSA."""
    search_opts = {'metadata': dict(vsa_id=str(vsa_id))}
    return self.compute_api.get_all(context, search_opts=search_opts)
def get_all_vsa_volumes(self, context, vsa_id):
    """Return all front-end volumes exported from this VSA."""
    search_opts = {'metadata': dict(from_vsa_id=str(vsa_id))}
    return self.volume_api.get_all(context, search_opts=search_opts)
def get_all_vsa_drives(self, context, vsa_id):
    """Return all back-end drives allocated to this VSA."""
    search_opts = {'metadata': dict(to_vsa_id=str(vsa_id))}
    return self.volume_api.get_all(context, search_opts=search_opts)

View File

@ -1,25 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Abstraction of the underlying connection to VC."""
from nova.vsa import fake
def get_connection():
    """Return an object that is able to talk to VCs."""
    return fake.FakeVcConnection()

View File

@ -1,22 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeVcConnection(object):
    """Do-nothing VC connection used when no real VC backend exists."""

    def init_host(self, host):
        """Accept any host; the fake has no state to initialize."""
        return None

View File

@ -1,181 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all processes relating to Virtual Storage Arrays (VSA).
**Related Flags**
"""
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova.openstack.common import cfg
from nova import volume
from nova import utils
from nova.compute import instance_types
from nova.vsa import utils as vsa_utils
from nova.vsa import api as vsa_api
# Pluggable VSA driver; the default factory returns the fake VC connection.
vsa_driver_opt = cfg.StrOpt('vsa_driver',
                            default='nova.vsa.connection.get_connection',
                            help='Driver to use for controlling VSAs')

FLAGS = flags.FLAGS
FLAGS.register_opt(vsa_driver_opt)

LOG = logging.getLogger(__name__)
class VsaManager(manager.SchedulerDependentManager):
    """Manages Virtual Storage Arrays (VSAs).

    Launches VC (virtual controller) instances for a VSA once all of its
    back-end drives have finished (or failed) provisioning.
    """

    def __init__(self, vsa_driver=None, *args, **kwargs):
        if not vsa_driver:
            vsa_driver = FLAGS.vsa_driver
        self.driver = utils.import_object(vsa_driver)
        self.compute_manager = utils.import_object(FLAGS.compute_manager)

        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self.vsa_api = vsa_api.API()

        # VC instances receive EC2 credentials through user-data; refuse
        # to start without them rather than fail mid-provisioning later.
        if (FLAGS.vsa_ec2_user_id is None or
                FLAGS.vsa_ec2_access_key is None):
            raise exception.VSANovaAccessParamNotFound()

        super(VsaManager, self).__init__(*args, **kwargs)

    def init_host(self):
        """Initialize the VSA driver for this host."""
        self.driver.init_host(host=self.host)
        super(VsaManager, self).init_host()

    @exception.wrap_exception()
    def create_vsa(self, context, vsa_id):
        """Called by API if there were no BE volumes assigned"""
        LOG.debug(_("Create call received for VSA %s"), vsa_id)

        vsa_id = int(vsa_id)    # just in case

        try:
            vsa = self.vsa_api.get(context, vsa_id)
        except Exception:
            msg = _("Failed to find VSA %(vsa_id)d") % locals()
            LOG.exception(msg)
            return

        return self._start_vcs(context, vsa)

    @exception.wrap_exception()
    def vsa_volume_created(self, context, vol_id, vsa_id, status):
        """Callback for volume creations"""
        LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "
                    "Status %(status)s"), locals())
        vsa_id = int(vsa_id)    # just in case

        # Get all volumes for this VSA and bail out if any drive is
        # still being created; this callback fires again for each one.
        drives = self.vsa_api.get_all_vsa_drives(context, vsa_id)
        for drive in drives:
            if drive['status'] == 'creating':
                vol_name = drive['name']
                vol_disp_name = drive['display_name']
                LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "
                            "in creating phase - wait"), locals())
                return

        try:
            vsa = self.vsa_api.get(context, vsa_id)
        except Exception:
            msg = _("Failed to find VSA %(vsa_id)d") % locals()
            LOG.exception(msg)
            return

        if len(drives) != vsa['vol_count']:
            cvol_real = len(drives)
            cvol_exp = vsa['vol_count']
            LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "
                        "(%(cvol_real)d of %(cvol_exp)d)"), locals())
            return

        # all volumes created (successfully or not)
        return self._start_vcs(context, vsa, drives)

    def _start_vcs(self, context, vsa, drives=None):
        """Start VCs for VSA.

        :param drives: list of drive records to verify before launching;
                       defaults to no drives (the create_vsa path).
        """
        # BUGFIX: the default was a shared mutable list (drives=[]).
        if drives is None:
            drives = []

        vsa_id = vsa['id']
        if vsa['status'] == vsa_api.VsaState.CREATING:
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           vsa_api.VsaState.LAUNCHING)
        else:
            # Only a CREATING VSA may transition to LAUNCHING.
            return

        # in _separate_ loop go over all volumes and mark as "attached"
        has_failed_volumes = False
        for drive in drives:
            vol_name = drive['name']
            vol_disp_name = drive['display_name']
            status = drive['status']
            LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "
                       "(%(vol_disp_name)s) is in %(status)s state"),
                     locals())
            if status == 'available':
                try:
                    # self.volume_api.update(context, volume,
                    #                        dict(attach_status="attached"))
                    pass
                except Exception as ex:
                    msg = _("Failed to update attach status for volume "
                            "%(vol_name)s. %(ex)s") % locals()
                    LOG.exception(msg)
            else:
                has_failed_volumes = True

        if has_failed_volumes:
            # Any bad drive fails the whole VSA: clean up and mark FAILED.
            LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
            self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True)
            self.vsa_api.update_vsa_status(context, vsa_id,
                                           vsa_api.VsaState.FAILED)
            return

        # create user-data record for VC
        storage_data = vsa_utils.generate_user_data(vsa, drives)

        instance_type = instance_types.get_instance_type(
            vsa['instance_type_id'])

        # now start the VC instance(s); the return value was unused.
        vc_count = vsa['vc_count']
        LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"),
                 locals())
        self.compute_api.create(context,
                instance_type,  # vsa['vsa_instance_type'],
                vsa['image_ref'],
                min_count=1,
                max_count=vc_count,
                display_name='vc-' + vsa['display_name'],
                display_description='VC for VSA ' + vsa['display_name'],
                availability_zone=vsa['availability_zone'],
                user_data=storage_data,
                metadata=dict(vsa_id=str(vsa_id)))

        self.vsa_api.update_vsa_status(context, vsa_id,
                                       vsa_api.VsaState.CREATED)

View File

@ -1,80 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from xml.etree import ElementTree
from nova import flags
FLAGS = flags.FLAGS
def generate_user_data(vsa, volumes):
    """Serialize VSA and volume details into base64-encoded XML user-data."""

    def _add_text(parent, tag, text):
        # One-shot helper: append a child element carrying plain text.
        elem = ElementTree.SubElement(parent, tag)
        elem.text = text
        return elem

    e_vsa = ElementTree.Element("vsa")
    _add_text(e_vsa, "id", str(vsa['id']))
    _add_text(e_vsa, "name", vsa['display_name'])
    _add_text(e_vsa, "description", vsa['display_description'])
    _add_text(e_vsa, "vc_count", str(vsa['vc_count']))
    _add_text(e_vsa, "auth_user", FLAGS.vsa_ec2_user_id)
    _add_text(e_vsa, "auth_access_key", FLAGS.vsa_ec2_access_key)

    e_volumes = ElementTree.SubElement(e_vsa, "volumes")
    for volume in volumes:
        loc = volume['provider_location']
        if loc is None:
            ip = ''
            iscsi_iqn = ''
            iscsi_portal = ''
        else:
            # provider_location format: "<ip>:<portal> <iqn>"
            (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
            (ip, iscsi_portal) = iscsi_target.split(":", 1)

        e_vol = ElementTree.SubElement(e_volumes, "volume")
        _add_text(e_vol, "id", str(volume['id']))
        _add_text(e_vol, "name", volume['name'])
        _add_text(e_vol, "display_name", volume['display_name'])
        _add_text(e_vol, "size_gb", str(volume['size']))
        _add_text(e_vol, "status", volume['status'])
        _add_text(e_vol, "ip", ip)
        _add_text(e_vol, "iscsi_iqn", iscsi_iqn)
        _add_text(e_vol, "iscsi_portal", iscsi_portal)
        _add_text(e_vol, "lun", '0')
        _add_text(e_vol, "sn_host", volume['host'])

    return base64.b64encode(ElementTree.tostring(e_vsa))

View File

@ -83,7 +83,6 @@ setuptools.setup(name='nova',
'bin/nova-rootwrap',
'bin/nova-scheduler',
'bin/nova-volume',
'bin/nova-vsa',
'bin/nova-xvpvncproxy',
'bin/stack',
'tools/nova-debug'],