unused modules removed
Change-Id: I0c4d7f8981f4968ba9951e46700fb7b5239aac06
This commit is contained in:
parent
e244766ec9
commit
53c01d49f4
|
@ -1,332 +0,0 @@
|
|||
# Copyright 2012 SINA Corporation
|
||||
# Copyright 2014 Cisco Systems, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
"""Extracts OpenStack config option info from module(s)."""
|
||||
|
||||
# NOTE(GheRivero): Copied from oslo_incubator before getting removed in
|
||||
# Change-Id: If15b77d31a8c615aad8fca30f6dd9928da2d08bb
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import imp
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import sys
|
||||
import textwrap
|
||||
|
||||
from oslo_config import cfg
|
||||
import oslo_i18n
|
||||
from oslo_utils import importutils
|
||||
import six
|
||||
import stevedore.named
|
||||
|
||||
|
||||
oslo_i18n.install('iotronic')
|
||||
|
||||
# String tags matched against an Opt subclass name (via OPTION_REGEX below).
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable type descriptions appended to each option's help text.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

# Extracts the Opt subclass name out of str(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Source-tree root, computed relative to this file's on-disk location.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
# Column width used when wrapping option help text in the sample config.
WORDWRAP_WIDTH = 60
|
||||
|
||||
|
||||
def raise_extension_exception(extmanager, ep, err):
    """Re-raise the exception from a failed extension load.

    Passed to stevedore as its ``on_load_failure_callback``; the three
    arguments satisfy that callback signature but are not inspected —
    the currently-handled exception is simply propagated.
    """
    raise
|
||||
|
||||
|
||||
def generate(argv):
    """Generate a sample configuration listing on stdout.

    Parses the given arguments, imports the requested modules/source
    files, discovers config options (directly and via 'oslo.config.opts'
    entry points), and prints every option grouped by config section.

    :param argv: argument strings, e.g. sys.argv[1:].
    :raises: RuntimeError if a listed source module cannot be imported.
    """
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    # -m and -l may be repeated; each occurrence appends to the list.
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        # The second path component is treated as the package name.
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    if parsed_args.modules:
        for module_name in parsed_args.modules:
            # Import failures are reported by _import_module (returns
            # None) and silently skipped here.
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
            on_load_failure_callback=raise_extension_exception
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    for pkg_name in pkg_names:
        mods = sorted(mods_by_pkg.get(pkg_name))
        for mod_str in mods:
            # Import a package by its package name, not its __init__.
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]

            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)

            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    # DEFAULT is always printed first; remaining groups follow sorted.
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])
|
||||
|
||||
|
||||
def _import_module(mod_str):
|
||||
try:
|
||||
if mod_str.startswith('bin.'):
|
||||
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
|
||||
return sys.modules[mod_str[4:]]
|
||||
else:
|
||||
return importutils.import_module(mod_str)
|
||||
except Exception as e:
|
||||
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
|
||||
return None
|
||||
|
||||
|
||||
def _is_in_group(opt, group):
|
||||
"""Check if opt is in group."""
|
||||
for value in group._opts.values():
|
||||
# NOTE(llu): Temporary workaround for bug #1262148, wait until
|
||||
# newly released oslo.config support '==' operator.
|
||||
if not(value['opt'] != opt):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _guess_groups(opt):
    """Return the name of the config group *opt* is registered under.

    :raises: RuntimeError when the option cannot be located in any
        group (e.g. it was registered twice in the same group).
    """
    # The DEFAULT group is checked before any named group.
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # Scan every named group attribute hanging off the global config.
    for candidate in cfg.CONF.values():
        if (isinstance(candidate, cfg.CONF.GroupAttr)
                and _is_in_group(opt, candidate._group)):
            return candidate._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
|
||||
|
||||
|
||||
def _list_opts(obj):
    """Discover config options on a module object.

    :param obj: a module (or any object) to inspect.
    :returns: iterable of (group_name, [Opt, ...]) pairs.
    """

    def _is_config_opt(candidate):
        # SubCommandOpt instances cannot appear in a sample config file.
        return (isinstance(candidate, cfg.Opt) and
                not isinstance(candidate, cfg.SubCommandOpt))

    # Modules may advertise their options explicitly via list_opts().
    if 'list_opts' in dir(obj):
        # NOTE(GheRivero): Options without a defined group must be
        # registered to the DEFAULT section.
        return [(section or 'DEFAULT', section_opts)
                for section, section_opts in getattr(obj, 'list_opts')()]

    # Otherwise scan the namespace for Opt instances and lists of them.
    found = []
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if _is_config_opt(attr_obj):
            found.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(_is_config_opt(item) for item in attr_obj)):
            found.extend(attr_obj)

    grouped = {}
    for opt in found:
        grouped.setdefault(_guess_groups(opt), []).append(opt)
    return grouped.items()
|
||||
|
||||
|
||||
def print_group_opts(group, opts_by_module):
    """Print a "[group]" config section listing each module's options.

    :param group: the section name to print.
    :param opts_by_module: list of (module_name, [Opt, ...]) tuples.
    """
    print("[%s]" % group)
    print('')
    for module_name, module_opts in opts_by_module:
        # Banner identifying where the following options came from.
        for banner_line in ('#',
                            '# Options defined in %s' % module_name,
                            '#',
                            ''):
            print(banner_line)
        for opt in module_opts:
            _print_opt(opt)
        print('')
|
||||
|
||||
|
||||
def _get_my_ip():
    """Best-effort lookup of this host's outbound IPv4 address.

    Opens a UDP socket "towards" a public address purely so the OS
    picks a local source address; connect() on a datagram socket sends
    no traffic.

    :returns: the local address string, or None on any socket error.
    """
    try:
        csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    except socket.error:
        return None
    try:
        csock.connect(('8.8.8.8', 80))
        (addr, port) = csock.getsockname()
        return addr
    except socket.error:
        return None
    finally:
        # Fix: the original closed the socket only on the success path,
        # leaking it whenever connect()/getsockname() raised.
        csock.close()
|
||||
|
||||
|
||||
def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host.

    Rewrites environment-specific default values (install prefix, source
    tree path, local IP address, hostname/FQDN) into generic placeholder
    values so the generated sample config is portable.

    NOTE: the elif chain order is significant — e.g. an exact
    hostname/fqdn match must be tested before the endswith() checks.

    :param name: the option name (used to special-case 'host' options).
    :param value: the stringified default value to sanitize.
    :returns: the sanitized default string.
    """
    hostname = socket.gethostname()
    fqdn = socket.getfqdn()
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
    elif value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    elif BASEDIR in value:
        return value.replace(BASEDIR, '')
    elif value == _get_my_ip():
        return '10.0.0.1'
    elif value in (hostname, fqdn):
        # Only 'host'-ish options get the generic service name; other
        # options that merely equal the hostname fall through unchanged.
        if 'host' in name:
            return 'iotronic'
    elif value.endswith(hostname):
        return value.replace(hostname, 'iotronic')
    elif value.endswith(fqdn):
        return value.replace(fqdn, 'iotronic')
    elif value.strip() != value:
        # Quote values with significant leading/trailing whitespace.
        return '"%s"' % value
    return value
|
||||
|
||||
|
||||
def _print_opt(opt):
    """Print one option as commented sample-config lines.

    Emits the wrapped help text (with the option type appended), any
    deprecated group/name aliases, and a commented '#name=default' line.
    Exits the process with status 1 if the option type cannot be
    determined or the default value cannot be rendered.

    :param opt: an oslo.config Opt instance.
    """
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    try:
        # Derive the option type tag (e.g. "StrOpt") from the class name.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                # A deprecated alias without an explicit group lives in
                # DEFAULT.
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        if opt_default is None:
            print('#%s=<None>' % opt_name)
        else:
            _print_type(opt_type, opt_name, opt_default)
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
|
||||
|
||||
|
||||
def _print_type(opt_type, opt_name, opt_default):
    """Print a commented '#name=value' sample line for one option.

    The default is rendered according to *opt_type*; an AssertionError
    is raised when the default's Python type does not match the
    declared option type.

    :param opt_type: one of the *OPT type tags (e.g. STROPT).
    :param opt_name: the option's config name.
    :param opt_default: the option's default value.
    """
    def emit(rendered):
        print('#%s=%s' % (opt_name, rendered))

    if opt_type == STROPT:
        assert(isinstance(opt_default, six.string_types))
        emit(_sanitize_default(opt_name, opt_default))
    elif opt_type == BOOLOPT:
        assert(isinstance(opt_default, bool))
        emit(str(opt_default).lower())
    elif opt_type == INTOPT:
        # bool is a subclass of int; reject it explicitly.
        assert(isinstance(opt_default, int) and
               not isinstance(opt_default, bool))
        emit(opt_default)
    elif opt_type == FLOATOPT:
        assert(isinstance(opt_default, float))
        emit(opt_default)
    elif opt_type == LISTOPT:
        assert(isinstance(opt_default, list))
        emit(','.join(opt_default))
    elif opt_type == DICTOPT:
        assert(isinstance(opt_default, dict))
        emit(','.join(str(key) + ':' + str(value)
                      for (key, value) in opt_default.items()))
    elif opt_type == MULTISTROPT:
        assert(isinstance(opt_default, list))
        # An empty multi-valued default still prints one blank line.
        for default in (opt_default or ['']):
            emit(default)
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: generate a sample config from argv."""
    argv = sys.argv[1:]
    generate(argv)


if __name__ == '__main__':
    main()
|
|
@ -1,100 +0,0 @@
|
|||
# Copyright 2014 Rackspace, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_config import cfg
|
||||
import stevedore
|
||||
|
||||
from iotronic.common import exception
|
||||
|
||||
|
||||
# Config options controlling which DHCP provider plugin gets loaded.
dhcp_provider_opts = [
    cfg.StrOpt('dhcp_provider',
               default='neutron',
               help='DHCP provider to use. "neutron" uses Neutron, and '
                    '"none" uses a no-op provider.'
               ),
]

CONF = cfg.CONF
CONF.register_opts(dhcp_provider_opts, group='dhcp')

# Module-level placeholder; the loaded provider is actually cached on
# the DHCPFactory class attribute of the same name.
_dhcp_provider = None

# Lock name used to serialize provider initialization.
EM_SEMAPHORE = 'dhcp_provider'
|
||||
|
||||
|
||||
class DHCPFactory(object):
    """Loads and caches the configured DHCP provider as a class-level
    singleton, and proxies DHCP operations to it.
    """

    # NOTE(lucasagomes): Instantiate a stevedore.driver.DriverManager
    #                    only once, the first time DHCPFactory.__init__
    #                    is called.
    _dhcp_provider = None

    def __init__(self, **kwargs):
        # kwargs are forwarded to the provider plugin's constructor on
        # the first instantiation only.
        if not DHCPFactory._dhcp_provider:
            DHCPFactory._set_dhcp_provider(**kwargs)

    # NOTE(lucasagomes): Use lockutils to avoid a potential race in eventlet
    #                    that might try to create two dhcp factories.
    @classmethod
    @lockutils.synchronized(EM_SEMAPHORE, 'iotronic-')
    def _set_dhcp_provider(cls, **kwargs):
        """Initialize the dhcp provider

        :raises: DHCPLoadError if the dhcp_provider cannot be loaded.
        """

        # NOTE(lucasagomes): In case multiple greenthreads queue up on
        #                    this lock before _dhcp_provider is initialized,
        #                    prevent creation of multiple DriverManager.
        if cls._dhcp_provider:
            return

        dhcp_provider_name = CONF.dhcp.dhcp_provider
        try:
            # NOTE(review): this accesses stevedore.driver under a bare
            # 'import stevedore' at the top of the file — confirm the
            # 'stevedore.driver' submodule is imported somewhere, else
            # this raises AttributeError (caught below as DHCPLoadError).
            _extension_manager = stevedore.driver.DriverManager(
                'iotronic.dhcp',
                dhcp_provider_name,
                invoke_kwds=kwargs,
                invoke_on_load=True)
        except Exception as e:
            raise exception.DHCPLoadError(
                dhcp_provider_name=dhcp_provider_name, reason=e
            )

        cls._dhcp_provider = _extension_manager.driver

    def update_dhcp(self, task, dhcp_opts, ports=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param dhcp_opts: this will be a list of dicts, e.g.

            ::

             [{'opt_name': 'bootfile-name',
               'opt_value': 'pxelinux.0'},
              {'opt_name': 'server-ip-address',
               'opt_value': '123.123.123.456'},
              {'opt_name': 'tftp-server',
               'opt_value': '123.123.123.123'}]
        :param ports: a list of Neutron port dicts to update DHCP options on.
            If None, will get the list of ports from the Iotronic port
            objects.
        """
        self.provider.update_dhcp_opts(task, dhcp_opts, ports)

    @property
    def provider(self):
        # The driver cached by _set_dhcp_provider (None until first init).
        return self._dhcp_provider
|
|
@ -1,577 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
# Copyright (c) 2010 Citrix Systems, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Handling of VM disk images.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
import jinja2
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from iotronic.common import exception
|
||||
from iotronic.common.glance_service import service_utils as glance_utils
|
||||
from iotronic.common.i18n import _
|
||||
from iotronic.common.i18n import _LE
|
||||
from iotronic.common import image_service as service
|
||||
from iotronic.common import paths
|
||||
from iotronic.common import utils
|
||||
from iotronic.openstack.common import fileutils
|
||||
from iotronic.openstack.common import imageutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Config options for image conversion and bootable-ISO assembly.
image_opts = [
    cfg.BoolOpt('force_raw_images',
                default=True,
                help='If True, convert backing images to "raw" disk image '
                     'format.'),
    cfg.StrOpt('isolinux_bin',
               default='/usr/lib/syslinux/isolinux.bin',
               help='Path to isolinux binary file.'),
    cfg.StrOpt('isolinux_config_template',
               default=paths.basedir_def('common/isolinux_config.template'),
               help='Template file for isolinux configuration file.'),
    cfg.StrOpt('grub_config_template',
               default=paths.basedir_def('common/grub_conf.template'),
               help='Template file for grub configuration file.'),
]


CONF = cfg.CONF
# Registered in the DEFAULT group (no group= argument).
CONF.register_opts(image_opts)
|
||||
|
||||
|
||||
def _create_root_fs(root_directory, files_info):
|
||||
"""Creates a filesystem root in given directory.
|
||||
|
||||
Given a mapping of absolute path of files to their relative paths
|
||||
within the filesystem, this method copies the files to their
|
||||
destination.
|
||||
|
||||
:param root_directory: the filesystem root directory.
|
||||
:param files_info: A dict containing absolute path of file to be copied
|
||||
-> relative path within the vfat image. For example,
|
||||
{
|
||||
'/absolute/path/to/file' -> 'relative/path/within/root'
|
||||
...
|
||||
}
|
||||
:raises: OSError, if creation of any directory failed.
|
||||
:raises: IOError, if copying any of the files failed.
|
||||
"""
|
||||
for src_file, path in files_info.items():
|
||||
target_file = os.path.join(root_directory, path)
|
||||
dirname = os.path.dirname(target_file)
|
||||
if not os.path.exists(dirname):
|
||||
os.makedirs(dirname)
|
||||
|
||||
shutil.copyfile(src_file, target_file)
|
||||
|
||||
|
||||
def _umount_without_raise(mount_dir):
    """Unmount *mount_dir*, swallowing unmount command failures.

    Used for best-effort cleanup paths where a failed unmount must not
    mask the caller's own error handling.
    """
    try:
        utils.umount(mount_dir)
    except processutils.ProcessExecutionError:
        pass
|
||||
|
||||
|
||||
def create_vfat_image(output_file, files_info=None, parameters=None,
                      parameters_file='parameters.txt', fs_size_kib=100):
    """Creates the fat fs image on the desired file.

    This method copies the given files to a root directory (optional),
    writes the parameters specified to the parameters file within the
    root directory (optional), and then creates a vfat image of the root
    directory.

    :param output_file: The path to the file where the fat fs image needs
        to be created.
    :param files_info: A dict containing absolute path of file to be copied
        -> relative path within the vfat image. For example,
        {'/absolute/path/to/file': 'relative/path/within/root', ...}
    :param parameters: A dict containing key-value pairs of parameters.
    :param parameters_file: The filename for the parameters file.
    :param fs_size_kib: size of the vfat filesystem in KiB.
    :raises: ImageCreationFailed, if image creation failed while doing any
        of filesystem manipulation activities like creating dirs, mounting,
        creating filesystem, copying files, etc.
    """
    # Allocate the backing file first (one fs_size_kib-sized block).
    try:
        utils.dd('/dev/zero', output_file, 'count=1', "bs=%dKiB" % fs_size_kib)
    except processutils.ProcessExecutionError as e:
        raise exception.ImageCreationFailed(image_type='vfat', error=e)

    with utils.tempdir() as tmpdir:

        try:
            # The label helps ramdisks to find the partition containing
            # the parameters (by using /dev/disk/by-label/ir-vfd-dev).
            # NOTE: FAT filesystem label can be up to 11 characters long.
            utils.mkfs('vfat', output_file, label="ir-vfd-dev")
            utils.mount(output_file, tmpdir, '-o', 'umask=0')
        except processutils.ProcessExecutionError as e:
            raise exception.ImageCreationFailed(image_type='vfat', error=e)

        try:
            if files_info:
                _create_root_fs(tmpdir, files_info)

            if parameters:
                parameters_file = os.path.join(tmpdir, parameters_file)
                params_list = ['%(key)s=%(val)s' % {'key': k, 'val': v}
                               for k, v in parameters.items()]
                file_contents = '\n'.join(params_list)
                utils.write_to_file(parameters_file, file_contents)

        except Exception as e:
            LOG.exception(_LE("vfat image creation failed. Error: %s"), e)
            raise exception.ImageCreationFailed(image_type='vfat', error=e)

        finally:
            # Always attempt the unmount; a failed unmount is itself a
            # creation failure (it leaves the mount dangling).
            try:
                utils.umount(tmpdir)
            except processutils.ProcessExecutionError as e:
                raise exception.ImageCreationFailed(image_type='vfat', error=e)
|
||||
|
||||
|
||||
def _generate_cfg(kernel_params, template, options):
    """Generates a isolinux or grub configuration file.

    Renders the given template with *options*, after joining the kernel
    parameter fragments into a single cmdline string exposed to the
    template as 'kernel_params'.

    :param kernel_params: a list of strings (each element being a string
        like 'K=V' or 'K' or combination of them like 'K1=V1 K2 K3=V3')
        to be added as the kernel cmdline; None means no parameters.
    :param template: the path of the config template file.
    :param options: a dictionary of keywords which need to be replaced in
        template file to generate a proper config file; updated in place
        with the joined 'kernel_params' string.
    :returns: a string containing the contents of the configuration file.
    """
    cmdline = ' '.join(kernel_params or [])

    template_dir, template_name = os.path.split(template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    tmpl = env.get_template(template_name)

    options.update({'kernel_params': cmdline})

    return tmpl.render(options)
|
||||
|
||||
|
||||
def create_isolinux_image_for_bios(output_file, kernel, ramdisk,
                                   kernel_params=None):
    """Creates an isolinux image on the specified file.

    Copies the provided kernel, ramdisk to a directory, generates the isolinux
    configuration file using the kernel parameters provided, and then generates
    a bootable ISO image.

    :param output_file: the path to the file where the iso image needs to be
        created.
    :param kernel: the kernel to use.
    :param ramdisk: the ramdisk to use.
    :param kernel_params: a list of strings(each element being a string like
        'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
        as the kernel cmdline.
    :raises: ImageCreationFailed, if image creation failed while copying files
        or while running command to generate iso.
    """
    # Fixed locations of the boot loader and its config inside the ISO.
    ISOLINUX_BIN = 'isolinux/isolinux.bin'
    ISOLINUX_CFG = 'isolinux/isolinux.cfg'

    options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}

    with utils.tempdir() as tmpdir:
        files_info = {
            kernel: 'vmlinuz',
            ramdisk: 'initrd',
            CONF.isolinux_bin: ISOLINUX_BIN,
        }
        try:
            _create_root_fs(tmpdir, files_info)
        except (OSError, IOError) as e:
            LOG.exception(_LE("Creating the filesystem root failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)

        cfg = _generate_cfg(kernel_params,
                            CONF.isolinux_config_template, options)

        isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
        utils.write_to_file(isolinux_cfg, cfg)

        try:
            # '-no-emul-boot -boot-load-size 4 -boot-info-table -b ...'
            # makes the ISO BIOS-bootable via the isolinux binary.
            utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
                          '-cache-inodes', '-J', '-l', '-no-emul-boot',
                          '-boot-load-size', '4', '-boot-info-table',
                          '-b', ISOLINUX_BIN, '-o', output_file, tmpdir)
        except processutils.ProcessExecutionError as e:
            LOG.exception(_LE("Creating ISO image failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)
|
||||
|
||||
|
||||
def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
                                   kernel_params=None):
    """Creates an isolinux image on the specified file.

    Copies the provided kernel, ramdisk, efiboot.img to a directory, creates
    the path for grub config file, generates the isolinux configuration file
    using the kernel parameters provided, generates the grub configuration
    file using kernel parameters and then generates a bootable ISO image
    for uefi.

    :param output_file: the path to the file where the iso image needs to be
        created.
    :param deploy_iso: deploy iso used to initiate the deploy.
    :param kernel: the kernel to use.
    :param ramdisk: the ramdisk to use.
    :param kernel_params: a list of strings(each element being a string like
        'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
        as the kernel cmdline.
    :raises: ImageCreationFailed, if image creation failed while copying files
        or while running command to generate iso.
    """
    ISOLINUX_BIN = 'isolinux/isolinux.bin'
    ISOLINUX_CFG = 'isolinux/isolinux.cfg'

    isolinux_options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
    grub_options = {'linux': '/vmlinuz', 'initrd': '/initrd'}

    with utils.tempdir() as tmpdir:
        files_info = {
            kernel: 'vmlinuz',
            ramdisk: 'initrd',
            CONF.isolinux_bin: ISOLINUX_BIN,
        }

        # Open the deploy iso used to initiate deploy and copy the
        # efiboot.img i.e. boot loader to the current temporary
        # directory.
        with utils.tempdir() as mountdir:
            # NOTE(review): _mount_deploy_iso is defined elsewhere in
            # this file; it appears to return the uefi file mapping plus
            # the efiboot image and grub config relative paths — confirm.
            uefi_path_info, e_img_rel_path, grub_rel_path = (
                _mount_deploy_iso(deploy_iso, mountdir))

            # if either of these variables are not initialized then the
            # uefi efiboot.img cannot be created.
            files_info.update(uefi_path_info)
            try:
                _create_root_fs(tmpdir, files_info)
            except (OSError, IOError) as e:
                LOG.exception(_LE("Creating the filesystem root failed."))
                raise exception.ImageCreationFailed(image_type='iso', error=e)
            finally:
                # The deploy ISO mount is only needed while copying; it
                # is released best-effort before the ISO is built.
                _umount_without_raise(mountdir)

        cfg = _generate_cfg(kernel_params,
                            CONF.isolinux_config_template, isolinux_options)

        isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
        utils.write_to_file(isolinux_cfg, cfg)

        # Generate and copy grub config file.
        grub_cfg = os.path.join(tmpdir, grub_rel_path)
        grub_conf = _generate_cfg(kernel_params,
                                  CONF.grub_config_template, grub_options)
        utils.write_to_file(grub_cfg, grub_conf)

        # Create the boot_iso.
        try:
            utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
                          '-cache-inodes', '-J', '-l', '-no-emul-boot',
                          '-boot-load-size', '4', '-boot-info-table',
                          '-b', ISOLINUX_BIN, '-eltorito-alt-boot',
                          '-e', e_img_rel_path, '-no-emul-boot',
                          '-o', output_file, tmpdir)
        except processutils.ProcessExecutionError as e:
            LOG.exception(_LE("Creating ISO image failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)
|
||||
|
||||
|
||||
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info.

    A nonexistent *path* yields an empty QemuImgInfo rather than an
    error.
    """
    if not os.path.exists(path):
        return imageutils.QemuImgInfo()

    # Force a C locale so qemu-img's output parses predictably.
    stdout, _stderr = utils.execute('env', 'LC_ALL=C', 'LANG=C',
                                    'qemu-img', 'info', path)
    return imageutils.QemuImgInfo(stdout)
|
||||
|
||||
|
||||
def convert_image(source, dest, out_format, run_as_root=False):
    """Convert image to other format.

    :param source: path of the image to convert.
    :param dest: path to write the converted image to.
    :param out_format: qemu-img output format (e.g. 'raw', 'qcow2').
    :param run_as_root: whether to run qemu-img with root privileges.
    """
    utils.execute('qemu-img', 'convert', '-O', out_format, source, dest,
                  run_as_root=run_as_root)
|
||||
|
||||
|
||||
def fetch(context, image_href, path, image_service=None, force_raw=False):
    """Download an image to a local file, optionally converting to raw.

    :param context: request context passed to the image service.
    :param image_href: URL or glance uuid identifying the image.
    :param path: local file path to write the image to.
    :param image_service: optional pre-constructed image service; when
        not given, one is looked up from *image_href*.
    :param force_raw: when True, convert the downloaded image to raw
        format in place.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance. Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    if not image_service:
        image_service = service.get_image_service(image_href,
                                                  context=context)
    LOG.debug("Using %(image_service)s to download image %(image_href)s." %
              {'image_service': image_service.__class__,
               'image_href': image_href})

    # Remove the partial file if anything below fails.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(image_href, image_file)

        if force_raw:
            image_to_raw(image_href, path, "%s.part" % path)
|
||||
|
||||
|
||||
def image_to_raw(image_href, path, path_tmp):
    """Ensure the image at *path_tmp* ends up at *path* in raw format.

    Inspects the staged file with qemu-img; non-raw images are converted
    to a '.converted' staging file, verified, and renamed into place.
    Raw images are simply renamed. Staging files are removed on error.

    :param image_href: image identifier, used in error messages.
    :param path: final destination path for the raw image.
    :param path_tmp: path of the downloaded image to inspect/convert.
    :raises: ImageUnacceptable if the format cannot be parsed or the
        image has a backing file.
    :raises: ImageConvertFailed if conversion did not yield raw.
    """
    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # A backing file would make the image depend on external state.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
                {'fmt': fmt, 'backing_file': backing_file})

        if fmt != "raw":
            staged = "%s.converted" % path
            LOG.debug("%(image)s was %(format)s, converting to raw" %
                      {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                convert_image(path_tmp, staged, 'raw')
                # The source is deleted before verification; on failure
                # only the staged file remains to be cleaned up.
                os.unlink(path_tmp)

                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageConvertFailed(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
|
||||
|
||||
|
||||
def download_size(context, image_href, image_service=None):
    """Return the stored size (in bytes) reported for an image.

    :param context: request context for the image service lookup.
    :param image_href: URL or glance uuid identifying the image.
    :param image_service: optional pre-constructed image service.
    """
    svc = (image_service or
           service.get_image_service(image_href, context=context))
    return svc.show(image_href)['size']
|
||||
|
||||
|
||||
def converted_size(path):
    """Get size of converted raw image.

    The size of image converted to raw format can be growing up to the
    virtual size of the image.

    :param path: path to the image file.
    :returns: virtual size of the image or 0 if conversion not needed.
    """
    return qemu_img_info(path).virtual_size
|
||||
|
||||
|
||||
def get_image_properties(context, image_href, properties="all"):
    """Returns the values of several properties of an image

    :param context: context
    :param image_href: href of the image
    :param properties: the properties whose values are required.
        This argument is optional, default value is "all", so if not
        specified all properties will be returned.
    :returns: a dict of the values of the properties. A property not on
        the glance metadata will have a value of None.
    """
    img_service = service.get_image_service(image_href, context=context)
    iproperties = img_service.show(image_href)['properties']

    if properties == "all":
        return iproperties

    return dict((name, iproperties.get(name)) for name in properties)
|
||||
|
||||
|
||||
def get_temp_url_for_glance_image(context, image_uuid):
    """Return a Swift temp URL for a glance image.

    :param context: context
    :param image_uuid: the UUID of the image in glance
    :returns: the tmp url for the glance image.
    """
    # direct_url is only exposed by the glance v2 API, so pin version 2.
    glance_service = service.GlanceImageService(version=2, context=context)
    image_info = glance_service.show(image_uuid)
    LOG.debug('Got image info: %(info)s for image %(image_uuid)s.',
              {'info': image_info, 'image_uuid': image_uuid})
    return glance_service.swift_temp_url(image_info)
|
||||
|
||||
|
||||
def create_boot_iso(context, output_filename, kernel_href,
                    ramdisk_href, deploy_iso_uuid, root_uuid=None,
                    kernel_params=None, boot_mode=None):
    """Create a bootable ISO image for a node.

    Fetches the kernel and ramdisk from the given hrefs into a scratch
    directory, assembles the kernel command line, and builds a bootable
    ISO (UEFI or BIOS flavoured) that can boot the baremetal node.

    :param context: context
    :param output_filename: the absolute path of the output ISO file
    :param kernel_href: URL or glance uuid of the kernel to use
    :param ramdisk_href: URL or glance uuid of the ramdisk to use
    :param deploy_iso_uuid: URL or glance uuid of the deploy iso used
    :param root_uuid: uuid of the root filesystem (optional)
    :param kernel_params: a string containing whitespace separated values
        kernel cmdline arguments of the form K=V or K (optional).
    :param boot_mode: the boot mode in which the deploy is to happen.
    :raises: ImageCreationFailed, if creating boot ISO failed.
    """
    with utils.tempdir() as workdir:
        # Name the local copies after the last path segment of each href.
        kernel_file = os.path.join(workdir, kernel_href.split('/')[-1])
        ramdisk_file = os.path.join(workdir, ramdisk_href.split('/')[-1])
        fetch(context, kernel_href, kernel_file)
        fetch(context, ramdisk_href, ramdisk_file)

        cmdline = []
        if root_uuid:
            cmdline.append('root=UUID=%s' % root_uuid)
        if kernel_params:
            cmdline.append(kernel_params)

        if boot_mode == 'uefi':
            # UEFI needs artifacts (efiboot.img, grub.cfg) taken from the
            # deploy iso, so fetch it as well.
            deploy_iso_file = os.path.join(workdir, deploy_iso_uuid)
            fetch(context, deploy_iso_uuid, deploy_iso_file)
            create_isolinux_image_for_uefi(output_filename,
                                           deploy_iso_file,
                                           kernel_file,
                                           ramdisk_file,
                                           cmdline)
        else:
            create_isolinux_image_for_bios(output_filename,
                                           kernel_file,
                                           ramdisk_file,
                                           cmdline)
|
||||
|
||||
|
||||
def is_whole_disk_image(ctx, instance_info):
    """Find out if the image is a partition image or a whole disk image.

    :param ctx: an admin context
    :param instance_info: a node's instance info dict
    :returns: True for whole disk images, False for partition images,
        and None when there is no image_source or on error.
    """
    image_source = instance_info.get('image_source')
    if not image_source:
        return

    if glance_utils.is_glance_image(image_source):
        try:
            iproperties = get_image_properties(ctx, image_source)
        except Exception:
            # Best effort: signal "unknown" rather than failing here.
            return
        # A whole disk image carries neither a kernel nor a ramdisk.
        return not (iproperties.get('kernel_id') or
                    iproperties.get('ramdisk_id'))

    # Non glance image ref: rely on the instance_info entries instead.
    return not (instance_info.get('kernel') or
                instance_info.get('ramdisk'))
|
||||
|
||||
|
||||
def _mount_deploy_iso(deploy_iso, mountdir):
    """Mount the deploy iso and locate its UEFI boot artifacts.

    Loop-mounts deploy_iso on mountdir and walks the mounted tree looking
    for efiboot.img and grub.cfg. The iso is left mounted on success and
    unmounted on any failure path.

    :param deploy_iso: path to the deploy iso where its
        contents are fetched to.
    :param mountdir: directory the iso is mounted on.
    :raises: ImageCreationFailed if mount fails or the iso does not
        contain efiboot.img and grub.cfg.
    :returns: a tuple consisting of - 1. a dictionary containing
        the values as required by create_isolinux_image,
        2. efiboot.img relative path, and
        3. grub.cfg relative path.
    """
    e_img_rel_path = None
    e_img_path = None
    grub_rel_path = None
    grub_path = None

    try:
        utils.mount(deploy_iso, mountdir, '-o', 'loop')
    except processutils.ProcessExecutionError as e:
        LOG.exception(_LE("mounting the deploy iso failed."))
        raise exception.ImageCreationFailed(image_type='iso', error=e)

    try:
        # NOTE: 'dirpath' rather than 'dir' to avoid shadowing the builtin.
        for (dirpath, subdirs, files) in os.walk(mountdir):
            if 'efiboot.img' in files:
                e_img_path = os.path.join(dirpath, 'efiboot.img')
                e_img_rel_path = os.path.relpath(e_img_path,
                                                 mountdir)
            if 'grub.cfg' in files:
                grub_path = os.path.join(dirpath, 'grub.cfg')
                grub_rel_path = os.path.relpath(grub_path,
                                                mountdir)
    except (OSError, IOError) as e:
        LOG.exception(_LE("examining the deploy iso failed."))
        _umount_without_raise(mountdir)
        raise exception.ImageCreationFailed(image_type='iso', error=e)

    # check if the variables are assigned some values or not during
    # walk of the mountdir.
    if not (e_img_path and e_img_rel_path and grub_path and grub_rel_path):
        error = (_("Deploy iso didn't contain efiboot.img or grub.cfg"))
        _umount_without_raise(mountdir)
        raise exception.ImageCreationFailed(image_type='iso', error=error)

    uefi_path_info = {e_img_path: e_img_rel_path,
                      grub_path: grub_rel_path}

    # Returning a tuple as it makes the code simpler and clean.
    # uefi_path_info: is needed by the caller for _create_root_fs to create
    # appropriate directory structures for uefi boot iso.
    # grub_rel_path: is needed to copy the new grub.cfg generated using
    # generate_cfg() to the same directory path structure where it was
    # present in deploy iso. This path varies for different OS vendors.
    # e_img_rel_path: is required by mkisofs to generate boot iso.
    return uefi_path_info, e_img_rel_path, grub_rel_path
|
|
@ -1,30 +0,0 @@
|
|||
# Copyright 2014 Rackspace, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
def get_node_vif_ids(task):
    """Get all VIF ids for a node.

    This function does not handle multi node operations.

    :param task: a TaskManager instance.
    :returns: A dict of the Node's port UUIDs and their associated VIFs
    """
    return {port.uuid: port.extra.get('vif_port_id')
            for port in task.ports
            if port.extra.get('vif_port_id')}
|
|
@ -1,53 +0,0 @@
|
|||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Utilities and helper functions that won't produce circular imports."""
|
||||
|
||||
import inspect
|
||||
|
||||
|
||||
def getcallargs(function, *args, **kwargs):
    """Map call arguments to the parameter names of function.

    This is a simplified inspect.getcallargs (2.7+).

    It should be replaced when python >= 2.7 is standard.

    :param function: the callable being inspected.
    :param args: positional arguments the caller would pass.
    :param kwargs: keyword arguments the caller would pass.
    :returns: a dict mapping parameter names to the values they would be
        bound to, with unfilled parameters taking their defaults.
    """
    keyed_args = {}
    # NOTE: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() provides the same fields used here.
    spec = inspect.getfullargspec(function)
    argnames = spec.args
    defaults = spec.defaults

    keyed_args.update(kwargs)

    # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
    # argnames but not in args or kwargs. Uses 'in' rather than '==' because
    # some tests use 'self2'.
    # Guard against zero-argument callables before peeking at argnames[0].
    if argnames and ('self' in argnames[0] or 'cls' == argnames[0]):
        # The function may not actually be a method or have __self__.
        # Typically seen when it's stubbed with mox.
        if inspect.ismethod(function) and hasattr(function, '__self__'):
            keyed_args[argnames[0]] = function.__self__
        else:
            keyed_args[argnames[0]] = None

    remaining_argnames = (n for n in argnames if n not in keyed_args)
    keyed_args.update(zip(remaining_argnames, args))

    # Fill in defaults for any trailing parameters not already bound.
    if defaults:
        num_defaults = len(defaults)
        for argname, value in zip(argnames[-num_defaults:], defaults):
            keyed_args.setdefault(argname, value)

    return keyed_args
|
|
@ -1,191 +0,0 @@
|
|||
#
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from six.moves.urllib import parse
|
||||
from swiftclient import client as swift_client
|
||||
from swiftclient import exceptions as swift_exceptions
|
||||
from swiftclient import utils as swift_utils
|
||||
|
||||
from iotronic.common import exception
|
||||
from iotronic.common.i18n import _
|
||||
from iotronic.common import keystone
|
||||
|
||||
# Configuration options specific to this Swift module.
swift_opts = [
    cfg.IntOpt('swift_max_retries',
               default=2,
               help='Maximum number of times to retry a Swift request, '
                    'before failing.')
]


CONF = cfg.CONF
CONF.register_opts(swift_opts, group='swift')

# Reuse the keystonemiddleware auth_token options so that Swift requests
# authenticate with the same service credentials as the rest of the
# service, instead of declaring duplicate options here.
CONF.import_opt('admin_user', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('admin_password', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('auth_version', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('insecure', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')
CONF.import_opt('cafile', 'keystonemiddleware.auth_token',
                group='keystone_authtoken')

# Module-level logger, per the project's logging convention.
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SwiftAPI(object):
    """API for communicating with Swift."""

    # NOTE(review): these keyword defaults are evaluated once, when this
    # class statement executes at import time — presumably the config is
    # fully loaded before this module is imported; confirm.
    def __init__(self,
                 user=CONF.keystone_authtoken.admin_user,
                 tenant_name=CONF.keystone_authtoken.admin_tenant_name,
                 key=CONF.keystone_authtoken.admin_password,
                 auth_url=CONF.keystone_authtoken.auth_uri,
                 auth_version=CONF.keystone_authtoken.auth_version):
        """Constructor for creating a SwiftAPI object.

        :param user: the name of the user for Swift account
        :param tenant_name: the name of the tenant for Swift account
        :param key: the 'password' or key to authenticate with
        :param auth_url: the url for authentication
        :param auth_version: the version of api to use for authentication
        """
        auth_url = keystone.get_keystone_url(auth_url, auth_version)
        params = {'retries': CONF.swift.swift_max_retries,
                  'insecure': CONF.keystone_authtoken.insecure,
                  'cacert': CONF.keystone_authtoken.cafile,
                  'user': user,
                  'tenant_name': tenant_name,
                  'key': key,
                  'authurl': auth_url,
                  'auth_version': auth_version}

        # Single swiftclient connection reused by every method below.
        self.connection = swift_client.Connection(**params)

    # NOTE(review): the parameter name 'object' shadows the builtin, but
    # it is part of the public (keyword-callable) interface throughout
    # this class and cannot be renamed safely here.
    def create_object(self, container, object, filename,
                      object_headers=None):
        """Uploads a given file to Swift.

        :param container: The name of the container for the object.
        :param object: The name of the object in Swift
        :param filename: The file to upload, as the object data
        :param object_headers: the headers for the object to pass to Swift
        :returns: The Swift UUID of the object
        :raises: SwiftOperationError, if any operation with Swift fails.
        """
        # put_container is idempotent: it creates the container if needed.
        try:
            self.connection.put_container(container)
        except swift_exceptions.ClientException as e:
            operation = _("put container")
            raise exception.SwiftOperationError(operation=operation, error=e)

        # NOTE(review): the file is opened in text mode ("r"); binary
        # payloads may need "rb" — confirm what callers upload.
        with open(filename, "r") as fileobj:

            try:
                obj_uuid = self.connection.put_object(container,
                                                      object,
                                                      fileobj,
                                                      headers=object_headers)
            except swift_exceptions.ClientException as e:
                operation = _("put object")
                raise exception.SwiftOperationError(operation=operation,
                                                    error=e)

        return obj_uuid

    def get_temp_url(self, container, object, timeout):
        """Returns the temp url for the given Swift object.

        :param container: The name of the container in which Swift object
            is placed.
        :param object: The name of the Swift object.
        :param timeout: The timeout in seconds after which the generated url
            should expire.
        :returns: The temp url for the object.
        :raises: SwiftOperationError, if any operation with Swift fails.
        """
        # The account metadata holds the key used to sign temp URLs.
        try:
            account_info = self.connection.head_account()
        except swift_exceptions.ClientException as e:
            operation = _("head account")
            raise exception.SwiftOperationError(operation=operation,
                                                error=e)

        storage_url, token = self.connection.get_auth()
        parse_result = parse.urlparse(storage_url)
        swift_object_path = '/'.join((parse_result.path, container, object))
        temp_url_key = account_info['x-account-meta-temp-url-key']
        # Sign the object path; the result is a path with query string,
        # re-joined below with the storage endpoint's scheme and host.
        url_path = swift_utils.generate_temp_url(swift_object_path, timeout,
                                                 temp_url_key, 'GET')
        return parse.urlunparse((parse_result.scheme,
                                 parse_result.netloc,
                                 url_path,
                                 None,
                                 None,
                                 None))

    def delete_object(self, container, object):
        """Deletes the given Swift object.

        :param container: The name of the container in which Swift object
            is placed.
        :param object: The name of the object in Swift to be deleted.
        :raises: SwiftOperationError, if operation with Swift fails.
        """
        try:
            self.connection.delete_object(container, object)
        except swift_exceptions.ClientException as e:
            operation = _("delete object")
            raise exception.SwiftOperationError(operation=operation, error=e)

    def head_object(self, container, object):
        """Retrieves the information about the given Swift object.

        :param container: The name of the container in which Swift object
            is placed.
        :param object: The name of the object in Swift
        :returns: The information about the object as returned by
            Swift client's head_object call.
        :raises: SwiftOperationError, if operation with Swift fails.
        """
        try:
            return self.connection.head_object(container, object)
        except swift_exceptions.ClientException as e:
            operation = _("head object")
            raise exception.SwiftOperationError(operation=operation, error=e)

    def update_object_meta(self, container, object, object_headers):
        """Update the metadata of a given Swift object.

        :param container: The name of the container in which Swift object
            is placed.
        :param object: The name of the object in Swift
        :param object_headers: the headers for the object to pass to Swift
        :raises: SwiftOperationError, if operation with Swift fails.
        """
        try:
            self.connection.post_object(container, object, object_headers)
        except swift_exceptions.ClientException as e:
            operation = _("post object")
            raise exception.SwiftOperationError(operation=operation, error=e)
|
Loading…
Reference in New Issue