Prototype a simpler anvil (with only packaging)

Since it seems that not many people are using the stages
['install', 'remove', 'restart', 'start', 'status', 'stop',
'test', 'uninstall'], we might as well reduce the scope of
anvil to what people are actually using. This saves on
maintenance and makes anvil simpler to use and understand.

Change-Id: I471b003e92e2572c4eac87c4ec62b23a4ee7d2ef
Joshua Harlow 2015-01-12 15:54:04 -08:00
parent 6c22352db6
commit d295fc9502
94 changed files with 13 additions and 6534 deletions

View File

@@ -15,28 +15,12 @@
# under the License.
from anvil.actions import build
from anvil.actions import install
from anvil.actions import prepare
from anvil.actions import remove
from anvil.actions import restart
from anvil.actions import start
from anvil.actions import status
from anvil.actions import stop
from anvil.actions import test
from anvil.actions import uninstall
_NAMES_TO_RUNNER = {
'build': build.BuildAction,
'install': install.InstallAction,
'prepare': prepare.PrepareAction,
'purge': remove.RemoveAction,
'restart': restart.RestartAction,
'start': start.StartAction,
'status': status.StatusAction,
'stop': stop.StopAction,
'test': test.TestAction,
'uninstall': uninstall.UninstallAction,
}
_RUNNER_TO_NAMES = dict((v, k) for k, v in _NAMES_TO_RUNNER.items())
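To make the effect of this hunk concrete, here is a sketch of what the action registry is reduced to once the deleted imports and entries are gone; it is reconstructed from the surviving context lines above, not copied from the committed file:

from anvil.actions import build
from anvil.actions import prepare

# Only the packaging-oriented actions remain after this commit
# (reconstructed from the context lines kept in the hunk above).
_NAMES_TO_RUNNER = {
    'build': build.BuildAction,
    'prepare': prepare.PrepareAction,
}
_RUNNER_TO_NAMES = dict((v, k) for k, v in _NAMES_TO_RUNNER.items())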

View File

@@ -23,7 +23,6 @@ from anvil import env
from anvil import exceptions as excp
from anvil import importer
from anvil import log as logging
from anvil import passwords as pw
from anvil import persona as _persona
from anvil import phase
from anvil import shell as sh
@@ -33,17 +32,11 @@ import six
LOG = logging.getLogger(__name__)
BASE_ENTRYPOINTS = {
'coverage': 'anvil.components.base_testing:EmptyTestingComponent',
'install': 'anvil.components.pkglist:Installer',
'running': 'anvil.components.base_runtime:EmptyRuntime',
'test': 'anvil.components.base_testing:EmptyTestingComponent',
'uninstall': 'anvil.components.base_install:PkgUninstallComponent',
}
BASE_PYTHON_ENTRYPOINTS = dict(BASE_ENTRYPOINTS)
BASE_PYTHON_ENTRYPOINTS.update({
'coverage': 'anvil.components.base_testing:PythonTestingComponent',
'install': 'anvil.components.base_install:PythonInstallComponent',
'test': 'anvil.components.base_testing:PythonTestingComponent',
})
SPECIAL_GROUPS = _persona.SPECIAL_GROUPS
@@ -72,43 +65,9 @@ class Action(object):
self.config_loader = cfg.YamlMergeLoader(root_dir,
origins_path=cli_opts['origins_fn'])
# Keyring/pw settings + cache
self.passwords = {}
self.keyring_path = cli_opts.pop('keyring_path')
self.keyring_encrypted = cli_opts.pop('keyring_encrypted')
self.prompt_for_passwords = cli_opts.pop('prompt_for_passwords', False)
self.store_passwords = cli_opts.pop('store_passwords', True)
# Stored for components to get any options
self.cli_opts = cli_opts
def _establish_passwords(self, groups):
kr = pw.KeyringProxy(self.keyring_path,
self.keyring_encrypted,
self.prompt_for_passwords,
True)
LOG.info("Reading passwords using a %s", kr)
to_save = {}
self.passwords.clear()
already_gotten = set()
for _group, instances in groups:
for _c, instance in six.iteritems(instances):
wanted_passwords = instance.get_option('wanted_passwords')
if not wanted_passwords:
continue
for (name, prompt) in wanted_passwords.items():
if name in already_gotten:
continue
(from_keyring, pw_provided) = kr.read(name, prompt)
if not from_keyring and self.store_passwords:
to_save[name] = pw_provided
self.passwords[name] = pw_provided
already_gotten.add(name)
if to_save:
LOG.info("Saving %s passwords using a %s", len(to_save), kr)
for (name, pw_provided) in to_save.items():
kr.save(name, pw_provided)
@abc.abstractproperty
@property
def lookup_name(self):
@@ -189,7 +148,6 @@ class Action(object):
desired_subsystems=persona.wanted_subsystems.get(c, []))
sibling_params['subsystems'] = active_subsystems
sibling_params['siblings'] = {} # This gets adjusted during construction
sibling_params['passwords'] = self.passwords
sibling_params['distro'] = self.distro
sibling_params['options'] = self.config_loader.load(
distro=d_component, component=c,
@@ -231,7 +189,6 @@
LOG.debug("Starting environment settings:")
utils.log_object(env.get(), logger=LOG, level=logging.DEBUG, item_max_len=64)
sh.mkdirslist(self.phase_dir)
self._establish_passwords(groups)
self._verify_components(groups)
self._warm_components(groups)

View File

@@ -1,121 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from StringIO import StringIO
import six
from anvil.actions import base as action
from anvil.actions import states
from anvil import colorizer
from anvil import log
from anvil import shell as sh
from anvil import utils
LOG = log.getLogger(__name__)
class InstallAction(action.Action):
@property
def lookup_name(self):
return 'install'
def _on_finish(self, persona, groups):
action.Action._on_finish(self, persona, groups)
self._write_exports(groups, sh.joinpths("/etc/anvil", "%s.rc" % (self.name)))
def _write_exports(self, groups, path):
entries = []
contents = StringIO()
contents.write("# Exports for action %s\n\n" % (self.name))
for _group, instances in groups:
for c, instance in six.iteritems(instances):
exports = instance.env_exports
if exports:
contents.write("# Exports for %s\n" % (c))
for (k, v) in exports.items():
export_entry = "export %s=%s" % (k, sh.shellquote(str(v).strip()))
entries.append(export_entry)
contents.write("%s\n" % (export_entry))
contents.write("\n")
if entries:
sh.write_file(path, contents.getvalue())
utils.log_iterable(entries,
header="Wrote to %s %s exports" % (path, len(entries)),
logger=LOG)
def _run(self, persona, groups):
prior_groups = []
for group, instances in groups:
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts,
group,
prior_groups)
removals = states.reverts("pre-install")
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Preinstalling %s.', colorizer.quote(i.name)),
run=lambda i: i.pre_install(),
end=None,
),
group,
instances,
"pre-install",
*removals
)
removals.extend(states.reverts("package-install"))
general_package = "general"
if general_package in instances:
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Installing packages"),
run=dependency_handler.install,
end=None,
),
group,
{general_package: instances[general_package]},
"package-install",
*removals
)
removals.extend(states.reverts("configure"))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Configuring %s.', colorizer.quote(i.name)),
run=lambda i: i.configure(),
end=None,
),
group,
instances,
"configure",
*removals
)
removals.extend(states.reverts("post-install"))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Post-installing %s.', colorizer.quote(i.name)),
run=lambda i: i.post_install(),
end=None
),
group,
instances,
"post-install",
*removals
)
prior_groups.append((group, instances))

View File

@@ -1,78 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import states
from anvil.actions import uninstall
LOG = log.getLogger(__name__)
class RemoveAction(uninstall.UninstallAction):
def _run(self, persona, groups):
super(RemoveAction, self)._run(persona, groups)
prior_groups = []
for group, instances in groups:
LOG.info("Removing group %s...", colorizer.quote(group))
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts,
group,
prior_groups)
removals = states.reverts("package-destroy")
general_package = "general"
if general_package in instances:
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Destroying packages"),
run=lambda i: dependency_handler.destroy(),
end=None,
),
group,
{general_package: instances[general_package]},
"package-destroy",
*removals
)
removals.extend(states.reverts('uninstall'))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Uninstalling %s.', colorizer.quote(i.name)),
run=lambda i: i.uninstall(),
end=None,
),
group,
instances,
'uninstall',
*removals
)
removals.extend(states.reverts('post-uninstall'))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Post-uninstalling %s.', colorizer.quote(i.name)),
run=lambda i: i.post_uninstall(),
end=None,
),
group,
instances,
'post-uninstall',
*removals
)
prior_groups.append((group, instances))

View File

@@ -1,36 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.actions import base as action
from anvil.actions import start
from anvil.actions import stop
class RestartAction(action.Action):
def __init__(self, name, distro, root_dir, cli_opts):
super(RestartAction, self).__init__(
name, distro, root_dir, cli_opts.copy())
self.start = start.StartAction(name, distro, root_dir, cli_opts.copy())
self.stop = stop.StopAction(name, distro, root_dir, cli_opts.copy())
@property
def lookup_name(self):
return 'running'
def run(self, persona):
self.stop.run(persona)
self.start.run(persona)

View File

@@ -1,72 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import states
LOG = log.getLogger(__name__)
class StartAction(action.Action):
@property
def lookup_name(self):
return 'running'
def _run(self, persona, groups):
for group, instances in groups:
LOG.info("Starting group %s...", colorizer.quote(group))
removals = states.reverts("pre-start")
self._run_phase(
action.PhaseFunctors(
start=None,
run=lambda i: i.pre_start(),
end=None,
),
group,
instances,
"pre-start",
*removals
)
removals.extend(states.reverts('start'))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Starting %s.',
colorizer.quote(i.name)),
run=lambda i: i.start(),
end=lambda i, result: LOG.info("Started %s application(s).",
colorizer.quote(result)),
),
group,
instances,
"start",
*removals
)
removals.extend(states.reverts('post-start'))
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Post-starting %s.',
colorizer.quote(i.name)),
run=lambda i: i.post_start(),
end=None,
),
group,
instances,
"post-start",
*removals
)

View File

@@ -19,24 +19,8 @@
# we can skip states that have already completed as well as redo states when
# the inverse is applied.
_INVERSES = {
"configure": ["unconfigure"],
"download": [],
"download-patch": [],
"package": ["package-destroy"],
"package-destroy": ["package-install", "install", "package"],
"package-install": ["package-uninstall", "uninstall", "package-destroy"],
"package-install-all-deps": [],
"package-uninstall": ["package-install", "package-install-all-deps"],
"post-install": [],
"post-start": [],
"post-uninstall": ["pre-install", "post-install"],
"pre-install": ["pre-uninstall", "post-uninstall"],
"pre-start": [],
"pre-uninstall": ["post-install"],
"start": ["stopped"],
"stopped": ["pre-start", "start", "post-start"],
"unconfigure": ["configure"],
"uninstall": ["prepare", "download", "download-patch"],
}
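This hunk drops the inverse entries that belonged to the removed actions. For context, the action classes deleted elsewhere in this commit call states.reverts(<state>) and feed the result into _run_phase; a minimal hypothetical sketch of that helper, assuming it simply returns the configured inverses of a state so their recorded phase markers can be cleared and re-run, would be:

def reverts(state):
    # Hypothetical sketch only -- the real anvil.actions.states module may do
    # more; this just returns the states that 'state' is configured to invert.
    return list(_INVERSES.get(state, []))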

View File

@@ -1,107 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil import utils
from anvil.actions import base as action
LOG = log.getLogger(__name__)
from anvil.components.base_runtime import STATUS_INSTALLED
from anvil.components.base_runtime import STATUS_STARTED
from anvil.components.base_runtime import STATUS_STOPPED
from anvil.components.base_runtime import STATUS_UNKNOWN
STATUS_COLOR_MAP = {
STATUS_INSTALLED: 'green',
STATUS_STARTED: 'green',
STATUS_UNKNOWN: 'yellow',
STATUS_STOPPED: 'red',
}
class StatusAction(action.Action):
def __init__(self, name, distro, root_dir, cli_opts):
action.Action.__init__(self, name, distro, root_dir, cli_opts)
self.show_amount = cli_opts.get('show_amount', 0)
@property
def lookup_name(self):
return 'running'
def _fetch_status(self, component):
return component.statii()
def _quote_status(self, status):
return colorizer.quote(status, quote_color=STATUS_COLOR_MAP.get(status, 'red'))
def _print_status(self, component, result):
if not result:
LOG.info("Status of %s is %s.", colorizer.quote(component.name), self._quote_status(STATUS_INSTALLED))
return
def log_details(text, spacing, max_len):
text = utils.truncate_text(text, max_len=max_len, from_bottom=True)
for line in text.splitlines():
line = line.replace("\t", "\\t")
line = line.replace("\r", "\\r")
line = utils.truncate_text(line, max_len=120)
LOG.info("%s>> %s", (" " * spacing), line)
def details_printer(entry, spacing, max_len):
details = entry.details
if isinstance(details, (basestring, str)):
log_details(details, spacing, max_len)
elif isinstance(details, (dict)):
keys = sorted(details.keys())
for k in keys:
LOG.info("%s%s:", (" " * spacing), str(k))
log_details(details[k], spacing + 1, max_len)
elif details is None:
pass
else:
raise RuntimeError("Unknown how to print the details of %s" % (entry.name))
if len(result) == 1:
s = result[0]
if s.name and s.name != component.name:
LOG.info("Status of %s (%s) is %s.", colorizer.quote(component.name), s.name, self._quote_status(s.status))
else:
LOG.info("Status of %s is %s.", colorizer.quote(component.name), self._quote_status(s.status))
if self.show_amount > 0 and s.details:
details_printer(s, 2, self.show_amount)
else:
LOG.info("Status of %s is:", colorizer.quote(component.name))
for s in result:
LOG.info("|-- %s is %s.", s.name, self._quote_status(s.status))
if self.show_amount > 0 and s.details:
details_printer(s, 4, self.show_amount)
def _run(self, persona, groups):
for group, instances in groups:
LOG.info("Getting status of group %s...", colorizer.quote(group))
self._run_phase(
action.PhaseFunctors(
start=None,
run=self._fetch_status,
end=self._print_status,
),
group,
instances,
None,
)

View File

@@ -1,51 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import states
LOG = log.getLogger(__name__)
class StopAction(action.Action):
@property
def lookup_name(self):
return 'running'
def _order_components(self, components):
components = super(StopAction, self)._order_components(components)
components.reverse()
return components
def _run(self, persona, groups):
for group, instances in groups:
LOG.info("Stopping group %s...", colorizer.quote(group))
removals = states.reverts("stopped")
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Stopping %s.', colorizer.quote(i.name)),
run=lambda i: i.stop(),
end=lambda i, result: LOG.info("Stopped %s application(s).",
colorizer.quote(result)),
),
group,
instances,
"stopped",
*removals
)

View File

@@ -1,63 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import states
LOG = log.getLogger(__name__)
class TestAction(action.Action):
@property
def lookup_name(self):
return 'test'
def _run(self, persona, groups):
for group, instances in groups:
LOG.info("Testing group %s...", colorizer.quote(group))
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts)
removals = states.reverts("package-install-all-deps")
general_package = "general"
if general_package in groups:
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Installing packages"),
run=lambda i: dependency_handler.install_all_deps(),
end=None,
),
group,
{general_package: instances[general_package]},
"package-install-all-deps",
*removals
)
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Running tests of component %s.',
colorizer.quote(i.name)),
run=lambda i: i.run_tests(),
end=None,
),
group,
instances,
None,
)

View File

@@ -1,85 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
from anvil.actions import states
LOG = log.getLogger(__name__)
class UninstallAction(action.Action):
@property
def lookup_name(self):
return 'uninstall'
def _order_components(self, components):
components = super(UninstallAction, self)._order_components(components)
components.reverse()
return components
def _run(self, persona, groups):
prior_groups = []
for group, instances in groups:
LOG.info("Uninstalling group %s...", colorizer.quote(group))
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts,
group,
prior_groups)
removals = states.reverts("unconfigure")
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Unconfiguring %s.', colorizer.quote(i.name)),
run=lambda i: i.unconfigure(),
end=None,
),
group,
instances,
'unconfigure',
*removals
)
removals.extend(states.reverts('pre-uninstall'))
self._run_phase(
action.PhaseFunctors(
start=None,
run=lambda i: i.pre_uninstall(),
end=None,
),
group,
instances,
'pre-uninstall',
*removals
)
removals.extend(states.reverts("package-uninstall"))
general_package = "general"
if general_package in instances:
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Uninstalling packages"),
run=lambda i: dependency_handler.uninstall(),
end=None,
),
group,
{general_package: instances[general_package]},
"package-uninstall",
*removals
)
prior_groups.append((group, instances))

View File

@@ -23,13 +23,11 @@ from anvil import trace as tr
from anvil import type_utils as tu
from anvil import utils
from anvil.components.configurators import base as conf
LOG = logging.getLogger(__name__)
class Component(object):
def __init__(self, name, subsystems, instances, options, siblings, distro, passwords, **kwargs):
def __init__(self, name, subsystems, instances, options, siblings, distro, **kwargs):
# Subsystems this was requested with
self.subsystems = subsystems
@@ -51,9 +49,6 @@ class Component(object):
# Turned on and off as phases get activated
self.activated = False
# How we get any passwords we need
self.passwords = passwords
# Where our binaries will be located
self.bin_dir = "/usr/bin/"
@@ -139,7 +134,6 @@ class BasicComponent(Component):
super(BasicComponent, self).__init__(*args, **kargs)
trace_fn = tr.trace_filename(self.get_option('trace_dir'), 'created')
self.tracewriter = tr.TraceWriter(trace_fn, break_if_there=False)
self.configurator = conf.Configurator(self)
def download(self):
return []

View File

@@ -16,7 +16,6 @@ from anvil.components import base
from anvil import downloader as down
from anvil import log as logging
from anvil import shell as sh
from anvil import trace as tr
from anvil import utils
from anvil.packaging.helpers import pip_helper
@@ -154,41 +153,3 @@ class PkgInstallComponent(base.BasicComponent, InstallableMixin):
class PythonInstallComponent(PythonComponent, InstallableMixin):
pass
class PkgUninstallComponent(base.Component):
def __init__(self, *args, **kargs):
super(PkgUninstallComponent, self).__init__(*args, **kargs)
trace_fn = tr.trace_filename(self.get_option('trace_dir'), 'created')
self.tracereader = tr.TraceReader(trace_fn)
def unconfigure(self):
pass
def post_uninstall(self):
self._uninstall_files()
self._uninstall_dirs()
def pre_uninstall(self):
pass
def uninstall(self):
pass
def _uninstall_files(self):
files_touched = self.tracereader.files_touched()
files_alive = filter(sh.isfile, files_touched)
if files_alive:
utils.log_iterable(files_alive, logger=LOG,
header="Removing %s miscellaneous files" % (len(files_alive)))
for fn in files_alive:
sh.unlink(fn)
def _uninstall_dirs(self):
dirs_made = self.tracereader.dirs_made()
dirs_alive = filter(sh.isdir, dirs_made)
if dirs_alive:
utils.log_iterable(dirs_alive, logger=LOG,
header="Removing %s created directories" % (len(dirs_alive)))
for dir_name in dirs_alive:
sh.deldir(dir_name)

View File

@@ -1,241 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import exceptions as excp
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base
import time
LOG = logging.getLogger(__name__)
####
#### STATUS CONSTANTS
####
STATUS_INSTALLED = 'installed'
STATUS_STARTED = "started"
STATUS_STOPPED = "stopped"
STATUS_UNKNOWN = "unknown"
class ProgramStatus(object):
def __init__(self, status, name=None, details=''):
self.name = name
self.status = status
self.details = details
class Program(object):
def __init__(self, name, path=None, working_dir=None, argv=None):
self.name = name
if path is None:
self.path = name
else:
self.path = path
self.working_dir = working_dir
if argv is None:
self.argv = tuple()
else:
self.argv = tuple(argv)
def __str__(self):
what = str(self.name)
if self.path:
what += " (%s)" % (self.path)
return what
class ProgramRuntime(base.Component):
@property
def applications(self):
# A list of applications since a single component sometimes
# has a list of programs to start (ie nova) instead of a single application (ie the db)
return []
def restart(self):
# How many applications restarted
self.stop()
return self.start()
def post_start(self):
pass
def pre_start(self):
pass
def statii(self):
# A list of statuses since a single component sometimes
# has a list of programs to report on (ie nova) instead of a single application (ie the db)
return []
def start(self):
# How many applications started
return 0
def stop(self):
# How many applications stopped
return 0
def wait_active(self, max_attempts=5):
# Attempt to wait until all potentially started applications
# are actually started (for whatever definition of started is applicable)
# for up to a given amount of attempts and wait time between attempts.
num_started = len(self.subsystems)
def waiter(between_wait):
LOG.info("Waiting %.2f seconds for component %s programs to start.", between_wait, colorizer.quote(self.name))
LOG.info("Please wait...")
sh.sleep(between_wait)
start_time = time.time()
for wait_time in utils.ExponentialBackoff(attempts=max_attempts):
statii = self.statii()
if len(statii) >= num_started: # >= if someone reports more than started...
not_worked = []
for p in statii:
if p.status != STATUS_STARTED:
not_worked.append(p)
if len(not_worked) == 0:
return
else:
# Eck less applications were found with status then what were started!
LOG.warn("%s less applications reported status than were actually started!",
num_started - len(statii))
waiter(wait_time)
end_time = time.time()
tot_time = end_time - start_time
raise excp.StatusException("Failed waiting %.2f seconds for component %r programs to become active..."
% (tot_time, self.name))
class EmptyRuntime(ProgramRuntime):
pass
class ServiceRuntime(ProgramRuntime):
def get_command(self, command, program):
program = self.daemon_name(program)
cmd_template = self.distro.get_command("service", command)
return utils.expand_template_deep(cmd_template, {'NAME': program})
def _get_details(self, program, status):
if status != STATUS_STARTED:
return None
daemon_program = self.daemon_name(program)
# TODO(harlowja): we can likely figure this out in a different manner,
# but for now try a bunch of likely paths and select the first path that
# exists and is readable as the location of the log file of the program.
log_paths = [
sh.joinpths('/var/log/', self.name, "%s.log" % (daemon_program)),
sh.joinpths('/var/log/', self.name, "%s.log" % (program)),
sh.joinpths('/var/log/', self.name, "%s-%s.log" % (self.name, program)),
sh.joinpths('/var/log/', self.name, "%s-%s.log" % (self.name, daemon_program)),
]
for path in log_paths:
if sh.isfile(path):
try:
return sh.load_file(path)
except (OSError, IOError):
pass
return None
def daemon_name(self, program):
return program
def start(self):
amount = 0
failed_programs = set()
for program in self.applications:
if not self.status_app(program):
if self.start_app(program):
amount += 1
else:
failed_programs.add(program)
if failed_programs:
raise RuntimeError('Failed to start %s for component %s'
% (', '.join(sorted(failed_programs)),
self.name))
return amount
def start_app(self, program):
LOG.info("Starting program %s under component %s.",
colorizer.quote(program), self.name)
start_cmd = self.get_command("start", program)
try:
sh.execute(start_cmd, shell=True)
except excp.ProcessExecutionError:
LOG.error("Failed to start program %s under component %s.",
colorizer.quote(program), self.name)
return False
return True
def stop(self):
amount = 0
for program in self.applications:
if self.status_app(program):
if self.stop_app(program):
amount += 1
return amount
def stop_app(self, program):
LOG.info("Stopping program %s under component %s.",
colorizer.quote(program), self.name)
stop_cmd = self.get_command("stop", program)
try:
sh.execute(stop_cmd, shell=True)
except excp.ProcessExecutionError:
LOG.error("Failed to stop program %s under component %s.",
colorizer.quote(program), self.name)
return False
return True
def status_app(self, program):
status_cmd = self.get_command("status", program)
try:
sh.execute(status_cmd, shell=True)
except excp.ProcessExecutionError:
return False
return True
def statii(self):
# Get the investigators/runners which can be used
# to actually do the status inquiry and attempt to perform said inquiry.
statii = []
for program in self.applications:
status = (STATUS_STARTED
if self.status_app(program)
else STATUS_STOPPED)
details = self._get_details(program, status)
statii.append(ProgramStatus(name=program,
status=status,
details=details))
return statii
class OpenStackRuntime(ServiceRuntime):
@property
def applications(self):
return self.subsystem_names()
def daemon_name(self, program):
return "openstack-%s-%s" % (self.name, program)

View File

@@ -1,169 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from anvil import colorizer
from anvil import exceptions as excp
from anvil import ini_parser
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base
LOG = logging.getLogger(__name__)
# Environment to run tests
DEFAULT_ENV = {
'NOSE_WITH_OPENSTACK': '1',
'NOSE_OPENSTACK_RED': '0.05',
'NOSE_OPENSTACK_YELLOW': '0.025',
'NOSE_OPENSTACK_SHOW_ELAPSED': '1',
}
def _using_testr(test_type, app_dir):
if test_type == 'testr':
return True
for i in ['.testr.conf', '.testrepository']:
if sh.exists(sh.joinpths(app_dir, i)):
return True
return False
class EmptyTestingComponent(base.Component):
def run_tests(self):
return
class PythonTestingComponent(base.Component):
def __init__(self, *args, **kargs):
base.Component.__init__(self, *args, **kargs)
self.test_type = self.get_option('test_type', default_value='').lower().strip()
self.ignore_test_failures = kargs.get('ignore_test_failures', False)
def _get_test_exclusions(self):
return self.get_option('exclude_tests', default_value=[])
def _get_test_dir_exclusions(self):
return self.get_option('exclude_tests_dir', default_value=[])
def _get_pre_test_command(self):
app_dir = self.get_option('app_dir')
if (_using_testr(self.test_type, app_dir) and
not sh.isdir(sh.joinpths(app_dir, '.testrepository'))):
return ['testr', 'init']
return None
def _get_test_command(self):
# See: http://docs.openstack.org/developer/nova/devref/unit_tests.html
# And: http://wiki.openstack.org/ProjectTestingInterface
# And: https://wiki.openstack.org/wiki/Testr
def get_testr_cmd():
# See: https://testrepository.readthedocs.org
#
# NOTE(harlowja): it appears that testr doesn't seem to support all
# the 'advanced' features (exclusion, coverage?, verbosity, xunit) as
# nose. Need to verify this...
return ['testr', 'run', '--parallel']
def get_nose_cmd():
# See: $ man nosetests
cmd = ['coverage', 'run', '/usr/bin/nosetests']
if not colorizer.color_enabled():
cmd.append('--openstack-nocolor')
else:
cmd.append('--openstack-color')
if self.get_bool_option("verbose", default_value=True):
cmd.append('--verbosity=2')
cmd.append('--detailed-errors')
else:
cmd.append('--verbosity=1')
cmd.append('--openstack-num-slow=0')
for e in self._get_test_exclusions():
cmd.append('--exclude=%s' % (e))
for e in self._get_test_dir_exclusions():
cmd.append('--exclude-dir=%s' % (e))
xunit_fn = self.get_option("xunit_filename")
if xunit_fn:
cmd.append("--with-xunit")
cmd.append("--xunit-file=%s" % (xunit_fn))
return cmd
if _using_testr(self.test_type, self.get_option('app_dir')):
return get_testr_cmd()
else:
# Assume nose will work then.
return get_nose_cmd()
def _get_env(self):
env_addons = DEFAULT_ENV.copy()
tox_fn = sh.joinpths(self.get_option('app_dir'), 'tox.ini')
if sh.isfile(tox_fn):
# Suck out some settings from the tox file
try:
tox_cfg = ini_parser.BuiltinConfigParser(fns=[tox_fn])
env_values = tox_cfg.get('testenv', 'setenv') or ''
for env_line in env_values.splitlines():
env_line = env_line.strip()
env_line = env_line.split("#")[0].strip()
if not env_line:
continue
env_entry = env_line.split('=', 1)
if len(env_entry) == 2:
(name, value) = env_entry
name = name.strip()
value = value.strip()
if name.lower() != 'virtual_env':
env_addons[name] = value
if env_addons:
LOG.debug("From %s we read in %s environment settings:", tox_fn, len(env_addons))
utils.log_object(env_addons, logger=LOG, level=logging.DEBUG)
except IOError:
pass
return env_addons
def run_tests(self):
app_dir = self.get_option('app_dir')
if not sh.isdir(app_dir):
LOG.warn("Unable to find application directory at %s, can not run %s tests.",
colorizer.quote(app_dir), colorizer.quote(self.name))
return
pre_cmd = self._get_pre_test_command()
cmd = self._get_test_command()
if not cmd:
LOG.warn("Unable to determine test command for %s, can not run tests.",
colorizer.quote(self.name))
return
env = self._get_env()
try:
if pre_cmd:
LOG.info("Running test setup via: %s",
utils.truncate_text(" ".join(pre_cmd), 80))
sh.execute(pre_cmd, stdout_fh=sys.stdout, stderr_fh=sys.stdout,
cwd=app_dir, env_overrides=env)
LOG.info("Running tests via: %s",
utils.truncate_text(" ".join(cmd), 80))
sh.execute(cmd, stdout_fh=sys.stdout, stderr_fh=sys.stdout,
cwd=app_dir, env_overrides=env)
except excp.ProcessExecutionError as e:
if self.ignore_test_failures:
LOG.warn("Ignoring test failure of component %s: %s", colorizer.quote(self.name), e)
else:
raise

View File

@@ -1,44 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components.configurators import ceilometer as cconf
LOG = logging.getLogger(__name__)
# Sync db command
SYNC_DB_CMD = ['sudo', '-u', 'ceilometer', '/usr/bin/ceilometer-dbsync']
class CeilometerInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = cconf.CeilometerConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self._sync_db()
def _sync_db(self):
LOG.info("Syncing ceilometer to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))

View File

@@ -1,47 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components.configurators import cinder as cconf
LOG = logging.getLogger(__name__)
# Sync db command
SYNC_DB_CMD = ['sudo', '-u', 'cinder', '/usr/bin/cinder-manage',
# Available commands:
'db', 'sync']
class CinderInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = cconf.CinderConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self.configurator.setup_db()
self._sync_db()
def _sync_db(self):
LOG.info("Syncing cinder to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))

View File

@@ -1,15 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@@ -1,150 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import io
import weakref
from anvil import exceptions
from anvil import ini_parser
from anvil import shell as sh
from anvil import utils
from anvil.components.helpers import db as dbhelper
from anvil.components.helpers import keystone as khelper
class Configurator(object):
DB_NAME = "undefined"
def __init__(self, installer, configs=None):
self.installer = weakref.proxy(installer)
self.configs = configs or []
self.source_configs = {}
self.config_adjusters = {}
self.config_dir = None
@property
def config_files(self):
return list(self.configs)
@property
def link_dir(self):
link_dir_base = self.installer.distro.get_command_config('base_link_dir')
return sh.joinpths(link_dir_base, self.installer.name)
def config_adjust(self, contents, name):
adjuster = self.config_adjusters.get(name)
if adjuster:
if isinstance(contents, unicode):
contents = contents.encode("utf-8")
with io.BytesIO(contents) as stream:
config = ini_parser.create_parser(
ini_parser.RewritableConfigParser, self.installer)
config.readfp(stream)
adjuster(ini_parser.DefaultConf(config))
contents = config.stringify(name)
return contents
def replace_config(self, config_fn):
return config_fn not in self.config_adjusters
def source_config(self, config_fn):
if self.config_dir:
allow_missing = False
if config_fn in self.source_configs:
config_data = self.source_configs.get(config_fn)
if isinstance(config_data, (tuple, list)):
config_fn, allow_missing = config_data
else:
config_fn = config_data
fn = sh.joinpths(self.config_dir, config_fn)
try:
return (fn, sh.load_file(fn))
except IOError as e:
if e.errno == errno.ENOENT and allow_missing:
return (fn, '')
else:
raise
return utils.load_template(self.installer.name, config_fn)
def config_param_replace(self, config_fn, contents, parameters):
if self.replace_config(config_fn):
return utils.expand_template(contents, parameters)
return contents
def target_config(self, config_fn):
return sh.joinpths(self.installer.cfg_dir, config_fn)
def setup_rpc(self, conf, rpc_backends=None, mq_type=None):
# How is your message queue setup?
if not mq_type:
raw_mq_type = self.installer.get_option('mq-type')
if raw_mq_type:
mq_type = utils.canon_mq_type(raw_mq_type)
if not mq_type:
msg = ("%s requires a message queue to operate. "
"Please specify a 'mq-type' in configuration."
% self.installer.name.title())
raise exceptions.ConfigException(msg)
if rpc_backends is not None:
try:
conf.add('rpc_backend', rpc_backends[mq_type])
except KeyError:
msg = ("%s does not support mq type %s."
% (self.installer.name.title(), mq_type))
raise exceptions.ConfigException(msg)
if mq_type == 'rabbit':
conf.add('rabbit_host',
self.installer.get_option('rabbit', 'host',
default_value=self.installer.get_option('ip')))
conf.add('rabbit_password', self.installer.get_password('rabbit'))
conf.add('rabbit_userid', self.installer.get_option('rabbit', 'user_id'))
elif mq_type == 'qpid':
conf.add('qpid_hostname',
self.installer.get_option('qpid', 'host',
default_value=self.installer.get_option('ip')))
conf.add('qpid_password', self.installer.get_password('qpid'))
conf.add('qpid_username', self.installer.get_option('qpid', 'user_id'))
def fetch_dbdsn(self):
return dbhelper.fetch_dbdsn(
dbname=self.DB_NAME,
utf8=True,
dbtype=self.installer.get_option('db', 'type'),
**utils.merge_dicts(self.installer.get_option('db'),
dbhelper.get_shared_passwords(self.installer)))
def get_keystone_params(self, service_user):
return khelper.get_shared_params(
ip=self.installer.get_option('ip'),
service_user=service_user,
**utils.merge_dicts(self.installer.get_option('keystone'),
khelper.get_shared_passwords(self.installer)))
def setup_db(self):
dbhelper.drop_db(distro=self.installer.distro,
dbtype=self.installer.get_option('db', 'type'),
dbname=self.DB_NAME,
**utils.merge_dicts(self.installer.get_option('db'),
dbhelper.get_shared_passwords(self.installer)))
dbhelper.create_db(distro=self.installer.distro,
dbtype=self.installer.get_option('db', 'type'),
dbname=self.DB_NAME,
**utils.merge_dicts(self.installer.get_option('db'),
dbhelper.get_shared_passwords(self.installer)))

View File

@@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil.components.configurators import base
API_CONF = 'ceilometer.conf'
PIPELINE_CONF = 'pipeline.yaml'
SOURCES_CONF = 'sources.json'
POLICY_CONF = 'policy.json'
CONFIGS = [PIPELINE_CONF, API_CONF, POLICY_CONF, SOURCES_CONF]
class CeilometerConfigurator(base.Configurator):
DB_NAME = 'ceilometer'
def __init__(self, installer):
super(CeilometerConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {
API_CONF: self._config_adjust_api,
}
self.source_configs = {API_CONF: 'ceilometer.conf.sample'}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'),
'etc',
installer.name)
def _config_adjust_api(self, config):
# Setup your log dir
config.add('log_dir', '/var/log/ceilometer')
# Setup your sql connection
config.add_with_section('database', 'connection', self.fetch_dbdsn())

View File

@@ -1,76 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil.components.configurators import base
API_CONF = 'cinder.conf'
PASTE_CONF = 'api-paste.ini'
POLICY_CONF = 'policy.json'
CONFIGS = [PASTE_CONF, API_CONF, POLICY_CONF]
MQ_BACKENDS = {
'qpid': 'cinder.openstack.common.rpc.impl_qpid',
'rabbit': 'cinder.openstack.common.rpc.impl_kombu',
}
class CinderConfigurator(base.Configurator):
# This db will be dropped then created
DB_NAME = 'cinder'
def __init__(self, installer):
super(CinderConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {PASTE_CONF: self._config_adjust_paste,
API_CONF: self._config_adjust_api}
self.source_configs = {API_CONF: 'cinder.conf.sample'}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'),
'etc',
installer.name)
def _config_adjust_paste(self, config):
for (k, v) in self._fetch_keystone_params().items():
config.add_with_section('filter:authtoken', k, v)
def _config_adjust_api(self, config):
config.add('log_dir', '/var/log/cinder')
self.setup_rpc(config, rpc_backends=MQ_BACKENDS)
# Setup your sql connection
config.add('sql_connection', self.fetch_dbdsn())
# Auth will be using keystone
config.add('auth_strategy', 'keystone')
# Where our paste config is
config.add('api_paste_config', self.target_config(PASTE_CONF))
def _fetch_keystone_params(self):
params = self.get_keystone_params('cinder')
return {
'auth_host': params['endpoints']['admin']['host'],
'auth_port': params['endpoints']['admin']['port'],
'auth_protocol': params['endpoints']['admin']['protocol'],
'auth_uri': params['endpoints']['public']['uri'],
'admin_tenant_name': params['service_tenant'],
'admin_user': params['service_user'],
'admin_password': params['service_password'],
'service_host': params['endpoints']['internal']['host'],
'service_port': params['endpoints']['internal']['port'],
'service_protocol': params['endpoints']['internal']['protocol'],
'auth_version': 'v2.0'
}

View File

@@ -1,101 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import log as logging
from anvil import shell as sh
from anvil.components.helpers import glance as ghelper
from anvil.components.configurators import base
# Config files/sections
API_CONF = "glance-api.conf"
REG_CONF = "glance-registry.conf"
API_PASTE_CONF = 'glance-api-paste.ini'
REG_PASTE_CONF = 'glance-registry-paste.ini'
LOGGING_CONF = "logging.conf"
POLICY_JSON = 'policy.json'
CONFIGS = [API_CONF, REG_CONF, API_PASTE_CONF,
REG_PASTE_CONF, POLICY_JSON, LOGGING_CONF]
LOG = logging.getLogger(__name__)
class GlanceConfigurator(base.Configurator):
# This db will be dropped and created
DB_NAME = "glance"
def __init__(self, installer):
super(GlanceConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {REG_CONF: self._config_adjust_reg,
API_CONF: self._config_adjust_api,
REG_PASTE_CONF: self._config_adjust_paste,
API_PASTE_CONF: self._config_adjust_paste,
LOGGING_CONF: self._config_adjust_logging}
self.source_configs = {LOGGING_CONF: 'logging.cnf.sample'}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'), 'etc')
self.img_dir = "/var/lib/glance/images"
def _config_adjust_paste(self, config):
for (k, v) in self._fetch_keystone_params().items():
config.add_with_section('filter:authtoken', k, v)
def _config_adjust_api_reg(self, config):
config.add('debug', self.installer.get_bool_option('verbose'))
config.add('verbose', self.installer.get_bool_option('verbose'))
config.add('sql_connection', self.fetch_dbdsn())
config.add_with_section('paste_deploy', 'flavor', self.installer.get_option('paste_flavor'))
for (k, v) in self._fetch_keystone_params().items():
config.add_with_section('keystone_authtoken', k, v)
def _config_adjust_api(self, config):
self._config_adjust_api_reg(config)
gparams = ghelper.get_shared_params(**self.installer.options)
config.add('bind_port', gparams['endpoints']['public']['port'])
def ensure_image_storage(img_store_dir):
if sh.isdir(img_store_dir):
return
LOG.debug("Ensuring file system store directory %r exists.",
img_store_dir)
sh.mkdirslist(img_store_dir,
tracewriter=self.installer.tracewriter)
config.add('default_store', 'file')
config.add('filesystem_store_datadir', self.img_dir)
ensure_image_storage(self.img_dir)
def _config_adjust_reg(self, config):
self._config_adjust_api_reg(config)
gparams = ghelper.get_shared_params(**self.installer.options)
config.add('bind_port', gparams['endpoints']['registry']['port'])
def _config_adjust_logging(self, config):
config.add_with_section('logger_root', 'level', 'DEBUG')
config.add_with_section('logger_root', 'handlers', "devel,production")
def _fetch_keystone_params(self):
params = self.get_keystone_params('glance')
return {
'auth_host': params['endpoints']['admin']['host'],
'auth_port': params['endpoints']['admin']['port'],
'auth_protocol': params['endpoints']['admin']['protocol'],
'auth_uri': params['endpoints']['public']['uri'],
'admin_tenant_name': params['service_tenant'],
'admin_user': params['service_user'],
'admin_password': params['service_password'],
}

View File

@@ -1,36 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil.components.configurators import base
API_CONF = 'heat.conf'
PASTE_CONF = 'api-paste.ini'
POLICY_CONF = 'policy.json'
CONFIGS = [API_CONF, PASTE_CONF, POLICY_CONF]
class HeatConfigurator(base.Configurator):
DB_NAME = 'heat'
def __init__(self, installer):
super(HeatConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {}
self.source_configs = {API_CONF: 'heat.conf.sample'}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'),
'etc',
installer.name)

View File

@@ -1,36 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 GoDaddy Operating Company, LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil.components.configurators import base
API_CONF = 'ironic.conf'
PASTE_CONF = 'api-paste.ini'
POLICY_CONF = 'policy.json'
CONFIGS = [API_CONF, PASTE_CONF, POLICY_CONF]
class IronicConfigurator(base.Configurator):
DB_NAME = 'ironic'
def __init__(self, installer):
super(IronicConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {}
self.source_configs = {API_CONF: 'ironic.conf.sample'}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'),
'etc',
installer.name)

View File

@@ -1,77 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil import utils
from anvil.components.helpers import keystone as khelper
from anvil.components.configurators import base
# Configuration files keystone expects...
ROOT_CONF = "keystone.conf"
LOGGING_CONF = "logging.conf"
POLICY_JSON = 'policy.json'
PASTE_CONFIG = 'keystone-paste.ini'
CONFIGS = [ROOT_CONF, LOGGING_CONF, POLICY_JSON, PASTE_CONFIG]
# PKI base files
PKI_FILES = {
'ca_certs': 'ssl/certs/ca.pem',
'keyfile': 'ssl/private/signing_key.pem',
'certfile': 'ssl/certs/signing_cert.pem',
}
class KeystoneConfigurator(base.Configurator):
# This db will be dropped then created
DB_NAME = "keystone"
def __init__(self, installer):
super(KeystoneConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {ROOT_CONF: self._config_adjust_root,
LOGGING_CONF: self._config_adjust_logging}
self.source_configs = {LOGGING_CONF: 'logging.conf.sample',
ROOT_CONF: 'keystone.conf.sample',
PASTE_CONFIG: PASTE_CONFIG}
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'), 'etc')
def _config_adjust_logging(self, config):
config.add_with_section('logger_root', 'level', 'DEBUG')
config.add_with_section('logger_root', 'handlers', "devel,production")
def _config_adjust_root(self, config):
config.add('log_dir', '/var/log/keystone')
config.add('log_file', 'keystone-all.log')
params = khelper.get_shared_params(**utils.merge_dicts(self.installer.options,
khelper.get_shared_passwords(self.installer)))
config.add('admin_token', params['service_token'])
config.add('admin_port', params['endpoints']['admin']['port'])
config.add('public_port', params['endpoints']['public']['port'])
config.add('verbose', True)
config.add('debug', True)
if self.installer.get_bool_option('enable-pki'):
config.add_with_section('signing', 'token_format', 'PKI')
for (k, v) in PKI_FILES.items():
path = sh.joinpths(self.link_dir, v)
config.add_with_section('signing', k, path)
else:
config.add_with_section('signing', 'token_format', 'UUID')
config.add_with_section('catalog', 'driver', 'keystone.catalog.backends.sql.Catalog')
config.remove('DEFAULT', 'log_config')
config.add_with_section('sql', 'connection', self.fetch_dbdsn())
config.add_with_section('ec2', 'driver', "keystone.contrib.ec2.backends.sql.Ec2")
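# A minimal standalone sketch (illustration only, not part of anvil) of how
# the [signing] options above are assembled from PKI_FILES. The link_dir
# value and os.path.join stand in for the installer's link directory and
# anvil's sh.joinpths; both are assumptions made for illustration.
import os


def build_signing_options(link_dir, enable_pki):
    # Return the option/value pairs that would land in the [signing] section.
    options = {}
    if enable_pki:
        options['token_format'] = 'PKI'
        for key, rel_path in PKI_FILES.items():
            options[key] = os.path.join(link_dir, rel_path)
    else:
        options['token_format'] = 'UUID'
    return options


# Example (assumed link directory):
#   build_signing_options('/etc/keystone', enable_pki=True)
#   -> {'token_format': 'PKI', 'ca_certs': '/etc/keystone/ssl/certs/ca.pem', ...}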

View File

@ -1,125 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import importer
from anvil import shell as sh
# NOTE(imelnikov): used by plugin configurators, so it should be defined
# before those configurators are imported.
MQ_BACKENDS = {
'qpid': 'neutron.openstack.common.rpc.impl_qpid',
'rabbit': 'neutron.openstack.common.rpc.impl_kombu',
}
from anvil.components.configurators import base
from anvil.components.configurators.neutron_plugins import dhcp
from anvil.components.configurators.neutron_plugins import l3
from anvil.components.configurators.neutron_plugins import metadata
# Special generated conf
API_CONF = "neutron.conf"
# Config files/sections
PASTE_CONF = "api-paste.ini"
CONFIGS = [PASTE_CONF, API_CONF]
class NeutronConfigurator(base.Configurator):
# This db will be dropped and created
DB_NAME = "neutron"
def __init__(self, installer):
super(NeutronConfigurator, self).__init__(installer, CONFIGS)
self.core_plugin = installer.get_option("core_plugin")
self.plugin_configurators = {
'core_plugin': importer.import_entry_point(
"anvil.components.configurators.neutron_plugins.%s:%sConfigurator" %
(self.core_plugin, self.core_plugin.title()))(installer),
'l3': l3.L3Configurator(installer),
'metadata': metadata.MetadataConfigurator(installer),
'dhcp': dhcp.DhcpConfigurator(installer),
}
self.config_adjusters = {
PASTE_CONF: self._config_adjust_paste,
API_CONF: self._config_adjust_api,
}
for plugin_configurator in self.plugin_configurators.values():
self.config_adjusters.update(plugin_configurator.config_adjusters)
@property
def config_files(self):
config_files = list(self.configs)
for plugin_configurator in self.plugin_configurators.values():
config_files.extend(plugin_configurator.config_files)
return config_files
def source_config(self, config_fn):
if (config_fn.startswith("plugins") or
config_fn.startswith("rootwrap.d")):
real_fn = "neutron/%s" % config_fn
else:
real_fn = config_fn
fn = sh.joinpths(self.installer.get_option("app_dir"), "etc", real_fn)
return (fn, sh.load_file(fn))
def _config_adjust_paste(self, config):
config.current_section = "filter:authtoken"
for (k, v) in self._fetch_keystone_params().items():
config.add(k, v)
def _config_adjust_api(self, config):
config.add("core_plugin", self.plugin_configurators['core_plugin'].PLUGIN_CLASS)
config.add('auth_strategy', 'keystone')
config.add("api_paste_config", self.target_config(PASTE_CONF))
# TODO(aababilov): add debug to other services conf files
config.add('debug', self.installer.get_bool_option("debug"))
config.add("log_file", "")
config.add("log_dir", "/var/log/neutron")
# Setup the interprocess locking directory
# (don't put me on shared storage)
config.add('lock_path', '/var/lock/neutron')
self.setup_rpc(config, rpc_backends=MQ_BACKENDS)
config.current_section = "agent"
config.add("root_helper", "sudo neutron-rootwrap /etc/neutron/rootwrap.conf")
config.current_section = "keystone_authtoken"
for (k, v) in self._fetch_keystone_params().items():
config.add(k, v)
def _fetch_keystone_params(self):
params = self.get_keystone_params('neutron')
return {
"auth_host": params["endpoints"]["admin"]["host"],
"auth_port": params["endpoints"]["admin"]["port"],
"auth_protocol": params["endpoints"]["admin"]["protocol"],
# This uses the public uri not the admin one...
"auth_uri": params["endpoints"]["admin"]["uri"],
"admin_tenant_name": params["service_tenant"],
"admin_user": params["service_user"],
"admin_password": params["service_password"],
}
@property
def path_to_plugin_config(self):
return self.plugin_configurators['core_plugin'].path_to_plugin_config
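# A minimal standalone sketch (illustration only, not part of anvil) of the
# source-path resolution that source_config() performs above; os.path.join
# stands in for anvil's sh.joinpths and the app_dir value is an assumption.
import os


def resolve_source_config(app_dir, config_fn):
    # Plugin and rootwrap configs live one level deeper, under etc/neutron/.
    if config_fn.startswith("plugins") or config_fn.startswith("rootwrap.d"):
        real_fn = "neutron/%s" % config_fn
    else:
        real_fn = config_fn
    return os.path.join(app_dir, "etc", real_fn)


# Example (assumed checkout location):
#   resolve_source_config("/opt/stack/neutron", "plugins/openvswitch/ovs_neutron_plugin.ini")
#   -> "/opt/stack/neutron/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"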

View File

@ -1,15 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,85 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from anvil.components.configurators import base
from anvil.components.configurators.neutron import MQ_BACKENDS
from anvil import shell as sh
class Configurator(six.with_metaclass(abc.ABCMeta, base.Configurator)):
DB_NAME = "neutron"
PLUGIN_CLASS = "neutron.plugins.UNKNOWN"
PLUGIN_CONF = None
def __init__(self, installer):
super(Configurator, self).__init__(installer)
self.core_plugin = installer.get_option("core_plugin")
if self.PLUGIN_CONF is not None:
config_path = self._config_path(self.PLUGIN_CONF)
self.configs = [config_path]
self.config_adjusters = {
config_path: self._adjust_plugin_config
}
@abc.abstractmethod
def _adjust_plugin_config(self, plugin_conf):
pass
@abc.abstractmethod
def _config_path(self, name):
pass
@property
def path_to_plugin_config(self):
return self._config_path(self.PLUGIN_CONF)
class AgentConfigurator(Configurator):
def __init__(self, installer):
super(AgentConfigurator, self).__init__(installer)
def _adjust_plugin_config(self, plugin_conf):
params = self.get_keystone_params("neutron")
plugin_conf.add("admin_password", params["service_password"])
plugin_conf.add("admin_user", params["service_user"])
plugin_conf.add("admin_tenant_name", params["service_tenant"])
plugin_conf.add("auth_url", params["endpoints"]["admin"]["uri"])
plugin_conf.add("debug", self.installer.get_bool_option("debug"))
plugin_conf.add("verbose", self.installer.get_bool_option("verbose"))
def _config_path(self, name):
return name
class CorePluginConfigurator(Configurator):
def __init__(self, installer):
super(CorePluginConfigurator, self).__init__(installer)
def _adjust_plugin_config(self, plugin_conf):
self.setup_rpc(plugin_conf, rpc_backends=MQ_BACKENDS)
plugin_conf.add_with_section(
"DATABASE",
"sql_connection",
self.fetch_dbdsn())
def _config_path(self, name):
return sh.joinpths('plugins', self.core_plugin, name)

View File

@ -1,36 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components.configurators.neutron import MQ_BACKENDS
from anvil.components.configurators.neutron_plugins import base
class DhcpConfigurator(base.AgentConfigurator):
PLUGIN_CONF = "dhcp_agent.ini"
def _adjust_plugin_config(self, plugin_conf):
super(DhcpConfigurator, self)._adjust_plugin_config(plugin_conf)
plugin_conf.add("dhcp_driver", "neutron.agent.linux.dhcp.Dnsmasq")
plugin_conf.add("root_helper", "sudo neutron-rootwrap /etc/neutron/rootwrap.conf")
plugin_conf.add("use_namespaces", self.installer.get_option("use_namespaces",
default_value=True))
self.setup_rpc(plugin_conf, rpc_backends=MQ_BACKENDS)
if self.core_plugin == 'openvswitch':
plugin_conf.add("interface_driver", "neutron.agent.linux.interface.OVSInterfaceDriver")

View File

@ -1,37 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components.configurators.neutron import MQ_BACKENDS
from anvil.components.configurators.neutron_plugins import base
class L3Configurator(base.AgentConfigurator):
PLUGIN_CONF = "l3_agent.ini"
def _adjust_plugin_config(self, plugin_conf):
super(L3Configurator, self)._adjust_plugin_config(plugin_conf)
plugin_conf.add("external_network_bridge",
self.installer.get_option("external_bridge"))
plugin_conf.add("root_helper", "sudo neutron-rootwrap /etc/neutron/rootwrap.conf")
plugin_conf.add("use_namespaces", self.installer.get_option("use_namespaces",
default_value=True))
self.setup_rpc(plugin_conf, rpc_backends=MQ_BACKENDS)
if self.core_plugin == 'openvswitch':
plugin_conf.add("interface_driver", "neutron.agent.linux.interface.OVSInterfaceDriver")

View File

@ -1,37 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components.configurators.neutron_plugins import base
class LinuxbridgeConfigurator(base.CorePluginConfigurator):
PLUGIN_CONF = "linuxbridge_conf.ini"
PLUGIN_CLASS = "neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2"
def __init__(self, installer):
super(LinuxbridgeConfigurator, self).__init__(installer)
def _adjust_plugin_config(self, plugin_conf):
super(LinuxbridgeConfigurator, self)._adjust_plugin_config(plugin_conf)
plugin_conf.add_with_section(
"VLANS",
"network_vlan_ranges",
self.installer.get_option("network_vlan_ranges"))
plugin_conf.add_with_section(
"LINUX_BRIDGE",
"physical_interface_mappings",
self.installer.get_option("physical_interface_mappings"))

View File

@ -1,22 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components.configurators.neutron_plugins import base
class MetadataConfigurator(base.AgentConfigurator):
PLUGIN_CONF = "metadata_agent.ini"

View File

@ -1,32 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components.configurators.neutron_plugins import base
class OpenvswitchConfigurator(base.CorePluginConfigurator):
PLUGIN_CONF = "ovs_neutron_plugin.ini"
PLUGIN_CLASS = "neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2"
def __init__(self, installer):
super(OpenvswitchConfigurator, self).__init__(installer)
def _adjust_plugin_config(self, plugin_conf):
super(OpenvswitchConfigurator, self)._adjust_plugin_config(plugin_conf)
plugin_conf.add("integration_bridge",
self.installer.get_option("integration_bridge"))

View File

@ -1,347 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from anvil import exceptions
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components.helpers import neutron as net_helper
from anvil.components.helpers import virt as lv
from anvil.components.configurators import base
API_CONF = 'nova.conf'
PASTE_CONF = 'nova-api-paste.ini'
POLICY_CONF = 'policy.json'
LOGGING_CONF = "logging.conf"
CONFIGS = [PASTE_CONF, POLICY_CONF, LOGGING_CONF, API_CONF]
MQ_BACKENDS = {
'qpid': 'nova.rpc.impl_qpid',
'rabbit': 'nova.rpc.impl_kombu',
}
LOG = logging.getLogger(__name__)
class NovaConfigurator(base.Configurator):
# This db will be dropped then created
DB_NAME = 'nova'
def __init__(self, installer):
super(NovaConfigurator, self).__init__(installer, CONFIGS)
self.config_adjusters = {PASTE_CONF: self._config_adjust_paste,
API_CONF: self._config_adjust_api,
LOGGING_CONF: self._config_adjust_logging}
self.source_configs = {PASTE_CONF: 'api-paste.ini',
LOGGING_CONF: 'logging_sample.conf',
API_CONF: ['nova.conf.sample', True]}
self.tracewriter = self.installer.tracewriter
self.config_dir = sh.joinpths(self.installer.get_option('app_dir'),
'etc',
installer.name)
def _config_adjust_paste(self, config):
for (k, v) in self._fetch_keystone_params().items():
config.add_with_section('filter:authtoken', k, v)
def _config_adjust_api(self, nova_conf):
'''This method has the smarts to build the configuration file based on
various runtime values. A useful reference for figuring this out
is at http://docs.openstack.org/diablo/openstack-compute/admin/content/ch_configuring-openstack-compute.html
See also: https://github.com/openstack/nova/blob/master/etc/nova/nova.conf.sample
'''
# Used more than once so we calculate it ahead of time
hostip = self.installer.get_option('ip')
nova_conf.add('verbose', self.installer.get_bool_option('log_verbose'))
nova_conf.add('state_path', '/var/lib/nova')
nova_conf.add('log_dir', '/var/log/nova')
nova_conf.add('bindir', '/usr/bin')
# Allow destination machine to match source for resize.
nova_conf.add('allow_resize_to_same_host', True)
# Which scheduler do you want?
nova_conf.add('compute_scheduler_driver',
self.installer.get_option('scheduler', default_value='nova.scheduler.filter_scheduler.FilterScheduler'))
# Rate limit the API?
nova_conf.add('api_rate_limit', self.installer.get_bool_option('api_rate_limit'))
# Ensure the policy.json is referenced correctly
nova_conf.add('policy_file', '/etc/nova/policy.json')
# Setup nova network/settings
self._configure_network_settings(nova_conf)
# The ip of where we are running
nova_conf.add('my_ip', hostip)
# Setup how the database will be connected.
nova_conf.add('sql_connection', self.fetch_dbdsn())
# Configure anything libvirt related?
virt_driver = utils.canon_virt_driver(self.installer.get_option('virt_driver'))
if virt_driver == 'libvirt':
self._configure_libvirt(lv.canon_libvirt_type(self.installer.get_option('libvirt_type')), nova_conf)
# How instances will be presented
instance_template = "%s%s" % (self.installer.get_option('instance_name_prefix'),
self.installer.get_option('instance_name_postfix'))
if not instance_template:
instance_template = 'instance-%08x'
nova_conf.add('instance_name_template', instance_template)
# Enable the standard extensions
nova_conf.add('osapi_compute_extension',
'nova.api.openstack.compute.contrib.standard_extensions')
# Auth will be using keystone
nova_conf.add('auth_strategy', 'keystone')
# Is config drive being forced on?
if self.installer.get_bool_option('force_cfg_drive'):
nova_conf.add('force_config_drive', 'always')
# Don't always force images to raw, which makes things take time to get to raw...
nova_conf.add('force_raw_images', self.installer.get_bool_option('force_raw_images'))
# Add a checksum for images fetched for each hypervisor?
# This check absorbs cpu cycles, warning....
nova_conf.add('checksum_base_images', self.installer.get_bool_option('checksum_base_images'))
# Setup the interprocess locking directory (don't put me on shared storage)
nova_conf.add('lock_path', '/var/lock/nova')
# Vnc settings setup
self._configure_vnc(nova_conf)
# Where our paste config is
nova_conf.add('api_paste_config', self.target_config(PASTE_CONF))
# What our imaging service will be
self._configure_image_service(nova_conf, hostip)
# Configs for ec2 / s3 stuff
nova_conf.add('ec2_dmz_host', self.installer.get_option('ec2_dmz_host', default_value=hostip))
nova_conf.add('s3_host', hostip)
# How is your message queue setup?
self.setup_rpc(nova_conf, rpc_backends=MQ_BACKENDS)
# The USB tablet device is meant to improve mouse behavior in
# the VNC console, but it has the side effect of increasing
# the CPU usage of an idle VM tenfold.
nova_conf.add('use_usb_tablet', False)
# Is this a multihost setup?
self._configure_multihost(nova_conf)
# Handle any virt driver specifics
self._configure_virt_driver(nova_conf)
# Handle configuring the conductor service
self._configure_conductor(nova_conf)
def _config_adjust_logging(self, config):
config.add_with_section('logger_root', 'level', 'DEBUG')
config.add_with_section('logger_root', 'handlers', "stdout")
def _fetch_keystone_params(self):
params = self.get_keystone_params('nova')
return {
'auth_host': params['endpoints']['admin']['host'],
'auth_port': params['endpoints']['admin']['port'],
'auth_protocol': params['endpoints']['admin']['protocol'],
'admin_tenant_name': params['service_tenant'],
'admin_user': params['service_user'],
'admin_password': params['service_password'],
'service_host': params['endpoints']['internal']['host'],
'service_port': params['endpoints']['internal']['port'],
'service_protocol': params['endpoints']['internal']['protocol'],
}
def _get_extra(self, key):
extras = self.installer.get_option(key)
if not extras:
return []
cleaned_lines = []
extra_lines = str(extras).splitlines()
for line in extra_lines:
cleaned_line = line.strip()
if cleaned_line:
cleaned_lines.append(cleaned_line)
return cleaned_lines
def _convert_extra_flags(self, extra_flags):
converted_flags = list()
for f in extra_flags:
cleaned_opt = f.lstrip("-")
if len(cleaned_opt) == 0:
continue
if cleaned_opt.find("=") == -1:
cleaned_opt += "=%s" % (True)
converted_flags.append(cleaned_opt)
return converted_flags
def _get_content(self, nova_conf):
generated_content = nova_conf.generate()
extra_flags = self._get_extra('extra_flags')
if extra_flags:
LOG.warn("EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS!")
extra_flags = self._convert_extra_flags(extra_flags)
extra_opts = self._get_extra('extra_opts')
if extra_flags or extra_opts:
new_contents = list()
new_contents.append(generated_content)
new_contents.append("")
new_contents.append("# Extra flags")
new_contents.append("")
new_contents.extend(extra_flags)
new_contents.append("")
new_contents.append("# Extra options")
new_contents.append("")
new_contents.extend(extra_opts)
new_contents.append("")
generated_content = utils.joinlinesep(*new_contents)
return generated_content
def _configure_image_service(self, nova_conf, hostip):
# Which image service will you be using?
img_service = self.installer.get_option('img_service', default_value='nova.image.glance.GlanceImageService')
nova_conf.add('image_service', img_service)
# If glance then where is it?
if img_service.lower().find("glance") != -1:
glance_api_server = self.installer.get_option('glance_server', default_value=("%s:9292" % (hostip)))
nova_conf.add('glance_api_servers', glance_api_server)
def _configure_vnc(self, nova_conf):
# All nova-compute workers need to know the vnc configuration options
# These settings don't hurt anything if n-xvnc and n-novnc are disabled
nova_conf.add('novncproxy_base_url', self.installer.get_option('vncproxy_url'))
nova_conf.add('xvpvncproxy_base_url', self.installer.get_option('xvpvncproxy_url'))
nova_conf.add('vncserver_listen', self.installer.get_option('vncserver_listen', default_value='127.0.0.1'))
nova_conf.add('vncserver_proxyclient_address', self.installer.get_option('vncserver_proxyclient_address', default_value='127.0.0.1'))
def _configure_neutron(self, nova_conf):
params = self.get_keystone_params('nova')
params['neutron'] = net_helper.get_shared_params(
ip=self.installer.get_option('ip'),
**self.installer.get_option('neutron'))
nova_conf.add("network_api_class", "nova.network.neutronv2.api.API")
nova_conf.add("neutron_admin_username", params['service_user'])
nova_conf.add("neutron_admin_password", params['service_password'])
nova_conf.add("neutron_admin_auth_url", params['endpoints']['admin']['uri'])
nova_conf.add("neutron_auth_strategy", "keystone")
nova_conf.add("neutron_admin_tenant_name", params['service_tenant'])
nova_conf.add("neutron_url", params['neutron']['endpoints']['admin']['uri'])
nova_conf.add("libvirt_vif_driver",
"nova.virt.libvirt.vif.LibvirtGenericVIFDriver")
def _configure_cells(self, nova_conf):
cells_enabled = self.installer.get_bool_option('enable-cells')
nova_conf.add_with_section('cells', 'enable', cells_enabled)
def _configure_spice(self, nova_conf):
spicy = self.installer.get_bool_option('enable-spice')
nova_conf.add_with_section('spice', 'enable', spicy)
def _configure_conductor(self, nova_conf):
conductor_local = self.installer.get_bool_option('local-conductor')
nova_conf.add_with_section('conductor', 'use_local', conductor_local)
def _configure_network_settings(self, nova_conf):
if self.installer.get_bool_option('neutron-enabled'):
self._configure_neutron(nova_conf)
else:
nova_conf.add('network_manager', self.installer.get_option('network_manager'))
# Configure dhcp bridge settings.
# TODO(harlowja) why is this the same as the nova.conf?
nova_conf.add('dhcpbridge_flagfile', sh.joinpths(self.installer.cfg_dir, API_CONF))
# Network prefix for the IP network that all the projects for future VM guests reside on. Example: 192.168.0.0/12
nova_conf.add('fixed_range', self.installer.get_option('fixed_range'))
# The value for vlan_interface may default to the current value
# of public_interface. We'll grab the value and keep it handy.
public_interface = self._select_and_verify_interface('public_interface')
vlan_interface = self._select_and_verify_interface('vlan_interface', default_value=public_interface)
nova_conf.add('public_interface', public_interface)
nova_conf.add('vlan_interface', vlan_interface)
# This forces dnsmasq to update its leases table when an instance is terminated.
nova_conf.add('force_dhcp_release', True)
# Special virt driver network settings
nova_conf.add('flat_network_bridge', self.installer.get_option('flat_network_bridge', default_value='br100'))
nova_conf.add('flat_injected', self.installer.get_bool_option('flat_injected'))
flat_interface = self._select_and_verify_interface('flat_interface', default_value=public_interface)
nova_conf.add('flat_interface', flat_interface)
# Enables multi-host mode (if requested).
def _configure_multihost(self, nova_conf):
if self.installer.get_bool_option('multi_host'):
nova_conf.add('multi_host', True)
nova_conf.add('send_arp_for_ha', True)
# Any special libvirt configurations go here
def _configure_libvirt(self, virt_type, nova_conf):
nova_conf.add('libvirt_type', virt_type)
# https://blueprints.launchpad.net/nova/+spec/libvirt-xml-cpu-model
nova_conf.add('libvirt_cpu_mode', 'none')
# Configures any virt driver settings
def _configure_virt_driver(self, nova_conf):
drive_canon = utils.canon_virt_driver(self.installer.get_option('virt_driver'))
nova_conf.add('compute_driver', utils.VIRT_DRIVER_MAP.get(drive_canon, drive_canon))
if drive_canon == 'libvirt':
nova_conf.add('firewall_driver', self.installer.get_option('libvirt_firewall_driver'))
else:
nova_conf.add('firewall_driver', self.installer.get_option('basic_firewall_driver'))
def _select_and_verify_interface(self, option_name, default_value=None):
interfaces = self.installer.get_option(option_name, default_value=default_value)
if not interfaces:
raise exceptions.ConfigException("Could not find a value for option '%s'" % (option_name))
if isinstance(interfaces, six.string_types):
interfaces = [interfaces]
valid_interfaces = list(utils.get_interfaces())
LOG.debug("Checking if any of %s interfaces are valid (comparing against interfaces %s)",
interfaces, valid_interfaces)
matches = []
for name in interfaces:
if name in valid_interfaces:
matches.append(name)
if not matches:
raise exceptions.ConfigException("Interfaces %s (under key '%s') do not match any known"
" interfaces %s" % (interfaces, option_name, valid_interfaces))
return matches[0]
def verify(self):
# Do a little check to make sure actually have that interface/s
public_interface = self._select_and_verify_interface('public_interface')
self._select_and_verify_interface('vlan_interface', default_value=public_interface)
self._select_and_verify_interface('flat_interface', default_value=public_interface)
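# A minimal standalone sketch (illustration only, not part of anvil) of the
# extra-flags conversion performed by _convert_extra_flags() above; the
# sample flag strings in the example comment are hypothetical.
def convert_extra_flags(extra_flags):
    # Turn legacy "--flag" / "--flag=value" strings into "flag=value" options.
    converted = []
    for flag in extra_flags:
        cleaned = flag.lstrip("-")
        if not cleaned:
            continue
        if "=" not in cleaned:
            cleaned += "=%s" % True
        converted.append(cleaned)
    return converted


# Example:
#   convert_extra_flags(["--allow_resize_to_same_host", "--api_rate_limit=False"])
#   -> ['allow_resize_to_same_host=True', 'api_rate_limit=False']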

View File

@ -1,140 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
from anvil.components.helpers import db as dbhelper
import abc
LOG = logging.getLogger(__name__)
# Need to reset the password to blank since these distributions don't seem to
# always reset it when you uninstall the db.
RESET_BASE_PW = ''
# Copies from helper
BASE_ERROR = dbhelper.BASE_ERROR
class DBUninstaller(binstall.PkgUninstallComponent):
def __init__(self, *args, **kargs):
binstall.PkgUninstallComponent.__init__(self, *args, **kargs)
self.runtime = self.siblings.get('running')
def warm_configs(self):
dbhelper.get_shared_passwords(self)
def pre_uninstall(self):
dbtype = self.get_option("type")
dbactions = self.distro.get_command_config(dbtype, quiet=True)
try:
if dbactions:
LOG.info(("Attempting to reset your db password to %s so"
" that we can set it the next time you install."), colorizer.quote(RESET_BASE_PW))
pwd_cmd = self.distro.get_command(dbtype, 'set_pwd')
if pwd_cmd:
LOG.info("Ensuring your database is started before we operate on it.")
self.runtime.start()
self.runtime.wait_active()
params = {
'OLD_PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
'NEW_PASSWORD': RESET_BASE_PW,
'USER': self.get_option("user", default_value='root'),
}
cmds = [{'cmd': pwd_cmd}]
utils.execute_template(*cmds, params=params)
except IOError:
LOG.warn(("Could not reset the database password. You might have to manually "
"reset the password to %s before the next install"), colorizer.quote(RESET_BASE_PW))
class DBInstaller(binstall.PkgInstallComponent):
__meta__ = abc.ABCMeta
def __init__(self, *args, **kargs):
binstall.PkgInstallComponent.__init__(self, *args, **kargs)
self.runtime = self.siblings.get('running')
def config_params(self, config_fn):
# This dictionary will be used for parameter replacement
# In pre-install and post-install sections
mp = binstall.PkgInstallComponent.config_params(self, config_fn)
mp.update({
'PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
'BOOT_START': "true",
'USER': self.get_option("user", default_value='root'),
'SERVICE_HOST': self.get_option('ip'),
'HOST_IP': self.get_option('ip'),
})
return mp
def warm_configs(self):
dbhelper.get_shared_passwords(self)
@abc.abstractmethod
def _configure_db_confs(self):
pass
def post_install(self):
binstall.PkgInstallComponent.post_install(self)
# Fix up the db configs
self._configure_db_confs()
# Extra actions to ensure we are granted access
dbtype = self.get_option("type")
dbactions = self.distro.get_command_config(dbtype, quiet=True)
# Set your password
try:
if dbactions:
pwd_cmd = self.distro.get_command(dbtype, 'set_pwd')
if pwd_cmd:
LOG.info(("Attempting to set your db password"
" just incase it wasn't set previously."))
LOG.info("Ensuring your database is started before we operate on it.")
self.runtime.start()
self.runtime.wait_active()
params = {
'NEW_PASSWORD': dbhelper.get_shared_passwords(self)['pw'],
'USER': self.get_option("user", default_value='root'),
'OLD_PASSWORD': RESET_BASE_PW,
}
cmds = [{'cmd': pwd_cmd}]
utils.execute_template(*cmds, params=params)
except IOError:
LOG.warn(("Couldn't set your db password. It might have already "
"been set by a previous process."))
# Ensure access granted
dbhelper.grant_permissions(dbtype,
distro=self.distro,
user=self.get_option("user", default_value='root'),
restart_func=self.runtime.restart,
**dbhelper.get_shared_passwords(self))
class DBRuntime(bruntime.ServiceRuntime):
@property
def applications(self):
return [self.distro.get_command(self.get_option("type"), "daemon")[0]]

View File

@ -1,93 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.utils import OrderedDict
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
from anvil.components import base_testing as btesting
from anvil.components.helpers import glance as ghelper
from anvil.components.helpers import keystone as khelper
from anvil.components.configurators import glance as gconf
LOG = logging.getLogger(__name__)
# Sync db command
SYNC_DB_CMD = ['sudo', '-u', 'glance', '/usr/bin/glance-manage',
'--debug', '-v',
# Available commands:
'db_sync']
class GlanceInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = gconf.GlanceConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self.configurator.setup_db()
self._sync_db()
def _sync_db(self):
LOG.info("Syncing glance to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
@property
def env_exports(self):
to_set = OrderedDict()
params = ghelper.get_shared_params(**self.options)
for (endpoint, details) in params['endpoints'].items():
to_set[("GLANCE_%s_URI" % (endpoint.upper()))] = details['uri']
return to_set
class GlanceRuntime(bruntime.OpenStackRuntime):
def _get_image_urls(self):
uris = self.get_option('image_urls', default_value=[])
return [u.strip() for u in uris if len(u.strip())]
def post_start(self):
bruntime.OpenStackRuntime.post_start(self)
if self.get_bool_option('load-images'):
# Install any images that need activating...
self.wait_active()
params = {}
params['glance'] = ghelper.get_shared_params(**self.options)
params['keystone'] = khelper.get_shared_params(ip=self.get_option('ip'),
service_user='glance',
**utils.merge_dicts(self.get_option('keystone'),
khelper.get_shared_passwords(self)))
cache_dir = self.get_option('image_cache_dir')
if cache_dir:
params['cache_dir'] = cache_dir
ghelper.UploadService(**params).install(self._get_image_urls())
class GlanceTester(btesting.PythonTestingComponent):
# NOTE: only run the unit tests
def _get_test_command(self):
base_cmd = btesting.PythonTestingComponent._get_test_command(self)
base_cmd = base_cmd + ['--unittests-only']
return base_cmd
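# A minimal standalone sketch (illustration only, not part of anvil) of the
# env_exports naming scheme above; the endpoint URI in the example is a
# placeholder, not a value anvil would actually produce.
def build_env_exports(endpoints):
    # Map each endpoint name to a GLANCE_<NAME>_URI environment variable.
    exports = {}
    for endpoint, details in endpoints.items():
        exports["GLANCE_%s_URI" % endpoint.upper()] = details['uri']
    return exports


# Example (placeholder endpoint):
#   build_env_exports({'public': {'uri': 'http://127.0.0.1:9292'}})
#   -> {'GLANCE_PUBLIC_URI': 'http://127.0.0.1:9292'}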

View File

@ -1,22 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components import base_testing as btesting
class GlanceClientTester(btesting.PythonTestingComponent):
def _use_run_tests(self):
return False

View File

@ -1,28 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
from anvil.components import base_install as base
class GlobalRequirements(base.PythonComponent):
def __init__(self, *args, **kargs):
super(GlobalRequirements, self).__init__(*args, **kargs)
app_dir = self.get_option('app_dir')
self.requires_files = [
sh.joinpths(app_dir, 'global-requirements.txt'),
]

View File

@ -1,44 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components.configurators import heat as hconf
LOG = logging.getLogger(__name__)
# Sync db command
SYNC_DB_CMD = ['sudo', '-u', 'heat', '/usr/bin/heat-db-setup', '-y']
class HeatInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = hconf.HeatConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self._sync_db()
def _sync_db(self):
LOG.info("Syncing heat to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir)

View File

@ -1,15 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,40 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import utils
# v1 seems correct until bug 1190331 is fixed
# where the cinderclient doesn't seem to know anything
# beyond v1 unless told.
VERSION = "v1"
def get_shared_params(ip, api_host, api_port=8776, protocol='http', **kwargs):
mp = {}
mp['service_host'] = ip
# URIs of the various cinder endpoints
mp['endpoints'] = {
'admin': {
'uri': utils.make_url(protocol, api_host, api_port, VERSION),
'port': api_port,
'host': api_host,
'protocol': protocol,
},
}
mp['endpoints']['internal'] = dict(mp['endpoints']['admin'])
mp['endpoints']['public'] = dict(mp['endpoints']['admin'])
return mp
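# A minimal standalone sketch (illustration only, not part of anvil) of the
# endpoint layout built by get_shared_params() above. The stand-in below
# assumes anvil's utils.make_url composes protocol://host:port/path segments.
def _make_url(protocol, host, port, *paths):
    url = "%s://%s:%s" % (protocol, host, port)
    if paths:
        url += "/" + "/".join(str(p) for p in paths)
    return url


# Example (placeholder addresses):
#   _make_url('http', '10.0.0.10', 8776, VERSION)
#   -> 'http://10.0.0.10:8776/v1'
# which is then used as the admin endpoint and copied for the internal and
# public endpoints, as done above.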

View File

@ -1,124 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import exceptions as excp
from anvil import log
from anvil import utils
LOG = log.getLogger(__name__)
# Used as a generic error message
BASE_ERROR = 'Currently we do not know how to %r for database type %r'
# Password keys we warm up so you won't be prompted later
PASSWORD_PROMPT = 'the database user'
def get_shared_passwords(component):
return {
'pw': component.get_password('sql'),
}
def drop_db(distro, dbtype, user, pw, dbname, **kwargs):
dropcmd = distro.get_command(dbtype, 'drop_db', silent=True)
if dropcmd:
LOG.info('Dropping %s database: %s', colorizer.quote(dbtype), colorizer.quote(dbname))
params = dict()
params['PASSWORD'] = pw
params['USER'] = user
params['DB'] = dbname
cmds = list()
cmds.append({
'cmd': dropcmd,
})
utils.execute_template(*cmds, params=params)
else:
msg = BASE_ERROR % ('drop', dbtype)
raise NotImplementedError(msg)
def create_db(distro, dbtype, user, pw, dbname, **kwargs):
createcmd = distro.get_command(dbtype, 'create_db', silent=True)
if createcmd:
charset = kwargs.get('charset', 'utf8')
LOG.info('Creating %s database: %s (%s)', colorizer.quote(dbtype), colorizer.quote(dbname), charset)
params = dict()
params['PASSWORD'] = pw
params['USER'] = user
params['DB'] = dbname
params['CHARACTER_SET'] = charset
cmds = list()
cmds.append({
'cmd': createcmd,
})
utils.execute_template(*cmds, params=params)
else:
msg = BASE_ERROR % ('create', dbtype)
raise NotImplementedError(msg)
def grant_permissions(dbtype, distro, user, pw, restart_func=None):
"""Grant permissions on the database."""
dbactions = distro.get_command_config(dbtype, quiet=True)
if dbactions:
grant_cmd = distro.get_command(dbtype, 'grant_all')
if grant_cmd:
if restart_func:
LOG.info("Ensuring the database is started.")
restart_func()
params = {
'PASSWORD': pw,
'USER': user,
}
cmds = [{'cmd': grant_cmd}]
LOG.info("Giving user %s full control of all databases.", colorizer.quote(user))
utils.execute_template(*cmds, params=params)
return
def fetch_dbdsn(dbtype, user, host, port, pw, dbname, **kwargs):
"""Return the database connection string, including password."""
# Form the dsn (from components we have...)
# dsn = "<driver>://<username>:<password>@<host>:<port>/<database>"
# See: http://en.wikipedia.org/wiki/Data_Source_Name
if not host:
msg = "Unable to fetch a database dsn - no sql host found"
raise excp.BadParamException(msg)
driver = dbtype
if not driver:
msg = "Unable to fetch a database dsn - no db driver type found"
raise excp.BadParamException(msg)
dsn = str(driver) + "://"
if user:
dsn += str(user)
if pw:
dsn += ":" + str(pw)
if user or pw:
dsn += "@"
dsn += str(host)
if port:
dsn += ":" + str(port)
if dbname:
dsn += "/" + str(dbname)
if kwargs.get('utf8'):
dsn += "?charset=utf8"
else:
dsn += "/"
LOG.debug("For database %r fetched dsn %r" % (dbname, dsn))
return dsn
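# A minimal standalone sketch (illustration only, not part of anvil) of the
# DSN format documented in fetch_dbdsn() above; the credentials below are
# placeholders, and the charset handling assumes it only applies when a
# database name is present.
def build_dsn(driver, user, pw, host, port, dbname, utf8=False):
    # dsn = "<driver>://<username>:<password>@<host>:<port>/<database>"
    dsn = "%s://" % driver
    if user:
        dsn += str(user)
    if pw:
        dsn += ":" + str(pw)
    if user or pw:
        dsn += "@"
    dsn += str(host)
    if port:
        dsn += ":" + str(port)
    if dbname:
        dsn += "/" + str(dbname)
        if utf8:
            dsn += "?charset=utf8"
    else:
        dsn += "/"
    return dsn


# Example (placeholder credentials):
#   build_dsn('mysql', 'keystone', 'secret', '127.0.0.1', 3306, 'keystone', utf8=True)
#   -> 'mysql://keystone:secret@127.0.0.1:3306/keystone?charset=utf8'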

View File

@ -1,510 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import hashlib
import os
import re
import tarfile
import urlparse
from anvil import colorizer
from anvil import downloader as down
from anvil import exceptions as exc
from anvil import importer
from anvil import log
from anvil import shell as sh
from anvil import utils
LOG = log.getLogger(__name__)
# Extensions that tarfile knows how to work with
TAR_EXTS = ['.tgz', '.gzip', '.gz', '.bz2', '.tar']
# Used to attempt to produce a name for images (to see if we already have it)
# and to use as the final name...
# Reverse sorted so that .tar.gz replaces before .tar (and so on)
NAME_CLEANUPS = [
'.tar.gz',
'.img.gz',
'.qcow2',
'.img',
] + TAR_EXTS
NAME_CLEANUPS.sort()
NAME_CLEANUPS.reverse()
# Used to match various file names with what could be a kernel image
KERNEL_CHECKS = [
re.compile(r"(.*)vmlinuz(.*)$", re.I),
re.compile(r'(.*?)aki-tty/image$', re.I),
]
# Used to match various file names with what could be a root image
ROOT_CHECKS = [
re.compile(r"(.*)img$", re.I),
re.compile(r"(.*)qcow2$", re.I),
re.compile(r'(.*?)aki-tty/image$', re.I),
]
# Used to match various file names with what could be a ram disk image
RAMDISK_CHECKS = [
re.compile(r"(.*)-initrd$", re.I),
re.compile(r"initrd[-]?(.*)$", re.I),
re.compile(r"(.*)initramfs(.*)$", re.I),
re.compile(r'(.*?)ari-tty/image$', re.I),
]
# Skip files that match these patterns
SKIP_CHECKS = [
re.compile(r"^[.]", re.I),
]
# File extensions we will skip over (typically of content hashes)
BAD_EXTENSIONS = ['md5', 'sha', 'sfv']
class Unpacker(object):
def _get_tar_file_members(self, arc_fn):
LOG.info("Finding what exists in %s.", colorizer.quote(arc_fn))
files = []
with contextlib.closing(tarfile.open(arc_fn, 'r')) as tfh:
for tmemb in tfh.getmembers():
if not tmemb.isfile():
continue
files.append(tmemb.name)
return files
def _pat_checker(self, fn, patterns):
(_root_fn, fn_ext) = os.path.splitext(fn)
if utils.has_any(fn_ext.lower(), *BAD_EXTENSIONS):
return False
for pat in patterns:
if pat.search(fn):
return True
return False
def _find_pieces(self, files, files_location):
"""Match files against the patterns in KERNEL_CHECKS,
RAMDISK_CHECKS, and ROOT_CHECKS to determine which files
contain which image parts.
"""
kernel_fn = None
ramdisk_fn = None
img_fn = None
utils.log_iterable(
files,
logger=LOG,
header="Looking at %s files from %s to find the "
"kernel/ramdisk/root images" %
(len(files), colorizer.quote(files_location))
)
for fn in files:
if self._pat_checker(fn, KERNEL_CHECKS):
kernel_fn = fn
LOG.debug("Found kernel: %r" % (fn))
elif self._pat_checker(fn, RAMDISK_CHECKS):
ramdisk_fn = fn
LOG.debug("Found ram disk: %r" % (fn))
elif self._pat_checker(fn, ROOT_CHECKS):
img_fn = fn
LOG.debug("Found root image: %r" % (fn))
else:
LOG.debug("Unknown member %r - skipping" % (fn))
return (img_fn, ramdisk_fn, kernel_fn)
def _unpack_tar_member(self, tarhandle, member, output_location):
LOG.info("Extracting %s to %s.", colorizer.quote(member.name), colorizer.quote(output_location))
with contextlib.closing(tarhandle.extractfile(member)) as mfh:
with open(output_location, "wb") as ofh:
return sh.pipe_in_out(mfh, ofh)
def _describe(self, root_fn, ramdisk_fn, kernel_fn):
"""Make an "info" dict that describes the path, disk format, and
container format of each component of an image.
"""
info = dict()
if kernel_fn:
info['kernel'] = {
'file_name': kernel_fn,
'disk_format': 'aki',
'container_format': 'aki',
}
if ramdisk_fn:
info['ramdisk'] = {
'file_name': ramdisk_fn,
'disk_format': 'ari',
'container_format': 'ari',
}
info['file_name'] = root_fn
info['disk_format'] = 'ami'
info['container_format'] = 'ami'
return info
def _filter_files(self, files):
filtered = []
for fn in files:
if self._pat_checker(fn, SKIP_CHECKS):
pass
else:
filtered.append(fn)
return filtered
def _unpack_tar(self, file_name, file_location, tmp_dir):
(root_name, _) = os.path.splitext(file_name)
tar_members = self._filter_files(self._get_tar_file_members(file_location))
(root_img_fn, ramdisk_fn, kernel_fn) = self._find_pieces(tar_members, file_location)
if not root_img_fn:
msg = "Tar file %r has no root image member" % (file_name)
raise IOError(msg)
kernel_real_fn = None
root_real_fn = None
ramdisk_real_fn = None
self._log_pieces_found('archive', root_img_fn, ramdisk_fn, kernel_fn)
extract_dir = sh.mkdir(sh.joinpths(tmp_dir, root_name))
with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
for m in tfh.getmembers():
if m.name == root_img_fn:
root_real_fn = sh.joinpths(extract_dir, sh.basename(root_img_fn))
self._unpack_tar_member(tfh, m, root_real_fn)
elif ramdisk_fn and m.name == ramdisk_fn:
ramdisk_real_fn = sh.joinpths(extract_dir, sh.basename(ramdisk_fn))
self._unpack_tar_member(tfh, m, ramdisk_real_fn)
elif kernel_fn and m.name == kernel_fn:
kernel_real_fn = sh.joinpths(extract_dir, sh.basename(kernel_fn))
self._unpack_tar_member(tfh, m, kernel_real_fn)
return self._describe(root_real_fn, ramdisk_real_fn, kernel_real_fn)
def _log_pieces_found(self, src_type, root_fn, ramdisk_fn, kernel_fn):
pieces = []
if root_fn:
pieces.append("%s (root image)" % (colorizer.quote(root_fn)))
if ramdisk_fn:
pieces.append("%s (ramdisk image)" % (colorizer.quote(ramdisk_fn)))
if kernel_fn:
pieces.append("%s (kernel image)" % (colorizer.quote(kernel_fn)))
if pieces:
utils.log_iterable(pieces, logger=LOG,
header="Found %s images from a %s" % (len(pieces), src_type))
def _unpack_dir(self, dir_path):
"""Pick through a directory to figure out which files are which
image pieces, and create a dict that describes them.
"""
potential_files = set()
for fn in self._filter_files(sh.listdir(dir_path)):
full_fn = sh.joinpths(dir_path, fn)
if sh.isfile(full_fn):
potential_files.add(sh.canon_path(full_fn))
(root_fn, ramdisk_fn, kernel_fn) = self._find_pieces(potential_files, dir_path)
if not root_fn:
msg = "Directory %r has no root image member" % (dir_path)
raise IOError(msg)
self._log_pieces_found('directory', root_fn, ramdisk_fn, kernel_fn)
return self._describe(root_fn, ramdisk_fn, kernel_fn)
def unpack(self, file_name, file_location, tmp_dir):
if sh.isdir(file_location):
return self._unpack_dir(file_location)
elif sh.isfile(file_location):
(_, fn_ext) = os.path.splitext(file_name)
fn_ext = fn_ext.lower()
if fn_ext in TAR_EXTS:
return self._unpack_tar(file_name, file_location, tmp_dir)
elif fn_ext in ['.img', '.qcow2']:
info = dict()
info['file_name'] = file_location
if fn_ext == '.img':
info['disk_format'] = 'raw'
else:
info['disk_format'] = 'qcow2'
info['container_format'] = 'bare'
return info
msg = "Currently we do not know how to unpack %r" % (file_location)
raise IOError(msg)
class Cache(object):
"""Represents an image cache."""
def __init__(self, cache_dir, url):
self._cache_dir = cache_dir
self._url = url
hashed_url = self._hash(self._url)
self._cache_path = sh.joinpths(self._cache_dir, hashed_url)
self._details_path = sh.joinpths(self._cache_dir,
hashed_url + ".details")
@staticmethod
def _hash(data, alg='md5'):
"""Hash data with a given algorithm."""
hasher = hashlib.new(alg)
hasher.update(data)
return hasher.hexdigest()
def load_details(self):
"""Load cached image details."""
return utils.load_yaml_text(sh.load_file(self._details_path))
def save_details(self, details):
"""Save cached image details."""
sh.write_file(self._details_path, utils.prettify_yaml(details))
@property
def path(self):
return self._cache_path
@property
def is_valid(self):
"""Check if cache is valid."""
for path in (self._cache_path, self._details_path):
if not sh.exists(path):
return False
check_files = []
try:
image_details = self.load_details()
check_files.append(image_details['file_name'])
if 'kernel' in image_details:
check_files.append(image_details['kernel']['file_name'])
if 'ramdisk' in image_details:
check_files.append(image_details['ramdisk']['file_name'])
except Exception:
return False
for path in check_files:
if not sh.isfile(path):
return False
return True
class Image(object):
"""Represents an image with its own cache."""
def __init__(self, client, url, is_public, cache_dir):
self._client = client
self._url = url
self._parsed_url = urlparse.urlparse(url)
self._is_public = is_public
self._cache = Cache(cache_dir, url)
def _check_name(self, image_name):
"""Check if image already present in glance."""
for image in self._client.images.list():
if image_name == image.name:
raise exc.DuplicateException(
"Image %s already exists in glance." %
colorizer.quote(image_name)
)
def _create(self, file_name, **kwargs):
"""Create image in glance."""
with open(file_name, 'r') as fh:
image = self._client.images.create(data=fh, **kwargs)
return image.id
def _register(self, image_name, location):
"""Register image in glance."""
# Upload the kernel, if we have one
kernel = location.pop('kernel', None)
kernel_id = ''
if kernel:
kernel_image_name = "%s-vmlinuz" % (image_name)
self._check_name(kernel_image_name)
LOG.info('Adding kernel %s to glance.',
colorizer.quote(kernel_image_name))
LOG.info("Please wait installing...")
conf = {
'container_format': kernel['container_format'],
'disk_format': kernel['disk_format'],
'name': kernel_image_name,
'is_public': self._is_public,
}
kernel_id = self._create(kernel['file_name'], **conf)
# Upload the ramdisk, if we have one
initrd = location.pop('ramdisk', None)
initrd_id = ''
if initrd:
ram_image_name = "%s-initrd" % (image_name)
self._check_name(ram_image_name)
LOG.info('Adding ramdisk %s to glance.',
colorizer.quote(ram_image_name))
LOG.info("Please wait installing...")
conf = {
'container_format': initrd['container_format'],
'disk_format': initrd['disk_format'],
'name': ram_image_name,
'is_public': self._is_public,
}
initrd_id = self._create(initrd['file_name'], **conf)
# Upload the root, we must have one
LOG.info('Adding image %s to glance.', colorizer.quote(image_name))
self._check_name(image_name)
conf = {
'name': image_name,
'container_format': location['container_format'],
'disk_format': location['disk_format'],
'is_public': self._is_public,
'properties': {},
}
if kernel_id or initrd_id:
if kernel_id:
conf['properties']['kernel_id'] = kernel_id
if initrd_id:
conf['properties']['ramdisk_id'] = initrd_id
LOG.info("Please wait installing...")
image_id = self._create(location['file_name'], **conf)
return image_id
def _generate_image_name(self, url_fn):
"""Generate image name from a given url file name."""
name = url_fn
for look_for in NAME_CLEANUPS:
name = name.replace(look_for, '')
return name
def _extract_url_fn(self):
"""Extract filename from an image url."""
return sh.basename(self._parsed_url.path)
def _is_url_local(self):
"""Check if image url is local."""
return sh.exists(self._url) or (self._parsed_url.scheme == '' and
self._parsed_url.netloc == '')
def install(self):
"""Process image installation."""
url_fn = self._extract_url_fn()
if not url_fn:
raise IOError("Can not determine file name from url: %r" %
self._url)
if self._cache.is_valid:
LOG.info("Found valid cached image+metadata at: %s",
colorizer.quote(self._cache.path))
image_details = self._cache.load_details()
else:
sh.mkdir(self._cache.path)
if not self._is_url_local():
fetched_fn, bytes_down = down.UrlLibDownloader(
self._url,
sh.joinpths(self._cache.path, url_fn)).download()
LOG.debug("For url %s we downloaded %s bytes to %s", self._url,
bytes_down, fetched_fn)
else:
fetched_fn = self._url
image_details = Unpacker().unpack(url_fn, fetched_fn,
self._cache.path)
self._cache.save_details(image_details)
image_name = self._generate_image_name(url_fn)
image_id = self._register(image_name, image_details)
return image_name, image_id
class UploadService(object):
def __init__(self, glance, keystone,
cache_dir='/usr/share/anvil/glance/cache', is_public=True):
self._glance_params = glance
self._keystone_params = keystone
self._cache_dir = cache_dir
self._is_public = is_public
def _get_token(self, kclient_v2):
LOG.info("Getting your keystone token so that image uploads may proceed.")
k_params = self._keystone_params
client = kclient_v2.Client(username=k_params['admin_user'],
password=k_params['admin_password'],
tenant_name=k_params['admin_tenant'],
auth_url=k_params['endpoints']['public']['uri'])
return client.auth_token
def install(self, urls):
am_installed = 0
try:
# Done at a function level since this module may be used
# before these libraries actually exist.
gclient_v1 = importer.import_module('glanceclient.v1.client')
gexceptions = importer.import_module('glanceclient.common.exceptions')
kclient_v2 = importer.import_module('keystoneclient.v2_0.client')
kexceptions = importer.import_module('keystoneclient.exceptions')
except RuntimeError as e:
LOG.exception("Failed at importing required client modules: %s", e)
return am_installed
if urls:
try:
# Ensure all services are up
for params in (self._glance_params, self._keystone_params):
utils.wait_for_url(params['endpoints']['public']['uri'])
g_params = self._glance_params
client = gclient_v1.Client(endpoint=g_params['endpoints']['public']['uri'],
token=self._get_token(kclient_v2))
except (RuntimeError, gexceptions.ClientException,
kexceptions.ClientException, IOError) as e:
LOG.exception('Failed fetching needed clients for image calls due to: %s', e)
return am_installed
utils.log_iterable(urls, logger=LOG,
header="Attempting to download+extract+upload %s images" % len(urls))
for url in urls:
try:
img_handle = Image(client, url,
is_public=self._is_public,
cache_dir=self._cache_dir)
(name, img_id) = img_handle.install()
LOG.info("Installed image %s with id %s.",
colorizer.quote(name), colorizer.quote(img_id))
am_installed += 1
except exc.DuplicateException as e:
LOG.warning(e)
except (IOError,
tarfile.TarError,
gexceptions.ClientException,
kexceptions.ClientException) as e:
LOG.exception('Installing %r failed due to: %s', url, e)
return am_installed
def get_shared_params(ip, api_port=9292, protocol='http', reg_port=9191, **kwargs):
mp = {}
mp['service_host'] = ip
glance_host = ip
glance_port = api_port
glance_protocol = protocol
glance_registry_port = reg_port
# URIs of the http/https endpoints
mp['endpoints'] = {
'admin': {
'uri': utils.make_url(glance_protocol, glance_host, glance_port),
'port': glance_port,
'host': glance_host,
'protocol': glance_protocol,
},
'registry': {
'uri': utils.make_url(glance_protocol, glance_host, glance_registry_port),
'port': glance_registry_port,
'host': glance_host,
'protocol': glance_protocol,
}
}
mp['endpoints']['internal'] = dict(mp['endpoints']['admin'])
mp['endpoints']['public'] = dict(mp['endpoints']['admin'])
return mp
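# Illustrative sketch only (not part of the original file): the shape of the
# mapping returned by get_shared_params() above. The address is an assumption
# made up for the example; the exact uri text depends on utils.make_url().
_example = get_shared_params('192.168.1.10')
# _example['endpoints'] holds 'admin', 'registry', 'internal' and 'public'
# entries, each a dict with 'uri', 'port', 'host' and 'protocol' keys, e.g.
# _example['endpoints']['public']['uri'] is roughly 'http://192.168.1.10:9292'.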

View File

@ -1,215 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import importer
from anvil import log as logging
from anvil import utils
LOG = logging.getLogger(__name__)
class Initializer(object):
def __init__(self, service_token, admin_uri):
# Late import since it's using a client library that is only available after install...
self.client = importer.construct_entry_point(
"keystoneclient.v2_0.client:Client",
token=service_token, endpoint=admin_uri
)
def _create_tenants(self, tenants):
tenants_made = dict()
for entry in tenants:
name = entry['name']
if name in tenants_made:
LOG.warn("Already created tenant %s", colorizer.quote(name))
tenant = {
'tenant_name': name,
'description': entry['description'],
'enabled': True,
}
tenants_made[name] = self.client.tenants.create(**tenant)
return tenants_made
def _create_users(self, users, tenants):
created = dict()
for entry in users:
name = entry['name']
if name in created:
LOG.warn("Already created user %s", colorizer.quote(name))
password = entry['password']
email = entry['email']
user = {
'name': name,
'password': password,
'email': email,
}
created[name] = self.client.users.create(**user)
return created
def _create_roles(self, roles):
roles_made = dict()
for r in roles:
role = r
if role in roles_made:
LOG.warn("Already created role %s", colorizer.quote(role))
roles_made[role] = self.client.roles.create(role)
return roles_made
def _connect_roles(self, users, roles_made, tenants_made, users_made):
roles_attached = set()
for info in users:
name = info['name']
if name in roles_attached:
LOG.warn("Already attached roles to user %s", colorizer.quote(name))
roles_attached.add(name)
user = users_made[name]
for role_entry in info['roles']:
# Role:Tenant
(r, _sep, t) = role_entry.partition(":")
role_name = r
tenant_name = t
if not role_name or not tenant_name:
raise RuntimeError("Role or tenant name missing for user %s" % (name))
if role_name not in roles_made:
raise RuntimeError("Role %s not previously created for user %s" % (role_name, name))
if tenant_name not in tenants_made:
raise RuntimeError("Tenant %s not previously created for user %s" % (tenant_name, name))
user_role = {
'user': user,
'role': roles_made[role_name],
'tenant': tenants_made[tenant_name],
}
self.client.roles.add_user_role(**user_role)
def _create_services(self, services):
created_services = dict()
for info in services:
name = info['name']
if name in created_services:
LOG.warn("Already created service %s", colorizer.quote(name))
service = {
'name': name,
'service_type': info['type'],
'description': info.get('description') or ''
}
created_services[name] = self.client.services.create(**service)
return created_services
def _create_endpoints(self, endpoints, services):
for entry in endpoints:
name = entry['service']
if name not in services:
raise RuntimeError("Endpoint %s not attached to a previously created service" % (name))
service = services[name]
endpoint = {
'region': entry['region'],
'publicurl': entry['public_url'],
'adminurl': entry['admin_url'],
'internalurl': entry['internal_url'],
'service_id': service.id,
}
self.client.endpoints.create(**endpoint)
def initialize(self, users, tenants, roles, services, endpoints):
created_tenants = self._create_tenants(tenants)
created_users = self._create_users(users, created_tenants)
created_roles = self._create_roles(roles)
self._connect_roles(users, created_roles, created_tenants, created_users)
services_made = self._create_services(services)
self._create_endpoints(endpoints, services_made)
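# Illustrative sketch only (not part of the original file): the kind of payload
# initialize() expects, based on the fields read by the methods above. All
# names, passwords and urls here are assumptions made up for the example (the
# real values come from the rendered init_what.yaml template).
EXAMPLE_INIT = {
    'tenants': [{'name': 'admin', 'description': 'admin tenant'}],
    'users': [{'name': 'admin', 'password': 'secret', 'email': 'admin@example.com',
               'roles': ['admin:admin']}],   # each role entry is "role:tenant"
    'roles': ['admin'],
    'services': [{'name': 'keystone', 'type': 'identity',
                  'description': 'identity service'}],
    'endpoints': [{'service': 'keystone', 'region': 'RegionOne',
                   'public_url': 'http://127.0.0.1:5000/v2.0',
                   'admin_url': 'http://127.0.0.1:35357/v2.0',
                   'internal_url': 'http://127.0.0.1:5000/v2.0'}],
}
# Initializer(service_token, admin_uri).initialize(**EXAMPLE_INIT)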
def get_shared_passwords(component):
mp = {}
mp['service_token'] = component.get_password("service_token")
mp['admin_password'] = component.get_password('admin_password')
mp['service_password'] = component.get_password('service_password')
return mp
def get_shared_params(ip, service_token, admin_password, service_password,
auth_host, auth_port, auth_proto, service_host, service_port, service_proto,
**kwargs):
mp = {}
# Tenants and users
mp['tenants'] = ['admin', 'service']
mp['users'] = ['admin']
mp['admin_tenant'] = 'admin'
mp['admin_user'] = 'admin'
mp['service_tenant'] = 'service'
if 'service_user' in kwargs:
mp['users'].append(kwargs['service_user'])
mp['service_user'] = kwargs['service_user']
# Tokens and passwords
mp['service_token'] = service_token
mp['admin_password'] = admin_password
mp['service_password'] = service_password
host_ip = ip
mp['service_host'] = host_ip
# Components of the admin endpoint
keystone_auth_host = auth_host
keystone_auth_port = auth_port
keystone_auth_proto = auth_proto
keystone_auth_uri = utils.make_url(keystone_auth_proto,
keystone_auth_host, keystone_auth_port, path="v2.0")
# Components of the public+internal endpoint
keystone_service_host = service_host
keystone_service_port = service_port
keystone_service_proto = service_proto
keystone_service_uri = utils.make_url(keystone_service_proto,
keystone_service_host, keystone_service_port, path="v2.0")
mp['endpoints'] = {
'admin': {
'uri': keystone_auth_uri,
'port': keystone_auth_port,
'protocol': keystone_auth_proto,
'host': keystone_auth_host,
},
'admin_templated': {
'uri': utils.make_url(keystone_auth_proto, keystone_auth_host,
port='$(admin_port)s', path="v2.0"),
'protocol': keystone_auth_proto,
'host': keystone_auth_host,
},
'public': {
'uri': keystone_service_uri,
'port': keystone_service_port,
'protocol': keystone_service_proto,
'host': keystone_service_host,
},
'public_templated': {
'uri': utils.make_url(keystone_service_proto,
keystone_service_host,
port='$(public_port)s', path="v2.0"),
'protocol': keystone_service_proto,
'host': keystone_service_host,
},
}
mp['endpoints']['internal'] = dict(mp['endpoints']['public'])
mp['endpoints']['internal_templated'] = dict(mp['endpoints']['public_templated'])
return mp

View File

@ -1,35 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import utils
def get_shared_params(ip, api_host, api_port=9696, protocol='http', **kwargs):
mp = {}
mp['service_host'] = ip
# URIs of the http/https endpoints
mp['endpoints'] = {
'admin': {
'uri': utils.make_url(protocol, api_host, api_port),
'port': api_port,
'host': api_host,
'protocol': protocol,
},
}
mp['endpoints']['internal'] = dict(mp['endpoints']['admin'])
mp['endpoints']['public'] = dict(mp['endpoints']['admin'])
return mp

View File

@ -1,196 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import psutil
import re
import weakref
from anvil import shell as sh
from anvil import utils
from anvil.components.configurators import nova as nconf
from anvil.components.helpers import virt as lv
def get_shared_params(ip, protocol,
api_host, api_port,
s3_host, s3_port,
ec2_host, ec2_port,
ec2_admin_host, ec2_admin_port, **kwargs):
mp = {}
mp['service_host'] = ip
# URIs of the various nova endpoints
mp['endpoints'] = {
'ec2_admin': {
'uri': utils.make_url(protocol, ec2_admin_host, ec2_admin_port, "services/Admin"),
'port': ec2_admin_port,
'host': ec2_admin_host,
'protocol': protocol,
},
'ec2_cloud': {
'uri': utils.make_url(protocol, ec2_host, ec2_port, "services/Cloud"),
'port': ec2_port,
'host': ec2_host,
'protocol': protocol,
},
's3': {
'uri': utils.make_url(protocol, s3_host, s3_port),
'port': s3_port,
'host': s3_host,
'protocol': protocol,
},
'api': {
'uri': utils.make_url(protocol, api_host, api_port, "v2"),
'port': api_port,
'host': api_host,
'protocol': protocol,
},
}
return mp
class ComputeCleaner(object):
def __init__(self, uninstaller):
self.uninstaller = weakref.proxy(uninstaller)
def clean(self):
virsh = lv.Virsh(self.uninstaller.get_int_option('service_wait_seconds'), self.uninstaller.distro)
virt_driver = utils.canon_virt_driver(self.uninstaller.get_option('virt_driver'))
if virt_driver == 'libvirt':
inst_prefix = self.uninstaller.get_option('instance_name_prefix', default_value='instance-')
libvirt_type = lv.canon_libvirt_type(self.uninstaller.get_option('libvirt_type'))
virsh.clear_domains(libvirt_type, inst_prefix)
class NetworkCleaner(object):
def __init__(self, uninstaller):
self.uninstaller = weakref.proxy(uninstaller)
def _stop_dnsmasq(self):
# Shut down dnsmasq, which is typically used by nova-network
# to provide dhcp leases; since nova currently doesn't
# seem to shut it down itself (why not?) we have to do it for it...
#
# TODO(harlowja) file a bug to get that fixed...
to_kill = []
for proc in psutil.process_iter():
if proc.name.find("dnsmasq") == -1:
continue
cwd = ''
cmdline = ''
cwd = proc.getcwd()
cmdline = " ".join(proc.cmdline)
to_try = False
for t in [cwd, cmdline]:
if t.lower().find("nova") != -1:
to_try = True
if to_try:
to_kill.append(proc.pid)
if len(to_kill):
utils.log_iterable(to_kill,
header="Killing leftover nova dnsmasq processes with process ids",
logger=nconf.LOG)
for pid in to_kill:
sh.kill(pid)
def _clean_iptables(self):
# Nova doesn't seem to clean up the iptables rules that it
# establishes when it is removed; this is unfortunate as it
# means that when nova is uninstalled it may have left the
# host machine in an unusable state...
#
# TODO(harlowja) file a bug to get that fixed...
def line_matcher(line, start_text):
if not line:
return False
if not line.startswith(start_text):
return False
if line.lower().find("nova") == -1:
return False
return True
def translate_rule(line, start_search, start_replace):
line = re.sub(r"-c\s+[0-9]*\s+[0-9]*", "", line, re.I)
if not line.startswith(start_search):
return line
return line.replace(start_search, start_replace, 1)
# Isolate the nova rules
clean_rules = []
list_cmd = ['iptables', '--list-rules', '--verbose']
(stdout, _stderr) = sh.execute(list_cmd)
for line in stdout.splitlines():
line = line.strip()
if not line_matcher(line, "-A"):
continue
# Translate it into a delete rule operation
rule = translate_rule(line, "-A", "-D")
if rule:
clean_rules.append(rule)
# Isolate the nova nat rules
clean_nats = []
nat_cmd = ['iptables', '--list-rules', '--verbose', '--table', 'nat']
(stdout, _stderr) = sh.execute(nat_cmd)
for line in stdout.splitlines():
line = line.strip()
if not line_matcher(line, "-A"):
continue
# Translate it into a delete rule operation
rule = translate_rule(line, "-A", "-D")
if rule:
clean_nats.append(rule)
# Isolate the nova chains
clean_chains = []
chain_cmd = ['iptables', '--list-rules', '--verbose']
(stdout, _stderr) = sh.execute(chain_cmd)
for line in stdout.splitlines():
if not line_matcher(line, "-N"):
continue
# Translate it into a delete rule operation
rule = translate_rule(line, "-N", "-X")
if rule:
clean_chains.append(rule)
# Isolate the nova nat chains
clean_nat_chains = []
nat_chain_cmd = ['iptables', '--list-rules', '--verbose', '--table', 'nat']
(stdout, _stderr) = sh.execute(nat_chain_cmd)
for line in stdout.splitlines():
if not line_matcher(line, "-N"):
continue
# Translate it into a delete rule operation
rule = translate_rule(line, "-N", "-X")
if rule:
clean_nat_chains.append(rule)
# Now execute them...
for r in clean_rules + clean_chains:
pieces = r.split(None)
pieces = ['iptables'] + pieces
sh.execute(pieces, shell=True)
for r in clean_nats + clean_nat_chains:
pieces = r.split(None)
pieces = ['iptables', '--table', 'nat'] + pieces
sh.execute(pieces, shell=True)
def clean(self):
self._stop_dnsmasq()
self._clean_iptables()
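# Illustrative sketch only (not part of the original file): the same
# strip-counters and append-to-delete transformation that translate_rule()
# performs inside _clean_iptables() above. The rule text in the usage note is
# an assumption made up for the example.
def _translate_example(line):
    """Turn an iptables '-A ...' append rule into a matching '-D ...' delete."""
    line = re.sub(r"(?i)-c\s+[0-9]*\s+[0-9]*", "", line)
    if line.startswith("-A"):
        line = line.replace("-A", "-D", 1)
    return line

# e.g. _translate_example('-A nova-network-FORWARD -s 10.0.0.0/24 -c 12 3456 -j ACCEPT')
# yields a matching delete rule beginning with '-D nova-network-FORWARD'.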

View File

@ -1,151 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from anvil import colorizer
from anvil import exceptions as excp
from anvil import importer
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
LOG = logging.getLogger(__name__)
# See: http://libvirt.org/uri.html
LIBVIRT_PROTOCOL_MAP = {
'qemu': "qemu:///system",
'kvm': "qemu:///system",
'xen': 'xen:///',
'uml': 'uml:///system',
'lxc': 'lxc:///',
}
# Status is either dead or alive!
_DEAD = 'DEAD'
_ALIVE = 'ALIVE'
# Type that should always work
DEF_VIRT_TYPE = 'qemu'
def canon_libvirt_type(virt_type):
virt_type = str(virt_type).lower().strip()
if virt_type not in LIBVIRT_PROTOCOL_MAP:
return DEF_VIRT_TYPE
else:
return virt_type
class Virsh(object):
def __init__(self, service_wait, distro):
self.distro = distro
self.wait_time = service_wait
self.wait_attempts = 5
def _service_status(self):
cmd = self.distro.get_command('libvirt', 'status')
(stdout, stderr) = sh.execute(cmd, check_exit_code=False)
combined = (stdout + stderr)
if combined.lower().find("running") != -1 or combined.lower().find('start') != -1:
return (_ALIVE, combined)
else:
return (_DEAD, combined)
def _destroy_domain(self, libvirt, conn, dom_name):
try:
dom = conn.lookupByName(dom_name)
if dom:
LOG.debug("Destroying domain (%r) (id=%s) running %r" % (dom_name, dom.ID(), dom.OSType()))
dom.destroy()
dom.undefine()
except libvirt.libvirtError as e:
LOG.warn("Could not clear out libvirt domain %s due to: %s", colorizer.quote(dom_name), e)
def restart_service(self):
cmd = self.distro.get_command('libvirt', 'restart')
sh.execute(cmd)
def wait_active(self):
# TODO(harlowja) fix this by using the component wait active...
started = False
for _i in range(0, self.wait_attempts):
(st, output) = self._service_status()
if st != _ALIVE:
LOG.info("Please wait %s seconds until libvirt is started.", self.wait_time)
sh.sleep(self.wait_time)
else:
started = True
break
if not started:
raise excp.StartException("Unable to start the libvirt daemon due to: %s" % (output))
def check_virt(self, virt_type):
virt_protocol = LIBVIRT_PROTOCOL_MAP.get(virt_type)
self.restart_service()
self.wait_active()
cmds = [{
'cmd': self.distro.get_command('libvirt', 'verify'),
}]
mp = {
'VIRT_PROTOCOL': virt_protocol,
'VIRT_TYPE': virt_type,
}
utils.execute_template(*cmds, params=mp)
def clear_domains(self, virt_type, inst_prefix):
libvirt = None
try:
# A late import is done since this code could be used before libvirt is actually
# installed; a top-level python import would then fail and break anvil, so
# import it dynamically instead.
libvirt = importer.import_module('libvirt')
except RuntimeError as e:
pass
if not libvirt:
LOG.warn("Could not clear out libvirt domains, libvirt not available for python.")
return
virt_protocol = LIBVIRT_PROTOCOL_MAP.get(virt_type)
if not virt_protocol:
LOG.warn("Could not clear out libvirt domains, no known protocol for virt type: %s", colorizer.quote(virt_type))
return
LOG.info("Attempting to clear out leftover libvirt domains using protocol: %s", colorizer.quote(virt_protocol))
try:
self.restart_service()
self.wait_active()
except (excp.StartException, IOError) as e:
LOG.warn("Could not restart the libvirt daemon due to: %s", e)
return
try:
conn = libvirt.open(virt_protocol)
except libvirt.libvirtError as e:
LOG.warn("Could not connect to libvirt using protocol %s due to: %s", colorizer.quote(virt_protocol), e)
return
with contextlib.closing(conn) as ch:
try:
defined_domains = ch.listDefinedDomains()
kill_domains = list()
for domain in defined_domains:
if domain.startswith(inst_prefix):
kill_domains.append(domain)
if kill_domains:
header = ("Found %s old domains to destroy" %
len(kill_domains))
utils.log_iterable(kill_domains, logger=LOG, header=header)
for domain in sorted(kill_domains):
self._destroy_domain(libvirt, ch, domain)
except libvirt.libvirtError as e:
LOG.warn("Could not clear out libvirt domains due to: %s", e)

View File

@ -1,28 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import log as logging
from anvil.components import base_runtime as bruntime
LOG = logging.getLogger(__name__)
class HorizonRuntime(bruntime.ServiceRuntime):
@property
def applications(self):
return [self.distro.get_command("apache", "daemon")[0]]

View File

@ -1,45 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 GoDaddy Operating Company, LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components.configurators import ironic as iconf
LOG = logging.getLogger(__name__)
# Sync db command
SYNC_DB_CMD = ['sudo', '-u', 'ironic', '/usr/bin/ironic-dbsync',
'create_schema']
class IronicInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = iconf.IronicConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self._sync_db()
def _sync_db(self):
LOG.info("Syncing ironic to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir)

View File

@ -1,155 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.utils import OrderedDict
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
from anvil.components import base_testing as btesting
from anvil.components.helpers import cinder as chelper
from anvil.components.helpers import glance as ghelper
from anvil.components.helpers import keystone as khelper
from anvil.components.helpers import neutron as net_helper
from anvil.components.helpers import nova as nhelper
from anvil.components.configurators import keystone as kconf
LOG = logging.getLogger(__name__)
# This yaml file controls keystone initialization
INIT_WHAT_FN = 'init_what.yaml'
# Existence of this file signifies that initialization ran
INIT_WHAT_HAPPENED = "keystone.inited.yaml"
# Invoking the keystone manage command uses this template
MANAGE_CMD = ['sudo', '-u', 'keystone', '/usr/bin/keystone-manage',
'--debug', '-v']
class KeystoneInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = kconf.KeystoneConfigurator(self)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
if self.get_bool_option('db-sync'):
self.configurator.setup_db()
self._sync_db()
if self.get_bool_option('enable-pki'):
self._setup_pki()
def _sync_db(self):
LOG.info("Syncing keystone to database: %s", colorizer.quote(self.configurator.DB_NAME))
sync_cmd = MANAGE_CMD + ['db_sync']
cmds = [{'cmd': sync_cmd}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
@property
def env_exports(self):
params = khelper.get_shared_params(**utils.merge_dicts(self.options,
khelper.get_shared_passwords(self)))
to_set = OrderedDict()
to_set['OS_PASSWORD'] = params['admin_password']
to_set['OS_TENANT_NAME'] = params['admin_tenant']
to_set['OS_USERNAME'] = params['admin_user']
to_set['OS_AUTH_URL'] = params['endpoints']['public']['uri']
for (endpoint, details) in params['endpoints'].items():
if endpoint.find('templated') != -1:
continue
to_set[("KEYSTONE_%s_URI" % (endpoint.upper()))] = details['uri']
return to_set
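# Illustrative sketch only (not part of the original file): the kind of mapping
# env_exports produces, e.g. for writing a shell rc file. The host and ports are
# assumptions made up for the example; templated endpoints are skipped above.
#
#   OS_PASSWORD=<admin_password>
#   OS_TENANT_NAME=admin
#   OS_USERNAME=admin
#   OS_AUTH_URL=http://10.0.0.1:5000/v2.0
#   KEYSTONE_ADMIN_URI=http://10.0.0.1:35357/v2.0
#   KEYSTONE_PUBLIC_URI=http://10.0.0.1:5000/v2.0
#   KEYSTONE_INTERNAL_URI=http://10.0.0.1:5000/v2.0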
def _setup_pki(self):
LOG.info("Setting up keystone's pki support.")
for value in kconf.PKI_FILES.values():
sh.mkdirslist(sh.dirname(sh.joinpths(self.configurator.link_dir, value)),
tracewriter=self.tracewriter)
pki_cmd = MANAGE_CMD + ['pki_setup']
cmds = [{'cmd': pki_cmd}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
def warm_configs(self):
khelper.get_shared_passwords(self)
class KeystoneUninstaller(binstall.PkgUninstallComponent):
def __init__(self, *args, **kargs):
binstall.PkgUninstallComponent.__init__(self, *args, **kargs)
self.init_fn = sh.joinpths(self.get_option('trace_dir'), INIT_WHAT_HAPPENED)
def unconfigure(self):
if sh.isfile(self.init_fn):
sh.unlink(self.init_fn)
class KeystoneRuntime(bruntime.OpenStackRuntime):
def __init__(self, *args, **kargs):
bruntime.OpenStackRuntime.__init__(self, *args, **kargs)
self.init_fn = sh.joinpths(self.get_option('trace_dir'), INIT_WHAT_HAPPENED)
def daemon_name(self, program):
# NOTE(harlowja): 'all' just runs the keystone service since the service name
# dropped its '-all' suffix, so we need to reflect that here.
if program in ('all',):
return "openstack-%s" % (self.name)
return super(KeystoneRuntime, self).daemon_name(program)
def post_start(self):
if not sh.isfile(self.init_fn) and self.get_bool_option('do-init'):
self.wait_active()
LOG.info("Running commands to initialize keystone.")
(fn, contents) = utils.load_template(self.name, INIT_WHAT_FN)
LOG.debug("Initializing with contents of %s", fn)
params = {}
params['keystone'] = khelper.get_shared_params(**utils.merge_dicts(self.options, khelper.get_shared_passwords(self)))
params['glance'] = ghelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('glance'))
params['nova'] = nhelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('nova'))
params['neutron'] = net_helper.get_shared_params(ip=self.get_option('ip'), **self.get_option('neutron'))
params['cinder'] = chelper.get_shared_params(ip=self.get_option('ip'), **self.get_option('cinder'))
wait_urls = [
params['keystone']['endpoints']['admin']['uri'],
params['keystone']['endpoints']['public']['uri'],
]
for url in wait_urls:
utils.wait_for_url(url)
init_what = utils.load_yaml_text(contents)
init_what = utils.expand_template_deep(init_what, params)
try:
init_how = khelper.Initializer(params['keystone']['service_token'],
params['keystone']['endpoints']['admin']['uri'])
init_how.initialize(**init_what)
except RuntimeError:
LOG.exception("Failed to initialize keystone, is the keystone client library available?")
else:
# Writing this makes sure that we don't init again
sh.write_file(self.init_fn, utils.prettify_yaml(init_what))
LOG.info("If you wish to re-run initialization, delete %s", colorizer.quote(self.init_fn))
class KeystoneTester(btesting.PythonTestingComponent):
# Disable the keystone client integration tests
def _get_test_command(self):
base_cmd = btesting.PythonTestingComponent._get_test_command(self)
base_cmd = base_cmd + ['-xintegration']
return base_cmd

View File

@ -1,88 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import exceptions
from anvil import log as logging
from anvil import shell as sh
from anvil.components import base
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
from anvil.components.configurators import neutron as qconf
LOG = logging.getLogger(__name__)
# Sync db command
# FIXME(aababilov)
SYNC_DB_CMD = ["sudo", "-u", "neutron", "/usr/bin/neutron-db-manage",
"sync"]
class NeutronPluginMixin(base.Component):
def subsystem_names(self):
core_plugin = self.get_option("core_plugin")
return [(name if name != "agent" else "%s-agent" % (core_plugin))
for name in self.subsystems.iterkeys()]
class NeutronInstaller(binstall.PythonInstallComponent, NeutronPluginMixin):
def __init__(self, *args, **kargs):
super(NeutronInstaller, self).__init__(*args, **kargs)
self.configurator = qconf.NeutronConfigurator(self)
def pre_install(self):
# Check if network namespaces are supported.
if self.get_option("use_namespaces", default_value=True):
try:
# "ip netns" command is used for network namespace management.
# We are trying to execute this command and if it was executed
# successfully then network namespaces support is enabled.
sh.execute(["ip", "netns"])
except exceptions.ProcessExecutionError:
raise exceptions.InstallException(
"Network namespaces are not supported in your system. "
"Please, install kernel and iproute with network "
"namespaces support. To install them from RDO you can "
"use the following script: "
"./tools/install-neutron-ns-packages.sh")
super(NeutronInstaller, self).pre_install()
def post_install(self):
super(NeutronInstaller, self).post_install()
if self.get_bool_option("db-sync"):
self.configurator.setup_db()
self._sync_db()
self.create_symlink_to_conf_file()
def _sync_db(self):
LOG.info("Syncing neutron to database: %s", colorizer.quote(self.configurator.DB_NAME))
# TODO(aababilov): update db if required
def create_symlink_to_conf_file(self):
sh.symlink(self.configurator.path_to_plugin_config,
"/etc/neutron/plugin.ini",
force=True)
class NeutronUninstaller(binstall.PkgUninstallComponent, NeutronPluginMixin):
pass
class NeutronRuntime(bruntime.OpenStackRuntime, NeutronPluginMixin):
pass

View File

@ -1,193 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import exceptions as excp
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
from anvil.components.configurators import nova as nconf
from anvil.components.helpers import nova as nhelper
from anvil.components.helpers import virt as lv
LOG = logging.getLogger(__name__)
# This is a special marker file that, when it exists, signifies that nova networking was initialized
NET_INITED_FN = 'nova.network.inited.yaml'
# This brings the database in sync with nova
DB_SYNC_CMD = [
{'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage', 'db', 'sync']},
]
# Used to create a fixed network when initializing nova
FIXED_NET_CMDS = [
{
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
'network', 'create', 'private', '$FIXED_RANGE', '1', '$FIXED_NETWORK_SIZE'],
},
]
# Used to create a floating network + test floating pool
FLOATING_NET_CMDS = [
{
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
'floating', 'create', '$FLOATING_RANGE'],
},
{
'cmd': ['sudo', '-u', 'nova', '/usr/bin/nova-manage',
'floating', 'create', '--ip_range=$TEST_FLOATING_RANGE', '--pool=$TEST_FLOATING_POOL'],
},
]
class NovaUninstaller(binstall.PkgUninstallComponent):
def __init__(self, *args, **kargs):
binstall.PkgUninstallComponent.__init__(self, *args, **kargs)
self.virsh = lv.Virsh(self.get_int_option('service_wait_seconds'), self.distro)
self.net_init_fn = sh.joinpths(self.get_option('trace_dir'), NET_INITED_FN)
def pre_uninstall(self):
if 'compute' in self.subsystems:
self._clean_compute()
if 'network' in self.subsystems:
self._clean_net()
def unconfigure(self):
if sh.isfile(self.net_init_fn):
sh.unlink(self.net_init_fn)
def _clean_net(self):
try:
LOG.info("Cleaning up nova-network's dirty laundry.")
cleaner = nhelper.NetworkCleaner(self)
cleaner.clean()
except Exception as e:
LOG.warn("Failed cleaning up nova-network's dirty laundry due to: %s", e)
def _clean_compute(self):
try:
LOG.info("Cleaning up nova-compute's dirty laundry.")
cleaner = nhelper.ComputeCleaner(self)
cleaner.clean()
except Exception as e:
LOG.warn("Failed cleaning up nova-compute's dirty laundry due to: %s", e)
class NovaInstaller(binstall.PythonInstallComponent):
def __init__(self, *args, **kargs):
binstall.PythonInstallComponent.__init__(self, *args, **kargs)
self.configurator = nconf.NovaConfigurator(self)
@property
def env_exports(self):
to_set = utils.OrderedDict()
to_set['OS_COMPUTE_API_VERSION'] = self.get_option('nova_version')
n_params = nhelper.get_shared_params(**self.options)
for (endpoint, details) in n_params['endpoints'].items():
to_set[("NOVA_%s_URI" % (endpoint.upper()))] = details['uri']
return to_set
def verify(self):
binstall.PythonInstallComponent.verify(self)
self.configurator.verify()
def _sync_db(self):
LOG.info("Syncing nova to database named: %s", colorizer.quote(self.configurator.DB_NAME))
utils.execute_template(*DB_SYNC_CMD, params=self.config_params(None))
def _fix_virt(self):
virt_driver = utils.canon_virt_driver(self.get_option('virt_driver'))
if virt_driver == 'libvirt':
virt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
if virt_type == 'qemu':
# On RHEL it appears a sym-link needs to be created
# to enable qemu to actually work, apparently fixed
# in RHEL 6.4.
#
# See: http://fedoraproject.org/wiki/Getting_started_with_OpenStack_EPEL
if not sh.isfile('/usr/bin/qemu-system-x86_64'):
sh.symlink('/usr/libexec/qemu-kvm', '/usr/bin/qemu-system-x86_64',
tracewriter=self.tracewriter)
def post_install(self):
binstall.PythonInstallComponent.post_install(self)
# Extra actions to do nova setup
if self.get_bool_option('db-sync'):
self.configurator.setup_db()
self._sync_db()
# Patch up your virtualization system
self._fix_virt()
class NovaRuntime(bruntime.OpenStackRuntime):
def __init__(self, *args, **kargs):
bruntime.OpenStackRuntime.__init__(self, *args, **kargs)
self.wait_time = self.get_int_option('service_wait_seconds')
self.virsh = lv.Virsh(self.wait_time, self.distro)
self.net_init_fn = sh.joinpths(self.get_option('trace_dir'), NET_INITED_FN)
def _do_network_init(self):
if not sh.isfile(self.net_init_fn) and self.get_bool_option('do-network-init'):
# Figure out the commands to run
cmds = []
mp = {}
if self.get_bool_option('enable_fixed'):
# Create a fixed network
mp['FIXED_NETWORK_SIZE'] = self.get_option('fixed_network_size', default_value='256')
mp['FIXED_RANGE'] = self.get_option('fixed_range', default_value='10.0.0.0/24')
cmds.extend(FIXED_NET_CMDS)
if self.get_bool_option('enable_floating'):
# Create a floating network + test floating pool
cmds.extend(FLOATING_NET_CMDS)
mp['FLOATING_RANGE'] = self.get_option('floating_range', default_value='172.24.4.224/28')
mp['TEST_FLOATING_RANGE'] = self.get_option('test_floating_range', default_value='192.168.253.0/29')
mp['TEST_FLOATING_POOL'] = self.get_option('test_floating_pool', default_value='test')
# Anything to run??
if cmds:
LOG.info("Creating your nova network to be used with instances.")
utils.execute_template(*cmds, params=mp)
# Writing this makes sure that we don't init again
cmd_mp = {
'cmds': cmds,
'replacements': mp,
}
sh.write_file(self.net_init_fn, utils.prettify_yaml(cmd_mp))
LOG.info("If you wish to re-run network initialization, delete %s", colorizer.quote(self.net_init_fn))
def post_start(self):
self._do_network_init()
def pre_start(self):
# Let the parent class do its thing
bruntime.OpenStackRuntime.pre_start(self)
virt_driver = utils.canon_virt_driver(self.get_option('virt_driver'))
if virt_driver == 'libvirt':
virt_type = lv.canon_libvirt_type(self.get_option('libvirt_type'))
LOG.info("Checking that your selected libvirt virtualization type %s is working and running.", colorizer.quote(virt_type))
try:
self.virsh.check_virt(virt_type)
self.virsh.restart_service()
LOG.info("Libvirt virtualization type %s seems to be working and running.", colorizer.quote(virt_type))
except excp.ProcessExecutionError as e:
msg = ("Libvirt type %r does not seem to be active or configured correctly, "
"perhaps you should be using %r instead: %s" %
(virt_type, lv.DEF_VIRT_TYPE, e))
raise excp.StartException(msg)

View File

@ -1,22 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil.components import base_testing as btesting
class OpenStackClientTester(btesting.PythonTestingComponent):
def _use_run_tests(self):
return False

View File

@ -1,105 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import exceptions as excp
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
LOG = logging.getLogger(__name__)
class OpenvswitchUninstaller(binstall.PkgUninstallComponent):
def __init__(self, *args, **kwargs):
binstall.PkgUninstallComponent.__init__(self, *args, **kwargs)
self.runtime = self.siblings.get('running')
def _del_bridge(self, name):
cmd_template = self.distro.get_command('openvswitch', 'del_bridge')
cmd = utils.expand_template_deep(cmd_template, {'NAME': name})
try:
sh.execute(cmd)
except excp.ProcessExecutionError:
LOG.warn("Failed to delete '%s' openvswitch bridge." % name)
def pre_uninstall(self):
bridges = self.get_option('bridges', default_value=[])
if bridges:
LOG.info("Attempting to delete %s bridges: %s."
% (colorizer.quote(self.name), ", ".join(bridges)))
LOG.info("Ensuring %s service is started before we use it."
% colorizer.quote(self.name))
self.runtime.start()
self.runtime.wait_active()
for bridge in bridges:
self._del_bridge(bridge)
class OpenvswitchInstaller(binstall.PkgInstallComponent):
def __init__(self, *args, **kwargs):
binstall.PkgInstallComponent.__init__(self, *args, **kwargs)
self.runtime = self.siblings.get('running')
def _add_bridge(self, name):
cmd_template = self.distro.get_command('openvswitch', 'add_bridge')
cmd = utils.expand_template_deep(cmd_template, {'NAME': name})
try:
sh.execute(cmd)
except excp.ProcessExecutionError:
LOG.warn("Failed to create '%s' openvswitch bridge." % name)
def post_install(self):
binstall.PkgInstallComponent.post_install(self)
bridges = self.get_option('bridges', default_value=[])
if bridges:
LOG.info("Attempting to create %s bridges: %s."
% (colorizer.quote(self.name), ", ".join(bridges)))
LOG.info("Ensuring %s service is started before we use it."
% colorizer.quote(self.name))
self.runtime.start()
self.runtime.wait_active()
for bridge in bridges:
self._add_bridge(bridge)
def configure(self):
# NOTE(skudriashev): configuration is not required for this component
pass
class OpenvswitchRuntime(bruntime.ServiceRuntime):
@property
def applications(self):
return ["openvswitch"]
def status_app(self, program):
status_cmd = self.get_command("status", program)
try:
output = sh.execute(status_cmd, shell=True)[0]
except excp.ProcessExecutionError:
return False
if utils.has_any(output, "is not running"):
return False
return True

View File

@ -20,7 +20,3 @@ from anvil.components import base_install as binstall
class Installer(binstall.PkgInstallComponent):
pass
class Uninstaller(binstall.PkgUninstallComponent):
pass

View File

@ -1,67 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
LOG = logging.getLogger(__name__)
# The guest user always exists by default; leave it be.
NO_DELETE = ['guest']
class QpidUninstaller(binstall.PkgUninstallComponent):
def post_uninstall(self):
binstall.PkgUninstallComponent.post_uninstall(self)
user_name = self.get_option('user_id')
if user_name in NO_DELETE:
return
try:
LOG.debug("Attempting to delete the qpid user '%s' and their associated password.",
user_name)
cmd_template = self.distro.get_command('qpid', 'delete_user')
cmd = utils.expand_template_deep(cmd_template, {'USER': user_name})
if cmd:
sh.execute(cmd)
except IOError:
LOG.warn(("Could not delete the user/password. You might have to manually "
"reset the user/password before the next install."))
class QpidInstaller(binstall.PkgInstallComponent):
def post_install(self):
binstall.PkgInstallComponent.post_install(self)
user_name = self.get_option('user_id')
try:
LOG.debug("Attempting to create the qpid user '%s' and their associated password.",
user_name)
cmd_template = self.distro.get_command('qpid', 'create_user')
cmd = utils.expand_template_deep(cmd_template, {'USER': user_name})
if cmd:
sh.execute(cmd, process_input=self.get_password('qpid'))
except IOError:
LOG.warn(("Could not create the user/password. You might have to manually "
"create the user/password before running."))
class QpidRuntime(bruntime.ServiceRuntime):
@property
def applications(self):
return [self.distro.get_command('qpid', "daemon")[0]]

View File

@ -1,152 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempfile import TemporaryFile
from anvil import colorizer
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
from anvil.components import base_install as binstall
from anvil.components import base_runtime as bruntime
LOG = logging.getLogger(__name__)
# Default password (guest)
RESET_BASE_PW = ''
class RabbitUninstaller(binstall.PkgUninstallComponent):
def __init__(self, *args, **kargs):
binstall.PkgUninstallComponent.__init__(self, *args, **kargs)
self.runtime = self.siblings.get('running')
def pre_uninstall(self):
try:
LOG.debug("Attempting to reset the rabbit-mq guest password to: %s", colorizer.quote(RESET_BASE_PW))
self.runtime.start()
self.runtime.wait_active()
cmd = self.distro.get_command('rabbit-mq', 'change_password') + [RESET_BASE_PW]
sh.execute(cmd)
LOG.info("Restarting so that your rabbit-mq password is reflected.")
self.runtime.restart()
self.runtime.wait_active()
except IOError:
LOG.warn(("Could not reset the rabbit-mq password. You might have to manually "
"reset the password to %s before the next install"), colorizer.quote(RESET_BASE_PW))
class RabbitInstaller(binstall.PkgInstallComponent):
def __init__(self, *args, **kargs):
binstall.PkgInstallComponent.__init__(self, *args, **kargs)
self.runtime = self.siblings.get('running')
def _setup_pw(self):
user_id = self.get_option('user_id')
LOG.info("Setting up your rabbit-mq %s password.", colorizer.quote(user_id))
self.runtime.start()
self.runtime.wait_active()
cmd = list(self.distro.get_command('rabbit-mq', 'change_password'))
cmd += [user_id, self.get_password('rabbit')]
sh.execute(cmd)
LOG.info("Restarting so that your rabbit-mq password is reflected.")
self.runtime.restart()
self.runtime.wait_active()
def post_install(self):
binstall.PkgInstallComponent.post_install(self)
self._setup_pw()
class RabbitRuntime(bruntime.ProgramRuntime):
def start(self):
def is_active():
status = self.statii()[0].status
if status == bruntime.STATUS_STARTED:
return True
return False
if is_active():
return 1
self._run_action('start')
for sleep_secs in utils.ExponentialBackoff():
LOG.info("Sleeping for %s seconds, rabbit-mq is still not active.",
sleep_secs)
sh.sleep(sleep_secs)
if is_active():
return 1
raise RuntimeError('Failed to start rabbit-mq')
@property
def applications(self):
return [
bruntime.Program('rabbit-mq'),
]
def statii(self):
# This has got to be the worst status output I have ever seen
# (it's like a weird mix of json and junk).
(sysout, stderr) = self._run_action('status', check_exit_code=False)
st = bruntime.STATUS_UNKNOWN
combined = (sysout + stderr).lower()
if utils.has_any(combined, 'nodedown', "unable to connect to node", 'unrecognized'):
st = bruntime.STATUS_STOPPED
elif combined.find('running_applications') != -1:
st = bruntime.STATUS_STARTED
return [
bruntime.ProgramStatus(status=st,
details={
'STDOUT': sysout,
'STDERR': stderr
})
]
def _run_action(self, action, check_exit_code=True):
cmd = self.distro.get_command('rabbit-mq', action)
if not cmd:
raise NotImplementedError("No distro command provided to perform action %r" % (action))
# This seems to fix one of the bugs with rabbit-mq starting and stopping
# (not cool); possibly connected to the following bugs:
#
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
# See: https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
#
# RHEL seems to have this bug also...
with TemporaryFile() as s_fh:
with TemporaryFile() as e_fh:
sh.execute(cmd,
stdout_fh=s_fh, stderr_fh=e_fh,
check_exit_code=check_exit_code)
# Read from the file handles instead of the typical output...
for a_fh in [s_fh, e_fh]:
a_fh.flush()
a_fh.seek(0)
return (s_fh.read(), e_fh.read())
def restart(self):
self._run_action('restart')
return 1
def stop(self):
if self.statii()[0].status != bruntime.STATUS_STOPPED:
self._run_action('stop')
return 1
else:
return 0

View File

@ -43,13 +43,13 @@ class Distro(object):
def __init__(self,
name, platform_pattern,
install_helper, dependency_handler,
commands, components, **kwargs):
components, **kwargs):
self.name = name
self._platform_pattern_text = platform_pattern
self._platform_pattern = re.compile(platform_pattern, re.IGNORECASE)
self._install_helper = install_helper
self._dependency_handler = dependency_handler
self._commands = commands
self._commands = kwargs.get('commands', {})
self._components = components
self.inject_platform_overrides(kwargs)

View File

@ -1,75 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Platform-specific logic for RedHat Enterprise Linux components.
"""
import re
from anvil import colorizer
from anvil import ini_parser
from anvil import log as logging
from anvil import shell as sh
from anvil.components import db
from anvil.components import rabbit
LOG = logging.getLogger(__name__)
class DBInstaller(db.DBInstaller):
MYSQL_CONF = '/etc/my.cnf'
def _configure_db_confs(self):
LOG.info("Fixing up %s mysql configs.", colorizer.quote(self.distro.name))
my_cnf = ini_parser.RewritableConfigParser(fns=[DBInstaller.MYSQL_CONF])
my_cnf.remove_option('mysqld', 'skip-grant-tables')
my_cnf.set('mysqld', 'default-storage-engine', 'InnoDB')
my_cnf.set('mysqld', 'bind-address', '0.0.0.0')
sh.write_file_and_backup(DBInstaller.MYSQL_CONF, my_cnf.stringify())
class RabbitRuntime(rabbit.RabbitRuntime):
def _fix_log_dir(self):
# This seems needed...
#
# Due to the following:
# <<< Restarting rabbitmq-server: RabbitMQ is not running
# <<< sh: /var/log/rabbitmq/startup_log: Permission denied
# <<< FAILED - check /var/log/rabbitmq/startup_{log, _err}
#
# See: http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2011-March/011916.html
# This seems like a bug, since we are just using service init and service restart...
# And not trying to run this service directly...
base_dir = sh.joinpths("/var/log", 'rabbitmq')
if sh.isdir(base_dir):
# Seems like we need root perms to list that directory...
for fn in sh.listdir(base_dir):
if re.match("(.*?)(err|log)$", fn, re.I):
sh.chmod(sh.joinpths(base_dir, fn), 0o666)
def start(self):
self._fix_log_dir()
return rabbit.RabbitRuntime.start(self)
def restart(self):
self._fix_log_dir()
return rabbit.RabbitRuntime.restart(self)

View File

@ -37,10 +37,6 @@ class DownloadException(AnvilException):
pass
class InstallException(AnvilException):
pass
class BadParamException(AnvilException):
pass
@ -53,30 +49,10 @@ class NoReplacementException(AnvilException):
pass
class StartException(AnvilException):
pass
class PackageException(AnvilException):
pass
class StopException(AnvilException):
pass
class RestartException(AnvilException):
pass
class StatusException(AnvilException):
pass
class PasswordException(AnvilException):
pass
class FileException(AnvilException):
pass
@ -89,10 +65,6 @@ class DependencyException(AnvilException):
pass
class DuplicateException(AnvilException):
"Raised when a duplicate entry is found."
class ProcessExecutionError(IOError):
MESSAGE_TPL = (
'%(description)s\n'

View File

@ -34,29 +34,15 @@ from anvil import version
OVERVIEW = """Overview: Anvil is a forging tool to help build OpenStack components
and their dependencies into a complete system. It git checkouts the components and
builds them and their dependencies into packages. It can then install components
from the repositories it created with packages it made, perform configuration
and then start, stop and uninstall the components and their associated packages."""
builds them and their dependencies into packages."""
STEPS = """Steps: For smooth experience please make sure you go through the
following steps when running."""
STEP_SECTIONS = {
'installing': [
'building': [
'./smithy -a prepare',
'./smithy -a build',
'./smithy -a install',
'./smithy -a start',
'./smithy -a status',
],
'uninstalling': [
'./smithy -a stop',
'./smithy -a uninstall',
],
'purging': [
'./smithy -a stop',
'./smithy -a uninstall',
'./smithy -a purge',
],
}
@ -122,26 +108,6 @@ def parse(previous_settings=None):
dest="verbose",
default=False,
help="make the output logging verbose")
parser.add_option('-k', "--keyring",
action="store",
dest="keyring_path",
default="/etc/anvil/passwords.cfg",
help=("read and create passwords using this keyring file (default: %default)"))
parser.add_option('-e', "--encrypt",
action="store_true",
dest="keyring_encrypted",
default=False,
help=("use a encrypted keyring file (default: %default)"))
parser.add_option("--no-prompt-passwords",
action="store_false",
dest="prompt_for_passwords",
default=True,
help="do not prompt the user for passwords")
parser.add_option("--no-store-passwords",
action="store_false",
dest="store_passwords",
default=True,
help="do not save the users passwords into the users keyring")
# Install/start/stop/uninstall specific options
base_group = OptionGroup(parser, "Action specific options")
@ -196,19 +162,6 @@ def parse(previous_settings=None):
help=("empty root DIR or DIR with existing components (default: %default)"))
parser.add_option_group(base_group)
suffixes = ("Known suffixes 'K' (kilobyte, 1024),"
" 'M' (megabyte, 1024k), 'G' (gigabyte, 1024M)"
" are supported, 'B' is the default and is ignored")
status_group = OptionGroup(parser, "Status specific options")
status_group.add_option('-s', "--show",
action="callback",
dest="show_amount",
type='string',
metavar="SIZE",
callback=_size_cb,
help="show SIZE 'details' when showing component status. " + suffixes)
parser.add_option_group(status_group)
build_group = OptionGroup(parser, "Build specific options")
build_group.add_option('-u', "--usr-only",
action="store_true",
@ -226,15 +179,6 @@ def parse(previous_settings=None):
"build directory"))
parser.add_option_group(build_group)
test_group = OptionGroup(parser, "Test specific options")
test_group.add_option('-i', "--ignore-failures",
action="store_true",
dest="ignore_test_failures",
default=False,
help=("when running tests ignore component test failures"
" (default: %default)"))
parser.add_option_group(test_group)
# Extract only what we care about, these will be passed
# to the constructor of actions as arguments
# so don't adjust the naming wily nilly...
@ -257,10 +201,4 @@ def parse(previous_settings=None):
with open(options.distros_patch_fn) as fp:
values['distros_patch'] = json.load(fp)
values['venv_deploy_dir'] = options.venv_deploy_dir
values['prompt_for_passwords'] = options.prompt_for_passwords
values['show_amount'] = max(0, options.show_amount)
values['store_passwords'] = options.store_passwords
values['keyring_path'] = options.keyring_path
values['keyring_encrypted'] = options.keyring_encrypted
values['ignore_test_failures'] = options.ignore_test_failures
return values

View File

@ -1,100 +0,0 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import getpass
from keyring.backend import CryptedFileKeyring
from keyring.backend import UncryptedFileKeyring
from anvil import log as logging
from anvil import utils
LOG = logging.getLogger(__name__)
RAND_PW_LEN = 20
PW_USER = 'anvil'
class KeyringProxy(object):
def __init__(self, path, keyring_encrypted=False, enable_prompt=True, random_on_empty=True):
self.keyring_encrypted = keyring_encrypted
if self.keyring_encrypted and not path.endswith(".crypt"):
path = "%s.crypt" % (path)
self.path = path
if keyring_encrypted:
self.ring = CryptedFileKeyring()
else:
self.ring = UncryptedFileKeyring()
self.ring.file_path = path
self.enable_prompt = enable_prompt
self.random_on_empty = random_on_empty
def read(self, name, prompt):
pw_val = self.ring.get_password(name, PW_USER)
if pw_val:
return (True, pw_val)
pw_val = ''
if self.enable_prompt and prompt:
pw_val = InputPassword().get_password(name, prompt)
if self.random_on_empty and len(pw_val) == 0:
pw_val = RandomPassword().get_password(name, RAND_PW_LEN)
return (False, pw_val)
def save(self, name, password):
self.ring.set_password(name, PW_USER, password)
def __str__(self):
prefix = 'encrypted'
if not self.keyring_encrypted:
prefix = "un" + prefix
return '%s keyring @ %s' % (prefix, self.path)
class InputPassword(object):
def _valid_password(self, pw):
cleaned_pw = pw.strip()
if len(cleaned_pw) == 0:
return False
else:
return True
def _prompt_user(self, prompt_text):
prompt_text = prompt_text.strip()
message = ("Enter a secret to use for the %s "
"[or press enter to get a generated one]: ")
message = message % (prompt_text)
rc = ""
while True:
rc = getpass.getpass(message)
# Length zero seems to mean just enter was pressed (which means skip in our case)
if len(rc) == 0 or self._valid_password(rc):
break
else:
LOG.warn("Invalid secret %r (please try again)" % (rc))
return rc
def get_password(self, option, prompt_text):
return self._prompt_user(prompt_text)
class RandomPassword(object):
def generate_random(self, length):
"""Returns a randomly generated password of the specified length."""
LOG.debug("Generating a pseudo-random secret of %d characters", length)
return utils.get_random_string(length)
def get_password(self, option, length):
return self.generate_random(int(length))
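For reference, the keyring proxy deleted above was driven roughly as follows. This is only a usage sketch (the path and secret name mirror the ``--keyring`` default and the ``wanted_passwords`` prompts seen elsewhere in this change, not an actual anvil call site):

# Usage sketch of the removed KeyringProxy (illustrative, not real anvil code).
proxy = KeyringProxy('/etc/anvil/passwords.cfg', keyring_encrypted=False,
                     enable_prompt=True, random_on_empty=True)
# read() returns (found_in_keyring, password); if the secret is missing it
# prompts the user or generates a random one instead of failing.
found, secret = proxy.read('admin_password', 'keystone admin user')
if not found:
    # Persist newly obtained secrets so later runs can reuse them.
    proxy.save('admin_password', secret)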

View File

@ -1,4 +1,3 @@
# Settings for component cinder-client
---
...

View File

@ -1,11 +1,6 @@
# Settings for component cinder
---
# Host and ports for the different cinder services
api_host: "$(auto:ip)"
api_port: 8776
protocol: http
# Used for associating the client package with a human understandable
# name in its package description (not a code-name, like cinder).
api_name: "Volume"

View File

@ -1,10 +0,0 @@
# Settings for component db
---
# Where your db is located and how to access it.
host: localhost
port: 3306
type: mysql
user: root
...

View File

@ -3,43 +3,4 @@
ip: "$(auto:ip)"
# How many seconds to wait until a service comes online before using it.
# For example, before uploading to glance we need keystone and glance to be online.
# Sometimes this takes 5 to 10 seconds to start these up....
service_wait_seconds: 5
# Needed for setting up your database
db:
type: "$(db:type)"
user: "$(db:user)"
host: "$(db:host)"
port: "$(db:port)"
# Interactions with keystone are via the following settings
keystone:
auth_host: "$(keystone:auth_host)"
auth_port: "$(keystone:auth_port)"
auth_proto: "$(keystone:auth_proto)"
service_host: "$(keystone:service_host)"
service_port: "$(keystone:service_port)"
service_proto: "$(keystone:service_proto)"
# Rabbit mq hookins
rabbit:
user_id: "$(rabbit-mq:user_id)"
host: "$(rabbit-mq:host)"
# Qpid mq hookins
qpid:
user_id: "$(qpid:user_id)"
host: "$(qpid:host)"
# This is needed to allow installs based on personas
wanted_passwords:
service_token: 'service admin token'
admin_password: 'keystone admin user'
service_password: 'service authentication password'
sql: "database user"
...

View File

@ -1,9 +1,4 @@
# Settings for component glance-client
---
# These seem to require swift, not always installed...
exclude_tests:
- "test_ssl_cert_mismatch"
- "test_ssl_cert_subject_alt_name"
...

View File

@ -1,31 +1,6 @@
# Settings for component glance
---
host: "$(auto:ip)"
api_port: 9292
reg_port: 9191
protocol: http
verbose: True
# List of images to download and install into glance.
image_urls:
- "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img"
# Test exclusions...
#
# TODO(harlowja) these should probably be bugs...
exclude_tests:
# These seem to require swift, not always installed...
- test_swift_store
# Interactions with keystone are via the following settings
paste_flavor: 'keystone'
# Images that are downloaded are stored here with
# metadata about them, so that re-examination before
# uploading does not have to occur
image_cache_dir: "/usr/share/anvil/glance/images"
# Used by install section in the specfile (conflicts with the client binary...)
remove_file: "/bin/rm -rf %{buildroot}/usr/bin/glance"

View File

@ -1,7 +1,4 @@
# Settings for component keystone-client
---
# This code is out of compliance, so skip it...
use_pep8: False
...

View File

@ -1,56 +1,8 @@
# Settings for component keystone
---
# Where is the keystone auth host at?
auth_host: "$(auto:ip)"
auth_port: 35357
auth_proto: http
# Where is the keystone service host at?
service_host: "$(auto:ip)"
service_port: 5000
service_proto: http
# Used for associating the client package with a human understandable
# name in its package description (not a code-name, like keystone).
api_name: "Identity"
# Test exclusions...
#
# TODO(harlowja) these should probably be bugs...
exclude_tests:
# These 2 seem to require swift, not always installed...
- test_swift_auth_middleware
- test_s3_token_middleware
# Aren't always installing memcache...
- test_backend_memcache
- test_nomemcache
# Aren't always installing ldap...
- test_backend_ldap
# Needed when running to setup the right roles/endpoints...
glance:
api_port: "$(glance:api_port)"
protocol: "$(glance:protocol)"
reg_port: "$(glance:reg_port)"
nova:
api_host: "$(nova:api_host)"
api_port: "$(nova:api_port)"
s3_host: "$(nova:s3_host)"
s3_port: "$(nova:s3_port)"
ec2_host: "$(nova:ec2_host)"
ec2_port: "$(nova:ec2_port)"
ec2_admin_host: "$(nova:ec2_admin_host)"
ec2_admin_port: "$(nova:ec2_admin_port)"
protocol: "$(nova:protocol)"
neutron:
api_host: "$(neutron:api_host)"
api_port: "$(neutron:api_port)"
cinder:
api_host: "$(cinder:api_host)"
api_port: "$(cinder:api_port)"
...

View File

@ -1,11 +1,6 @@
# Settings for component neutron-client
---
# Host and ports for the different neutron services
api_host: "$(auto:ip)"
api_port: 9696
protocol: http
# Used for associating the client package with a human understandable
# name in its package description (not a code-name, like neutron).
api_name: "Networking"
@ -13,12 +8,6 @@ api_name: "Networking"
core_plugin: openvswitch
use_namespaces: True
network_vlan_ranges: physnet1:100:299
physical_interface_mappings: physnet1:100:299
external_bridge: br-ex
integration_bridge: br-int
# When building a package for the neutron the arguments to the individual daemons
# will be expanded to include the following runtime arguments.
daemon_args:

View File

@ -1,146 +1,8 @@
# Settings for component nova
---
# Host and ports for the different nova services
api_host: "$(auto:ip)"
api_port: 8774
s3_host: "$(auto:ip)"
s3_port: 3333
volume_host: "$(auto:ip)"
volume_port: 8776
ec2_host: "$(auto:ip)"
ec2_port: 8773
ec2_admin_host: "$(auto:ip)"
ec2_admin_port: 8773
protocol: http
# Very useful to read over the following
#
# http://docs.openstack.org/trunk/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html
# https://github.com/openstack/nova/blob/master/etc/nova/nova.conf.sample
# Set api_rate_limit = 0 (or blank) to turn OFF rate limiting
api_rate_limit: False
# The internal ip of the ec2 api server
ec2_dmz_host: "$(auto:ip)"
# A fixed network will be created for you (unless disabled)
enable_fixed: True
fixed_network_size: 256
fixed_range: "10.0.0.0/24"
# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags: ""
# DHCP Warning: If your flat interface device uses DHCP, there will be a hiccup while the network
# is moved from the flat interface to the flat network bridge. This will happen when you launch
# your first instance. Upon launch you will lose all connectivity to the node, and the vm launch
# will probably fail.
#
# If you are running on a single node and don't need to access the VMs from devices other than
# that node, you can set the flat interface to the same value as FLAT_NETWORK_BRIDGE.
# This will stop the network hiccup from occurring.
#
# See: http://docs.openstack.org/developer/nova/api/nova.network.manager.html
# If using a flat manager (not dhcp) then you probably want this on
flat_injected: False
# Interface for attached IP addresses (if a list is provided the first valid one is selected)
flat_interface: [eth0, p2p1]
flat_network_bridge: br100
# A floating network will be created for you (unless disabled)
enable_floating: True
floating_range: "172.24.4.224/28"
test_floating_pool: test
test_floating_range: "192.168.253.0/29"
# Force backing images to raw format?
force_raw_images: True
checksum_base_images: True
glance_server: "$(glance:host):$(glance:api_port)"
img_service: nova.image.glance.GlanceImageService
# Force the config drive to turn on?
force_cfg_drive: False
# How instances will be named and where
instance_name_postfix: "%08x"
instance_name_prefix: "instance-"
# Defaults to $NOVA_DIR/instances if empty
instances_path: ""
# This decides which firewall driver to use:
# The default here should work with linux + iptables + libvirt special sauce...
libvirt_firewall_driver: nova.virt.libvirt.firewall.IptablesFirewallDriver
# Only useful if above virt_driver is "libvirt"
# Types known (qemu, kvm, xen, uml, lxc)
# Defaults to qemu (the most compatible) if unknown (or blank).
libvirt_type: "qemu"
# This is just a firewall based on iptables, for non-libvirt usage
basic_firewall_driver: nova.virt.firewall.IptablesFirewallDriver
# Multi-host is a mode where each compute node runs its own network node.
# This allows network operations and routing for a VM to occur on the server
# that is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host: False
# Which network manager and which interface should be used
network_manager: nova.network.manager.FlatDHCPManager
# Interface for public IP addresses (if a list is provided the first valid one is selected)
public_interface: [eth0, p2p1]
neutron:
api_host: "$(neutron:api_host)"
api_port: "$(neutron:api_port)"
# Currently novaclient needs you to specify the *compute api* version.
nova_version: "1.1"
# Which scheduler will nova be running with?
# Nova supports pluggable schedulers. FilterScheduler should work in most cases.
scheduler: nova.scheduler.filter_scheduler.FilterScheduler
# Should nova be in verbose mode?
log_verbose: True
# Virtualization settings
# Drivers known (libvirt, xensever, vmware, baremetal)
# Defaults to libvirt (the most compatible) if unknown.
virt_driver: libvirt
# Vnc server settings
vncproxy_url: "http://$(auto:ip):6080/vnc_auto.html"
vncserver_listen: 127.0.0.1
vncserver_proxyclient_address: ""
xvpvncproxy_url: "http://$(auto:ip):6081/console"
# Used for associating the client package with a human understandable
# name in its package description (not a code-name, like nova).
api_name: "Compute"
# Test exclusions...
#
# TODO(harlowja) these should probably be bugs...
exclude_tests:
# Disable since neutronclient is not always installed.
- test_neutronv2
# Will fail if ipv6 turned off
- test_service_random_port_with_ipv6
- test_start_random_port_with_ipv6
- test_app_using_ipv6_and_ssl
# Bug in tests if libvirt installed
- test_libvirt
# Bug
- test_archive_deleted_rows_fk_constraint
exclude_tests_dir:
- smoketests
...

View File

@ -1,9 +0,0 @@
# Settings for component openvswitch
---
# List of bridges to manage
bridges:
- $(neutron:external_bridge)
- $(neutron:integration_bridge)
...

View File

@ -1,13 +0,0 @@
# Settings for component qpid
---
# Where is qpid located?
host: "$(auto:ip)"
# Which qpid user should be used
user_id: guest
wanted_passwords:
qpid: 'qpid user'
...

View File

@ -1,13 +0,0 @@
# Settings for component rabbit-mq
---
# Where is rabbit located?
host: "$(auto:ip)"
# Which rabbit user should be used
user_id: guest
wanted_passwords:
rabbit: 'rabbit user'
...

View File

@ -48,57 +48,11 @@ dependency_handler:
- "%global _python_bytecompile_errors_terminate_build 0"
tablib:
- "%global _python_bytecompile_errors_terminate_build 0"
commands:
service:
restart: service $NAME restart
start: service $NAME start
status: service $NAME status
stop: service $NAME stop
apache:
daemon: httpd
libvirt:
restart: service libvirtd restart
status: service libvirtd status
# This is just used to check that libvirt will work with a given protocol
verify: virsh -c $VIRT_PROTOCOL uri
mysql:
# NOTE: we aren't stopping any sql injection...
create_db: mysql --user=$USER --password=$PASSWORD -e
"CREATE DATABASE $DB CHARACTER SET $CHARACTER_SET;"
drop_db: mysql --user=$USER --password=$PASSWORD -e
"DROP DATABASE IF EXISTS $DB;"
grant_all: mysql --user=$USER --password=$PASSWORD -e
"GRANT ALL PRIVILEGES ON *.* TO '$USER'@'%' IDENTIFIED BY '$PASSWORD'; FLUSH PRIVILEGES;"
restart: service mysqld restart
set_pwd: mysql --user=$USER --password=$OLD_PASSWORD -e
"USE mysql; UPDATE user SET password=PASSWORD('$NEW_PASSWORD') WHERE User='$USER'; FLUSH PRIVILEGES;"
daemon: mysqld
openvswitch:
add_bridge: ovs-vsctl --may-exist add-br $NAME
del_bridge: ovs-vsctl --if-exists del-br $NAME
# Where component symlinks will go, the component name will become a directory
# under this directory where its configuration files will be connected to their
# actual location.
base_link_dir: /etc
qpid:
# See: http://qpid.apache.org/books/trunk/AMQP-Messaging-Broker-CPP-Book/html/chap-Messaging_User_Guide-Security.html
create_user: saslpasswd2 -c -p -f /var/lib/qpidd/qpidd.sasldb -u QPID $USER
delete_user: saslpasswd2 -d -f /var/lib/qpidd/qpidd.sasldb -u QPID $USER
daemon: qpidd
rabbit-mq:
change_password: rabbitmqctl change_password
restart: service rabbitmq-server restart
start: service rabbitmq-server start
status: service rabbitmq-server status
stop: service rabbitmq-server stop
components:
ceilometer-client:
python_entrypoints: True
cinder:
python_entrypoints: True
action_classes:
install: anvil.components.cinder:CinderInstaller
running: anvil.components.base_runtime:OpenStackRuntime
daemon_to_package:
all: openstack-cinder
volume: openstack-cinder
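The ``commands`` entries removed above are shell command templates with ``$NAME``/``$USER``/``$PASSWORD`` style placeholders. A minimal sketch of expanding one of them with ``string.Template`` (this only illustrates the placeholder convention; it is not necessarily how anvil itself performed the substitution):

# Illustrative expansion of one of the removed command templates.
from string import Template

restart_tpl = Template("service $NAME restart")
print(restart_tpl.safe_substitute(NAME="rabbitmq-server"))
# -> service rabbitmq-server restart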
@ -108,17 +62,7 @@ components:
- name: hp3parclient
cinder-client:
python_entrypoints: True
db:
action_classes:
install: anvil.distros.rhel:DBInstaller
running: anvil.components.db:DBRuntime
uninstall: anvil.components.db:DBUninstaller
packages:
- name: mysql
- name: mysql-server
general:
action_classes:
uninstall: anvil.components.pkglist:Uninstaller
build-requires:
# Build time dependencies
- name: libxml2-devel
@ -161,11 +105,6 @@ components:
- name: "coverage"
glance:
python_entrypoints: True
action_classes:
install: anvil.components.glance:GlanceInstaller
running: anvil.components.glance:GlanceRuntime
coverage: anvil.components.glance:GlanceTester
test: anvil.components.glance:GlanceTester
pips:
# pip setup and download of xattr>=0.7 seems to have problems finding cffi
# so let's just restrict the upper bound until this is fixed upstream
@ -178,15 +117,10 @@ components:
scrubber: openstack-glance
glance-client:
python_entrypoints: True
action_classes:
test: anvil.components.glance_client:GlanceClientTester
coverage: anvil.components.glance_client:GlanceClientTester
heat-client:
python_entrypoints: True
horizon:
python_entrypoints: True
action_classes:
running: anvil.components.horizon:HorizonRuntime
packages:
- name: openstack-dashboard
pips:
@ -195,12 +129,6 @@ components:
python_entrypoints: True
keystone:
python_entrypoints: True
action_classes:
install: anvil.components.keystone:KeystoneInstaller
running: anvil.components.keystone:KeystoneRuntime
test: anvil.components.keystone:KeystoneTester
coverage: anvil.components.keystone:KeystoneTester
uninstall: anvil.components.keystone:KeystoneUninstaller
daemon_to_package:
all: openstack-keystone
keystone-client:
@ -209,10 +137,6 @@ components:
python_entrypoints: True
nova:
python_entrypoints: True
action_classes:
install: anvil.components.nova:NovaInstaller
running: anvil.components.nova:NovaRuntime
uninstall: anvil.components.nova:NovaUninstaller
pips:
# This seems to be a core dependency for a 'cas' tool
# so don't try to remove it since it will also remove
@ -241,16 +165,6 @@ components:
python_entrypoints: True
openstack-client:
python_entrypoints: True
action_classes:
test: anvil.components.openstack_client:OpenStackClientTester
coverage: anvil.components.openstack_client:OpenStackClientTester
openvswitch:
action_classes:
install: anvil.components.openvswitch:OpenvswitchInstaller
running: anvil.components.openvswitch:OpenvswitchRuntime
uninstall: anvil.components.openvswitch:OpenvswitchUninstaller
packages:
- name: openvswitch
oslo-config:
python_entrypoints: True
oslo-incubator:
@ -261,10 +175,6 @@ components:
python_entrypoints: True
neutron:
python_entrypoints: True
action_classes:
install: anvil.components.neutron:NeutronInstaller
running: anvil.components.neutron:NeutronRuntime
uninstall: anvil.components.neutron:NeutronUninstaller
daemon_to_package:
dhcp-agent: openstack-neutron
l3-agent: openstack-neutron
@ -281,48 +191,6 @@ components:
cisco-cfg-agent: openstack-neutron-cisco
lbaas-agent: openstack-neutron
netns-cleanup: openstack-neutron
qpid:
action_classes:
install: anvil.components.qpid:QpidInstaller
uninstall: anvil.components.qpid:QpidUninstaller
running: anvil.components.qpid:QpidRuntime
packages:
- name: qpid-cpp-client
- name: qpid-tools
- name: qpid-cpp-server
# Disable rabbitmq as these rabbitmq & qpidd conflict
pre-install:
- cmd:
- service
- rabbitmq-server
- stop
ignore_failure: true
# Also stop it from starting on boot (if rebooted)
- cmd:
- chkconfig
- rabbitmq-server
- 'off'
ignore_failure: true
rabbit-mq:
action_classes:
install: anvil.components.rabbit:RabbitInstaller
running: anvil.distros.rhel:RabbitRuntime
uninstall: anvil.components.rabbit:RabbitUninstaller
packages:
- name: rabbitmq-server
# Disable qpidd as these rabbitmq & qpidd conflict
pre-install:
- cmd:
- service
- qpidd
- stop
ignore_failure: true
# Also stop it from starting on boot (if rebooted)
- cmd:
- chkconfig
- qpidd
- 'off'
ignore_failure: true
swift-client:
python_entrypoints: True
trove:
@ -331,8 +199,6 @@ components:
python_entrypoints: True
heat:
python_entrypoints: True
action_classes:
install: anvil.components.heat:HeatInstaller
daemon_to_package:
api: openstack-heat-api
api-cfn: openstack-heat-api-cfn
@ -340,12 +206,8 @@ components:
engine: openstack-heat-engine
global-requirements:
python_entrypoints: True
action_classes:
install: anvil.components.global_requirements:GlobalRequirements
ceilometer:
python_entrypoints: True
action_classes:
install: anvil.components.ceilometer:CeilometerInstaller
daemon_to_package:
api: openstack-ceilometer-api
central: openstack-ceilometer-central
@ -354,8 +216,6 @@ components:
ipmi: openstack-ceilometer-ipmi
ironic:
python_entrypoints: True
action_classes:
install: anvil.components.ironic:IronicInstaller
daemon_to_package:
api: openstack-ironic-api
conductor: openstack-ironic-conductor

View File

@ -2,9 +2,6 @@
# Persona that includes all known components
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -29,42 +26,11 @@ components:
- swift-client
- trove-client
- openstack-client
# Additional libraries
- openvswitch
# Horizon is given a later priority (typically everything is done at the
# same time in stage zero); in its own stage since it requires basically all
# the existing things to be pre-built/started... before it can be...
- django-openstack-auth: 1
- horizon: 2
options:
general:
install-all-deps: false
nova:
db-sync: true
do-network-init: true
enable-cells: false
enable-spice: false
local-conductor: false
mq-type: rabbit
neutron-enabled: true
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
heat: {}
ceilometer:
db-sync: true
horizon:
make-blackhole: true
cinder:
db-sync: true
mq-type: rabbit
neutron:
db-sync: true
mq-type: rabbit
subsystems:
glance:
- api
@ -97,12 +63,6 @@ subsystems:
- collector
- compute
- central
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
- openvswitch
supports:
- rhel
- fedora

View File

@ -1,9 +1,6 @@
---
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -22,25 +19,6 @@ components:
- nova-client
- swift-client
- heat-client
options:
nova:
db-sync: true
do-network-init: true
enable-cells: false
enable-spice: false
local-conductor: false
mq-type: rabbit
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
cinder:
mq-type: rabbit
db-sync: true
heat: {}
subsystems:
glance:
- api
@ -63,11 +41,6 @@ subsystems:
- api-cfn
- api-cloudwatch
- engine
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
supports:
- rhel
- fedora

View File

@ -2,9 +2,6 @@
# Persona that includes all known components
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -37,37 +34,6 @@ components:
# the existing things to be pre-built/started... before it can be...
- django-openstack-auth: 1
- horizon: 2
options:
general:
install-all-deps: false
nova:
db-sync: true
do-network-init: true
enable-cells: false
enable-spice: false
local-conductor: false
mq-type: rabbit
neutron-enabled: true
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
heat: {}
ironic:
db-sync: true
ceilometer:
db-sync: true
horizon:
make-blackhole: true
cinder:
db-sync: true
mq-type: rabbit
neutron:
db-sync: true
mq-type: rabbit
subsystems:
glance:
- api
@ -103,12 +69,8 @@ subsystems:
ironic:
- api
- conductor
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
- openvswitch
supports:
- rhel
- fedora
- centos
...

View File

@ -1,9 +1,6 @@
---
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -21,30 +18,6 @@ components:
- neutron-client
- nova-client
- swift-client
# Additional libraries
- openvswitch
options:
nova:
db-sync: true
do-network-init: true
enable-cells: false
enable-spice: false
local-conductor: false
mq-type: rabbit
neutron-enabled: true
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
cinder:
db-sync: true
mq-type: rabbit
neutron:
db-sync: true
mq-type: rabbit
subsystems:
glance:
- api
@ -67,12 +40,6 @@ subsystems:
- api
- scheduler
- volume
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
- openvswitch
supports:
- rhel
- fedora

View File

@ -1,67 +0,0 @@
---
components:
# Order matters here!
- general
- db
- qpid
# Oslo libraries
- oslo-config
- oslo-messaging
- pycadf
# Core components
- keystone
- glance
- cinder
- nova
# Client used by components
- cinder-client
- glance-client
- keystone-client
- neutron-client
- nova-client
- swift-client
options:
nova:
db-sync: true
do-network-init: true
mq-type: qpid
enable-cells: false
enable-spice: false
local-conductor: false
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
cinder:
mq-type: qpid
db-sync: true
subsystems:
glance:
- api
- registry
keystone:
- all
nova:
- api
- cert
- compute
- conductor
- network
- scheduler
cinder:
- api
- scheduler
- volume
no-origin:
# These components don't need an origin to be enabled
- general
- db
- qpid
supports:
- rhel
- fedora
- centos
...
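Persona files such as the fully removed one above are plain YAML documents. A short sketch of loading one with PyYAML and reading its sections (the file name is a placeholder, since the original paths are not visible in this view):

# Illustrative persona inspection; "persona.yaml" is a placeholder path.
import yaml

with open("persona.yaml") as fh:
    persona = yaml.safe_load(fh)

print(persona["components"])       # ordered list of components to process
print(persona.get("options", {}))  # per-component option overrides
print(persona["supports"])         # distributions this persona supports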

View File

@ -1,9 +1,6 @@
---
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -22,24 +19,6 @@ components:
- nova-client
- swift-client
- trove-client
options:
nova:
db-sync: true
do-network-init: true
mq-type: rabbit
enable-cells: false
enable-spice: false
local-conductor: false
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
cinder:
mq-type: rabbit
db-sync: true
subsystems:
glance:
- api
@ -57,11 +36,6 @@ subsystems:
- api
- scheduler
- volume
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
supports:
- rhel
- fedora

View File

@ -1,9 +1,6 @@
---
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -24,39 +21,11 @@ components:
- nova-client
- swift-client
- trove-client
# Additional libraries
- openvswitch
# Horizon is given a later priority (typically everything is done at the
# same time in stage zero); in its own stage since it requires basically all
# the existing things to be pre-built/started... before it can be...
- django-openstack-auth: 1
- horizon: 2
options:
general:
install-all-deps: false
nova:
db-sync: true
do-network-init: true
mq-type: rabbit
enable-cells: false
enable-spice: false
local-conductor: false
neutron-enabled: true
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
horizon:
make-blackhole: true
cinder:
db-sync: true
mq-type: rabbit
neutron:
db-sync: true
mq-type: rabbit
subsystems:
glance:
- api
@ -82,12 +51,6 @@ subsystems:
- api
- scheduler
- volume
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
- openvswitch
supports:
- rhel
- fedora

View File

@ -1,9 +1,6 @@
---
components:
# Order matters here!
- general
- db
- rabbit-mq
# Oslo libraries
- oslo-config
- oslo-messaging
@ -20,24 +17,6 @@ components:
- neutron-client
- nova-client
- swift-client
options:
nova:
db-sync: true
do-network-init: true
enable-cells: false
enable-spice: false
local-conductor: false
mq-type: rabbit
glance:
db-sync: true
load-images: true
keystone:
db-sync: true
do-init: true
enable-pki: false
cinder:
mq-type: rabbit
db-sync: true
subsystems:
glance:
- api
@ -55,11 +34,6 @@ subsystems:
- api
- scheduler
- volume
no-origin:
# These components don't need an origin to be enabled
- general
- db
- rabbit-mq
supports:
- rhel
- fedora

View File

@ -1,6 +1,5 @@
---
components:
- general
- global-requirements
supports:
- rhel

View File

@ -1,117 +0,0 @@
##
## This is a yaml template (with cheetah template
## strings that will be filled in)...
##
---
endpoints:
- service: ec2
admin_url: "$nova.endpoints.ec2_admin.uri"
internal_url: "$nova.endpoints.ec2_cloud.uri"
public_url: "$nova.endpoints.ec2_cloud.uri"
region: RegionOne
- service: network
admin_url: "$neutron.endpoints.admin.uri"
internal_url: "$neutron.endpoints.internal.uri"
public_url: "$neutron.endpoints.public.uri"
region: RegionOne
- service: glance
admin_url: "$glance.endpoints.admin.uri"
internal_url: "$glance.endpoints.internal.uri"
public_url: "$glance.endpoints.public.uri"
region: RegionOne
- service: volume
admin_url: "${cinder.endpoints.admin.uri}/%(tenant_id)s"
internal_url: "${cinder.endpoints.internal.uri}/%(tenant_id)s"
public_url: "${cinder.endpoints.public.uri}/%(tenant_id)s"
region: RegionOne
- service: keystone
admin_url: "$keystone.endpoints.admin_templated.uri"
internal_url: "$keystone.endpoints.internal_templated.uri"
public_url: "$keystone.endpoints.public_templated.uri"
region: RegionOne
- service: nova
admin_url: "${nova.endpoints.api.uri}/%(tenant_id)s"
internal_url: "${nova.endpoints.api.uri}/%(tenant_id)s"
public_url: "${nova.endpoints.api.uri}/%(tenant_id)s"
region: RegionOne
roles:
- admin
- KeystoneAdmin
- KeystoneServiceAdmin
# The Member role is used by Horizon and Swift so we need to keep it.
- Member
services:
- description: EC2 Compatibility Layer
name: ec2
type: ec2
- description: Glance Image Service
name: glance
type: image
- description: Keystone Identity Service
name: keystone
type: identity
- description: Nova Compute Service
name: nova
type: compute
- description: Object Storage Service
name: swift
type: object-store
- description: Cinder Service
name: volume
type: volume
- description: Neutron Service
name: network
type: network
tenants:
- description: Admin tenant
name: '$keystone.admin_tenant'
- description: Service tenant
name: '$keystone.service_tenant'
users:
- email: admin@example.com
name: '$keystone.admin_user'
password: '$keystone.admin_password'
roles:
- "${keystone.admin_user}:admin"
- "KeystoneAdmin:admin"
- "KeystoneServiceAdmin:admin"
tenants:
- "${keystone.admin_tenant}"
- service
- email: ec2@example.com
name: ec2
password: '$keystone.service_password'
roles:
- admin:service
tenants:
- service
- email: glance@example.com
name: glance
password: '$keystone.service_password'
roles:
- admin:service
tenants:
- service
- email: nova@example.com
name: nova
password: '$keystone.service_password'
roles:
- admin:service
tenants:
- service
- email: cinder@example.com
name: cinder
password: '$keystone.service_password'
roles:
- admin:service
tenants:
- service
- email: neutron@example.com
name: neutron
password: '$keystone.service_password'
roles:
- admin:service
tenants:
- service
...
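The removed file above is a YAML document templated with cheetah placeholders such as ``$keystone.admin_tenant``. As a rough sketch of that mechanism (the inline template and values below are trimmed-down stand-ins, not the real anvil code path), rendering and parsing could look like:

# Illustrative only: render a small cheetah-templated yaml snippet and parse it.
import yaml
from Cheetah.Template import Template

source = """
tenants:
  - description: Admin tenant
    name: '$keystone.admin_tenant'
"""
namespace = {"keystone": {"admin_tenant": "admin"}}
rendered = str(Template(source, searchList=[namespace]))
print(yaml.safe_load(rendered))  # {'tenants': [{'description': 'Admin tenant', 'name': 'admin'}]}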

View File

@ -5,8 +5,7 @@ ANVIL Documentation
=====================
.. rubric:: Everything about ANVIL, a set of **python** scripts and utilities
to forge raw openstack into a productive tool!
to forge raw openstack into a productive tool!
----

View File

@ -51,29 +51,6 @@ Anvil is designed to have the following set of software components:
* Creation of a binary RPM repository with all built packages and
dependencies (converting the prepared source RPMs into binary RPMs).
* Install
* Configuring.
* Pre-installing.
* Installing packages from previously prepared repositories.
* Post-installing.
* Uninstall
* Unconfiguring.
* Pre-uninstalling.
* Uninstalling previously installed packages.
* Post-uninstalling.
* Starting
* Pre-starting.
* Starting.
* Post-starting.
* Stopping.
* Testing.
* **Phases:** a phase is a step of an action which can be tracked as an
individual unit and can be marked as being completed. In the above install
action for each component that installed when each step occurs for that

View File

@ -1,8 +1,8 @@
.. _adding_own_distro:
====
============================
Adding your own distribution
====
============================
This little ``HOWTO`` can be used by those who wish to
add-on to anvil to be able to support their own distribution

View File

@ -44,71 +44,6 @@ components themselves.
**Terminal recording**: `<http://showterm.io/2fee38794dcf536ccd437/>`_
Installing
----------
This is the stage that is responsible for ensuring the needed rpms are still
available and installing them onto your system (using all the created dependencies
and repositories from the previous stages). It also configures the components'
configuration files (paste for example) and sets up the needed databases and MQ
components (rabbit or qpid).
::
$ sudo ./smithy -a install
**Terminal recording**: `<http://showterm.io/ed2611a6f9c086acfa8f8/>`_
Testing
-------
This acts as a single entrypoint to run the various components' test suites, which
is typically a mixture of ``testr`` or ``nose``.
::
$ sudo ./smithy -a test
**Note:** to ignore component test failures pass a ``-i`` to ``smithy``.
Starting
--------
This stage now starts the services for the individual components. At this stage,
since each component was packaged as an rpm we also nicely included a set of init.d
scripts for each component in its rpm; this starting support uses those init.d scripts
to start those components up. It also goes about running the needed post-start actions,
including downloading+installing an image for you, setting up keystone
configuration and making your nova network.
::
$ sudo ./smithy -a start
**Terminal recording**: `<http://showterm.io/8ad5f96882e09a4d97ca3/>`_
Status
------
This stage uses the service control layer to show the status of all components.
::
$ sudo ./smithy -a status
**Terminal recording**: `<http://showterm.io/d5f692b8cf8f7e6e8325f/>`_
Stopping
--------
This stage uses the service control layer to stop all components.
::
$ sudo ./smithy -a stop
**Terminal recording**: `<http://showterm.io/a3a23838ebd476d93a6a1/>`_
Packaging
---------
@ -137,30 +72,3 @@ To see the packages built (after prepare has finished).
.. literalinclude:: examples/nova-spec.txt
:language: none
:linenos:
Uninstalling
------------
This removes the packages that were installed (+ it does some extra cleanup of
some components' dirty laundry that is sometimes left behind), restoring your
environment back to its pre-installation state.
::
$ sudo ./smithy -a uninstall
**Terminal recording**: `<http://showterm.io/3e4d8892084e5f66ac18d/>`_
Purging
-------
This completely purges the anvil installation, uninstalling packages that were
installed, removing files and directories created (and any files therein).
It is the single way to completely remove all traces of an anvil installation.
::
$ sudo ./smithy -a purge
**Terminal recording**: `<http://showterm.io/e4fb03115ad3a224cafd5/>`_

View File

@ -31,39 +31,6 @@ One of the tested distributions.
You can get CentOS 6.2+ (**64-bit** is preferred) from https://www.centos.org/
Networking
----------
**Important!**
--------------
Since networking can affect how your cloud runs please check out this link:
http://docs.openstack.org/admin-guide-cloud/content/section_networking-nova.html
Check out the root article and the sub-chapters there to understand more
of what these settings mean.
**This is typically one of the hardest aspects of OpenStack to configure
and get right!**
--------------
The following settings in ``conf/components/nova.yaml`` are an example of
settings that will affect the configuration of your compute nodes network.
::
flat_network_bridge: br100
flat_interface: eth0
public_interface: eth0
fixed_range: 10.0.0.0/24
fixed_network_size: 256
floating_range: 172.24.4.224/28
test_floating_pool: test
test_floating_range: 192.168.253.0/29
Installation
============
@ -160,14 +127,6 @@ or edit an origins configuration file from ``<conf/origins/>``.
You can use ``-o <conf/origins/origins_file.yaml>`` to specify this
different origins file.
Networking notes for those on RedHat/CentOS/Fedora
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you are planning on using the `FlatManager`_ then you might want to read
and follow:
* http://www.techotopia.com/index.php/Creating_an_RHEL_5_KVM_Networked_Bridge_Interface
Repository notes for those with RedHat subscriptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -221,176 +180,6 @@ you [#verbose]_. One repository will be the dependencies that the OpenStack
components need to run and the other will be the OpenStack components
themselves.
Installing
----------
Now install *OpenStacks* components by running the following:
::
sudo ./smithy -a install
You should see a set of distribution packages and/or pips being
installed and configuration files being written as ANVIL figures out how to
install your desired components from the prepared packages built in the last
step [#verbose]_.
**Note:** You can specify a conf file just like in the ``prepare`` action.
Without a specified conf file the command will execute with ``conf/personas/in-a-box/basic.yaml``
**Note:** Also to avoid qemu errors please follow the
solution @ https://bugs.launchpad.net/anvil/+bug/985786
which will ensure that the ``qemu`` user can write to your instances
directory. If needed edit ``conf/components/nova.yaml`` and also adjust
the ``instances_path`` option.
Also as documented at http://docs.openstack.org/essex/openstack-compute/admin/content/qemu.html#fixes-rhel-qemu
please run the following (**after** installation).
::
$ setsebool -P virt_use_execmem on # optional
$ sudo ln -s /usr/libexec/qemu-kvm /usr/bin/qemu-system-x86_64
$ sudo service libvirtd restart
Testing
----------
Now (if you choose) you can run each *OpenStack* components unit tests by
running the following:
::
sudo ./smithy -a test
You should see a set of unit tests being run (ideally with zero
failures) [#verbose]_.
Starting
--------
Now that you have installed *OpenStack* you can now start your
*OpenStack* components by running the following [#verbose]_.
::
sudo ./smithy -a start
Check horizon (if applicable)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once that occurs you should be able to go to your host's IP with a web
browser and view horizon, which you can log in to with the user ``admin``
and the password you entered when prompted.
If you see a login page and can access horizon then:
``Congratulations. You did it!``
Command line tools
~~~~~~~~~~~~~~~~~~
In your ANVIL directory:
::
source /etc/anvil/install.rc
This should set up the environment variables you need to run OpenStack
CLI tools:
::
nova <command> [options] [args]
nova-manage <command> [options] [args]
keystone <command> [options] [args]
glance <command> [options] [args]
....
If you desire to use eucalyptus tools (ie `euca2ools`_) which use the
EC2 apis run the following to get your EC2 certs:
::
./tools/euca.sh $OS_USERNAME $OS_TENANT_NAME
It broke?
~~~~~~~~~
First run the following to check the status of each component [#verbose]_.
::
sudo ./smithy -a status
If you do not see all green status then you should run the following and see
if any of the ``/var/log/nova,glance,keystone,cinder,...`` log files will give
you more information about what is occurring.
::
sudo ./smithy -a status --show
This will dump out those files (truncated to not be too verbose) so that anything
peculiar can be seen. If nothing can be seen then go to the installation
directory (typically ``~/openstack``) and check the ``traces`` directory of
each component and check if anything looks fishy.
Stopping
--------
Once you have started *OpenStack* services you can stop them by running
the following:
::
sudo ./smithy -a stop
You should see a set of stop actions happening [#verbose]_. This
ensures that any daemon that was started above is now killed.
**Note:** A good way to check if it killed everything correctly is to run
the following.
::
sudo ps -elf | grep python
sudo ps -elf | grep apache
There should be no entries like ``nova``, ``glance``, ``apache``,
``httpd``. If there are then the stop may have not occurred correctly.
If this is the case run again with a ``-v`` or a ``-vv`` or check the
``/var/log/nova,glance,keystone,cinder,...`` files for any useful information
on what is happening.
Uninstalling
------------
Once you have installed and stopped *OpenStack* services you
can uninstall them by running the following:
::
sudo ./smithy -a uninstall
You should see a set of packages being removed [#verbose]_.
Purging
-------
Once you have uninstalled *OpenStack* services you
can purge the whole anvil installation by running the following:
::
sudo ./smithy -a purge
You should see a set of packages, configuration and directories, being
removed [#verbose]_. On completion the directory specified at
~/openstack should be empty.
Issues
======

View File

@ -58,33 +58,3 @@ An example of this, lets adjust ``nova`` to use the ``stable/essex`` branch.
If neither a branch nor a tag parameter was specified then ``branch: master`` is used by default.
**Note:** tag overrides branch (so you can't really include both)
`OMG` the images take forever to download!
------------------------------------------
Sometimes the images that will be uploaded to glance take a long time to
download and extract and upload.
To adjust this edit ``conf/components/glance.yaml`` and change the following:
::
...
# List of images to download and install into glance.
image_urls:
- http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz
- http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
To something like the following (shortening that list):
::
image_urls:
- http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz
This will remove the larger ubuntu image and just use the smaller `cirros`_ image (which should not take too long to upload).
Note that repeated downloads occur due to the fact that the files inside the image do not match the name of what is installed
into glance (this can be avoided by completely disabling the image uploading, see the persona file for the flag for this).
.. _cirros: https://launchpad.net/cirros

View File

@ -8,17 +8,11 @@ Anvil is a forging tool to help build OpenStack components and their
dependencies into a complete package-oriented system.
It automates the git checkouts of the OpenStack components, analyzes & builds
their dependencies and the components themselves into packages. It can then
install from the package repositories it created, perform configuration and
start, stop, restart and uninstall the components and their dependencies as a
complete system.
their dependencies and the components themselves into packages.
It allows a developer to set up an environment using the automatically created
packages (and dependencies, ex. ``RPMs``) with the help of anvil configuring
the components to work correctly for the developer's needs. After the developer
has tested out their features or changes they can stop the OpenStack
components, uninstall the packages and bring back their system to a
pre-installation/pre-anvil state.
the components to work correctly for the developer's needs.
The distinguishing part from devstack_ (besides being written in Python and not
shell), is that after building those packages (currently ``RPMs``) the same
@ -44,7 +38,7 @@ All the `yaml`_ configuration files could be found in:
* subdirectories of ``conf/personas/``
Installing
Packaging
----------
* Automatically downloading source from git and performing tag/branch checkouts.
@ -55,42 +49,6 @@ Installing
* Automatically configuring the needed files, symlinks, adjustments, and
any patches.
Testing
-------
Automatically running each component unit tests.
Starting
--------
Starting of the components sub-programs with the needed configuration via the
common `sysvinit`_ model.
Stopping
--------
Stopping of the previously started components.
Uninstalling
------------
Getting you back to an initial 'clean' state:
* Removing installed configuration.
* Undoing of installed files/directories.
* Removing of packages installed.
Packaging
---------
* Creating a basic set of packages that matches the components selected.
* Supports automatic injection of dependencies.
Status
------
* Checking the status of the running components sub-programs.
Pythonic
--------
@ -107,12 +65,6 @@ Code decoupling
This encouraging re-use by others...
Resumption
----------
Install/start/stop resumption so that when you install you can ``ctrl+c`` and
resume later (where applicable).
Extensive logging
-----------------

View File

@ -1,7 +1,6 @@
cheetah>=2.4.4
iniparse
iso8601>=0.1.8
keyring>=1.6.1,<2.0
netifaces>=0.5
ordereddict
progressbar