Spring cleaning

- Remove dryrun mode: the side effects that anvil
  creates do not actually work in dryrun mode, and
  nobody appears to use this mode anyway.
- Remove root_mode() as it is no longer used.
- Update wait_for_url to accept optional functors
  and to validate the given max_attempts before
  using it (a usage sketch follows this list).
- Move the json helper function to utils.
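
A minimal usage sketch of the updated wait_for_url (not part of the
commit), based on the new signature shown in the utils.py hunk further
down; the URL and the quiet_wait callback below are illustrative
assumptions:

    from anvil import utils

    def quiet_wait(sleep_secs):
        # A custom functor replacing the default wait logging; whatever
        # it returns is passed to sh.sleep() between attempts.
        return sleep_secs

    # Raises ValueError when max_attempts <= 0 (the new validation) and
    # returns the url once it starts responding; if every attempt fails,
    # the last failure is re-raised.
    active_url = utils.wait_for_url("http://localhost:5000/",
                                    max_attempts=3,
                                    on_wait=quiet_wait)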

Change-Id: Ieb0abb814b4e1839afc75b68b42b6f14f18d02fd
Joshua Harlow 2014-03-22 23:54:10 -07:00
parent a9333843e0
commit 2e4964090b
14 changed files with 148 additions and 225 deletions


@ -32,7 +32,6 @@ from anvil import distro
from anvil import exceptions as excp
from anvil import log as logging
from anvil import opts
from anvil.packaging import yum
from anvil import persona
from anvil import pprint
from anvil import settings
@ -89,10 +88,6 @@ def run(args):
# Here on out we should be using the logger (and not print)!!
# !!
# Stash the dryrun value (if any)
if 'dryrun' in args:
sh.set_dry_run(args['dryrun'])
# Ensure the anvil dirs are there if others are about to use it...
ensure_anvil_dirs(root_dir)
@ -106,7 +101,6 @@ def run(args):
except Exception as e:
raise excp.OptionException("Error loading persona file: %s due to %s" % (persona_fn, e))
yum.YumDependencyHandler.jobs = args["jobs"]
# Get the object we will be running with...
runner = runner_cls(distro=dist,
root_dir=root_dir,
@ -161,7 +155,7 @@ def store_current_settings(c_settings):
try:
# Remove certain keys that just shouldn't be saved
to_save = dict(c_settings)
for k in ['action', 'verbose', 'dryrun']:
for k in ['action', 'verbose']:
if k in c_settings:
to_save.pop(k, None)
with open("/etc/anvil/settings.yaml", 'w') as fh:
@ -193,7 +187,7 @@ def main():
# Configure logging levels
log_level = logging.INFO
if args['verbose'] or args['dryrun']:
if args['verbose']:
log_level = logging.DEBUG
logging.setupLogging(log_level)
LOG.debug("Log level is: %s" % (logging.getLevelName(log_level)))
@ -238,12 +232,4 @@ def main():
if __name__ == "__main__":
return_code = main()
# Switch back to root mode for anything
# that needs to run in that mode for cleanups and etc...
if return_code != 2:
try:
sh.root_mode(quiet=False)
except Exception:
pass
sys.exit(return_code)
sys.exit(main())


@ -25,11 +25,6 @@ LOG = log.getLogger(__name__)
class BuildAction(action.Action):
needs_sudo = True
def __init__(self, name, distro, root_dir, cli_opts):
action.Action.__init__(self, name, distro, root_dir, cli_opts)
self.usr_only = cli_opts.get('usr_only')
self.jobs = cli_opts.get('jobs')
@property
def lookup_name(self):
return 'install'
@ -39,6 +34,5 @@ class BuildAction(action.Action):
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
opts={"usr_only": self.usr_only,
"jobs": self.jobs})
self.cli_opts)
dependency_handler.build_binary()


@ -56,6 +56,11 @@ class InstallAction(action.Action):
logger=LOG)
def _run(self, persona, component_order, instances):
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts)
removals = ['pre-uninstall', 'post-uninstall']
self._run_phase(
action.PhaseFunctors(
@ -68,12 +73,7 @@ class InstallAction(action.Action):
"pre-install",
*removals
)
removals += ["package-uninstall", 'uninstall', "package-destroy"]
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values())
general_package = "general"
self._run_phase(
action.PhaseFunctors(
@ -86,7 +86,6 @@ class InstallAction(action.Action):
"package-install",
*removals
)
removals += ['unconfigure']
self._run_phase(
action.PhaseFunctors(
@ -99,7 +98,6 @@ class InstallAction(action.Action):
"configure",
*removals
)
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Post-installing %s.', colorizer.quote(i.name)),


@ -26,10 +26,6 @@ LOG = log.getLogger(__name__)
class PrepareAction(action.Action):
needs_sudo = False
def __init__(self, name, distro, root_dir, cli_opts):
action.Action.__init__(self, name, distro, root_dir, cli_opts)
self.jobs = cli_opts.get('jobs')
@property
def lookup_name(self):
return 'install'
@ -39,7 +35,7 @@ class PrepareAction(action.Action):
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
opts={"jobs": self.jobs})
self.cli_opts)
removals = []
self._run_phase(
action.PhaseFunctors(
@ -63,17 +59,19 @@ class PrepareAction(action.Action):
"download-patch",
*removals
)
removals += ["package-destroy"]
dependency_handler.package_start()
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Packaging %s.", colorizer.quote(i.name)),
run=dependency_handler.package_instance,
end=None,
),
component_order,
instances,
"package",
*removals
)
dependency_handler.package_finish()
try:
removals += ["package-destroy"]
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Packaging %s.", colorizer.quote(i.name)),
run=dependency_handler.package_instance,
end=None,
),
component_order,
instances,
"package",
*removals
)
finally:
dependency_handler.package_finish()


@ -26,12 +26,12 @@ LOG = log.getLogger(__name__)
class RemoveAction(uninstall.UninstallAction):
def _run(self, persona, component_order, instances):
super(RemoveAction, self)._run(persona, component_order, instances)
removals = ['package-install', 'install', 'package']
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values())
instances.values(),
self.cli_opts)
removals = ['package-install', 'install', 'package']
general_package = "general"
self._run_phase(
action.PhaseFunctors(
@ -44,7 +44,6 @@ class RemoveAction(uninstall.UninstallAction):
"package-destroy",
*removals
)
removals += ['prepare', 'download', "download-patch"]
self._run_phase(
action.PhaseFunctors(
@ -57,7 +56,6 @@ class RemoveAction(uninstall.UninstallAction):
'uninstall',
*removals
)
removals += ['pre-install', 'post-install']
self._run_phase(
action.PhaseFunctors(


@ -31,7 +31,8 @@ class TestAction(action.Action):
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values())
instances.values(),
self.cli_opts)
general_package = "general"
self._run_phase(
action.PhaseFunctors(


@ -33,6 +33,11 @@ class UninstallAction(action.Action):
return components
def _run(self, persona, component_order, instances):
dependency_handler_class = self.distro.dependency_handler_class
dependency_handler = dependency_handler_class(self.distro,
self.root_dir,
instances.values(),
self.cli_opts)
removals = ['configure']
self._run_phase(
action.PhaseFunctors(
@ -45,7 +50,6 @@ class UninstallAction(action.Action):
'unconfigure',
*removals
)
removals += ['post-install']
self._run_phase(
action.PhaseFunctors(
@ -58,11 +62,8 @@ class UninstallAction(action.Action):
'pre-uninstall',
*removals
)
removals += ['package-install', 'package-install-all-deps']
general_package = "general"
dependency_handler = self.distro.dependency_handler_class(
self.distro, self.root_dir, instances.values())
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Uninstalling packages"),


@ -121,12 +121,6 @@ def parse(previous_settings=None):
dest="verbose",
default=False,
help="make the output logging verbose")
parser.add_option("--dryrun",
action="store_true",
dest="dryrun",
default=False,
help=("perform ACTION but do not actually run any of the commands"
" that would normally complete ACTION"))
parser.add_option('-k', "--keyring",
action="store",
dest="keyring_path",
@ -227,7 +221,6 @@ def parse(previous_settings=None):
(options, _args) = parser.parse_args()
values = {}
values['dir'] = (options.dir or "")
values['dryrun'] = (options.dryrun or False)
values['action'] = (options.action or "")
values['jobs'] = options.jobs
values['persona_fn'] = options.persona_fn


@ -53,7 +53,7 @@ class DependencyHandler(object):
"""Basic class for handler of OpenStack dependencies."""
MAX_PIP_DOWNLOAD_ATTEMPTS = 4
def __init__(self, distro, root_dir, instances, opts=None):
def __init__(self, distro, root_dir, instances, opts):
self.distro = distro
self.root_dir = root_dir
self.instances = instances


@ -14,28 +14,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from anvil import exceptions as excp
from anvil import log as logging
from anvil import shell as sh
from anvil import utils
LOG = logging.getLogger(__name__)
def _parse_json(value):
"""Load JSON from string
If string is whitespace-only, returns None
"""
value = value.strip()
if value:
return json.loads(value)
else:
return None
class Helper(object):
def __init__(self, log_dir, repos):
@ -54,7 +42,7 @@ class Helper(object):
'--log-file', self._log_file]
cmdline.extend(arglist)
(stdout, _) = sh.execute(cmdline, stderr_fh=sys.stderr)
return _parse_json(stdout)
return utils.parse_json(stdout)
def _traced_yyoom(self, arglist, tracewriter):
try:
@ -62,7 +50,7 @@ class Helper(object):
except excp.ProcessExecutionError:
with excp.reraise() as ex:
try:
data = _parse_json(ex.stdout)
data = utils.parse_json(ex.stdout)
except Exception:
LOG.exception("Failed to parse YYOOM output")
else:


@ -69,7 +69,7 @@ class YumDependencyHandler(base.DependencyHandler):
REPOS = ["anvil-deps", "anvil"]
JOBS = 2
def __init__(self, distro, root_dir, instances, opts=None):
def __init__(self, distro, root_dir, instances, opts):
super(YumDependencyHandler, self).__init__(distro, root_dir, instances, opts)
# Various paths we will use while operating
self.rpmbuild_dir = sh.joinpths(self.deps_dir, "rpmbuild")
@ -83,14 +83,10 @@ class YumDependencyHandler(base.DependencyHandler):
# We inspect yum for packages, this helper allows us to do this.
self.helper = yum_helper.Helper(self.log_dir, self.REPOS)
# See if we are requested to run at a higher make parallelism level
self._jobs = self.JOBS
if 'jobs' in self.opts:
try:
self._jobs = int(self.opts.get('jobs', self.JOBS))
if self._jobs <= 0:
self._jobs = self.JOBS
except (TypeError, ValueError):
pass
try:
self.jobs = max(self.JOBS, int(self.opts.get('jobs')))
except (TypeError, ValueError):
self.jobs = self.JOBS
@property
def py2rpm_helper(self):
@ -186,7 +182,7 @@ class YumDependencyHandler(base.DependencyHandler):
utils.log_iterable(src_repo_files,
header=('Building %s RPM packages from their'
' SRPMs for repo %s using %s jobs') %
(len(src_repo_files), self.SRC_REPOS[repo_name], self._jobs),
(len(src_repo_files), self.SRC_REPOS[repo_name], self.jobs),
logger=LOG)
rpmbuild_flags = "--rebuild"
if self.opts.get("usr_only", False):
@ -195,7 +191,7 @@ class YumDependencyHandler(base.DependencyHandler):
self._create_rpmbuild_subdirs()
self.py2rpm_helper.build_all_binaries(repo_name, src_repo_dir,
rpmbuild_flags, self.tracewriter,
self._jobs)
self.jobs)
repo_dir = sh.joinpths(self.anvil_repo_dir, repo_name)
for d in sh.listdir(self.rpmbuild_dir, dirs_only=True):
self._move_rpm_files(sh.joinpths(d, "RPMS"), repo_dir)
@ -346,7 +342,7 @@ class YumDependencyHandler(base.DependencyHandler):
# Now build them into SRPM rpm files.
self.py2rpm_helper.build_all_srpms(package_files=package_files,
tracewriter=self.tracewriter,
jobs=self._jobs)
jobs=self.jobs)
def _write_spec_file(self, instance, rpm_name, template_name, params):
requires_what = params.get('requires', [])
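
A small sketch (not part of the commit) of the simplified jobs handling
above, assuming opts behaves like a dict; the helper name and sample
values are illustrative:

    JOBS = 2  # class default, as on YumDependencyHandler

    def normalize_jobs(opts):
        try:
            # Unusable values (missing, None, non-numeric) fall back to
            # the default; numeric values are clamped up to at least JOBS.
            return max(JOBS, int(opts.get('jobs')))
        except (TypeError, ValueError):
            return JOBS

    assert normalize_jobs({'jobs': 8}) == 8
    assert normalize_jobs({'jobs': 1}) == 2   # clamped up to the default
    assert normalize_jobs({}) == 2            # missing value -> default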


@ -51,9 +51,6 @@ SUDO_GID = env.get_key('SUDO_GID')
# piped to said file.
_TRUNCATED_OUTPUT_LINES = 7
# Set only once
IS_DRYRUN = None
# Take over some functions directly from os.path/os/... so that we don't have
# to type as many long function names to access these.
getsize = os.path.getsize
@ -74,19 +71,6 @@ class Process(psutil.Process):
return "%s (%s)" % (self.pid, self.name)
def set_dry_run(on_off):
global IS_DRYRUN
if not isinstance(on_off, (bool)):
raise TypeError("Dry run value must be a boolean")
if IS_DRYRUN is not None:
raise RuntimeError("Dry run value has already been previously set to '%s'" % (IS_DRYRUN))
IS_DRYRUN = on_off
def is_dry_run():
return bool(IS_DRYRUN)
# Originally borrowed from nova compute execute.
def execute(cmd,
process_input=None,
@ -151,20 +135,17 @@ def execute(cmd,
'env': process_env,
}
result = ("", "")
if is_dry_run():
rc = 0
try:
obj = subprocess.Popen(cmd, **exec_kwargs)
result = obj.communicate(process_input)
except OSError as e:
raise excp.ProcessExecutionError(
str_cmd,
exec_kwargs=exec_kwargs,
description="%s: [%s, %s]" % (e, e.errno, e.strerror)
)
else:
try:
obj = subprocess.Popen(cmd, **exec_kwargs)
result = obj.communicate(process_input)
except OSError as e:
raise excp.ProcessExecutionError(
str_cmd,
exec_kwargs=exec_kwargs,
description="%s: [%s, %s]" % (e, e.errno, e.strerror)
)
else:
rc = obj.returncode
rc = obj.returncode
# Handle process exit code.
stdout = result[0] or ""
@ -319,8 +300,7 @@ def chown(path, uid, gid):
if uid == -1 and gid == -1:
return 0
LOG.debug("Changing ownership of %r to %s:%s" % (path, uid, gid))
if not is_dry_run():
os.chown(path, uid, gid)
os.chown(path, uid, gid)
return 1
@ -384,7 +364,7 @@ def _attempt_kill(proc, signal_type, max_try, wait_time):
def kill(pid, max_try=4, wait_time=1):
if not is_running(pid) or is_dry_run():
if not is_running(pid):
return (True, 0)
proc = Process(pid)
# Try the nicer sig-int first.
@ -399,8 +379,6 @@ def kill(pid, max_try=4, wait_time=1):
def is_running(pid):
if is_dry_run():
return True
try:
return Process(pid).is_running()
except psutil.error.NoSuchProcess:
@ -423,11 +401,10 @@ def append_file(fn, text, flush=True, quiet=False):
if not quiet:
LOG.debug("Appending to file %r (%d bytes) (flush=%s)", fn, len(text), (flush))
LOG.debug(">> %s" % (text))
if not is_dry_run():
with open(fn, "a") as f:
f.write(text)
if flush:
f.flush()
with open(fn, "a") as f:
f.write(text)
if flush:
f.flush()
return fn
@ -435,14 +412,13 @@ def write_file(fn, text, flush=True, quiet=False, tracewriter=None):
if not quiet:
LOG.debug("Writing to file %r (%d bytes) (flush=%s)", fn, len(text), (flush))
LOG.debug("> %s" % (text))
if not is_dry_run():
mkdirslist(dirname(fn), tracewriter=tracewriter)
with open(fn, "w") as fh:
if isinstance(text, unicode):
text = text.encode("utf-8")
fh.write(text)
if flush:
fh.flush()
mkdirslist(dirname(fn), tracewriter=tracewriter)
with open(fn, "w") as fh:
if isinstance(text, unicode):
text = text.encode("utf-8")
fh.write(text)
if flush:
fh.flush()
if tracewriter:
tracewriter.file_touched(fn)
@ -451,12 +427,11 @@ def touch_file(fn, die_if_there=True, quiet=False, file_size=0, tracewriter=None
if not isfile(fn):
if not quiet:
LOG.debug("Touching and truncating file %r (truncate size=%s)", fn, file_size)
if not is_dry_run():
mkdirslist(dirname(fn), tracewriter=tracewriter)
with open(fn, "w") as fh:
fh.truncate(file_size)
if tracewriter:
tracewriter.file_touched(fn)
mkdirslist(dirname(fn), tracewriter=tracewriter)
with open(fn, "w") as fh:
fh.truncate(file_size)
if tracewriter:
tracewriter.file_touched(fn)
else:
if die_if_there:
msg = "Can not touch & truncate file %r since it already exists" % (fn)
@ -472,20 +447,17 @@ def mkdir(path, recurse=True):
if not isdir(path):
if recurse:
LOG.debug("Recursively creating directory %r" % (path))
if not is_dry_run():
os.makedirs(path)
os.makedirs(path)
else:
LOG.debug("Creating directory %r" % (path))
if not is_dry_run():
os.mkdir(path)
os.mkdir(path)
return path
def deldir(path):
if isdir(path):
LOG.debug("Recursively deleting directory tree starting at %r" % (path))
if not is_dry_run():
shutil.rmtree(path)
shutil.rmtree(path)
def rmdir(path, quiet=True):
@ -493,8 +465,7 @@ def rmdir(path, quiet=True):
return
try:
LOG.debug("Deleting directory %r with the cavet that we will fail if it's not empty." % (path))
if not is_dry_run():
os.rmdir(path)
os.rmdir(path)
LOG.debug("Deleted directory %r" % (path))
except OSError:
if not quiet:
@ -506,12 +477,11 @@ def rmdir(path, quiet=True):
def symlink(source, link, force=True, tracewriter=None):
LOG.debug("Creating symlink from %r => %r" % (link, source))
mkdirslist(dirname(link), tracewriter=tracewriter)
if not is_dry_run():
if force and (exists(link) and islink(link)):
unlink(link, True)
os.symlink(source, link)
if tracewriter:
tracewriter.symlink_made(link)
if force and (exists(link) and islink(link)):
unlink(link, True)
os.symlink(source, link)
if tracewriter:
tracewriter.symlink_made(link)
def user_exists(username):
@ -561,20 +531,18 @@ def getgroupname():
def unlink(path, ignore_errors=True):
LOG.debug("Unlinking (removing) %r" % (path))
if not is_dry_run():
try:
os.unlink(path)
except OSError:
if not ignore_errors:
raise
else:
pass
try:
os.unlink(path)
except OSError:
if not ignore_errors:
raise
else:
pass
def copy(src, dst, tracewriter=None):
LOG.debug("Copying: %r => %r" % (src, dst))
if not is_dry_run():
shutil.copy(src, dst)
shutil.copy(src, dst)
if tracewriter:
tracewriter.file_touched(dst)
return dst
@ -582,13 +550,12 @@ def copy(src, dst, tracewriter=None):
def move(src, dst, force=False):
LOG.debug("Moving: %r => %r" % (src, dst))
if not is_dry_run():
if force:
if isdir(dst):
dst = joinpths(dst, basename(src))
if isfile(dst):
unlink(dst)
shutil.move(src, dst)
if force:
if isdir(dst):
dst = joinpths(dst, basename(src))
if isfile(dst):
unlink(dst)
shutil.move(src, dst)
return dst
@ -611,8 +578,7 @@ def write_file_and_backup(path, contents, bk_ext='org'):
def chmod(fname, mode):
LOG.debug("Applying chmod: %r to %o" % (fname, mode))
if not is_dry_run():
os.chmod(fname, mode)
os.chmod(fname, mode)
return fname
@ -625,27 +591,10 @@ def got_root():
return True
def root_mode(quiet=True):
root_uid = 0
root_gid = 0
try:
os.setreuid(0, root_uid)
os.setregid(0, root_gid)
except OSError as e:
msg = "Cannot escalate permissions to (uid=%s, gid=%s): %s" % (root_uid, root_gid, e)
if quiet:
LOG.warn(msg)
else:
raise excp.PermException(msg)
def sleep(winks):
if winks <= 0:
return
if is_dry_run():
LOG.debug("Not really sleeping for: %s seconds" % (winks))
else:
time.sleep(winks)
time.sleep(winks)
def which_first(bin_names, additional_dirs=None, ensure_executable=True):


@ -36,12 +36,6 @@ class TestShell(test.MockTestCase):
self.popen_inst_mock.returncode = 0
self.popen_inst_mock.communicate.return_value = self.result
def test_execute_dry_run(self):
sh.IS_DRYRUN = True
self.assertEqual(sh.execute(self.cmd), ('', ''))
self.assertEqual(self.master_mock.mock_calls, [])
sh.IS_DRYRUN = False
def test_execute_default_params(self):
result = sh.execute(self.cmd)
master_mock_calls = [


@ -19,10 +19,12 @@
import contextlib
import glob
import json
import os
import random
import re
import socket
import sys
import tempfile
import urllib2
@ -38,6 +40,7 @@ from urlparse import urlunparse
import netifaces
import progressbar
import six
import yaml
from Cheetah.Template import Template
@ -138,6 +141,18 @@ def expand_template_deep(root, params):
return root
def parse_json(text):
"""Load JSON from string
If string is whitespace-only, returns None
"""
text = text.strip()
if len(text):
return json.loads(text)
else:
return None
def load_yaml(path):
return load_yaml_text(sh.load_file(path))
@ -155,40 +170,52 @@ def has_any(text, *look_for):
return False
def wait_for_url(url, max_attempts=5):
LOG.info("Waiting for url %s to become active (max_attempts=%s)",
colorizer.quote(url), max_attempts)
def wait_for_url(url, max_attempts=5,
on_start=None, on_wait=None, on_success=None):
if max_attempts <= 0:
raise ValueError("Wait maximum attempts must be > 0")
def waiter(sleep_secs):
def log_start():
LOG.info("Waiting for url %s to become active (max_attempts=%s)",
colorizer.quote(url), max_attempts)
def log_wait(sleep_secs):
LOG.info("Sleeping for %s seconds, %s is still not active.", sleep_secs, colorizer.quote(url))
sh.sleep(sleep_secs)
return sleep_secs
def success(attempts):
def log_success(attempts):
LOG.info("Url %s became active after %s attempts!", colorizer.quote(url), attempts)
excps = []
attempts = 0
for sleep_time in ExponentialBackoff(attempts=max_attempts):
attempts += 1
if not on_wait:
on_wait = log_wait
if not on_success:
on_success = log_success
if not on_start:
on_start = log_start
failures = []
for i, sleep_time in enumerate(ExponentialBackoff(attempts=max_attempts)):
if i == 0:
on_start()
try:
with contextlib.closing(urllib2.urlopen(urllib2.Request(url))) as req:
req.read()
success(attempts)
return
on_success(i + 1)
return url
except urllib2.HTTPError as e:
failures.append(sys.exc_info())
if e.code in range(200, 600):
# Should be ok, at least its responding...
# although potentially incorrectly...
success(attempts)
return
on_success(i + 1)
return url
else:
excps.append(e)
waiter(sleep_time)
except IOError as e:
excps.append(e)
waiter(sleep_time)
if excps:
raise excps[-1]
sh.sleep(on_wait(sleep_time))
except IOError:
failures.append(sys.exc_info())
sh.sleep(on_wait(sleep_time))
exc_type, exc, exc_tb = failures[-1]
six.reraise(exc_type, exc, exc_tb)
def add_header(fn, contents, adjusted=True):
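
For reference, a minimal usage sketch (not part of the commit) of the
relocated JSON helper, based on the parse_json definition added above;
the sample inputs are illustrative:

    from anvil import utils

    # Leading/trailing whitespace is stripped before parsing.
    assert utils.parse_json('  {"a": 1}  ') == {"a": 1}
    # Whitespace-only (or empty) input yields None instead of raising.
    assert utils.parse_json("   ") is None
    # Anything else malformed still raises ValueError from json.loads.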