Helpful cleanups.

1. Remove the usage of the path.join function
   now that all code should be going through
   the util file methods (and they can be
   mocked out as needed).
2. Adjust all occurrences of the above join
   function to either not use it or replace
   it with the standard os.path.join (which
   can also be mocked out as needed)
3. Fix pylint complaints about the tests
   folder 'helpers.py' not being found
4. Add a pylintrc file that is used instead
   of the options hidden in the 'run_pylint'
   tool.
This commit is contained in:
harlowja 2012-10-27 19:25:48 -07:00
parent 47c95ed210
commit 4d1d7a9bb5
35 changed files with 170 additions and 251 deletions

View File

@ -1,20 +1,20 @@
CWD=$(shell pwd) CWD=$(shell pwd)
PY_FILES=$(shell find cloudinit bin tests tools -name "*.py") PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py")
PY_FILES+="bin/cloud-init" PY_FILES+="bin/cloud-init"
all: test all: test
pep8: pep8:
$(CWD)/tools/run-pep8 $(PY_FILES) @$(CWD)/tools/run-pep8 $(PY_FILES)
pylint: pylint:
$(CWD)/tools/run-pylint $(PY_FILES) @$(CWD)/tools/run-pylint $(PY_FILES)
pyflakes: pyflakes:
pyflakes $(PY_FILES) pyflakes $(PY_FILES)
test: test:
nosetests $(noseopts) tests/unittests/ @nosetests $(noseopts) tests/
2to3: 2to3:
2to3 $(PY_FILES) 2to3 $(PY_FILES)

View File

@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
# on TCP connections - otherwise data corruption will occur. # on TCP connections - otherwise data corruption will occur.
def handle(_name, cfg, cloud, log, _args): def handle(_name, cfg, _cloud, log, _args):
apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
apt_pipe_value_s = str(apt_pipe_value).lower().strip() apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false": if apt_pipe_value_s == "false":
write_apt_snippet(cloud, "0", log, DEFAULT_FILE) write_apt_snippet("0", log, DEFAULT_FILE)
elif apt_pipe_value_s in ("none", "unchanged", "os"): elif apt_pipe_value_s in ("none", "unchanged", "os"):
return return
elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else: else:
log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
def write_apt_snippet(cloud, setting, log, f_name): def write_apt_snippet(setting, log, f_name):
"""Writes f_name with apt pipeline depth 'setting'.""" """Writes f_name with apt pipeline depth 'setting'."""
file_contents = APT_PIPE_TPL % (setting) file_contents = APT_PIPE_TPL % (setting)
util.write_file(f_name, file_contents)
util.write_file(cloud.paths.join(False, f_name), file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)

View File

@ -78,8 +78,7 @@ def handle(name, cfg, cloud, log, _args):
try: try:
# See man 'apt.conf' # See man 'apt.conf'
contents = PROXY_TPL % (proxy) contents = PROXY_TPL % (proxy)
util.write_file(cloud.paths.join(False, proxy_filename), util.write_file(proxy_filename, contents)
contents)
except Exception as e: except Exception as e:
util.logexc(log, "Failed to write proxy to %s", proxy_filename) util.logexc(log, "Failed to write proxy to %s", proxy_filename)
elif os.path.isfile(proxy_filename): elif os.path.isfile(proxy_filename):
@ -90,7 +89,7 @@ def handle(name, cfg, cloud, log, _args):
params = mirrors params = mirrors
params['RELEASE'] = release params['RELEASE'] = release
params['MIRROR'] = mirror params['MIRROR'] = mirror
errors = add_sources(cloud, cfg['apt_sources'], params) errors = add_sources(cfg['apt_sources'], params)
for e in errors: for e in errors:
log.warn("Source Error: %s", ':'.join(e)) log.warn("Source Error: %s", ':'.join(e))
@ -196,11 +195,10 @@ def generate_sources_list(codename, mirrors, cloud, log):
params = {'codename': codename} params = {'codename': codename}
for k in mirrors: for k in mirrors:
params[k] = mirrors[k] params[k] = mirrors[k]
out_fn = cloud.paths.join(False, '/etc/apt/sources.list') templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
templater.render_to_file(template_fn, out_fn, params)
def add_sources(cloud, srclist, template_params=None): def add_sources(srclist, template_params=None):
""" """
add entries in /etc/apt/sources.list.d for each abbreviated add entries in /etc/apt/sources.list.d for each abbreviated
sources.list entry in 'srclist'. When rendering template, also sources.list entry in 'srclist'. When rendering template, also
@ -250,8 +248,7 @@ def add_sources(cloud, srclist, template_params=None):
try: try:
contents = "%s\n" % (source) contents = "%s\n" % (source)
util.write_file(cloud.paths.join(False, ent['filename']), util.write_file(ent['filename'], contents, omode="ab")
contents, omode="ab")
except: except:
errorlist.append([source, errorlist.append([source,
"failed write to file %s" % ent['filename']]) "failed write to file %s" % ent['filename']])

View File

@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/"
CA_CERT_FILENAME = "cloud-init-ca-certs.crt" CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
distros = ['ubuntu', 'debian'] distros = ['ubuntu', 'debian']
@ -33,7 +34,7 @@ def update_ca_certs():
util.subp(["update-ca-certificates"], capture=False) util.subp(["update-ca-certificates"], capture=False)
def add_ca_certs(paths, certs): def add_ca_certs(certs):
""" """
Adds certificates to the system. To actually apply the new certificates Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}. you must also call L{update_ca_certs}.
@ -43,27 +44,24 @@ def add_ca_certs(paths, certs):
if certs: if certs:
# First ensure they are strings... # First ensure they are strings...
cert_file_contents = "\n".join([str(c) for c in certs]) cert_file_contents = "\n".join([str(c) for c in certs])
cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
cert_file_fullpath = paths.join(False, cert_file_fullpath)
util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
# Append cert filename to CA_CERT_CONFIG file. # Append cert filename to CA_CERT_CONFIG file.
util.write_file(paths.join(False, CA_CERT_CONFIG), util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab")
"\n%s" % CA_CERT_FILENAME, omode="ab")
def remove_default_ca_certs(paths): def remove_default_ca_certs():
""" """
Removes all default trusted CA certificates from the system. To actually Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}. apply the change you must also call L{update_ca_certs}.
""" """
util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) util.delete_dir_contents(CA_CERT_PATH)
util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) util.write_file(CA_CERT_CONFIG, "", mode=0644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
util.subp(('debconf-set-selections', '-'), debconf_sel) util.subp(('debconf-set-selections', '-'), debconf_sel)
def handle(name, cfg, cloud, log, _args): def handle(name, cfg, _cloud, log, _args):
""" """
Call to handle ca-cert sections in cloud-config file. Call to handle ca-cert sections in cloud-config file.
@ -85,14 +83,14 @@ def handle(name, cfg, cloud, log, _args):
# default trusted CA certs first. # default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False): if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates") log.debug("Removing default certificates")
remove_default_ca_certs(cloud.paths) remove_default_ca_certs()
# If we are given any new trusted CA certs to add, add them. # If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg: if "trusted" in ca_cert_cfg:
trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs: if trusted_certs:
log.debug("Adding %d certificates" % len(trusted_certs)) log.debug("Adding %d certificates" % len(trusted_certs))
add_ca_certs(cloud.paths, trusted_certs) add_ca_certs(trusted_certs)
# Update the system with the new cert configuration. # Update the system with the new cert configuration.
log.debug("Updating certificates") log.debug("Updating certificates")

View File

@ -26,6 +26,15 @@ from cloudinit import util
RUBY_VERSION_DEFAULT = "1.8" RUBY_VERSION_DEFAULT = "1.8"
CHEF_DIRS = [
'/etc/chef',
'/var/log/chef',
'/var/lib/chef',
'/var/cache/chef',
'/var/backups/chef',
'/var/run/chef',
]
def handle(name, cfg, cloud, log, _args): def handle(name, cfg, cloud, log, _args):
@ -37,24 +46,15 @@ def handle(name, cfg, cloud, log, _args):
chef_cfg = cfg['chef'] chef_cfg = cfg['chef']
# Ensure the chef directories we use exist # Ensure the chef directories we use exist
c_dirs = [ for d in CHEF_DIRS:
'/etc/chef', util.ensure_dir(d)
'/var/log/chef',
'/var/lib/chef',
'/var/cache/chef',
'/var/backups/chef',
'/var/run/chef',
]
for d in c_dirs:
util.ensure_dir(cloud.paths.join(False, d))
# Set the validation key based on the presence of either 'validation_key' # Set the validation key based on the presence of either 'validation_key'
# or 'validation_cert'. In the case where both exist, 'validation_key' # or 'validation_cert'. In the case where both exist, 'validation_key'
# takes precedence # takes precedence
for key in ('validation_key', 'validation_cert'): for key in ('validation_key', 'validation_cert'):
if key in chef_cfg and chef_cfg[key]: if key in chef_cfg and chef_cfg[key]:
v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') util.write_file('/etc/chef/validation.pem', chef_cfg[key])
util.write_file(v_fn, chef_cfg[key])
break break
# Create the chef config from template # Create the chef config from template
@ -68,8 +68,7 @@ def handle(name, cfg, cloud, log, _args):
'_default'), '_default'),
'validation_name': chef_cfg['validation_name'] 'validation_name': chef_cfg['validation_name']
} }
out_fn = cloud.paths.join(False, '/etc/chef/client.rb') templater.render_to_file(template_fn, '/etc/chef/client.rb', params)
templater.render_to_file(template_fn, out_fn, params)
else: else:
log.warn("No template found, not rendering to /etc/chef/client.rb") log.warn("No template found, not rendering to /etc/chef/client.rb")
@ -81,8 +80,7 @@ def handle(name, cfg, cloud, log, _args):
initial_attributes = chef_cfg['initial_attributes'] initial_attributes = chef_cfg['initial_attributes']
for k in list(initial_attributes.keys()): for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k] initial_json[k] = initial_attributes[k]
firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json))
util.write_file(firstboot_fn, json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type' # If chef is not installed, we install chef based on 'install_type'
if not os.path.isfile('/usr/bin/chef-client'): if not os.path.isfile('/usr/bin/chef-client'):

View File

@ -66,22 +66,16 @@ def handle(_name, cfg, cloud, log, _args):
merge_data = [ merge_data = [
LSC_BUILTIN_CFG, LSC_BUILTIN_CFG,
cloud.paths.join(True, LSC_CLIENT_CFG_FILE), LSC_CLIENT_CFG_FILE,
ls_cloudcfg, ls_cloudcfg,
] ]
merged = merge_together(merge_data) merged = merge_together(merge_data)
lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
if not os.path.isdir(lsc_dir):
util.ensure_dir(lsc_dir)
contents = StringIO() contents = StringIO()
merged.write(contents) merged.write(contents)
contents.flush()
util.write_file(lsc_client_fn, contents.getvalue()) util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
log.debug("Wrote landscape config file to %s", lsc_client_fn) util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n") util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
util.subp(["service", "landscape-client", "restart"]) util.subp(["service", "landscape-client", "restart"])

View File

@ -29,6 +29,7 @@ from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
SERVER_CFG = '/etc/mcollective/server.cfg'
def handle(name, cfg, cloud, log, _args): def handle(name, cfg, cloud, log, _args):
@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in mcollective_cfg: if 'conf' in mcollective_cfg:
# Read server.cfg values from the # Read server.cfg values from the
# original file in order to be able to mix the rest up # original file in order to be able to mix the rest up
server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') mcollective_config = ConfigObj(SERVER_CFG)
mcollective_config = ConfigObj(server_cfg_fn)
# See: http://tiny.cc/jh9agw # See: http://tiny.cc/jh9agw
for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
if cfg_name == 'public-cert': if cfg_name == 'public-cert':
pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) util.write_file(PUBCERT_FILE, cfg, mode=0644)
util.write_file(pubcert_fn, cfg, mode=0644) mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
mcollective_config['plugin.ssl_server_public'] = pubcert_fn
mcollective_config['securityprovider'] = 'ssl' mcollective_config['securityprovider'] = 'ssl'
elif cfg_name == 'private-cert': elif cfg_name == 'private-cert':
pricert_fn = cloud.paths.join(True, PRICERT_FILE) util.write_file(PRICERT_FILE, cfg, mode=0600)
util.write_file(pricert_fn, cfg, mode=0600) mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
mcollective_config['plugin.ssl_server_private'] = pricert_fn
mcollective_config['securityprovider'] = 'ssl' mcollective_config['securityprovider'] = 'ssl'
else: else:
if isinstance(cfg, (basestring, str)): if isinstance(cfg, (basestring, str)):
# Just set it in the 'main' section # Just set it in the 'main' section
mcollective_config[cfg_name] = cfg mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)): elif isinstance(cfg, (dict)):
# Iterate throug the config items, create a section # Iterate through the config items, create a section
# if it is needed and then add/or create items as needed # if it is needed and then add/or create items as needed
if cfg_name not in mcollective_config.sections: if cfg_name not in mcollective_config.sections:
mcollective_config[cfg_name] = {} mcollective_config[cfg_name] = {}
@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args):
mcollective_config[cfg_name] = str(cfg) mcollective_config[cfg_name] = str(cfg)
# We got all our config as wanted we'll rename # We got all our config as wanted we'll rename
# the previous server.cfg and create our new one # the previous server.cfg and create our new one
old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG))
util.rename(server_cfg_fn, old_fn)
# Now we got the whole file, write to disk... # Now we got the whole file, write to disk...
contents = StringIO() contents = StringIO()
mcollective_config.write(contents) mcollective_config.write(contents)
contents = contents.getvalue() contents = contents.getvalue()
server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') util.write_file(SERVER_CFG, contents, mode=0644)
util.write_file(server_cfg_rw, contents, mode=0644)
# Start mcollective # Start mcollective
util.subp(['service', 'mcollective', 'start'], capture=False) util.subp(['service', 'mcollective', 'start'], capture=False)

View File

@ -28,6 +28,7 @@ from cloudinit import util
SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
SHORTNAME = re.compile(SHORTNAME_FILTER) SHORTNAME = re.compile(SHORTNAME_FILTER)
WS = re.compile("[%s]+" % (whitespace)) WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
def is_mdname(name): def is_mdname(name):
@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines.append('\t'.join(line)) cc_lines.append('\t'.join(line))
fstab_lines = [] fstab_lines = []
fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) for line in util.load_file(FSTAB_PATH).splitlines():
for line in fstab.splitlines():
try: try:
toks = WS.split(line) toks = WS.split(line)
if toks[3].find(comment) != -1: if toks[3].find(comment) != -1:
@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args):
fstab_lines.extend(cc_lines) fstab_lines.extend(cc_lines)
contents = "%s\n" % ('\n'.join(fstab_lines)) contents = "%s\n" % ('\n'.join(fstab_lines))
util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) util.write_file(FSTAB_PATH, contents)
if needswap: if needswap:
try: try:
@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Activating swap via 'swapon -a' failed") util.logexc(log, "Activating swap via 'swapon -a' failed")
for d in dirs: for d in dirs:
real_dir = cloud.paths.join(False, d)
try: try:
util.ensure_dir(real_dir) util.ensure_dir(d)
except: except:
util.logexc(log, "Failed to make '%s' config-mount", d) util.logexc(log, "Failed to make '%s' config-mount", d)

View File

@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args):
for (n, path) in pubkeys.iteritems(): for (n, path) in pubkeys.iteritems():
try: try:
all_keys[n] = util.load_file(cloud.paths.join(True, path)) all_keys[n] = util.load_file(path)
except: except:
util.logexc(log, ("%s: failed to open, can not" util.logexc(log, ("%s: failed to open, can not"
" phone home that data"), path) " phone home that data!"), path)
submit_keys = {} submit_keys = {}
for k in post_list: for k in post_list:

View File

@ -21,12 +21,32 @@
from StringIO import StringIO from StringIO import StringIO
import os import os
import pwd
import socket import socket
from cloudinit import helpers from cloudinit import helpers
from cloudinit import util from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
util.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warn(("Sorry we do not know how to enable"
" puppet services on this system"))
def handle(name, cfg, cloud, log, _args): def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything # If there isn't a puppet key in the configuration don't do anything
@ -43,8 +63,7 @@ def handle(name, cfg, cloud, log, _args):
# ... and then update the puppet configuration # ... and then update the puppet configuration
if 'conf' in puppet_cfg: if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf # Add all sections from the conf object to puppet.conf
puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') contents = util.load_file(PUPPET_CONF_PATH)
contents = util.load_file(puppet_conf_fn)
# Create object for reading puppet.conf values # Create object for reading puppet.conf values
puppet_config = helpers.DefaultingConfigParser() puppet_config = helpers.DefaultingConfigParser()
# Read puppet.conf values from original file in order to be able to # Read puppet.conf values from original file in order to be able to
@ -53,28 +72,19 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines) cleaned_contents = '\n'.join(cleaned_lines)
puppet_config.readfp(StringIO(cleaned_contents), puppet_config.readfp(StringIO(cleaned_contents),
filename=puppet_conf_fn) filename=PUPPET_CONF_PATH)
for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
# Cert configuration is a special case # Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place # Dump the puppet master ca certificate in the correct place
if cfg_name == 'ca_cert': if cfg_name == 'ca_cert':
# Puppet ssl sub-directory isn't created yet # Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership # Create it with the proper permissions and ownership
pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') util.ensure_dir(PUPPET_SSL_DIR, 0771)
util.ensure_dir(pp_ssl_dir, 0771) util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
util.chownbyid(pp_ssl_dir, util.ensure_dir(PUPPET_SSL_CERT_DIR)
pwd.getpwnam('puppet').pw_uid, 0) util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
pp_ssl_certs = cloud.paths.join(False, util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
'/var/lib/puppet/ssl/certs/') util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
util.ensure_dir(pp_ssl_certs)
util.chownbyid(pp_ssl_certs,
pwd.getpwnam('puppet').pw_uid, 0)
pp_ssl_ca_certs = cloud.paths.join(False,
('/var/lib/puppet/'
'ssl/certs/ca.pem'))
util.write_file(pp_ssl_ca_certs, cfg)
util.chownbyid(pp_ssl_ca_certs,
pwd.getpwnam('puppet').pw_uid, 0)
else: else:
# Iterate throug the config items, we'll use ConfigParser.set # Iterate throug the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed # to overwrite or create new items as needed
@ -90,25 +100,11 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v) puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename # We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one # the previous puppet.conf and create our new one
conf_old_fn = cloud.paths.join(False, util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
'/etc/puppet/puppet.conf.old') util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
util.rename(puppet_conf_fn, conf_old_fn)
puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
util.write_file(puppet_conf_rw, puppet_config.stringify())
# Set puppet to automatically start # Set it up so it autostarts
if os.path.exists('/etc/default/puppet'): _autostart_puppet(log)
util.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warn(("Sorry we do not know how to enable"
" puppet services on this system"))
# Start puppetd # Start puppetd
util.subp(['service', 'puppet', 'start'], capture=False) util.subp(['service', 'puppet', 'start'], capture=False)

View File

@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log):
raise raise
def handle(name, cfg, cloud, log, args): def handle(name, cfg, _cloud, log, args):
if len(args) != 0: if len(args) != 0:
resize_root = args[0] resize_root = args[0]
else: else:
@ -74,11 +74,10 @@ def handle(name, cfg, cloud, log, args):
# TODO(harlowja) is the directory ok to be used?? # TODO(harlowja) is the directory ok to be used??
resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
resize_root_d = cloud.paths.join(False, resize_root_d)
util.ensure_dir(resize_root_d) util.ensure_dir(resize_root_d)
# TODO(harlowja): allow what is to be resized to be configurable?? # TODO(harlowja): allow what is to be resized to be configurable??
resize_what = cloud.paths.join(False, "/") resize_what = "/"
with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
dir=resize_root_d, delete=True) as tfh: dir=resize_root_d, delete=True) as tfh:
devpth = tfh.name devpth = tfh.name

View File

@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args):
try: try:
contents = "%s\n" % (content) contents = "%s\n" % (content)
util.write_file(cloud.paths.join(False, filename), util.write_file(filename, contents, omode=omode)
contents, omode=omode)
except Exception: except Exception:
util.logexc(log, "Failed to write to %s", filename) util.logexc(log, "Failed to write to %s", filename)

View File

@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
cmd = cfg["runcmd"] cmd = cfg["runcmd"]
try: try:
content = util.shellify(cmd) content = util.shellify(cmd)
util.write_file(cloud.paths.join(False, out_fn), content, 0700) util.write_file(out_fn, content, 0700)
except: except:
util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn) util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)

View File

@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args):
cloud.distro.install_packages(["salt-minion"]) cloud.distro.install_packages(["salt-minion"])
# Ensure we can configure files at the right dir # Ensure we can configure files at the right dir
config_dir = cloud.paths.join(False, salt_cfg.get("config_dir", config_dir = salt_cfg.get("config_dir", '/etc/salt')
'/etc/salt'))
util.ensure_dir(config_dir) util.ensure_dir(config_dir)
# ... and then update the salt configuration # ... and then update the salt configuration
@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args):
# ... copy the key pair if specified # ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg: if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir', pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
'/etc/salt/pki'))
with util.umask(077): with util.umask(077):
util.ensure_dir(pki_dir) util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub') pub_name = os.path.join(pki_dir, 'minion.pub')

View File

@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args):
replaced_auth = False replaced_auth = False
# See: man sshd_config # See: man sshd_config
conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG) old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
old_lines = ssh_util.parse_ssh_config(conf_fn)
new_lines = [] new_lines = []
i = 0 i = 0
for (i, line) in enumerate(old_lines): for (i, line) in enumerate(old_lines):
@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args):
pw_auth)) pw_auth))
lines = [str(e) for e in new_lines] lines = [str(e) for e in new_lines]
ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG) util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
util.write_file(ssh_rw_fn, "\n".join(lines))
try: try:
cmd = ['service'] cmd = ['service']

View File

@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args):
# remove the static keys from the pristine image # remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True): if cfg.get("ssh_deletekeys", True):
key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*") key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
for f in glob.glob(key_pth): for f in glob.glob(key_pth):
try: try:
util.del_file(f) util.del_file(f)
@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args):
if key in KEY_2_FILE: if key in KEY_2_FILE:
tgt_fn = KEY_2_FILE[key][0] tgt_fn = KEY_2_FILE[key][0]
tgt_perms = KEY_2_FILE[key][1] tgt_perms = KEY_2_FILE[key][1]
util.write_file(cloud.paths.join(False, tgt_fn), util.write_file(tgt_fn, val, tgt_perms)
val, tgt_perms)
for (priv, pub) in PRIV_2_PUB.iteritems(): for (priv, pub) in PRIV_2_PUB.iteritems():
if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args):
'ssh_genkeytypes', 'ssh_genkeytypes',
GENERATE_KEY_NAMES) GENERATE_KEY_NAMES)
for keytype in genkeys: for keytype in genkeys:
keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype)) keyfile = KEY_FILE_TPL % (keytype)
util.ensure_dir(os.path.dirname(keyfile)) util.ensure_dir(os.path.dirname(keyfile))
if not os.path.exists(keyfile): if not os.path.exists(keyfile):
cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args):
cfgkeys = cfg["ssh_authorized_keys"] cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys) keys.extend(cfgkeys)
apply_credentials(keys, user, cloud.paths, apply_credentials(keys, user, disable_root, disable_root_opts)
disable_root, disable_root_opts)
except: except:
util.logexc(log, "Applying ssh credentials failed!") util.logexc(log, "Applying ssh credentials failed!")
def apply_credentials(keys, user, paths, disable_root, disable_root_opts): def apply_credentials(keys, user, disable_root, disable_root_opts):
keys = set(keys) keys = set(keys)
if user: if user:
ssh_util.setup_user_keys(keys, user, '', paths) ssh_util.setup_user_keys(keys, user, '')
if disable_root: if disable_root:
if not user: if not user:
@ -137,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
else: else:
key_prefix = '' key_prefix = ''
ssh_util.setup_user_keys(keys, 'root', key_prefix, paths) ssh_util.setup_user_keys(keys, 'root', key_prefix)

View File

@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args):
"logging of ssh fingerprints disabled"), name) "logging of ssh fingerprints disabled"), name)
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
extract_func = ssh_util.extract_authorized_keys
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items(): for (user_name, _cfg) in users.items():
(auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
_pprint_key_entries(user_name, auth_key_fn, _pprint_key_entries(user_name, key_fn,
auth_key_entries, hash_meth) key_entries, hash_meth)

View File

@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args):
raise RuntimeError(("No hosts template could be" raise RuntimeError(("No hosts template could be"
" found for distro %s") % (cloud.distro.name)) " found for distro %s") % (cloud.distro.name))
out_fn = cloud.paths.join(False, '/etc/hosts') templater.render_to_file(tpl_fn_name, '/etc/hosts',
templater.render_to_file(tpl_fn_name, out_fn,
{'hostname': hostname, 'fqdn': fqdn}) {'hostname': hostname, 'fqdn': fqdn})
elif manage_hosts == "localhost": elif manage_hosts == "localhost":

View File

@ -122,8 +122,7 @@ class Distro(object):
new_etchosts = StringIO() new_etchosts = StringIO()
need_write = False need_write = False
need_change = True need_change = True
hosts_ro_fn = self._paths.join(True, "/etc/hosts") for line in util.load_file("/etc/hosts").splitlines():
for line in util.load_file(hosts_ro_fn).splitlines():
if line.strip().startswith(header): if line.strip().startswith(header):
continue continue
if not line.strip() or line.strip().startswith("#"): if not line.strip() or line.strip().startswith("#"):
@ -147,8 +146,7 @@ class Distro(object):
need_write = True need_write = True
if need_write: if need_write:
contents = new_etchosts.getvalue() contents = new_etchosts.getvalue()
util.write_file(self._paths.join(False, "/etc/hosts"), util.write_file("/etc/hosts", contents, mode=0644)
contents, mode=0644)
def _bring_up_interface(self, device_name): def _bring_up_interface(self, device_name):
cmd = ['ifup', device_name] cmd = ['ifup', device_name]
@ -262,7 +260,7 @@ class Distro(object):
# Import SSH keys # Import SSH keys
if 'ssh_authorized_keys' in kwargs: if 'ssh_authorized_keys' in kwargs:
keys = set(kwargs['ssh_authorized_keys']) or [] keys = set(kwargs['ssh_authorized_keys']) or []
ssh_util.setup_user_keys(keys, name, None, self._paths) ssh_util.setup_user_keys(keys, name, key_prefix=None)
return True return True

View File

@ -43,7 +43,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None): def apply_locale(self, locale, out_fn=None):
if not out_fn: if not out_fn:
out_fn = self._paths.join(False, '/etc/default/locale') out_fn = '/etc/default/locale'
util.subp(['locale-gen', locale], capture=False) util.subp(['locale-gen', locale], capture=False)
util.subp(['update-locale', locale], capture=False) util.subp(['update-locale', locale], capture=False)
lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""]
@ -54,8 +54,7 @@ class Distro(distros.Distro):
self.package_command('install', pkglist) self.package_command('install', pkglist)
def _write_network(self, settings): def _write_network(self, settings):
net_fn = self._paths.join(False, "/etc/network/interfaces") util.write_file("/etc/network/interfaces", settings)
util.write_file(net_fn, settings)
return ['all'] return ['all']
def _bring_up_interfaces(self, device_names): def _bring_up_interfaces(self, device_names):
@ -69,12 +68,9 @@ class Distro(distros.Distro):
return distros.Distro._bring_up_interfaces(self, device_names) return distros.Distro._bring_up_interfaces(self, device_names)
def set_hostname(self, hostname): def set_hostname(self, hostname):
out_fn = self._paths.join(False, "/etc/hostname") self._write_hostname(hostname, "/etc/hostname")
self._write_hostname(hostname, out_fn) LOG.debug("Setting hostname to %s", hostname)
if out_fn == '/etc/hostname': util.subp(['hostname', hostname])
# Only do this if we are running in non-adjusted root mode
LOG.debug("Setting hostname to %s", hostname)
util.subp(['hostname', hostname])
def _write_hostname(self, hostname, out_fn): def _write_hostname(self, hostname, out_fn):
# "" gives trailing newline. # "" gives trailing newline.
@ -82,16 +78,14 @@ class Distro(distros.Distro):
def update_hostname(self, hostname, prev_fn): def update_hostname(self, hostname, prev_fn):
hostname_prev = self._read_hostname(prev_fn) hostname_prev = self._read_hostname(prev_fn)
read_fn = self._paths.join(True, "/etc/hostname") hostname_in_etc = self._read_hostname("/etc/hostname")
hostname_in_etc = self._read_hostname(read_fn)
update_files = [] update_files = []
if not hostname_prev or hostname_prev != hostname: if not hostname_prev or hostname_prev != hostname:
update_files.append(prev_fn) update_files.append(prev_fn)
if (not hostname_in_etc or if (not hostname_in_etc or
(hostname_in_etc == hostname_prev and (hostname_in_etc == hostname_prev and
hostname_in_etc != hostname)): hostname_in_etc != hostname)):
write_fn = self._paths.join(False, "/etc/hostname") update_files.append("/etc/hostname")
update_files.append(write_fn)
for fn in update_files: for fn in update_files:
try: try:
self._write_hostname(hostname, fn) self._write_hostname(hostname, fn)
@ -103,7 +97,6 @@ class Distro(distros.Distro):
LOG.debug(("%s differs from /etc/hostname." LOG.debug(("%s differs from /etc/hostname."
" Assuming user maintained hostname."), prev_fn) " Assuming user maintained hostname."), prev_fn)
if "/etc/hostname" in update_files: if "/etc/hostname" in update_files:
# Only do this if we are running in non-adjusted root mode
LOG.debug("Setting hostname to %s", hostname) LOG.debug("Setting hostname to %s", hostname)
util.subp(['hostname', hostname]) util.subp(['hostname', hostname])
@ -130,9 +123,8 @@ class Distro(distros.Distro):
" no file found at %s") % (tz, tz_file)) " no file found at %s") % (tz, tz_file))
# "" provides trailing newline during join # "" provides trailing newline during join
tz_lines = ["# Created by cloud-init", str(tz), ""] tz_lines = ["# Created by cloud-init", str(tz), ""]
tz_fn = self._paths.join(False, "/etc/timezone") util.write_file("/etc/timezone", "\n".join(tz_lines))
util.write_file(tz_fn, "\n".join(tz_lines)) util.copy(tz_file, "/etc/localtime")
util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
def package_command(self, command, args=None): def package_command(self, command, args=None):
e = os.environ.copy() e = os.environ.copy()

View File

@ -302,14 +302,10 @@ class Paths(object):
def __init__(self, path_cfgs, ds=None): def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs self.cfgs = path_cfgs
# Populate all the initial paths # Populate all the initial paths
self.cloud_dir = self.join(False, self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
path_cfgs.get('cloud_dir',
'/var/lib/cloud'))
self.instance_link = os.path.join(self.cloud_dir, 'instance') self.instance_link = os.path.join(self.cloud_dir, 'instance')
self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.boot_finished = os.path.join(self.instance_link, "boot-finished")
self.upstart_conf_d = path_cfgs.get('upstart_dir') self.upstart_conf_d = path_cfgs.get('upstart_dir')
if self.upstart_conf_d:
self.upstart_conf_d = self.join(False, self.upstart_conf_d)
self.seed_dir = os.path.join(self.cloud_dir, 'seed') self.seed_dir = os.path.join(self.cloud_dir, 'seed')
# This one isn't joined, since it should just be read-only # This one isn't joined, since it should just be read-only
template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
@ -328,29 +324,6 @@ class Paths(object):
# Set when a datasource becomes active # Set when a datasource becomes active
self.datasource = ds self.datasource = ds
# Joins the given path components and, when a read or write root is
# configured, prefixes the result with that root so callers can be
# redirected into an adjusted (e.g. chroot-like) filesystem tree.
def join(self, read_only, *paths):
    """Join *paths* and prepend the configured root, if any.

    :param read_only: when True the 'read_root' config entry is used
        as the prefix, otherwise 'write_root' is used.
    :param paths: path components to join; when empty, only the root
        (which may be None if unconfigured) is returned.
    :returns: the joined, possibly root-adjusted, path.
    """
    root = self.cfgs.get('read_root' if read_only else 'write_root')
    if not paths:
        return root
    # os.path.join already handles the single-component case, so no
    # special casing on len(paths) is needed.
    joined = os.path.join(*paths)
    if root:
        pre_joined = joined
        # Need to remove any starting '/' since an absolute second
        # argument makes os.path.join discard the root prefix.
        joined = os.path.join(root, joined.lstrip("/"))
        LOG.debug("Translated %s to adjusted path %s (read-only=%s)",
                  pre_joined, joined, read_only)
    return joined
# get_ipath_cur: get the current instance path for an item # get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None): def get_ipath_cur(self, name=None):
ipath = self.instance_link ipath = self.instance_link

View File

@ -20,8 +20,6 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
from email.mime.multipart import MIMEMultipart
import abc import abc
import os import os

View File

@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys):
return '\n'.join(lines) return '\n'.join(lines)
def users_ssh_info(username, paths): def users_ssh_info(username):
pw_ent = pwd.getpwnam(username) pw_ent = pwd.getpwnam(username)
if not pw_ent: if not pw_ent or not pw_ent.pw_dir:
raise RuntimeError("Unable to get ssh info for user %r" % (username)) raise RuntimeError("Unable to get ssh info for user %r" % (username))
ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
return (ssh_dir, pw_ent)
def extract_authorized_keys(username, paths): def extract_authorized_keys(username):
(ssh_dir, pw_ent) = users_ssh_info(username, paths) (ssh_dir, pw_ent) = users_ssh_info(username)
sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
auth_key_fn = None auth_key_fn = None
with util.SeLinuxGuard(ssh_dir, recursive=True): with util.SeLinuxGuard(ssh_dir, recursive=True):
try: try:
@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths):
# The following tokens are defined: %% is replaced by a literal # The following tokens are defined: %% is replaced by a literal
# '%', %h is replaced by the home directory of the user being # '%', %h is replaced by the home directory of the user being
# authenticated and %u is replaced by the username of that user. # authenticated and %u is replaced by the username of that user.
ssh_cfg = parse_ssh_config_map(sshd_conf_fn) ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
if not auth_key_fn: if not auth_key_fn:
auth_key_fn = "%h/.ssh/authorized_keys" auth_key_fn = "%h/.ssh/authorized_keys"
@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths):
auth_key_fn = auth_key_fn.replace("%%", '%') auth_key_fn = auth_key_fn.replace("%%", '%')
if not auth_key_fn.startswith('/'): if not auth_key_fn.startswith('/'):
auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
auth_key_fn = paths.join(False, auth_key_fn)
except (IOError, OSError): except (IOError, OSError):
# Give up and use a default key filename # Give up and use a default key filename
auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths):
" in ssh config" " in ssh config"
" from %r, using 'AuthorizedKeysFile' file" " from %r, using 'AuthorizedKeysFile' file"
" %r instead"), " %r instead"),
sshd_conf_fn, auth_key_fn) DEF_SSHD_CFG, auth_key_fn)
auth_key_entries = parse_authorized_keys(auth_key_fn) return (auth_key_fn, parse_authorized_keys(auth_key_fn))
return (auth_key_fn, auth_key_entries)
def setup_user_keys(keys, username, key_prefix, paths): def setup_user_keys(keys, username, key_prefix):
# Make sure the users .ssh dir is setup accordingly # Make sure the users .ssh dir is setup accordingly
(ssh_dir, pwent) = users_ssh_info(username, paths) (ssh_dir, pwent) = users_ssh_info(username)
if not os.path.isdir(ssh_dir): if not os.path.isdir(ssh_dir):
util.ensure_dir(ssh_dir, mode=0700) util.ensure_dir(ssh_dir, mode=0700)
util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths):
key_entries.append(parser.parse(str(k), def_opt=key_prefix)) key_entries.append(parser.parse(str(k), def_opt=key_prefix))
# Extract the old and make the new # Extract the old and make the new
(auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
with util.SeLinuxGuard(ssh_dir, recursive=True): with util.SeLinuxGuard(ssh_dir, recursive=True):
content = update_authorized_keys(auth_key_entries, key_entries) content = update_authorized_keys(auth_key_entries, key_entries)
util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)

19
pylintrc Normal file
View File

@ -0,0 +1,19 @@
[General]
init-hook='import sys; sys.path.append("tests/")'
[MESSAGES CONTROL]
# See: http://pylint-messages.wikidot.com/all-codes
# W0142: *args and **kwargs are fine.
# W0511: TODOs in code comments are fine.
# W0702: No exception type(s) specified
# W0703: Catch "Exception"
# C0103: Invalid name
# C0111: Missing docstring
disable=W0142,W0511,W0702,W0703,C0103,C0111
[REPORTS]
reports=no
include-ids=yes
[FORMAT]
max-line-length=79

0
tests/__init__.py Normal file
View File

View File

View File

View File

View File

@ -1,14 +1,6 @@
import copy import copy
import os
import sys
top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") from tests.unittests import helpers
top_dir = os.path.abspath(top_dir)
if os.path.exists(top_dir):
sys.path.insert(0, os.path.dirname(top_dir))
import helpers
import itertools import itertools

View File

View File

@ -77,7 +77,7 @@ class TestConfig(MockerTestCase):
"""Test that a single cert gets passed to add_ca_certs.""" """Test that a single cert gets passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1"]}} config = {"ca-certs": {"trusted": ["CERT1"]}}
self.mock_add(self.paths, ["CERT1"]) self.mock_add(["CERT1"])
self.mock_update() self.mock_update()
self.mocker.replay() self.mocker.replay()
@ -87,7 +87,7 @@ class TestConfig(MockerTestCase):
"""Test that multiple certs get passed to add_ca_certs.""" """Test that multiple certs get passed to add_ca_certs."""
config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
self.mock_add(self.paths, ["CERT1", "CERT2"]) self.mock_add(["CERT1", "CERT2"])
self.mock_update() self.mock_update()
self.mocker.replay() self.mocker.replay()
@ -97,7 +97,7 @@ class TestConfig(MockerTestCase):
"""Test remove_defaults works as expected.""" """Test remove_defaults works as expected."""
config = {"ca-certs": {"remove-defaults": True}} config = {"ca-certs": {"remove-defaults": True}}
self.mock_remove(self.paths) self.mock_remove()
self.mock_update() self.mock_update()
self.mocker.replay() self.mocker.replay()
@ -116,8 +116,8 @@ class TestConfig(MockerTestCase):
"""Test remove_defaults is not called when config value is False.""" """Test remove_defaults is not called when config value is False."""
config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
self.mock_remove(self.paths) self.mock_remove()
self.mock_add(self.paths, ["CERT1"]) self.mock_add(["CERT1"])
self.mock_update() self.mock_update()
self.mocker.replay() self.mocker.replay()
@ -136,7 +136,7 @@ class TestAddCaCerts(MockerTestCase):
"""Test that no certificate are written if not provided.""" """Test that no certificate are written if not provided."""
self.mocker.replace(util.write_file, passthrough=False) self.mocker.replace(util.write_file, passthrough=False)
self.mocker.replay() self.mocker.replay()
cc_ca_certs.add_ca_certs(self.paths, []) cc_ca_certs.add_ca_certs([])
def test_single_cert(self): def test_single_cert(self):
"""Test adding a single certificate to the trusted CAs.""" """Test adding a single certificate to the trusted CAs."""
@ -149,7 +149,7 @@ class TestAddCaCerts(MockerTestCase):
"\ncloud-init-ca-certs.crt", omode="ab") "\ncloud-init-ca-certs.crt", omode="ab")
self.mocker.replay() self.mocker.replay()
cc_ca_certs.add_ca_certs(self.paths, [cert]) cc_ca_certs.add_ca_certs([cert])
def test_multiple_certs(self): def test_multiple_certs(self):
"""Test adding multiple certificates to the trusted CAs.""" """Test adding multiple certificates to the trusted CAs."""
@ -163,7 +163,7 @@ class TestAddCaCerts(MockerTestCase):
"\ncloud-init-ca-certs.crt", omode="ab") "\ncloud-init-ca-certs.crt", omode="ab")
self.mocker.replay() self.mocker.replay()
cc_ca_certs.add_ca_certs(self.paths, certs) cc_ca_certs.add_ca_certs(certs)
class TestUpdateCaCerts(MockerTestCase): class TestUpdateCaCerts(MockerTestCase):
@ -198,4 +198,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase):
"ca-certificates ca-certificates/trust_new_crts select no") "ca-certificates ca-certificates/trust_new_crts select no")
self.mocker.replay() self.mocker.replay()
cc_ca_certs.remove_default_ca_certs(self.paths) cc_ca_certs.remove_default_ca_certs()

View File

View File

@ -1,14 +1,6 @@
import os import os
import sys
# Allow running this test individually from tests.unittests import helpers
top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py")
top_dir = os.path.abspath(top_dir)
if os.path.exists(top_dir):
sys.path.insert(0, os.path.dirname(top_dir))
import helpers
from cloudinit.settings import (PER_INSTANCE) from cloudinit.settings import (PER_INSTANCE)
from cloudinit import stages from cloudinit import stages

View File

@ -6,23 +6,16 @@ else
files=( "$@" ); files=( "$@" );
fi fi
RC_FILE="pylintrc"
if [ ! -f $RC_FILE ]; then
RC_FILE="../pylintrc"
fi
cmd=( cmd=(
pylint pylint
--reports=n --rcfile=$RC_FILE
--include-ids=y
--max-line-length=79
--disable=R --disable=R
--disable=I --disable=I
--disable=W0142 # Used * or ** magic
--disable=W0511 # TODO/FIXME note
--disable=W0702 # No exception type(s) specified
--disable=W0703 # Catch "Exception"
--disable=C0103 # Invalid name
--disable=C0111 # Missing docstring
"${files[@]}" "${files[@]}"
) )