Tidy lint

James Page 2014-04-11 11:05:26 +01:00
parent f7008e028a
commit 3c6e764f57
5 changed files with 58 additions and 58 deletions

@@ -39,18 +39,18 @@ def get_corosync_conf():
for unit in utils.relation_list(relid):
conf = {
'corosync_bindnetaddr':
hacluster.get_network_address(
utils.relation_get('corosync_bindiface',
unit, relid)
),
hacluster.get_network_address(
utils.relation_get('corosync_bindiface',
unit, relid)
),
'corosync_mcastport': utils.relation_get('corosync_mcastport',
unit, relid),
'corosync_mcastaddr': utils.config_get('corosync_mcastaddr'),
'corosync_pcmk_ver': utils.config_get('corosync_pcmk_ver'),
}
}
if None not in conf.itervalues():
return conf
missing = [k for k, v in conf.iteritems() if v == None]
missing = [k for k, v in conf.iteritems() if v is None]
utils.juju_log('INFO',
'Missing required principle configuration: %s' % missing)
return None
@@ -76,7 +76,7 @@ def emit_base_conf():
# write the authkey
with open('/etc/corosync/authkey', 'w') as corosync_key_file:
corosync_key_file.write(b64decode(corosync_key))
os.chmod = ('/etc/corosync/authkey', 0400)
os.chmod = ('/etc/corosync/authkey', 0o400)
def config_changed():
@@ -145,7 +145,7 @@ def configure_cluster():
# Check that there's enough nodes in order to perform the
# configuration of the HA cluster
if (len(get_cluster_nodes()) <
int(utils.config_get('cluster_count'))):
int(utils.config_get('cluster_count'))):
utils.juju_log('WARNING', 'Not enough nodes in cluster, bailing')
return
@@ -160,43 +160,43 @@ def configure_cluster():
resources = \
{} if utils.relation_get("resources",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("resources",
unit, relid))
else ast.literal_eval(utils.relation_get("resources",
unit, relid))
resource_params = \
{} if utils.relation_get("resource_params",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("resource_params",
unit, relid))
else ast.literal_eval(utils.relation_get("resource_params",
unit, relid))
groups = \
{} if utils.relation_get("groups",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("groups",
unit, relid))
else ast.literal_eval(utils.relation_get("groups",
unit, relid))
ms = \
{} if utils.relation_get("ms",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("ms",
unit, relid))
else ast.literal_eval(utils.relation_get("ms",
unit, relid))
orders = \
{} if utils.relation_get("orders",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("orders",
unit, relid))
else ast.literal_eval(utils.relation_get("orders",
unit, relid))
colocations = \
{} if utils.relation_get("colocations",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("colocations",
unit, relid))
else ast.literal_eval(utils.relation_get("colocations",
unit, relid))
clones = \
{} if utils.relation_get("clones",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("clones",
unit, relid))
else ast.literal_eval(utils.relation_get("clones",
unit, relid))
init_services = \
{} if utils.relation_get("init_services",
unit, relid) is None \
else ast.literal_eval(utils.relation_get("init_services",
unit, relid))
else ast.literal_eval(utils.relation_get("init_services",
unit, relid))
else:
utils.juju_log('WARNING',
@@ -257,16 +257,16 @@ def configure_cluster():
if utils.running(init_services[res_name]):
utils.stop(init_services[res_name])
# Put the services in HA, if not already done so
#if not pcmk.is_resource_present(res_name):
# if not pcmk.is_resource_present(res_name):
if not pcmk.crm_opt_exists(res_name):
if not res_name in resource_params:
cmd = 'crm -w -F configure primitive %s %s' % (res_name,
res_type)
else:
cmd = 'crm -w -F configure primitive %s %s %s' % \
(res_name,
res_type,
resource_params[res_name])
(res_name,
res_type,
resource_params[res_name])
pcmk.commit(cmd)
utils.juju_log('INFO', '%s' % cmd)
if monitor_host:
@@ -327,7 +327,7 @@ def configure_cluster():
# than as individual resources.
if (res_name not in clones.values() and
res_name not in groups.values() and
not pcmk.crm_res_running(res_name)):
not pcmk.crm_res_running(res_name)):
# Just in case, cleanup the resources to ensure they get
# started in case they failed for some unrelated reason.
cmd = 'crm resource cleanup %s' % res_name
@@ -370,14 +370,14 @@ def configure_stonith():
url = utils.config_get('maas_url')
creds = utils.config_get('maas_credentials')
if None in [url, creds]:
utils.juju_log('ERROR', 'maas_url and maas_credentials must be set'\
utils.juju_log('ERROR', 'maas_url and maas_credentials must be set'
' in config to enable STONITH.')
sys.exit(1)
maas = MAAS.MAASHelper(url, creds)
nodes = maas.list_nodes()
if not nodes:
utils.juju_log('ERROR', 'Could not obtain node inventory from '\
utils.juju_log('ERROR', 'Could not obtain node inventory from '
'MAAS @ %s.' % url)
sys.exit(1)
@@ -386,13 +386,13 @@ def configure_stonith():
rsc, constraint = pcmk.maas_stonith_primitive(nodes, node)
if not rsc:
utils.juju_log('ERROR',
'Failed to determine STONITH primitive for node'\
'Failed to determine STONITH primitive for node'
' %s' % node)
sys.exit(1)
rsc_name = str(rsc).split(' ')[1]
if not pcmk.is_resource_present(rsc_name):
utils.juju_log('INFO', 'Creating new STONITH primitive %s.' %\
utils.juju_log('INFO', 'Creating new STONITH primitive %s.' %
rsc_name)
cmd = 'crm -F configure %s' % rsc
pcmk.commit(cmd)
@@ -400,7 +400,7 @@ def configure_stonith():
cmd = 'crm -F configure %s' % constraint
pcmk.commit(cmd)
else:
utils.juju_log('INFO', 'STONITH primitive already exists '\
utils.juju_log('INFO', 'STONITH primitive already exists '
'for node.')
cmd = "crm configure property stonith-enabled=true"
@@ -429,6 +429,6 @@ hooks = {
'ha-relation-changed': configure_cluster,
'hanode-relation-joined': configure_cluster,
'hanode-relation-changed': configure_cluster,
}
}
utils.do_hooks(hooks)

@@ -15,7 +15,7 @@ from lib.utils import (
relation_get,
get_unit_hostname,
config_get
)
)
import subprocess
import os
@@ -35,7 +35,7 @@ def is_leader(resource):
cmd = [
"crm", "resource",
"show", resource
]
]
try:
status = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
@@ -93,7 +93,7 @@ def https():
if (relation_get('https_keystone', rid=r_id, unit=unit) and
relation_get('ssl_cert', rid=r_id, unit=unit) and
relation_get('ssl_key', rid=r_id, unit=unit) and
relation_get('ca_cert', rid=r_id, unit=unit)):
relation_get('ca_cert', rid=r_id, unit=unit)):
return True
return False

@@ -33,7 +33,7 @@ def install(*pkgs):
'apt-get',
'-y',
'install'
]
]
for pkg in pkgs:
cmd.append(pkg)
subprocess.check_call(cmd)
@@ -55,13 +55,12 @@ except ImportError:
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@@ -72,7 +71,7 @@ CLOUD_ARCHIVE_POCKETS = {
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly'
}
}
def configure_source():
@@ -83,7 +82,7 @@ def configure_source():
cmd = [
'add-apt-repository',
source
]
]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
@@ -101,7 +100,7 @@ def configure_source():
'apt-key',
'adv', '--keyserver keyserver.ubuntu.com',
'--recv-keys', key
]
]
subprocess.check_call(cmd)
elif l == 1:
apt_line = source
@@ -111,7 +110,7 @@ def configure_source():
cmd = [
'apt-get',
'update'
]
]
subprocess.check_call(cmd)
# Protocols
@@ -123,7 +122,7 @@ def expose(port, protocol='TCP'):
cmd = [
'open-port',
'{}/{}'.format(port, protocol)
]
]
subprocess.check_call(cmd)
@@ -132,7 +131,7 @@ def juju_log(severity, message):
'juju-log',
'--log-level', severity,
message
]
]
subprocess.check_call(cmd)
@@ -157,7 +156,7 @@ def relation_ids(relation):
cmd = [
'relation-ids',
relation
]
]
result = str(subprocess.check_output(cmd)).split()
if result == "":
return None
@@ -170,7 +169,7 @@ def relation_list(rid):
cmd = [
'relation-list',
'-r', rid,
]
]
result = str(subprocess.check_output(cmd)).split()
if result == "":
return None
@@ -182,7 +181,7 @@ def relation_list(rid):
def relation_get(attribute, unit=None, rid=None):
cmd = [
'relation-get',
]
]
if rid:
cmd.append('-r')
cmd.append(rid)
@@ -201,7 +200,7 @@ def relation_get_dict(relation_id=None, remote_unit=None):
"""Obtain all relation data as dict by way of JSON"""
cmd = [
'relation-get', '--format=json'
]
]
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
@@ -222,7 +221,7 @@ def relation_get_dict(relation_id=None, remote_unit=None):
def relation_set(**kwargs):
cmd = [
'relation-set'
]
]
args = []
for k, v in kwargs.items():
if k == 'rid':
@@ -240,7 +239,7 @@ def unit_get(attribute):
cmd = [
'unit-get',
attribute
]
]
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
if value == "":
return None
@@ -254,7 +253,7 @@ def config_get(attribute):
'config-get',
'--format',
'json',
]
]
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
cfg = json.loads(out)
@@ -318,7 +317,7 @@ def running(service):
return False
else:
if ("start/running" in output or
"is running" in output):
"is running" in output):
return True
else:
return False

@@ -10,6 +10,7 @@ MAAS_PROFILE_NAME = 'maas-juju-hacluster'
class MAASHelper(object):
def __init__(self, url, creds):
self.url = url
self.creds = creds

@@ -72,7 +72,7 @@ def _maas_ipmi_stonith_resource(node, power_params):
# ensure ipmi stonith agents are not running on the nodes that
# they manage.
constraint = 'location const_loc_stonith_avoid_%s %s -inf: %s' %\
(node, rsc_name, node)
(node, rsc_name, node)
return rsc, constraint