commit 68e425023b
    synced /next
@@ -5,7 +5,7 @@ lint:
 	@flake8 --exclude hooks/charmhelpers hooks unit_tests
 	@charm proof
 
-test:
+unit_test:
 	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
 
 bin/charm_helpers_sync.py:
@@ -16,6 +16,6 @@ bin/charm_helpers_sync.py:
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
 
-publish: lint test
-	bzr push lp:charms/cinder-ceph
+publish: lint unit_test
+	bzr push lp:charms/trusty/cinder-ceph
 

@@ -7,4 +7,5 @@ include:
     - contrib.storage
     - contrib.hahelpers
     - contrib.network.ip
+    - contrib.python.packages
     - payload.execd

@@ -0,0 +1,22 @@
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
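Note: this new package __init__ (the diff does not preserve file names, but the content matches the charmhelpers package bootstrap) is what lets the rest of the synced code import six and yaml on a fresh unit. A minimal sketch of the effect, assuming an Ubuntu unit where apt-get is available and the hook runs as root:

# Any first import from the synced package triggers the bootstrap above,
# so a hook can simply do:
from charmhelpers.core import hookenv   # installs six/yaml via apt if missing

hookenv.log('charmhelpers bootstrap complete')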

@@ -13,9 +13,10 @@ clustering-related helpers.
 import subprocess
 import os
 
 from socket import gethostname as get_unit_hostname
 
+import six
+
 from charmhelpers.core.hookenv import (
     log,
     relation_ids,
@@ -77,7 +78,7 @@ def is_crm_leader(resource):
         "show", resource
     ]
    try:
-        status = subprocess.check_output(cmd)
+        status = subprocess.check_output(cmd).decode('UTF-8')
    except subprocess.CalledProcessError:
        return False
    else:
@@ -150,34 +151,42 @@ def https():
     return False
 
 
-def determine_api_port(public_port):
+def determine_api_port(public_port, singlenode_mode=False):
     '''
     Determine correct API server listening port based on
     existence of HTTPS reverse proxy and/or haproxy.
 
     public_port: int: standard public port for given service
 
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
     returns: int: the correct listening port for the API service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     if https():
         i += 1
     return public_port - (i * 10)
 
 
-def determine_apache_port(public_port):
+def determine_apache_port(public_port, singlenode_mode=False):
     '''
     Description: Determine correct apache listening port based on public IP +
     state of the cluster.
 
     public_port: int: standard public port for given service
 
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
     returns: int: the correct listening port for the HAProxy service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     return public_port - (i * 10)
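Note: a worked example of the port arithmetic shared by the two helpers above, using a hypothetical public port of 8776 (not taken from this diff):

# Each proxy hop in front of the service shifts the listen port down by 10.
public_port = 8776
haproxy_port = public_port - 10  # determine_apache_port(): clustered or singlenode_mode
api_port = public_port - 20      # determine_api_port(): additionally behind https()
print(haproxy_port, api_port)    # 8766 8756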
@@ -197,7 +206,7 @@ def get_hacluster_config():
     for setting in settings:
         conf[setting] = config_get(setting)
     missing = []
-    [missing.append(s) for s, v in conf.iteritems() if v is None]
+    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
     if missing:
         log('Insufficient config data to configure hacluster.', level=ERROR)
         raise HAIncompleteConfig

@@ -228,7 +228,7 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
         raise Exception("Interface '%s' doesn't have any %s addresses." %
                         (iface, inet_type))
 
-    return addresses
+    return sorted(addresses)
 
 
 get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
@@ -302,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
         if global_addrs:
             # Make sure any found global addresses are not temporary
             cmd = ['ip', 'addr', 'show', iface]
-            out = subprocess.check_output(cmd)
+            out = subprocess.check_output(cmd).decode('UTF-8')
             if dynamic_only:
                 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
             else:

@@ -1,3 +1,4 @@
+import six
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):

@@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
 
+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
            expected service catalog endpoints.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
                 ret = self._validate_dict_data(expected[k][0], actual[k][0])
                 if ret:

@@ -1,10 +1,11 @@
 import json
 import os
 import time
 
 from base64 import b64decode
 from subprocess import check_call
 
+import six
+
 from charmhelpers.fetch import (
     apt_install,
     filter_installed_packages,
@@ -20,11 +21,15 @@ from charmhelpers.core.hookenv import (
     relation_set,
     unit_get,
     unit_private_ip,
+    charm_name,
     DEBUG,
     INFO,
     WARNING,
     ERROR,
 )
 
+from charmhelpers.core.sysctl import create as sysctl_create
+
 from charmhelpers.core.host import (
     mkdir,
     write_file,
@@ -69,7 +74,7 @@ def ensure_packages(packages):
 
 def context_complete(ctxt):
     _missing = []
-    for k, v in ctxt.iteritems():
+    for k, v in six.iteritems(ctxt):
         if v is None or v == '':
             _missing.append(k)
 
@@ -97,7 +102,7 @@ def config_flags_parser(config_flags):
     split = config_flags.strip(' =').split('=')
     limit = len(split)
     flags = {}
-    for i in xrange(0, limit - 1):
+    for i in range(0, limit - 1):
         current = split[i]
         next = split[i + 1]
         vindex = next.rfind(',')
@@ -375,7 +380,7 @@ class AMQPContext(OSContextGenerator):
                 host = format_ipv6_addr(host) or host
                 rabbitmq_hosts.append(host)
 
-            ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
+            ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
 
             if not context_complete(ctxt):
                 return {}
@@ -408,7 +413,7 @@ class CephContext(OSContextGenerator):
                 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                 mon_hosts.append(ceph_addr)
 
-        ctxt = {'mon_hosts': ' '.join(mon_hosts),
+        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
                 'auth': auth,
                 'key': key,
                 'use_syslog': use_syslog}
@@ -430,8 +435,11 @@ class HAProxyContext(OSContextGenerator):
     """
     interfaces = ['cluster']
 
+    def __init__(self, singlenode_mode=False):
+        self.singlenode_mode = singlenode_mode
+
     def __call__(self):
-        if not relation_ids('cluster'):
+        if not relation_ids('cluster') and not self.singlenode_mode:
             return {}
 
         if config('prefer-ipv6'):
@@ -492,7 +500,8 @@ class HAProxyContext(OSContextGenerator):
         ctxt['stat_port'] = ':8888'
 
         for frontend in cluster_hosts:
-            if len(cluster_hosts[frontend]['backends']) > 1:
+            if (len(cluster_hosts[frontend]['backends']) > 1 or
+                    self.singlenode_mode):
                 # Enable haproxy when we have enough peers.
                 log('Ensuring haproxy enabled in /etc/default/haproxy.',
                     level=DEBUG)
@@ -587,7 +596,7 @@ class ApacheSSLContext(OSContextGenerator):
             if k.startswith('ssl_key_'):
                 cns.append(k.lstrip('ssl_key_'))
 
-        return list(set(cns))
+        return sorted(list(set(cns)))
 
     def get_network_addresses(self):
         """For each network configured, return corresponding address and vip
@@ -631,10 +640,10 @@ class ApacheSSLContext(OSContextGenerator):
             else:
                 addresses.append((addr, addr))
 
-        return addresses
+        return sorted(addresses)
 
     def __call__(self):
-        if isinstance(self.external_ports, basestring):
+        if isinstance(self.external_ports, six.string_types):
             self.external_ports = [self.external_ports]
 
         if not self.external_ports or not https():
@@ -651,7 +660,7 @@ class ApacheSSLContext(OSContextGenerator):
             self.configure_cert(cn)
 
         addresses = self.get_network_addresses()
-        for address, endpoint in set(addresses):
+        for address, endpoint in sorted(set(addresses)):
             for api_port in self.external_ports:
                 ext_port = determine_apache_port(api_port)
                 int_port = determine_api_port(api_port)
@@ -659,7 +668,7 @@ class ApacheSSLContext(OSContextGenerator):
                 ctxt['endpoints'].append(portmap)
                 ctxt['ext_ports'].append(int(ext_port))
 
-        ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
+        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
         return ctxt
 
 
@@ -921,10 +930,10 @@ class SubordinateConfigContext(OSContextGenerator):
                 continue
 
             sub_config = sub_config[self.config_file]
-            for k, v in sub_config.iteritems():
+            for k, v in six.iteritems(sub_config):
                 if k == 'sections':
-                    for section, config_dict in v.iteritems():
-                        log("Adding section '%s'" % (section),
+                    for section, config_dict in six.iteritems(v):
+                        log("adding section '%s'" % (section),
                             level=DEBUG)
                         ctxt[k][section] = config_dict
                 else:
@@ -975,7 +984,7 @@ class WorkerConfigContext(OSContextGenerator):
         return NUM_CPUS
 
     def __call__(self):
-        multiplier = config('worker-multiplier') or 1
+        multiplier = config('worker-multiplier') or 0
         ctxt = {"workers": self.num_cpus * multiplier}
         return ctxt
 
@@ -1010,3 +1019,14 @@ class NotificationDriverContext(OSContextGenerator):
             ctxt['notifications'] = "True"
 
         return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+    """This context check if the 'sysctl' option exists on configuration
+    then creates a file with the loaded contents"""
+    def __call__(self):
+        sysctl_dict = config('sysctl')
+        if sysctl_dict:
+            sysctl_create(sysctl_dict,
+                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+        return {'sysctl': sysctl_dict}
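Note: the new SysctlContext turns a charm 'sysctl' config option into a file under /etc/sysctl.d. A sketch of the intended use, assuming this hunk is contrib/openstack/context.py as its import paths suggest; the config value shown is hypothetical and sysctl_create() itself lives in charmhelpers.core.sysctl, outside this diff:

from charmhelpers.contrib.openstack.context import SysctlContext

# Hypothetical charm config value:
#   sysctl: "{ net.ipv4.tcp_keepalive_time: 60, vm.swappiness: 10 }"
ctxt = SysctlContext()()  # asks sysctl_create() to write /etc/sysctl.d/50-<charm_name()>.conf
print(ctxt)               # {'sysctl': <the raw config string>}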

@@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import (
     config,
     unit_get,
 )
 
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
 PUBLIC = 'public'
 INTERNAL = 'int'
 ADMIN = 'admin'
 
-_address_map = {
+ADDRESS_MAP = {
     PUBLIC: {
         'config': 'os-public-network',
         'fallback': 'public-address'
@@ -33,16 +31,14 @@ _address_map = {
 
 
 def canonical_url(configs, endpoint_type=PUBLIC):
-    '''
-    Returns the correct HTTP URL to this host given the state of HTTPS
+    """Returns the correct HTTP URL to this host given the state of HTTPS
     configuration, hacluster and charm configuration.
 
-    :configs OSTemplateRenderer: A config tempating object to inspect for
-                                 a complete https context.
-    :endpoint_type str: The endpoint type to resolve.
-
-    :returns str: Base URL for services on the current service unit.
-    '''
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :param endpoint_type: str endpoint type to resolve.
+    :param returns: str base URL for services on the current service unit.
+    """
     scheme = 'http'
     if 'https' in configs.complete_contexts():
         scheme = 'https'
@@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC):
 
 
 def resolve_address(endpoint_type=PUBLIC):
+    """Return unit address depending on net config.
+
+    If unit is clustered with vip(s) and has net splits defined, return vip on
+    correct network. If clustered with no nets defined, return primary vip.
+
+    If not clustered, return unit address ensuring address is on configured net
+    split if one is configured.
+
+    :param endpoint_type: Network endpoing type
+    """
     resolved_address = None
-    if is_clustered():
-        if config(_address_map[endpoint_type]['config']) is None:
-            # Assume vip is simple and pass back directly
-            resolved_address = config('vip')
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    clustered = is_clustered()
+    if clustered:
+        if not net_addr:
+            # If no net-splits defined, we expect a single vip
+            resolved_address = vips[0]
         else:
-            for vip in config('vip').split():
-                if is_address_in_network(
-                        config(_address_map[endpoint_type]['config']),
-                        vip):
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
                     resolved_address = vip
+                    break
     else:
         if config('prefer-ipv6'):
-            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
-            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
-        resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']), fallback_addr)
+            fallback_addr = unit_get(net_fallback)
+
+        resolved_address = get_address_in_network(net_addr, fallback_addr)
 
     if resolved_address is None:
-        raise ValueError('Unable to resolve a suitable IP address'
-                         ' based on charm state and configuration')
-    else:
-        return resolved_address
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. (net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
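Note: a standalone sketch of the vip-selection rule that the reworked resolve_address() applies when clustered; the addresses are illustrative, and the stdlib ipaddress module stands in for is_address_in_network() purely for this sketch:

import ipaddress


def pick_vip(vips, network):
    """Return the first vip inside `network`, mirroring the clustered
    branch of resolve_address(); None if nothing matches."""
    for vip in vips:
        if ipaddress.ip_address(vip) in ipaddress.ip_network(network):
            return vip
    return None


print(pick_vip(['10.0.1.100', '10.0.2.100'], '10.0.2.0/24'))  # 10.0.2.100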

@@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release
 def headers_package():
     """Ensures correct linux-headers for running kernel are installed,
     for building DKMS package"""
-    kver = check_output(['uname', '-r']).strip()
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
     return 'linux-headers-%s' % kver
 
 QUANTUM_CONF_DIR = '/etc/quantum'
@@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum'
 
 def kernel_version():
     """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
-    kver = check_output(['uname', '-r']).strip()
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
     kver = kver.split('.')
     return (int(kver[0]), int(kver[1]))
 

@@ -35,7 +35,7 @@ listen stats {{ stat_port }}
     stats auth admin:password
 
 {% if frontends -%}
-{% for service, ports in service_ports.iteritems() -%}
+{% for service, ports in service_ports.items() -%}
 frontend tcp-in_{{ service }}
     bind *:{{ ports[0] }}
     bind :::{{ ports[0] }}
@@ -46,7 +46,7 @@ frontend tcp-in_{{ service }}
 {% for frontend in frontends -%}
 backend {{ service }}_{{ frontend }}
     balance leastconn
-    {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
+    {% for unit, address in frontends[frontend]['backends'].items() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
     {% endfor %}
 {% endfor -%}

@@ -1,13 +1,13 @@
 import os
 
-from charmhelpers.fetch import apt_install
+import six
+
+from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
     log,
     ERROR,
     INFO
 )
 
 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 
 try:
@@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
     order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
-                 for rel in OPENSTACK_CODENAMES.itervalues()]
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
 
     if not os.path.isdir(templates_dir):
         log('Templates directory not found @ %s.' % templates_dir,
@@ -258,7 +258,7 @@ class OSConfigRenderer(object):
         """
         Write out all registered config files.
         """
-        [self.write(k) for k in self.templates.iterkeys()]
+        [self.write(k) for k in six.iterkeys(self.templates)]
 
     def set_release(self, openstack_release):
         """
@@ -275,5 +275,5 @@ class OSConfigRenderer(object):
         '''
         interfaces = []
         [interfaces.extend(i.complete_contexts())
-         for i in self.templates.itervalues()]
+         for i in six.itervalues(self.templates)]
         return interfaces

@@ -10,11 +10,13 @@ import os
 import socket
 import sys
 
+import six
 import yaml
 
 from charmhelpers.core.hookenv import (
     config,
     log as juju_log,
     charm_dir,
     ERROR,
     INFO,
     relation_ids,
     relation_set
@@ -31,7 +33,8 @@ from charmhelpers.contrib.network.ip import (
 )
 
 from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install, apt_cache
+from charmhelpers.fetch import apt_install, apt_cache, install_remote
+from charmhelpers.contrib.python.packages import pip_install
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 
@@ -113,7 +116,7 @@ def get_os_codename_install_source(src):
 
     # Best guess match based on deb string provided
     if src.startswith('deb') or src.startswith('ppa'):
-        for k, v in OPENSTACK_CODENAMES.iteritems():
+        for k, v in six.iteritems(OPENSTACK_CODENAMES):
             if v in src:
                 return v
 
@@ -134,7 +137,7 @@ def get_os_codename_version(vers):
 
 def get_os_version_codename(codename):
     '''Determine OpenStack version number from codename.'''
-    for k, v in OPENSTACK_CODENAMES.iteritems():
+    for k, v in six.iteritems(OPENSTACK_CODENAMES):
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -194,7 +197,7 @@ def get_os_version_package(pkg, fatal=True):
     else:
         vers_map = OPENSTACK_CODENAMES
 
-    for version, cname in vers_map.iteritems():
+    for version, cname in six.iteritems(vers_map):
         if cname == codename:
             return version
     # e = "Could not determine OpenStack version for package: %s" % pkg
@@ -318,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
         rc_script.write(
             "#!/bin/bash\n")
         [rc_script.write('export %s=%s\n' % (u, p))
-         for u, p in env_vars.iteritems() if u != "script_path"]
+         for u, p in six.iteritems(env_vars) if u != "script_path"]
 
 
 def openstack_upgrade_available(package):
@@ -351,8 +354,8 @@ def ensure_block_device(block_device):
     '''
     _none = ['None', 'none', None]
     if (block_device in _none):
-        error_out('prepare_storage(): Missing required input: '
-                  'block_device=%s.' % block_device, level=ERROR)
+        error_out('prepare_storage(): Missing required input: block_device=%s.'
+                  % block_device)
 
     if block_device.startswith('/dev/'):
         bdev = block_device
@@ -368,8 +371,7 @@ def ensure_block_device(block_device):
         bdev = '/dev/%s' % block_device
 
     if not is_block_device(bdev):
-        error_out('Failed to locate valid block device at %s' % bdev,
-                  level=ERROR)
+        error_out('Failed to locate valid block device at %s' % bdev)
 
     return bdev
 
@@ -418,7 +420,7 @@ def ns_query(address):
 
     if isinstance(address, dns.name.Name):
         rtype = 'PTR'
-    elif isinstance(address, basestring):
+    elif isinstance(address, six.string_types):
         rtype = 'A'
     else:
         return None
@@ -486,8 +488,7 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
               'hostname': json.dumps(hosts)}
 
    if relation_prefix:
-        keys = kwargs.keys()
-        for key in keys:
+        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]
 
@@ -508,3 +509,111 @@ def os_requires_version(ostack_release, pkg):
             f(*args)
         return wrapped_f
     return wrap
+
+
+def git_install_requested():
+    """Returns true if openstack-origin-git is specified."""
+    return config('openstack-origin-git') != "None"
+
+
+requirements_dir = None
+
+
+def git_clone_and_install(file_name, core_project):
+    """Clone/install all OpenStack repos specified in yaml config file."""
+    global requirements_dir
+
+    if file_name == "None":
+        return
+
+    yaml_file = os.path.join(charm_dir(), file_name)
+
+    # clone/install the requirements project first
+    installed = _git_clone_and_install_subset(yaml_file,
+                                              whitelist=['requirements'])
+    if 'requirements' not in installed:
+        error_out('requirements git repository must be specified')
+
+    # clone/install all other projects except requirements and the core project
+    blacklist = ['requirements', core_project]
+    _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
+                                  update_requirements=True)
+
+    # clone/install the core project
+    whitelist = [core_project]
+    installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
+                                              update_requirements=True)
+    if core_project not in installed:
+        error_out('{} git repository must be specified'.format(core_project))
+
+
+def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
+                                  update_requirements=False):
+    """Clone/install subset of OpenStack repos specified in yaml config file."""
+    global requirements_dir
+    installed = []
+
+    with open(yaml_file, 'r') as fd:
+        projects = yaml.load(fd)
+        for proj, val in projects.items():
+            # The project subset is chosen based on the following 3 rules:
+            # 1) If project is in blacklist, we don't clone/install it, period.
+            # 2) If whitelist is empty, we clone/install everything else.
+            # 3) If whitelist is not empty, we clone/install everything in the
+            #    whitelist.
+            if proj in blacklist:
+                continue
+            if whitelist and proj not in whitelist:
+                continue
+            repo = val['repository']
+            branch = val['branch']
+            repo_dir = _git_clone_and_install_single(repo, branch,
+                                                     update_requirements)
+            if proj == 'requirements':
+                requirements_dir = repo_dir
+            installed.append(proj)
+    return installed
+
+
+def _git_clone_and_install_single(repo, branch, update_requirements=False):
+    """Clone and install a single git repository."""
+    dest_parent_dir = "/mnt/openstack-git/"
+    dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
+
+    if not os.path.exists(dest_parent_dir):
+        juju_log('Host dir not mounted at {}. '
+                 'Creating directory there instead.'.format(dest_parent_dir))
+        os.mkdir(dest_parent_dir)
+
+    if not os.path.exists(dest_dir):
+        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
+        repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
+    else:
+        repo_dir = dest_dir
+
+    if update_requirements:
+        if not requirements_dir:
+            error_out('requirements repo must be cloned before '
+                      'updating from global requirements.')
+        _git_update_requirements(repo_dir, requirements_dir)
+
+    juju_log('Installing git repo from dir: {}'.format(repo_dir))
+    pip_install(repo_dir)
+
+    return repo_dir
+
+
+def _git_update_requirements(package_dir, reqs_dir):
+    """Update from global requirements.
+
+    Update an OpenStack git directory's requirements.txt and
+    test-requirements.txt from global-requirements.txt."""
+    orig_dir = os.getcwd()
+    os.chdir(reqs_dir)
+    cmd = "python update.py {}".format(package_dir)
+    try:
+        subprocess.check_call(cmd.split(' '))
+    except subprocess.CalledProcessError:
+        package = os.path.basename(package_dir)
+        error_out("Error updating {} from global-requirements.txt".format(package))
+    os.chdir(orig_dir)
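Note: git_clone_and_install() reads a YAML file whose layout is only implied by _git_clone_and_install_subset() (a mapping of project names to 'repository' and 'branch', with a mandatory 'requirements' entry). A sketch of that layout with placeholder URLs and branch names, parsed the same way the helper does:

import yaml

# Shape implied by the helper above; repository URLs here are illustrative only.
example = yaml.safe_load("""
requirements:
  repository: https://git.openstack.org/openstack/requirements
  branch: stable/juno
cinder:
  repository: https://git.openstack.org/openstack/cinder
  branch: stable/juno
""")
print(sorted(example))              # ['cinder', 'requirements']
print(example['cinder']['branch'])  # stable/juno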

@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import log
+
+try:
+    from pip import main as pip_execute
+except ImportError:
+    apt_update()
+    apt_install('python-pip')
+    from pip import main as pip_execute
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, **options):
+    """Install a requirements file """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    log("Installing from file: {} with options: {}".format(requirements,
+                                                            command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, **options):
+    """Install a python package"""
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', "index-url", )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Returns the list of current python installed packages
+    """
+    return pip_execute(["list"])
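Note: a brief usage sketch for the new pip helpers; the package names and proxy URL are placeholders, shown only to illustrate how parse_options() filters keyword options:

# Hypothetical calls from a charm hook; 'proxy' is forwarded because it is in
# available_options, anything unrecognised is silently dropped.
from charmhelpers.contrib.python.packages import pip_install, pip_uninstall

pip_install('glance-store', proxy='http://squid.internal:3128')
pip_install(['oslo.config', 'oslo.messaging'])  # lists are expanded in place
pip_uninstall('python-swiftclient')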

@@ -65,7 +65,8 @@ def install():
 def rbd_exists(service, pool, rbd_img):
     """Check to see if a RADOS block device exists."""
     try:
-        out = check_output(['rbd', 'list', '--id', service, '--pool', pool])
+        out = check_output(['rbd', 'list', '--id',
+                            service, '--pool', pool]).decode('UTF-8')
     except CalledProcessError:
         return False
 
@@ -82,7 +83,8 @@ def create_rbd_image(service, pool, image, sizemb):
 def pool_exists(service, name):
     """Check to see if a RADOS pool already exists."""
     try:
-        out = check_output(['rados', '--id', service, 'lspools'])
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
     except CalledProcessError:
         return False
 
@@ -96,7 +98,8 @@ def get_osds(service):
     version = ceph_version()
     if version and version >= '0.56':
         return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls', '--format=json']))
+                                        'osd', 'ls',
+                                        '--format=json']).decode('UTF-8'))
 
     return None
 
@@ -112,7 +115,7 @@ def create_pool(service, name, replicas=3):
     # on upstream recommended best practices.
     osds = get_osds(service)
     if osds:
-        pgnum = (len(osds) * 100 / replicas)
+        pgnum = (len(osds) * 100 // replicas)
     else:
         # NOTE(james-page): Default to 200 for older ceph versions
         # which don't support OSD query from cli
@@ -193,7 +196,7 @@ def configure(service, key, auth, use_syslog):
 def image_mapped(name):
     """Determine whether a RADOS block device is mapped locally."""
     try:
-        out = check_output(['rbd', 'showmapped'])
+        out = check_output(['rbd', 'showmapped']).decode('UTF-8')
     except CalledProcessError:
         return False
 
@@ -361,7 +364,7 @@ def ceph_version():
     """Retrieve the local version of ceph."""
     if os.path.exists('/usr/bin/ceph'):
         cmd = ['ceph', '-v']
-        output = check_output(cmd)
+        output = check_output(cmd).decode('US-ASCII')
         output = output.split()
         if len(output) > 3:
             return output[2]
@@ -369,46 +372,3 @@ def ceph_version():
             return None
     else:
         return None
-
-
-class CephBrokerRq(object):
-    """Ceph broker request.
-
-    Multiple operations can be added to a request and sent to the Ceph broker
-    to be executed.
-
-    Request is json-encoded for sending over the wire.
-
-    The API is versioned and defaults to version 1.
-    """
-    def __init__(self, api_version=1):
-        self.api_version = api_version
-        self.ops = []
-
-    def add_op_create_pool(self, name, replica_count=3):
-        self.ops.append({'op': 'create-pool', 'name': name,
-                         'replicas': replica_count})
-
-    @property
-    def request(self):
-        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
-
-
-class CephBrokerRsp(object):
-    """Ceph broker response.
-
-    Response is json-decoded and contents provided as methods/properties.
-
-    The API is versioned and defaults to version 1.
-    """
-    def __init__(self, encoded_rsp):
-        self.api_version = None
-        self.rsp = json.loads(encoded_rsp)
-
-    @property
-    def exit_code(self):
-        return self.rsp.get('exit-code')
-
-    @property
-    def exit_msg(self):
-        return self.rsp.get('stderr')

@@ -1,12 +1,12 @@
 import os
 import re
 
 from subprocess import (
     check_call,
     check_output,
 )
 
+import six
+
 
 ##################################################
 # loopback device helpers.
@@ -37,7 +37,7 @@ def create_loopback(file_path):
     '''
     file_path = os.path.abspath(file_path)
     check_call(['losetup', '--find', file_path])
-    for d, f in loopback_devices().iteritems():
+    for d, f in six.iteritems(loopback_devices()):
         if f == file_path:
             return d
 
@@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):
 
     :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
     '''
-    for d, f in loopback_devices().iteritems():
+    for d, f in six.iteritems(loopback_devices()):
         if f == path:
             return d
 

@@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device):
     vg = None
     pvd = check_output(['pvdisplay', block_device]).splitlines()
     for l in pvd:
+        l = l.decode('UTF-8')
         if l.strip().startswith('VG Name'):
             vg = ' '.join(l.strip().split()[2:])
     return vg

@@ -30,7 +30,8 @@ def zap_disk(block_device):
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
     call(['sgdisk', '--zap-all', '--mbrtogpt',
           '--clear', block_device])
-    dev_end = check_output(['blockdev', '--getsz', block_device])
+    dev_end = check_output(['blockdev', '--getsz',
+                            block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@ def is_device_mounted(device):
     it doesn't.
     '''
     is_partition = bool(re.search(r".*[0-9]+\b", device))
-    out = check_output(['mount'])
+    out = check_output(['mount']).decode('UTF-8')
     if is_partition:
         return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))

@@ -3,10 +3,11 @@
 
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 
+import io
 import os
 
 
-class Fstab(file):
+class Fstab(io.FileIO):
     """This class extends file in order to implement a file reader/writer
     for file `/etc/fstab`
     """
@@ -24,8 +25,8 @@ class Fstab(file):
             options = "defaults"
 
         self.options = options
-        self.d = d
-        self.p = p
+        self.d = int(d)
+        self.p = int(p)
 
     def __eq__(self, o):
         return str(self) == str(o)
@@ -45,7 +46,7 @@ class Fstab(file):
             self._path = path
         else:
             self._path = self.DEFAULT_PATH
-        file.__init__(self, self._path, 'r+')
+        super(Fstab, self).__init__(self._path, 'rb+')
 
     def _hydrate_entry(self, line):
         # NOTE: use split with no arguments to split on any
@@ -58,8 +59,9 @@ class Fstab(file):
     def entries(self):
         self.seek(0)
         for line in self.readlines():
+            line = line.decode('us-ascii')
             try:
-                if not line.startswith("#"):
+                if line.strip() and not line.startswith("#"):
                     yield self._hydrate_entry(line)
             except ValueError:
                 pass
@@ -75,14 +77,14 @@ class Fstab(file):
         if self.get_entry_by_attr('device', entry.device):
             return False
 
-        self.write(str(entry) + '\n')
+        self.write((str(entry) + '\n').encode('us-ascii'))
         self.truncate()
         return entry
 
     def remove_entry(self, entry):
         self.seek(0)
 
-        lines = self.readlines()
+        lines = [l.decode('us-ascii') for l in self.readlines()]
 
         found = False
         for index, line in enumerate(lines):
@@ -97,7 +99,7 @@ class Fstab(file):
             lines.remove(line)
 
         self.seek(0)
-        self.write(''.join(lines))
+        self.write(''.join(lines).encode('us-ascii'))
         self.truncate()
         return True
 
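Note: Fstab now subclasses io.FileIO, which deals in bytes, hence the us-ascii encode/decode added above. A minimal sketch of the round trip against a scratch file; the module path and the Entry constructor arguments are assumed from the full class, which this hunk only shows in part:

import io
from charmhelpers.core.fstab import Fstab

# Work on a throwaway copy so the example never touches the real /etc/fstab.
io.open('/tmp/fstab.test', 'w').close()
fstab = Fstab('/tmp/fstab.test')
entry = Fstab.Entry('/dev/loop0', '/mnt/data', 'ext4', 'defaults', 0, 0)
fstab.add_entry(entry)                  # str(entry) is encoded to bytes on write
print([str(e) for e in fstab.entries])  # entries are decoded back to text on read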

@@ -9,9 +9,14 @@ import json
 import yaml
 import subprocess
 import sys
-import UserDict
 from subprocess import CalledProcessError
 
+import six
+if not six.PY3:
+    from UserDict import UserDict
+else:
+    from collections import UserDict
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@@ -63,16 +68,18 @@ def log(message, level=None):
     command = ['juju-log']
     if level:
         command += ['-l', level]
+    if not isinstance(message, six.string_types):
+        message = repr(message)
     command += [message]
     subprocess.call(command)
 
 
-class Serializable(UserDict.IterableUserDict):
+class Serializable(UserDict):
     """Wrapper, an object that can be serialized to yaml or json"""
 
     def __init__(self, obj):
         # wrap the object
-        UserDict.IterableUserDict.__init__(self)
+        UserDict.__init__(self)
         self.data = obj
 
     def __getattr__(self, attr):
@@ -218,7 +225,7 @@ class Config(dict):
         prev_keys = []
         if self._prev_dict is not None:
             prev_keys = self._prev_dict.keys()
-        return list(set(prev_keys + dict.keys(self)))
+        return list(set(prev_keys + list(dict.keys(self))))
 
     def load_previous(self, path=None):
         """Load previous copy of config from disk.
@@ -269,7 +276,7 @@ class Config(dict):
 
         """
         if self._prev_dict:
-            for k, v in self._prev_dict.iteritems():
+            for k, v in six.iteritems(self._prev_dict):
                 if k not in self:
                     self[k] = v
         with open(self.path, 'w') as f:
@@ -284,7 +291,8 @@ def config(scope=None):
         config_cmd_line.append(scope)
     config_cmd_line.append('--format=json')
     try:
-        config_data = json.loads(subprocess.check_output(config_cmd_line))
+        config_data = json.loads(
+            subprocess.check_output(config_cmd_line).decode('UTF-8'))
         if scope is not None:
             return config_data
         return Config(config_data)
@@ -303,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None):
     if unit:
         _args.append(unit)
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
-    except CalledProcessError, e:
+    except CalledProcessError as e:
         if e.returncode == 2:
             return None
         raise
@@ -318,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
-    for k, v in (relation_settings.items() + kwargs.items()):
+    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
         if v is None:
             relation_cmd_line.append('{}='.format(k))
         else:
@@ -335,7 +343,8 @@ def relation_ids(reltype=None):
     relid_cmd_line = ['relation-ids', '--format=json']
     if reltype is not None:
         relid_cmd_line.append(reltype)
-        return json.loads(subprocess.check_output(relid_cmd_line)) or []
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
     return []
 
 
@@ -346,7 +355,8 @@ def related_units(relid=None):
     units_cmd_line = ['relation-list', '--format=json']
     if relid is not None:
         units_cmd_line.extend(('-r', relid))
-    return json.loads(subprocess.check_output(units_cmd_line)) or []
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
 
 
 @cached
@@ -385,21 +395,31 @@ def relations_of_type(reltype=None):
     return relation_data
 
 
+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
-    charmdir = os.environ.get('CHARM_DIR', '')
-    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
-    md = yaml.safe_load(mdf)
     rel_types = []
+    md = metadata()
     for key in ('provides', 'requires', 'peers'):
         section = md.get(key)
         if section:
             rel_types.extend(section.keys())
-    mdf.close()
     return rel_types
 
 
+@cached
+def charm_name():
+    """Get the name of the current charm as is specified on metadata.yaml"""
+    return metadata().get('name')
+
+
 @cached
 def relations():
     """Get a nested dictionary of relation data for all related units"""
@@ -455,7 +475,7 @@ def unit_get(attribute):
     """Get the unit ID for the remote unit"""
     _args = ['unit-get', '--format=json', attribute]
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
 

@@ -14,11 +14,12 @@ import string
 import subprocess
 import hashlib
 from contextlib import contextmanager
-
 from collections import OrderedDict
 
-from hookenv import log
-from fstab import Fstab
+import six
+
+from .hookenv import log
+from .fstab import Fstab
 
 
 def service_start(service_name):
@@ -54,7 +55,9 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
+        output = subprocess.check_output(
+            ['service', service, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError:
         return False
     else:
@@ -67,7 +70,9 @@ def service_running(service):
 def service_available(service_name):
     """Determine whether a system service is available"""
     try:
-        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError as e:
         return 'unrecognized service' not in e.output
     else:
@@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
     return user_info
 
 
+def add_group(group_name, system_group=False):
+    """Add a group to the system"""
+    try:
+        group_info = grp.getgrnam(group_name)
+        log('group {0} already exists!'.format(group_name))
+    except KeyError:
+        log('creating group {0}'.format(group_name))
+        cmd = ['addgroup']
+        if system_group:
+            cmd.append('--system')
+        else:
+            cmd.extend([
+                '--group',
+            ])
+        cmd.append(group_name)
+        subprocess.check_call(cmd)
+        group_info = grp.getgrnam(group_name)
+    return group_info
+
+
 def add_user_to_group(username, group):
     """Add a user to a group"""
     cmd = [
@@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
     cmd.append(from_path)
     cmd.append(to_path)
     log(" ".join(cmd))
-    return subprocess.check_output(cmd).strip()
+    return subprocess.check_output(cmd).decode('UTF-8').strip()
 
 
 def symlink(source, destination):
@@ -130,7 +155,7 @@ def symlink(source, destination):
     subprocess.check_call(cmd)
 
 
-def mkdir(path, owner='root', group='root', perms=0555, force=False):
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
     """Create a directory"""
     log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                           perms))
@@ -146,7 +171,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False):
     os.chown(realpath, uid, gid)
 
 
-def write_file(path, content, owner='root', group='root', perms=0444):
+def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a string"""
     log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
@@ -177,7 +202,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
     cmd_args.extend([device, mountpoint])
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
         return False
 
@@ -191,7 +216,7 @@ def umount(mountpoint, persist=False):
     cmd_args = ['umount', mountpoint]
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error unmounting {}\n{}'.format(mountpoint, e.output))
         return False
 
@@ -218,8 +243,8 @@ def file_hash(path, hash_type='md5'):
     """
     if os.path.exists(path):
         h = getattr(hashlib, hash_type)()
-        with open(path, 'r') as source:
-            h.update(source.read())  # IGNORE:E1101 - it does have update
+        with open(path, 'rb') as source:
+            h.update(source.read())
         return h.hexdigest()
     else:
         return None
@@ -297,7 +322,7 @@ def pwgen(length=None):
     if length is None:
         length = random.choice(range(35, 45))
     alphanumeric_chars = [
-        l for l in (string.letters + string.digits)
+        l for l in (string.ascii_letters + string.digits)
        if l not in 'l0QD1vAEIOUaeiou']
     random_chars = [
        random.choice(alphanumeric_chars) for _ in range(length)]
@@ -306,14 +331,14 @@ def pwgen(length=None):
 
 def list_nics(nic_type):
     '''Return a list of nics of given type(s)'''
-    if isinstance(nic_type, basestring):
+    if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type
     interfaces = []
     for int_type in int_types:
         cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-        ip_output = subprocess.check_output(cmd).split('\n')
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
         ip_output = (line for line in ip_output if line)
         for line in ip_output:
             if line.split()[1].startswith(int_type):
@@ -335,7 +360,7 @@ def set_nic_mtu(nic, mtu):
 
 def get_nic_mtu(nic):
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).split('\n')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@@ -346,7 +371,7 @@ def get_nic_mtu(nic):
 
 def get_nic_hwaddr(nic):
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd)
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
@@ -363,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
     '''
     import apt_pkg
-    from charmhelpers.fetch import apt_cache
     if not pkgcache:
+        from charmhelpers.fetch import apt_cache
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)

@@ -196,7 +196,7 @@ class StoredContext(dict):
         if not os.path.isabs(file_name):
             file_name = os.path.join(hookenv.charm_dir(), file_name)
         with open(file_name, 'w') as file_stream:
-            os.fchmod(file_stream.fileno(), 0600)
+            os.fchmod(file_stream.fileno(), 0o600)
             yaml.dump(config_data, file_stream)
 
     def read_context(self, file_name):
@@ -211,15 +211,19 @@ class StoredContext(dict):
 
 class TemplateCallback(ManagerCallback):
     """
-    Callback class that will render a Jinja2 template, for use as a ready action.
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
 
-    :param str source: The template source file, relative to `$CHARM_DIR/templates`
     :param str target: The target to write the rendered template to
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
     """
-    def __init__(self, source, target, owner='root', group='root', perms=0444):
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444):
         self.source = source
         self.target = target
         self.owner = owner

@@ -4,7 +4,8 @@ from charmhelpers.core import host
 from charmhelpers.core import hookenv
 
 
-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None):
     """
     Render a template.
 
@@ -47,5 +48,5 @@ def render(source, target, context, owner='root', group='root', perms=0444, temp
                     level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target))
+    host.mkdir(os.path.dirname(target), owner, group)
     host.write_file(target, content, owner, group, perms)

@@ -5,10 +5,6 @@ from yaml import safe_load
 from charmhelpers.core.host import (
     lsb_release
 )
-from urlparse import (
-    urlparse,
-    urlunparse,
-)
 import subprocess
 from charmhelpers.core.hookenv import (
     config,
@@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import (
 )
 import os
 
+import six
+if six.PY3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
+
 
 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@@ -149,7 +151,7 @@ def apt_install(packages, options=None, fatal=False):
     cmd = ['apt-get', '--assume-yes']
     cmd.extend(options)
     cmd.append('install')
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -182,7 +184,7 @@ def apt_update(fatal=False):
 def apt_purge(packages, fatal=False):
     """Purge one or more packages"""
     cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -193,7 +195,7 @@ def apt_purge(packages, fatal=False):
 def apt_hold(packages, fatal=False):
     """Hold one or more packages"""
     cmd = ['apt-mark', 'hold']
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@@ -260,7 +262,7 @@ def add_source(source, key=None):
 
     if key:
         if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile() as key_file:
+            with NamedTemporaryFile('w+') as key_file:
                 key_file.write(key)
                 key_file.flush()
                 key_file.seek(0)
@@ -297,14 +299,14 @@ def configure_sources(update=False,
     sources = safe_load((config(sources_var) or '').strip()) or []
     keys = safe_load((config(keys_var) or '').strip()) or None
 
-    if isinstance(sources, basestring):
+    if isinstance(sources, six.string_types):
         sources = [sources]
 
     if keys is None:
         for source in sources:
             add_source(source, None)
     else:
-        if isinstance(keys, basestring):
+        if isinstance(keys, six.string_types):
             keys = [keys]
 
         if len(sources) != len(keys):
@@ -401,7 +403,7 @@ def _run_apt_command(cmd, fatal=False):
     while result is None or result == APT_NO_LOCK:
         try:
             result = subprocess.check_call(cmd, env=env)
-        except subprocess.CalledProcessError, e:
+        except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > APT_NO_LOCK_RETRY_COUNT:
                 raise

@@ -1,8 +1,23 @@
 import os
-import urllib2
-from urllib import urlretrieve
-import urlparse
 import hashlib
 import re
 
+import six
+if six.PY3:
+    from urllib.request import (
+        build_opener, install_opener, urlopen, urlretrieve,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+    )
+    from urllib.parse import urlparse, urlunparse, parse_qs
+    from urllib.error import URLError
+else:
+    from urllib import urlretrieve
+    from urllib2 import (
+        build_opener, install_opener, urlopen,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+        URLError
+    )
+    from urlparse import urlparse, urlunparse, parse_qs
+
 from charmhelpers.fetch import (
     BaseFetchHandler,
@@ -15,6 +30,24 @@ from charmhelpers.payload.archive import (
 from charmhelpers.core.host import mkdir, check_hash
 
 
+def splituser(host):
+    '''urllib.splituser(), but six's support of this seems broken'''
+    _userprog = re.compile('^(.*)@(.*)$')
+    match = _userprog.match(host)
+    if match:
+        return match.group(1, 2)
+    return None, host
+
+
+def splitpasswd(user):
+    '''urllib.splitpasswd(), but six's support of this is missing'''
+    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+    match = _passwdprog.match(user)
+    if match:
+        return match.group(1, 2)
+    return user, None
+
+
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """
     Handler to download archive files from arbitrary URLs.
@@ -42,20 +75,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         """
         # propogate all exceptions
         # URLError, OSError, etc
-        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
+        proto, netloc, path, params, query, fragment = urlparse(source)
         if proto in ('http', 'https'):
-            auth, barehost = urllib2.splituser(netloc)
+            auth, barehost = splituser(netloc)
            if auth is not None:
-                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
-                username, password = urllib2.splitpasswd(auth)
-                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                source = urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = splitpasswd(auth)
+                passman = HTTPPasswordMgrWithDefaultRealm()
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
-                authhandler = urllib2.HTTPBasicAuthHandler(passman)
-                opener = urllib2.build_opener(authhandler)
-                urllib2.install_opener(opener)
-        response = urllib2.urlopen(source)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
        try:
            with open(dest, 'w') as dest_file:
                dest_file.write(response.read())
@@ -91,17 +124,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
         try:
             self.download(source, dld_file)
-        except urllib2.URLError as e:
+        except URLError as e:
             raise UnhandledSource(e.reason)
         except OSError as e:
             raise UnhandledSource(e.strerror)
-        options = urlparse.parse_qs(url_parts.fragment)
+        options = parse_qs(url_parts.fragment)
         for key, value in options.items():
-            if key in hashlib.algorithms:
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
                 check_hash(dld_file, value, key)
         if checksum:
             check_hash(dld_file, checksum, hash_type)

@@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir
 
+import six
+if six.PY3:
+    raise ImportError('bzrlib does not support Python3')
+
 try:
     from bzrlib.branch import Branch
 except ImportError:
@@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                 branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.branch(source, dest_dir)
         except OSError as e:

@@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir
 
+import six
+if six.PY3:
+    raise ImportError('GitPython does not support Python 3')
+
 try:
     from git import Repo
 except ImportError:
@@ -17,7 +21,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
     """Handler for git branches via generic and github URLs"""
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        #TODO (mattyw) no support for ssh git@ yet
+        # TODO (mattyw) no support for ssh git@ yet
         if url_parts.scheme not in ('http', 'https', 'git'):
             return False
         else:
@@ -30,13 +34,16 @@ class GitUrlFetchHandler(BaseFetchHandler):
         repo = Repo.clone_from(source, dest)
         repo.git.checkout(branch)
 
-    def install(self, source, branch="master"):
+    def install(self, source, branch="master", dest=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
-                                branch_name)
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch)
         except OSError as e:

@@ -14,6 +14,7 @@ utils.register_configs = _register_configs
 
 TO_PATCH = [
     # cinder_utils
+    'ensure_ceph_pool',
     'ensure_ceph_keyring',
     'register_configs',
     'restart_map',
@@ -22,7 +23,6 @@ TO_PATCH = [
     # charmhelpers.core.hookenv
     'config',
     'relation_ids',
-    'relation_get',
     'relation_set',
     'service_name',
     'log',
@@ -30,6 +30,7 @@ TO_PATCH = [
     'apt_install',
     'apt_update',
     # charmhelpers.contrib.hahelpers.cluster_utils
+    'eligible_leader',
     'execd_preinstall',
     'CephSubordinateContext'
 ]
@@ -68,14 +69,13 @@ class TestCinderHooks(CharmTestCase):
     def test_ceph_changed(self, mock_config):
         '''It ensures ceph assets created on ceph changed'''
         self.CONFIGS.complete_contexts.return_value = ['ceph']
-        rsp = json.dumps({'exit_code': 0})
-        self.relation_get.return_value = {'broker_rsp': rsp}
         self.service_name.return_value = 'cinder'
         self.ensure_ceph_keyring.return_value = True
         hooks.hooks.execute(['hooks/ceph-relation-changed'])
         self.ensure_ceph_keyring.assert_called_with(service='cinder',
                                                     user='cinder',
                                                     group='cinder')
+        self.ensure_ceph_pool.assert_called_with(service='cinder', replicas=3)
         self.assertTrue(self.CONFIGS.write_all.called)
         self.set_ceph_env_variables.assert_called_with(service='cinder')
 
@@ -91,6 +91,15 @@ class TestCinderHooks(CharmTestCase):
         self.assertTrue(self.log.called)
         self.assertFalse(self.CONFIGS.write_all.called)
 
+    @patch('charmhelpers.core.hookenv.config')
+    def test_ceph_changed_no_leadership(self, mock_config):
+        '''It does not attempt to create ceph pool if not leader'''
+        self.eligible_leader.return_value = False
+        self.service_name.return_value = 'cinder'
+        self.ensure_ceph_keyring.return_value = True
+        hooks.hooks.execute(['hooks/ceph-relation-changed'])
+        self.assertFalse(self.ensure_ceph_pool.called)
+
     @patch('charmhelpers.core.hookenv.config')
     @patch.object(hooks, 'storage_backend')
     def test_upgrade_charm_related(self, _storage_backend, mock_config):