Merged next in and resolved conflicts

Commit 8aca48d035 by Liam Young, 2015-01-09 15:54:17 +00:00
45 changed files with 1449 additions and 601 deletions

View File

@ -12,4 +12,5 @@ include:
- payload.execd
- contrib.peerstorage
- contrib.network.ip
- contrib.python.packages
- contrib.charmsupport

View File

@ -70,6 +70,14 @@ options:
default: "services"
type: string
description: "Name of tenant to associate service credentials."
service-admin-prefix:
type: string
default:
description: |
When service relations are joined they provide a name used to create a
service admin_username in keystone. The name used may be too crude for
some situations, e.g. a pre-populated LDAP identity backend. If set, this
option will be prepended to each service admin_username.
# Database settings used to request access via shared-db-relation-* relations
database:
default: "keystone"

View File

@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa

View File

@ -16,6 +16,8 @@ import os
from socket import gethostname as get_unit_hostname
import six
from charmhelpers.core.hookenv import (
log,
relation_ids,
@ -27,12 +29,19 @@ from charmhelpers.core.hookenv import (
WARNING,
unit_get,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
class HAIncompleteConfig(Exception):
pass
class CRMResourceNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
@ -67,24 +76,30 @@ def is_clustered():
return False
def is_crm_leader(resource):
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
as returned by calling the external "crm" command.
We allow this operation to be retried to avoid the possibility of getting a
false negative. See LP #1396246 for more info.
"""
cmd = [
"crm", "resource",
"show", resource
]
cmd = ['crm', 'resource', 'show', resource]
try:
status = subprocess.check_output(cmd)
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
status = None
if status and get_unit_hostname() in status:
return True
if status and "resource %s is NOT running" % (resource) in status:
raise CRMResourceNotFound("CRM resource %s not found" % (resource))
return False
def is_leader(resource):
@ -150,34 +165,42 @@ def https():
return False
def determine_api_port(public_port):
def determine_api_port(public_port, singlenode_mode=False):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_apache_port(public_port):
def determine_apache_port(public_port, singlenode_mode=False):
'''
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if len(peer_units()) > 0 or is_clustered():
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
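A worked example makes the offset logic concrete (9292 is an assumed public port):

# determine_api_port(9292):
#   standalone, no TLS:                 i=0 -> 9292
#   clustered or singlenode_mode=True:  i=1 -> 9282 (haproxy keeps 9292)
#   clustered and https():              i=2 -> 9272 (apache takes another 10)
# determine_apache_port(9292) shifts at most once, returning 9282 when
# clustered or in singlenode_mode.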
@ -197,7 +220,7 @@ def get_hacluster_config():
for setting in settings:
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in conf.iteritems() if v is None]
[missing.append(s) for s, v in six.iteritems(conf) if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig

View File

@ -1,15 +1,12 @@
import glob
import re
import subprocess
import sys
from functools import partial
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
WARNING,
ERROR,
log
)
@ -34,31 +31,28 @@ def _validate_cidr(network):
network)
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network: %s" % network)
raise ValueError(errmsg)
def get_address_in_network(network, fallback=None, fatal=False):
"""
Get an IPv4 or IPv6 address within the network from the host.
"""Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'.
:param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1).
"""
def not_found_error_out():
log("No IP address found in network: %s" % network,
level=ERROR)
sys.exit(1)
if network is None:
if fallback is not None:
return fallback
if fatal:
no_ip_found_error_out(network)
else:
if fatal:
not_found_error_out()
else:
return None
return None
_validate_cidr(network)
network = netaddr.IPNetwork(network)
@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False):
return fallback
if fatal:
not_found_error_out()
no_ip_found_error_out(network)
return None
def is_ipv6(address):
'''Determine whether provided address is IPv6 or not'''
"""Determine whether provided address is IPv6 or not."""
try:
address = netaddr.IPAddress(address)
except netaddr.AddrFormatError:
# probably a hostname - so not an address at all!
return False
else:
return address.version == 6
return address.version == 6
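The helpers above lean on netaddr; a minimal standalone sketch of the checks involved (addresses are illustrative):

import netaddr

netaddr.IPAddress('2001:db8::1').version   # 6, so is_ipv6() returns True
netaddr.IPNetwork('192.168.1.5/24') in netaddr.IPNetwork('192.168.1.0/24')  # True
# A hostname such as 'myhost' raises netaddr.AddrFormatError, which
# is_ipv6() catches and maps to False.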
def is_address_in_network(network, address):
@ -113,11 +108,13 @@ def is_address_in_network(network, address):
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Network (%s) is not in CIDR presentation format" %
network)
try:
address = netaddr.IPAddress(address)
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Address (%s) is not in correct presentation format" %
address)
if address in network:
return True
else:
@ -147,6 +144,7 @@ def _get_for_address(address, key):
return iface
else:
return addresses[netifaces.AF_INET][0][key]
if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
@ -160,41 +158,42 @@ def _get_for_address(address, key):
return str(cidr).split('/')[1]
else:
return addr[key]
return None
get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def format_ipv6_addr(address):
"""
IPv6 needs to be wrapped with [] in url link to parse correctly.
"""If address is IPv6, wrap it in '[]' otherwise return None.
This is required by most configuration files when specifying IPv6
addresses.
"""
if is_ipv6(address):
address = "[%s]" % address
else:
log("Not a valid ipv6 address: %s" % address, level=WARNING)
address = None
return "[%s]" % address
return address
return None
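With the rewrite above, format_ipv6_addr() returns the bracketed form only for valid IPv6 input; a quick sketch:

format_ipv6_addr('2001:db8::1')   # '[2001:db8::1]'
format_ipv6_addr('10.0.0.1')      # None (IPv4)
format_ipv6_addr('not-an-ip')     # None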
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""
Return the assigned IP address for a given interface, if any, or [].
"""
"""Return the assigned IP address for a given interface, if any."""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
if not exc_list:
exc_list = []
try:
inet_num = getattr(netifaces, inet_type)
except AttributeError:
raise Exception('Unknown inet type ' + str(inet_type))
raise Exception("Unknown inet type '%s'" % str(inet_type))
interfaces = netifaces.interfaces()
if inc_aliases:
@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
for _iface in interfaces:
if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface)
if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface)
ifaces.sort()
else:
if iface not in interfaces:
if fatal:
raise Exception("%s not found " % (iface))
raise Exception("Interface '%s' not found " % (iface))
else:
return []
else:
ifaces = [iface]
@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type))
return addresses
return sorted(addresses)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
@ -241,6 +246,7 @@ def get_iface_from_addr(addr):
raw = re.match(ll_key, _addr)
if raw:
_addr = raw.group(1)
if _addr == addr:
log("Address '%s' is configured on iface '%s'" %
(addr, iface))
@ -251,8 +257,9 @@ def get_iface_from_addr(addr):
def sniff_iface(f):
"""If no iface provided, inject net iface inferred from unit private
address.
"""Ensure decorated function is called with a value for iface.
If no iface provided, inject net iface inferred from unit private address.
"""
def iface_sniffer(*args, **kwargs):
if not kwargs.get('iface', None):
@ -295,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
if global_addrs:
# Make sure any found global addresses are not temporary
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd)
out = subprocess.check_output(cmd).decode('UTF-8')
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
else:
@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
return addrs
if fatal:
raise Exception("Interface '%s' doesn't have a scope global "
raise Exception("Interface '%s' does not have a scope global "
"non-temporary ipv6 address." % iface)
return []
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
"""
Return a list of bridges on the system or []
"""
b_rgex = vnic_dir + '/*/bridge'
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
"""Return a list of bridges on the system."""
b_regex = "%s/*/bridge" % vnic_dir
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
"""
Return a list of nics comprising a given bridge on the system or []
"""
brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
"""Return a list of nics comprising a given bridge on the system."""
brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_regex)]
def is_bridge_member(nic):
"""
Check if a given nic is a member of a bridge
"""
"""Check if a given nic is a member of a bridge."""
for bridge in get_bridges():
if nic in get_bridge_nics(bridge):
return True
return False
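These bridge helpers just glob the kernel's sysfs tree; a sketch of the layout they inspect (device names assumed):

# /sys/devices/virtual/net/br0/bridge     -> 'br0' is a bridge
# /sys/devices/virtual/net/br0/brif/eth1  -> 'eth1' is enslaved to br0
get_bridges()             # ['br0']
get_bridge_nics('br0')    # ['eth1']
is_bridge_member('eth1')  # True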

View File

@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):

View File

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:

File diff suppressed because it is too large

View File

@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
_address_map = {
ADDRESS_MAP = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
@ -33,16 +31,14 @@ _address_map = {
def canonical_url(configs, endpoint_type=PUBLIC):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:configs OSTemplateRenderer: A config tempating object to inspect for
a complete https context.
:endpoint_type str: The endpoint type to resolve.
:returns str: Base URL for services on the current service unit.
'''
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:param returns: str base URL for services on the current service unit.
"""
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC):
def resolve_address(endpoint_type=PUBLIC):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return unit address ensuring address is on configured net
split if one is configured.
:param endpoint_type: Network endpoint type
"""
resolved_address = None
if is_clustered():
if config(_address_map[endpoint_type]['config']) is None:
# Assume vip is simple and pass back directly
resolved_address = config('vip')
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
clustered = is_clustered()
if clustered:
if not net_addr:
# If no net-splits defined, we expect a single vip
resolved_address = vips[0]
else:
for vip in config('vip').split():
if is_address_in_network(
config(_address_map[endpoint_type]['config']),
vip):
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network(
config(_address_map[endpoint_type]['config']), fallback_addr)
fallback_addr = unit_get(net_fallback)
resolved_address = get_address_in_network(net_addr, fallback_addr)
if resolved_address is None:
raise ValueError('Unable to resolve a suitable IP address'
' based on charm state and configuration')
else:
return resolved_address
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
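To illustrate the resolution order, some assumed configurations and the result resolve_address() would produce (all values hypothetical):

# clustered, vip='10.0.0.100', net config unset    -> '10.0.0.100'
# clustered, vip='10.0.0.100 10.5.0.100',
#            os-public-network='10.5.0.0/16'       -> '10.5.0.100'
# not clustered, os-public-network='10.5.0.0/16'   -> unit address in that
#                                                     network, else the
#                                                     'public-address' fallback
resolve_address(endpoint_type=PUBLIC)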

View File

@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).strip()
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
@ -138,10 +138,31 @@ def neutron_plugins():
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
},
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata'],
'packages': [[headers_package()] + determine_dkms_package(),
['calico-compute',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata']],
'server_packages': ['neutron-server', 'calico-control'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
@ -162,7 +183,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log('Error: Network manager does not support plugins.')
log("Network manager '%s' does not support plugins." % (manager),
level=ERROR)
raise Exception
try:

View File

@ -35,10 +35,12 @@ listen stats {{ stat_port }}
stats auth admin:password
{% if frontends -%}
{% for service, ports in service_ports.iteritems() -%}
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
{% if ipv6 -%}
bind :::{{ ports[0] }}
{% endif -%}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
@ -46,7 +48,7 @@ frontend tcp-in_{{ service }}
{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
balance leastconn
{% for unit, address in frontends[frontend]['backends'].iteritems() -%}
{% for unit, address in frontends[frontend]['backends'].items() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor -%}

View File

@ -1,13 +1,13 @@
import os
from charmhelpers.fetch import apt_install
import six
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
order by OpenStack release.
"""
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in OPENSTACK_CODENAMES.itervalues()]
for rel in six.itervalues(OPENSTACK_CODENAMES)]
if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir,
@ -258,7 +258,7 @@ class OSConfigRenderer(object):
"""
Write out all registered config files.
"""
[self.write(k) for k in self.templates.iterkeys()]
[self.write(k) for k in six.iterkeys(self.templates)]
def set_release(self, openstack_release):
"""
@ -275,5 +275,5 @@ class OSConfigRenderer(object):
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in self.templates.itervalues()]
for i in six.itervalues(self.templates)]
return interfaces

View File

@ -2,6 +2,7 @@
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps
import subprocess
import json
@ -9,11 +10,13 @@ import os
import socket
import sys
import six
import yaml
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
ERROR,
INFO,
relation_ids,
relation_set
@ -30,7 +33,8 @@ from charmhelpers.contrib.network.ip import (
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -112,7 +116,7 @@ def get_os_codename_install_source(src):
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in OPENSTACK_CODENAMES.iteritems():
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v in src:
return v
@ -133,7 +137,7 @@ def get_os_codename_version(vers):
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in OPENSTACK_CODENAMES.iteritems():
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
@ -193,7 +197,7 @@ def get_os_version_package(pkg, fatal=True):
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in vers_map.iteritems():
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
@ -317,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in env_vars.iteritems() if u != "script_path"]
for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package):
@ -350,8 +354,8 @@ def ensure_block_device(block_device):
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: '
'block_device=%s.' % block_device, level=ERROR)
error_out('prepare_storage(): Missing required input: block_device=%s.'
% block_device)
if block_device.startswith('/dev/'):
bdev = block_device
@ -367,8 +371,7 @@ def ensure_block_device(block_device):
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev,
level=ERROR)
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
@ -417,7 +420,7 @@ def ns_query(address):
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, basestring):
elif isinstance(address, six.string_types):
rtype = 'A'
else:
return None
@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True):
return result.split('.')[0]
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
@ -477,10 +488,132 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
keys = kwargs.keys()
for key in keys:
for key in list(kwargs.keys()):
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hook to specify minimum supported release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap
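A hypothetical use of the decorator (the hook and package names are assumptions):

@os_requires_version('icehouse', 'python-keystoneclient')
def upgrade_charm():
    # Raises before running the hook body if os_release() of the
    # package predates icehouse.
    pass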
def git_install_requested():
"""Returns true if openstack-origin-git is specified."""
return config('openstack-origin-git') != "None"
requirements_dir = None
def git_clone_and_install(file_name, core_project):
"""Clone/install all OpenStack repos specified in yaml config file."""
global requirements_dir
if file_name == "None":
return
yaml_file = os.path.join(charm_dir(), file_name)
# clone/install the requirements project first
installed = _git_clone_and_install_subset(yaml_file,
whitelist=['requirements'])
if 'requirements' not in installed:
error_out('requirements git repository must be specified')
# clone/install all other projects except requirements and the core project
blacklist = ['requirements', core_project]
_git_clone_and_install_subset(yaml_file, blacklist=blacklist,
update_requirements=True)
# clone/install the core project
whitelist = [core_project]
installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
update_requirements=True)
if core_project not in installed:
error_out('{} git repository must be specified'.format(core_project))
def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
update_requirements=False):
"""Clone/install subset of OpenStack repos specified in yaml config file."""
global requirements_dir
installed = []
with open(yaml_file, 'r') as fd:
projects = yaml.load(fd)
for proj, val in projects.items():
# The project subset is chosen based on the following 3 rules:
# 1) If project is in blacklist, we don't clone/install it, period.
# 2) If whitelist is empty, we clone/install everything else.
# 3) If whitelist is not empty, we clone/install everything in the
# whitelist.
if proj in blacklist:
continue
if whitelist and proj not in whitelist:
continue
repo = val['repository']
branch = val['branch']
repo_dir = _git_clone_and_install_single(repo, branch,
update_requirements)
if proj == 'requirements':
requirements_dir = repo_dir
installed.append(proj)
return installed
def _git_clone_and_install_single(repo, branch, update_requirements=False):
"""Clone and install a single git repository."""
dest_parent_dir = "/mnt/openstack-git/"
dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
if not os.path.exists(dest_parent_dir):
juju_log('Host dir not mounted at {}. '
'Creating directory there instead.'.format(dest_parent_dir))
os.mkdir(dest_parent_dir)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
else:
repo_dir = dest_dir
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
pip_install(repo_dir)
return repo_dir
def _git_update_requirements(package_dir, reqs_dir):
"""Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt."""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
cmd = "python update.py {}".format(package_dir)
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from global-requirements.txt".format(package))
os.chdir(orig_dir)
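The yaml file driving git_clone_and_install() maps project names to a repository and branch, and must include a 'requirements' project; a hypothetical layout (URLs and branches assumed):

# openstack-origin-git.yaml (illustrative):
#   requirements:
#     repository: 'git://git.openstack.org/openstack/requirements'
#     branch: 'stable/icehouse'
#   keystone:
#     repository: 'git://git.openstack.org/openstack/keystone'
#     branch: 'stable/icehouse'
git_clone_and_install('openstack-origin-git.yaml', core_project='keystone')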

View File

@ -1,3 +1,4 @@
import six
from charmhelpers.core.hookenv import relation_id as current_relation_id
from charmhelpers.core.hookenv import (
is_relation_made,
@ -93,7 +94,7 @@ def peer_echo(includes=None):
if ex in echo_data:
echo_data.pop(ex)
else:
for attribute, value in rdata.iteritems():
for attribute, value in six.iteritems(rdata):
for include in includes:
if include in attribute:
echo_data[attribute] = value
@ -119,8 +120,8 @@ def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
relation_settings=relation_settings,
**kwargs)
if is_relation_made(peer_relation_name):
for key, value in dict(kwargs.items() +
relation_settings.items()).iteritems():
for key, value in six.iteritems(dict(list(kwargs.items()) +
list(relation_settings.items()))):
key_prefix = relation_id or current_relation_id()
peer_store(key_prefix + delimiter + key,
value,

View File

@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log
try:
from pip import main as pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as pip_execute
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
if key in available:
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, **options):
"""Install a requirements file """
command = ["install"]
available_options = ('proxy', 'src', 'log', )
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
def pip_install(package, fatal=False, **options):
"""Install a python package"""
command = ["install"]
available_options = ('proxy', 'src', 'log', "index-url", )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Installing {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
available_options = ('proxy', 'log', )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Uninstalling {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])
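Example calls for the pip helpers above (package names and proxy URL are assumptions):

pip_install('netaddr', proxy='http://squid.internal:3128')
pip_install(['six', 'pyyaml'])                  # lists are passed through
pip_install_requirements('/tmp/requirements.txt')
pip_uninstall('netaddr')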

View File

@ -16,19 +16,18 @@ import time
from subprocess import (
check_call,
check_output,
CalledProcessError
CalledProcessError,
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
DEBUG,
INFO,
WARNING,
ERROR
ERROR,
)
from charmhelpers.core.host import (
mount,
mounts,
@ -37,7 +36,6 @@ from charmhelpers.core.host import (
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
@ -56,99 +54,85 @@ CEPH_CONF = """[global]
def install():
''' Basic Ceph client installation '''
"""Basic Ceph client installation."""
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
''' Check to see if a RADOS block device exists '''
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id', service,
'--pool', pool])
out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]).decode('UTF-8')
except CalledProcessError:
return False
else:
return rbd_img in out
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
''' Create a new RADOS block device '''
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
"""Create a new RADOS block device."""
cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
'--pool', pool]
check_call(cmd)
def pool_exists(service, name):
''' Check to see if a RADOS pool already exists '''
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service, 'lspools'])
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
else:
return name in out
return name in out
def get_osds(service):
'''
Return a list of all Ceph Object Storage Daemons
currently in the cluster
'''
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls', '--format=json']))
else:
return None
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def create_pool(service, name, replicas=2):
''' Create a new RADOS pool '''
def create_pool(service, name, replicas=3):
"""Create a new RADOS pool."""
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 / replicas)
pgnum = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'create',
name, str(pgnum)
]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
check_call(cmd)
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'set', name,
'size', str(replicas)
]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
str(replicas)]
check_call(cmd)
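A worked example of the placement-group arithmetic above (OSD count assumed):

# With 12 OSDs and replicas=3: pgnum = 12 * 100 // 3 = 400, so the calls
# issued are effectively:
#   ceph --id <service> osd pool create <name> 400
#   ceph --id <service> osd pool set <name> size 3
# When the OSD count cannot be queried (older ceph), pgnum falls back to 200.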
def delete_pool(service, name):
''' Delete a RADOS pool from ceph '''
cmd = [
'ceph', '--id', service,
'osd', 'pool', 'delete',
name, '--yes-i-really-really-mean-it'
]
"""Delete a RADOS pool from ceph."""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
'--yes-i-really-really-mean-it']
check_call(cmd)
@ -161,44 +145,43 @@ def _keyring_path(service):
def create_keyring(service, key):
''' Create a new Ceph keyring containing key'''
"""Create a new Ceph keyring containing key."""
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
log('Ceph keyring exists at %s.' % keyring, level=WARNING)
return
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.{}'.format(service),
'--add-key={}'.format(key)
]
cmd = ['ceph-authtool', keyring, '--create-keyring',
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
def create_key_file(service, key):
''' Create a file containing key '''
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
''' Query named relation 'ceph' to detemine current nodes '''
"""Query named relation 'ceph' to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph '''
"""Perform basic configuration of Ceph."""
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
''' Determine whether a RADOS block device is mapped locally '''
"""Determine whether a RADOS block device is mapped locally."""
try:
out = check_output(['rbd', 'showmapped'])
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
except CalledProcessError:
return False
else:
return name in out
return name in out
def map_block_storage(service, pool, image):
''' Map a RADOS block device for local use '''
"""Map a RADOS block device for local use."""
cmd = [
'rbd',
'map',
@ -235,31 +218,32 @@ def map_block_storage(service, pool, image):
def filesystem_mounted(fs):
''' Determine whether a filesystem is already mounted '''
"""Determine whether a filesystem is already mounted."""
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
''' Make a new filesystem on the specified block device '''
"""Make a new filesystem on the specified block device."""
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
log('Gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
log('Waiting for block device %s to appear' % blk_device,
level=DEBUG)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
log('Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
''' Migrate data in data_src_dst to blk_device and then remount '''
"""Migrate data in data_src_dst to blk_device and then remount."""
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst):
# TODO: re-use
def modprobe(module):
''' Load a kernel module and configure for auto-load on reboot '''
log('ceph: Loading kernel module', level=INFO)
"""Load a kernel module and configure for auto-load on reboot."""
log('Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
@ -289,7 +273,7 @@ def modprobe(module):
def copy_files(src, dst, symlinks=False, ignore=None):
''' Copy files from src to dst '''
"""Copy files from src to dst."""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None):
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
NOTE: This function must only be called from a single service unit for
blk_device, fstype, system_services=[],
replicas=3):
"""NOTE: This function must only be called from a single service unit for
the same rbd_img otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool {}.'.format(pool))
create_pool(service, pool)
log('Creating new pool {}.'.format(pool), level=INFO)
create_pool(service, pool, replicas=replicas)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image ({}).'.format(rbd_img))
log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
for svc in system_services:
if service_running(svc):
log('ceph: Stopping services {} prior to migrating data.'
.format(svc))
log('Stopping services {} prior to migrating data.'
.format(svc), level=DEBUG)
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('ceph: Starting service {} after migrating data.'
.format(svc))
log('Starting service {} after migrating data.'
.format(svc), level=DEBUG)
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None):
'''
Ensures a ceph keyring is created for a named service
and optionally ensures user and group ownership.
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
'''
"""
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
''' Retrieve the local version of ceph '''
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
output = check_output(cmd).decode('US-ASCII')
output = output.split()
if len(output) > 3:
return output[2]
@ -385,3 +372,46 @@ def ceph_version():
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
Multiple operations can be added to a request and sent to the Ceph broker
to be executed.
Request is json-encoded for sending over the wire.
The API is versioned and defaults to version 1.
"""
def __init__(self, api_version=1):
self.api_version = api_version
self.ops = []
def add_op_create_pool(self, name, replica_count=3):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count})
@property
def request(self):
return json.dumps({'api-version': self.api_version, 'ops': self.ops})
class CephBrokerRsp(object):
"""Ceph broker response.
Response is json-decoded and contents provided as methods/properties.
The API is versioned and defaults to version 1.
"""
def __init__(self, encoded_rsp):
self.api_version = None
self.rsp = json.loads(encoded_rsp)
@property
def exit_code(self):
return self.rsp.get('exit-code')
@property
def exit_msg(self):
return self.rsp.get('stderr')
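A minimal sketch of a broker round trip using the two classes above (the JSON payload shown is illustrative):

rq = CephBrokerRq()
rq.add_op_create_pool(name='glance', replica_count=3)
rq.request    # '{"api-version": 1, "ops": [{"op": "create-pool", ...}]}'

rsp = CephBrokerRsp('{"exit-code": 0, "stderr": null}')
rsp.exit_code    # 0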

View File

@ -1,12 +1,12 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
import six
##################################################
# loopback device helpers.
@ -37,7 +37,7 @@ def create_loopback(file_path):
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in loopback_devices().iteritems():
for d, f in six.iteritems(loopback_devices()):
if f == file_path:
return d
@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in loopback_devices().iteritems():
for d, f in six.iteritems(loopback_devices()):
if f == path:
return d

View File

@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device):
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
l = l.decode('UTF-8')
if l.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:])
return vg

View File

@ -30,7 +30,8 @@ def zap_disk(block_device):
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
call(['sgdisk', '--zap-all', '--mbrtogpt',
'--clear', block_device])
dev_end = check_output(['blockdev', '--getsz', block_device])
dev_end = check_output(['blockdev', '--getsz',
block_device]).decode('UTF-8')
gpt_end = int(dev_end.split()[0]) - 100
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=1M', 'count=1'])
@ -47,7 +48,7 @@ def is_device_mounted(device):
it doesn't.
'''
is_partition = bool(re.search(r".*[0-9]+\b", device))
out = check_output(['mount'])
out = check_output(['mount']).decode('UTF-8')
if is_partition:
return bool(re.search(device + r"\b", out))
return bool(re.search(device + r"[0-9]+\b", out))

View File

@ -185,13 +185,14 @@ def ssh_authorized_peers(peer_interface, user, group=None,
relation_set(ssh_authorized_hosts=authed_hosts)
def _run_as_user(user):
def _run_as_user(user, gid=None):
try:
user = pwd.getpwnam(user)
except KeyError:
log('Invalid user: %s' % user)
raise Exception
uid, gid = user.pw_uid, user.pw_gid
uid = user.pw_uid
gid = gid or user.pw_gid
os.environ['HOME'] = user.pw_dir
def _inner():
@ -200,8 +201,8 @@ def _run_as_user(user):
return _inner
def run_as_user(user, cmd):
return check_output(cmd, preexec_fn=_run_as_user(user), cwd='/')
def run_as_user(user, cmd, gid=None):
return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/')
def collect_authed_hosts(peer_interface):
@ -227,8 +228,8 @@ def collect_authed_hosts(peer_interface):
return hosts
def sync_path_to_host(path, host, user, verbose=False):
cmd = copy(BASE_CMD)
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None):
cmd = cmd or copy(BASE_CMD)
if not verbose:
cmd.append('-silent')
@ -241,17 +242,23 @@ def sync_path_to_host(path, host, user, verbose=False):
try:
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
run_as_user(user, cmd)
run_as_user(user, cmd, gid)
except:
log('Error syncing remote files')
def sync_to_peer(host, user, paths=[], verbose=False):
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None):
'''Sync paths to a specific host'''
[sync_path_to_host(p, host, user, verbose) for p in paths]
if paths:
for p in paths:
sync_path_to_host(p, host, user, verbose, cmd, gid)
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
def sync_to_peers(peer_interface, user, paths=None,
verbose=False, cmd=None, gid=None):
'''Sync paths to all authenticated peer hosts'''
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose)
'''The gid is an integer group id: passing it lets the user operate on '''
'''a directory whose group differs from the user's primary group.'''
if paths:
for host in collect_authed_hosts(peer_interface):
sync_to_peer(host, user, paths, verbose, cmd, gid)

View File

@ -0,0 +1,41 @@
#
# Copyright 2014 Canonical Ltd.
#
# Authors:
# Edward Hope-Morley <opentastic@gmail.com>
#
import time
from charmhelpers.core.hookenv import (
log,
INFO,
)
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
"""If the decorated function raises exception exc_type, allow num_retries
retry attempts before re-raising the exception.
"""
def _retry_on_exception_inner_1(f):
def _retry_on_exception_inner_2(*args, **kwargs):
retries = num_retries
multiplier = 1
while True:
try:
return f(*args, **kwargs)
except exc_type:
if not retries:
raise
delay = base_delay * multiplier
multiplier += 1
log("Retrying '%s' %d more times (delay=%s)" %
(f.__name__, retries, delay), level=INFO)
retries -= 1
if delay:
time.sleep(delay)
return _retry_on_exception_inner_2
return _retry_on_exception_inner_1
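This is the decorator applied to is_crm_leader() earlier in this commit; note the backoff is linear (base_delay, then 2*base_delay, and so on):

@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
    ...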

View File

@ -3,10 +3,11 @@
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import io
import os
class Fstab(file):
class Fstab(io.FileIO):
"""This class extends file in order to implement a file reader/writer
for file `/etc/fstab`
"""
@ -24,8 +25,8 @@ class Fstab(file):
options = "defaults"
self.options = options
self.d = d
self.p = p
self.d = int(d)
self.p = int(p)
def __eq__(self, o):
return str(self) == str(o)
@ -45,7 +46,7 @@ class Fstab(file):
self._path = path
else:
self._path = self.DEFAULT_PATH
file.__init__(self, self._path, 'r+')
super(Fstab, self).__init__(self._path, 'rb+')
def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any
@ -58,8 +59,9 @@ class Fstab(file):
def entries(self):
self.seek(0)
for line in self.readlines():
line = line.decode('us-ascii')
try:
if not line.startswith("#"):
if line.strip() and not line.startswith("#"):
yield self._hydrate_entry(line)
except ValueError:
pass
@ -75,14 +77,14 @@ class Fstab(file):
if self.get_entry_by_attr('device', entry.device):
return False
self.write(str(entry) + '\n')
self.write((str(entry) + '\n').encode('us-ascii'))
self.truncate()
return entry
def remove_entry(self, entry):
self.seek(0)
lines = self.readlines()
lines = [l.decode('us-ascii') for l in self.readlines()]
found = False
for index, line in enumerate(lines):
@ -97,7 +99,7 @@ class Fstab(file):
lines.remove(line)
self.seek(0)
self.write(''.join(lines))
self.write(''.join(lines).encode('us-ascii'))
self.truncate()
return True
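A cautious usage sketch (this assumes entries is exposed as a property, and that Fstab closes like the io.FileIO it now subclasses):

fstab = Fstab()               # opens /etc/fstab in 'rb+' mode
for entry in fstab.entries:   # blank lines and comments are skipped
    print(str(entry))
fstab.close()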

View File

@ -9,9 +9,14 @@ import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
import six
if not six.PY3:
from UserDict import UserDict
else:
from collections import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@ -63,16 +68,18 @@ def log(message, level=None):
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
subprocess.call(command)
class Serializable(UserDict.IterableUserDict):
class Serializable(UserDict):
"""Wrapper, an object that can be serialized to yaml or json"""
def __init__(self, obj):
# wrap the object
UserDict.IterableUserDict.__init__(self)
UserDict.__init__(self)
self.data = obj
def __getattr__(self, attr):
@ -214,6 +221,12 @@ class Config(dict):
except KeyError:
return (self._prev_dict or {})[key]
def keys(self):
prev_keys = []
if self._prev_dict is not None:
prev_keys = self._prev_dict.keys()
return list(set(prev_keys + list(dict.keys(self))))
def load_previous(self, path=None):
"""Load previous copy of config from disk.
@ -263,7 +276,7 @@ class Config(dict):
"""
if self._prev_dict:
for k, v in self._prev_dict.iteritems():
for k, v in six.iteritems(self._prev_dict):
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
@ -278,7 +291,8 @@ def config(scope=None):
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
config_data = json.loads(subprocess.check_output(config_cmd_line))
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
if scope is not None:
return config_data
return Config(config_data)
@ -297,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None):
if unit:
_args.append(unit)
try:
return json.loads(subprocess.check_output(_args))
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None
except CalledProcessError, e:
except CalledProcessError as e:
if e.returncode == 2:
return None
raise
@ -312,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
relation_cmd_line = ['relation-set']
if relation_id is not None:
relation_cmd_line.extend(('-r', relation_id))
for k, v in (relation_settings.items() + kwargs.items()):
for k, v in (list(relation_settings.items()) + list(kwargs.items())):
if v is None:
relation_cmd_line.append('{}='.format(k))
else:
@ -329,7 +343,8 @@ def relation_ids(reltype=None):
relid_cmd_line = ['relation-ids', '--format=json']
if reltype is not None:
relid_cmd_line.append(reltype)
return json.loads(subprocess.check_output(relid_cmd_line)) or []
return json.loads(
subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
return []
@ -340,7 +355,8 @@ def related_units(relid=None):
units_cmd_line = ['relation-list', '--format=json']
if relid is not None:
units_cmd_line.extend(('-r', relid))
return json.loads(subprocess.check_output(units_cmd_line)) or []
return json.loads(
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
@cached
@ -379,21 +395,31 @@ def relations_of_type(reltype=None):
return relation_data
@cached
def metadata():
"""Get the current charm metadata.yaml contents as a python object"""
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md)
@cached
def relation_types():
"""Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
md = metadata()
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def charm_name():
"""Get the name of the current charm as is specified on metadata.yaml"""
return metadata().get('name')
@cached
def relations():
"""Get a nested dictionary of relation data for all related units"""
@ -449,7 +475,7 @@ def unit_get(attribute):
"""Get the unit ID for the remote unit"""
_args = ['unit-get', '--format=json', attribute]
try:
return json.loads(subprocess.check_output(_args))
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
except ValueError:
return None

View File

@ -6,19 +6,20 @@
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
import os
import re
import pwd
import grp
import random
import string
import subprocess
import hashlib
import shutil
from contextlib import contextmanager
from collections import OrderedDict
from hookenv import log
from fstab import Fstab
import six
from .hookenv import log
from .fstab import Fstab
def service_start(service_name):
@ -54,7 +55,9 @@ def service(action, service_name):
def service_running(service):
"""Determine whether a system service is running"""
try:
output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
output = subprocess.check_output(
['service', service, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
@ -67,7 +70,9 @@ def service_running(service):
def service_available(service_name):
"""Determine whether a system service is available"""
try:
subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError as e:
return 'unrecognized service' not in e.output
else:
@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
return user_info
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if system_group:
cmd.append('--system')
else:
cmd.extend([
'--group',
])
cmd.append(group_name)
subprocess.check_call(cmd)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).strip()
return subprocess.check_output(cmd).decode('UTF-8').strip()
def symlink(source, destination):
@ -130,23 +155,26 @@ def symlink(source, destination):
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0555, force=False):
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
else:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chown(realpath, uid, gid)
def write_file(path, content, owner='root', group='root', perms=0444):
def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a string"""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
@ -177,7 +205,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
@ -191,7 +219,7 @@ def umount(mountpoint, persist=False):
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
@ -218,8 +246,8 @@ def file_hash(path, hash_type='md5'):
"""
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
@ -297,7 +325,7 @@ def pwgen(length=None):
if length is None:
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.letters + string.digits)
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
@ -306,18 +334,24 @@ def pwgen(length=None):
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring):
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", ""))
matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
if matched:
interface = matched.groups()[0]
else:
interface = line.split()[1].replace(":", "")
interfaces.append(interface)
return interfaces
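The new regex handles VLAN-tagged bond interfaces, whose `ip addr` label carries an `@parent` suffix that the plain split() would keep. A standalone check of the pattern (the sample line is hypothetical):

    import re
    line = '10: bond0.100@bond0: <BROADCAST,MULTICAST,UP> mtu 1500'
    matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
    assert matched.groups()[0] == 'bond0.100'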
@ -329,7 +363,7 @@ def set_nic_mtu(nic, mtu):
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
@ -340,7 +374,7 @@ def get_nic_mtu(nic):
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
@ -357,8 +391,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
'''
import apt_pkg
from charmhelpers.fetch import apt_cache
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
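cmp_pkgrevno() follows apt_pkg.version_compare() semantics (positive, zero or negative), and moving the apt_cache import under `if not pkgcache` keeps module import cheap. Hedged usage (package and version are illustrative):

    # >0: installed keystone is newer than 2014.1; 0: equal; <0: older.
    if cmp_pkgrevno('keystone', '2014.1') >= 0:
        enable_new_feature()  # hypothetical helper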

View File

@ -1,2 +1,2 @@
from .base import *
from .helpers import *
from .base import * # NOQA
from .helpers import * # NOQA

View File

@ -196,7 +196,7 @@ class StoredContext(dict):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0600)
os.fchmod(file_stream.fileno(), 0o600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
@ -211,15 +211,19 @@ class StoredContext(dict):
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a Jinja2 template, for use as a ready action.
Callback class that will render a Jinja2 template, for use as a ready
action.
:param str source: The template source file, relative to
`$CHARM_DIR/templates`
:param str source: The template source file, relative to `$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
"""
def __init__(self, source, target, owner='root', group='root', perms=0444):
def __init__(self, source, target,
owner='root', group='root', perms=0o444):
self.source = source
self.target = target
self.owner = owner

View File

@ -4,7 +4,8 @@ from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None):
"""
Render a template.
@ -47,5 +48,5 @@ def render(source, target, context, owner='root', group='root', perms=0444, temp
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target))
host.mkdir(os.path.dirname(target), owner, group)
host.write_file(target, content, owner, group, perms)
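Passing owner/group through to host.mkdir() stops target directories being created root-owned when the rendered file itself is not. A hedged call sketch (template name, path and context are hypothetical):

    render('myservice.conf', '/etc/myservice/myservice.conf',
           context={'workers': 4},
           owner='myservice', group='myservice', perms=0o640)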

View File

@ -5,10 +5,6 @@ from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import (
)
import os
import six
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -72,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = {
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False):
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -181,7 +184,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False):
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
@ -218,6 +221,7 @@ def add_source(source, key=None):
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
@ -251,12 +255,14 @@ def add_source(source, key=None):
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else:
raise SourceConfigError("Unknown source: {!r}".format(source))
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile() as key_file:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
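add_source() now treats 'distro' as an explicit no-op and writes armored keys through a NamedTemporaryFile opened 'w+' (text mode, required on python3). Hedged examples of accepted source strings (the PPA is hypothetical):

    add_source('cloud:trusty-icehouse')  # official cloud archive pocket
    add_source('proposed')               # this release's proposed pocket
    add_source('distro')                 # no-op
    add_source('ppa:example/ppa')        # hypothetical PPA, needs network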
@ -293,14 +299,14 @@ def configure_sources(update=False,
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, basestring):
if isinstance(sources, six.string_types):
sources = [sources]
if keys is None:
for source in sources:
add_source(source, None)
else:
if isinstance(keys, basestring):
if isinstance(keys, six.string_types):
keys = [keys]
if len(sources) != len(keys):
@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False):
while result is None or result == APT_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError, e:
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise

View File

@ -1,8 +1,23 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib
import re
import six
if six.PY3:
from urllib.request import (
build_opener, install_opener, urlopen, urlretrieve,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
else:
from urllib import urlretrieve
from urllib2 import (
build_opener, install_opener, urlopen,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
URLError
)
from urlparse import urlparse, urlunparse, parse_qs
from charmhelpers.fetch import (
BaseFetchHandler,
@ -15,6 +30,24 @@ from charmhelpers.payload.archive import (
from charmhelpers.core.host import mkdir, check_hash
def splituser(host):
'''urllib.splituser(), but six's support of this seems broken'''
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match:
return match.group(1, 2)
return None, host
def splitpasswd(user):
'''urllib.splitpasswd(), but six's support of this is missing'''
_passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user)
if match:
return match.group(1, 2)
return user, None
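These two helpers replace urllib2.splituser()/splitpasswd(), which have no clean six equivalent. Behaviour sketch (values are illustrative):

    assert splituser('user:secret@example.com') == ('user:secret', 'example.com')
    assert splituser('example.com') == (None, 'example.com')
    assert splitpasswd('user:secret') == ('user', 'secret')
    assert splitpasswd('user') == ('user', None)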
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""
Handler to download archive files from arbitrary URLs.
@ -42,20 +75,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
"""
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
proto, netloc, path, params, query, fragment = urlparse(source)
if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc)
auth, barehost = splituser(netloc)
if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
source = urlunparse((proto, barehost, path, params, query, fragment))
username, password = splitpasswd(auth)
passman = HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password
# to be used regardless of the realm
passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
response = urllib2.urlopen(source)
authhandler = HTTPBasicAuthHandler(passman)
opener = build_opener(authhandler)
install_opener(opener)
response = urlopen(source)
try:
with open(dest, 'w') as dest_file:
dest_file.write(response.read())
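The rewritten download path strips inline credentials from the URL and installs an HTTPBasicAuthHandler instead, identical in effect on py2 and py3. Hedged usage (URL and paths are hypothetical):

    handler = ArchiveUrlFetchHandler()
    # Credentials are removed from the fetched URL and sent via basic auth.
    handler.download('https://user:secret@example.com/pkg.tar.gz',
                     '/tmp/pkg.tar.gz')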
@ -91,17 +124,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
mkdir(dest_dir, perms=0o755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except urllib2.URLError as e:
except URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
options = urlparse.parse_qs(url_parts.fragment)
options = parse_qs(url_parts.fragment)
for key, value in options.items():
if key in hashlib.algorithms:
if not six.PY3:
algorithms = hashlib.algorithms
else:
algorithms = hashlib.algorithms_available
if key in algorithms:
check_hash(dld_file, value, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
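install() verifies any hash given as a URL fragment, with the algorithm set chosen per Python version (hashlib.algorithms is gone in py3). A hedged example (URL and digest are placeholders):

    # '#sha256=<digest>' in the source URL triggers verification; the
    # digest below is a placeholder, not a real checksum.
    handler.install('http://example.com/pkg.tar.gz#sha256=0123abcd')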

View File

@ -5,6 +5,10 @@ from charmhelpers.fetch import (
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('bzrlib does not support Python3')
try:
from bzrlib.branch import Branch
except ImportError:
@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
mkdir(dest_dir, perms=0o755)
try:
self.branch(source, dest_dir)
except OSError as e:

View File

@ -0,0 +1,51 @@
import os
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.core.host import mkdir
import six
if six.PY3:
raise ImportError('GitPython does not support Python 3')
try:
from git import Repo
except ImportError:
from charmhelpers.fetch import apt_install
apt_install("python-git")
from git import Repo
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs"""
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git'):
return False
else:
return True
def clone(self, source, dest, branch):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
def install(self, source, branch="master", dest=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:
self.clone(source, dest_dir, branch)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir
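The new handler clones over http/https/git and checks out the requested branch; ssh URLs are explicitly unsupported for now. Hedged usage (repository URL is hypothetical):

    handler = GitUrlFetchHandler()
    dest = handler.install('https://example.com/myrepo', branch='stable')
    # dest defaults to $CHARM_DIR/fetched/<last path component>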

View File

@ -1,4 +1,4 @@
from charmhelpers.core.hookenv import config, unit_get
from charmhelpers.core.hookenv import config
from charmhelpers.core.host import mkdir, write_file
@ -6,15 +6,11 @@ from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
is_clustered
determine_api_port
)
from charmhelpers.contrib.hahelpers.apache import install_ca_cert
from charmhelpers.contrib.network.ip import (
get_address_in_network, is_address_in_network)
import os
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -49,26 +45,12 @@ class ApacheSSLContext(context.ApacheSSLContext):
install_ca_cert(ca.get_ca_bundle())
def canonical_names(self):
addresses = []
vips = []
if config('vip'):
vips = config('vip').split()
for network_type in ['os-internal-network',
'os-admin-network',
'os-public-network']:
address = get_address_in_network(config(network_type),
unit_get('private-address'))
if len(vips) > 0 and is_clustered():
for vip in vips:
if is_address_in_network(config(network_type),
vip):
addresses.append(vip)
break
elif is_clustered():
addresses.append(config('vip'))
else:
addresses.append(address)
return list(set(addresses))
addresses = self.get_network_addresses()
addrs = []
for address, endpoint in addresses:
addrs.append(endpoint)
return list(set(addrs))
class HAProxyContext(context.HAProxyContext):
@ -90,8 +72,10 @@ class HAProxyContext(context.HAProxyContext):
listen_ports['public_port'] = api_port('keystone-public')
# Apache ports
a_admin_port = determine_apache_port(api_port('keystone-admin'))
a_public_port = determine_apache_port(api_port('keystone-public'))
a_admin_port = determine_apache_port(api_port('keystone-admin'),
singlenode_mode=True)
a_public_port = determine_apache_port(api_port('keystone-public'),
singlenode_mode=True)
port_mapping = {
'admin-port': [
@ -118,8 +102,10 @@ class KeystoneContext(context.OSContextGenerator):
)
ctxt = {}
ctxt['token'] = set_admin_token(config('admin-token'))
ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'))
ctxt['public_port'] = determine_api_port(api_port('keystone-public'))
ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'),
singlenode_mode=True)
ctxt['public_port'] = determine_api_port(api_port('keystone-public'),
singlenode_mode=True)
ctxt['debug'] = config('debug') in ['yes', 'true', 'True']
ctxt['verbose'] = config('verbose') in ['yes', 'true', 'True']
ctxt['identity_backend'] = config('identity-backend')
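singlenode_mode=True makes the port shuffling unconditional, so haproxy can always bind the advertised ports even with a single unit. Going by the amulet expectations later in this commit, each proxy layer steps the backend port down by 10 (illustrative, assuming no TLS termination, not a guaranteed contract):

    # With a single unit, no https(), and haproxy in front:
    #   determine_api_port(35357, singlenode_mode=True) -> 35347
    #   determine_api_port(5000,  singlenode_mode=True) -> 4990
    # matching the amulet expectations ('35347'/'4990') in this commit.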

View File

@ -1,5 +1,6 @@
#!/usr/bin/python
import hashlib
import os
import sys
import time
@ -56,7 +57,8 @@ from keystone_utils import (
KEYSTONE_CONF,
SSH_USER,
STORED_PASSWD,
setup_ipv6
setup_ipv6,
send_notifications,
)
from charmhelpers.contrib.hahelpers.cluster import (
@ -203,9 +205,24 @@ def pgsql_db_changed():
@hooks.hook('identity-service-relation-changed')
def identity_changed(relation_id=None, remote_unit=None):
notifications = {}
if eligible_leader(CLUSTER_RES):
add_service_to_keystone(relation_id, remote_unit)
synchronize_ca()
settings = relation_get(rid=relation_id, unit=remote_unit)
service = settings.get('service', None)
if service:
# If the service is known and its endpoints have changed, notify the
# service if it is related via the notifications interface.
csum = hashlib.sha256()
# We base the decision to notify on whether these parameters have
# changed (if csum is unchanged from previous notify, relation will
# not fire).
csum.update(settings.get('public_url', None))
csum.update(settings.get('admin_url', None))
csum.update(settings.get('internal_url', None))
notifications['%s-endpoint-changed' % (service)] = csum.hexdigest()
else:
# Each unit needs to set the db information otherwise if the unit
# with the info dies the settings die with it Bug# 1355848
@ -215,6 +232,9 @@ def identity_changed(relation_id=None, remote_unit=None):
relation_set(relation_id=rel_id, **peerdb_settings)
log('Deferring identity_changed() to service leader.')
if notifications:
send_notifications(notifications)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
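The endpoint checksum in identity_changed() means the '<service>-endpoint-changed' key only changes when one of the three URLs does, so remote hooks fire exactly on real endpoint moves. A minimal sketch of the scheme (assumes the URLs are plain strings; the committed py2 code calls update() without encoding):

    import hashlib

    def endpoint_trigger(public_url, admin_url, internal_url):
        # The digest, and hence the relation key, only changes when one
        # of the URLs changes.
        csum = hashlib.sha256()
        for url in (public_url, admin_url, internal_url):
            csum.update(url.encode('utf-8'))  # bytes needed on py3
        return csum.hexdigest()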

View File

@ -1,6 +1,7 @@
#!/usr/bin/python
import subprocess
import os
import uuid
import urlparse
import time
@ -12,7 +13,8 @@ from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader,
determine_api_port,
https,
is_clustered
is_clustered,
is_elected_leader,
)
from charmhelpers.contrib.openstack import context, templating
@ -40,8 +42,11 @@ import charmhelpers.contrib.unison as unison
from charmhelpers.core.hookenv import (
config,
log,
local_unit,
relation_get,
relation_set,
relation_ids,
DEBUG,
INFO,
)
@ -78,6 +83,7 @@ BASE_PACKAGES = [
'python-keystoneclient',
'python-mysqldb',
'python-psycopg2',
'python-six',
'pwgen',
'unison',
'uuid',
@ -119,7 +125,7 @@ BASE_RESOURCE_MAP = OrderedDict([
context.WorkerConfigContext()],
}),
(HAPROXY_CONF, {
'contexts': [context.HAProxyContext(),
'contexts': [context.HAProxyContext(singlenode_mode=True),
keystone_context.HAProxyContext()],
'services': ['haproxy'],
}),
@ -249,9 +255,10 @@ def determine_packages():
def save_script_rc():
env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
'OPENSTACK_PORT_ADMIN': determine_api_port(
api_port('keystone-admin')),
api_port('keystone-admin'), singlenode_mode=True),
'OPENSTACK_PORT_PUBLIC': determine_api_port(
api_port('keystone-public'))}
api_port('keystone-public'),
singlenode_mode=True)}
_save_script_rc(**env_vars)
@ -299,10 +306,12 @@ def get_local_endpoint():
ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr
local_endpoint = endpoint_url.format(
determine_api_port(api_port('keystone-admin')))
determine_api_port(api_port('keystone-admin'),
singlenode_mode=True))
else:
local_endpoint = 'http://localhost:{}/v2.0/'.format(
determine_api_port(api_port('keystone-admin')))
determine_api_port(api_port('keystone-admin'),
singlenode_mode=True))
return local_endpoint
@ -658,6 +667,7 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
relation_data["service_protocol"] = "http"
relation_data["auth_port"] = config('admin-port')
relation_data["service_port"] = config('service-port')
relation_data["region"] = config('region')
if config('https-service-endpoints') in ['True', 'true']:
# Pass CA cert as client will need it to
# verify https connections
@ -680,7 +690,14 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
publicurl=settings['public_url'],
adminurl=settings['admin_url'],
internalurl=settings['internal_url'])
# If an admin username prefix is provided, ensure all services use
# it.
service_username = settings['service']
prefix = config('service-admin-prefix')
if prefix:
service_username = "%s%s" % (prefix, service_username)
# NOTE(jamespage) internal IP for backwards compat for SSL certs
internal_cn = urlparse.urlparse(settings['internal_url']).hostname
https_cns.append(internal_cn)
@ -733,6 +750,11 @@ def add_service_to_keystone(relation_id=None, remote_unit=None):
https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
service_username = '_'.join(services)
# If an admin username prefix is provided, ensure all services use it.
prefix = config('service-admin-prefix')
if prefix:
service_username = "%s%s" % (prefix, service_username)
if 'None' in [v for k, v in settings.iteritems()]:
return
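Both the single-service and grouped-service paths apply the same prefix, keeping generated admin usernames consistent. For example (prefix value is hypothetical), with service-admin-prefix set to 'svc-':

    # single relation:   'nova'     -> 'svc-nova'
    # grouped relations: 'nova_ec2' -> 'svc-nova_ec2'
    #                    (services are joined with '_' before prefixing)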
@ -844,3 +866,69 @@ def setup_ipv6():
' main')
apt_update()
apt_install('haproxy/trusty-backports', fatal=True)
def send_notifications(data, force=False):
"""Send notifications to all units listening on the identity-notifications
interface.
Units are expected to ignore notifications they are not interested in.
NOTE: settings that are not required/in use must always be set to None
so that they are removed from the relation.
:param data: Dict of key=value to use as trigger for notification. If the
last broadcast is unchanged by the addition of this data, the
notification will not be sent.
:param force: Determines whether a trigger value is set to ensure the
remote hook is fired.
"""
if not data or not is_elected_leader(CLUSTER_RES):
log("Not sending notifications (no data or not leader)", level=INFO)
return
rel_ids = relation_ids('identity-notifications')
if not rel_ids:
log("No relations on identity-notifications - skipping broadcast",
level=INFO)
return
keys = []
diff = False
# Get all settings previously sent
for rid in rel_ids:
rs = relation_get(unit=local_unit(), rid=rid)
if rs:
keys += rs.keys()
# Don't bother checking if we have already identified a diff
if diff:
continue
# Work out if this notification changes anything
for k, v in data.iteritems():
if rs.get(k, None) != v:
diff = True
break
if not diff:
log("Notifications unchanged by new values so skipping broadcast",
level=INFO)
return
# Set all to None
_notifications = {k: None for k in set(keys)}
# Set new values
for k, v in data.iteritems():
_notifications[k] = v
if force:
_notifications['trigger'] = str(uuid.uuid4())
# Broadcast
log("Sending identity-service notifications (trigger=%s)" % (force),
level=DEBUG)
for rid in rel_ids:
relation_set(relation_id=rid, relation_settings=_notifications)
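A hedged usage sketch: only the elected leader broadcasts, unchanged payloads are suppressed, and force=True adds a uuid 'trigger' key to guarantee the remote hook fires:

    # 'csum' is a hex digest computed from the service's endpoint URLs
    # (see identity_changed() in keystone_hooks.py):
    send_notifications({'glance-endpoint-changed': csum})
    # Fire the remote hook even if the payload is unchanged:
    send_notifications({'glance-endpoint-changed': csum}, force=True)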

View File

@ -12,6 +12,8 @@ provides:
scope: container
identity-service:
interface: keystone
identity-notifications:
interface: keystone-notifications
identity-admin:
interface: keystone-admin
requires:

View File

@ -304,8 +304,8 @@ class KeystoneBasicDeployment(OpenStackAmuletDeployment):
conf = '/etc/keystone/keystone.conf'
relation = unit.relation('identity-service', 'cinder:identity-service')
expected = {'admin_token': relation['admin_token'],
'admin_port': relation['auth_port'],
'public_port': relation['service_port'],
'admin_port': '35347',
'public_port': '4990',
'use_syslog': 'False',
'log_config': '/etc/keystone/logging.conf',
'debug': 'False',

View File

@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa

View File

@ -1,6 +1,6 @@
import amulet
import os
import six
class AmuletDeployment(object):
@ -52,12 +52,12 @@ class AmuletDeployment(object):
def _add_relations(self, relations):
"""Add all of the relations for the services."""
for k, v in relations.iteritems():
for k, v in six.iteritems(relations):
self.d.relate(k, v)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _deploy(self):

View File

@ -5,6 +5,8 @@ import re
import sys
import time
import six
class AmuletUtils(object):
"""Amulet utilities.
@ -58,7 +60,7 @@ class AmuletUtils(object):
Verify the specified services are running on the corresponding
service units.
"""
for k, v in commands.iteritems():
for k, v in six.iteritems(commands):
for cmd in v:
output, code = k.run(cmd)
if code != 0:
@ -100,11 +102,11 @@ class AmuletUtils(object):
longs, or can be a function that evaluate a variable and returns a
bool.
"""
for k, v in expected.iteritems():
for k, v in six.iteritems(expected):
if k in actual:
if (isinstance(v, basestring) or
if (isinstance(v, six.string_types) or
isinstance(v, bool) or
isinstance(v, (int, long))):
isinstance(v, six.integer_types)):
if v != actual[k]:
return "{}:{}".format(k, actual[k])
elif not v(actual[k]):

View File

@ -1,3 +1,4 @@
import six
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):

View File

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:

View File

@ -91,3 +91,30 @@ class TestKeystoneContexts(CharmTestCase):
}}
}
)
@patch('charmhelpers.contrib.openstack.context.log')
@patch('charmhelpers.contrib.openstack.context.config')
@patch('charmhelpers.contrib.openstack.context.unit_get')
@patch('charmhelpers.contrib.openstack.context.is_clustered')
@patch('charmhelpers.contrib.network.ip.get_address_in_network')
def test_canonical_names_without_network_splits(self,
mock_get_address,
mock_is_clustered,
mock_unit_get,
mock_config,
mock_log):
NET_CONFIG = {'vip': '10.0.3.1 10.0.3.2',
'os-internal-network': None,
'os-admin-network': None,
'os-public-network': None}
mock_unit_get.return_value = '10.0.3.10'
mock_is_clustered.return_value = True
config = {}
config.update(NET_CONFIG)
mock_config.side_effect = lambda key: config[key]
apache = context.ApacheSSLContext()
apache.canonical_names()
msg = "Multiple networks configured but net_type" \
" is None (os-public-network)."
mock_log.assert_called_with(msg, level="WARNING")

View File

@ -85,7 +85,7 @@ class KeystoneRelationTests(CharmTestCase):
self.apt_install.assert_called_with(
['haproxy', 'unison', 'python-keystoneclient',
'uuid', 'python-mysqldb', 'openssl', 'apache2',
'pwgen', 'keystone', 'python-psycopg2'], fatal=True)
'pwgen', 'python-six', 'keystone', 'python-psycopg2'], fatal=True)
self.assertTrue(self.execd_preinstall.called)
mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils'
@ -320,7 +320,10 @@ class KeystoneRelationTests(CharmTestCase):
relation_id='identity-service:0',
remote_unit='unit/0')
def test_identity_changed_leader(self):
@patch.object(hooks, 'hashlib')
@patch.object(hooks, 'send_notifications')
def test_identity_changed_leader(self, mock_send_notifications,
mock_hashlib):
self.eligible_leader.return_value = True
hooks.identity_changed(
relation_id='identity-service:0',

View File

@ -179,7 +179,8 @@ class TestKeystoneUtils(CharmTestCase):
'auth_port': 80,
'service_port': 81,
'https_keystone': 'True',
'ca_cert': 'certificate'}
'ca_cert': 'certificate',
'region': 'RegionOne'}
self.peer_store_and_set.assert_called_with(
relation_id=relation_id,
**relation_data)
@ -290,3 +291,35 @@ class TestKeystoneUtils(CharmTestCase):
region='RegionOne', service='nova',
publicurl=publicurl, adminurl=adminurl,
internalurl=internalurl)
@patch.object(utils, 'uuid')
@patch.object(utils, 'relation_set')
@patch.object(utils, 'relation_get')
@patch.object(utils, 'relation_ids')
@patch.object(utils, 'is_elected_leader')
def test_send_notifications(self, mock_is_elected_leader,
mock_relation_ids, mock_relation_get,
mock_relation_set, mock_uuid):
relation_id = 'testrel:0'
mock_uuid.uuid4.return_value = '1234'
mock_relation_ids.return_value = [relation_id]
mock_is_elected_leader.return_value = False
utils.send_notifications({'foo-endpoint-changed': 1})
self.assertFalse(mock_relation_set.called)
mock_is_elected_leader.return_value = True
utils.send_notifications({})
self.assertFalse(mock_relation_set.called)
settings = {'foo-endpoint-changed': 1}
utils.send_notifications(settings)
self.assertTrue(mock_relation_set.called)
mock_relation_set.assert_called_once_with(relation_id=relation_id,
relation_settings=settings)
mock_relation_set.reset_mock()
settings = {'foo-endpoint-changed': 1}
utils.send_notifications(settings, force=True)
self.assertTrue(mock_relation_set.called)
settings['trigger'] = '1234'
mock_relation_set.assert_called_once_with(relation_id=relation_id,
relation_settings=settings)