Merged next in and resolved conflicts

Liam Young 2015-01-09 16:02:39 +00:00
commit 78c1e7b862
42 changed files with 1378 additions and 609 deletions

View File

@@ -117,10 +117,3 @@ overwrite: Whether or not to wipe local storage that of data that may prevent
 enabled-services: Can be used to separate cinder services between service
                   service units (see previous section)
-
-Contact Information
--------------------
-Author: Adam Gandelman <adamg@canonical.com>
-Report bugs at: http://bugs.launchpad.net/charms
-Location: http://jujucharms.com

View File

@@ -11,4 +11,5 @@ include:
     - fetch
     - payload.execd
     - contrib.network.ip
+    - contrib.python.packages
    - contrib.charmsupport

View File

@@ -0,0 +1,22 @@
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa

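The bootstrap above runs as an import side effect of the charmhelpers package, so the first hook that imports charmhelpers pulls in six and yaml via apt before anything else executes. A minimal sketch of the same import-or-install pattern applied to another dependency (the module and package names here are illustrative, not part of this commit):

import subprocess
import sys

try:
    import netifaces  # hypothetical dependency, for illustration only
except ImportError:
    # The apt package name differs between Python 2 and Python 3.
    pkg = ('python-netifaces' if sys.version_info.major == 2
           else 'python3-netifaces')
    subprocess.check_call(['apt-get', 'install', '-y', pkg])
    import netifaces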
View File

@@ -16,6 +16,8 @@ import os
 
 from socket import gethostname as get_unit_hostname
 
+import six
+
 from charmhelpers.core.hookenv import (
     log,
     relation_ids,
@@ -27,12 +29,19 @@ from charmhelpers.core.hookenv import (
     WARNING,
     unit_get,
 )
+from charmhelpers.core.decorators import (
+    retry_on_exception,
+)
 
 
 class HAIncompleteConfig(Exception):
     pass
 
 
+class CRMResourceNotFound(Exception):
+    pass
+
+
 def is_elected_leader(resource):
     """
     Returns True if the charm executing this is the elected cluster leader.
@@ -67,24 +76,30 @@ def is_clustered():
     return False
 
 
-def is_crm_leader(resource):
+@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
+def is_crm_leader(resource, retry=False):
     """
     Returns True if the charm calling this is the elected corosync leader,
     as returned by calling the external "crm" command.
+
+    We allow this operation to be retried to avoid the possibility of getting a
+    false negative. See LP #1396246 for more info.
     """
-    cmd = [
-        "crm", "resource",
-        "show", resource
-    ]
+    cmd = ['crm', 'resource', 'show', resource]
     try:
-        status = subprocess.check_output(cmd)
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
     except subprocess.CalledProcessError:
-        return False
+        status = None
 
-    if get_unit_hostname() in status:
-        return True
-    else:
-        return False
+    if status and get_unit_hostname() in status:
+        return True
+
+    if status and "resource %s is NOT running" % (resource) in status:
+        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
+
+    return False
 
 
 def is_leader(resource):
@@ -150,34 +165,42 @@ def https():
     return False
 
 
-def determine_api_port(public_port):
+def determine_api_port(public_port, singlenode_mode=False):
     '''
     Determine correct API server listening port based on
     existence of HTTPS reverse proxy and/or haproxy.
 
     public_port: int: standard public port for given service
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
 
     returns: int: the correct listening port for the API service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
+
     if https():
         i += 1
+
     return public_port - (i * 10)
 
 
-def determine_apache_port(public_port):
+def determine_apache_port(public_port, singlenode_mode=False):
     '''
     Description: Determine correct apache listening port based on public IP +
     state of the cluster.
 
     public_port: int: standard public port for given service
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
 
     returns: int: the correct listening port for the HAProxy service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
+
     return public_port - (i * 10)
 
 
@@ -197,7 +220,7 @@ def get_hacluster_config():
     for setting in settings:
         conf[setting] = config_get(setting)
     missing = []
-    [missing.append(s) for s, v in conf.iteritems() if v is None]
+    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
     if missing:
         log('Insufficient config data to configure hacluster.', level=ERROR)
         raise HAIncompleteConfig

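The port arithmetic in determine_api_port() and determine_apache_port() steps the real listening port down by 10 for each proxy layer sitting in front of the service, and singlenode_mode now forces that shuffle even with a single unit. A self-contained sketch of the resulting values, using neutron's conventional 9696 as an illustrative public port:

# Simplified stand-in for the helpers above; 9696 is just an example port.
def api_port(public_port, proxied=False, https=False):
    i = 0
    if proxied:   # haproxy in front (peers, clustered, or singlenode_mode)
        i += 1
    if https:     # apache2 TLS reverse proxy in front of that
        i += 1
    return public_port - (i * 10)

assert api_port(9696) == 9696                            # bare service
assert api_port(9696, proxied=True) == 9686              # behind haproxy
assert api_port(9696, proxied=True, https=True) == 9676  # haproxy + TLS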
View File

@@ -1,15 +1,12 @@
 import glob
 import re
 import subprocess
-import sys
 
 from functools import partial
 
 from charmhelpers.core.hookenv import unit_get
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    WARNING,
-    ERROR,
     log
 )
@@ -34,31 +31,28 @@ def _validate_cidr(network):
                          network)
 
 
+def no_ip_found_error_out(network):
+    errmsg = ("No IP address found in network: %s" % network)
+    raise ValueError(errmsg)
+
+
 def get_address_in_network(network, fallback=None, fatal=False):
-    """
-    Get an IPv4 or IPv6 address within the network from the host.
+    """Get an IPv4 or IPv6 address within the network from the host.
 
     :param network (str): CIDR presentation format. For example,
         '192.168.1.0/24'.
     :param fallback (str): If no address is found, return fallback.
     :param fatal (boolean): If no address is found, fallback is not
                             set and fatal is True then exit(1).
     """
-    def not_found_error_out():
-        log("No IP address found in network: %s" % network,
-            level=ERROR)
-        sys.exit(1)
-
     if network is None:
         if fallback is not None:
             return fallback
+
+        if fatal:
+            no_ip_found_error_out(network)
         else:
-            if fatal:
-                not_found_error_out()
-            else:
-                return None
+            return None
 
     _validate_cidr(network)
     network = netaddr.IPNetwork(network)
@@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
                 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
                 if cidr in network:
                     return str(cidr.ip)
+
     if network.version == 6 and netifaces.AF_INET6 in addresses:
         for addr in addresses[netifaces.AF_INET6]:
             if not addr['addr'].startswith('fe80'):
@@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False):
         return fallback
 
     if fatal:
-        not_found_error_out()
+        no_ip_found_error_out(network)
 
     return None
 
 
 def is_ipv6(address):
-    '''Determine whether provided address is IPv6 or not'''
+    """Determine whether provided address is IPv6 or not."""
     try:
         address = netaddr.IPAddress(address)
     except netaddr.AddrFormatError:
         # probably a hostname - so not an address at all!
         return False
-    else:
-        return address.version == 6
+
+    return address.version == 6
 
 
 def is_address_in_network(network, address):
@@ -113,11 +108,13 @@ def is_address_in_network(network, address):
     except (netaddr.core.AddrFormatError, ValueError):
         raise ValueError("Network (%s) is not in CIDR presentation format" %
                          network)
+
     try:
         address = netaddr.IPAddress(address)
     except (netaddr.core.AddrFormatError, ValueError):
         raise ValueError("Address (%s) is not in correct presentation format" %
                          address)
+
     if address in network:
         return True
     else:
@@ -147,6 +144,7 @@ def _get_for_address(address, key):
                     return iface
                 else:
                     return addresses[netifaces.AF_INET][0][key]
+
     if address.version == 6 and netifaces.AF_INET6 in addresses:
         for addr in addresses[netifaces.AF_INET6]:
             if not addr['addr'].startswith('fe80'):
@@ -160,41 +158,42 @@ def _get_for_address(address, key):
                         return str(cidr).split('/')[1]
                 else:
                     return addr[key]
+
     return None
 
 
 get_iface_for_address = partial(_get_for_address, key='iface')
 
+
 get_netmask_for_address = partial(_get_for_address, key='netmask')
 
 
 def format_ipv6_addr(address):
-    """
-    IPv6 needs to be wrapped with [] in url link to parse correctly.
-    This is required by most configuration files when specifying IPv6
-    addresses.
+    """If address is IPv6, wrap it in '[]' otherwise return None.
     """
     if is_ipv6(address):
-        address = "[%s]" % address
-    else:
-        log("Not a valid ipv6 address: %s" % address, level=WARNING)
-        address = None
+        return "[%s]" % address
 
-    return address
+    return None
 
 
 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                    fatal=True, exc_list=None):
-    """
-    Return the assigned IP address for a given interface, if any, or [].
-    """
+    """Return the assigned IP address for a given interface, if any."""
     # Extract nic if passed /dev/ethX
     if '/' in iface:
         iface = iface.split('/')[-1]
+
     if not exc_list:
         exc_list = []
+
     try:
         inet_num = getattr(netifaces, inet_type)
     except AttributeError:
-        raise Exception('Unknown inet type ' + str(inet_type))
+        raise Exception("Unknown inet type '%s'" % str(inet_type))
 
     interfaces = netifaces.interfaces()
     if inc_aliases:
@@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
         for _iface in interfaces:
             if iface == _iface or _iface.split(':')[0] == iface:
                 ifaces.append(_iface)
+
         if fatal and not ifaces:
             raise Exception("Invalid interface '%s'" % iface)
+
         ifaces.sort()
     else:
         if iface not in interfaces:
             if fatal:
-                raise Exception("%s not found " % (iface))
+                raise Exception("Interface '%s' not found " % (iface))
             else:
                 return []
         else:
             ifaces = [iface]
@@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
             for entry in net_info[inet_num]:
                 if 'addr' in entry and entry['addr'] not in exc_list:
                     addresses.append(entry['addr'])
+
     if fatal and not addresses:
         raise Exception("Interface '%s' doesn't have any %s addresses." %
                         (iface, inet_type))
-    return addresses
+
+    return sorted(addresses)
 
 
 get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
@@ -241,6 +246,7 @@ def get_iface_from_addr(addr):
             raw = re.match(ll_key, _addr)
             if raw:
                 _addr = raw.group(1)
+
             if _addr == addr:
                 log("Address '%s' is configured on iface '%s'" %
                     (addr, iface))
@@ -251,8 +257,9 @@ def get_iface_from_addr(addr):
 
 def sniff_iface(f):
-    """If no iface provided, inject net iface inferred from unit private
-    address.
+    """Ensure decorated function is called with a value for iface.
+
+    If no iface provided, inject net iface inferred from unit private address.
     """
     def iface_sniffer(*args, **kwargs):
         if not kwargs.get('iface', None):
@@ -295,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
         if global_addrs:
             # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
-            out = subprocess.check_output(cmd)
+            out = subprocess.check_output(cmd).decode('UTF-8')
             if dynamic_only:
                 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
             else:
@@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
             return addrs
 
     if fatal:
-        raise Exception("Interface '%s' doesn't have a scope global "
+        raise Exception("Interface '%s' does not have a scope global "
                         "non-temporary ipv6 address." % iface)
 
     return []
 
 
 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
-    """
-    Return a list of bridges on the system or []
-    """
-    b_rgex = vnic_dir + '/*/bridge'
-    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
+    """Return a list of bridges on the system."""
+    b_regex = "%s/*/bridge" % vnic_dir
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
 
 
 def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
-    """
-    Return a list of nics comprising a given bridge on the system or []
-    """
-    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
-    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
+    """Return a list of nics comprising a given bridge on the system."""
+    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
 
 
 def is_bridge_member(nic):
-    """
-    Check if a given nic is a member of a bridge
-    """
+    """Check if a given nic is a member of a bridge."""
     for bridge in get_bridges():
         if nic in get_bridge_nics(bridge):
             return True
+
     return False

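Typical call sites for the helpers above, for context (the CIDR, fallback, and addresses are invented):

# Hypothetical usage; values are illustrative only.
from charmhelpers.contrib.network.ip import (
    format_ipv6_addr,
    get_address_in_network,
)

# Returns this host's address inside the CIDR, or the fallback when the
# host has no address in that network (since fatal=False).
addr = get_address_in_network('192.168.1.0/24', fallback='10.0.0.5',
                              fatal=False)

# After this change, non-IPv6 input yields None instead of a WARNING log.
assert format_ipv6_addr('2001:db8::1') == '[2001:db8::1]'
assert format_ipv6_addr('192.168.1.1') is None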
View File

@@ -1,3 +1,4 @@
+import six
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):

View File

@@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
 
+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils):
            expected service catalog endpoints.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
                 ret = self._validate_dict_data(expected[k][0], actual[k][0])
                 if ret:

File diff suppressed because it is too large

View File

@@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import (
     config,
     unit_get,
 )
-
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
     get_ipv6_addr,
 )
-
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
 
 PUBLIC = 'public'
 INTERNAL = 'int'
 ADMIN = 'admin'
 
-_address_map = {
+ADDRESS_MAP = {
     PUBLIC: {
         'config': 'os-public-network',
         'fallback': 'public-address'
@@ -33,16 +31,14 @@ _address_map = {
 
 def canonical_url(configs, endpoint_type=PUBLIC):
-    '''
-    Returns the correct HTTP URL to this host given the state of HTTPS
+    """Returns the correct HTTP URL to this host given the state of HTTPS
     configuration, hacluster and charm configuration.
 
-    :configs OSTemplateRenderer: A config tempating object to inspect for
-                                 a complete https context.
-    :endpoint_type str: The endpoint type to resolve.
-
-    :returns str: Base URL for services on the current service unit.
-    '''
+    :param configs: OSTemplateRenderer config templating object to inspect
+                    for a complete https context.
+    :param endpoint_type: str endpoint type to resolve.
+    :param returns: str base URL for services on the current service unit.
+    """
     scheme = 'http'
     if 'https' in configs.complete_contexts():
         scheme = 'https'
@@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC):
 
 def resolve_address(endpoint_type=PUBLIC):
+    """Return unit address depending on net config.
+
+    If unit is clustered with vip(s) and has net splits defined, return vip on
+    correct network. If clustered with no nets defined, return primary vip.
+
+    If not clustered, return unit address ensuring address is on configured net
+    split if one is configured.
+
+    :param endpoint_type: Network endpoing type
+    """
     resolved_address = None
-    if is_clustered():
-        if config(_address_map[endpoint_type]['config']) is None:
-            # Assume vip is simple and pass back directly
-            resolved_address = config('vip')
+    vips = config('vip')
+    if vips:
+        vips = vips.split()
+
+    net_type = ADDRESS_MAP[endpoint_type]['config']
+    net_addr = config(net_type)
+    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
+    clustered = is_clustered()
+    if clustered:
+        if not net_addr:
+            # If no net-splits defined, we expect a single vip
+            resolved_address = vips[0]
         else:
-            for vip in config('vip').split():
-                if is_address_in_network(
-                        config(_address_map[endpoint_type]['config']),
-                        vip):
+            for vip in vips:
+                if is_address_in_network(net_addr, vip):
                     resolved_address = vip
+                    break
     else:
         if config('prefer-ipv6'):
-            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
+            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
         else:
-            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
-        resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']), fallback_addr)
+            fallback_addr = unit_get(net_fallback)
+
+        resolved_address = get_address_in_network(net_addr, fallback_addr)
 
     if resolved_address is None:
-        raise ValueError('Unable to resolve a suitable IP address'
-                         ' based on charm state and configuration')
-    else:
-        return resolved_address
+        raise ValueError("Unable to resolve a suitable IP address based on "
+                         "charm state and configuration. (net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address

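resolve_address() now applies three rules: clustered with a net split configured returns the vip that sits on that network, clustered without one returns the first vip, and an unclustered unit returns its own address on the configured network (or the fallback from unit_get). The vip-selection rule reduces to a netaddr membership test; a self-contained sketch with invented values:

import netaddr

vips = "10.5.100.1 192.168.21.1".split()   # e.g. config('vip')
net_addr = "10.5.100.0/24"                 # e.g. config('os-public-network')

# Pick the first vip that falls inside the configured network.
resolved = None
for vip in vips:
    if netaddr.IPAddress(vip) in netaddr.IPNetwork(net_addr):
        resolved = vip
        break

assert resolved == "10.5.100.1"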
View File

@@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release
 def headers_package():
     """Ensures correct linux-headers for running kernel are installed,
     for building DKMS package"""
-    kver = check_output(['uname', '-r']).strip()
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
     return 'linux-headers-%s' % kver
 
 QUANTUM_CONF_DIR = '/etc/quantum'
@@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum'
 
 def kernel_version():
     """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
-    kver = check_output(['uname', '-r']).strip()
+    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
     kver = kver.split('.')
     return (int(kver[0]), int(kver[1]))
 
@@ -138,10 +138,31 @@ def neutron_plugins():
                                  relation_prefix='neutron',
                                  ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [['neutron-plugin-cisco']],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['neutron-plugin-cisco']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-cisco'],
             'server_services': ['neutron-server']
+        },
+        'Calico': {
+            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
+            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': ['calico-felix',
+                         'bird',
+                         'neutron-dhcp-agent',
+                         'nova-api-metadata'],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['calico-compute',
+                          'bird',
+                          'neutron-dhcp-agent',
+                          'nova-api-metadata']],
+            'server_packages': ['neutron-server', 'calico-control'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
@@ -162,7 +183,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
     elif manager == 'neutron':
         plugins = neutron_plugins()
     else:
-        log('Error: Network manager does not support plugins.')
+        log("Network manager '%s' does not support plugins." % (manager),
+            level=ERROR)
         raise Exception
 
     try:

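Charms consume the new 'Calico' entry through neutron_plugin_attribute(), the same accessor already used for the other plugins; a sketch of the lookups (keys as defined in the map above):

# Hypothetical call sites for the plugin map extended above.
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

# First sub-list: kernel headers plus any DKMS packages; second sub-list:
# the Calico plugin packages themselves.
pkgs = neutron_plugin_attribute('Calico', 'packages', net_manager='neutron')

# Services the charm must manage while this plugin is active.
svcs = neutron_plugin_attribute('Calico', 'services', net_manager='neutron')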
View File

@@ -35,10 +35,12 @@ listen stats {{ stat_port }}
     stats auth admin:password
 
 {% if frontends -%}
-{% for service, ports in service_ports.iteritems() -%}
+{% for service, ports in service_ports.items() -%}
 frontend tcp-in_{{ service }}
     bind *:{{ ports[0] }}
+    {% if ipv6 -%}
     bind :::{{ ports[0] }}
+    {% endif -%}
     {% for frontend in frontends -%}
     acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
     use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
@@ -46,7 +48,7 @@ frontend tcp-in_{{ service }}
 {% for frontend in frontends -%}
 backend {{ service }}_{{ frontend }}
     balance leastconn
-    {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
+    {% for unit, address in frontends[frontend]['backends'].items() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
     {% endfor %}
 {% endfor -%}

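The template now uses dict.items(), which exists on both Python 2 and 3, and only emits the IPv6 bind when the context sets ipv6. A toy render of the frontend stanza (context values invented):

from jinja2 import Template

snippet = """\
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
    bind *:{{ ports[0] }}
    {% if ipv6 -%}
    bind :::{{ ports[0] }}
    {% endif -%}
{% endfor -%}"""

print(Template(snippet).render(
    service_ports={'neutron-server': [9696, 9686]}, ipv6=True))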
View File

@@ -1,13 +1,13 @@
 import os
 
-from charmhelpers.fetch import apt_install
+import six
 
+from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
     log,
     ERROR,
     INFO
 )
-
 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 
 try:
@@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release):
     order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
-                 for rel in OPENSTACK_CODENAMES.itervalues()]
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
 
     if not os.path.isdir(templates_dir):
         log('Templates directory not found @ %s.' % templates_dir,
@@ -258,7 +258,7 @@ class OSConfigRenderer(object):
         """
         Write out all registered config files.
         """
-        [self.write(k) for k in self.templates.iterkeys()]
+        [self.write(k) for k in six.iterkeys(self.templates)]
 
     def set_release(self, openstack_release):
         """
@@ -275,5 +275,5 @@ class OSConfigRenderer(object):
         '''
         interfaces = []
         [interfaces.extend(i.complete_contexts())
-         for i in self.templates.itervalues()]
+         for i in six.itervalues(self.templates)]
         return interfaces

View File

@@ -2,6 +2,7 @@
 
 # Common python helper functions used for OpenStack charms.
 from collections import OrderedDict
+from functools import wraps
 
 import subprocess
 import json
@@ -9,11 +10,13 @@ import os
 import socket
 import sys
 
+import six
+import yaml
+
 from charmhelpers.core.hookenv import (
     config,
     log as juju_log,
     charm_dir,
-    ERROR,
     INFO,
     relation_ids,
     relation_set
@@ -30,7 +33,8 @@ from charmhelpers.contrib.network.ip import (
 )
 
 from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install, apt_cache
+from charmhelpers.fetch import apt_install, apt_cache, install_remote
+from charmhelpers.contrib.python.packages import pip_install
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 
@@ -112,7 +116,7 @@ def get_os_codename_install_source(src):
 
     # Best guess match based on deb string provided
     if src.startswith('deb') or src.startswith('ppa'):
-        for k, v in OPENSTACK_CODENAMES.iteritems():
+        for k, v in six.iteritems(OPENSTACK_CODENAMES):
             if v in src:
                 return v
 
@@ -133,7 +137,7 @@ def get_os_codename_version(vers):
 
 def get_os_version_codename(codename):
     '''Determine OpenStack version number from codename.'''
-    for k, v in OPENSTACK_CODENAMES.iteritems():
+    for k, v in six.iteritems(OPENSTACK_CODENAMES):
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -193,7 +197,7 @@ def get_os_version_package(pkg, fatal=True):
     else:
         vers_map = OPENSTACK_CODENAMES
 
-    for version, cname in vers_map.iteritems():
+    for version, cname in six.iteritems(vers_map):
         if cname == codename:
             return version
     # e = "Could not determine OpenStack version for package: %s" % pkg
@@ -317,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
         rc_script.write(
             "#!/bin/bash\n")
         [rc_script.write('export %s=%s\n' % (u, p))
-         for u, p in env_vars.iteritems() if u != "script_path"]
+         for u, p in six.iteritems(env_vars) if u != "script_path"]
 
 
 def openstack_upgrade_available(package):
@@ -350,8 +354,8 @@ def ensure_block_device(block_device):
     '''
     _none = ['None', 'none', None]
     if (block_device in _none):
-        error_out('prepare_storage(): Missing required input: '
-                  'block_device=%s.' % block_device, level=ERROR)
+        error_out('prepare_storage(): Missing required input: block_device=%s.'
+                  % block_device)
 
     if block_device.startswith('/dev/'):
         bdev = block_device
@@ -367,8 +371,7 @@ def ensure_block_device(block_device):
         bdev = '/dev/%s' % block_device
 
     if not is_block_device(bdev):
-        error_out('Failed to locate valid block device at %s' % bdev,
-                  level=ERROR)
+        error_out('Failed to locate valid block device at %s' % bdev)
 
     return bdev
 
@@ -417,7 +420,7 @@ def ns_query(address):
 
     if isinstance(address, dns.name.Name):
         rtype = 'PTR'
-    elif isinstance(address, basestring):
+    elif isinstance(address, six.string_types):
         rtype = 'A'
     else:
         return None
@@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True):
         return result.split('.')[0]
 
 
+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
+    mm_map = {}
+    if os.path.isfile(mm_file):
+        with open(mm_file, 'r') as f:
+            mm_map = json.load(f)
+
+    return mm_map
+
+
 def sync_db_with_multi_ipv6_addresses(database, database_user,
                                       relation_prefix=None):
     hosts = get_ipv6_addr(dynamic_only=False)
@@ -477,10 +488,132 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
               'hostname': json.dumps(hosts)}
 
     if relation_prefix:
-        keys = kwargs.keys()
-        for key in keys:
+        for key in list(kwargs.keys()):
             kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
             del kwargs[key]
 
     for rid in relation_ids('shared-db'):
         relation_set(relation_id=rid, **kwargs)
+
+
+def os_requires_version(ostack_release, pkg):
+    """
+    Decorator for hook to specify minimum supported release
+    """
+    def wrap(f):
+        @wraps(f)
+        def wrapped_f(*args):
+            if os_release(pkg) < ostack_release:
+                raise Exception("This hook is not supported on releases"
+                                " before %s" % ostack_release)
+            f(*args)
+        return wrapped_f
+    return wrap
+
+
+def git_install_requested():
+    """Returns true if openstack-origin-git is specified."""
+    return config('openstack-origin-git') != "None"
+
+
+requirements_dir = None
+
+
+def git_clone_and_install(file_name, core_project):
+    """Clone/install all OpenStack repos specified in yaml config file."""
+    global requirements_dir
+
+    if file_name == "None":
+        return
+
+    yaml_file = os.path.join(charm_dir(), file_name)
+
+    # clone/install the requirements project first
+    installed = _git_clone_and_install_subset(yaml_file,
+                                              whitelist=['requirements'])
+    if 'requirements' not in installed:
+        error_out('requirements git repository must be specified')
+
+    # clone/install all other projects except requirements and the core project
+    blacklist = ['requirements', core_project]
+    _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
+                                  update_requirements=True)
+
+    # clone/install the core project
+    whitelist = [core_project]
+    installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
+                                              update_requirements=True)
+    if core_project not in installed:
+        error_out('{} git repository must be specified'.format(core_project))
+
+
+def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
+                                  update_requirements=False):
+    """Clone/install subset of OpenStack repos specified in yaml config file."""
+    global requirements_dir
+    installed = []
+
+    with open(yaml_file, 'r') as fd:
+        projects = yaml.load(fd)
+        for proj, val in projects.items():
+            # The project subset is chosen based on the following 3 rules:
+            # 1) If project is in blacklist, we don't clone/install it, period.
+            # 2) If whitelist is empty, we clone/install everything else.
+            # 3) If whitelist is not empty, we clone/install everything in the
+            #    whitelist.
+            if proj in blacklist:
+                continue
+            if whitelist and proj not in whitelist:
+                continue
+            repo = val['repository']
+            branch = val['branch']
+            repo_dir = _git_clone_and_install_single(repo, branch,
+                                                     update_requirements)
+            if proj == 'requirements':
+                requirements_dir = repo_dir
+            installed.append(proj)
+    return installed
+
+
+def _git_clone_and_install_single(repo, branch, update_requirements=False):
+    """Clone and install a single git repository."""
+    dest_parent_dir = "/mnt/openstack-git/"
+    dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
+
+    if not os.path.exists(dest_parent_dir):
+        juju_log('Host dir not mounted at {}. '
+                 'Creating directory there instead.'.format(dest_parent_dir))
+        os.mkdir(dest_parent_dir)
+
+    if not os.path.exists(dest_dir):
+        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
+        repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
+    else:
+        repo_dir = dest_dir
+
+    if update_requirements:
+        if not requirements_dir:
+            error_out('requirements repo must be cloned before '
+                      'updating from global requirements.')
+        _git_update_requirements(repo_dir, requirements_dir)
+
+    juju_log('Installing git repo from dir: {}'.format(repo_dir))
+    pip_install(repo_dir)
+
+    return repo_dir
+
+
+def _git_update_requirements(package_dir, reqs_dir):
+    """Update from global requirements.
+
+    Update an OpenStack git directory's requirements.txt and
+    test-requirements.txt from global-requirements.txt."""
+    orig_dir = os.getcwd()
+    os.chdir(reqs_dir)
+    cmd = "python update.py {}".format(package_dir)
+    try:
+        subprocess.check_call(cmd.split(' '))
+    except subprocess.CalledProcessError:
+        package = os.path.basename(package_dir)
+        error_out("Error updating {} from global-requirements.txt".format(package))
+    os.chdir(orig_dir)

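git_clone_and_install() expects the yaml named by file_name to map project names to a repository and branch, and it insists on a 'requirements' project, which is cloned first and used to rewrite each project's requirements files. A hypothetical payload (URLs and branches invented for illustration):

import yaml

example = yaml.safe_load("""
requirements:
  repository: https://github.com/openstack/requirements
  branch: stable/icehouse
neutron:
  repository: https://github.com/openstack/neutron
  branch: stable/icehouse
""")

# _git_clone_and_install_subset() reads exactly these two keys per project:
for proj, val in example.items():
    repo, branch = val['repository'], val['branch']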
View File

@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import log
+
+try:
+    from pip import main as pip_execute
+except ImportError:
+    apt_update()
+    apt_install('python-pip')
+    from pip import main as pip_execute
+
+
+def parse_options(given, available):
+    """Given a set of options, check if available"""
+    for key, value in sorted(given.items()):
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, **options):
+    """Install a requirements file """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    log("Installing from file: {} with options: {}".format(requirements,
+                                                           command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, **options):
+    """Install a python package"""
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', "index-url", )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Returns the list of current python installed packages
+    """
+    return pip_execute(["list"])

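A sketch of how charms call these helpers; parse_options() forwards only whitelisted flags, so unrecognized keyword arguments are silently dropped rather than rejected (package names and proxy value are illustrative):

# Hypothetical call sites for the helpers above.
from charmhelpers.contrib.python.packages import pip_install, pip_uninstall

# Runs: pip install --proxy=http://squid.internal:3128 netaddr
pip_install('netaddr', proxy='http://squid.internal:3128')

# 'retries' is not in available_options, so it is ignored, not an error.
pip_install(['netifaces', 'Jinja2'], retries=3)

# Runs: pip uninstall -q -y netaddr
pip_uninstall('netaddr')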
View File

@@ -16,19 +16,18 @@ import time
 from subprocess import (
     check_call,
     check_output,
-    CalledProcessError
+    CalledProcessError,
 )
-
 from charmhelpers.core.hookenv import (
     relation_get,
     relation_ids,
     related_units,
     log,
+    DEBUG,
     INFO,
     WARNING,
-    ERROR
+    ERROR,
 )
-
 from charmhelpers.core.host import (
     mount,
     mounts,
@@ -37,7 +36,6 @@ from charmhelpers.core.host import (
     service_running,
     umount,
 )
-
 from charmhelpers.fetch import (
     apt_install,
 )
@@ -56,99 +54,85 @@ CEPH_CONF = """[global]
 
 def install():
-    ''' Basic Ceph client installation '''
+    """Basic Ceph client installation."""
     ceph_dir = "/etc/ceph"
     if not os.path.exists(ceph_dir):
         os.mkdir(ceph_dir)
+
     apt_install('ceph-common', fatal=True)
 
 
 def rbd_exists(service, pool, rbd_img):
-    ''' Check to see if a RADOS block device exists '''
+    """Check to see if a RADOS block device exists."""
     try:
-        out = check_output(['rbd', 'list', '--id', service,
-                            '--pool', pool])
+        out = check_output(['rbd', 'list', '--id',
                            service, '--pool', pool]).decode('UTF-8')
     except CalledProcessError:
         return False
-    else:
-        return rbd_img in out
+
+    return rbd_img in out
 
 
 def create_rbd_image(service, pool, image, sizemb):
-    ''' Create a new RADOS block device '''
-    cmd = [
-        'rbd',
-        'create',
-        image,
-        '--size',
-        str(sizemb),
-        '--id',
-        service,
-        '--pool',
-        pool
-    ]
+    """Create a new RADOS block device."""
+    cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
+           '--pool', pool]
     check_call(cmd)
 
 
 def pool_exists(service, name):
-    ''' Check to see if a RADOS pool already exists '''
+    """Check to see if a RADOS pool already exists."""
     try:
-        out = check_output(['rados', '--id', service, 'lspools'])
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
     except CalledProcessError:
         return False
-    else:
-        return name in out
+
+    return name in out
 
 
 def get_osds(service):
-    '''
-    Return a list of all Ceph Object Storage Daemons
-    currently in the cluster
-    '''
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster.
+    """
     version = ceph_version()
     if version and version >= '0.56':
         return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls', '--format=json']))
-    else:
-        return None
+                                        'osd', 'ls',
+                                        '--format=json']).decode('UTF-8'))
+
+    return None
 
 
-def create_pool(service, name, replicas=2):
-    ''' Create a new RADOS pool '''
+def create_pool(service, name, replicas=3):
+    """Create a new RADOS pool."""
     if pool_exists(service, name):
         log("Ceph pool {} already exists, skipping creation".format(name),
             level=WARNING)
         return
+
     # Calculate the number of placement groups based
     # on upstream recommended best practices.
     osds = get_osds(service)
     if osds:
-        pgnum = (len(osds) * 100 / replicas)
+        pgnum = (len(osds) * 100 // replicas)
     else:
         # NOTE(james-page): Default to 200 for older ceph versions
         # which don't support OSD query from cli
         pgnum = 200
-    cmd = [
-        'ceph', '--id', service,
-        'osd', 'pool', 'create',
-        name, str(pgnum)
-    ]
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
     check_call(cmd)
-    cmd = [
-        'ceph', '--id', service,
-        'osd', 'pool', 'set', name,
-        'size', str(replicas)
-    ]
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
+           str(replicas)]
     check_call(cmd)
 
 
 def delete_pool(service, name):
-    ''' Delete a RADOS pool from ceph '''
-    cmd = [
-        'ceph', '--id', service,
-        'osd', 'pool', 'delete',
-        name, '--yes-i-really-really-mean-it'
-    ]
+    """Delete a RADOS pool from ceph."""
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
           '--yes-i-really-really-mean-it']
     check_call(cmd)
@@ -161,44 +145,43 @@ def _keyring_path(service):
 
 def create_keyring(service, key):
-    ''' Create a new Ceph keyring containing key'''
+    """Create a new Ceph keyring containing key."""
     keyring = _keyring_path(service)
     if os.path.exists(keyring):
-        log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
+        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
         return
-    cmd = [
-        'ceph-authtool',
-        keyring,
-        '--create-keyring',
-        '--name=client.{}'.format(service),
-        '--add-key={}'.format(key)
-    ]
+
+    cmd = ['ceph-authtool', keyring, '--create-keyring',
+           '--name=client.{}'.format(service), '--add-key={}'.format(key)]
     check_call(cmd)
-    log('ceph: Created new ring at %s.' % keyring, level=INFO)
+    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
 
 
 def create_key_file(service, key):
-    ''' Create a file containing key '''
+    """Create a file containing key."""
     keyfile = _keyfile_path(service)
     if os.path.exists(keyfile):
-        log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
+        log('Keyfile exists at %s.' % keyfile, level=WARNING)
         return
+
     with open(keyfile, 'w') as fd:
         fd.write(key)
-    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
+
+    log('Created new keyfile at %s.' % keyfile, level=INFO)
 
 
 def get_ceph_nodes():
-    ''' Query named relation 'ceph' to detemine current nodes '''
+    """Query named relation 'ceph' to determine current nodes."""
     hosts = []
     for r_id in relation_ids('ceph'):
         for unit in related_units(r_id):
             hosts.append(relation_get('private-address', unit=unit, rid=r_id))
+
     return hosts
 
 
 def configure(service, key, auth, use_syslog):
-    ''' Perform basic configuration of Ceph '''
+    """Perform basic configuration of Ceph."""
     create_keyring(service, key)
     create_key_file(service, key)
     hosts = get_ceph_nodes()
@@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog):
 
 def image_mapped(name):
-    ''' Determine whether a RADOS block device is mapped locally '''
+    """Determine whether a RADOS block device is mapped locally."""
     try:
-        out = check_output(['rbd', 'showmapped'])
+        out = check_output(['rbd', 'showmapped']).decode('UTF-8')
     except CalledProcessError:
         return False
-    else:
-        return name in out
+
+    return name in out
 
 
 def map_block_storage(service, pool, image):
-    ''' Map a RADOS block device for local use '''
+    """Map a RADOS block device for local use."""
     cmd = [
         'rbd',
         'map',
@@ -235,31 +218,32 @@ def map_block_storage(service, pool, image):
 
 def filesystem_mounted(fs):
-    ''' Determine whether a filesytems is already mounted '''
+    """Determine whether a filesytems is already mounted."""
     return fs in [f for f, m in mounts()]
 
 
 def make_filesystem(blk_device, fstype='ext4', timeout=10):
-    ''' Make a new filesystem on the specified block device '''
+    """Make a new filesystem on the specified block device."""
     count = 0
     e_noent = os.errno.ENOENT
     while not os.path.exists(blk_device):
         if count >= timeout:
-            log('ceph: gave up waiting on block device %s' % blk_device,
+            log('Gave up waiting on block device %s' % blk_device,
                 level=ERROR)
             raise IOError(e_noent, os.strerror(e_noent), blk_device)
-        log('ceph: waiting for block device %s to appear' % blk_device,
-            level=INFO)
+
+        log('Waiting for block device %s to appear' % blk_device,
+            level=DEBUG)
         count += 1
         time.sleep(1)
     else:
-        log('ceph: Formatting block device %s as filesystem %s.' %
+        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
         check_call(['mkfs', '-t', fstype, blk_device])
 
 
 def place_data_on_block_device(blk_device, data_src_dst):
-    ''' Migrate data in data_src_dst to blk_device and then remount '''
+    """Migrate data in data_src_dst to blk_device and then remount."""
     # mount block device into /mnt
     mount(blk_device, '/mnt')
+
     # copy data to /mnt
@@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst):
 
 # TODO: re-use
 def modprobe(module):
-    ''' Load a kernel module and configure for auto-load on reboot '''
-    log('ceph: Loading kernel module', level=INFO)
+    """Load a kernel module and configure for auto-load on reboot."""
+    log('Loading kernel module', level=INFO)
     cmd = ['modprobe', module]
     check_call(cmd)
     with open('/etc/modules', 'r+') as modules:
@@ -289,7 +273,7 @@ def modprobe(module):
 
 def copy_files(src, dst, symlinks=False, ignore=None):
-    ''' Copy files from src to dst '''
+    """Copy files from src to dst."""
     for item in os.listdir(src):
         s = os.path.join(src, item)
         d = os.path.join(dst, item)
@@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None):
 
 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
-                        blk_device, fstype, system_services=[]):
-    """
-    NOTE: This function must only be called from a single service unit for
+                        blk_device, fstype, system_services=[],
+                        replicas=3):
+    """NOTE: This function must only be called from a single service unit for
     the same rbd_img otherwise data loss will occur.
 
     Ensures given pool and RBD image exists, is mapped to a block device,
@@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
     """
     # Ensure pool, RBD image, RBD mappings are in place.
     if not pool_exists(service, pool):
-        log('ceph: Creating new pool {}.'.format(pool))
-        create_pool(service, pool)
+        log('Creating new pool {}.'.format(pool), level=INFO)
+        create_pool(service, pool, replicas=replicas)
 
     if not rbd_exists(service, pool, rbd_img):
-        log('ceph: Creating RBD image ({}).'.format(rbd_img))
+        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
         create_rbd_image(service, pool, rbd_img, sizemb)
 
     if not image_mapped(rbd_img):
-        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
+        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
+            level=INFO)
         map_block_storage(service, pool, rbd_img)
 
     # make file system
@@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
     for svc in system_services:
         if service_running(svc):
-            log('ceph: Stopping services {} prior to migrating data.'
-                .format(svc))
+            log('Stopping services {} prior to migrating data.'
                .format(svc), level=DEBUG)
             service_stop(svc)
 
     place_data_on_block_device(blk_device, mount_point)
 
     for svc in system_services:
-        log('ceph: Starting service {} after migrating data.'
-            .format(svc))
+        log('Starting service {} after migrating data.'
            .format(svc), level=DEBUG)
         service_start(svc)
 
 
 def ensure_ceph_keyring(service, user=None, group=None):
-    '''
-    Ensures a ceph keyring is created for a named service
-    and optionally ensures user and group ownership.
+    """Ensures a ceph keyring is created for a named service and optionally
+    ensures user and group ownership.
 
     Returns False if no ceph key is available in relation state.
-    '''
+    """
     key = None
     for rid in relation_ids('ceph'):
         for unit in related_units(rid):
             key = relation_get('key', rid=rid, unit=unit)
             if key:
                 break
+
     if not key:
         return False
+
     create_keyring(service=service, key=key)
     keyring = _keyring_path(service)
     if user and group:
         check_call(['chown', '%s.%s' % (user, group), keyring])
+
     return True
 
 
 def ceph_version():
-    ''' Retrieve the local version of ceph '''
+    """Retrieve the local version of ceph."""
     if os.path.exists('/usr/bin/ceph'):
         cmd = ['ceph', '-v']
-        output = check_output(cmd)
+        output = check_output(cmd).decode('US-ASCII')
         output = output.split()
         if len(output) > 3:
             return output[2]
@@ -385,3 +372,46 @@ def ceph_version():
             return None
     else:
         return None
+
+
+class CephBrokerRq(object):
+    """Ceph broker request.
+
+    Multiple operations can be added to a request and sent to the Ceph broker
+    to be executed.
+
+    Request is json-encoded for sending over the wire.
+
+    The API is versioned and defaults to version 1.
+    """
+    def __init__(self, api_version=1):
+        self.api_version = api_version
+        self.ops = []
+
+    def add_op_create_pool(self, name, replica_count=3):
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'replicas': replica_count})
+
+    @property
+    def request(self):
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
+
+
+class CephBrokerRsp(object):
+    """Ceph broker response.
+
+    Response is json-decoded and contents provided as methods/properties.
+
+    The API is versioned and defaults to version 1.
+    """
+    def __init__(self, encoded_rsp):
+        self.api_version = None
+        self.rsp = json.loads(encoded_rsp)
+
+    @property
+    def exit_code(self):
+        return self.rsp.get('exit-code')
+
+    @property
+    def exit_msg(self):
+        return self.rsp.get('stderr')

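The broker classes let a client charm describe pool operations as JSON for the ceph charm to execute, and create_pool() now defaults to 3 replicas and uses floor division when sizing placement groups. A sketch (relation plumbing omitted):

# Hypothetical usage of the classes added above.
from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    CephBrokerRsp,
)

rq = CephBrokerRq()
rq.add_op_create_pool(name='cinder', replica_count=3)
# rq.request is the json a charm would set on the ceph relation, e.g.
# {"api-version": 1, "ops": [{"op": "create-pool", "name": "cinder",
#                             "replicas": 3}]}

rsp = CephBrokerRsp('{"exit-code": 0}')
assert rsp.exit_code == 0

# Placement-group sizing per create_pool(): 100 PGs per OSD, divided by
# the replica count with integer division, e.g. 6 OSDs and 3 replicas:
assert (6 * 100 // 3) == 200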
View File

@@ -1,12 +1,12 @@
-
 import os
 import re
-
 from subprocess import (
     check_call,
     check_output,
 )
 
+import six
+
 
 ##################################################
 # loopback device helpers.
@@ -37,7 +37,7 @@ def create_loopback(file_path):
     '''
     file_path = os.path.abspath(file_path)
     check_call(['losetup', '--find', file_path])
-    for d, f in loopback_devices().iteritems():
+    for d, f in six.iteritems(loopback_devices()):
         if f == file_path:
             return d
 
@@ -51,7 +51,7 @@ def ensure_loopback_device(path, size):
 
     :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
     '''
-    for d, f in loopback_devices().iteritems():
+    for d, f in six.iteritems(loopback_devices()):
         if f == path:
             return d

View File

@@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device):
     vg = None
     pvd = check_output(['pvdisplay', block_device]).splitlines()
     for l in pvd:
+        l = l.decode('UTF-8')
         if l.strip().startswith('VG Name'):
             vg = ' '.join(l.strip().split()[2:])
     return vg

View File

@@ -30,7 +30,8 @@ def zap_disk(block_device):
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
     call(['sgdisk', '--zap-all', '--mbrtogpt',
           '--clear', block_device])
-    dev_end = check_output(['blockdev', '--getsz', block_device])
+    dev_end = check_output(['blockdev', '--getsz',
+                            block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@ def is_device_mounted(device):
       it doesn't.
     '''
     is_partition = bool(re.search(r".*[0-9]+\b", device))
-    out = check_output(['mount'])
+    out = check_output(['mount']).decode('UTF-8')
     if is_partition:
         return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))

View File

@ -0,0 +1,41 @@
#
# Copyright 2014 Canonical Ltd.
#
# Authors:
#  Edward Hope-Morley <opentastic@gmail.com>
#

import time

from charmhelpers.core.hookenv import (
    log,
    INFO,
)


def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raising the exception.
    """
    def _retry_on_exception_inner_1(f):
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                delay = base_delay * multiplier
                multiplier += 1
                log("Retrying '%s' %d more times (delay=%s)" %
                    (f.__name__, retries, delay), level=INFO)
                retries -= 1
                if delay:
                    time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
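A usage sketch of the decorator (hypothetical function, not from the charm; log() shells out to juju-log, so this assumes a hook environment):

@retry_on_exception(3, base_delay=2, exc_type=IOError)
def read_cluster_state():
    # Hypothetical flaky call: retried up to 3 times, sleeping 2s, 4s,
    # then 6s between attempts, before the final IOError propagates.
    raise IOError('transient failure')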

View File

@ -3,10 +3,11 @@
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'

+import io
 import os


-class Fstab(file):
+class Fstab(io.FileIO):
     """This class extends file in order to implement a file reader/writer
     for file `/etc/fstab`
     """
@ -24,8 +25,8 @@ class Fstab(file):
                 options = "defaults"

             self.options = options
-            self.d = d
-            self.p = p
+            self.d = int(d)
+            self.p = int(p)

         def __eq__(self, o):
             return str(self) == str(o)
@ -45,7 +46,7 @@ class Fstab(file):
             self._path = path
         else:
             self._path = self.DEFAULT_PATH
-        file.__init__(self, self._path, 'r+')
+        super(Fstab, self).__init__(self._path, 'rb+')

     def _hydrate_entry(self, line):
         # NOTE: use split with no arguments to split on any
@ -58,8 +59,9 @@ class Fstab(file):
     def entries(self):
         self.seek(0)
         for line in self.readlines():
+            line = line.decode('us-ascii')
             try:
-                if not line.startswith("#"):
+                if line.strip() and not line.startswith("#"):
                     yield self._hydrate_entry(line)
             except ValueError:
                 pass
@ -75,14 +77,14 @@ class Fstab(file):
         if self.get_entry_by_attr('device', entry.device):
             return False

-        self.write(str(entry) + '\n')
+        self.write((str(entry) + '\n').encode('us-ascii'))
         self.truncate()
         return entry

     def remove_entry(self, entry):
         self.seek(0)

-        lines = self.readlines()
+        lines = [l.decode('us-ascii') for l in self.readlines()]

         found = False
         for index, line in enumerate(lines):
@ -97,7 +99,7 @@ class Fstab(file):
         lines.remove(line)

         self.seek(0)
-        self.write(''.join(lines))
+        self.write(''.join(lines).encode('us-ascii'))
         self.truncate()

         return True
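The new decode/encode pairs exist because io.FileIO is a raw binary stream on Python 2 and 3 alike: reads return bytes and writes require bytes. A self-contained illustration (scratch path assumed):

import io

with io.FileIO('/tmp/fstab-demo.txt', 'wb+') as f:
    f.write('UUID=abc / ext4 defaults 0 1\n'.encode('us-ascii'))
    f.seek(0)
    first = f.readlines()[0].decode('us-ascii')
    assert first.startswith('UUID=abc')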

View File

@ -9,9 +9,14 @@ import json
 import yaml
 import subprocess
 import sys
-import UserDict
 from subprocess import CalledProcessError

+import six
+if not six.PY3:
+    from UserDict import UserDict
+else:
+    from collections import UserDict
+
 CRITICAL = "CRITICAL"
 ERROR = "ERROR"
 WARNING = "WARNING"
@ -63,16 +68,18 @@ def log(message, level=None):
     command = ['juju-log']
     if level:
         command += ['-l', level]
+    if not isinstance(message, six.string_types):
+        message = repr(message)
     command += [message]
     subprocess.call(command)


-class Serializable(UserDict.IterableUserDict):
+class Serializable(UserDict):
     """Wrapper, an object that can be serialized to yaml or json"""

     def __init__(self, obj):
         # wrap the object
-        UserDict.IterableUserDict.__init__(self)
+        UserDict.__init__(self)
         self.data = obj

     def __getattr__(self, attr):
@ -214,6 +221,12 @@ class Config(dict):
         except KeyError:
             return (self._prev_dict or {})[key]

+    def keys(self):
+        prev_keys = []
+        if self._prev_dict is not None:
+            prev_keys = self._prev_dict.keys()
+        return list(set(prev_keys + list(dict.keys(self))))
+
     def load_previous(self, path=None):
         """Load previous copy of config from disk.
@ -263,7 +276,7 @@
         """
         if self._prev_dict:
-            for k, v in self._prev_dict.iteritems():
+            for k, v in six.iteritems(self._prev_dict):
                 if k not in self:
                     self[k] = v
         with open(self.path, 'w') as f:
@ -278,7 +291,8 @@ def config(scope=None):
         config_cmd_line.append(scope)
     config_cmd_line.append('--format=json')
     try:
-        config_data = json.loads(subprocess.check_output(config_cmd_line))
+        config_data = json.loads(
+            subprocess.check_output(config_cmd_line).decode('UTF-8'))
         if scope is not None:
             return config_data
         return Config(config_data)
@ -297,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None):
     if unit:
         _args.append(unit)
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
-    except CalledProcessError, e:
+    except CalledProcessError as e:
         if e.returncode == 2:
             return None
         raise
@ -312,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs):
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
-    for k, v in (relation_settings.items() + kwargs.items()):
+    for k, v in (list(relation_settings.items()) + list(kwargs.items())):
         if v is None:
             relation_cmd_line.append('{}='.format(k))
         else:
@ -329,7 +343,8 @@ def relation_ids(reltype=None):
     relid_cmd_line = ['relation-ids', '--format=json']
     if reltype is not None:
         relid_cmd_line.append(reltype)
-        return json.loads(subprocess.check_output(relid_cmd_line)) or []
+        return json.loads(
+            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
     return []
@ -340,7 +355,8 @@ def related_units(relid=None):
     units_cmd_line = ['relation-list', '--format=json']
     if relid is not None:
         units_cmd_line.extend(('-r', relid))
-    return json.loads(subprocess.check_output(units_cmd_line)) or []
+    return json.loads(
+        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []


 @cached
@ -379,21 +395,31 @@ def relations_of_type(reltype=None):
     return relation_data


+@cached
+def metadata():
+    """Get the current charm metadata.yaml contents as a python object"""
+    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
-    charmdir = os.environ.get('CHARM_DIR', '')
-    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
-    md = yaml.safe_load(mdf)
     rel_types = []
+    md = metadata()
     for key in ('provides', 'requires', 'peers'):
         section = md.get(key)
         if section:
             rel_types.extend(section.keys())
-    mdf.close()
     return rel_types


+@cached
+def charm_name():
+    """Get the name of the current charm as is specified on metadata.yaml"""
+    return metadata().get('name')
+
+
 @cached
 def relations():
     """Get a nested dictionary of relation data for all related units"""
@ -449,7 +475,7 @@ def unit_get(attribute):
     """Get the unit ID for the remote unit"""
     _args = ['unit-get', '--format=json', attribute]
     try:
-        return json.loads(subprocess.check_output(_args))
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
     except ValueError:
         return None
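The repeated .decode('UTF-8') additions in this file all follow one pattern: subprocess.check_output() returns bytes on Python 3, and json.loads() expects text. A self-contained illustration:

import json
import subprocess

raw = subprocess.check_output(['echo', '{"key": "value"}'])
data = json.loads(raw.decode('UTF-8'))
assert data['key'] == 'value'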

View File

@ -6,19 +6,20 @@
 # Matthew Wedgwood <matthew.wedgwood@canonical.com>

 import os
+import re
 import pwd
 import grp
 import random
 import string
 import subprocess
 import hashlib
-import shutil
 from contextlib import contextmanager
 from collections import OrderedDict

-from hookenv import log
-from fstab import Fstab
+import six
+
+from .hookenv import log
+from .fstab import Fstab


 def service_start(service_name):
@ -54,7 +55,9 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
+        output = subprocess.check_output(
+            ['service', service, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError:
         return False
     else:
@ -67,7 +70,9 @@ def service_running(service):
 def service_available(service_name):
     """Determine whether a system service is available"""
     try:
-        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+        subprocess.check_output(
+            ['service', service_name, 'status'],
+            stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError as e:
         return 'unrecognized service' not in e.output
     else:
@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
     return user_info


+def add_group(group_name, system_group=False):
+    """Add a group to the system"""
+    try:
+        group_info = grp.getgrnam(group_name)
+        log('group {0} already exists!'.format(group_name))
+    except KeyError:
+        log('creating group {0}'.format(group_name))
+        cmd = ['addgroup']
+        if system_group:
+            cmd.append('--system')
+        else:
+            cmd.extend([
+                '--group',
+            ])
+        cmd.append(group_name)
+        subprocess.check_call(cmd)
+        group_info = grp.getgrnam(group_name)
+    return group_info
+
+
 def add_user_to_group(username, group):
     """Add a user to a group"""
     cmd = [
@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None):
     cmd.append(from_path)
     cmd.append(to_path)
     log(" ".join(cmd))
-    return subprocess.check_output(cmd).strip()
+    return subprocess.check_output(cmd).decode('UTF-8').strip()


 def symlink(source, destination):
@ -130,23 +155,26 @@ def symlink(source, destination):
     subprocess.check_call(cmd)


-def mkdir(path, owner='root', group='root', perms=0555, force=False):
+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
     """Create a directory"""
     log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                           perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     realpath = os.path.abspath(path)
-    if os.path.exists(realpath):
-        if force and not os.path.isdir(realpath):
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
             log("Removing non-directory file {} prior to mkdir()".format(path))
             os.unlink(realpath)
-    else:
+            os.makedirs(realpath, perms)
+        os.chown(realpath, uid, gid)
+    elif not path_exists:
         os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
+        os.chown(realpath, uid, gid)


-def write_file(path, content, owner='root', group='root', perms=0444):
+def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a string"""
     log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
@ -177,7 +205,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
     cmd_args.extend([device, mountpoint])
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
         return False
@ -191,7 +219,7 @@ def umount(mountpoint, persist=False):
     cmd_args = ['umount', mountpoint]
     try:
         subprocess.check_output(cmd_args)
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         log('Error unmounting {}\n{}'.format(mountpoint, e.output))
         return False
@ -218,8 +246,8 @@ def file_hash(path, hash_type='md5'):
     """
     if os.path.exists(path):
         h = getattr(hashlib, hash_type)()
-        with open(path, 'r') as source:
-            h.update(source.read())  # IGNORE:E1101 - it does have update
+        with open(path, 'rb') as source:
+            h.update(source.read())
         return h.hexdigest()
     else:
         return None
@ -297,7 +325,7 @@ def pwgen(length=None):
     if length is None:
         length = random.choice(range(35, 45))
     alphanumeric_chars = [
-        l for l in (string.letters + string.digits)
+        l for l in (string.ascii_letters + string.digits)
         if l not in 'l0QD1vAEIOUaeiou']
     random_chars = [
         random.choice(alphanumeric_chars) for _ in range(length)]
@ -306,18 +334,24 @@
 def list_nics(nic_type):
     '''Return a list of nics of given type(s)'''
-    if isinstance(nic_type, basestring):
+    if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type
     interfaces = []
     for int_type in int_types:
         cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-        ip_output = subprocess.check_output(cmd).split('\n')
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
         ip_output = (line for line in ip_output if line)
         for line in ip_output:
             if line.split()[1].startswith(int_type):
-                interfaces.append(line.split()[1].replace(":", ""))
+                matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
+                if matched:
+                    interface = matched.groups()[0]
+                else:
+                    interface = line.split()[1].replace(":", "")
+                interfaces.append(interface)
     return interfaces
@ -329,7 +363,7 @@ def set_nic_mtu(nic, mtu):
 def get_nic_mtu(nic):
     cmd = ['ip', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd).split('\n')
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
     for line in ip_output:
         words = line.split()
@ -340,7 +374,7 @@ def get_nic_mtu(nic):
 def get_nic_hwaddr(nic):
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
-    ip_output = subprocess.check_output(cmd)
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
     words = ip_output.split()
     if 'link/ether' in words:
@ -357,8 +391,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     '''
     import apt_pkg
-    from charmhelpers.fetch import apt_cache
     if not pkgcache:
+        from charmhelpers.fetch import apt_cache
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
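The 0555/0444-style literals above become 0o555/0o444 because the bare 0NNN octal form was removed in Python 3; the 0oNNN spelling is accepted by Python 2.6+ and 3 with identical values:

assert 0o555 == int('555', 8) == 365   # r-xr-xr-x
assert 0o444 == int('444', 8) == 292   # r--r--r--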

View File

@ -1,2 +1,2 @@
-from .base import *
-from .helpers import *
+from .base import *  # NOQA
+from .helpers import *  # NOQA

View File

@ -196,7 +196,7 @@ class StoredContext(dict):
         if not os.path.isabs(file_name):
             file_name = os.path.join(hookenv.charm_dir(), file_name)
         with open(file_name, 'w') as file_stream:
-            os.fchmod(file_stream.fileno(), 0600)
+            os.fchmod(file_stream.fileno(), 0o600)
             yaml.dump(config_data, file_stream)

     def read_context(self, file_name):
@ -211,15 +211,19 @@ class StoredContext(dict):
 class TemplateCallback(ManagerCallback):
     """
-    Callback class that will render a Jinja2 template, for use as a ready action.
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`

-    :param str source: The template source file, relative to `$CHARM_DIR/templates`
     :param str target: The target to write the rendered template to
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
     """
-    def __init__(self, source, target, owner='root', group='root', perms=0444):
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444):
         self.source = source
         self.target = target
         self.owner = owner

View File

@ -4,7 +4,8 @@ from charmhelpers.core import host
 from charmhelpers.core import hookenv


-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
+def render(source, target, context, owner='root', group='root',
+           perms=0o444, templates_dir=None):
     """
     Render a template.
@ -47,5 +48,5 @@ def render(source, target, context, owner='root', group='root', perms=0444, temp
             level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target))
+    host.mkdir(os.path.dirname(target), owner, group)
     host.write_file(target, content, owner, group, perms)
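A hypothetical call to render(), assuming a 'cinder.conf' template under $CHARM_DIR/templates; the fix above means the target's parent directory is now created with the requested owner and group rather than root:root:

render(source='cinder.conf',
       target='/etc/cinder/cinder.conf',
       context={'debug': False},
       owner='cinder', group='cinder', perms=0o640)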

View File

@ -5,10 +5,6 @@ from yaml import safe_load
 from charmhelpers.core.host import (
     lsb_release
 )
-from urlparse import (
-    urlparse,
-    urlunparse,
-)
 import subprocess
 from charmhelpers.core.hookenv import (
     config,
@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import (
 )
 import os

+import six
+if six.PY3:
+    from urllib.parse import urlparse, urlunparse
+else:
+    from urlparse import urlparse, urlunparse
+

 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -72,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = {
 FETCH_HANDLERS = (
     'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
     'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+    'charmhelpers.fetch.giturl.GitUrlFetchHandler',
 )

 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False):
     cmd = ['apt-get', '--assume-yes']
     cmd.extend(options)
     cmd.append('install')
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@ -181,7 +184,7 @@ def apt_update(fatal=False):
 def apt_purge(packages, fatal=False):
     """Purge one or more packages"""
     cmd = ['apt-get', '--assume-yes', 'purge']
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False):
 def apt_hold(packages, fatal=False):
     """Hold one or more packages"""
     cmd = ['apt-mark', 'hold']
-    if isinstance(packages, basestring):
+    if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
@ -218,6 +221,7 @@ def add_source(source, key=None):
     pocket for the release.
     'cloud:' may be used to activate official cloud archive pockets,
     such as 'cloud:icehouse'
+    'distro' may be used as a noop

     @param key: A key to be added to the system's APT keyring and used
     to verify the signatures on packages. Ideally, this should be an
@ -251,12 +255,14 @@
         release = lsb_release()['DISTRIB_CODENAME']
         with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
             apt.write(PROPOSED_POCKET.format(release))
+    elif source == 'distro':
+        pass
     else:
-        raise SourceConfigError("Unknown source: {!r}".format(source))
+        log("Unknown source: {!r}".format(source))

     if key:
         if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
-            with NamedTemporaryFile() as key_file:
+            with NamedTemporaryFile('w+') as key_file:
                 key_file.write(key)
                 key_file.flush()
                 key_file.seek(0)
@ -293,14 +299,14 @@ def configure_sources(update=False,
     sources = safe_load((config(sources_var) or '').strip()) or []
     keys = safe_load((config(keys_var) or '').strip()) or None

-    if isinstance(sources, basestring):
+    if isinstance(sources, six.string_types):
         sources = [sources]

     if keys is None:
         for source in sources:
             add_source(source, None)
     else:
-        if isinstance(keys, basestring):
+        if isinstance(keys, six.string_types):
             keys = [keys]

         if len(sources) != len(keys):
@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False):
     while result is None or result == APT_NO_LOCK:
         try:
             result = subprocess.check_call(cmd, env=env)
-        except subprocess.CalledProcessError, e:
+        except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > APT_NO_LOCK_RETRY_COUNT:
                 raise
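A sketch of the new 'distro' handling; these calls modify apt configuration, so they are illustrative rather than meant to run outside an Ubuntu unit:

add_source('distro')          # now a recognised no-op instead of an error
add_source('cloud:icehouse')  # unchanged: enables a cloud archive pocket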

View File

@ -1,8 +1,23 @@
 import os
-import urllib2
-from urllib import urlretrieve
-import urlparse
 import hashlib
+import re
+
+import six
+if six.PY3:
+    from urllib.request import (
+        build_opener, install_opener, urlopen, urlretrieve,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+    )
+    from urllib.parse import urlparse, urlunparse, parse_qs
+    from urllib.error import URLError
+else:
+    from urllib import urlretrieve
+    from urllib2 import (
+        build_opener, install_opener, urlopen,
+        HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+        URLError
+    )
+    from urlparse import urlparse, urlunparse, parse_qs

 from charmhelpers.fetch import (
     BaseFetchHandler,
@ -15,6 +30,24 @@ from charmhelpers.payload.archive import (
 from charmhelpers.core.host import mkdir, check_hash


+def splituser(host):
+    '''urllib.splituser(), but six's support of this seems broken'''
+    _userprog = re.compile('^(.*)@(.*)$')
+    match = _userprog.match(host)
+    if match:
+        return match.group(1, 2)
+    return None, host
+
+
+def splitpasswd(user):
+    '''urllib.splitpasswd(), but six's support of this is missing'''
+    _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+    match = _passwdprog.match(user)
+    if match:
+        return match.group(1, 2)
+    return user, None
+
+
 class ArchiveUrlFetchHandler(BaseFetchHandler):
     """
     Handler to download archive files from arbitrary URLs.
@ -42,20 +75,20 @@
         """
         # propagate all exceptions
         # URLError, OSError, etc
-        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
+        proto, netloc, path, params, query, fragment = urlparse(source)
         if proto in ('http', 'https'):
-            auth, barehost = urllib2.splituser(netloc)
+            auth, barehost = splituser(netloc)
             if auth is not None:
-                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
-                username, password = urllib2.splitpasswd(auth)
-                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                source = urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = splitpasswd(auth)
+                passman = HTTPPasswordMgrWithDefaultRealm()
                 # Realm is set to None in add_password to force the username and password
                 # to be used whatever the realm
                 passman.add_password(None, source, username, password)
-                authhandler = urllib2.HTTPBasicAuthHandler(passman)
-                opener = urllib2.build_opener(authhandler)
-                urllib2.install_opener(opener)
-        response = urllib2.urlopen(source)
+                authhandler = HTTPBasicAuthHandler(passman)
+                opener = build_opener(authhandler)
+                install_opener(opener)
+        response = urlopen(source)
         try:
             with open(dest, 'w') as dest_file:
                 dest_file.write(response.read())
@ -91,17 +124,21 @@
         url_parts = self.parse_url(source)
         dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
         try:
             self.download(source, dld_file)
-        except urllib2.URLError as e:
+        except URLError as e:
             raise UnhandledSource(e.reason)
         except OSError as e:
             raise UnhandledSource(e.strerror)
-        options = urlparse.parse_qs(url_parts.fragment)
+        options = parse_qs(url_parts.fragment)
         for key, value in options.items():
-            if key in hashlib.algorithms:
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
                 check_hash(dld_file, value, key)
         if checksum:
             check_hash(dld_file, checksum, hash_type)
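What the splituser()/splitpasswd() replacements return for a netloc carrying credentials, for example from 'https://user:secret@host/path':

auth, barehost = splituser('user:secret@host')
assert (auth, barehost) == ('user:secret', 'host')
assert splitpasswd(auth) == ('user', 'secret')
assert splituser('host') == (None, 'host')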

View File

@ -5,6 +5,10 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import mkdir

+import six
+if six.PY3:
+    raise ImportError('bzrlib does not support Python3')
+
 try:
     from bzrlib.branch import Branch
 except ImportError:
@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)
         if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0755)
+            mkdir(dest_dir, perms=0o755)
         try:
             self.branch(source, dest_dir)
         except OSError as e:

View File

@ -0,0 +1,51 @@
import os
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('GitPython does not support Python 3')

try:
    from git import Repo
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-git")
    from git import Repo


class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git'):
            return False
        else:
            return True

    def clone(self, source, dest, branch):
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        repo = Repo.clone_from(source, dest)
        repo.git.checkout(branch)

    def install(self, source, branch="master", dest=None):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
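A hypothetical use of the new handler; the repository URL is illustrative and install() clones under $CHARM_DIR/fetched in a hook environment:

handler = GitUrlFetchHandler()
source = 'https://github.com/example/repo.git'  # illustrative URL
if handler.can_handle(source):
    checkout = handler.install(source, branch='master')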

View File

@ -64,8 +64,10 @@ class HAProxyContext(OSContextGenerator):
         Also used to extend cinder.conf context with correct api_listening_port
         '''
         haproxy_port = config('api-listening-port')
-        api_port = determine_api_port(config('api-listening-port'))
-        apache_port = determine_apache_port(config('api-listening-port'))
+        api_port = determine_api_port(config('api-listening-port'),
+                                      singlenode_mode=True)
+        apache_port = determine_apache_port(config('api-listening-port'),
+                                            singlenode_mode=True)

         ctxt = {
             'service_ports': {'cinder_api': [haproxy_port, apache_port]},

View File

@ -1,5 +1,4 @@
 #!/usr/bin/python
-
 import os
 import sys
 import uuid
@ -9,7 +8,6 @@ from subprocess import check_call
 from cinder_utils import (
     determine_packages,
     do_openstack_upgrade,
-    ensure_ceph_pool,
     juju_log,
     migrate_database,
     configure_lvm_storage,
@ -39,6 +37,7 @@ from charmhelpers.core.hookenv import (
     unit_get,
     log,
     ERROR,
+    INFO
 )

 from charmhelpers.fetch import (
@ -46,14 +45,21 @@
     apt_update
 )

-from charmhelpers.core.host import lsb_release, restart_on_change
+from charmhelpers.core.host import (
+    lsb_release,
+    restart_on_change,
+)

 from charmhelpers.contrib.openstack.utils import (
     configure_installation_source,
     openstack_upgrade_available,
     sync_db_with_multi_ipv6_addresses)

-from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring
+from charmhelpers.contrib.storage.linux.ceph import (
+    ensure_ceph_keyring,
+    CephBrokerRq,
+    CephBrokerRsp,
+)

 from charmhelpers.contrib.hahelpers.cluster import (
     eligible_leader,
@ -259,23 +265,38 @@ def ceph_joined():
 @hooks.hook('ceph-relation-changed')
 @restart_on_change(restart_map())
-def ceph_changed():
+def ceph_changed(relation_id=None):
     if 'ceph' not in CONFIGS.complete_contexts():
         juju_log('ceph relation incomplete. Peer not ready?')
         return
-    svc = service_name()
-    if not ensure_ceph_keyring(service=svc,
+
+    service = service_name()
+    if not ensure_ceph_keyring(service=service,
                                user='cinder', group='cinder'):
         juju_log('Could not create ceph keyring: peer not ready?')
         return
-    CONFIGS.write(CINDER_CONF)
-    CONFIGS.write(ceph_config_file())
-    set_ceph_env_variables(service=svc)

-    if eligible_leader(CLUSTER_RES):
-        _config = config()
-        ensure_ceph_pool(service=svc,
-                         replicas=_config['ceph-osd-replication-count'])
+    settings = relation_get(rid=relation_id)
+    if settings and 'broker_rsp' in settings:
+        rsp = CephBrokerRsp(settings['broker_rsp'])
+        # Non-zero return code implies failure
+        if rsp.exit_code:
+            log("Ceph broker request failed (rc=%s, msg=%s)" %
+                (rsp.exit_code, rsp.exit_msg), level=ERROR)
+            return
+
+        log("Ceph broker request succeeded (rc=%s, msg=%s)" %
+            (rsp.exit_code, rsp.exit_msg), level=INFO)
+        set_ceph_env_variables(service=service)
+        CONFIGS.write(CINDER_CONF)
+        CONFIGS.write(ceph_config_file())
+    else:
+        rq = CephBrokerRq()
+        replicas = config('ceph-osd-replication-count')
+        rq.add_op_create_pool(name=service, replica_count=replicas)
+        for rid in relation_ids('ceph'):
+            relation_set(relation_id=rid, broker_req=rq.request)
+            log("Request(s) sent to Ceph broker (rid=%s)" % (rid))


 @hooks.hook('cluster-relation-joined')
@hooks.hook('cluster-relation-joined') @hooks.hook('cluster-relation-joined')

View File

@ -27,11 +27,6 @@ from charmhelpers.core.host import (
     lsb_release
 )

-from charmhelpers.contrib.storage.linux.ceph import (
-    create_pool as ceph_create_pool,
-    pool_exists as ceph_pool_exists,
-)
-
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 from charmhelpers.contrib.hahelpers.cluster import (
     eligible_leader,
@ -145,7 +140,7 @@ CONFIG_FILES = OrderedDict([
         'services': ['cinder-volume']
     }),
     (HAPROXY_CONF, {
-        'hook_contexts': [context.HAProxyContext(),
+        'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
                           cinder_contexts.HAProxyContext()],
         'services': ['haproxy'],
     }),
@ -315,14 +310,17 @@ def configure_lvm_storage(block_devices, volume_group, overwrite=False,
     vg_found = False
     new_devices = []
     for device in devices:
-        if (not is_lvm_physical_volume(device) or
-            (is_lvm_physical_volume(device) and
-             list_lvm_volume_group(device) != volume_group)):
+        if not is_lvm_physical_volume(device):
+            # Unused device
+            if overwrite is True or not has_partition_table(device):
+                prepare_volume(device)
+                new_devices.append(device)
+        elif (is_lvm_physical_volume(device) and
+              list_lvm_volume_group(device) != volume_group):
             # Existing LVM but not part of required VG or new device
             if overwrite is True:
-                clean_storage(device)
+                prepare_volume(device)
                 new_devices.append(device)
-                create_lvm_physical_volume(device)
         elif (is_lvm_physical_volume(device) and
               list_lvm_volume_group(device) == volume_group):
             # Mark vg as found
@ -343,6 +341,17 @@
         extend_lvm_volume_group(volume_group, new_device)


+def prepare_volume(device):
+    clean_storage(device)
+    create_lvm_physical_volume(device)
+
+
+def has_partition_table(block_device):
+    out = subprocess.check_output(['fdisk', '-l', block_device],
+                                  stderr=subprocess.STDOUT)
+    return "doesn't contain a valid partition" not in out
+
+
 def clean_storage(block_device):
     '''Ensures a block device is clean. That is:
         - unmounted
@ -397,13 +406,6 @@ def migrate_database():
     subprocess.check_call(cmd)


-def ensure_ceph_pool(service, replicas):
-    'Creates a ceph pool for service if one does not exist'
-    # TODO(Ditto about moving somewhere sharable)
-    if not ceph_pool_exists(service=service, name=service):
-        ceph_create_pool(service=service, name=service, replicas=replicas)
-
-
 def set_ceph_env_variables(service):
     # XXX: Horrid kludge to make cinder-volume use
     # a different ceph username than admin
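The new has_partition_table() helper is a plain substring test on fdisk output; a minimal illustration with captured fdisk lines (device path illustrative):

blank = "Disk /dev/vdb doesn't contain a valid partition table"
used = "Disk /dev/vdb: 21.5 GB, 21474836480 bytes"
assert "doesn't contain a valid partition" in blank
assert "doesn't contain a valid partition" not in used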

View File

@ -1,5 +1,5 @@
 name: cinder
-summary: Cinder OpenStack starage service
+summary: Cinder OpenStack storage service
 maintainer: Adam Gandelman <adamg@canonical.com>
 description: |
   Cinder is a storage service for the Openstack project

View File

@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa

View File

@ -1,6 +1,6 @@
 import amulet
 import os
+import six


 class AmuletDeployment(object):
@ -52,12 +52,12 @@ class AmuletDeployment(object):
     def _add_relations(self, relations):
         """Add all of the relations for the services."""
-        for k, v in relations.iteritems():
+        for k, v in six.iteritems(relations):
             self.d.relate(k, v)

     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)

     def _deploy(self):
View File

@ -5,6 +5,8 @@ import re
 import sys
 import time

+import six
+

 class AmuletUtils(object):
     """Amulet utilities.
@ -58,7 +60,7 @@
         Verify the specified services are running on the corresponding
         service units.
         """
-        for k, v in commands.iteritems():
+        for k, v in six.iteritems(commands):
             for cmd in v:
                 output, code = k.run(cmd)
                 if code != 0:
@ -100,11 +102,11 @@
         longs, or can be a function that evaluate a variable and returns a
         bool.
         """
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
-                if (isinstance(v, basestring) or
+                if (isinstance(v, six.string_types) or
                         isinstance(v, bool) or
-                        isinstance(v, (int, long))):
+                        isinstance(v, six.integer_types)):
                     if v != actual[k]:
                         return "{}:{}".format(k, actual[k])
                 elif not v(actual[k]):

View File

@ -1,3 +1,4 @@
+import six
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@ -69,7 +70,7 @@
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)

     def _get_openstack_release(self):

View File

@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client

+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@ -60,7 +62,7 @@
         expected service catalog endpoints.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
                 ret = self._validate_dict_data(expected[k][0], actual[k][0])
                 if ret:

View File

@ -1,10 +1,14 @@
-from mock import MagicMock, patch, call
+import json
+
+from mock import (
+    MagicMock,
+    patch,
+    call
+)

 import cinder_utils as utils

 from test_utils import (
     CharmTestCase,
-    RESTART_MAP,
+    RESTART_MAP
 )

 # Need to do some early patching to get the module loaded.
@ -31,7 +35,6 @@ TO_PATCH = [
     'determine_packages',
     'do_openstack_upgrade',
     'ensure_ceph_keyring',
-    'ensure_ceph_pool',
     'juju_log',
     'log',
     'lsb_release',
@ -362,22 +365,64 @@ class TestJoinedHooks(CharmTestCase):
         m = 'ceph relation incomplete. Peer not ready?'
         self.juju_log.assert_called_with(m)

-    def test_ceph_changed(self):
+    @patch("cinder_hooks.relation_set")
+    @patch("cinder_hooks.relation_get")
+    def test_ceph_changed_broker_send_rq(self, mock_relation_get,
+                                         mock_relation_set):
+        self.CONFIGS.complete_contexts.return_value = ['ceph']
+        self.service_name.return_value = 'cinder'
+        self.ensure_ceph_keyring.return_value = True
+        self.ceph_config_file.return_value = '/var/lib/charm/cinder/ceph.conf'
+        self.relation_ids.return_value = ['ceph:0']
+        hooks.hooks.execute(['hooks/ceph-relation-changed'])
+        self.ensure_ceph_keyring.assert_called_with(service='cinder',
+                                                    user='cinder',
+                                                    group='cinder')
+        req = {'api-version': 1,
+               'ops': [{"op": "create-pool", "name": "cinder",
+                        "replicas": 3}]}
+        broker_dict = json.dumps(req)
+        mock_relation_set.assert_called_with(broker_req=broker_dict,
+                                             relation_id='ceph:0')
+        for c in [call('/var/lib/charm/cinder/ceph.conf'),
+                  call('/etc/cinder/cinder.conf')]:
+            self.assertNotIn(c, self.CONFIGS.write.call_args_list)
+        self.assertFalse(self.set_ceph_env_variables.called)
+
+    @patch("cinder_hooks.relation_get", autospec=True)
+    def test_ceph_changed_broker_success(self, mock_relation_get):
         'It ensures ceph assets created on ceph changed'
         self.CONFIGS.complete_contexts.return_value = ['ceph']
         self.service_name.return_value = 'cinder'
         self.ensure_ceph_keyring.return_value = True
         self.ceph_config_file.return_value = '/var/lib/charm/cinder/ceph.conf'
+        mock_relation_get.return_value = {'broker_rsp':
+                                          json.dumps({'exit-code': 0})}
         hooks.hooks.execute(['hooks/ceph-relation-changed'])
         self.ensure_ceph_keyring.assert_called_with(service='cinder',
                                                     user='cinder',
                                                     group='cinder')
-        self.ensure_ceph_pool.assert_called_with(service='cinder', replicas=3)
         for c in [call('/var/lib/charm/cinder/ceph.conf'),
                   call('/etc/cinder/cinder.conf')]:
             self.assertIn(c, self.CONFIGS.write.call_args_list)
         self.set_ceph_env_variables.assert_called_with(service='cinder')

+    @patch("cinder_hooks.relation_get", autospec=True)
+    def test_ceph_changed_broker_nonzero_rc(self, mock_relation_get):
+        self.CONFIGS.complete_contexts.return_value = ['ceph']
+        self.service_name.return_value = 'cinder'
+        self.ensure_ceph_keyring.return_value = True
+        self.ceph_config_file.return_value = '/var/lib/charm/cinder/ceph.conf'
+        mock_relation_get.return_value = {'broker_rsp':
+                                          json.dumps({'exit-code': 1})}
+        hooks.hooks.execute(['hooks/ceph-relation-changed'])
+        self.ensure_ceph_keyring.assert_called_with(service='cinder',
+                                                    user='cinder',
+                                                    group='cinder')
+        for c in [call('/var/lib/charm/cinder/ceph.conf'),
+                  call('/etc/cinder/cinder.conf')]:
+            self.assertNotIn(c, self.CONFIGS.write.call_args_list)
+        self.assertFalse(self.set_ceph_env_variables.called)
+
     def test_ceph_changed_no_keys(self):
         'It ensures ceph assets created on ceph changed'
         self.CONFIGS.complete_contexts.return_value = ['ceph']
@ -395,7 +440,6 @@
         self.service_name.return_value = 'cinder'
         self.ensure_ceph_keyring.return_value = True
         hooks.hooks.execute(['hooks/ceph-relation-changed'])
-        self.assertFalse(self.ensure_ceph_pool.called)


 class TestDepartedHooks(CharmTestCase):

View File

@ -19,8 +19,6 @@ TO_PATCH = [
     'umount',
     'mkdir',
     # ceph utils
-    'ceph_create_pool',
-    'ceph_pool_exists',
     # storage_utils
     'create_lvm_physical_volume',
     'create_lvm_volume_group',
@ -58,6 +56,18 @@ DPKG_OPTIONS = [
     '--option', 'Dpkg::Options::=--force-confdef',
 ]

+FDISKDISPLAY = """
+Disk /dev/vdb doesn't contain a valid partition table
+Disk /dev/vdb: 21.5 GB, 21474836480 bytes
+16 heads, 63 sectors/track, 41610 cylinders, total 41943040 sectors
+Units = sectors of 1 * 512 = 512 bytes
+Sector size (logical/physical): 512 bytes / 512 bytes
+I/O size (minimum/optimal): 512 bytes / 512 bytes
+Disk identifier: 0x00000000
+"""
+

 class TestCinderUtils(CharmTestCase):
@ -210,6 +220,13 @@
         self.assertTrue(cinder_utils._parse_block_device('/mnt/loop0'),
                         ('/mnt/loop0', cinder_utils.DEFAULT_LOOPBACK_SIZE))

+    @patch('subprocess.check_output')
+    def test_has_partition_table(self, _check):
+        _check.return_value = FDISKDISPLAY
+        block_device = '/dev/vdb'
+        cinder_utils.has_partition_table(block_device)
+        _check.assert_called_with(['fdisk', '-l', '/dev/vdb'], stderr=-2)
+
     @patch.object(cinder_utils, 'clean_storage')
     @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
     @patch.object(cinder_utils, 'extend_lvm_volume_group')
@ -230,6 +247,37 @@
         reduce_lvm.assert_called_with('test')
         extend_lvm.assert_called_with('test', '/dev/vdc')

+    @patch.object(cinder_utils, 'has_partition_table')
+    @patch.object(cinder_utils, 'clean_storage')
+    @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
+    @patch.object(cinder_utils, 'extend_lvm_volume_group')
+    def test_configure_lvm_storage_unused_dev(self, extend_lvm, reduce_lvm,
+                                              clean_storage, has_part):
+        devices = ['/dev/vdb', '/dev/vdc']
+        self.is_lvm_physical_volume.return_value = False
+        has_part.return_value = False
+        cinder_utils.configure_lvm_storage(devices, 'test', False, True)
+        clean_storage.assert_has_calls(
+            [call('/dev/vdb'),
+             call('/dev/vdc')]
+        )
+        self.create_lvm_physical_volume.assert_has_calls(
+            [call('/dev/vdb'),
+             call('/dev/vdc')]
+        )
+        self.create_lvm_volume_group.assert_called_with('test', '/dev/vdb')
+        reduce_lvm.assert_called_with('test')
+        extend_lvm.assert_called_with('test', '/dev/vdc')
+
+    @patch.object(cinder_utils, 'has_partition_table')
+    @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
+    def test_configure_lvm_storage_used_dev(self, reduce_lvm, has_part):
+        devices = ['/dev/vdb', '/dev/vdc']
+        self.is_lvm_physical_volume.return_value = False
+        has_part.return_value = True
+        cinder_utils.configure_lvm_storage(devices, 'test', False, True)
+        reduce_lvm.assert_called_with('test')
+
     @patch.object(cinder_utils, 'clean_storage')
     @patch.object(cinder_utils, 'reduce_lvm_volume_group_missing')
     @patch.object(cinder_utils, 'extend_lvm_volume_group')
@ -351,18 +399,6 @@
         cinder_utils.migrate_database()
         check_call.assert_called_with(['cinder-manage', 'db', 'sync'])

-    def test_ensure_ceph_pool(self):
-        self.ceph_pool_exists.return_value = False
-        cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
-        self.ceph_create_pool.assert_called_with(service='cinder',
-                                                 name='cinder',
-                                                 replicas=3)
-
-    def test_ensure_ceph_pool_already_exists(self):
-        self.ceph_pool_exists.return_value = True
-        cinder_utils.ensure_ceph_pool(service='cinder', replicas=3)
-        self.assertFalse(self.ceph_create_pool.called)
-
     @patch('os.path.exists')
     def test_register_configs_apache(self, exists):
         exists.return_value = False

View File

@ -26,8 +26,6 @@ from test_utils import (
 TO_PATCH = [
     # cinder_utils
     'determine_packages',
-    'ensure_ceph_keyring',
-    'ensure_ceph_pool',
     'juju_log',
     'lsb_release',
     'migrate_database',