diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml
index 883089de..65f5cedc 100644
--- a/charm-helpers-tests.yaml
+++ b/charm-helpers-tests.yaml
@@ -1,6 +1,13 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
+ - fetch
- core
- contrib.amulet
- contrib.openstack.amulet
+ - contrib.openstack.utils
+ - contrib.openstack.exceptions
+ - contrib.network.ip
+ - contrib.storage|inc=*
+ - contrib.python|inc=*
+ - osplatform
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 9646b838..8240249e 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-s /etc/init.d/%s status' % svc
+ '-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
diff --git a/hooks/charmhelpers/contrib/hardening/apache/checks/config.py b/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
index 51b636f7..b18b263d 100644
--- a/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
+++ b/hooks/charmhelpers/contrib/hardening/apache/checks/config.py
@@ -26,6 +26,7 @@ from charmhelpers.contrib.hardening.audits.file import (
DirectoryPermissionAudit,
NoReadWriteForOther,
TemplatedFile,
+ DeletedFile
)
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
@@ -52,13 +53,13 @@ def get_audits():
'mods-available/alias.conf'),
context,
TEMPLATES_DIR,
- mode=0o0755,
+ mode=0o0640,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
- 'conf-enabled/hardening.conf'),
+ 'conf-enabled/99-hardening.conf'),
context,
TEMPLATES_DIR,
mode=0o0640,
@@ -69,11 +70,13 @@ def get_audits():
DirectoryPermissionAudit(settings['common']['apache_dir'],
user='root',
group='root',
- mode=0o640),
+ mode=0o0750),
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
NoReadWriteForOther(settings['common']['apache_dir']),
+
+ DeletedFile(['/var/www/html/index.html'])
]
return audits
@@ -94,5 +97,4 @@ class ApacheConfContext(object):
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
out).group(1)
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
- ctxt['traceenable'] = settings['hardening']['traceenable']
return ctxt
diff --git a/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
similarity index 56%
rename from hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
rename to hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
index 07945418..22b68041 100644
--- a/hooks/charmhelpers/contrib/hardening/apache/templates/hardening.conf
+++ b/hooks/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
@@ -4,15 +4,29 @@
###############################################################################
-
+
# http://httpd.apache.org/docs/2.4/upgrading.html
{% if apache_version > '2.2' -%}
Require all granted
{% else -%}
- Order Allow,Deny
- Deny from all
+ Order Allow,Deny
+ Deny from all
{% endif %}
+
+ Options -Indexes -FollowSymLinks
+ AllowOverride None
+
+
+
+ Options -Indexes -FollowSymLinks
+ AllowOverride None
+
+
TraceEnable {{ traceenable }}
+ServerTokens {{ servertokens }}
+
+SSLHonorCipherOrder {{ honor_cipher_order }}
+SSLCipherSuite {{ cipher_suite }}
diff --git a/hooks/charmhelpers/contrib/hardening/audits/__init__.py b/hooks/charmhelpers/contrib/hardening/audits/__init__.py
index 9bf9c3c6..6dd5b05f 100644
--- a/hooks/charmhelpers/contrib/hardening/audits/__init__.py
+++ b/hooks/charmhelpers/contrib/hardening/audits/__init__.py
@@ -49,13 +49,6 @@ class BaseAudit(object): # NO-QA
# Invoke the callback if there is one.
if hasattr(self.unless, '__call__'):
- results = self.unless()
- if results:
- return False
- else:
- return True
+ return not self.unless()
- if self.unless:
- return False
- else:
- return True
+ return not self.unless
diff --git a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
index e5ada29f..0f940d4c 100644
--- a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
+++ b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml
@@ -10,4 +10,7 @@ common:
hardening:
traceenable: 'off'
allowed_http_methods: "GET POST"
- modules_to_disable: [ cgi, cgid ]
\ No newline at end of file
+ modules_to_disable: [ cgi, cgid ]
+ servertokens: 'Prod'
+ honor_cipher_order: 'on'
+ cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
diff --git a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
index 227589b5..c112137c 100644
--- a/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
+++ b/hooks/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
@@ -7,3 +7,6 @@ common:
hardening:
allowed_http_methods:
modules_to_disable:
+ servertokens:
+ honor_cipher_order:
+ cipher_suite:
diff --git a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
index ddd4286c..9a8627b5 100644
--- a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
+++ b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml
@@ -58,6 +58,7 @@ security:
rsync
kernel_enable_module_loading: True # (type:boolean)
kernel_enable_core_dump: False # (type:boolean)
+ ssh_tmout: 300
sysctl:
kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
diff --git a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
index 88b3966e..cc3b9c20 100644
--- a/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
+++ b/hooks/charmhelpers/contrib/hardening/defaults/os.yaml.schema
@@ -34,6 +34,7 @@ security:
packages_list:
kernel_enable_module_loading:
kernel_enable_core_dump:
+ ssh_tmout:
sysctl:
kernel_secure_sysrq:
kernel_enable_sysrq:
diff --git a/hooks/charmhelpers/contrib/hardening/host/checks/profile.py b/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
index 56d65263..2727428d 100644
--- a/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
+++ b/hooks/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -25,7 +25,6 @@ def get_audits():
audits = []
settings = utils.get_settings('os')
-
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
@@ -33,11 +32,18 @@ def get_audits():
ProfileContext(),
template_dir=TEMPLATES_DIR,
mode=0o0755, user='root', group='root'))
+ if settings['security']['ssh_tmout']:
+ audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh',
+ ProfileContext(),
+ template_dir=TEMPLATES_DIR,
+ mode=0o0644, user='root', group='root'))
return audits
class ProfileContext(object):
def __call__(self):
- ctxt = {}
+ settings = utils.get_settings('os')
+ ctxt = {'ssh_tmout':
+ settings['security']['ssh_tmout']}
return ctxt
diff --git a/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
new file mode 100644
index 00000000..616cef46
--- /dev/null
+++ b/hooks/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
@@ -0,0 +1,5 @@
+TMOUT={{ tmout }}
+readonly TMOUT
+export TMOUT
+
+readonly HISTFILE
diff --git a/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py b/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
index f3cac6d9..41bed2d1 100644
--- a/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
+++ b/hooks/charmhelpers/contrib/hardening/ssh/checks/config.py
@@ -27,7 +27,10 @@ from charmhelpers.fetch import (
apt_install,
apt_update,
)
-from charmhelpers.core.host import lsb_release
+from charmhelpers.core.host import (
+ lsb_release,
+ CompareHostReleases,
+)
from charmhelpers.contrib.hardening.audits.file import (
TemplatedFile,
FileContentAudit,
@@ -68,7 +71,8 @@ class SSHConfigContext(object):
'weak': default + ',hmac-sha1'}
# Use newer ciphers on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= 'trusty':
log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
macs = macs_66
@@ -96,7 +100,8 @@ class SSHConfigContext(object):
'weak': weak}
# Use newer kex on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new key exchange '
'algorithms', level=DEBUG)
kex = kex_66
@@ -119,7 +124,8 @@ class SSHConfigContext(object):
'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
# Use newer ciphers on ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new ciphers',
level=DEBUG)
cipher = ciphers_66
@@ -291,7 +297,8 @@ class SSHConfigFileContentAudit(FileContentAudit):
self.fail_cases = []
settings = utils.get_settings('ssh')
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
@@ -364,7 +371,8 @@ class SSHDConfigFileContentAudit(FileContentAudit):
self.fail_cases = []
settings = utils.get_settings('ssh')
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index 54c76a72..14c93aad 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -31,6 +31,7 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.host import (
lsb_release,
+ CompareHostReleases,
)
try:
@@ -67,6 +68,24 @@ def no_ip_found_error_out(network):
raise ValueError(errmsg)
+def _get_ipv6_network_from_address(address):
+ """Get an netaddr.IPNetwork for the given IPv6 address
+ :param address: a dict as returned by netifaces.ifaddresses
+ :returns netaddr.IPNetwork: None if the address is a link local or loopback
+ address
+ """
+ if address['addr'].startswith('fe80') or address['addr'] == "::1":
+ return None
+
+ prefix = address['netmask'].split("/")
+ if len(prefix) > 1:
+ netmask = prefix[1]
+ else:
+ netmask = address['netmask']
+ return netaddr.IPNetwork("%s/%s" % (address['addr'],
+ netmask))
+
+
def get_address_in_network(network, fallback=None, fatal=False):
"""Get an IPv4 or IPv6 address within the network from the host.
@@ -92,19 +111,17 @@ def get_address_in_network(network, fallback=None, fatal=False):
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
+ for addr in addresses[netifaces.AF_INET]:
+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+ addr['netmask']))
+ if cidr in network:
+ return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
+ cidr = _get_ipv6_network_from_address(addr)
+ if cidr and cidr in network:
+ return str(cidr.ip)
if fallback is not None:
return fallback
@@ -180,18 +197,18 @@ def _get_for_address(address, key):
if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
+ network = _get_ipv6_network_from_address(addr)
+ if not network:
+ continue
+ cidr = network.cidr
+ if address in cidr:
+ if key == 'iface':
+ return iface
+ elif key == 'netmask' and cidr:
+ return str(cidr).split('/')[1]
+ else:
+ return addr[key]
return None
@@ -521,7 +538,8 @@ def port_has_listener(address, port):
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
- if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
+ release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(release) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 1f4cf42e..346e6fea 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,6 +40,7 @@ from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
from charmhelpers.core.decorators import retry_on_exception
+from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@@ -1255,7 +1256,7 @@ class OpenStackAmuletUtils(AmuletUtils):
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
fatal=True)
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
- if ubuntu_release <= 'trusty':
+ if CompareHostReleases(ubuntu_release) <= 'trusty':
memcache_listen_addr = 'ip6-localhost'
else:
memcache_listen_addr = '::1'
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 6cdbbbbf..3e055422 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -59,6 +59,8 @@ from charmhelpers.core.host import (
write_file,
pwgen,
lsb_release,
+ CompareHostReleases,
+ is_container,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
@@ -155,7 +157,8 @@ class OSContextGenerator(object):
if self.missing_data:
self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
+ log('Missing required data: %s' % ' '.join(self.missing_data),
+ level=INFO)
else:
self.complete = True
return self.complete
@@ -213,8 +216,9 @@ class SharedDBContext(OSContextGenerator):
hostname_key = "{}_hostname".format(self.relation_prefix)
else:
hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
+ access_hostname = get_address_in_network(
+ access_network,
+ unit_get('private-address'))
set_hostname = relation_get(attribute=hostname_key,
unit=local_unit())
if set_hostname != access_hostname:
@@ -308,7 +312,10 @@ def db_ssl(rdata, ctxt, ssl_dir):
class IdentityServiceContext(OSContextGenerator):
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
+ def __init__(self,
+ service=None,
+ service_user=None,
+ rel_name='identity-service'):
self.service = service
self.service_user = service_user
self.rel_name = rel_name
@@ -457,19 +464,17 @@ class AMQPContext(OSContextGenerator):
host = format_ipv6_addr(host) or host
rabbitmq_hosts.append(host)
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
+ rabbitmq_hosts = sorted(rabbitmq_hosts)
+ ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
transport_hosts = rabbitmq_hosts
if transport_hosts:
- transport_url_hosts = ''
- for host in transport_hosts:
- if transport_url_hosts:
- format_string = ",{}:{}@{}:{}"
- else:
- format_string = "{}:{}@{}:{}"
- transport_url_hosts += format_string.format(
- ctxt['rabbitmq_user'], ctxt['rabbitmq_password'],
- host, rabbitmq_port)
+ transport_url_hosts = ','.join([
+ "{}:{}@{}:{}".format(ctxt['rabbitmq_user'],
+ ctxt['rabbitmq_password'],
+ host_,
+ rabbitmq_port)
+ for host_ in transport_hosts])
ctxt['transport_url'] = "rabbit://{}/{}".format(
transport_url_hosts, vhost)
@@ -1217,6 +1222,10 @@ class BindHostContext(OSContextGenerator):
return {'bind_host': '0.0.0.0'}
+MAX_DEFAULT_WORKERS = 4
+DEFAULT_MULTIPLIER = 2
+
+
class WorkerConfigContext(OSContextGenerator):
@property
@@ -1228,10 +1237,19 @@ class WorkerConfigContext(OSContextGenerator):
return psutil.NUM_CPUS
def __call__(self):
- multiplier = config('worker-multiplier') or 0
+ multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
count = int(self.num_cpus * multiplier)
if multiplier > 0 and count == 0:
count = 1
+
+ if config('worker-multiplier') is None and is_container():
+ # NOTE(jamespage): Limit unconfigured worker-multiplier
+ # to MAX_DEFAULT_WORKERS to avoid insane
+ # worker configuration in LXD containers
+ # on large servers
+ # Reference: https://pad.lv/1665270
+ count = min(count, MAX_DEFAULT_WORKERS)
+
ctxt = {"workers": count}
return ctxt
@@ -1601,7 +1619,8 @@ class MemcacheContext(OSContextGenerator):
if ctxt['use_memcache']:
# Trusty version of memcached does not support ::1 as a listen
# address so use host file entry instead
- if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty':
+ release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(release) > 'trusty':
ctxt['memcache_server'] = '::1'
else:
ctxt['memcache_server'] = 'ip6-localhost'
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
index a8f1ed72..37fa0eb0 100644
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ b/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -23,7 +23,10 @@ from charmhelpers.core.hookenv import (
ERROR,
)
-from charmhelpers.contrib.openstack.utils import os_release
+from charmhelpers.contrib.openstack.utils import (
+ os_release,
+ CompareOpenStackReleases,
+)
def headers_package():
@@ -198,7 +201,8 @@ def neutron_plugins():
},
'plumgrid': {
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
+ 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
+ '.plumgrid_plugin.NeutronPluginPLUMgridV2'),
'contexts': [
context.SharedDBContext(user=config('database-user'),
database=config('database'),
@@ -225,7 +229,7 @@ def neutron_plugins():
'server_services': ['neutron-server']
}
}
- if release >= 'icehouse':
+ if CompareOpenStackReleases(release) >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
@@ -233,10 +237,10 @@ def neutron_plugins():
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
- if release >= 'kilo':
+ if CompareOpenStackReleases(release) >= 'kilo':
plugins['midonet']['driver'] = (
'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if release >= 'liberty':
+ if CompareOpenStackReleases(release) >= 'liberty':
plugins['midonet']['driver'] = (
'midonet.neutron.plugin_v1.MidonetPluginV2')
plugins['midonet']['server_packages'].remove(
@@ -244,10 +248,11 @@ def neutron_plugins():
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
+ 'networking_plumgrid.neutron.plugins'
+ '.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
- if release >= 'mitaka':
+ if CompareOpenStackReleases(release) >= 'mitaka':
plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
plugins['nsx']['server_packages'].append('python-vmware-nsx')
plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 32b62767..54fba39d 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -5,6 +5,8 @@ global
user haproxy
group haproxy
spread-checks 0
+ stats socket /var/run/haproxy/admin.sock mode 600 level admin
+ stats timeout 2m
defaults
log global
@@ -58,6 +60,15 @@ frontend tcp-in_{{ service }}
{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
balance leastconn
+ {% if backend_options -%}
+ {% if backend_options[service] -%}
+ {% for option in backend_options[service] -%}
+ {% for key, value in option.items() -%}
+ {{ key }} {{ value }}
+ {% endfor -%}
+ {% endfor -%}
+ {% endif -%}
+ {% endif -%}
{% for unit, address in frontends[frontend]['backends'].items() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 7e8ecff4..e13450c1 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -33,9 +33,7 @@ import yaml
from charmhelpers.contrib.network import ip
-from charmhelpers.core import (
- unitdata,
-)
+from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import (
action_fail,
@@ -55,6 +53,8 @@ from charmhelpers.core.hookenv import (
application_version_set,
)
+from charmhelpers.core.strutils import BasicStringComparator
+
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
@@ -97,6 +97,22 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
+OPENSTACK_RELEASES = (
+ 'diablo',
+ 'essex',
+ 'folsom',
+ 'grizzly',
+ 'havana',
+ 'icehouse',
+ 'juno',
+ 'kilo',
+ 'liberty',
+ 'mitaka',
+ 'newton',
+ 'ocata',
+ 'pike',
+)
+
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
@@ -238,6 +254,17 @@ GIT_DEFAULT_BRANCHES = {
DEFAULT_LOOPBACK_SIZE = '5G'
+class CompareOpenStackReleases(BasicStringComparator):
+ """Provide comparisons of OpenStack releases.
+
+ Use in the form of
+
+ if CompareOpenStackReleases(release) > 'mitaka':
+ # do something with mitaka
+ """
+ _list = OPENSTACK_RELEASES
+
+
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
@@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir):
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
- template = '/usr/share/openstack-pkg-tools/init-script-template'
+ template = ('/usr/share/openstack-pkg-tools/'
+ 'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
@@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None):
if not _release:
_release = get_os_codename_install_source(source)
- # TODO: this should be changed to a numeric comparison using a known list
- # of releases and comparing by index.
- return _release >= 'mitaka'
+ return CompareOpenStackReleases(_release) >= 'mitaka'
def token_cache_pkgs(source=None, release=None):
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index ae7f3f93..9417d684 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
service_start(svc)
-def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
+def ensure_ceph_keyring(service, user=None, group=None,
+ relation='ceph', key=None):
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
- Returns False if no ceph key is available in relation state.
+ @returns boolean: Flag to indicate whether a key was successfully written
+ to disk based on either relation data or a supplied key
"""
- key = None
- for rid in relation_ids(relation):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
+ if not key:
+ for rid in relation_ids(relation):
+ for unit in related_units(rid):
+ key = relation_get('key', rid=rid, unit=unit)
+ if key:
+ break
if not key:
return False
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 05edfa50..0ee5cb9f 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -45,6 +45,7 @@ if __platform__ == "ubuntu":
add_new_group,
lsb_release,
cmp_pkgrevno,
+ CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
@@ -52,6 +53,7 @@ elif __platform__ == "centos":
add_new_group,
lsb_release,
cmp_pkgrevno,
+ CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'
diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py
index 902d469f..7781a396 100644
--- a/hooks/charmhelpers/core/host_factory/centos.py
+++ b/hooks/charmhelpers/core/host_factory/centos.py
@@ -2,6 +2,22 @@ import subprocess
import yum
import os
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+ """Provide comparisons of Host releases.
+
+ Use in the form of
+
+ if CompareHostReleases(release) > 'trusty':
+ # do something with trusty
+ """
+
+ def __init__(self, item):
+ raise NotImplementedError(
+ "CompareHostReleases() is not implemented for CentOS")
+
def service_available(service_name):
# """Determine whether a system service is available."""
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index 8c66af55..0448288c 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,37 @@
import subprocess
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+ 'lucid',
+ 'maverick',
+ 'natty',
+ 'oneiric',
+ 'precise',
+ 'quantal',
+ 'raring',
+ 'saucy',
+ 'trusty',
+ 'utopic',
+ 'vivid',
+ 'wily',
+ 'xenial',
+ 'yakkety',
+ 'zesty',
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+ """Provide comparisons of Ubuntu releases.
+
+ Use in the form of
+
+ if CompareHostReleases(release) > 'trusty':
+ # do something with trusty
+ """
+ _list = UBUNTU_RELEASES
+
def service_available(service_name):
"""Determine whether a system service is available"""
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
index dd9b9717..685dabde 100644
--- a/hooks/charmhelpers/core/strutils.py
+++ b/hooks/charmhelpers/core/strutils.py
@@ -68,3 +68,56 @@ def bytes_from_string(value):
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+
+
+class BasicStringComparator(object):
+ """Provides a class that will compare strings from an iterator type object.
+ Used to provide > and < comparisons on strings that may not necessarily be
+ alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
+ z-wrap.
+ """
+
+ _list = None
+
+ def __init__(self, item):
+ if self._list is None:
+ raise Exception("Must define the _list in the class definition!")
+ try:
+ self.index = self._list.index(item)
+ except Exception:
+ raise KeyError("Item '{}' is not in list '{}'"
+ .format(item, self._list))
+
+ def __eq__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index == self._list.index(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index < self._list.index(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+ def __gt__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index > self._list.index(other)
+
+ def __le__(self, other):
+ return not self.__gt__(other)
+
+ def __str__(self):
+ """Always give back the item at the index so it can be used in
+ comparisons like:
+
+ s_mitaka = CompareOpenStack('mitaka')
+ s_newton = CompareOpenStack('newton')
+
+ assert s_newton > s_mitaka
+
+ @returns:
+ """
+ return self._list[self.index]
diff --git a/hooks/nova_compute_context.py b/hooks/nova_compute_context.py
index b23061f5..126f5797 100644
--- a/hooks/nova_compute_context.py
+++ b/hooks/nova_compute_context.py
@@ -20,6 +20,7 @@ from charmhelpers.core.unitdata import kv
from charmhelpers.contrib.openstack import context
from charmhelpers.core.host import (
lsb_release,
+ CompareHostReleases,
)
from charmhelpers.core.strutils import (
bool_from_string,
@@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.utils import (
get_os_version_package,
get_os_version_codename,
os_release,
+ CompareOpenStackReleases,
)
from charmhelpers.contrib.openstack.ip import (
INTERNAL,
@@ -140,18 +142,19 @@ class NovaComputeLibvirtContext(context.OSContextGenerator):
# /etc/libvirt/libvirtd.conf (
'listen_tls': 0
}
- distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
- release = os_release('nova-common')
+ cmp_distro_codename = CompareHostReleases(
+ lsb_release()['DISTRIB_CODENAME'].lower())
+ cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
# NOTE(jamespage): deal with switch to systemd
- if distro_codename < "wily":
+ if cmp_distro_codename < "wily":
ctxt['libvirtd_opts'] = '-d'
else:
ctxt['libvirtd_opts'] = ''
# NOTE(jamespage): deal with alignment with Debian in
# Ubuntu yakkety and beyond.
- if distro_codename >= 'yakkety' or release >= 'ocata':
+ if cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata':
ctxt['libvirt_user'] = 'libvirt'
else:
ctxt['libvirt_user'] = 'libvirtd'
@@ -195,7 +198,7 @@ class NovaComputeLibvirtContext(context.OSContextGenerator):
if config('ksm') in ("1", "0",):
ctxt['ksm'] = config('ksm')
else:
- if release < 'kilo':
+ if cmp_os_release < 'kilo':
log("KSM set to 1 by default on openstack releases < kilo",
level=INFO)
ctxt['ksm'] = "1"
@@ -241,7 +244,8 @@ class NovaComputeVirtContext(context.OSContextGenerator):
def __call__(self):
ctxt = {}
- if lsb_release()['DISTRIB_CODENAME'].lower() >= "yakkety":
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) >= "yakkety":
ctxt['virt_type'] = config('virt-type')
ctxt['enable_live_migration'] = config('enable-live-migration')
ctxt['resume_guests_state_on_host_boot'] =\
diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py
index fbdc334e..1e0a3fa2 100644
--- a/hooks/nova_compute_utils.py
+++ b/hooks/nova_compute_utils.py
@@ -43,6 +43,8 @@ from charmhelpers.core.host import (
service_restart,
lsb_release,
write_file,
+ rsync,
+ CompareHostReleases,
)
from charmhelpers.core.hookenv import (
@@ -79,6 +81,7 @@ from charmhelpers.contrib.openstack.utils import (
pause_unit,
resume_unit,
os_application_version_set,
+ CompareOpenStackReleases,
)
from charmhelpers.contrib.python.packages import (
@@ -86,9 +89,6 @@ from charmhelpers.contrib.python.packages import (
)
from charmhelpers.core.hugepage import hugepage_support
-from charmhelpers.core.host import (
- rsync,
-)
from nova_compute_context import (
nova_metadata_requirement,
@@ -311,8 +311,8 @@ REQUIRED_INTERFACES = {
def libvirt_daemon():
'''Resolve the correct name of the libvirt daemon service'''
distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
- if (distro_codename >= 'yakkety' or
- os_release('nova-common') >= 'ocata'):
+ if (CompareHostReleases(distro_codename) >= 'yakkety' or
+ CompareOpenStackReleases(os_release('nova-common')) >= 'ocata'):
return LIBVIRTD_DAEMON
else:
return LIBVIRT_BIN_DAEMON
@@ -332,9 +332,10 @@ def resource_map():
# Network manager gets set late by the cloud-compute interface.
# FlatDHCPManager only requires some extra packages.
+ cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
config('multi-host').lower() == 'yes' and
- os_release('nova-common') < 'ocata'):
+ cmp_os_release < 'ocata'):
resource_map[NOVA_CONF]['services'].extend(
['nova-api', 'nova-network']
)
@@ -342,9 +343,9 @@ def resource_map():
resource_map.pop(NOVA_API_AA_PROFILE_PATH)
resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)
- distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
- if (distro_codename >= 'yakkety' or
- os_release('nova-common') >= 'ocata'):
+ cmp_distro_codename = CompareHostReleases(
+ lsb_release()['DISTRIB_CODENAME'].lower())
+ if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
for data in resource_map.values():
if LIBVIRT_BIN_DAEMON in data['services']:
data['services'].remove(LIBVIRT_BIN_DAEMON)
@@ -413,7 +414,8 @@ def determine_packages_arch():
'''Generate list of architecture-specific packages'''
packages = []
distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
- if platform.machine() == 'aarch64' and distro_codename >= 'wily':
+ if (platform.machine() == 'aarch64' and
+ CompareHostReleases(distro_codename) >= 'wily'):
packages.extend(['qemu-efi']), # AArch64 cloud images require UEFI fw
return packages
@@ -425,7 +427,7 @@ def determine_packages():
net_manager = network_manager()
if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
config('multi-host').lower() == 'yes' and
- os_release('nova-common') < 'ocata'):
+ CompareOpenStackReleases(os_release('nova-common')) < 'ocata'):
packages.extend(['nova-api', 'nova-network'])
if relation_ids('ceph'):
@@ -693,7 +695,8 @@ def destroy_libvirt_network(netname):
def configure_lxd(user='nova'):
''' Configure lxd use for nova user '''
if not git_install_requested():
- if lsb_release()['DISTRIB_CODENAME'].lower() < "vivid":
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) < "vivid":
raise Exception("LXD is not supported for Ubuntu "
"versions less than 15.04 (vivid)")
@@ -733,7 +736,8 @@ def get_topics():
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
- if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
+ _release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(_release) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 430ea65f..a384c1d4 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -25,6 +25,7 @@ from charmhelpers.contrib.openstack.amulet.utils import (
DEBUG,
# ERROR
)
+from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases
from novaclient import exceptions
@@ -265,7 +266,8 @@ class NovaBasicDeployment(OpenStackAmuletDeployment):
if self._get_openstack_release() >= self.trusty_liberty:
services[self.keystone_sentry] = ['apache2']
- if self._get_openstack_release_string() >= 'ocata':
+ _os_release = self._get_openstack_release_string()
+ if CompareOpenStackReleases(_os_release) >= 'ocata':
services[self.nova_compute_sentry].remove('nova-network')
services[self.nova_compute_sentry].remove('nova-api')
diff --git a/tests/charmhelpers/contrib/network/__init__.py b/tests/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/tests/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/network/ip.py b/tests/charmhelpers/contrib/network/ip.py
new file mode 100644
index 00000000..14c93aad
--- /dev/null
+++ b/tests/charmhelpers/contrib/network/ip.py
@@ -0,0 +1,574 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import re
+import subprocess
+import six
+import socket
+
+from functools import partial
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+ network_get_primary_address,
+ unit_get,
+ WARNING,
+)
+
+from charmhelpers.core.host import (
+ lsb_release,
+ CompareHostReleases,
+)
+
+try:
+ import netifaces
+except ImportError:
+ apt_update(fatal=True)
+ if six.PY2:
+ apt_install('python-netifaces', fatal=True)
+ else:
+ apt_install('python3-netifaces', fatal=True)
+ import netifaces
+
+try:
+ import netaddr
+except ImportError:
+ apt_update(fatal=True)
+ if six.PY2:
+ apt_install('python-netaddr', fatal=True)
+ else:
+ apt_install('python3-netaddr', fatal=True)
+ import netaddr
+
+
+def _validate_cidr(network):
+ try:
+ netaddr.IPNetwork(network)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Network (%s) is not in CIDR presentation format" %
+ network)
+
+
+def no_ip_found_error_out(network):
+ errmsg = ("No IP address found in network(s): %s" % network)
+ raise ValueError(errmsg)
+
+
+def _get_ipv6_network_from_address(address):
+ """Get an netaddr.IPNetwork for the given IPv6 address
+ :param address: a dict as returned by netifaces.ifaddresses
+ :returns netaddr.IPNetwork: None if the address is a link local or loopback
+ address
+ """
+ if address['addr'].startswith('fe80') or address['addr'] == "::1":
+ return None
+
+ prefix = address['netmask'].split("/")
+ if len(prefix) > 1:
+ netmask = prefix[1]
+ else:
+ netmask = address['netmask']
+ return netaddr.IPNetwork("%s/%s" % (address['addr'],
+ netmask))
+
+
+def get_address_in_network(network, fallback=None, fatal=False):
+ """Get an IPv4 or IPv6 address within the network from the host.
+
+ :param network (str): CIDR presentation format. For example,
+ '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
+ :param fallback (str): If no address is found, return fallback.
+ :param fatal (boolean): If no address is found, fallback is not
+ set and fatal is True then exit(1).
+ """
+ if network is None:
+ if fallback is not None:
+ return fallback
+
+ if fatal:
+ no_ip_found_error_out(network)
+ else:
+ return None
+
+ networks = network.split() or [network]
+ for network in networks:
+ _validate_cidr(network)
+ network = netaddr.IPNetwork(network)
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ if network.version == 4 and netifaces.AF_INET in addresses:
+ for addr in addresses[netifaces.AF_INET]:
+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+ addr['netmask']))
+ if cidr in network:
+ return str(cidr.ip)
+
+ if network.version == 6 and netifaces.AF_INET6 in addresses:
+ for addr in addresses[netifaces.AF_INET6]:
+ cidr = _get_ipv6_network_from_address(addr)
+ if cidr and cidr in network:
+ return str(cidr.ip)
+
+ if fallback is not None:
+ return fallback
+
+ if fatal:
+ no_ip_found_error_out(network)
+
+ return None
+
+
+def is_ipv6(address):
+ """Determine whether provided address is IPv6 or not."""
+ try:
+ address = netaddr.IPAddress(address)
+ except netaddr.AddrFormatError:
+ # probably a hostname - so not an address at all!
+ return False
+
+ return address.version == 6
+
+
+def is_address_in_network(network, address):
+ """
+ Determine whether the provided address is within a network range.
+
+ :param network (str): CIDR presentation format. For example,
+ '192.168.1.0/24'.
+ :param address: An individual IPv4 or IPv6 address without a net
+ mask or subnet prefix. For example, '192.168.1.1'.
+ :returns boolean: Flag indicating whether address is in network.
+ """
+ try:
+ network = netaddr.IPNetwork(network)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Network (%s) is not in CIDR presentation format" %
+ network)
+
+ try:
+ address = netaddr.IPAddress(address)
+ except (netaddr.core.AddrFormatError, ValueError):
+ raise ValueError("Address (%s) is not in correct presentation format" %
+ address)
+
+ if address in network:
+ return True
+ else:
+ return False
+
+
+def _get_for_address(address, key):
+ """Retrieve an attribute of or the physical interface that
+ the IP address provided could be bound to.
+
+ :param address (str): An individual IPv4 or IPv6 address without a net
+ mask or subnet prefix. For example, '192.168.1.1'.
+ :param key: 'iface' for the physical interface name or an attribute
+ of the configured interface, for example 'netmask'.
+ :returns str: Requested attribute or None if address is not bindable.
+ """
+ address = netaddr.IPAddress(address)
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ if address.version == 4 and netifaces.AF_INET in addresses:
+ addr = addresses[netifaces.AF_INET][0]['addr']
+ netmask = addresses[netifaces.AF_INET][0]['netmask']
+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+ cidr = network.cidr
+ if address in cidr:
+ if key == 'iface':
+ return iface
+ else:
+ return addresses[netifaces.AF_INET][0][key]
+
+ if address.version == 6 and netifaces.AF_INET6 in addresses:
+ for addr in addresses[netifaces.AF_INET6]:
+ network = _get_ipv6_network_from_address(addr)
+ if not network:
+ continue
+
+ cidr = network.cidr
+ if address in cidr:
+ if key == 'iface':
+ return iface
+ elif key == 'netmask' and cidr:
+ return str(cidr).split('/')[1]
+ else:
+ return addr[key]
+ return None
+
+
+get_iface_for_address = partial(_get_for_address, key='iface')
+
+
+get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def resolve_network_cidr(ip_address):
+ '''
+ Resolves the full address cidr of an ip_address based on
+ configured network interfaces
+ '''
+ netmask = get_netmask_for_address(ip_address)
+ return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
+
+
+def format_ipv6_addr(address):
+ """If address is IPv6, wrap it in '[]' otherwise return None.
+
+ This is required by most configuration files when specifying IPv6
+ addresses.
+ """
+ if is_ipv6(address):
+ return "[%s]" % address
+
+ return None
+
+
+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
+ fatal=True, exc_list=None):
+ """Return the assigned IP address for a given interface, if any.
+
+ :param iface: network interface on which address(es) are expected to
+ be found.
+ :param inet_type: inet address family
+ :param inc_aliases: include alias interfaces in search
+ :param fatal: if True, raise exception if address not found
+ :param exc_list: list of addresses to ignore
+ :return: list of ip addresses
+ """
+ # Extract nic if passed /dev/ethX
+ if '/' in iface:
+ iface = iface.split('/')[-1]
+
+ if not exc_list:
+ exc_list = []
+
+ try:
+ inet_num = getattr(netifaces, inet_type)
+ except AttributeError:
+ raise Exception("Unknown inet type '%s'" % str(inet_type))
+
+ interfaces = netifaces.interfaces()
+ if inc_aliases:
+ ifaces = []
+ for _iface in interfaces:
+ if iface == _iface or _iface.split(':')[0] == iface:
+ ifaces.append(_iface)
+
+ if fatal and not ifaces:
+ raise Exception("Invalid interface '%s'" % iface)
+
+ ifaces.sort()
+ else:
+ if iface not in interfaces:
+ if fatal:
+ raise Exception("Interface '%s' not found " % (iface))
+ else:
+ return []
+
+ else:
+ ifaces = [iface]
+
+ addresses = []
+ for netiface in ifaces:
+ net_info = netifaces.ifaddresses(netiface)
+ if inet_num in net_info:
+ for entry in net_info[inet_num]:
+ if 'addr' in entry and entry['addr'] not in exc_list:
+ addresses.append(entry['addr'])
+
+ if fatal and not addresses:
+ raise Exception("Interface '%s' doesn't have any %s addresses." %
+ (iface, inet_type))
+
+ return sorted(addresses)
+
+
+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
+
+
+def get_iface_from_addr(addr):
+ """Work out on which interface the provided address is configured."""
+ for iface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(iface)
+ for inet_type in addresses:
+ for _addr in addresses[inet_type]:
+ _addr = _addr['addr']
+ # link local
+ ll_key = re.compile("(.+)%.*")
+ raw = re.match(ll_key, _addr)
+ if raw:
+ _addr = raw.group(1)
+
+ if _addr == addr:
+ log("Address '%s' is configured on iface '%s'" %
+ (addr, iface))
+ return iface
+
+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
+ raise Exception(msg)
+
+
+def sniff_iface(f):
+ """Ensure decorated function is called with a value for iface.
+
+ If no iface provided, inject net iface inferred from unit private address.
+ """
+ def iface_sniffer(*args, **kwargs):
+ if not kwargs.get('iface', None):
+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
+
+ return f(*args, **kwargs)
+
+ return iface_sniffer
+
+
+@sniff_iface
+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
+ dynamic_only=True):
+ """Get assigned IPv6 address for a given interface.
+
+ Returns list of addresses found. If no address found, returns empty list.
+
+ If iface is None, we infer the current primary interface by doing a reverse
+ lookup on the unit private-address.
+
+ We currently only support scope global IPv6 addresses i.e. non-temporary
+ addresses. If no global IPv6 address is found, return the first one found
+ in the ipv6 address list.
+
+ :param iface: network interface on which ipv6 address(es) are expected to
+ be found.
+ :param inc_aliases: include alias interfaces in search
+ :param fatal: if True, raise exception if address not found
+ :param exc_list: list of addresses to ignore
+ :param dynamic_only: only recognise dynamic addresses
+ :return: list of ipv6 addresses
+ """
+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
+ inc_aliases=inc_aliases, fatal=fatal,
+ exc_list=exc_list)
+
+ if addresses:
+ global_addrs = []
+ for addr in addresses:
+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
+ m = re.match(key_scope_link_local, addr)
+ if m:
+ eui_64_mac = m.group(1)
+ iface = m.group(2)
+ else:
+ global_addrs.append(addr)
+
+ if global_addrs:
+ # Make sure any found global addresses are not temporary
+ cmd = ['ip', 'addr', 'show', iface]
+ out = subprocess.check_output(cmd).decode('UTF-8')
+ if dynamic_only:
+ key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
+ else:
+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
+
+ addrs = []
+ for line in out.split('\n'):
+ line = line.strip()
+ m = re.match(key, line)
+ if m and 'temporary' not in line:
+ # Return the first valid address we find
+ for addr in global_addrs:
+ if m.group(1) == addr:
+ if not dynamic_only or \
+ m.group(1).endswith(eui_64_mac):
+ addrs.append(addr)
+
+ if addrs:
+ return addrs
+
+ if fatal:
+ raise Exception("Interface '%s' does not have a scope global "
+ "non-temporary ipv6 address." % iface)
+
+ return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+ """Return a list of bridges on the system."""
+ b_regex = "%s/*/bridge" % vnic_dir
+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+ """Return a list of nics comprising a given bridge on the system."""
+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+ """Check if a given nic is a member of a bridge."""
+ for bridge in get_bridges():
+ if nic in get_bridge_nics(bridge):
+ return True
+
+ return False
+
+
+def is_ip(address):
+ """
+ Returns True if address is a valid IP address.
+ """
+ try:
+ # Test to see if already an IPv4/IPv6 address
+ address = netaddr.IPAddress(address)
+ return True
+ except (netaddr.AddrFormatError, ValueError):
+ return False
+
+
+def ns_query(address):
+ try:
+ import dns.resolver
+ except ImportError:
+ if six.PY2:
+ apt_install('python-dnspython', fatal=True)
+ else:
+ apt_install('python3-dnspython', fatal=True)
+ import dns.resolver
+
+ if isinstance(address, dns.name.Name):
+ rtype = 'PTR'
+ elif isinstance(address, six.string_types):
+ rtype = 'A'
+ else:
+ return None
+
+ try:
+ answers = dns.resolver.query(address, rtype)
+ except dns.resolver.NXDOMAIN:
+ return None
+
+ if answers:
+ return str(answers[0])
+ return None
+
+
+def get_host_ip(hostname, fallback=None):
+ """
+ Resolves the IP for a given hostname, or returns
+ the input if it is already an IP.
+ """
+ if is_ip(hostname):
+ return hostname
+
+ ip_addr = ns_query(hostname)
+ if not ip_addr:
+ try:
+ ip_addr = socket.gethostbyname(hostname)
+ except:
+ log("Failed to resolve hostname '%s'" % (hostname),
+ level=WARNING)
+ return fallback
+ return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+ """
+ Resolves hostname for given IP, or returns the input
+ if it is already a hostname.
+ """
+ if is_ip(address):
+ try:
+ import dns.reversename
+ except ImportError:
+ if six.PY2:
+ apt_install("python-dnspython", fatal=True)
+ else:
+ apt_install("python3-dnspython", fatal=True)
+ import dns.reversename
+
+ rev = dns.reversename.from_address(address)
+ result = ns_query(rev)
+
+ if not result:
+ try:
+ result = socket.gethostbyaddr(address)[0]
+ except:
+ return None
+ else:
+ result = address
+
+ if fqdn:
+ # strip trailing .
+ if result.endswith('.'):
+ return result[:-1]
+ else:
+ return result
+ else:
+ return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+ """
+ Returns True if the address:port is open and being listened to,
+ else False.
+
+ @param address: an IP address or hostname
+ @param port: integer port
+
+ Note calls 'zc' via a subprocess shell
+ """
+ cmd = ['nc', '-z', address, str(port)]
+ result = subprocess.call(cmd)
+ return not(bool(result))
+
+
+def assert_charm_supports_ipv6():
+ """Check whether we are able to support charms ipv6."""
+ release = lsb_release()['DISTRIB_CODENAME'].lower()
+ if CompareHostReleases(release) < "trusty":
+ raise Exception("IPv6 is not supported in the charms for Ubuntu "
+ "versions less than Trusty 14.04")
+
+
+def get_relation_ip(interface, config_override=None):
+ """Return this unit's IP for the given relation.
+
+ Allow for an arbitrary interface to use with network-get to select an IP.
+ Handle all address selection options including configuration parameter
+ override and IPv6.
+
+ Usage: get_relation_ip('amqp', config_override='access-network')
+
+ @param interface: string name of the relation.
+ @param config_override: string name of the config option for network
+ override. Supports legacy network override configuration parameters.
+ @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
+ @returns IPv6 or IPv4 address
+ """
+
+ fallback = get_host_ip(unit_get('private-address'))
+ if config('prefer-ipv6'):
+ assert_charm_supports_ipv6()
+ return get_ipv6_addr()[0]
+ elif config_override and config(config_override):
+ return get_address_in_network(config(config_override),
+ fallback)
+ else:
+ try:
+ return network_get_primary_address(interface)
+ except NotImplementedError:
+ return fallback
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 1f4cf42e..346e6fea 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,6 +40,7 @@ from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
from charmhelpers.core.decorators import retry_on_exception
+from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@@ -1255,7 +1256,7 @@ class OpenStackAmuletUtils(AmuletUtils):
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
fatal=True)
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
- if ubuntu_release <= 'trusty':
+ if CompareHostReleases(ubuntu_release) <= 'trusty':
memcache_listen_addr = 'ip6-localhost'
else:
memcache_listen_addr = '::1'
diff --git a/tests/charmhelpers/contrib/openstack/exceptions.py b/tests/charmhelpers/contrib/openstack/exceptions.py
new file mode 100644
index 00000000..f85ae4f4
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/exceptions.py
@@ -0,0 +1,21 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class OSContextError(Exception):
+ """Raised when an error occurs during context generation.
+
+ This exception is principally used in contrib.openstack.context
+ """
+ pass
diff --git a/tests/charmhelpers/contrib/openstack/utils.py b/tests/charmhelpers/contrib/openstack/utils.py
new file mode 100644
index 00000000..e13450c1
--- /dev/null
+++ b/tests/charmhelpers/contrib/openstack/utils.py
@@ -0,0 +1,2015 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charms.
+from collections import OrderedDict
+from functools import wraps
+
+import subprocess
+import json
+import os
+import sys
+import re
+import itertools
+import functools
+import shutil
+
+import six
+import tempfile
+import traceback
+import uuid
+import yaml
+
+from charmhelpers.contrib.network import ip
+
+from charmhelpers.core import unitdata
+
+from charmhelpers.core.hookenv import (
+ action_fail,
+ action_set,
+ config,
+ log as juju_log,
+ charm_dir,
+ DEBUG,
+ INFO,
+ ERROR,
+ related_units,
+ relation_ids,
+ relation_set,
+ service_name,
+ status_set,
+ hook_name,
+ application_version_set,
+)
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+from charmhelpers.contrib.storage.linux.lvm import (
+ deactivate_lvm_volume_group,
+ is_lvm_physical_volume,
+ remove_lvm_physical_volume,
+)
+
+from charmhelpers.contrib.network.ip import (
+ get_ipv6_addr,
+ is_ipv6,
+ port_has_listener,
+)
+
+from charmhelpers.contrib.python.packages import (
+ pip_create_virtualenv,
+ pip_install,
+)
+
+from charmhelpers.core.host import (
+ lsb_release,
+ mounts,
+ umount,
+ service_running,
+ service_pause,
+ service_resume,
+ restart_on_change_helper,
+)
+from charmhelpers.fetch import (
+ apt_install,
+ apt_cache,
+ install_remote,
+ get_upstream_version
+)
+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
+from charmhelpers.contrib.openstack.exceptions import OSContextError
+
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
+
+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
+ 'restricted main multiverse universe')
+
+OPENSTACK_RELEASES = (
+ 'diablo',
+ 'essex',
+ 'folsom',
+ 'grizzly',
+ 'havana',
+ 'icehouse',
+ 'juno',
+ 'kilo',
+ 'liberty',
+ 'mitaka',
+ 'newton',
+ 'ocata',
+ 'pike',
+)
+
+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
+ ('oneiric', 'diablo'),
+ ('precise', 'essex'),
+ ('quantal', 'folsom'),
+ ('raring', 'grizzly'),
+ ('saucy', 'havana'),
+ ('trusty', 'icehouse'),
+ ('utopic', 'juno'),
+ ('vivid', 'kilo'),
+ ('wily', 'liberty'),
+ ('xenial', 'mitaka'),
+ ('yakkety', 'newton'),
+ ('zesty', 'ocata'),
+])
+
+
+OPENSTACK_CODENAMES = OrderedDict([
+ ('2011.2', 'diablo'),
+ ('2012.1', 'essex'),
+ ('2012.2', 'folsom'),
+ ('2013.1', 'grizzly'),
+ ('2013.2', 'havana'),
+ ('2014.1', 'icehouse'),
+ ('2014.2', 'juno'),
+ ('2015.1', 'kilo'),
+ ('2015.2', 'liberty'),
+ ('2016.1', 'mitaka'),
+ ('2016.2', 'newton'),
+ ('2017.1', 'ocata'),
+])
+
+# The ugly duckling - must list releases oldest to newest
+SWIFT_CODENAMES = OrderedDict([
+ ('diablo',
+ ['1.4.3']),
+ ('essex',
+ ['1.4.8']),
+ ('folsom',
+ ['1.7.4']),
+ ('grizzly',
+ ['1.7.6', '1.7.7', '1.8.0']),
+ ('havana',
+ ['1.9.0', '1.9.1', '1.10.0']),
+ ('icehouse',
+ ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+ ('juno',
+ ['2.0.0', '2.1.0', '2.2.0']),
+ ('kilo',
+ ['2.2.1', '2.2.2']),
+ ('liberty',
+ ['2.3.0', '2.4.0', '2.5.0']),
+ ('mitaka',
+ ['2.5.0', '2.6.0', '2.7.0']),
+ ('newton',
+ ['2.8.0', '2.9.0', '2.10.0']),
+ ('ocata',
+ ['2.11.0', '2.12.0', '2.13.0']),
+])
+
+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+ 'nova-common': OrderedDict([
+ ('12', 'liberty'),
+ ('13', 'mitaka'),
+ ('14', 'newton'),
+ ('15', 'ocata'),
+ ]),
+ 'neutron-common': OrderedDict([
+ ('7', 'liberty'),
+ ('8', 'mitaka'),
+ ('9', 'newton'),
+ ('10', 'ocata'),
+ ]),
+ 'cinder-common': OrderedDict([
+ ('7', 'liberty'),
+ ('8', 'mitaka'),
+ ('9', 'newton'),
+ ('10', 'ocata'),
+ ]),
+ 'keystone': OrderedDict([
+ ('8', 'liberty'),
+ ('9', 'mitaka'),
+ ('10', 'newton'),
+ ('11', 'ocata'),
+ ]),
+ 'horizon-common': OrderedDict([
+ ('8', 'liberty'),
+ ('9', 'mitaka'),
+ ('10', 'newton'),
+ ('11', 'ocata'),
+ ]),
+ 'ceilometer-common': OrderedDict([
+ ('5', 'liberty'),
+ ('6', 'mitaka'),
+ ('7', 'newton'),
+ ('8', 'ocata'),
+ ]),
+ 'heat-common': OrderedDict([
+ ('5', 'liberty'),
+ ('6', 'mitaka'),
+ ('7', 'newton'),
+ ('8', 'ocata'),
+ ]),
+ 'glance-common': OrderedDict([
+ ('11', 'liberty'),
+ ('12', 'mitaka'),
+ ('13', 'newton'),
+ ('14', 'ocata'),
+ ]),
+ 'openstack-dashboard': OrderedDict([
+ ('8', 'liberty'),
+ ('9', 'mitaka'),
+ ('10', 'newton'),
+ ('11', 'ocata'),
+ ]),
+}
+
+GIT_DEFAULT_REPOS = {
+ 'requirements': 'git://github.com/openstack/requirements',
+ 'cinder': 'git://github.com/openstack/cinder',
+ 'glance': 'git://github.com/openstack/glance',
+ 'horizon': 'git://github.com/openstack/horizon',
+ 'keystone': 'git://github.com/openstack/keystone',
+ 'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
+ 'neutron': 'git://github.com/openstack/neutron',
+ 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
+ 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
+ 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
+ 'nova': 'git://github.com/openstack/nova',
+}
+
+GIT_DEFAULT_BRANCHES = {
+ 'liberty': 'stable/liberty',
+ 'mitaka': 'stable/mitaka',
+ 'newton': 'stable/newton',
+ 'master': 'master',
+}
+
+DEFAULT_LOOPBACK_SIZE = '5G'
+
+
+class CompareOpenStackReleases(BasicStringComparator):
+ """Provide comparisons of OpenStack releases.
+
+ Use in the form of
+
+ if CompareOpenStackReleases(release) > 'mitaka':
+ # do something with mitaka
+ """
+ _list = OPENSTACK_RELEASES
+
+
+def error_out(msg):
+ juju_log("FATAL ERROR: %s" % msg, level='ERROR')
+ sys.exit(1)
+
+
+def get_os_codename_install_source(src):
+ '''Derive OpenStack release codename from a given installation source.'''
+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ rel = ''
+ if src is None:
+ return rel
+ if src in ['distro', 'distro-proposed']:
+ try:
+ rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
+ except KeyError:
+ e = 'Could not derive openstack release for '\
+ 'this Ubuntu release: %s' % ubuntu_rel
+ error_out(e)
+ return rel
+
+ if src.startswith('cloud:'):
+ ca_rel = src.split(':')[1]
+ ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+ return ca_rel
+
+ # Best guess match based on deb string provided
+ if src.startswith('deb') or src.startswith('ppa'):
+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
+ if v in src:
+ return v
+
+
+def get_os_version_install_source(src):
+ codename = get_os_codename_install_source(src)
+ return get_os_version_codename(codename)
+
+
+def get_os_codename_version(vers):
+ '''Determine OpenStack codename from version number.'''
+ try:
+ return OPENSTACK_CODENAMES[vers]
+ except KeyError:
+ e = 'Could not determine OpenStack codename for version %s' % vers
+ error_out(e)
+
+
+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
+ '''Determine OpenStack version number from codename.'''
+ for k, v in six.iteritems(version_map):
+ if v == codename:
+ return k
+ e = 'Could not derive OpenStack version for '\
+ 'codename: %s' % codename
+ error_out(e)
+
+
+def get_os_version_codename_swift(codename):
+ '''Determine OpenStack version number of swift from codename.'''
+ for k, v in six.iteritems(SWIFT_CODENAMES):
+ if k == codename:
+ return v[-1]
+ e = 'Could not derive swift version for '\
+ 'codename: %s' % codename
+ error_out(e)
+
+
+def get_swift_codename(version):
+ '''Determine OpenStack codename that corresponds to swift version.'''
+ codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+
+ if len(codenames) > 1:
+ # If more than one release codename contains this version we determine
+ # the actual codename based on the highest available install source.
+ for codename in reversed(codenames):
+ releases = UBUNTU_OPENSTACK_RELEASE
+ release = [k for k, v in six.iteritems(releases) if codename in v]
+ ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+ if codename in ret or release[0] in ret:
+ return codename
+ elif len(codenames) == 1:
+ return codenames[0]
+
+ # NOTE: fallback - attempt to match with just major.minor version
+ match = re.match('^(\d+)\.(\d+)', version)
+ if match:
+ major_minor_version = match.group(0)
+ for codename, versions in six.iteritems(SWIFT_CODENAMES):
+ for release_version in versions:
+ if release_version.startswith(major_minor_version):
+ return codename
+
+ return None
+
+
def get_os_codename_package(package, fatal=True):
    '''Derive OpenStack release codename from an installed package.

    :param package: str: name of the installed package to inspect.
    :param fatal: bool: when True, error_out() on failure; when False,
                        return None instead.
    :returns: str or None: OpenStack release codename.
    '''
    import apt_pkg as apt

    cache = apt_cache()

    try:
        pkg = cache[package]
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed here.
        if not fatal:
            return None
        # the package is unknown to the current apt cache.
        e = 'Could not determine version of package with no installation '\
            'candidate: %s' % package
        error_out(e)

    if not pkg.current_ver:
        if not fatal:
            return None
        # package is known, but no version is currently installed.
        e = 'Could not determine version of uninstalled package: %s' % package
        error_out(e)

    vers = apt.upstream_version(pkg.current_ver.ver_str)
    if 'swift' in pkg.name:
        # Fully x.y.z match for swift versions
        match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
    else:
        # x.y match only for 20XX.X
        # and ignore patch level for other packages
        match = re.match(r'^(\d+)\.(\d+)', vers)

    if match:
        vers = match.group(0)

    # Generate a major version number for newer semantic
    # versions of openstack projects
    major_vers = vers.split('.')[0]
    # >= Liberty independent project versions
    if (package in PACKAGE_CODENAMES and
            major_vers in PACKAGE_CODENAMES[package]):
        return PACKAGE_CODENAMES[package][major_vers]
    else:
        # < Liberty co-ordinated project versions
        try:
            if 'swift' in pkg.name:
                return get_swift_codename(vers)
            else:
                return OPENSTACK_CODENAMES[vers]
        except KeyError:
            if not fatal:
                return None
            e = 'Could not determine OpenStack codename for version %s' % vers
            error_out(e)
+
+
def get_os_version_package(pkg, fatal=True):
    '''Derive OpenStack version number from an installed package.'''
    codename = get_os_codename_package(pkg, fatal=fatal)

    if not codename:
        return None

    if 'swift' in pkg:
        # swift maps each codename to a tuple of point releases; report
        # the most recent one for the codename.
        for cname, version in six.iteritems(SWIFT_CODENAMES):
            if cname == codename:
                return version[-1]
    else:
        for version, cname in six.iteritems(OPENSTACK_CODENAMES):
            if cname == codename:
                return version
    # e = "Could not determine OpenStack version for package: %s" % pkg
    # error_out(e)
+
+
+os_rel = None
+
+
def reset_os_release():
    '''Unset the cached os_release version'''
    global os_rel
    # Clearing the module-level cache forces the next os_release() call to
    # re-determine the codename from scratch.
    os_rel = None
+
+
def os_release(package, base='essex', reset_cache=False):
    '''
    Returns OpenStack release codename from a cached global.

    If reset_cache then unset the cached os_release version and return the
    freshly determined version.

    If the codename can not be determined from either an installed package or
    the installation source, the earliest release supported by the charm should
    be returned.

    :param package: str: package used to derive the codename when installed.
    :param base: str: fallback codename when nothing else can be determined.
    :param reset_cache: bool: drop the cached value before resolving.
    '''
    global os_rel
    if reset_cache:
        reset_os_release()
    if os_rel:
        return os_rel
    # Resolution order: deploy-from-source config, then the installed
    # package, then the configured install source, then the fallback 'base'.
    os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or
              get_os_codename_package(package, fatal=False) or
              get_os_codename_install_source(config('openstack-origin')) or
              base)
    return os_rel
+
+
def import_key(keyid):
    """Import a PGP key into apt's keyring.

    :param keyid: str: either a full ASCII-armored public key block, or a
                       key ID to fetch from the Ubuntu keyserver.
    """
    key = keyid.strip()
    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
        # Write the key material to a temp file so apt-key can read it.
        # NOTE(review): re-opening keyfile.name by path works on POSIX; the
        # NamedTemporaryFile handle itself is not written to.
        with tempfile.NamedTemporaryFile() as keyfile:
            with open(keyfile.name, 'w') as fd:
                fd.write(key)
                fd.write("\n")

            cmd = ['apt-key', 'add', keyfile.name]
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                error_out("Error importing PGP key '%s'" % key)
    else:
        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
        juju_log("Importing PGP key from keyserver", level=DEBUG)
        cmd = ['apt-key', 'adv', '--keyserver',
               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            error_out("Error importing PGP key '%s'" % key)
+
+
def get_source_and_pgp_key(input):
    """Look for a pgp key ID or ascii-armor key in the given input.

    :param input: str: install source, optionally suffixed with '|<key>'.
    :returns: (source, key) tuple; key is None when no '|' is present.
    """
    # The previous dead assignment 'index = input.strip()' was removed: it
    # was immediately overwritten by rfind() and had no effect.
    index = input.rfind('|')
    if index < 0:
        return input, None

    key = input[index + 1:].strip('|')
    source = input[:index]
    return source, key
+
+
def configure_installation_source(rel):
    '''Configure apt installation source.

    :param rel: str: one of 'distro', 'distro-proposed', 'ppa:...',
                     'deb ...' or 'cloud:<ubuntu>-<openstack>'. 'ppa' and
                     'deb' sources may carry a '|<pgp-key>' suffix.
    '''
    if rel == 'distro':
        # stock distro archive: nothing to configure
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        # optional '|key' suffix carries a PGP key to import first
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        # Ubuntu Cloud Archive, e.g. 'cloud:xenial-ocata'
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
            'juno': 'trusty-updates/juno',
            'juno/updates': 'trusty-updates/juno',
            'juno/proposed': 'trusty-proposed/juno',
            'kilo': 'trusty-updates/kilo',
            'kilo/updates': 'trusty-updates/kilo',
            'kilo/proposed': 'trusty-proposed/kilo',
            'liberty': 'trusty-updates/liberty',
            'liberty/updates': 'trusty-updates/liberty',
            'liberty/proposed': 'trusty-proposed/liberty',
            'mitaka': 'trusty-updates/mitaka',
            'mitaka/updates': 'trusty-updates/mitaka',
            'mitaka/proposed': 'trusty-proposed/mitaka',
            'newton': 'xenial-updates/newton',
            'newton/updates': 'xenial-updates/newton',
            'newton/proposed': 'xenial-proposed/newton',
            'ocata': 'xenial-updates/ocata',
            'ocata/updates': 'xenial-updates/ocata',
            'ocata/proposed': 'xenial-proposed/ocata',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # the keyring package provides the archive's signing key
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
+
+
def config_value_changed(option):
    """
    Determine if config value changed since last call to this function.

    :param option: str: charm config option name.
    :returns: bool: True when the value differs from the stored one; False
                    on the first observation or when unchanged.
    """
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        saved = db.get(option)
        # persist the current value so the next call compares against it
        db.set(option, current)
        if saved is None:
            # first time we have seen this option: treat as unchanged
            return False
        return current != saved
+
+
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.

    :param script_path: str: path of the rc file, relative to the charm dir.
    :param env_vars: variables to export; 'script_path' itself is skipped.
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    # Open in text mode: the file was previously opened 'wb', which makes
    # the str writes below raise TypeError on python3.
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # plain loop instead of a side-effecting list comprehension
        for u, p in six.iteritems(env_vars):
            if u != "script_path":
                rc_script.write('export %s=%s\n' % (u, p))
+
+
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.

    :returns: bool: : Returns True if configured installation source offers
                      a newer version of package.

    """

    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if "swift" in package:
        # swift versions are not coordinated OpenStack version numbers, so
        # map codename -> swift version for the configured source.
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    apt.init()
    if "swift" in package:
        # For swift, only allow an upgrade within the same major version or
        # to the next major version (version_compare returns 1/0/-1).
        major_cur_vers = cur_vers.split('.', 1)[0]
        major_avail_vers = avail_vers.split('.', 1)[0]
        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
    return apt.version_compare(avail_vers, cur_vers) == 1
+
+
def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    if block_device in ['None', 'none', None]:
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        # an absolute (non-/dev) path may carry an optional '|<size>' suffix
        parts = block_device.split('|')
        if len(parts) == 2:
            bdev, size = parts
        else:
            bdev, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        # bare device name: assume it lives under /dev
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
+
+
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # unmount the device everywhere it is currently mounted
    for mountpoint, device in mounts():
        if device != block_device:
            continue
        juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                 (device, mountpoint), level=INFO)
        umount(mountpoint, persist=True)

    if is_lvm_physical_volume(block_device):
        # tear down LVM state before wiping
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
+
+
# Convenience aliases re-exported from charmhelpers.contrib.network.ip so
# existing callers of this module keep working.
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
+
+
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    """Load the oslo matchmaker ring file.

    :param mm_file: str: path of the JSON ring file.
    :returns: dict: parsed contents, or {} when the file does not exist.
    """
    if not os.path.isfile(mm_file):
        return {}
    with open(mm_file, 'r') as f:
        return json.load(f)
+
+
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Set shared-db relation data with all local IPv6 addresses (and any
    IPv6 VIPs) as the allowed database hosts.

    :param database: str: database name.
    :param database_user: str: database username.
    :param relation_prefix: str: optional prefix applied to every key.
    """
    hosts = get_ipv6_addr(dynamic_only=False)

    if config('vip'):
        vips = config('vip').split()
        for vip in vips:
            if vip and is_ipv6(vip):
                hosts.append(vip)

    # hostname is a JSON list so multiple hosts can be conveyed
    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        # re-key in place; iterate a copy of the keys since we mutate kwargs
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
+
+
def os_requires_version(ostack_release, pkg):
    """
    Decorator for hook to specify minimum supported release

    :param ostack_release: str: earliest codename the hook supports.
    :param pkg: str: package used to determine the deployed release.
    :raises Exception: when the deployed release predates ostack_release.
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # NOTE(review): codenames are compared lexicographically, which
            # matches chronological order for the releases in this file.
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            # forward kwargs and the return value, which the previous
            # wrapper silently dropped
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
+
+
def git_install_requested():
    """
    Returns true if openstack-origin-git is specified.
    """
    requested = config('openstack-origin-git')
    return requested is not None
+
+
def git_os_codename_install_source(projects_yaml):
    """
    Returns OpenStack codename of release being installed from source.
    """
    if not git_install_requested():
        return None

    projects = _git_yaml_load(projects_yaml)

    # a bare default-branch name maps directly onto a codename
    if projects in GIT_DEFAULT_BRANCHES.keys():
        return 'ocata' if projects == 'master' else projects

    if 'release' in projects:
        release = projects['release']
        return 'ocata' if release == 'master' else release

    return None
+
+
def git_default_repos(projects_yaml):
    """
    Returns default repos if a default openstack-origin-git value is specified.

    :param projects_yaml: str: the openstack-origin-git config value.
    :returns: str: a rendered YAML repositories document when projects_yaml
                   names a default branch, otherwise projects_yaml unchanged.
    """
    service = service_name()
    core_project = service

    # six.iteritems() fix: dict.iteritems() does not exist on python3 and is
    # inconsistent with the rest of this module.
    for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
        if projects_yaml == default:

            # add the requirements repo first
            repo = {
                'name': 'requirements',
                'repository': GIT_DEFAULT_REPOS['requirements'],
                'branch': branch,
            }
            repos = [repo]

            # neutron-* and nova-* charms require some additional repos
            if service in ['neutron-api', 'neutron-gateway',
                           'neutron-openvswitch']:
                core_project = 'neutron'
                if service == 'neutron-api':
                    repo = {
                        'name': 'networking-hyperv',
                        'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
                        'branch': branch,
                    }
                    repos.append(repo)
                for project in ['neutron-fwaas', 'neutron-lbaas',
                                'neutron-vpnaas', 'nova']:
                    repo = {
                        'name': project,
                        'repository': GIT_DEFAULT_REPOS[project],
                        'branch': branch,
                    }
                    repos.append(repo)

            elif service in ['nova-cloud-controller', 'nova-compute']:
                core_project = 'nova'
                repo = {
                    'name': 'neutron',
                    'repository': GIT_DEFAULT_REPOS['neutron'],
                    'branch': branch,
                }
                repos.append(repo)
            elif service == 'openstack-dashboard':
                core_project = 'horizon'

            # finally add the current service's core project repo
            repo = {
                'name': core_project,
                'repository': GIT_DEFAULT_REPOS[core_project],
                'branch': branch,
            }
            repos.append(repo)

            return yaml.dump(dict(repositories=repos, release=default))

    return projects_yaml
+
+
def _git_yaml_load(projects_yaml):
    """
    Load the specified yaml into a dictionary.

    :param projects_yaml: str: YAML document (or a bare scalar string).
    :returns: parsed value, or None for empty input.
    """
    if not projects_yaml:
        return None

    # NOTE(review): yaml.load() without SafeLoader can construct arbitrary
    # python objects; input here comes from charm config, which the operator
    # controls, but safe_load would be preferable -- confirm before changing.
    return yaml.load(projects_yaml)
+
+
+requirements_dir = None
+
+
def git_clone_and_install(projects_yaml, core_project):
    """
    Clone/install all specified OpenStack repositories.

    The expected format of projects_yaml is:

        repositories:
          - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
             branch: 'stable/icehouse'}
          - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}

        directory: /mnt/openstack-git
        http_proxy: squid-proxy-url
        https_proxy: squid-proxy-url

    The directory, http_proxy, and https_proxy keys are optional.

    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'
    http_proxy = None

    projects = _git_yaml_load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    # snapshot os.environ so proxy settings can be undone at the end
    old_environ = dict(os.environ)

    if 'http_proxy' in projects.keys():
        http_proxy = projects['http_proxy']
        os.environ['http_proxy'] = projects['http_proxy']
    if 'https_proxy' in projects.keys():
        os.environ['https_proxy'] = projects['https_proxy']

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))

    # Upgrade setuptools and pip from default virtualenv versions. The default
    # versions in trusty break master OpenStack branch deployments.
    for p in ['pip', 'setuptools']:
        pip_install(p, upgrade=True, proxy=http_proxy,
                    venv=os.path.join(parent_dir, 'venv'))

    constraints = None
    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        depth = '1'
        if 'depth' in p.keys():
            depth = p['depth']
        if p['name'] == 'requirements':
            # requirements is validated to be first in the list, so
            # requirements_dir/constraints are set before other repos install
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=False)
            requirements_dir = repo_dir
            constraints = os.path.join(repo_dir, "upper-constraints.txt")
            # upper-constraints didn't exist until after icehouse
            if not os.path.isfile(constraints):
                constraints = None
            # use constraints unless project yaml sets use_constraints to false
            if 'use_constraints' in projects.keys():
                if not projects['use_constraints']:
                    constraints = None
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=True,
                                                     constraints=constraints)

    # restore the pre-call environment (drops any proxy vars set above)
    os.environ = old_environ
+
+
def _git_validate_projects_yaml(projects, core_project):
    """
    Validate the projects yaml.
    """
    _git_ensure_key_exists('repositories', projects)

    # every repository entry needs these three keys
    for project in projects['repositories']:
        for required in ('name', 'repository', 'branch'):
            _git_ensure_key_exists(required, project.keys())

    if projects['repositories'][0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))

    if projects['repositories'][-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))

    _git_ensure_key_exists('release', projects)
+
+
+def _git_ensure_key_exists(key, keys):
+ """
+ Ensure that key exists in keys.
+ """
+ if key not in keys:
+ error_out('openstack-origin-git key \'{}\' is missing'.format(key))
+
+
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
                                  update_requirements, constraints=None):
    """
    Clone and install a single git repository.

    :param repo: str: git URL of the repository.
    :param branch: str: branch to clone.
    :param depth: str: clone depth.
    :param parent_dir: str: directory to clone into (created if missing).
    :param http_proxy: str or None: proxy URL for pip.
    :param update_requirements: bool: sync the repo against the previously
                                      cloned global requirements repo.
    :param constraints: str or None: path to an upper-constraints file.
    :returns: str: path of the cloned repository.
    """
    if not os.path.exists(parent_dir):
        # fixed: the old message claimed the directory already existed,
        # the opposite of this branch's condition.
        juju_log('Directory does not exist at {}. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
    repo_dir = install_remote(
        repo, dest=parent_dir, branch=branch, depth=depth)

    venv = os.path.join(parent_dir, 'venv')

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(venv, repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    if http_proxy:
        pip_install(repo_dir, proxy=http_proxy, venv=venv,
                    constraints=constraints)
    else:
        pip_install(repo_dir, venv=venv, constraints=constraints)

    return repo_dir
+
+
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.

    :param venv: str: virtualenv path whose python runs update.py.
    :param package_dir: str: repo directory to update.
    :param reqs_dir: str: cloned global requirements repo directory.
    """
    orig_dir = os.getcwd()
    # update.py must run from within the requirements repo
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    # NOTE(review): not in a finally block -- assumes error_out() terminates
    # the hook, so restoring the cwd afterwards is moot. Confirm.
    os.chdir(orig_dir)
+
+
def git_pip_venv_dir(projects_yaml):
    """
    Return the pip virtualenv path.
    """
    projects = _git_yaml_load(projects_yaml)
    # honour an explicit 'directory' key, otherwise use the default root
    parent_dir = projects.get('directory', '/mnt/openstack-git')
    return os.path.join(parent_dir, 'venv')
+
+
def git_src_dir(projects_yaml, project):
    """
    Return the directory where the specified project's source is located.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')

    for entry in projects['repositories']:
        if entry['name'] == project:
            # the checkout lives under the basename of its repo URL
            return os.path.join(parent_dir,
                                os.path.basename(entry['repository']))

    return None
+
+
def git_yaml_value(projects_yaml, key):
    """
    Return the value in projects_yaml for the specified key.
    """
    projects = _git_yaml_load(projects_yaml)
    # absent keys yield None, matching the explicit membership test
    return projects.get(key, None)
+
+
def git_generate_systemd_init_files(templates_dir):
    """
    Generate systemd init files.

    Generates and installs systemd init units and script files based on the
    *.init.in files contained in the templates_dir directory.

    This code is based on the openstack-pkg-tools package and its init
    script generation, which is used by the OpenStack packages.
    """
    # Pass 1: build an init script + generated unit from each *.init.in
    for f in os.listdir(templates_dir):
        # Create the init script and systemd unit file from the template
        if f.endswith(".init.in"):
            init_in_file = f
            init_file = f[:-8]  # strip '.init.in'
            service_file = "{}.service".format(init_file)

            init_in_source = os.path.join(templates_dir, init_in_file)
            init_source = os.path.join(templates_dir, init_file)
            service_source = os.path.join(templates_dir, service_file)

            init_dest = os.path.join('/etc/init.d', init_file)
            service_dest = os.path.join('/lib/systemd/system', service_file)

            # init script = template + shared init-script-template appended
            shutil.copyfile(init_in_source, init_source)
            with open(init_source, 'a') as outfile:
                template = ('/usr/share/openstack-pkg-tools/'
                            'init-script-template')
                with open(template) as infile:
                    outfile.write('\n\n{}'.format(infile.read()))

            cmd = ['pkgos-gen-systemd-unit', init_in_source]
            subprocess.check_call(cmd)

            if os.path.exists(init_dest):
                os.remove(init_dest)
            if os.path.exists(service_dest):
                os.remove(service_dest)
            shutil.copyfile(init_source, init_dest)
            shutil.copyfile(service_source, service_dest)
            os.chmod(init_dest, 0o755)

    # Pass 2: hand-written *.service.in templates override generated units
    for f in os.listdir(templates_dir):
        # If there's a service.in file, use it instead of the generated one
        if f.endswith(".service.in"):
            service_in_file = f
            service_file = f[:-3]  # strip '.in'

            service_in_source = os.path.join(templates_dir, service_in_file)
            service_source = os.path.join(templates_dir, service_file)
            service_dest = os.path.join('/lib/systemd/system', service_file)

            shutil.copyfile(service_in_source, service_source)

            if os.path.exists(service_dest):
                os.remove(service_dest)
            shutil.copyfile(service_source, service_dest)

    # Pass 3: for init templates with no .service.in, (re)generate the unit
    for f in os.listdir(templates_dir):
        # Generate the systemd unit if there's no existing .service.in
        if f.endswith(".init.in"):
            init_in_file = f
            init_file = f[:-8]  # strip '.init.in'
            service_in_file = "{}.service.in".format(init_file)
            service_file = "{}.service".format(init_file)

            init_in_source = os.path.join(templates_dir, init_in_file)
            service_in_source = os.path.join(templates_dir, service_in_file)
            service_source = os.path.join(templates_dir, service_file)
            service_dest = os.path.join('/lib/systemd/system', service_file)

            if not os.path.exists(service_in_source):
                cmd = ['pkgos-gen-systemd-unit', init_in_source]
                subprocess.check_call(cmd)

                if os.path.exists(service_dest):
                    os.remove(service_dest)
                shutil.copyfile(service_source, service_dest)
+
+
def git_determine_usr_bin():
    """Return the /usr/bin path for Apache2 config.

    The /usr/bin path will be located in the virtualenv if the charm
    is configured to deploy from source.
    """
    if not git_install_requested():
        return '/usr/bin'
    projects_yaml = git_default_repos(config('openstack-origin-git'))
    return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
+
+
def git_determine_python_path():
    """Return the python-path for Apache2 config.

    Returns 'None' unless the charm is configured to deploy from source,
    in which case the path of the virtualenv's site-packages is returned.
    """
    if not git_install_requested():
        return None
    projects_yaml = git_default_repos(config('openstack-origin-git'))
    return os.path.join(git_pip_venv_dir(projects_yaml),
                        'lib/python2.7/site-packages')
+
+
def os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Decorator to set workload status based on complete contexts

    :param configs: a templating.OSConfigRenderer() object
    :param required_interfaces: {generic: [specific, specific2, ...]}
    :param charm_func: a callable function that returns state, message
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Run the original function first
            f(*args, **kwargs)
            # Set workload status now that contexts have been
            # acted on
            set_os_workload_status(configs, required_interfaces, charm_func)
        return wrapped_f
    return wrap
+
+
def set_os_workload_status(configs, required_interfaces, charm_func=None,
                           services=None, ports=None):
    """Set the state of the workload status for the charm.

    This calls _determine_os_workload_status() to get the new state, message
    and sets the status using status_set()

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    @returns state, message: the new workload status, user message
    """
    state, message = _determine_os_workload_status(
        configs, required_interfaces, charm_func, services, ports)
    status_set(state, message)
+
+
def _determine_os_workload_status(
        configs, required_interfaces, charm_func=None,
        services=None, ports=None):
    """Determine the state of the workload status for the charm.

    This function returns the new workload status for the charm based
    on the state of the interfaces, the paused state and whether the
    services are actually running and any specified ports are open.

    This checks:

     1. if the unit should be paused, that it is actually paused.  If so the
        state is 'maintenance' + message, else 'broken'.
     2. that the interfaces/relations are complete.  If they are not then
        it sets the state to either 'broken' or 'waiting' and an appropriate
        message.
     3. If all the relation data is set, then it checks that the actual
        services really are running.  If not it sets the state to 'broken'.

    If everything is okay then the state returns 'active'.

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    @returns state, message: the new workload status, user message
    """
    # Each _ows_check_* helper returns (None, None) when it has nothing to
    # report; the first non-None state short-circuits the later checks.
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    if state != 'maintenance' and charm_func:
        # _ows_check_charm_func() may modify the state, message
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state = 'active'
        message = "Unit is ready"
        juju_log(message, 'INFO')

    return state, message
+
+
def _ows_check_if_paused(services=None, ports=None):
    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    if the unit isn't supposed to be paused, just return None, None

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if not is_unit_paused_set():
        return None, None
    state, message = check_actually_paused(services=services, ports=ports)
    if state is None:
        # we're paused okay, so set maintenance and return
        state = "maintenance"
        message = "Paused. Use 'resume' action to resume normal service."
    return state, message
+
+
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Check the complete contexts to determine the workload status.

     - Checks for missing or incomplete contexts
     - juju log details of missing required data.
     - determines the correct workload status
     - creates an appropriate message for status_set(...)

    if there are no problems then the function returns None, None

    @param configs: a templating.OSConfigRenderer() object
    @params required_interfaces: {generic_interface: [specific_interface], }
    @returns state, message or None, None
    """
    incomplete_rel_data = incomplete_relation_data(configs,
                                                   required_interfaces)
    state = None
    message = None
    missing_relations = set()
    incomplete_relations = set()

    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        # No relation ID for the generic_interface?
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case - relation ID exists but departing/broken
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)

    # 'blocked' (missing relation) always wins over 'waiting' (incomplete)
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+ """Run a custom check function for the charm to see if it wants to
+ change the state. This is only run if not in 'maintenance' and
+ tests to see if the new state is more important that the previous
+ one determined by the interfaces/relations check.
+
+ @param state: the previously determined state so far.
+ @param message: the user orientated message so far.
+ @param charm_func: a callable function that returns state, message
+ @returns state, message strings.
+ """
+ if charm_func_with_configs:
+ charm_state, charm_message = charm_func_with_configs()
+ if charm_state != 'active' and charm_state != 'unknown':
+ state = workload_state_compare(state, charm_state)
+ if message:
+ charm_message = charm_message.replace("Incomplete relations: ",
+ "")
+ message = "{}, {}".format(message, charm_message)
+ else:
+ message = charm_message
+ return state, message
+
+
def _ows_check_services_running(services, ports):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    @param services: list of strings OR dictionary specifying services/ports
    @param ports: list of ports
    @returns state, message: strings or None, None
    """
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}"
                .format(", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}"
                .format(message))
            state = 'blocked'

    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
                .format(", ".join([str(p) for p, v in ports_open
                                   if not v])))
            state = 'blocked'

    # any failure above set state to 'blocked'; combine all findings
    if state is not None:
        message = "; ".join(messages)
        return state, message

    return None, None
+
+
+def _extract_services_list_helper(services):
+ """Extract a OrderedDict of {service: [ports]} of the supplied services
+ for use by the other functions.
+
+ The services object can either be:
+ - None : no services were passed (an empty dict is returned)
+ - a list of strings
+ - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
+ - An array of [{'service': service_name, ...}, ...]
+
+ @param services: see above
+ @returns OrderedDict(service: [ports], ...)
+ """
+ if services is None:
+ return {}
+ if isinstance(services, dict):
+ services = services.values()
+ # either extract the list of services from the dictionary, or if
+ # it is a simple string, use that. i.e. works with mixed lists.
+ _s = OrderedDict()
+ for s in services:
+ if isinstance(s, dict) and 'service' in s:
+ _s[s['service']] = s.get('ports', [])
+ if isinstance(s, str):
+ _s[s] = []
+ return _s
+
+
def _check_running_services(services):
    """Return the running state for each of the supplied services.

    @param services: OrderedDict of strings: [ports], one for each service
        to check.
    @returns ([(service, boolean), ...], [boolean]): the per-service
        (name, running?) pairs, plus the bare boolean list in the same
        order as the services.
    """
    states = [service_running(name) for name in services]
    return list(zip(services, states)), states
+
+
def _check_listening_on_services_ports(services, test=False):
    """Check that the unit is actually listening (has the port open) on the
    ports that the service specifies are open. If test is True then the
    function returns the services with ports that are open rather than
    closed.

    Returns an OrderedDict of service: ports and a list of booleans

    @param services: OrderedDict(service: [port, ...], ...)
    @param test: default=False, if False, test for closed, otherwise open.
    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
    """
    test = not(not(test))  # ensure test is True or False
    # flatten all ports so they can be probed in a single pass
    all_ports = list(itertools.chain(*services.values()))
    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
    map_ports = OrderedDict()
    # keep only the ports whose observed state matches what we look for
    matched_ports = [p for p, opened in zip(all_ports, ports_states)
                     if opened == test]  # essentially opened xor test
    for service, ports in services.items():
        # NOTE: set() drops duplicates/ordering within a service's ports
        set_ports = set(ports).intersection(matched_ports)
        if set_ports:
            map_ports[service] = set_ports
    return map_ports, ports_states
+
+
def _check_listening_on_ports_list(ports):
    """Check which of the given ports have a listener on 0.0.0.0.

    @param ports: LIST of port numbers.
    @returns [(port_num, boolean), ...], [boolean]: pairs of
        (port, listening?) plus the bare boolean list in the same order.
    """
    listening = [port_has_listener('0.0.0.0', port) for port in ports]
    return zip(ports, listening), listening
+
+
+def _filter_tuples(services_states, state):
+ """Return a simple list from a list of tuples according to the condition
+
+ @param services_states: LIST of (string, boolean): service and running
+ state.
+ @param state: Boolean to match the tuple against.
+ @returns [LIST of strings] that matched the tuple RHS.
+ """
+ return [s for s, b in services_states if b == state]
+
+
def workload_state_compare(current_workload_state, workload_state):
    """Return the higher-priority of the two given workload states."""
    # known states, least- to most-important
    priority = {'unknown': -1,
                'active': 0,
                'maintenance': 1,
                'waiting': 2,
                'blocked': 3,
                }

    # anything unrecognised is treated as 'unknown' (lowest priority)
    previous = (current_workload_state
                if current_workload_state in priority else 'unknown')
    proposed = workload_state if workload_state in priority else 'unknown'

    # on a tie the (normalised) proposed state wins, which is the same
    # string as the previous one since priorities are unique
    if priority[previous] > priority[proposed]:
        return previous
    return proposed
+
+
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces
    Return dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces.
    Example:
    required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    The interface is said to be satisfied if anyone of the interfaces in the
    list has a complete context.

    Return dictionary of incomplete or missing required contexts with relation
    status of interfaces and any missing data points. Example:
        {'message':
             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
              'zeromq-configuration': {'related': False}},
         'identity':
             {'identity-service': {'related': False}},
         'database':
             {'pgsql-db': {'related': False},
              'shared-db': {'related': True}}}
    """
    complete_ctxts = configs.complete_contexts()
    # a required interface type is unsatisfied when none of its candidate
    # interfaces has rendered a complete context
    incomplete_relations = [
        svc_type
        for svc_type, interfaces in required_interfaces.items()
        if not set(interfaces).intersection(complete_ctxts)]
    return {
        i: configs.get_incomplete_context_data(required_interfaces[i])
        for i in incomplete_relations}
+
+
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                # catch Exception rather than using a bare 'except:' so
                # that SystemExit/KeyboardInterrupt are not reported as
                # an upgrade failure
                except Exception:
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
+
+
def remote_restart(rel_name, remote_service=None):
    """Trigger a restart on remote principal units by publishing a fresh
    restart-trigger UUID on every relation of the given type.

    @param rel_name: name of the relation to send the trigger down.
    @param remote_service: OPTIONAL name of the remote service to restart;
        forwarded to the principal as 'remote-service'.
    """
    trigger = {
        'restart-trigger': str(uuid.uuid4()),
    }
    if remote_service:
        trigger['remote-service'] = remote_service
    for rid in relation_ids(rel_name):
        # This subordinate can be related to two separate services using
        # different subordinate relations so only issue the restart if
        # the principal is connected down the relation we think it is
        if related_units(relid=rid):
            relation_set(relation_id=rid,
                         relation_settings=trigger,
                         )
+
+
def check_actually_paused(services=None, ports=None):
    """Check that services listed in the services object and ports
    are actually closed (not listened to), to verify that the unit is
    properly paused.

    @param services: See _extract_services_list_helper
    @param ports: OPTIONAL list of ports that should not have listeners
    @returns status, : string for status (None if okay)
             message : string for problem for status_set
    """
    state = None
    message = None
    messages = []
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, services_states = _check_running_services(services)
        if any(services_states):
            # there shouldn't be any running so this is a problem
            messages.append("these services running: {}"
                            .format(", ".join(
                                _filter_tuples(services_running, True))))
            state = "blocked"
        # test=True inverts the check: collect ports that ARE open
        ports_open, ports_open_bools = (
            _check_listening_on_services_ports(services, True))
        if any(ports_open_bools):
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in ports_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "these service:ports are open: {}".format(message))
            state = 'blocked'
    if ports is not None:
        ports_open, bools = _check_listening_on_ports_list(ports)
        if any(bools):
            messages.append(
                "these ports which should be closed, but are open: {}"
                .format(", ".join([str(p) for p, v in ports_open if v])))
            state = 'blocked'
    if messages:
        message = ("Services should be paused but {}"
                   .format(", ".join(messages)))
    return state, message
+
+
def set_unit_paused():
    """Record in the local kv() store that the unit is paused.

    This does NOT actually pause the unit
    """
    with unitdata.HookData()() as hook_data:
        storage = hook_data[0]
        storage.set('unit-paused', True)
+
+
def clear_unit_paused():
    """Clear the paused marker from the local kv() store.

    This does NOT actually restart any services - it only clears the
    local state.
    """
    with unitdata.HookData()() as hook_data:
        storage = hook_data[0]
        storage.set('unit-paused', False)
+
+
def is_unit_paused_set():
    """Return the state of the kv().get('unit-paused').
    This does NOT verify that the unit really is paused.

    To help with units that don't have HookData() (testing)
    if it excepts, return False

    @returns boolean True if the unit is recorded as paused, else False.
    """
    try:
        with unitdata.HookData()() as t:
            kv = t[0]
            # transform something truth-y into a Boolean.
            return bool(kv.get('unit-paused'))
    # narrow from a bare 'except:' so that SystemExit/KeyboardInterrupt
    # still propagate; any other failure means "no usable HookData"
    except Exception:
        return False
+
+
def pause_unit(assess_status_func, services=None, ports=None,
               charm_func=None):
    """Pause a unit by stopping the services and setting 'unit-paused'
    in the local kv() store.

    Also checks that the services have stopped and ports are no longer
    being listened to.

    An optional charm_func() can be called that can either raise an
    Exception or return non None, None to indicate that the unit
    didn't pause cleanly.

    The signature for charm_func is:
    charm_func() -> message: string

    charm_func() is executed after any services are stopped, if supplied.

    The services object can either be:
      - None : no services were passed (an empty dict is returned)
      - a list of strings
      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
      - An array of [{'service': service_name, ...}, ...]

    @param assess_status_func: (f() -> message: string | None) or None
    @param services: OPTIONAL see above
    @param ports: OPTIONAL list of port
    @param charm_func: function to run for custom charm pausing.
    @returns None
    @raises Exception(message) on an error for action_fail().
    """
    services = _extract_services_list_helper(services)
    messages = []
    if services:
        for service in services.keys():
            stopped = service_pause(service)
            if not stopped:
                messages.append("{} didn't stop cleanly.".format(service))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            # BUG FIX: previously 'message.append(str(e))' - 'message' is
            # unbound when charm_func() raises, so the NameError masked
            # the real failure; collect the error on 'messages' instead.
            messages.append(str(e))
    set_unit_paused()
    if assess_status_func:
        message = assess_status_func()
        if message:
            messages.append(message)
    if messages:
        raise Exception("Couldn't pause: {}".format("; ".join(messages)))
+
+
def resume_unit(assess_status_func, services=None, ports=None,
                charm_func=None):
    """Resume a unit by starting the services and clearning 'unit-paused'
    in the local kv() store.

    Also checks that the services have started and ports are being listened to.

    An optional charm_func() can be called that can either raise an
    Exception or return non None to indicate that the unit
    didn't resume cleanly.

    The signature for charm_func is:
    charm_func() -> message: string

    charm_func() is executed after any services are started, if supplied.

    The services object can either be:
      - None : no services were passed (an empty dict is returned)
      - a list of strings
      - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
      - An array of [{'service': service_name, ...}, ...]

    @param assess_status_func: (f() -> message: string | None) or None
    @param services: OPTIONAL see above
    @param ports: OPTIONAL list of port
    @param charm_func: function to run for custom charm resuming.
    @returns None
    @raises Exception(message) on an error for action_fail().
    """
    services = _extract_services_list_helper(services)
    messages = []
    if services:
        for service in services.keys():
            started = service_resume(service)
            if not started:
                messages.append("{} didn't start cleanly.".format(service))
    if charm_func:
        try:
            message = charm_func()
            if message:
                messages.append(message)
        except Exception as e:
            # BUG FIX: previously 'message.append(str(e))' - 'message' is
            # unbound when charm_func() raises, so the NameError masked
            # the real failure; collect the error on 'messages' instead.
            messages.append(str(e))
    clear_unit_paused()
    if assess_status_func:
        message = assess_status_func()
        if message:
            messages.append(message)
    if messages:
        raise Exception("Couldn't resume: {}".format("; ".join(messages)))
+
+
def make_assess_status_func(*args, **kwargs):
    """Creates an assess_status_func() suitable for handing to pause_unit()
    and resume_unit().

    This uses the _determine_os_workload_status(...) function to determine
    what the workload_status should be for the unit. If the unit is
    not in maintenance or active states, then the message is returned to
    the caller. This is so an action that doesn't result in either a
    complete pause or complete resume can signal failure with an action_fail()
    """
    def _assess_status_func():
        state, message = _determine_os_workload_status(*args, **kwargs)
        status_set(state, message)
        # only a 'bad' state hands the message back for action_fail()
        return message if state not in ('maintenance', 'active') else None

    return _assess_status_func
+
+
def pausable_restart_on_change(restart_map, stopstart=False,
                               restart_functions=None):
    """A restart_on_change decorator that checks to see if the unit is
    paused. If it is paused then the decorated function doesn't fire.

    This is provided as a helper, as the @restart_on_change(...) decorator
    is in core.host, yet the openstack specific helpers are in this file
    (contrib.openstack.utils). Thus, this needs to be an optional feature
    for openstack charms (or charms that wish to use the openstack
    pause/resume type features).

    It is used as follows:

        from contrib.openstack.utils import (
            pausable_restart_on_change as restart_on_change)

        @restart_on_change(restart_map, stopstart=<boolean>)
        def some_hook(...):
            pass

    see core.utils.restart_on_change() for more details.

    @param restart_map: the restart map {conf_file: [services]}
    @param stopstart: DEFAULT false; whether to stop, start or just restart
    @param restart_functions: OPTIONAL {svc: func} overrides for restarting
        individual services.
    @returns decorator to use a restart_on_change with pausability
    """
    def wrap(f):
        # f is the hook function being decorated
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            if is_unit_paused_set():
                # paused: run the hook but skip any service restarts
                return f(*args, **kwargs)
            # otherwise, normal restart_on_change functionality
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)), restart_map, stopstart,
                restart_functions)
        return wrapped_f
    return wrap
+
+
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

      1. A string in the simple format of key=value pairs, with the possibility
         of specifying multiple key value pairs within the same string. For
         example, a string in the format of 'key1=value1, key2=value2' will
         return a dict of:

             {'key1': 'value1',
              'key2': 'value2'}.

      2. A string in the above format, but supporting a comma-delimited list
         of values for the same key. For example, a string in the format of
         'key1=value1, key2=value2,value3,value4' will return a dict of:

             {'key1': 'value1',
              'key2': 'value2,value3,value4'}

      3. A string containing a colon character (:) prior to an equal
         character (=) will be treated as yaml and parsed as such. This can be
         used to specify more complex key value pairs. For example,
         a string in the format of 'key1: subkey1=value1, subkey2=value2' will
         return a dict of:

             {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        juju_log("config_flags is not in expected format (key=value)",
                 level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    # each split[i+1] holds "value[,next_key]" - peel the value off the
    # front and (except for the last pair) the next key off the back
    for i in range(0, limit - 1):
        current = split[i]
        next = split[i + 1]  # NOTE: shadows the builtin next()
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            # last pair, or no comma: the whole fragment is the value
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                juju_log("Invalid config value(s) at index %s" % (i),
                         level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
+
+
def os_application_version_set(package):
    '''Set version of application for Juju 2.0 and later

    @param package: package whose upstream version labels the application
    '''
    application_version = get_upstream_version(package)
    # NOTE(jamespage) if not able to figure out package version, fallback to
    # openstack codename version detection.
    if application_version:
        application_version_set(application_version)
    else:
        application_version_set(os_release(package))
+
+
def enable_memcache(source=None, release=None, package=None):
    """Determine if memcache should be enabled on the local unit

    @param source: source string for charm, used as a last-resort way of
        determining the deployed release
    @param release: release of OpenStack currently deployed
    @param package: package to derive OpenStack version deployed
    @returns boolean Whether memcache should be enabled
    """
    _release = release
    if not _release:
        _release = os_release(package, base='icehouse')
    if not _release:
        _release = get_os_codename_install_source(source)

    return CompareOpenStackReleases(_release) >= 'mitaka'
+
+
def token_cache_pkgs(source=None, release=None):
    """Determine additional packages needed for token caching

    @param source: source string for charm
    @param release: release of OpenStack currently deployed
    @returns List of package to enable token caching
    """
    if enable_memcache(source=source, release=release):
        return ['memcached', 'python-memcache']
    return []
diff --git a/tests/charmhelpers/contrib/python/__init__.py b/tests/charmhelpers/contrib/python/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/tests/charmhelpers/contrib/python/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/python/debug.py b/tests/charmhelpers/contrib/python/debug.py
new file mode 100644
index 00000000..7d04dfa5
--- /dev/null
+++ b/tests/charmhelpers/contrib/python/debug.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import atexit
+import sys
+
+from charmhelpers.contrib.python.rpdb import Rpdb
+from charmhelpers.core.hookenv import (
+ open_port,
+ close_port,
+ ERROR,
+ log
+)
+
+__author__ = "Jorge Niedbalski "
+
+DEFAULT_ADDR = "0.0.0.0"
+DEFAULT_PORT = 4444
+
+
def _error(message):
    # thin wrapper so this module reports errors consistently via hookenv
    log(message, level=ERROR)
+
+
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
    """
    Set a trace point using the remote debugger

    @param addr: address to bind the debugger socket to
    @param port: TCP port to listen on (also opened in the firewall)
    """
    # make sure the firewall port is closed again when the hook exits
    atexit.register(close_port, port)
    try:
        log("Starting a remote python debugger session on %s:%s" % (addr,
                                                                    port))
        open_port(port)
        debugger = Rpdb(addr=addr, port=port)
        # attach to the caller's frame, not this helper's
        debugger.set_trace(sys._getframe().f_back)
    # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
    # are not swallowed
    except Exception:
        _error("Cannot start a remote debug session on %s:%s" % (addr,
                                                                 port))
diff --git a/tests/charmhelpers/contrib/python/packages.py b/tests/charmhelpers/contrib/python/packages.py
new file mode 100644
index 00000000..6e95028b
--- /dev/null
+++ b/tests/charmhelpers/contrib/python/packages.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski "
+
+
def pip_execute(*args, **kwargs):
    """Overriden pip_execute() to stop sys.path being changed.

    The act of importing main from the pip module seems to cause add wheels
    from the /usr/share/python-wheels which are installed by various tools.
    This function ensures that sys.path remains the same after the call is
    executed.
    """
    try:
        # BUG FIX: take a *copy* of sys.path. Importing pip appends to
        # sys.path in place, so a bare alias would be mutated along with
        # it and the restore below would be a no-op.
        _path = list(sys.path)
        try:
            from pip import main as _pip_execute
        except ImportError:
            apt_update()
            if six.PY2:
                apt_install('python-pip')
            else:
                apt_install('python3-pip')
            from pip import main as _pip_execute
        _pip_execute(*args, **kwargs)
    finally:
        sys.path = _path
+
+
def parse_options(given, available):
    """Yield '--key=value' strings for each given option that is available.

    Keys are processed in sorted order; options with falsy values are
    skipped, as are keys not listed in available.
    """
    for key in sorted(given):
        value = given[key]
        if value and key in available:
            yield "--{0}={1}".format(key, value)
+
+
def pip_install_requirements(requirements, constraints=None, **options):
    """Install a requirements file.

    :param constraints: Path to pip constraints file.
    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
    """
    command = ["install"]
    command.extend(parse_options(options, ('proxy', 'src', 'log')))

    command.append("-r {0}".format(requirements))
    if constraints:
        command.append("-c {0}".format(constraints))
        log("Installing from file: {} with constraints {} "
            "and options: {}".format(requirements, constraints, command))
    else:
        log("Installing from file: {} with options: {}".format(requirements,
                                                               command))
    pip_execute(command)
+
+
def pip_install(package, fatal=False, upgrade=False, venv=None,
                constraints=None, **options):
    """Install a python package"""
    # use the virtualenv's own pip binary when a venv is targeted
    if venv:
        command = [os.path.join(venv, 'bin/pip'), "install"]
    else:
        command = ["install"]

    command.extend(
        parse_options(options, ('proxy', 'src', 'log', 'index-url')))

    if upgrade:
        command.append('--upgrade')
    if constraints:
        command.extend(['-c', constraints])

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    if venv:
        subprocess.check_call(command)
    else:
        pip_execute(command)
+
+
def pip_uninstall(package, **options):
    """Uninstall a python package"""
    command = ["uninstall", "-q", "-y"]
    command.extend(parse_options(options, ('proxy', 'log')))

    # accept either a single package name or a list of them
    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)
+
+
def pip_list():
    """Returns the list of current python installed packages
    """
    # delegates to 'pip list'; NOTE(review): pip's main() appears to
    # return an exit status rather than the package list - verify callers
    return pip_execute(["list"])
+
+
def pip_create_virtualenv(path=None):
    """Create an isolated Python environment."""
    pkg = 'python-virtualenv' if six.PY2 else 'python3-virtualenv'
    apt_install(pkg)

    # default the venv location to <charm_dir>/venv
    venv_path = path if path else os.path.join(charm_dir(), 'venv')

    if not os.path.exists(venv_path):
        subprocess.check_call(['virtualenv', venv_path])
diff --git a/tests/charmhelpers/contrib/python/rpdb.py b/tests/charmhelpers/contrib/python/rpdb.py
new file mode 100644
index 00000000..9b31610c
--- /dev/null
+++ b/tests/charmhelpers/contrib/python/rpdb.py
@@ -0,0 +1,56 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Remote Python Debugger (pdb wrapper)."""
+
+import pdb
+import socket
+import sys
+
+__author__ = "Bertrand Janin "
+__version__ = "0.1.3"
+
+
class Rpdb(pdb.Pdb):
    # Pdb subclass that serves the debugger prompt over a TCP socket so a
    # remote client (e.g. telnet/nc) can drive the session.

    def __init__(self, addr="127.0.0.1", port=4444):
        """Initialize the socket and initialize pdb."""

        # Backup stdin and stdout before replacing them by the socket handle
        self.old_stdout = sys.stdout
        self.old_stdin = sys.stdin

        # Open a 'reusable' socket to let the webapp reload on the same port
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.skt.bind((addr, port))
        self.skt.listen(1)
        # NOTE: accept() blocks here until a debugging client connects
        (clientsocket, address) = self.skt.accept()
        handle = clientsocket.makefile('rw')
        pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
        # route the whole process's stdio through the client connection
        sys.stdout = sys.stdin = handle

    def shutdown(self):
        """Revert stdin and stdout, close the socket."""
        sys.stdout = self.old_stdout
        sys.stdin = self.old_stdin
        self.skt.close()
        self.set_continue()

    def do_continue(self, arg):
        """Stop all operation on ``continue``."""
        self.shutdown()
        return 1

    # every way of leaving the session tears the socket down
    do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/tests/charmhelpers/contrib/python/version.py b/tests/charmhelpers/contrib/python/version.py
new file mode 100644
index 00000000..3eb42103
--- /dev/null
+++ b/tests/charmhelpers/contrib/python/version.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+__author__ = "Jorge Niedbalski "
+
+
def current_version():
    """Current system python version

    @returns the running interpreter's sys.version_info named tuple.
    """
    return sys.version_info
+
+
def current_version_string():
    """Current system python version as string major.minor.micro"""
    info = sys.version_info
    return "{}.{}.{}".format(info.major, info.minor, info.micro)
diff --git a/tests/charmhelpers/contrib/storage/__init__.py b/tests/charmhelpers/contrib/storage/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/storage/linux/__init__.py b/tests/charmhelpers/contrib/storage/linux/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/linux/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/charmhelpers/contrib/storage/linux/ceph.py b/tests/charmhelpers/contrib/storage/linux/ceph.py
new file mode 100644
index 00000000..9417d684
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/linux/ceph.py
@@ -0,0 +1,1353 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2012 Canonical Ltd.
+#
+# This file is sourced from lp:openstack-charm-helpers
+#
+# Authors:
+# James Page
+# Adam Gandelman
+#
+
+import errno
+import hashlib
+import math
+import six
+
+import os
+import shutil
+import json
+import time
+import uuid
+
+from subprocess import (
+ check_call,
+ check_output,
+ CalledProcessError,
+)
+from charmhelpers.core.hookenv import (
+ config,
+ service_name,
+ local_unit,
+ relation_get,
+ relation_ids,
+ relation_set,
+ related_units,
+ log,
+ DEBUG,
+ INFO,
+ WARNING,
+ ERROR,
+)
+from charmhelpers.core.host import (
+ mount,
+ mounts,
+ service_start,
+ service_stop,
+ service_running,
+ umount,
+)
+from charmhelpers.fetch import (
+ apt_install,
+)
+
+from charmhelpers.core.kernel import modprobe
+from charmhelpers.contrib.openstack.utils import config_flags_parser
+
+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
+KEYFILE = '/etc/ceph/ceph.client.{}.key'
+
+CEPH_CONF = """[global]
+auth supported = {auth}
+keyring = {keyring}
+mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
+"""
+
+# The number of placement groups per OSD to target for placement group
+# calculations. This number is chosen as 100 due to the ceph PG Calc
+# documentation recommending to choose 100 for clusters which are not
+# expected to increase in the foreseeable future. Since the majority of the
+# calculations are done on deployment, target the case of non-expanding
+# clusters as the default.
+DEFAULT_PGS_PER_OSD_TARGET = 100
+DEFAULT_POOL_WEIGHT = 10.0
+LEGACY_PG_COUNT = 200
+DEFAULT_MINIMUM_PGS = 2
+
+
+def validator(value, valid_type, valid_range=None):
+ """
+ Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+ Example input:
+ validator(value=1,
+ valid_type=int,
+ valid_range=[0, 2])
+ This says I'm testing value=1. It must be an int inclusive in [0,2]
+
+ :param value: The value to validate
+ :param valid_type: The type that value should be.
+ :param valid_range: A range of values that value can assume.
+ :return:
+ """
+ assert isinstance(value, valid_type), "{} is not a {}".format(
+ value,
+ valid_type)
+ if valid_range is not None:
+ assert isinstance(valid_range, list), \
+ "valid_range must be a list, was given {}".format(valid_range)
+ # If we're dealing with strings
+ if valid_type is six.string_types:
+ assert value in valid_range, \
+ "{} is not in the list {}".format(value, valid_range)
+ # Integer, float should have a min and max
+ else:
+ if len(valid_range) != 2:
+ raise ValueError(
+ "Invalid valid_range list of {} for {}. "
+ "List must be [min,max]".format(valid_range, value))
+ assert value >= valid_range[0], \
+ "{} is less than minimum allowed value of {}".format(
+ value, valid_range[0])
+ assert value <= valid_range[1], \
+ "{} is greater than maximum allowed value of {}".format(
+ value, valid_range[1])
+
+
+class PoolCreationError(Exception):
+ """
+ A custom error to inform the caller that a pool creation failed. Provides an error message
+ """
+
+ def __init__(self, message):
+ super(PoolCreationError, self).__init__(message)
+
+
+class Pool(object):
+ """
+ An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
+ Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
+ """
+
+ def __init__(self, service, name):
+ self.service = service
+ self.name = name
+
+ # Create the pool if it doesn't exist already
+ # To be implemented by subclasses
+ def create(self):
+ pass
+
+ def add_cache_tier(self, cache_pool, mode):
+ """
+ Adds a new cache tier to an existing pool.
+ :param cache_pool: six.string_types. The cache tier pool name to add.
+ :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
+ :return: None
+ """
+ # Check the input types and values
+ validator(value=cache_pool, valid_type=six.string_types)
+ validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+ check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+ def remove_cache_tier(self, cache_pool):
+ """
+ Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
+ :param cache_pool: six.string_types. The cache tier pool name to remove.
+ :return: None
+ """
+ # read-only is easy, writeback is much harder
+ mode = get_cache_mode(self.service, cache_pool)
+ version = ceph_version()
+ if mode == 'readonly':
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+ elif mode == 'writeback':
+ pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
+ 'cache-mode', cache_pool, 'forward']
+ if version >= '10.1':
+ # Jewel added a mandatory flag
+ pool_forward_cmd.append('--yes-i-really-mean-it')
+
+ check_call(pool_forward_cmd)
+ # Flush the cache and wait for it to return
+ check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+ check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
+ """Return the number of placement groups to use when creating the pool.
+
+ Returns the number of placement groups which should be specified when
+ creating the pool. This is based upon the calculation guidelines
+ provided by the Ceph Placement Group Calculator (located online at
+ http://ceph.com/pgcalc/).
+
+ The number of placement groups are calculated using the following:
+
+ (Target PGs per OSD) * (OSD #) * (%Data)
+ ----------------------------------------
+ (Pool size)
+
+ Per the upstream guidelines, the OSD # should really be considered
+ based on the number of OSDs which are eligible to be selected by the
+ pool. Since the pool creation doesn't specify any of CRUSH set rules,
+ the default rule will be dependent upon the type of pool being
+ created (replicated or erasure).
+
+ This code makes no attempt to determine the number of OSDs which can be
+ selected for the specific rule, rather it is left to the user to tune
+ in the form of 'expected-osd-count' config option.
+
+ :param pool_size: int. pool_size is either the number of replicas for
+ replicated pools or the K+M sum for erasure coded pools
+ :param percent_data: float. the percentage of data that is expected to
+ be contained in the pool for the specific OSD set. Default value
+ is to assume 10% of the data is for this pool, which is a
+ relatively low % of the data but allows for the pg_num to be
+ increased. NOTE: the default is primarily to handle the scenario
+ where related charms requiring pools has not been upgraded to
+ include an update to indicate their relative usage of the pools.
+ :return: int. The number of pgs to use.
+ """
+
+ # Note: This calculation follows the approach that is provided
+ # by the Ceph PG Calculator located at http://ceph.com/pgcalc/.
+ validator(value=pool_size, valid_type=int)
+
+ # Ensure that percent data is set to something - even with a default
+ # it can be set to None, which would wreak havoc below.
+ if percent_data is None:
+ percent_data = DEFAULT_POOL_WEIGHT
+
+ # If the expected-osd-count is specified, then use the max between
+ # the expected-osd-count and the actual osd_count
+ osd_list = get_osds(self.service)
+ expected = config('expected-osd-count') or 0
+
+ if osd_list:
+ osd_count = max(expected, len(osd_list))
+
+ # Log a message to provide some insight if the calculations claim
+ # to be off because someone is setting the expected count and
+ # there are more OSDs in reality. Try to make a proper guess
+ # based upon the cluster itself.
+ if expected and osd_count != expected:
+ log("Found more OSDs than provided expected count. "
+ "Using the actual count instead", INFO)
+ elif expected:
+ # Use the expected-osd-count in older ceph versions to allow for
+ # a more accurate pg calculations
+ osd_count = expected
+ else:
+ # NOTE(james-page): Default to 200 for older ceph versions
+ # which don't support OSD query from cli
+ return LEGACY_PG_COUNT
+
+ percent_data /= 100.0
+ target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
+ num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
+
+ # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
+ # reasonable data distribution in minimal OSD configurations
+ if num_pg < DEFAULT_MINIMUM_PGS:
+ num_pg = DEFAULT_MINIMUM_PGS
+
+ # The CRUSH algorithm has a slight optimization for placement groups
+ # with powers of 2 so find the nearest power of 2. If the nearest
+ # power of 2 is more than 25% below the original value, the next
+ # highest value is used. To do this, find the nearest power of 2 such
+ # that 2^n <= num_pg, check to see if its within the 25% tolerance.
+ exponent = math.floor(math.log(num_pg, 2))
+ nearest = 2 ** exponent
+ if (num_pg - nearest) > (num_pg * 0.25):
+ # Choose the next highest power of 2 since the nearest is more
+ # than 25% below the original value.
+ return int(nearest * 2)
+ else:
+ return int(nearest)
+
+
+class ReplicatedPool(Pool):
+ def __init__(self, service, name, pg_num=None, replicas=2,
+ percent_data=10.0):
+ super(ReplicatedPool, self).__init__(service=service, name=name)
+ self.replicas = replicas
+ if pg_num:
+ # Since the number of placement groups were specified, ensure
+ # that there aren't too many created.
+ max_pgs = self.get_pgs(self.replicas, 100.0)
+ self.pg_num = min(pg_num, max_pgs)
+ else:
+ self.pg_num = self.get_pgs(self.replicas, percent_data)
+
+ def create(self):
+ if not pool_exists(self.service, self.name):
+ # Create it
+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+ self.name, str(self.pg_num)]
+ try:
+ check_call(cmd)
+ # Set the pool replica size
+ update_pool(client=self.service,
+ pool=self.name,
+ settings={'size': str(self.replicas)})
+ except CalledProcessError:
+ raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+ def __init__(self, service, name, erasure_code_profile="default",
+ percent_data=10.0):
+ super(ErasurePool, self).__init__(service=service, name=name)
+ self.erasure_code_profile = erasure_code_profile
+ self.percent_data = percent_data
+
+ def create(self):
+ if not pool_exists(self.service, self.name):
+ # Try to find the erasure profile information in order to properly
+ # size the number of placement groups. The size of an erasure
+ # coded placement group is calculated as k+m.
+ erasure_profile = get_erasure_profile(self.service,
+ self.erasure_code_profile)
+
+ # Check for errors
+ if erasure_profile is None:
+ msg = ("Failed to discover erasure profile named "
+ "{}".format(self.erasure_code_profile))
+ log(msg, level=ERROR)
+ raise PoolCreationError(msg)
+ if 'k' not in erasure_profile or 'm' not in erasure_profile:
+ # Error
+ msg = ("Unable to find k (data chunks) or m (coding chunks) "
+ "in erasure profile {}".format(erasure_profile))
+ log(msg, level=ERROR)
+ raise PoolCreationError(msg)
+
+ k = int(erasure_profile['k'])
+ m = int(erasure_profile['m'])
+ pgs = self.get_pgs(k + m, self.percent_data)
+ # Create it
+ cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+ self.name, str(pgs), str(pgs),
+ 'erasure', self.erasure_code_profile]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+ """Get an existing erasure code profile if it already exists.
+ Returns json formatted output"""
+
+
+def get_mon_map(service):
+ """
+ Returns the current monitor map.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :return: json string. :raise: ValueError if the monmap fails to parse.
+ Also raises CalledProcessError if our ceph command fails
+ """
+ try:
+ mon_status = check_output(
+ ['ceph', '--id', service,
+ 'mon_status', '--format=json']).decode('UTF-8')
+ try:
+ return json.loads(mon_status)
+ except ValueError as v:
+ log("Unable to parse mon_status json: {}. Error: {}".format(
+ mon_status, v))
+ raise
+ except CalledProcessError as e:
+ log("mon_status command failed with message: {}".format(
+ e.output))
+ raise
+
+
+def hash_monitor_names(service):
+ """
+ Uses the get_mon_map() function to get information about the monitor
+ cluster.
+ Hash the name of each monitor. Return a sorted list of monitor hashes
+ in an ascending order.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :rtype : dict. json dict of monitor name, ip address and rank
+ example: {
+ 'name': 'ip-172-31-13-165',
+ 'rank': 0,
+ 'addr': '172.31.13.165:6789/0'}
+ """
+ try:
+ hash_list = []
+ monitor_list = get_mon_map(service=service)
+ if monitor_list['monmap']['mons']:
+ for mon in monitor_list['monmap']['mons']:
+ hash_list.append(
+ hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
+ return sorted(hash_list)
+ else:
+ return None
+ except (ValueError, CalledProcessError):
+ raise
+
+
+def monitor_key_delete(service, key):
+ """
+ Delete a key and value pair from the monitor cluster
+ :param service: six.string_types. The Ceph user name to run the command under
+ Deletes a key value pair on the monitor cluster.
+ :param key: six.string_types. The key to delete.
+ """
+ try:
+ check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'del', str(key)])
+ except CalledProcessError as e:
+ log("Monitor config-key put failed with message: {}".format(
+ e.output))
+ raise
+
+
+def monitor_key_set(service, key, value):
+ """
+ Sets a key value pair on the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to set.
+ :param value: The value to set. This will be converted to a string
+ before setting
+ """
+ try:
+ check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'put', str(key), str(value)])
+ except CalledProcessError as e:
+ log("Monitor config-key put failed with message: {}".format(
+ e.output))
+ raise
+
+
+def monitor_key_get(service, key):
+ """
+ Gets the value of an existing key in the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to search for.
+ :return: Returns the value of that key or None if not found.
+ """
+ try:
+ output = check_output(
+ ['ceph', '--id', service,
+ 'config-key', 'get', str(key)])
+ return output
+ except CalledProcessError as e:
+ log("Monitor config-key get failed with message: {}".format(
+ e.output))
+ return None
+
+
+def monitor_key_exists(service, key):
+ """
+ Searches for the existence of a key in the monitor cluster.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param key: six.string_types. The key to search for
+ :return: Returns True if the key exists, False if not and raises an
+ exception if an unknown error occurs. :raise: CalledProcessError if
+ an unknown error occurs
+ """
+ try:
+ check_call(
+ ['ceph', '--id', service,
+ 'config-key', 'exists', str(key)])
+ # I can return true here regardless because Ceph returns
+ # ENOENT if the key wasn't found
+ return True
+ except CalledProcessError as e:
+ if e.returncode == errno.ENOENT:
+ return False
+ else:
+ log("Unknown error from ceph config-get exists: {} {}".format(
+ e.returncode, e.output))
+ raise
+
+
+def get_erasure_profile(service, name):
+ """
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param name:
+ :return:
+ """
+ try:
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'erasure-code-profile', 'get',
+ name, '--format=json']).decode('UTF-8')
+ return json.loads(out)
+ except (CalledProcessError, OSError, ValueError):
+ return None
+
+
+def pool_set(service, pool_name, key, value):
+ """
+ Sets a value for a RADOS pool in ceph.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :param key: six.string_types
+ :param value:
+ :return: None. Can raise CalledProcessError
+ """
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+ """
+ Snapshots a RADOS pool in ceph.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :param snapshot_name: six.string_types
+ :return: None. Can raise CalledProcessError
+ """
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+ """
+ Remove a snapshot from a RADOS pool in ceph.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :param snapshot_name: six.string_types
+ :return: None. Can raise CalledProcessError
+ """
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+# max_bytes should be an int or long
+def set_pool_quota(service, pool_name, max_bytes):
+ """
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :param max_bytes: int or long
+ :return: None. Can raise CalledProcessError
+ """
+ # Set a byte quota on a RADOS pool in ceph.
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
+ 'max_bytes', str(max_bytes)]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def remove_pool_quota(service, pool_name):
+ """
+ Set a byte quota on a RADOS pool in ceph.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :return: None. Can raise CalledProcessError
+ """
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def remove_erasure_profile(service, profile_name):
+ """
+ Create a new erasure code profile if one does not already exist for it. Updates
+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+ for more details
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param profile_name: six.string_types
+ :return: None. Can raise CalledProcessError
+ """
+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
+ profile_name]
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
+ failure_domain='host',
+ data_chunks=2, coding_chunks=1,
+ locality=None, durability_estimator=None):
+ """
+ Create a new erasure code profile if one does not already exist for it. Updates
+ the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+ for more details
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param profile_name: six.string_types
+ :param erasure_plugin_name: six.string_types
+ :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
+ 'room', 'root', 'row'])
+ :param data_chunks: int
+ :param coding_chunks: int
+ :param locality: int
+ :param durability_estimator: int
+ :return: None. Can raise CalledProcessError
+ """
+ # Ensure this failure_domain is allowed by Ceph
+ validator(failure_domain, six.string_types,
+ ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+
+ cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+ 'ruleset_failure_domain=' + failure_domain]
+ if locality is not None and durability_estimator is not None:
+ raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+
+ # Add plugin specific information
+ if locality is not None:
+ # For local erasure codes
+ cmd.append('l=' + str(locality))
+ if durability_estimator is not None:
+ # For Shec erasure codes
+ cmd.append('c=' + str(durability_estimator))
+
+ if erasure_profile_exists(service, profile_name):
+ cmd.append('--force')
+
+ try:
+ check_call(cmd)
+ except CalledProcessError:
+ raise
+
+
+def rename_pool(service, old_name, new_name):
+ """
+ Rename a Ceph pool from old_name to new_name
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param old_name: six.string_types
+ :param new_name: six.string_types
+ :return: None
+ """
+ validator(value=old_name, valid_type=six.string_types)
+ validator(value=new_name, valid_type=six.string_types)
+
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+ check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+ """
+ Check to see if an Erasure code profile already exists.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param name: six.string_types
+ :return: int or None
+ """
+ validator(value=name, valid_type=six.string_types)
+ try:
+ check_call(['ceph', '--id', service,
+ 'osd', 'erasure-code-profile', 'get',
+ name])
+ return True
+ except CalledProcessError:
+ return False
+
+
+def get_cache_mode(service, pool_name):
+ """
+ Find the current caching mode of the pool_name given.
+ :param service: six.string_types. The Ceph user name to run the command under
+ :param pool_name: six.string_types
+ :return: int or None
+ """
+ validator(value=service, valid_type=six.string_types)
+ validator(value=pool_name, valid_type=six.string_types)
+ out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']).decode('UTF-8')
+ try:
+ osd_json = json.loads(out)
+ for pool in osd_json['pools']:
+ if pool['pool_name'] == pool_name:
+ return pool['cache_mode']
+ return None
+ except ValueError:
+ raise
+
+
+def pool_exists(service, name):
+ """Check to see if a RADOS pool already exists."""
+ try:
+ out = check_output(['rados', '--id', service,
+ 'lspools']).decode('UTF-8')
+ except CalledProcessError:
+ return False
+
+ return name in out.split()
+
+
+def get_osds(service):
+ """Return a list of all Ceph Object Storage Daemons currently in the
+ cluster.
+ """
+ version = ceph_version()
+ if version and version >= '0.56':
+ return json.loads(check_output(['ceph', '--id', service,
+ 'osd', 'ls',
+ '--format=json']).decode('UTF-8'))
+
+ return None
+
+
+def install():
+ """Basic Ceph client installation."""
+ ceph_dir = "/etc/ceph"
+ if not os.path.exists(ceph_dir):
+ os.mkdir(ceph_dir)
+
+ apt_install('ceph-common', fatal=True)
+
+
+def rbd_exists(service, pool, rbd_img):
+ """Check to see if a RADOS block device exists."""
+ try:
+ out = check_output(['rbd', 'list', '--id',
+ service, '--pool', pool]).decode('UTF-8')
+ except CalledProcessError:
+ return False
+
+ return rbd_img in out
+
+
+def create_rbd_image(service, pool, image, sizemb):
+ """Create a new RADOS block device."""
+ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
+ '--pool', pool]
+ check_call(cmd)
+
+
+def update_pool(client, pool, settings):
+ cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+ for k, v in six.iteritems(settings):
+ cmd.append(k)
+ cmd.append(v)
+
+ check_call(cmd)
+
+
+def create_pool(service, name, replicas=3, pg_num=None):
+ """Create a new RADOS pool."""
+ if pool_exists(service, name):
+ log("Ceph pool {} already exists, skipping creation".format(name),
+ level=WARNING)
+ return
+
+ if not pg_num:
+ # Calculate the number of placement groups based
+ # on upstream recommended best practices.
+ osds = get_osds(service)
+ if osds:
+ pg_num = (len(osds) * 100 // replicas)
+ else:
+ # NOTE(james-page): Default to 200 for older ceph versions
+ # which don't support OSD query from cli
+ pg_num = 200
+
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
+ check_call(cmd)
+
+ update_pool(service, name, settings={'size': str(replicas)})
+
+
+def delete_pool(service, name):
+ """Delete a RADOS pool from ceph."""
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
+ '--yes-i-really-really-mean-it']
+ check_call(cmd)
+
+
+def _keyfile_path(service):
+ return KEYFILE.format(service)
+
+
+def _keyring_path(service):
+ return KEYRING.format(service)
+
+
+def create_keyring(service, key):
+ """Create a new Ceph keyring containing key."""
+ keyring = _keyring_path(service)
+ if os.path.exists(keyring):
+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
+ return
+
+ cmd = ['ceph-authtool', keyring, '--create-keyring',
+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
+ check_call(cmd)
+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+
+
+def delete_keyring(service):
+ """Delete an existing Ceph keyring."""
+ keyring = _keyring_path(service)
+ if not os.path.exists(keyring):
+ log('Keyring does not exist at %s' % keyring, level=WARNING)
+ return
+
+ os.remove(keyring)
+ log('Deleted ring at %s.' % keyring, level=INFO)
+
+
+def create_key_file(service, key):
+ """Create a file containing key."""
+ keyfile = _keyfile_path(service)
+ if os.path.exists(keyfile):
+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
+ return
+
+ with open(keyfile, 'w') as fd:
+ fd.write(key)
+
+ log('Created new keyfile at %s.' % keyfile, level=INFO)
+
+
+def get_ceph_nodes(relation='ceph'):
+ """Query named relation to determine current nodes."""
+ hosts = []
+ for r_id in relation_ids(relation):
+ for unit in related_units(r_id):
+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
+
+ return hosts
+
+
+def configure(service, key, auth, use_syslog):
+ """Perform basic configuration of Ceph."""
+ create_keyring(service, key)
+ create_key_file(service, key)
+ hosts = get_ceph_nodes()
+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
+ ceph_conf.write(CEPH_CONF.format(auth=auth,
+ keyring=_keyring_path(service),
+ mon_hosts=",".join(map(str, hosts)),
+ use_syslog=use_syslog))
+ modprobe('rbd')
+
+
+def image_mapped(name):
+ """Determine whether a RADOS block device is mapped locally."""
+ try:
+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
+ except CalledProcessError:
+ return False
+
+ return name in out
+
+
+def map_block_storage(service, pool, image):
+ """Map a RADOS block device for local use."""
+ cmd = [
+ 'rbd',
+ 'map',
+ '{}/{}'.format(pool, image),
+ '--user',
+ service,
+ '--secret',
+ _keyfile_path(service),
+ ]
+ check_call(cmd)
+
+
+def filesystem_mounted(fs):
+ """Determine whether a filesytems is already mounted."""
+ return fs in [f for f, m in mounts()]
+
+
+def make_filesystem(blk_device, fstype='ext4', timeout=10):
+ """Make a new filesystem on the specified block device."""
+ count = 0
+ e_noent = errno.ENOENT
+ while not os.path.exists(blk_device):
+ if count >= timeout:
+ log('Gave up waiting on block device %s' % blk_device,
+ level=ERROR)
+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
+
+ log('Waiting for block device %s to appear' % blk_device,
+ level=DEBUG)
+ count += 1
+ time.sleep(1)
+ else:
+ log('Formatting block device %s as filesystem %s.' %
+ (blk_device, fstype), level=INFO)
+ check_call(['mkfs', '-t', fstype, blk_device])
+
+
+def place_data_on_block_device(blk_device, data_src_dst):
+ """Migrate data in data_src_dst to blk_device and then remount."""
+ # mount block device into /mnt
+ mount(blk_device, '/mnt')
+ # copy data to /mnt
+ copy_files(data_src_dst, '/mnt')
+ # umount block device
+ umount('/mnt')
+ # Grab user/group ID's from original source
+ _dir = os.stat(data_src_dst)
+ uid = _dir.st_uid
+ gid = _dir.st_gid
+ # re-mount where the data should originally be
+ # TODO: persist is currently a NO-OP in core.host
+ mount(blk_device, data_src_dst, persist=True)
+ # ensure original ownership of new mount.
+ os.chown(data_src_dst, uid, gid)
+
+
+def copy_files(src, dst, symlinks=False, ignore=None):
+ """Copy files from src to dst."""
+ for item in os.listdir(src):
+ s = os.path.join(src, item)
+ d = os.path.join(dst, item)
+ if os.path.isdir(s):
+ shutil.copytree(s, d, symlinks, ignore)
+ else:
+ shutil.copy2(s, d)
+
+
+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
+ blk_device, fstype, system_services=[],
+ replicas=3):
+ """NOTE: This function must only be called from a single service unit for
+ the same rbd_img otherwise data loss will occur.
+
+ Ensures given pool and RBD image exists, is mapped to a block device,
+ and the device is formatted and mounted at the given mount_point.
+
+ If formatting a device for the first time, data existing at mount_point
+ will be migrated to the RBD device before being re-mounted.
+
+ All services listed in system_services will be stopped prior to data
+ migration and restarted when complete.
+ """
+ # Ensure pool, RBD image, RBD mappings are in place.
+ if not pool_exists(service, pool):
+ log('Creating new pool {}.'.format(pool), level=INFO)
+ create_pool(service, pool, replicas=replicas)
+
+ if not rbd_exists(service, pool, rbd_img):
+ log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
+ create_rbd_image(service, pool, rbd_img, sizemb)
+
+ if not image_mapped(rbd_img):
+ log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
+ level=INFO)
+ map_block_storage(service, pool, rbd_img)
+
+ # make file system
+ # TODO: What happens if for whatever reason this is run again and
+ # the data is already in the rbd device and/or is mounted??
+ # When it is mounted already, it will fail to make the fs
+ # XXX: This is really sketchy! Need to at least add an fstab entry
+ # otherwise this hook will blow away existing data if its executed
+ # after a reboot.
+ if not filesystem_mounted(mount_point):
+ make_filesystem(blk_device, fstype)
+
+ for svc in system_services:
+ if service_running(svc):
+ log('Stopping services {} prior to migrating data.'
+ .format(svc), level=DEBUG)
+ service_stop(svc)
+
+ place_data_on_block_device(blk_device, mount_point)
+
+ for svc in system_services:
+ log('Starting service {} after migrating data.'
+ .format(svc), level=DEBUG)
+ service_start(svc)
+
+
+def ensure_ceph_keyring(service, user=None, group=None,
+ relation='ceph', key=None):
+ """Ensures a ceph keyring is created for a named service and optionally
+ ensures user and group ownership.
+
+ @returns boolean: Flag to indicate whether a key was successfully written
+ to disk based on either relation data or a supplied key
+ """
+ if not key:
+ for rid in relation_ids(relation):
+ for unit in related_units(rid):
+ key = relation_get('key', rid=rid, unit=unit)
+ if key:
+ break
+
+ if not key:
+ return False
+
+ create_keyring(service=service, key=key)
+ keyring = _keyring_path(service)
+ if user and group:
+ check_call(['chown', '%s.%s' % (user, group), keyring])
+
+ return True
+
+
+def ceph_version():
+ """Retrieve the local version of ceph."""
+ if os.path.exists('/usr/bin/ceph'):
+ cmd = ['ceph', '-v']
+ output = check_output(cmd).decode('US-ASCII')
+ output = output.split()
+ if len(output) > 3:
+ return output[2]
+ else:
+ return None
+ else:
+ return None
+
+
+class CephBrokerRq(object):
+ """Ceph broker request.
+
+ Multiple operations can be added to a request and sent to the Ceph broker
+ to be executed.
+
+ Request is json-encoded for sending over the wire.
+
+ The API is versioned and defaults to version 1.
+ """
+
+ def __init__(self, api_version=1, request_id=None):
+ self.api_version = api_version
+ if request_id:
+ self.request_id = request_id
+ else:
+ self.request_id = str(uuid.uuid1())
+ self.ops = []
+
+ def add_op_request_access_to_group(self, name, namespace=None,
+ permission=None, key_name=None):
+ """
+ Adds the requested permissions to the current service's Ceph key,
+ allowing the key to access only the specified pools
+ """
+ self.ops.append({'op': 'add-permissions-to-key', 'group': name,
+ 'namespace': namespace, 'name': key_name or service_name(),
+ 'group-permission': permission})
+
+ def add_op_create_pool(self, name, replica_count=3, pg_num=None,
+ weight=None, group=None, namespace=None):
+ """Adds an operation to create a pool.
+
+ @param pg_num: optional setting. If not provided, this value
+ will be calculated by the broker based on how many OSDs are in the
+ cluster at the time of creation. Note that, if provided, this value
+ will be capped at the current available maximum.
+ @param weight: the percentage of data the pool makes up
+ """
+ if pg_num and weight:
+ raise ValueError('pg_num and weight are mutually exclusive')
+
+ self.ops.append({'op': 'create-pool', 'name': name,
+ 'replicas': replica_count, 'pg_num': pg_num,
+ 'weight': weight, 'group': group,
+ 'group-namespace': namespace})
+
+ def set_ops(self, ops):
+ """Set request ops to provided value.
+
+ Useful for injecting ops that come from a previous request
+ to allow comparisons to ensure validity.
+ """
+ self.ops = ops
+
+ @property
+ def request(self):
+ return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+ 'request-id': self.request_id})
+
+ def _ops_equal(self, other):
+ if len(self.ops) == len(other.ops):
+ for req_no in range(0, len(self.ops)):
+ for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+ if self.ops[req_no].get(key) != other.ops[req_no].get(key):
+ return False
+ else:
+ return False
+ return True
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
+ if self.api_version == other.api_version and \
+ self._ops_equal(other):
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class CephBrokerRsp(object):
+ """Ceph broker response.
+
+ Response is json-decoded and contents provided as methods/properties.
+
+ The API is versioned and defaults to version 1.
+ """
+
+ def __init__(self, encoded_rsp):
+ self.api_version = None
+ self.rsp = json.loads(encoded_rsp)
+
+ @property
+ def request_id(self):
+ return self.rsp.get('request-id')
+
+ @property
+ def exit_code(self):
+ return self.rsp.get('exit-code')
+
+ @property
+ def exit_msg(self):
+ return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+# send_request_if_needed,
+# is_request_complete,
+# CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+# rq = CephBrokerRq()
+# rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+# if is_request_complete(rq):
+#
+# else:
+# send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+# 'ceph:8': {
+# 'ceph/0': {
+# 'auth': 'cephx',
+# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+# 'ceph-public-address': '10.5.44.103',
+# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+# 'private-address': '10.5.44.103',
+# },
+# 'glance/0': {
+# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+# '"ops": [{"replicas": 3, "name": "glance", '
+# '"op": "create-pool"}]}'),
+# 'private-address': '10.5.44.109',
+# },
+# }
+
+def get_previous_request(rid):
+ """Return the last ceph broker request sent on a given relation
+
+ @param rid: Relation id to query for request
+ """
+ request = None
+ broker_req = relation_get(attribute='broker_req', rid=rid,
+ unit=local_unit())
+ if broker_req:
+ request_data = json.loads(broker_req)
+ request = CephBrokerRq(api_version=request_data['api-version'],
+ request_id=request_data['request-id'])
+ request.set_ops(request_data['ops'])
+
+ return request
+
+
+def get_request_states(request, relation='ceph'):
+ """Return a dict of requests per relation id with their corresponding
+ completion state.
+
+ This allows a charm, which has a request for ceph, to see whether there is
+ an equivalent request already being processed and if so what state that
+ request is in.
+
+ @param request: A CephBrokerRq object
+ """
+ complete = []
+ requests = {}
+ for rid in relation_ids(relation):
+ complete = False
+ previous_request = get_previous_request(rid)
+ if request == previous_request:
+ sent = True
+ complete = is_request_complete_for_rid(previous_request, rid)
+ else:
+ sent = False
+ complete = False
+
+ requests[rid] = {
+ 'sent': sent,
+ 'complete': complete,
+ }
+
+ return requests
+
+
+def is_request_sent(request, relation='ceph'):
+ """Check to see if a functionally equivalent request has already been sent
+
+ Returns True if a similar request has been sent
+
+ @param request: A CephBrokerRq object
+ """
+ states = get_request_states(request, relation=relation)
+ for rid in states.keys():
+ if not states[rid]['sent']:
+ return False
+
+ return True
+
+
+def is_request_complete(request, relation='ceph'):
+ """Check to see if a functionally equivalent request has already been
+ completed
+
+ Returns True if a similar request has been completed
+
+ @param request: A CephBrokerRq object
+ """
+ states = get_request_states(request, relation=relation)
+ for rid in states.keys():
+ if not states[rid]['complete']:
+ return False
+
+ return True
+
+
+def is_request_complete_for_rid(request, rid):
+ """Check if a given request has been completed on the given relation
+
+ @param request: A CephBrokerRq object
+ @param rid: Relation ID
+ """
+ broker_key = get_broker_rsp_key()
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ if rdata.get(broker_key):
+ rsp = CephBrokerRsp(rdata.get(broker_key))
+ if rsp.request_id == request.request_id:
+ if not rsp.exit_code:
+ return True
+ else:
+ # The remote unit sent no reply targeted at this unit so either the
+ # remote ceph cluster does not support unit targeted replies or it
+ # has not processed our request yet.
+ if rdata.get('broker_rsp'):
+ request_data = json.loads(rdata['broker_rsp'])
+ if request_data.get('request-id'):
+ log('Ignoring legacy broker_rsp without unit key as remote '
+ 'service supports unit specific replies', level=DEBUG)
+ else:
+ log('Using legacy broker_rsp as remote service does not '
+ 'supports unit specific replies', level=DEBUG)
+ rsp = CephBrokerRsp(rdata['broker_rsp'])
+ if not rsp.exit_code:
+ return True
+
+ return False
+
+
+def get_broker_rsp_key():
+ """Return broker response key for this unit
+
+ This is the key that ceph is going to use to pass request status
+ information back to this unit
+ """
+ return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+ """Send broker request if an equivalent request has not already been sent
+
+ @param request: A CephBrokerRq object
+ """
+ if is_request_sent(request, relation=relation):
+ log('Request already sent but not complete, not sending new request',
+ level=DEBUG)
+ else:
+ for rid in relation_ids(relation):
+ log('Sending request {}'.format(request.request_id), level=DEBUG)
+ relation_set(relation_id=rid, broker_req=request.request)
+
+
+class CephConfContext(object):
+ """Ceph config (ceph.conf) context.
+
+ Supports user-provided Ceph configuration settings. Users can provide a
+ dictionary as the value for the config-flags charm option containing
+ Ceph configuration settings keyed by their section in ceph.conf.
+ """
+ def __init__(self, permitted_sections=None):
+ self.permitted_sections = permitted_sections or []
+
+ def __call__(self):
+ conf = config('config-flags')
+ if not conf:
+ return {}
+
+ conf = config_flags_parser(conf)
+ if type(conf) != dict:
+ log("Provided config-flags is not a dictionary - ignoring",
+ level=WARNING)
+ return {}
+
+ permitted = self.permitted_sections
+ if permitted:
+ diff = set(conf.keys()).difference(set(permitted))
+ if diff:
+ log("Config-flags contains invalid keys '%s' - they will be "
+ "ignored" % (', '.join(diff)), level=WARNING)
+
+ ceph_conf = {}
+ for key in conf:
+ if permitted and key not in permitted:
+ log("Ignoring key '%s'" % key, level=WARNING)
+ continue
+
+ ceph_conf[key] = conf[key]
+
+ return ceph_conf
diff --git a/tests/charmhelpers/contrib/storage/linux/loopback.py b/tests/charmhelpers/contrib/storage/linux/loopback.py
new file mode 100644
index 00000000..1d6ae6f0
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/linux/loopback.py
@@ -0,0 +1,86 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from subprocess import (
+ check_call,
+ check_output,
+)
+
+import six
+
+
+##################################################
+# loopback device helpers.
+##################################################
+def loopback_devices():
+ '''
+ Parse through 'losetup -a' output to determine currently mapped
+ loopback devices. Output is expected to look like:
+
+ /dev/loop0: [0807]:961814 (/tmp/my.img)
+
+ :returns: dict: a dict mapping {loopback_dev: backing_file}
+ '''
+ loopbacks = {}
+ cmd = ['losetup', '-a']
+ devs = [d.strip().split(' ') for d in
+ check_output(cmd).splitlines() if d != '']
+ for dev, _, f in devs:
+ loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
+ return loopbacks
+
+
+def create_loopback(file_path):
+ '''
+ Create a loopback device for a given backing file.
+
+ :returns: str: Full path to new loopback device (eg, /dev/loop0)
+ '''
+ file_path = os.path.abspath(file_path)
+ check_call(['losetup', '--find', file_path])
+ for d, f in six.iteritems(loopback_devices()):
+ if f == file_path:
+ return d
+
+
+def ensure_loopback_device(path, size):
+ '''
+ Ensure a loopback device exists for a given backing file path and size.
+ If a loopback device is not mapped to the file, a new one will be created.
+
+ TODO: Confirm size of found loopback device.
+
+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+ '''
+ for d, f in six.iteritems(loopback_devices()):
+ if f == path:
+ return d
+
+ if not os.path.exists(path):
+ cmd = ['truncate', '--size', size, path]
+ check_call(cmd)
+
+ return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+ """
+ Checks if a given device name is an existing/mapped loopback device.
+ :param device: str: Full path to the device (eg, /dev/loop1).
+ :returns: str: Path to the backing file if is a loopback device
+ empty string otherwise
+ """
+ return loopback_devices().get(device, "")
diff --git a/tests/charmhelpers/contrib/storage/linux/lvm.py b/tests/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 00000000..4719f53c
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,103 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from subprocess import (
+ CalledProcessError,
+ check_call,
+ check_output,
+ Popen,
+ PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+ '''
+ Deactivate any volume group associated with an LVM physical volume.
+
+ :param block_device: str: Full path to LVM physical volume
+ '''
+ vg = list_lvm_volume_group(block_device)
+ if vg:
+ cmd = ['vgchange', '-an', vg]
+ check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+ '''
+ Determine whether a block device is initialized as an LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: boolean: True if block device is a PV, False if not.
+ '''
+ try:
+ check_output(['pvdisplay', block_device])
+ return True
+ except CalledProcessError:
+ return False
+
+
+def remove_lvm_physical_volume(block_device):
+ '''
+ Remove LVM PV signatures from a given block device.
+
+ :param block_device: str: Full path of block device to scrub.
+ '''
+ p = Popen(['pvremove', '-ff', block_device],
+ stdin=PIPE)
+ p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+ '''
+ List LVM volume group associated with a given block device.
+
+ Assumes block device is a valid LVM PV.
+
+ :param block_device: str: Full path of block device to inspect.
+
+ :returns: str: Name of volume group associated with block device or None
+ '''
+ vg = None
+ pvd = check_output(['pvdisplay', block_device]).splitlines()
+ for l in pvd:
+ l = l.decode('UTF-8')
+ if l.strip().startswith('VG Name'):
+ vg = ' '.join(l.strip().split()[2:])
+ return vg
+
+
+def create_lvm_physical_volume(block_device):
+ '''
+ Initialize a block device as an LVM physical volume.
+
+ :param block_device: str: Full path of block device to initialize.
+
+ '''
+ check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+ '''
+ Create an LVM volume group backed by a given block device.
+
+ Assumes block device has already been initialized as an LVM PV.
+
+ :param volume_group: str: Name of volume group to create.
+ :block_device: str: Full path of PV-initialized block device.
+ '''
+ check_call(['vgcreate', volume_group, block_device])
diff --git a/tests/charmhelpers/contrib/storage/linux/utils.py b/tests/charmhelpers/contrib/storage/linux/utils.py
new file mode 100644
index 00000000..3dc0df68
--- /dev/null
+++ b/tests/charmhelpers/contrib/storage/linux/utils.py
@@ -0,0 +1,69 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+from stat import S_ISBLK
+
+from subprocess import (
+ check_call,
+ check_output,
+ call
+)
+
+
+def is_block_device(path):
+ '''
+ Confirm device at path is a valid block device node.
+
+ :returns: boolean: True if path is a block device, False if not.
+ '''
+ if not os.path.exists(path):
+ return False
+ return S_ISBLK(os.stat(path).st_mode)
+
+
+def zap_disk(block_device):
+ '''
+ Clear a block device of partition table. Relies on sgdisk, which is
+ installed as part of the 'gdisk' package in Ubuntu.
+
+ :param block_device: str: Full path of block device to clean.
+ '''
+ # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
+ # sometimes sgdisk exits non-zero; this is OK, dd will clean up
+ call(['sgdisk', '--zap-all', '--', block_device])
+ call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
+ dev_end = check_output(['blockdev', '--getsz',
+ block_device]).decode('UTF-8')
+ gpt_end = int(dev_end.split()[0]) - 100
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+ 'bs=1M', 'count=1'])
+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+ 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+
+def is_device_mounted(device):
+ '''Given a device path, return True if that device is mounted, and False
+ if it isn't.
+
+ :param device: str: Full path of the device to check.
+ :returns: boolean: True if the path represents a mounted device, False if
+ it doesn't.
+ '''
+ try:
+ out = check_output(['lsblk', '-P', device]).decode('UTF-8')
+ except:
+ return False
+ return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 05edfa50..0ee5cb9f 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -45,6 +45,7 @@ if __platform__ == "ubuntu":
add_new_group,
lsb_release,
cmp_pkgrevno,
+ CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
@@ -52,6 +53,7 @@ elif __platform__ == "centos":
add_new_group,
lsb_release,
cmp_pkgrevno,
+ CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'
diff --git a/tests/charmhelpers/core/host_factory/centos.py b/tests/charmhelpers/core/host_factory/centos.py
index 902d469f..7781a396 100644
--- a/tests/charmhelpers/core/host_factory/centos.py
+++ b/tests/charmhelpers/core/host_factory/centos.py
@@ -2,6 +2,22 @@ import subprocess
import yum
import os
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+ """Provide comparisons of Host releases.
+
+ Use in the form of
+
+ if CompareHostReleases(release) > 'trusty':
+ # do something with the newer release
+ """
+
+ def __init__(self, item):
+ raise NotImplementedError(
+ "CompareHostReleases() is not implemented for CentOS")
+
def service_available(service_name):
# """Determine whether a system service is available."""
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
index 8c66af55..0448288c 100644
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,37 @@
import subprocess
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+UBUNTU_RELEASES = (
+ 'lucid',
+ 'maverick',
+ 'natty',
+ 'oneiric',
+ 'precise',
+ 'quantal',
+ 'raring',
+ 'saucy',
+ 'trusty',
+ 'utopic',
+ 'vivid',
+ 'wily',
+ 'xenial',
+ 'yakkety',
+ 'zesty',
+)
+
+
+class CompareHostReleases(BasicStringComparator):
+ """Provide comparisons of Ubuntu releases.
+
+ Use in the form of
+
+ if CompareHostReleases(release) > 'trusty':
+ # do something with the newer release
+ """
+ _list = UBUNTU_RELEASES
+
def service_available(service_name):
"""Determine whether a system service is available"""
diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py
index dd9b9717..685dabde 100644
--- a/tests/charmhelpers/core/strutils.py
+++ b/tests/charmhelpers/core/strutils.py
@@ -68,3 +68,56 @@ def bytes_from_string(value):
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+
+
+class BasicStringComparator(object):
+ """Provides a class that will compare strings from an iterator type object.
+ Used to provide > and < comparisons on strings that may not necessarily be
+ alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
+ z-wrap.
+ """
+
+ _list = None
+
+ def __init__(self, item):
+ if self._list is None:
+ raise Exception("Must define the _list in the class definition!")
+ try:
+ self.index = self._list.index(item)
+ except Exception:
+ raise KeyError("Item '{}' is not in list '{}'"
+ .format(item, self._list))
+
+ def __eq__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index == self._list.index(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index < self._list.index(other)
+
+ def __ge__(self, other):
+ return not self.__lt__(other)
+
+ def __gt__(self, other):
+ assert isinstance(other, str) or isinstance(other, self.__class__)
+ return self.index > self._list.index(other)
+
+ def __le__(self, other):
+ return not self.__gt__(other)
+
+ def __str__(self):
+ """Always give back the item at the index so it can be used in
+ comparisons like:
+
+ s_mitaka = CompareOpenStack('mitaka')
+ s_newton = CompareOpenStack('newton')
+
+ assert s_newton > s_mitaka
+
+ @returns: the item string at the stored index in _list
+ """
+ return self._list[self.index]
diff --git a/tests/charmhelpers/fetch/__init__.py b/tests/charmhelpers/fetch/__init__.py
new file mode 100644
index 00000000..ec5e0fe9
--- /dev/null
+++ b/tests/charmhelpers/fetch/__init__.py
@@ -0,0 +1,197 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+from charmhelpers.osplatform import get_platform
+from yaml import safe_load
+from charmhelpers.core.hookenv import (
+ config,
+ log,
+)
+
+import six
+if six.PY3:
+ from urllib.parse import urlparse, urlunparse
+else:
+ from urlparse import urlparse, urlunparse
+
+
+# The order of this list is very important. Handlers should be listed in from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+ 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
+)
+
+
+class SourceConfigError(Exception):
+ pass
+
+
+class UnhandledSource(Exception):
+ pass
+
+
+class AptLockError(Exception):
+ pass
+
+
+class BaseFetchHandler(object):
+
+ """Base class for FetchHandler implementations in fetch plugins"""
+
+ def can_handle(self, source):
+ """Returns True if the source can be handled. Otherwise returns
+ a string explaining why it cannot"""
+ return "Wrong source type"
+
+ def install(self, source):
+ """Try to download and unpack the source. Return the path to the
+ unpacked files or raise UnhandledSource."""
+ raise UnhandledSource("Wrong source type {}".format(source))
+
+ def parse_url(self, url):
+ return urlparse(url)
+
+ def base_url(self, url):
+ """Return url without querystring or fragment"""
+ parts = list(self.parse_url(url))
+ parts[4:] = ['' for i in parts[4:]]
+ return urlunparse(parts)
+
+
+__platform__ = get_platform()
+module = "charmhelpers.fetch.%s" % __platform__
+fetch = importlib.import_module(module)
+
+filter_installed_packages = fetch.filter_installed_packages
+install = fetch.install
+upgrade = fetch.upgrade
+update = fetch.update
+purge = fetch.purge
+add_source = fetch.add_source
+
+if __platform__ == "ubuntu":
+ apt_cache = fetch.apt_cache
+ apt_install = fetch.install
+ apt_update = fetch.update
+ apt_upgrade = fetch.upgrade
+ apt_purge = fetch.purge
+ apt_mark = fetch.apt_mark
+ apt_hold = fetch.apt_hold
+ apt_unhold = fetch.apt_unhold
+ get_upstream_version = fetch.get_upstream_version
+elif __platform__ == "centos":
+ yum_search = fetch.yum_search
+
+
+def configure_sources(update=False,
+ sources_var='install_sources',
+ keys_var='install_keys'):
+ """Configure multiple sources from charm configuration.
+
+ The lists are encoded as yaml fragments in the configuration.
+ The fragment needs to be included as a string. Sources and their
+ corresponding keys are of the types supported by add_source().
+
+ Example config:
+ install_sources: |
+ - "ppa:foo"
+ - "http://example.com/repo precise main"
+ install_keys: |
+ - null
+ - "a1b2c3d4"
+
+ Note that 'null' (a.k.a. None) should not be quoted.
+ """
+ sources = safe_load((config(sources_var) or '').strip()) or []
+ keys = safe_load((config(keys_var) or '').strip()) or None
+
+ if isinstance(sources, six.string_types):
+ sources = [sources]
+
+ if keys is None:
+ for source in sources:
+ add_source(source, None)
+ else:
+ if isinstance(keys, six.string_types):
+ keys = [keys]
+
+ if len(sources) != len(keys):
+ raise SourceConfigError(
+ 'Install sources and keys lists are different lengths')
+ for source, key in zip(sources, keys):
+ add_source(source, key)
+ if update:
+ fetch.update(fatal=True)
+
+
+def install_remote(source, *args, **kwargs):
+ """Install a file tree from a remote source.
+
+ The specified source should be a url of the form:
+ scheme://[host]/path[#[option=value][&...]]
+
+ Schemes supported are based on this modules submodules.
+ Options supported are submodule-specific.
+ Additional arguments are passed through to the submodule.
+
+ For example::
+
+ dest = install_remote('http://example.com/archive.tgz',
+ checksum='deadbeef',
+ hash_type='sha1')
+
+ This will download `archive.tgz`, validate it using SHA1 and, if
+ the file is ok, extract it and return the directory in which it
+ was extracted. If the checksum fails, it will raise
+ :class:`charmhelpers.core.host.ChecksumError`.
+ """
+ # We ONLY check for True here because can_handle may return a string
+ # explaining why it can't handle a given source.
+ handlers = [h for h in plugins() if h.can_handle(source) is True]
+ for handler in handlers:
+ try:
+ return handler.install(source, *args, **kwargs)
+ except UnhandledSource as e:
+ log('Install source attempt unsuccessful: {}'.format(e),
+ level='WARNING')
+ raise UnhandledSource("No handler found for source {}".format(source))
+
+
+def install_from_config(config_var_name):
+ """Install a file from config."""
+ charm_config = config()
+ source = charm_config[config_var_name]
+ return install_remote(source)
+
+
+def plugins(fetch_handlers=None):
+ if not fetch_handlers:
+ fetch_handlers = FETCH_HANDLERS
+ plugin_list = []
+ for handler_name in fetch_handlers:
+ package, classname = handler_name.rsplit('.', 1)
+ try:
+ handler_class = getattr(
+ importlib.import_module(package),
+ classname)
+ plugin_list.append(handler_class())
+ except NotImplementedError:
+ # Skip missing plugins so that they can be omitted from
+ # installation if desired
+ log("FetchHandler {} not found, skipping plugin".format(
+ handler_name))
+ return plugin_list
diff --git a/tests/charmhelpers/fetch/archiveurl.py b/tests/charmhelpers/fetch/archiveurl.py
new file mode 100644
index 00000000..dd24f9ec
--- /dev/null
+++ b/tests/charmhelpers/fetch/archiveurl.py
@@ -0,0 +1,165 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import hashlib
+import re
+
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource
+)
+from charmhelpers.payload.archive import (
+ get_archive_handler,
+ extract,
+)
+from charmhelpers.core.host import mkdir, check_hash
+
+import six
+if six.PY3:
+ from urllib.request import (
+ build_opener, install_opener, urlopen, urlretrieve,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ )
+ from urllib.parse import urlparse, urlunparse, parse_qs
+ from urllib.error import URLError
+else:
+ from urllib import urlretrieve
+ from urllib2 import (
+ build_opener, install_opener, urlopen,
+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
+ URLError
+ )
+ from urlparse import urlparse, urlunparse, parse_qs
+
+
+def splituser(host):
+ '''urllib.splituser(), but six's support of this seems broken'''
+ _userprog = re.compile('^(.*)@(.*)$')
+ match = _userprog.match(host)
+ if match:
+ return match.group(1, 2)
+ return None, host
+
+
+def splitpasswd(user):
+ '''urllib.splitpasswd(), but six's support of this is missing'''
+ _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
+ match = _passwdprog.match(user)
+ if match:
+ return match.group(1, 2)
+ return user, None
+
+
+class ArchiveUrlFetchHandler(BaseFetchHandler):
+ """
+ Handler to download archive files from arbitrary URLs.
+
+ Can fetch from http, https, ftp, and file URLs.
+
+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
+
+ Installs the contents of the archive in $CHARM_DIR/fetched/.
+ """
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+ # XXX: Why is this returning a boolean and a string? It's
+ # doomed to fail since "bool(can_handle('foo://'))" will be True.
+ return "Wrong source type"
+ if get_archive_handler(self.base_url(source)):
+ return True
+ return False
+
+ def download(self, source, dest):
+ """
+ Download an archive file.
+
+ :param str source: URL pointing to an archive file.
+ :param str dest: Local path location to download archive file to.
+ """
+ # propagate all exceptions
+ # URLError, OSError, etc
+ proto, netloc, path, params, query, fragment = urlparse(source)
+ if proto in ('http', 'https'):
+ auth, barehost = splituser(netloc)
+ if auth is not None:
+ source = urlunparse((proto, barehost, path, params, query, fragment))
+ username, password = splitpasswd(auth)
+ passman = HTTPPasswordMgrWithDefaultRealm()
+ # Realm is set to None in add_password to force the username and password
+ # to be used whatever the realm
+ passman.add_password(None, source, username, password)
+ authhandler = HTTPBasicAuthHandler(passman)
+ opener = build_opener(authhandler)
+ install_opener(opener)
+ response = urlopen(source)
+ try:
+ with open(dest, 'wb') as dest_file:
+ dest_file.write(response.read())
+ except Exception as e:
+ if os.path.isfile(dest):
+ os.unlink(dest)
+ raise e
+
+ # Mandatory file validation via Sha1 or MD5 hashing.
+ def download_and_validate(self, url, hashsum, validate="sha1"):
+ tempfile, headers = urlretrieve(url)
+ check_hash(tempfile, hashsum, validate)
+ return tempfile
+
+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
+ """
+ Download and install an archive file, with optional checksum validation.
+
+ The checksum can also be given on the `source` URL's fragment.
+ For example::
+
+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
+
+ :param str source: URL pointing to an archive file.
+ :param str dest: Local destination path to install to. If not given,
+ installs to `$CHARM_DIR/archives/archive_file_name`.
+ :param str checksum: If given, validate the archive file after download.
+ :param str hash_type: Algorithm used to generate `checksum`.
+ Can be any hash algorithm supported by :mod:`hashlib`,
+ such as md5, sha1, sha256, sha512, etc.
+
+ """
+ url_parts = self.parse_url(source)
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+ if not os.path.exists(dest_dir):
+ mkdir(dest_dir, perms=0o755)
+ dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+ try:
+ self.download(source, dld_file)
+ except URLError as e:
+ raise UnhandledSource(e.reason)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ options = parse_qs(url_parts.fragment)
+ for key, value in options.items():
+ if not six.PY3:
+ algorithms = hashlib.algorithms
+ else:
+ algorithms = hashlib.algorithms_available
+ if key in algorithms:
+ if len(value) != 1:
+ raise TypeError(
+ "Expected 1 hash value, not %d" % len(value))
+ expected = value[0]
+ check_hash(dld_file, expected, key)
+ if checksum:
+ check_hash(dld_file, checksum, hash_type)
+ return extract(dld_file, dest)
diff --git a/tests/charmhelpers/fetch/bzrurl.py b/tests/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 00000000..07cd0293
--- /dev/null
+++ b/tests/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import check_call
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource,
+ filter_installed_packages,
+ install,
+)
+from charmhelpers.core.host import mkdir
+
+
+if filter_installed_packages(['bzr']) != []:
+ install(['bzr'])
+ if filter_installed_packages(['bzr']) != []:
+ raise NotImplementedError('Unable to install bzr')
+
+
+class BzrUrlFetchHandler(BaseFetchHandler):
+ """Handler for bazaar branches via generic and lp URLs."""
+
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
+ return False
+ elif not url_parts.scheme:
+ return os.path.exists(os.path.join(source, '.bzr'))
+ else:
+ return True
+
+ def branch(self, source, dest, revno=None):
+ if not self.can_handle(source):
+ raise UnhandledSource("Cannot handle {}".format(source))
+ cmd_opts = []
+ if revno:
+ cmd_opts += ['-r', str(revno)]
+ if os.path.exists(dest):
+ cmd = ['bzr', 'pull']
+ cmd += cmd_opts
+ cmd += ['--overwrite', '-d', dest, source]
+ else:
+ cmd = ['bzr', 'branch']
+ cmd += cmd_opts
+ cmd += [source, dest]
+ check_call(cmd)
+
+ def install(self, source, dest=None, revno=None):
+ url_parts = self.parse_url(source)
+ branch_name = url_parts.path.strip("/").split("/")[-1]
+ if dest:
+ dest_dir = os.path.join(dest, branch_name)
+ else:
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+ branch_name)
+
+ if dest and not os.path.exists(dest):
+ mkdir(dest, perms=0o755)
+
+ try:
+ self.branch(source, dest_dir, revno)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ return dest_dir
diff --git a/tests/charmhelpers/fetch/centos.py b/tests/charmhelpers/fetch/centos.py
new file mode 100644
index 00000000..604bbfb5
--- /dev/null
+++ b/tests/charmhelpers/fetch/centos.py
@@ -0,0 +1,171 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import os
+import time
+import six
+import yum
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.hookenv import log
+
+YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM.
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between YUM lock checks.
+YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
+
+
+def filter_installed_packages(packages):
+ """Return a list of packages that require installation."""
+ yb = yum.YumBase()
+ package_list = yb.doPackageLists()
+ temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+
+ _pkgs = [p for p in packages if not temp_cache.get(p, False)]
+ return _pkgs
+
+
+def install(packages, options=None, fatal=False):
+ """Install one or more packages."""
+ cmd = ['yum', '--assumeyes']
+ if options is not None:
+ cmd.extend(options)
+ cmd.append('install')
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+ log("Installing {} with options: {}".format(packages,
+ options))
+ _run_yum_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+ """Upgrade all packages."""
+ cmd = ['yum', '--assumeyes']
+ if options is not None:
+ cmd.extend(options)
+ cmd.append('upgrade')
+ log("Upgrading with options: {}".format(options))
+ _run_yum_command(cmd, fatal)
+
+
+def update(fatal=False):
+ """Update local yum cache."""
+ cmd = ['yum', '--assumeyes', 'update']
+ log("Update with fatal: {}".format(fatal))
+ _run_yum_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+ """Purge one or more packages."""
+ cmd = ['yum', '--assumeyes', 'remove']
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+ log("Purging {}".format(packages))
+ _run_yum_command(cmd, fatal)
+
+
+def yum_search(packages):
+ """Search for a package."""
+ output = {}
+ cmd = ['yum', 'search']
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+ log("Searching for {}".format(packages))
+ result = subprocess.check_output(cmd)
+ for package in list(packages):
+ output[package] = package in result
+ return output
+
+
+def add_source(source, key=None):
+ """Add a package source to this system.
+
+ @param source: a URL with a rpm package
+
+ @param key: A key to be added to the system's keyring and used
+ to verify the signatures on packages. Ideally, this should be an
+ ASCII format GPG public key including the block headers. A GPG key
+ id may also be used, but be aware that only insecure protocols are
+ available to retrieve the actual public key from a public keyserver
+ placing your Juju environment at risk.
+ """
+ if source is None:
+ log('Source is not present. Skipping')
+ return
+
+ if source.startswith('http'):
+ directory = '/etc/yum.repos.d/'
+ for filename in os.listdir(directory):
+ with open(directory + filename, 'r') as rpm_file:
+ if source in rpm_file.read():
+ break
+ else:
+ log("Add source: {!r}".format(source))
+ # write in the charms.repo
+ with open(directory + 'Charms.repo', 'a') as rpm_file:
+ rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
+ rpm_file.write('name=%s\n' % source[7:])
+ rpm_file.write('baseurl=%s\n\n' % source)
+ else:
+ log("Unknown source: {!r}".format(source))
+
+ if key:
+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+ with NamedTemporaryFile('w+') as key_file:
+ key_file.write(key)
+ key_file.flush()
+ key_file.seek(0)
+                subprocess.check_call(['rpm', '--import', key_file.name])
+ else:
+ subprocess.check_call(['rpm', '--import', key])
+
+
+def _run_yum_command(cmd, fatal=False):
+    """Run a YUM command.
+
+ Checks the output and retry if the fatal flag is set to True.
+
+ :param: cmd: str: The yum command to run.
+ :param: fatal: bool: Whether the command's output should be checked and
+ retried.
+ """
+ env = os.environ.copy()
+
+ if fatal:
+ retry_count = 0
+ result = None
+
+ # If the command is considered "fatal", we need to retry if the yum
+ # lock was not acquired.
+
+ while result is None or result == YUM_NO_LOCK:
+ try:
+ result = subprocess.check_call(cmd, env=env)
+ except subprocess.CalledProcessError as e:
+ retry_count = retry_count + 1
+ if retry_count > YUM_NO_LOCK_RETRY_COUNT:
+ raise
+ result = e.returncode
+ log("Couldn't acquire YUM lock. Will retry in {} seconds."
+ "".format(YUM_NO_LOCK_RETRY_DELAY))
+ time.sleep(YUM_NO_LOCK_RETRY_DELAY)
+
+ else:
+ subprocess.call(cmd, env=env)
diff --git a/tests/charmhelpers/fetch/giturl.py b/tests/charmhelpers/fetch/giturl.py
new file mode 100644
index 00000000..4cf21bc2
--- /dev/null
+++ b/tests/charmhelpers/fetch/giturl.py
@@ -0,0 +1,69 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from subprocess import check_call, CalledProcessError
+from charmhelpers.fetch import (
+ BaseFetchHandler,
+ UnhandledSource,
+ filter_installed_packages,
+ install,
+)
+
+if filter_installed_packages(['git']) != []:
+ install(['git'])
+ if filter_installed_packages(['git']) != []:
+ raise NotImplementedError('Unable to install git')
+
+
+class GitUrlFetchHandler(BaseFetchHandler):
+ """Handler for git branches via generic and github URLs."""
+
+ def can_handle(self, source):
+ url_parts = self.parse_url(source)
+ # TODO (mattyw) no support for ssh git@ yet
+ if url_parts.scheme not in ('http', 'https', 'git', ''):
+ return False
+ elif not url_parts.scheme:
+ return os.path.exists(os.path.join(source, '.git'))
+ else:
+ return True
+
+ def clone(self, source, dest, branch="master", depth=None):
+ if not self.can_handle(source):
+ raise UnhandledSource("Cannot handle {}".format(source))
+
+ if os.path.exists(dest):
+ cmd = ['git', '-C', dest, 'pull', source, branch]
+ else:
+ cmd = ['git', 'clone', source, dest, '--branch', branch]
+ if depth:
+                cmd.extend(['--depth', str(depth)])
+ check_call(cmd)
+
+ def install(self, source, branch="master", dest=None, depth=None):
+ url_parts = self.parse_url(source)
+ branch_name = url_parts.path.strip("/").split("/")[-1]
+ if dest:
+ dest_dir = os.path.join(dest, branch_name)
+ else:
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+ branch_name)
+ try:
+ self.clone(source, dest_dir, branch, depth)
+ except CalledProcessError as e:
+ raise UnhandledSource(e)
+ except OSError as e:
+ raise UnhandledSource(e.strerror)
+ return dest_dir
diff --git a/tests/charmhelpers/fetch/snap.py b/tests/charmhelpers/fetch/snap.py
new file mode 100644
index 00000000..23c707b0
--- /dev/null
+++ b/tests/charmhelpers/fetch/snap.py
@@ -0,0 +1,122 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+from os import environ
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg '
+
+SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
+SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
+
+
+class CouldNotAcquireLockException(Exception):
+ pass
+
+
+def _snap_exec(commands):
+ """
+ Execute snap commands.
+
+ :param commands: List commands
+ :return: Integer exit code
+ """
+ assert type(commands) == list
+
+ retry_count = 0
+ return_code = None
+
+ while return_code is None or return_code == SNAP_NO_LOCK:
+ try:
+ return_code = subprocess.check_call(['snap'] + commands, env=environ)
+ except subprocess.CalledProcessError as e:
+            retry_count += 1
+ if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
+ return_code = e.returncode
+ log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
+ sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+ return return_code
+
+
+def snap_install(packages, *flags):
+ """
+ Install a snap package.
+
+ :param packages: String or List String package name
+ :param flags: List String flags to pass to install command
+ :return: Integer return code from snap
+ """
+ if type(packages) is not list:
+ packages = [packages]
+
+ flags = list(flags)
+
+ message = 'Installing snap(s) "%s"' % ', '.join(packages)
+ if flags:
+ message += ' with option(s) "%s"' % ', '.join(flags)
+
+ log(message, level='INFO')
+ return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+ """
+ Remove a snap package.
+
+ :param packages: String or List String package name
+ :param flags: List String flags to pass to remove command
+ :return: Integer return code from snap
+ """
+ if type(packages) is not list:
+ packages = [packages]
+
+ flags = list(flags)
+
+ message = 'Removing snap(s) "%s"' % ', '.join(packages)
+ if flags:
+ message += ' with options "%s"' % ', '.join(flags)
+
+ log(message, level='INFO')
+ return _snap_exec(['remove'] + flags + packages)
+
+
+def snap_refresh(packages, *flags):
+ """
+ Refresh / Update snap package.
+
+ :param packages: String or List String package name
+ :param flags: List String flags to pass to refresh command
+ :return: Integer return code from snap
+ """
+ if type(packages) is not list:
+ packages = [packages]
+
+ flags = list(flags)
+
+ message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
+ if flags:
+ message += ' with options "%s"' % ', '.join(flags)
+
+ log(message, level='INFO')
+ return _snap_exec(['refresh'] + flags + packages)
diff --git a/tests/charmhelpers/fetch/ubuntu.py b/tests/charmhelpers/fetch/ubuntu.py
new file mode 100644
index 00000000..82ac80ff
--- /dev/null
+++ b/tests/charmhelpers/fetch/ubuntu.py
@@ -0,0 +1,364 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import six
+import time
+import subprocess
+
+from tempfile import NamedTemporaryFile
+from charmhelpers.core.host import (
+ lsb_release
+)
+from charmhelpers.core.hookenv import log
+from charmhelpers.fetch import SourceConfigError
+
+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+"""
+
+PROPOSED_POCKET = """# Proposed
+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
+"""
+
+CLOUD_ARCHIVE_POCKETS = {
+ # Folsom
+ 'folsom': 'precise-updates/folsom',
+ 'precise-folsom': 'precise-updates/folsom',
+ 'precise-folsom/updates': 'precise-updates/folsom',
+ 'precise-updates/folsom': 'precise-updates/folsom',
+ 'folsom/proposed': 'precise-proposed/folsom',
+ 'precise-folsom/proposed': 'precise-proposed/folsom',
+ 'precise-proposed/folsom': 'precise-proposed/folsom',
+ # Grizzly
+ 'grizzly': 'precise-updates/grizzly',
+ 'precise-grizzly': 'precise-updates/grizzly',
+ 'precise-grizzly/updates': 'precise-updates/grizzly',
+ 'precise-updates/grizzly': 'precise-updates/grizzly',
+ 'grizzly/proposed': 'precise-proposed/grizzly',
+ 'precise-grizzly/proposed': 'precise-proposed/grizzly',
+ 'precise-proposed/grizzly': 'precise-proposed/grizzly',
+ # Havana
+ 'havana': 'precise-updates/havana',
+ 'precise-havana': 'precise-updates/havana',
+ 'precise-havana/updates': 'precise-updates/havana',
+ 'precise-updates/havana': 'precise-updates/havana',
+ 'havana/proposed': 'precise-proposed/havana',
+ 'precise-havana/proposed': 'precise-proposed/havana',
+ 'precise-proposed/havana': 'precise-proposed/havana',
+ # Icehouse
+ 'icehouse': 'precise-updates/icehouse',
+ 'precise-icehouse': 'precise-updates/icehouse',
+ 'precise-icehouse/updates': 'precise-updates/icehouse',
+ 'precise-updates/icehouse': 'precise-updates/icehouse',
+ 'icehouse/proposed': 'precise-proposed/icehouse',
+ 'precise-icehouse/proposed': 'precise-proposed/icehouse',
+ 'precise-proposed/icehouse': 'precise-proposed/icehouse',
+ # Juno
+ 'juno': 'trusty-updates/juno',
+ 'trusty-juno': 'trusty-updates/juno',
+ 'trusty-juno/updates': 'trusty-updates/juno',
+ 'trusty-updates/juno': 'trusty-updates/juno',
+ 'juno/proposed': 'trusty-proposed/juno',
+ 'trusty-juno/proposed': 'trusty-proposed/juno',
+ 'trusty-proposed/juno': 'trusty-proposed/juno',
+ # Kilo
+ 'kilo': 'trusty-updates/kilo',
+ 'trusty-kilo': 'trusty-updates/kilo',
+ 'trusty-kilo/updates': 'trusty-updates/kilo',
+ 'trusty-updates/kilo': 'trusty-updates/kilo',
+ 'kilo/proposed': 'trusty-proposed/kilo',
+ 'trusty-kilo/proposed': 'trusty-proposed/kilo',
+ 'trusty-proposed/kilo': 'trusty-proposed/kilo',
+ # Liberty
+ 'liberty': 'trusty-updates/liberty',
+ 'trusty-liberty': 'trusty-updates/liberty',
+ 'trusty-liberty/updates': 'trusty-updates/liberty',
+ 'trusty-updates/liberty': 'trusty-updates/liberty',
+ 'liberty/proposed': 'trusty-proposed/liberty',
+ 'trusty-liberty/proposed': 'trusty-proposed/liberty',
+ 'trusty-proposed/liberty': 'trusty-proposed/liberty',
+ # Mitaka
+ 'mitaka': 'trusty-updates/mitaka',
+ 'trusty-mitaka': 'trusty-updates/mitaka',
+ 'trusty-mitaka/updates': 'trusty-updates/mitaka',
+ 'trusty-updates/mitaka': 'trusty-updates/mitaka',
+ 'mitaka/proposed': 'trusty-proposed/mitaka',
+ 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+ 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+ # Newton
+ 'newton': 'xenial-updates/newton',
+ 'xenial-newton': 'xenial-updates/newton',
+ 'xenial-newton/updates': 'xenial-updates/newton',
+ 'xenial-updates/newton': 'xenial-updates/newton',
+ 'newton/proposed': 'xenial-proposed/newton',
+ 'xenial-newton/proposed': 'xenial-proposed/newton',
+ 'xenial-proposed/newton': 'xenial-proposed/newton',
+ # Ocata
+ 'ocata': 'xenial-updates/ocata',
+ 'xenial-ocata': 'xenial-updates/ocata',
+ 'xenial-ocata/updates': 'xenial-updates/ocata',
+ 'xenial-updates/ocata': 'xenial-updates/ocata',
+ 'ocata/proposed': 'xenial-proposed/ocata',
+ 'xenial-ocata/proposed': 'xenial-proposed/ocata',
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
+}
+
+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
+CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
+CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times.
+
+
+def filter_installed_packages(packages):
+ """Return a list of packages that require installation."""
+ cache = apt_cache()
+ _pkgs = []
+ for package in packages:
+ try:
+ p = cache[package]
+ p.current_ver or _pkgs.append(package)
+ except KeyError:
+ log('Package {} has no installation candidate.'.format(package),
+ level='WARNING')
+ _pkgs.append(package)
+ return _pkgs
+
+
+def apt_cache(in_memory=True, progress=None):
+ """Build and return an apt cache."""
+ from apt import apt_pkg
+ apt_pkg.init()
+ if in_memory:
+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+ return apt_pkg.Cache(progress)
+
+
+def install(packages, options=None, fatal=False):
+ """Install one or more packages."""
+ if options is None:
+ options = ['--option=Dpkg::Options::=--force-confold']
+
+ cmd = ['apt-get', '--assume-yes']
+ cmd.extend(options)
+ cmd.append('install')
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+ log("Installing {} with options: {}".format(packages,
+ options))
+ _run_apt_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+ """Upgrade all packages."""
+ if options is None:
+ options = ['--option=Dpkg::Options::=--force-confold']
+
+ cmd = ['apt-get', '--assume-yes']
+ cmd.extend(options)
+ if dist:
+ cmd.append('dist-upgrade')
+ else:
+ cmd.append('upgrade')
+ log("Upgrading with options: {}".format(options))
+ _run_apt_command(cmd, fatal)
+
+
+def update(fatal=False):
+ """Update local apt cache."""
+ cmd = ['apt-get', 'update']
+ _run_apt_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+ """Purge one or more packages."""
+ cmd = ['apt-get', '--assume-yes', 'purge']
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+ log("Purging {}".format(packages))
+ _run_apt_command(cmd, fatal)
+
+
+def apt_mark(packages, mark, fatal=False):
+ """Flag one or more packages using apt-mark."""
+ log("Marking {} as {}".format(packages, mark))
+ cmd = ['apt-mark', mark]
+ if isinstance(packages, six.string_types):
+ cmd.append(packages)
+ else:
+ cmd.extend(packages)
+
+ if fatal:
+ subprocess.check_call(cmd, universal_newlines=True)
+ else:
+ subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+ return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+ return apt_mark(packages, 'unhold', fatal=fatal)
+
+
+def add_source(source, key=None):
+ """Add a package source to this system.
+
+ @param source: a URL or sources.list entry, as supported by
+ add-apt-repository(1). Examples::
+
+ ppa:charmers/example
+ deb https://stub:key@private.example.com/ubuntu trusty main
+
+ In addition:
+ 'proposed:' may be used to enable the standard 'proposed'
+ pocket for the release.
+ 'cloud:' may be used to activate official cloud archive pockets,
+ such as 'cloud:icehouse'
+ 'distro' may be used as a noop
+
+ @param key: A key to be added to the system's APT keyring and used
+ to verify the signatures on packages. Ideally, this should be an
+ ASCII format GPG public key including the block headers. A GPG key
+ id may also be used, but be aware that only insecure protocols are
+ available to retrieve the actual public key from a public keyserver
+ placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+ """
+ if source is None:
+ log('Source is not present. Skipping')
+ return
+
+ if (source.startswith('ppa:') or
+ source.startswith('http') or
+ source.startswith('deb ') or
+ source.startswith('cloud-archive:')):
+ cmd = ['add-apt-repository', '--yes', source]
+ _run_with_retries(cmd)
+ elif source.startswith('cloud:'):
+ install(filter_installed_packages(['ubuntu-cloud-keyring']),
+ fatal=True)
+ pocket = source.split(':')[-1]
+ if pocket not in CLOUD_ARCHIVE_POCKETS:
+ raise SourceConfigError(
+ 'Unsupported cloud: source option %s' %
+ pocket)
+ actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
+ apt.write(CLOUD_ARCHIVE.format(actual_pocket))
+ elif source == 'proposed':
+ release = lsb_release()['DISTRIB_CODENAME']
+ with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+ apt.write(PROPOSED_POCKET.format(release))
+ elif source == 'distro':
+ pass
+ else:
+ log("Unknown source: {!r}".format(source))
+
+ if key:
+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+ with NamedTemporaryFile('w+') as key_file:
+ key_file.write(key)
+ key_file.flush()
+ key_file.seek(0)
+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+ else:
+ # Note that hkp: is in no way a secure protocol. Using a
+ # GPG key id is pointless from a security POV unless you
+ # absolutely trust your network and DNS.
+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
+ 'hkp://keyserver.ubuntu.com:80', '--recv',
+ key])
+
+
+def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
+ retry_message="", cmd_env=None):
+ """Run a command and retry until success or max_retries is reached.
+
+ :param: cmd: str: The apt command to run.
+ :param: max_retries: int: The number of retries to attempt on a fatal
+ command. Defaults to CMD_RETRY_COUNT.
+ :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
+ Defaults to retry on exit code 1.
+ :param: retry_message: str: Optional log prefix emitted during retries.
+ :param: cmd_env: dict: Environment variables to add to the command run.
+ """
+
+ env = os.environ.copy()
+ if cmd_env:
+ env.update(cmd_env)
+
+ if not retry_message:
+ retry_message = "Failed executing '{}'".format(" ".join(cmd))
+ retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
+
+ retry_count = 0
+ result = None
+
+ retry_results = (None,) + retry_exitcodes
+ while result in retry_results:
+ try:
+ result = subprocess.check_call(cmd, env=env)
+ except subprocess.CalledProcessError as e:
+ retry_count = retry_count + 1
+ if retry_count > max_retries:
+ raise
+ result = e.returncode
+ log(retry_message)
+ time.sleep(CMD_RETRY_DELAY)
+
+
+def _run_apt_command(cmd, fatal=False):
+ """Run an apt command with optional retries.
+
+ :param: fatal: bool: Whether the command's output should be checked and
+ retried.
+ """
+ # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
+ cmd_env = {
+ 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
+
+ if fatal:
+ _run_with_retries(
+ cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+ retry_message="Couldn't acquire DPKG lock")
+ else:
+ env = os.environ.copy()
+ env.update(cmd_env)
+ subprocess.call(cmd, env=env)
+
+
+def get_upstream_version(package):
+ """Determine upstream version based on installed package
+
+ @returns None (if not installed) or the upstream version
+ """
+ import apt_pkg
+ cache = apt_cache()
+ try:
+ pkg = cache[package]
+    except KeyError:
+ # the package is unknown to the current apt cache.
+ return None
+
+ if not pkg.current_ver:
+ # package is known, but no version is currently installed.
+ return None
+
+ return apt_pkg.upstream_version(pkg.current_ver.ver_str)
diff --git a/tests/charmhelpers/osplatform.py b/tests/charmhelpers/osplatform.py
new file mode 100644
index 00000000..d9a4d5c0
--- /dev/null
+++ b/tests/charmhelpers/osplatform.py
@@ -0,0 +1,25 @@
+import platform
+
+
+def get_platform():
+ """Return the current OS platform.
+
+ For example: if current os platform is Ubuntu then a string "ubuntu"
+ will be returned (which is the name of the module).
+ This string is used to decide which platform module should be imported.
+ """
+ # linux_distribution is deprecated and will be removed in Python 3.7
+    # Warnings *not* disabled, as we certainly need to fix this.
+ tuple_platform = platform.linux_distribution()
+ current_platform = tuple_platform[0]
+ if "Ubuntu" in current_platform:
+ return "ubuntu"
+ elif "CentOS" in current_platform:
+ return "centos"
+ elif "debian" in current_platform:
+ # Stock Python does not detect Ubuntu and instead returns debian.
+ # Or at least it does in some build environments like Travis CI
+ return "ubuntu"
+ else:
+ raise RuntimeError("This module is not supported on {}."
+ .format(current_platform))
diff --git a/tox.ini b/tox.ini
index 6f1aeace..7c2936e3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,7 +14,7 @@ install_command =
pip install --allow-unverified python-apt {opts} {packages}
commands = ostestr {posargs}
whitelist_externals = juju
-passenv = HOME TERM AMULET_* CS_API_URL
+passenv = HOME TERM AMULET_* CS_API_*
[testenv:py27]
basepython = python2.7
diff --git a/unit_tests/test_nova_compute_contexts.py b/unit_tests/test_nova_compute_contexts.py
index 589e7ab0..129463a2 100644
--- a/unit_tests/test_nova_compute_contexts.py
+++ b/unit_tests/test_nova_compute_contexts.py
@@ -210,6 +210,7 @@ class NovaComputeContextTests(CharmTestCase):
'reserved_host_memory': 512}, libvirt())
def test_libvirt_bin_context_no_migration(self):
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
self.test_config.set('enable-live-migration', False)
libvirt = context.NovaComputeLibvirtContext()
@@ -226,6 +227,7 @@ class NovaComputeContextTests(CharmTestCase):
def test_libvirt_bin_context_migration_tcp_listen(self):
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('enable-live-migration', True)
libvirt = context.NovaComputeLibvirtContext()
@@ -241,6 +243,7 @@ class NovaComputeContextTests(CharmTestCase):
def test_libvirt_disk_cachemodes(self):
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('disk-cachemodes', 'file=unsafe,block=none')
libvirt = context.NovaComputeLibvirtContext()
@@ -257,6 +260,8 @@ class NovaComputeContextTests(CharmTestCase):
def test_libvirt_hugepages(self):
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
+ self.os_release.return_value = 'kilo'
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('hugepages', '22')
libvirt = context.NovaComputeLibvirtContext()
@@ -294,6 +299,8 @@ class NovaComputeContextTests(CharmTestCase):
def test_resume_guests_state_on_host_boot(self):
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
+ self.os_release.return_value = 'diablo'
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('resume-guests-state-on-host-boot', True)
lxd = context.NovaComputeVirtContext()
self.assertEqual({'resume_guests_state_on_host_boot': True}, lxd())
@@ -301,6 +308,7 @@ class NovaComputeContextTests(CharmTestCase):
@patch.object(context.uuid, 'uuid4')
def test_libvirt_new_uuid(self, mock_uuid):
self.kv.return_value = FakeUnitdata()
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
mock_uuid.return_value = '73874c1c-ba48-406d-8d99-ac185d83b9bc'
libvirt = context.NovaComputeLibvirtContext()
self.assertEqual(libvirt()['host_uuid'],
@@ -320,6 +328,7 @@ class NovaComputeContextTests(CharmTestCase):
@patch.object(context.uuid, 'uuid4')
def test_libvirt_cpu_mode_host_passthrough(self, mock_uuid):
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('cpu-mode', 'host-passthrough')
mock_uuid.return_value = 'e46e530d-18ae-4a67-9ff0-e6e2ba7c60a7'
libvirt = context.NovaComputeLibvirtContext()
@@ -330,6 +339,7 @@ class NovaComputeContextTests(CharmTestCase):
@patch.object(context.uuid, 'uuid4')
def test_libvirt_cpu_mode_none(self, mock_uuid):
self.test_config.set('cpu-mode', 'none')
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
mock_uuid.return_value = 'e46e530d-18ae-4a67-9ff0-e6e2ba7c60a7'
libvirt = context.NovaComputeLibvirtContext()
@@ -338,6 +348,7 @@ class NovaComputeContextTests(CharmTestCase):
def test_libvirt_vnf_configs(self):
self.kv.return_value = FakeUnitdata(**{'host_uuid': self.host_uuid})
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
self.test_config.set('hugepages', '22')
self.test_config.set('reserved-host-memory', 1024)
self.test_config.set('vcpu-pin-set', '^0^2')
@@ -358,6 +369,8 @@ class NovaComputeContextTests(CharmTestCase):
'pci_passthrough_whitelist': 'mypcidevices'}, libvirt())
def test_ksm_configs(self):
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
+
self.test_config.set('ksm', '1')
libvirt = context.NovaComputeLibvirtContext()
self.assertTrue(libvirt()['ksm'] == '1')
@@ -384,13 +397,14 @@ class NovaComputeContextTests(CharmTestCase):
libvirt = context.NovaComputeLibvirtContext()
self.assertTrue(libvirt()['ksm'] == 'AUTO')
- self.os_release.return_value = 'cactus'
+ self.os_release.return_value = 'diablo'
self.test_config.set('ksm', 'AUTO')
libvirt = context.NovaComputeLibvirtContext()
self.assertTrue(libvirt()['ksm'] == '1')
@patch.object(context.uuid, 'uuid4')
def test_libvirt_cpu_mode_default(self, mock_uuid):
+ self.lsb_release.return_value = {'DISTRIB_CODENAME': 'lucid'}
libvirt = context.NovaComputeLibvirtContext()
self.assertFalse('cpu-mode' in libvirt())
diff --git a/unit_tests/test_nova_compute_utils.py b/unit_tests/test_nova_compute_utils.py
index 4a899661..1700eabb 100644
--- a/unit_tests/test_nova_compute_utils.py
+++ b/unit_tests/test_nova_compute_utils.py
@@ -439,12 +439,14 @@ class NovaComputeUtilsTests(CharmTestCase):
@patch.object(utils, 'nova_metadata_requirement')
@patch.object(utils, 'network_manager')
def test_resource_map_neutron(self, net_man, en_meta):
+ self.os_release.return_value = 'diablo'
self._test_resource_map_neutron(net_man, en_meta, 'libvirt-bin')
@patch.object(utils, 'nova_metadata_requirement')
@patch.object(utils, 'network_manager')
def test_resource_map_neutron_yakkety(self, net_man, en_meta,):
self.lsb_release.return_value = {'DISTRIB_CODENAME': 'yakkety'}
+ self.os_release.return_value = 'diablo'
self._test_resource_map_neutron(net_man, en_meta, 'libvirtd')
@patch.object(utils, 'nova_metadata_requirement')
@@ -455,6 +457,7 @@ class NovaComputeUtilsTests(CharmTestCase):
net_man.return_value = 'bob'
_plugin.return_value = 'ovs'
self.relation_ids.return_value = []
+ self.os_release.return_value = 'diablo'
result = utils.resource_map()['/etc/nova/nova.conf']['services']
self.assertTrue('nova-api-metadata' in result)
@@ -1014,6 +1017,7 @@ class NovaComputeUtilsTests(CharmTestCase):
self.assertEqual(utils.libvirt_daemon(), utils.LIBVIRTD_DAEMON)
def test_libvirt_daemon_preyakkety(self):
+ self.os_release.return_value = 'diablo'
self.lsb_release.return_value = {
'DISTRIB_CODENAME': 'xenial'
}