summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRyan Beisner <ryan.beisner@canonical.com>2017-08-24 16:50:36 -0500
committerRyan Beisner <ryan.beisner@canonical.com>2017-08-24 16:50:45 -0500
commitbbe3fae336d96b77253032732e3260ec17380382 (patch)
treea2c64c0222eeea0d00ae9b89f2fb8b2e8ec535aa
parent8a1ba0e4be18c634bf9dfc380416141d9b31335c (diff)
Sync charm-helpers
Notes
Notes (review): Verified+1: Canonical CI <uosci-testing-bot@ubuntu.com> Code-Review+1: Ryan Beisner <ryan.beisner@canonical.com> Code-Review+2: David Ames <david.ames@canonical.com> Workflow+1: David Ames <david.ames@canonical.com> Verified+2: Jenkins Submitted-by: Jenkins Submitted-at: Fri, 25 Aug 2017 18:08:26 +0000 Reviewed-on: https://review.openstack.org/497655 Project: openstack/charm-cinder-backup Branch: refs/heads/master
-rw-r--r--hooks/charmhelpers/__init__.py61
-rw-r--r--hooks/charmhelpers/contrib/network/ip.py6
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py103
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py96
-rw-r--r--hooks/charmhelpers/contrib/openstack/keystone.py2
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/ceph.conf5
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg6
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications8
-rw-r--r--hooks/charmhelpers/contrib/openstack/templating.py7
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py361
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/bcache.py74
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py44
-rw-r--r--hooks/charmhelpers/core/hookenv.py40
-rw-r--r--hooks/charmhelpers/core/host.py38
-rw-r--r--hooks/charmhelpers/fetch/__init__.py26
-rw-r--r--hooks/charmhelpers/fetch/centos.py2
-rw-r--r--hooks/charmhelpers/fetch/snap.py22
-rw-r--r--hooks/charmhelpers/fetch/ubuntu.py327
-rw-r--r--tests/charmhelpers/__init__.py61
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/utils.py103
-rw-r--r--tests/charmhelpers/core/hookenv.py40
-rw-r--r--tests/charmhelpers/core/host.py38
22 files changed, 1119 insertions, 351 deletions
diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py
index 4886788..e7aa471 100644
--- a/hooks/charmhelpers/__init__.py
+++ b/hooks/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
14 14
15# Bootstrap charm-helpers, installing its dependencies if necessary using 15# Bootstrap charm-helpers, installing its dependencies if necessary using
16# only standard libraries. 16# only standard libraries.
17from __future__ import print_function
18from __future__ import absolute_import
19
20import functools
21import inspect
17import subprocess 22import subprocess
18import sys 23import sys
19 24
@@ -34,3 +39,59 @@ except ImportError:
34 else: 39 else:
35 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) 40 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
36 import yaml # flake8: noqa 41 import yaml # flake8: noqa
42
43
44# Holds a list of mapping of mangled function names that have been deprecated
45# using the @deprecate decorator below. This is so that the warning is only
46# printed once for each usage of the function.
47__deprecated_functions = {}
48
49
50def deprecate(warning, date=None, log=None):
51 """Add a deprecation warning the first time the function is used.
52 The date, which is a string in semi-ISO8601 format indicates the year-month
53 that the function is officially going to be removed.
54
55 usage:
56
57 @deprecate('use core/fetch/add_source() instead', '2017-04')
58 def contributed_add_source_thing(...):
59 ...
60
61 And it then prints to the log ONCE that the function is deprecated.
62 The reason for passing the logging function (log) is so that hookenv.log
63 can be used for a charm if needed.
64
65 :param warning: String to indicate where it has moved to.
66 :param date: optional string, in YYYY-MM format to indicate when the
67 function will definitely (probably) be removed.
68 :param log: The log function to call to log. If not, logs to stdout
69 """
70 def wrap(f):
71
72 @functools.wraps(f)
73 def wrapped_f(*args, **kwargs):
74 try:
75 module = inspect.getmodule(f)
76 file = inspect.getsourcefile(f)
77 lines = inspect.getsourcelines(f)
78 f_name = "{}-{}-{}..{}-{}".format(
79 module.__name__, file, lines[0], lines[-1], f.__name__)
80 except (IOError, TypeError):
81 # assume it was local, so just use the name of the function
82 f_name = f.__name__
83 if f_name not in __deprecated_functions:
84 __deprecated_functions[f_name] = True
85 s = "DEPRECATION WARNING: Function {} is being removed".format(
86 f.__name__)
87 if date:
88 s = "{} on/around {}".format(s, date)
89 if warning:
90 s = "{} : {}".format(s, warning)
91 if log:
92 log(s)
93 else:
94 print(s)
95 return f(*args, **kwargs)
96 return wrapped_f
97 return wrap
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index fc3f5e3..d7e6deb 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -243,11 +243,13 @@ def is_ipv6_disabled():
243 try: 243 try:
244 result = subprocess.check_output( 244 result = subprocess.check_output(
245 ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], 245 ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
246 stderr=subprocess.STDOUT) 246 stderr=subprocess.STDOUT,
247 return "net.ipv6.conf.all.disable_ipv6 = 1" in result 247 universal_newlines=True)
248 except subprocess.CalledProcessError: 248 except subprocess.CalledProcessError:
249 return True 249 return True
250 250
251 return "net.ipv6.conf.all.disable_ipv6 = 1" in result
252
251 253
252def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, 254def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
253 fatal=True, exc_list=None): 255 fatal=True, exc_list=None):
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index bcef4cd..c8edbf6 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -25,9 +25,12 @@ import urlparse
25import cinderclient.v1.client as cinder_client 25import cinderclient.v1.client as cinder_client
26import glanceclient.v1.client as glance_client 26import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client 27import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client 28from keystoneclient.v2_0 import client as keystone_client
29from keystoneclient.auth.identity import v3 as keystone_id_v3 29from keystoneauth1.identity import (
30from keystoneclient import session as keystone_session 30 v3,
31 v2,
32)
33from keystoneauth1 import session as keystone_session
31from keystoneclient.v3 import client as keystone_client_v3 34from keystoneclient.v3 import client as keystone_client_v3
32from novaclient import exceptions 35from novaclient import exceptions
33 36
@@ -368,12 +371,20 @@ class OpenStackAmuletUtils(AmuletUtils):
368 port) 371 port)
369 if not api_version or api_version == 2: 372 if not api_version or api_version == 2:
370 ep = base_ep + "/v2.0" 373 ep = base_ep + "/v2.0"
371 return keystone_client.Client(username=username, password=password, 374 auth = v2.Password(
372 tenant_name=project_name, 375 username=username,
373 auth_url=ep) 376 password=password,
377 tenant_name=project_name,
378 auth_url=ep
379 )
380 sess = keystone_session.Session(auth=auth)
381 client = keystone_client.Client(session=sess)
382 # This populates the client.service_catalog
383 client.auth_ref = auth.get_access(sess)
384 return client
374 else: 385 else:
375 ep = base_ep + "/v3" 386 ep = base_ep + "/v3"
376 auth = keystone_id_v3.Password( 387 auth = v3.Password(
377 user_domain_name=user_domain_name, 388 user_domain_name=user_domain_name,
378 username=username, 389 username=username,
379 password=password, 390 password=password,
@@ -382,36 +393,45 @@ class OpenStackAmuletUtils(AmuletUtils):
382 project_name=project_name, 393 project_name=project_name,
383 auth_url=ep 394 auth_url=ep
384 ) 395 )
385 return keystone_client_v3.Client( 396 sess = keystone_session.Session(auth=auth)
386 session=keystone_session.Session(auth=auth) 397 client = keystone_client_v3.Client(session=sess)
387 ) 398 # This populates the client.service_catalog
399 client.auth_ref = auth.get_access(sess)
400 return client
388 401
389 def authenticate_keystone_admin(self, keystone_sentry, user, password, 402 def authenticate_keystone_admin(self, keystone_sentry, user, password,
390 tenant=None, api_version=None, 403 tenant=None, api_version=None,
391 keystone_ip=None): 404 keystone_ip=None, user_domain_name=None,
405 project_domain_name=None,
406 project_name=None):
392 """Authenticates admin user with the keystone admin endpoint.""" 407 """Authenticates admin user with the keystone admin endpoint."""
393 self.log.debug('Authenticating keystone admin...') 408 self.log.debug('Authenticating keystone admin...')
394 if not keystone_ip: 409 if not keystone_ip:
395 keystone_ip = keystone_sentry.info['public-address'] 410 keystone_ip = keystone_sentry.info['public-address']
396 411
397 user_domain_name = None 412 # To support backward compatibility usage of this function
398 domain_name = None 413 if not project_name:
399 if api_version == 3: 414 project_name = tenant
415 if api_version == 3 and not user_domain_name:
400 user_domain_name = 'admin_domain' 416 user_domain_name = 'admin_domain'
401 domain_name = user_domain_name 417 if api_version == 3 and not project_domain_name:
402 418 project_domain_name = 'admin_domain'
403 return self.authenticate_keystone(keystone_ip, user, password, 419 if api_version == 3 and not project_name:
404 project_name=tenant, 420 project_name = 'admin'
405 api_version=api_version, 421
406 user_domain_name=user_domain_name, 422 return self.authenticate_keystone(
407 domain_name=domain_name, 423 keystone_ip, user, password,
408 admin_port=True) 424 api_version=api_version,
425 user_domain_name=user_domain_name,
426 project_domain_name=project_domain_name,
427 project_name=project_name,
428 admin_port=True)
409 429
410 def authenticate_keystone_user(self, keystone, user, password, tenant): 430 def authenticate_keystone_user(self, keystone, user, password, tenant):
411 """Authenticates a regular user with the keystone public endpoint.""" 431 """Authenticates a regular user with the keystone public endpoint."""
412 self.log.debug('Authenticating keystone user ({})...'.format(user)) 432 self.log.debug('Authenticating keystone user ({})...'.format(user))
413 ep = keystone.service_catalog.url_for(service_type='identity', 433 ep = keystone.service_catalog.url_for(service_type='identity',
414 endpoint_type='publicURL') 434 interface='publicURL')
415 keystone_ip = urlparse.urlparse(ep).hostname 435 keystone_ip = urlparse.urlparse(ep).hostname
416 436
417 return self.authenticate_keystone(keystone_ip, user, password, 437 return self.authenticate_keystone(keystone_ip, user, password,
@@ -421,22 +441,32 @@ class OpenStackAmuletUtils(AmuletUtils):
421 """Authenticates admin user with glance.""" 441 """Authenticates admin user with glance."""
422 self.log.debug('Authenticating glance admin...') 442 self.log.debug('Authenticating glance admin...')
423 ep = keystone.service_catalog.url_for(service_type='image', 443 ep = keystone.service_catalog.url_for(service_type='image',
424 endpoint_type='adminURL') 444 interface='adminURL')
425 return glance_client.Client(ep, token=keystone.auth_token) 445 if keystone.session:
446 return glance_client.Client(ep, session=keystone.session)
447 else:
448 return glance_client.Client(ep, token=keystone.auth_token)
426 449
427 def authenticate_heat_admin(self, keystone): 450 def authenticate_heat_admin(self, keystone):
428 """Authenticates the admin user with heat.""" 451 """Authenticates the admin user with heat."""
429 self.log.debug('Authenticating heat admin...') 452 self.log.debug('Authenticating heat admin...')
430 ep = keystone.service_catalog.url_for(service_type='orchestration', 453 ep = keystone.service_catalog.url_for(service_type='orchestration',
431 endpoint_type='publicURL') 454 interface='publicURL')
432 return heat_client.Client(endpoint=ep, token=keystone.auth_token) 455 if keystone.session:
456 return heat_client.Client(endpoint=ep, session=keystone.session)
457 else:
458 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
433 459
434 def authenticate_nova_user(self, keystone, user, password, tenant): 460 def authenticate_nova_user(self, keystone, user, password, tenant):
435 """Authenticates a regular user with nova-api.""" 461 """Authenticates a regular user with nova-api."""
436 self.log.debug('Authenticating nova user ({})...'.format(user)) 462 self.log.debug('Authenticating nova user ({})...'.format(user))
437 ep = keystone.service_catalog.url_for(service_type='identity', 463 ep = keystone.service_catalog.url_for(service_type='identity',
438 endpoint_type='publicURL') 464 interface='publicURL')
439 if novaclient.__version__[0] >= "7": 465 if keystone.session:
466 return nova_client.Client(NOVA_CLIENT_VERSION,
467 session=keystone.session,
468 auth_url=ep)
469 elif novaclient.__version__[0] >= "7":
440 return nova_client.Client(NOVA_CLIENT_VERSION, 470 return nova_client.Client(NOVA_CLIENT_VERSION,
441 username=user, password=password, 471 username=user, password=password,
442 project_name=tenant, auth_url=ep) 472 project_name=tenant, auth_url=ep)
@@ -449,12 +479,15 @@ class OpenStackAmuletUtils(AmuletUtils):
449 """Authenticates a regular user with swift api.""" 479 """Authenticates a regular user with swift api."""
450 self.log.debug('Authenticating swift user ({})...'.format(user)) 480 self.log.debug('Authenticating swift user ({})...'.format(user))
451 ep = keystone.service_catalog.url_for(service_type='identity', 481 ep = keystone.service_catalog.url_for(service_type='identity',
452 endpoint_type='publicURL') 482 interface='publicURL')
453 return swiftclient.Connection(authurl=ep, 483 if keystone.session:
454 user=user, 484 return swiftclient.Connection(session=keystone.session)
455 key=password, 485 else:
456 tenant_name=tenant, 486 return swiftclient.Connection(authurl=ep,
457 auth_version='2.0') 487 user=user,
488 key=password,
489 tenant_name=tenant,
490 auth_version='2.0')
458 491
459 def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", 492 def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
460 ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): 493 ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index ea93159..f67f326 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -41,9 +41,9 @@ from charmhelpers.core.hookenv import (
41 charm_name, 41 charm_name,
42 DEBUG, 42 DEBUG,
43 INFO, 43 INFO,
44 WARNING,
45 ERROR, 44 ERROR,
46 status_set, 45 status_set,
46 network_get_primary_address
47) 47)
48 48
49from charmhelpers.core.sysctl import create as sysctl_create 49from charmhelpers.core.sysctl import create as sysctl_create
@@ -80,6 +80,9 @@ from charmhelpers.contrib.openstack.neutron import (
80from charmhelpers.contrib.openstack.ip import ( 80from charmhelpers.contrib.openstack.ip import (
81 resolve_address, 81 resolve_address,
82 INTERNAL, 82 INTERNAL,
83 ADMIN,
84 PUBLIC,
85 ADDRESS_MAP,
83) 86)
84from charmhelpers.contrib.network.ip import ( 87from charmhelpers.contrib.network.ip import (
85 get_address_in_network, 88 get_address_in_network,
@@ -87,7 +90,6 @@ from charmhelpers.contrib.network.ip import (
87 get_ipv6_addr, 90 get_ipv6_addr,
88 get_netmask_for_address, 91 get_netmask_for_address,
89 format_ipv6_addr, 92 format_ipv6_addr,
90 is_address_in_network,
91 is_bridge_member, 93 is_bridge_member,
92 is_ipv6_disabled, 94 is_ipv6_disabled,
93) 95)
@@ -97,6 +99,7 @@ from charmhelpers.contrib.openstack.utils import (
97 git_determine_usr_bin, 99 git_determine_usr_bin,
98 git_determine_python_path, 100 git_determine_python_path,
99 enable_memcache, 101 enable_memcache,
102 snap_install_requested,
100) 103)
101from charmhelpers.core.unitdata import kv 104from charmhelpers.core.unitdata import kv
102 105
@@ -244,6 +247,11 @@ class SharedDBContext(OSContextGenerator):
244 'database_password': rdata.get(password_setting), 247 'database_password': rdata.get(password_setting),
245 'database_type': 'mysql' 248 'database_type': 'mysql'
246 } 249 }
250 # Note(coreycb): We can drop mysql+pymysql if we want when the
251 # following review lands, though it seems mysql+pymysql would
252 # be preferred. https://review.openstack.org/#/c/462190/
253 if snap_install_requested():
254 ctxt['database_type'] = 'mysql+pymysql'
247 if self.context_complete(ctxt): 255 if self.context_complete(ctxt):
248 db_ssl(rdata, ctxt, self.ssl_dir) 256 db_ssl(rdata, ctxt, self.ssl_dir)
249 return ctxt 257 return ctxt
@@ -510,6 +518,10 @@ class CephContext(OSContextGenerator):
510 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) 518 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
511 if not ctxt.get('key'): 519 if not ctxt.get('key'):
512 ctxt['key'] = relation_get('key', rid=rid, unit=unit) 520 ctxt['key'] = relation_get('key', rid=rid, unit=unit)
521 if not ctxt.get('rbd_features'):
522 default_features = relation_get('rbd-features', rid=rid, unit=unit)
523 if default_features is not None:
524 ctxt['rbd_features'] = default_features
513 525
514 ceph_addrs = relation_get('ceph-public-address', rid=rid, 526 ceph_addrs = relation_get('ceph-public-address', rid=rid,
515 unit=unit) 527 unit=unit)
@@ -610,7 +622,6 @@ class HAProxyContext(OSContextGenerator):
610 ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') 622 ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
611 623
612 if config('prefer-ipv6'): 624 if config('prefer-ipv6'):
613 ctxt['ipv6'] = True
614 ctxt['local_host'] = 'ip6-localhost' 625 ctxt['local_host'] = 'ip6-localhost'
615 ctxt['haproxy_host'] = '::' 626 ctxt['haproxy_host'] = '::'
616 else: 627 else:
@@ -726,11 +737,17 @@ class ApacheSSLContext(OSContextGenerator):
726 return sorted(list(set(cns))) 737 return sorted(list(set(cns)))
727 738
728 def get_network_addresses(self): 739 def get_network_addresses(self):
729 """For each network configured, return corresponding address and vip 740 """For each network configured, return corresponding address and
730 (if available). 741 hostname or vip (if available).
731 742
732 Returns a list of tuples of the form: 743 Returns a list of tuples of the form:
733 744
745 [(address_in_net_a, hostname_in_net_a),
746 (address_in_net_b, hostname_in_net_b),
747 ...]
748
749 or, if no hostnames(s) available:
750
734 [(address_in_net_a, vip_in_net_a), 751 [(address_in_net_a, vip_in_net_a),
735 (address_in_net_b, vip_in_net_b), 752 (address_in_net_b, vip_in_net_b),
736 ...] 753 ...]
@@ -742,32 +759,27 @@ class ApacheSSLContext(OSContextGenerator):
742 ...] 759 ...]
743 """ 760 """
744 addresses = [] 761 addresses = []
745 if config('vip'): 762 for net_type in [INTERNAL, ADMIN, PUBLIC]:
746 vips = config('vip').split() 763 net_config = config(ADDRESS_MAP[net_type]['config'])
747 else: 764 # NOTE(jamespage): Fallback must always be private address
748 vips = [] 765 # as this is used to bind services on the
749 766 # local unit.
750 for net_type in ['os-internal-network', 'os-admin-network', 767 fallback = unit_get("private-address")
751 'os-public-network']: 768 if net_config:
752 addr = get_address_in_network(config(net_type), 769 addr = get_address_in_network(net_config,
753 unit_get('private-address')) 770 fallback)
754 if len(vips) > 1 and is_clustered():
755 if not config(net_type):
756 log("Multiple networks configured but net_type "
757 "is None (%s)." % net_type, level=WARNING)
758 continue
759
760 for vip in vips:
761 if is_address_in_network(config(net_type), vip):
762 addresses.append((addr, vip))
763 break
764
765 elif is_clustered() and config('vip'):
766 addresses.append((addr, config('vip')))
767 else: 771 else:
768 addresses.append((addr, addr)) 772 try:
773 addr = network_get_primary_address(
774 ADDRESS_MAP[net_type]['binding']
775 )
776 except NotImplementedError:
777 addr = fallback
778
779 endpoint = resolve_address(net_type)
780 addresses.append((addr, endpoint))
769 781
770 return sorted(addresses) 782 return sorted(set(addresses))
771 783
772 def __call__(self): 784 def __call__(self):
773 if isinstance(self.external_ports, six.string_types): 785 if isinstance(self.external_ports, six.string_types):
@@ -794,7 +806,7 @@ class ApacheSSLContext(OSContextGenerator):
794 self.configure_cert(cn) 806 self.configure_cert(cn)
795 807
796 addresses = self.get_network_addresses() 808 addresses = self.get_network_addresses()
797 for address, endpoint in sorted(set(addresses)): 809 for address, endpoint in addresses:
798 for api_port in self.external_ports: 810 for api_port in self.external_ports:
799 ext_port = determine_apache_port(api_port, 811 ext_port = determine_apache_port(api_port,
800 singlenode_mode=True) 812 singlenode_mode=True)
@@ -1397,14 +1409,38 @@ class NeutronAPIContext(OSContextGenerator):
1397 'rel_key': 'dns-domain', 1409 'rel_key': 'dns-domain',
1398 'default': None, 1410 'default': None,
1399 }, 1411 },
1412 'polling_interval': {
1413 'rel_key': 'polling-interval',
1414 'default': 2,
1415 },
1416 'rpc_response_timeout': {
1417 'rel_key': 'rpc-response-timeout',
1418 'default': 60,
1419 },
1420 'report_interval': {
1421 'rel_key': 'report-interval',
1422 'default': 30,
1423 },
1424 'enable_qos': {
1425 'rel_key': 'enable-qos',
1426 'default': False,
1427 },
1400 } 1428 }
1401 ctxt = self.get_neutron_options({}) 1429 ctxt = self.get_neutron_options({})
1402 for rid in relation_ids('neutron-plugin-api'): 1430 for rid in relation_ids('neutron-plugin-api'):
1403 for unit in related_units(rid): 1431 for unit in related_units(rid):
1404 rdata = relation_get(rid=rid, unit=unit) 1432 rdata = relation_get(rid=rid, unit=unit)
1433 # The l2-population key is used by the context as a way of
1434 # checking if the api service on the other end is sending data
1435 # in a recent format.
1405 if 'l2-population' in rdata: 1436 if 'l2-population' in rdata:
1406 ctxt.update(self.get_neutron_options(rdata)) 1437 ctxt.update(self.get_neutron_options(rdata))
1407 1438
1439 if ctxt['enable_qos']:
1440 ctxt['extension_drivers'] = 'qos'
1441 else:
1442 ctxt['extension_drivers'] = ''
1443
1408 return ctxt 1444 return ctxt
1409 1445
1410 def get_neutron_options(self, rdata): 1446 def get_neutron_options(self, rdata):
diff --git a/hooks/charmhelpers/contrib/openstack/keystone.py b/hooks/charmhelpers/contrib/openstack/keystone.py
index a15a03f..d7e02cc 100644
--- a/hooks/charmhelpers/contrib/openstack/keystone.py
+++ b/hooks/charmhelpers/contrib/openstack/keystone.py
@@ -29,7 +29,7 @@ def get_api_suffix(api_version):
29 @returns the api suffix formatted according to the given api 29 @returns the api suffix formatted according to the given api
30 version 30 version
31 """ 31 """
32 return 'v2.0' if api_version in (2, "2.0") else 'v3' 32 return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
33 33
34 34
35def format_endpoint(schema, addr, port, api_version): 35def format_endpoint(schema, addr, port, api_version):
diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
index 33ceee2..ed5c4f1 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -1,6 +1,6 @@
1############################################################################### 1###############################################################################
2# [ WARNING ] 2# [ WARNING ]
3# cinder configuration file maintained by Juju 3# ceph configuration file maintained by Juju
4# local changes may be overwritten. 4# local changes may be overwritten.
5############################################################################### 5###############################################################################
6[global] 6[global]
@@ -12,6 +12,9 @@ mon host = {{ mon_hosts }}
12log to syslog = {{ use_syslog }} 12log to syslog = {{ use_syslog }}
13err to syslog = {{ use_syslog }} 13err to syslog = {{ use_syslog }}
14clog to syslog = {{ use_syslog }} 14clog to syslog = {{ use_syslog }}
15{% if rbd_features %}
16rbd default features = {{ rbd_features }}
17{% endif %}
15 18
16[client] 19[client]
17{% if rbd_client_cache_settings -%} 20{% if rbd_client_cache_settings -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 54fba39..2e66045 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -1,6 +1,6 @@
1global 1global
2 log {{ local_host }} local0 2 log /var/lib/haproxy/dev/log local0
3 log {{ local_host }} local1 notice 3 log /var/lib/haproxy/dev/log local1 notice
4 maxconn 20000 4 maxconn 20000
5 user haproxy 5 user haproxy
6 group haproxy 6 group haproxy
@@ -48,9 +48,7 @@ listen stats
48{% for service, ports in service_ports.items() -%} 48{% for service, ports in service_ports.items() -%}
49frontend tcp-in_{{ service }} 49frontend tcp-in_{{ service }}
50 bind *:{{ ports[0] }} 50 bind *:{{ ports[0] }}
51 {% if ipv6 -%}
52 bind :::{{ ports[0] }} 51 bind :::{{ ports[0] }}
53 {% endif -%}
54 {% for frontend in frontends -%} 52 {% for frontend in frontends -%}
55 acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} 53 acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
56 use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} 54 use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
new file mode 100644
index 0000000..5dccd4b
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
@@ -0,0 +1,8 @@
1{% if transport_url -%}
2[oslo_messaging_notifications]
3driver = messagingv2
4transport_url = {{ transport_url }}
5{% if notification_topics -%}
6topics = {{ notification_topics }}
7{% endif -%}
8{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
index 934baf5..d8c1fc7 100644
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.fetch import apt_install, apt_update
20from charmhelpers.core.hookenv import ( 20from charmhelpers.core.hookenv import (
21 log, 21 log,
22 ERROR, 22 ERROR,
23 INFO 23 INFO,
24 TRACE
24) 25)
25from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES 26from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
26 27
@@ -80,8 +81,10 @@ def get_loader(templates_dir, os_release):
80 loaders.insert(0, FileSystemLoader(tmpl_dir)) 81 loaders.insert(0, FileSystemLoader(tmpl_dir))
81 if rel == os_release: 82 if rel == os_release:
82 break 83 break
84 # demote this log to the lowest level; we don't really need to see these
85 # lots in production even when debugging.
83 log('Creating choice loader with dirs: %s' % 86 log('Creating choice loader with dirs: %s' %
84 [l.searchpath for l in loaders], level=INFO) 87 [l.searchpath for l in loaders], level=TRACE)
85 return ChoiceLoader(loaders) 88 return ChoiceLoader(loaders)
86 89
87 90
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 161c786..837a167 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -26,11 +26,12 @@ import functools
26import shutil 26import shutil
27 27
28import six 28import six
29import tempfile
30import traceback 29import traceback
31import uuid 30import uuid
32import yaml 31import yaml
33 32
33from charmhelpers import deprecate
34
34from charmhelpers.contrib.network import ip 35from charmhelpers.contrib.network import ip
35 36
36from charmhelpers.core import unitdata 37from charmhelpers.core import unitdata
@@ -41,7 +42,6 @@ from charmhelpers.core.hookenv import (
41 config, 42 config,
42 log as juju_log, 43 log as juju_log,
43 charm_dir, 44 charm_dir,
44 DEBUG,
45 INFO, 45 INFO,
46 ERROR, 46 ERROR,
47 related_units, 47 related_units,
@@ -51,6 +51,7 @@ from charmhelpers.core.hookenv import (
51 status_set, 51 status_set,
52 hook_name, 52 hook_name,
53 application_version_set, 53 application_version_set,
54 cached,
54) 55)
55 56
56from charmhelpers.core.strutils import BasicStringComparator 57from charmhelpers.core.strutils import BasicStringComparator
@@ -82,11 +83,21 @@ from charmhelpers.core.host import (
82 restart_on_change_helper, 83 restart_on_change_helper,
83) 84)
84from charmhelpers.fetch import ( 85from charmhelpers.fetch import (
85 apt_install,
86 apt_cache, 86 apt_cache,
87 install_remote, 87 install_remote,
88 import_key as fetch_import_key,
89 add_source as fetch_add_source,
90 SourceConfigError,
91 GPGKeyError,
88 get_upstream_version 92 get_upstream_version
89) 93)
94
95from charmhelpers.fetch.snap import (
96 snap_install,
97 snap_refresh,
98 SNAP_CHANNELS,
99)
100
90from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk 101from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
91from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device 102from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
92from charmhelpers.contrib.openstack.exceptions import OSContextError 103from charmhelpers.contrib.openstack.exceptions import OSContextError
@@ -175,7 +186,7 @@ SWIFT_CODENAMES = OrderedDict([
175 ('ocata', 186 ('ocata',
176 ['2.11.0', '2.12.0', '2.13.0']), 187 ['2.11.0', '2.12.0', '2.13.0']),
177 ('pike', 188 ('pike',
178 ['2.13.0']), 189 ['2.13.0', '2.15.0']),
179]) 190])
180 191
181# >= Liberty version->codename mapping 192# >= Liberty version->codename mapping
@@ -324,8 +335,10 @@ def get_os_codename_install_source(src):
324 return ca_rel 335 return ca_rel
325 336
326 # Best guess match based on deb string provided 337 # Best guess match based on deb string provided
327 if src.startswith('deb') or src.startswith('ppa'): 338 if (src.startswith('deb') or
328 for k, v in six.iteritems(OPENSTACK_CODENAMES): 339 src.startswith('ppa') or
340 src.startswith('snap')):
341 for v in OPENSTACK_CODENAMES.values():
329 if v in src: 342 if v in src:
330 return v 343 return v
331 344
@@ -394,6 +407,19 @@ def get_swift_codename(version):
394 407
395def get_os_codename_package(package, fatal=True): 408def get_os_codename_package(package, fatal=True):
396 '''Derive OpenStack release codename from an installed package.''' 409 '''Derive OpenStack release codename from an installed package.'''
410
411 if snap_install_requested():
412 cmd = ['snap', 'list', package]
413 try:
414 out = subprocess.check_output(cmd)
415 except subprocess.CalledProcessError as e:
416 return None
417 lines = out.split('\n')
418 for line in lines:
419 if package in line:
420 # Second item in list is Version
421 return line.split()[1]
422
397 import apt_pkg as apt 423 import apt_pkg as apt
398 424
399 cache = apt_cache() 425 cache = apt_cache()
@@ -469,13 +495,14 @@ def get_os_version_package(pkg, fatal=True):
469 # error_out(e) 495 # error_out(e)
470 496
471 497
472os_rel = None 498# Module local cache variable for the os_release.
499_os_rel = None
473 500
474 501
475def reset_os_release(): 502def reset_os_release():
476 '''Unset the cached os_release version''' 503 '''Unset the cached os_release version'''
477 global os_rel 504 global _os_rel
478 os_rel = None 505 _os_rel = None
479 506
480 507
481def os_release(package, base='essex', reset_cache=False): 508def os_release(package, base='essex', reset_cache=False):
@@ -489,150 +516,77 @@ def os_release(package, base='essex', reset_cache=False):
489 the installation source, the earliest release supported by the charm should 516 the installation source, the earliest release supported by the charm should
490 be returned. 517 be returned.
491 ''' 518 '''
492 global os_rel 519 global _os_rel
493 if reset_cache: 520 if reset_cache:
494 reset_os_release() 521 reset_os_release()
495 if os_rel: 522 if _os_rel:
496 return os_rel 523 return _os_rel
497 os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or 524 _os_rel = (
498 get_os_codename_package(package, fatal=False) or 525 git_os_codename_install_source(config('openstack-origin-git')) or
499 get_os_codename_install_source(config('openstack-origin')) or 526 get_os_codename_package(package, fatal=False) or
500 base) 527 get_os_codename_install_source(config('openstack-origin')) or
501 return os_rel 528 base)
529 return _os_rel
502 530
503 531
532@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
504def import_key(keyid): 533def import_key(keyid):
505 key = keyid.strip() 534 """Import a key, either ASCII armored, or a GPG key id.
506 if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
507 key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
508 juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
509 juju_log("Importing ASCII Armor PGP key", level=DEBUG)
510 with tempfile.NamedTemporaryFile() as keyfile:
511 with open(keyfile.name, 'w') as fd:
512 fd.write(key)
513 fd.write("\n")
514
515 cmd = ['apt-key', 'add', keyfile.name]
516 try:
517 subprocess.check_call(cmd)
518 except subprocess.CalledProcessError:
519 error_out("Error importing PGP key '%s'" % key)
520 else:
521 juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
522 juju_log("Importing PGP key from keyserver", level=DEBUG)
523 cmd = ['apt-key', 'adv', '--keyserver',
524 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
525 try:
526 subprocess.check_call(cmd)
527 except subprocess.CalledProcessError:
528 error_out("Error importing PGP key '%s'" % key)
529
530
531def get_source_and_pgp_key(input):
532 """Look for a pgp key ID or ascii-armor key in the given input."""
533 index = input.strip()
534 index = input.rfind('|')
535 if index < 0:
536 return input, None
537
538 key = input[index + 1:].strip('|')
539 source = input[:index]
540 return source, key
541
542
543def configure_installation_source(rel):
544 '''Configure apt installation source.'''
545 if rel == 'distro':
546 return
547 elif rel == 'distro-proposed':
548 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
549 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
550 f.write(DISTRO_PROPOSED % ubuntu_rel)
551 elif rel[:4] == "ppa:":
552 src, key = get_source_and_pgp_key(rel)
553 if key:
554 import_key(key)
555
556 subprocess.check_call(["add-apt-repository", "-y", src])
557 elif rel[:3] == "deb":
558 src, key = get_source_and_pgp_key(rel)
559 if key:
560 import_key(key)
561
562 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
563 f.write(src)
564 elif rel[:6] == 'cloud:':
565 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
566 rel = rel.split(':')[1]
567 u_rel = rel.split('-')[0]
568 ca_rel = rel.split('-')[1]
569
570 if u_rel != ubuntu_rel:
571 e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
572 'version (%s)' % (ca_rel, ubuntu_rel)
573 error_out(e)
574 535
575 if 'staging' in ca_rel: 536 @param keyid: the key in ASCII armor format, or a GPG key id.
576 # staging is just a regular PPA. 537 @raises SystemExit() via sys.exit() on failure.
577 os_rel = ca_rel.split('/')[0] 538 """
578 ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel 539 try:
579 cmd = 'add-apt-repository -y %s' % ppa 540 return fetch_import_key(keyid)
580 subprocess.check_call(cmd.split(' ')) 541 except GPGKeyError as e:
581 return 542 error_out("Could not import key: {}".format(str(e)))
582
583 # map charm config options to actual archive pockets.
584 pockets = {
585 'folsom': 'precise-updates/folsom',
586 'folsom/updates': 'precise-updates/folsom',
587 'folsom/proposed': 'precise-proposed/folsom',
588 'grizzly': 'precise-updates/grizzly',
589 'grizzly/updates': 'precise-updates/grizzly',
590 'grizzly/proposed': 'precise-proposed/grizzly',
591 'havana': 'precise-updates/havana',
592 'havana/updates': 'precise-updates/havana',
593 'havana/proposed': 'precise-proposed/havana',
594 'icehouse': 'precise-updates/icehouse',
595 'icehouse/updates': 'precise-updates/icehouse',
596 'icehouse/proposed': 'precise-proposed/icehouse',
597 'juno': 'trusty-updates/juno',
598 'juno/updates': 'trusty-updates/juno',
599 'juno/proposed': 'trusty-proposed/juno',
600 'kilo': 'trusty-updates/kilo',
601 'kilo/updates': 'trusty-updates/kilo',
602 'kilo/proposed': 'trusty-proposed/kilo',
603 'liberty': 'trusty-updates/liberty',
604 'liberty/updates': 'trusty-updates/liberty',
605 'liberty/proposed': 'trusty-proposed/liberty',
606 'mitaka': 'trusty-updates/mitaka',
607 'mitaka/updates': 'trusty-updates/mitaka',
608 'mitaka/proposed': 'trusty-proposed/mitaka',
609 'newton': 'xenial-updates/newton',
610 'newton/updates': 'xenial-updates/newton',
611 'newton/proposed': 'xenial-proposed/newton',
612 'ocata': 'xenial-updates/ocata',
613 'ocata/updates': 'xenial-updates/ocata',
614 'ocata/proposed': 'xenial-proposed/ocata',
615 'pike': 'xenial-updates/pike',
616 'pike/updates': 'xenial-updates/pike',
617 'pike/proposed': 'xenial-proposed/pike',
618 'queens': 'xenial-updates/queens',
619 'queens/updates': 'xenial-updates/queens',
620 'queens/proposed': 'xenial-proposed/queens',
621 }
622 543
623 try:
624 pocket = pockets[ca_rel]
625 except KeyError:
626 e = 'Invalid Cloud Archive release specified: %s' % rel
627 error_out(e)
628 544
629 src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) 545def get_source_and_pgp_key(source_and_key):
630 apt_install('ubuntu-cloud-keyring', fatal=True) 546 """Look for a pgp key ID or ascii-armor key in the given input.
631 547
 632 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: 548 :param source_and_key: String, "source_spec|keyid" where '|keyid' is
633 f.write(src) 549 optional.
634 else: 550 :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id
635 error_out("Invalid openstack-release specified: %s" % rel) 551 if there was no '|' in the source_and_key string.
552 """
553 try:
554 source, key = source_and_key.split('|', 2)
555 return source, key or None
556 except ValueError:
557 return source_and_key, None
558
559
560@deprecate("use charmhelpers.fetch.add_source() instead.",
561 "2017-07", log=juju_log)
562def configure_installation_source(source_plus_key):
563 """Configure an installation source.
564
565 The functionality is provided by charmhelpers.fetch.add_source()
566 The difference between the two functions is that add_source() signature
567 requires the key to be passed directly, whereas this function passes an
 568 optional key by appending '|<key>' to the end of the source specification
569 'source'.
570
571 Another difference from add_source() is that the function calls sys.exit(1)
572 if the configuration fails, whereas add_source() raises
573 SourceConfigurationError(). Another difference, is that add_source()
574 silently fails (with a juju_log command) if there is no matching source to
575 configure, whereas this function fails with a sys.exit(1)
576
577 :param source: String_plus_key -- see above for details.
578
579 Note that the behaviour on error is to log the error to the juju log and
580 then call sys.exit(1).
581 """
582 # extract the key if there is one, denoted by a '|' in the rel
583 source, key = get_source_and_pgp_key(source_plus_key)
584
585 # handle the ordinary sources via add_source
586 try:
587 fetch_add_source(source, key, fail_invalid=True)
588 except SourceConfigError as se:
589 error_out(str(se))
636 590
637 591
638def config_value_changed(option): 592def config_value_changed(option):
@@ -677,12 +631,14 @@ def openstack_upgrade_available(package):
677 631
678 :returns: bool: : Returns True if configured installation source offers 632 :returns: bool: : Returns True if configured installation source offers
679 a newer version of package. 633 a newer version of package.
680
681 """ 634 """
682 635
683 import apt_pkg as apt 636 import apt_pkg as apt
684 src = config('openstack-origin') 637 src = config('openstack-origin')
685 cur_vers = get_os_version_package(package) 638 cur_vers = get_os_version_package(package)
639 if not cur_vers:
640 # The package has not been installed yet do not attempt upgrade
641 return False
686 if "swift" in package: 642 if "swift" in package:
687 codename = get_os_codename_install_source(src) 643 codename = get_os_codename_install_source(src)
688 avail_vers = get_os_version_codename_swift(codename) 644 avail_vers = get_os_version_codename_swift(codename)
@@ -1933,6 +1889,30 @@ def pausable_restart_on_change(restart_map, stopstart=False,
1933 return wrap 1889 return wrap
1934 1890
1935 1891
def ordered(orderme):
    """Converts the provided dictionary into a collections.OrderedDict.

    The items in the returned OrderedDict will be inserted based on the
    natural sort order of the keys. Nested dictionaries will also be sorted
    in order to ensure fully predictable ordering.

    :param orderme: the dict to order
    :return: collections.OrderedDict
    :raises: ValueError: if `orderme` isn't a dict instance.
    """
    if not isinstance(orderme, dict):
        raise ValueError('argument must be a dict type')

    # dict.items() + sorted() works identically on py2 and py3; no need for
    # six.iteritems here. Keys are unique, so sorting the (key, value) pairs
    # never falls through to comparing values.
    result = OrderedDict()
    for key, value in sorted(orderme.items()):
        # Recurse into nested dicts so ordering is predictable at every level.
        result[key] = ordered(value) if isinstance(value, dict) else value

    return result
1914
1915
1936def config_flags_parser(config_flags): 1916def config_flags_parser(config_flags):
1937 """Parses config flags string into dict. 1917 """Parses config flags string into dict.
1938 1918
@@ -1944,15 +1924,13 @@ def config_flags_parser(config_flags):
1944 example, a string in the format of 'key1=value1, key2=value2' will 1924 example, a string in the format of 'key1=value1, key2=value2' will
1945 return a dict of: 1925 return a dict of:
1946 1926
1947 {'key1': 'value1', 1927 {'key1': 'value1', 'key2': 'value2'}.
1948 'key2': 'value2'}.
1949 1928
1950 2. A string in the above format, but supporting a comma-delimited list 1929 2. A string in the above format, but supporting a comma-delimited list
1951 of values for the same key. For example, a string in the format of 1930 of values for the same key. For example, a string in the format of
1952 'key1=value1, key2=value3,value4,value5' will return a dict of: 1931 'key1=value1, key2=value3,value4,value5' will return a dict of:
1953 1932
1954 {'key1', 'value1', 1933 {'key1': 'value1', 'key2': 'value2,value3,value4'}
1955 'key2', 'value2,value3,value4'}
1956 1934
1957 3. A string containing a colon character (:) prior to an equal 1935 3. A string containing a colon character (:) prior to an equal
1958 character (=) will be treated as yaml and parsed as such. This can be 1936 character (=) will be treated as yaml and parsed as such. This can be
@@ -1972,7 +1950,7 @@ def config_flags_parser(config_flags):
1972 equals = config_flags.find('=') 1950 equals = config_flags.find('=')
1973 if colon > 0: 1951 if colon > 0:
1974 if colon < equals or equals < 0: 1952 if colon < equals or equals < 0:
1975 return yaml.safe_load(config_flags) 1953 return ordered(yaml.safe_load(config_flags))
1976 1954
1977 if config_flags.find('==') >= 0: 1955 if config_flags.find('==') >= 0:
1978 juju_log("config_flags is not in expected format (key=value)", 1956 juju_log("config_flags is not in expected format (key=value)",
@@ -1985,7 +1963,7 @@ def config_flags_parser(config_flags):
1985 # split on '='. 1963 # split on '='.
1986 split = config_flags.strip(' =').split('=') 1964 split = config_flags.strip(' =').split('=')
1987 limit = len(split) 1965 limit = len(split)
1988 flags = {} 1966 flags = OrderedDict()
1989 for i in range(0, limit - 1): 1967 for i in range(0, limit - 1):
1990 current = split[i] 1968 current = split[i]
1991 next = split[i + 1] 1969 next = split[i + 1]
@@ -2052,3 +2030,84 @@ def token_cache_pkgs(source=None, release=None):
2052 if enable_memcache(source=source, release=release): 2030 if enable_memcache(source=source, release=release):
2053 packages.extend(['memcached', 'python-memcache']) 2031 packages.extend(['memcached', 'python-memcache'])
2054 return packages 2032 return packages
2033
2034
def update_json_file(filename, items):
    """Updates the json `filename` with a given dict.

    :param filename: json filename (i.e.: /etc/glance/policy.json)
    :param items: dict of items to update
    """
    # Read-modify-write: load the whole document, merge, then rewrite.
    with open(filename) as fd:
        document = json.load(fd)
    document.update(items)
    with open(filename, "w") as fd:
        json.dump(document, fd, indent=4)
2045
2046
@cached
def snap_install_requested():
    """ Determine if installing from snaps

    Returns True when openstack-origin is of the form
    snap:channel-series-release and the channel component is one of the
    recognised SNAP_CHANNELS.
    """
    origin = config('openstack-origin') or ""
    if not origin.startswith('snap:'):
        return False

    # Strip the 'snap:' prefix, then break out channel-series-release;
    # a malformed origin (wrong number of '-' parts) raises ValueError.
    channel, _series, _release = origin[5:].split('-')
    return channel.lower() in SNAP_CHANNELS
2063
2064
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
    """Generate a dictionary of snap install information from origin

    @param snaps: List of snaps
    @param src: String of openstack-origin or source of the form
        snap:channel-series-track
    @param mode: String classic, devmode or jailmode
    @returns: Dictionary of snaps with channels and modes
    """
    if not src.startswith('snap:'):
        juju_log("Snap source is not a snap origin", 'WARN')
        return {}

    # 'snap:channel-series-track' -> snapd flag '--channel=<track>/<channel>'
    channel_part, _series, track = src[5:].split('-')
    channel_flag = '--channel={}/{}'.format(track, channel_part)

    return dict((name, {'channel': channel_flag, 'mode': mode})
                for name in snaps)
2085
2086
def install_os_snaps(snaps, refresh=False):
    """Install (or refresh) OpenStack snaps from channel and with mode

    @param snaps: Dictionary of snaps with channels and modes of the form:
        {'snap_name': {'channel': 'snap_channel',
                       'mode': 'snap_mode'}}
        Where channel is a snapstore channel and mode is --classic, --devmode
        or --jailmode.
    @param refresh: Boolean; refresh (update) already-installed snaps instead
        of installing them.
    """
    # NOTE(review): previous docstring documented a 'post_snap_install'
    # callback parameter that never existed; removed.

    def _ensure_flag(flag):
        # snap_install/snap_refresh expect fully-formed '--<flag>' options.
        if flag.startswith('--'):
            return flag
        return '--{}'.format(flag)

    # The loop body is identical either way; only the snap op differs.
    operation = snap_refresh if refresh else snap_install
    for snap in snaps.keys():
        operation(snap,
                  _ensure_flag(snaps[snap]['channel']),
                  _ensure_flag(snaps[snap]['mode']))
diff --git a/hooks/charmhelpers/contrib/storage/linux/bcache.py b/hooks/charmhelpers/contrib/storage/linux/bcache.py
new file mode 100644
index 0000000..605991e
--- /dev/null
+++ b/hooks/charmhelpers/contrib/storage/linux/bcache.py
@@ -0,0 +1,74 @@
1# Copyright 2017 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14import os
15import json
16
17from charmhelpers.core.hookenv import log
18
19stats_intervals = ['stats_day', 'stats_five_minute',
20 'stats_hour', 'stats_total']
21
22SYSFS = '/sys'
23
24
class Bcache(object):
    """Bcache behaviour

    Wraps a single bcache sysfs directory (`cachepath`) and exposes its
    statistics counters.
    """

    def __init__(self, cachepath):
        # Path to the bcache sysfs dir, e.g. /sys/block/sdb/bcache
        self.cachepath = cachepath

    @classmethod
    def fromdevice(cls, devname):
        """Build a Bcache from a block device name (e.g. 'sdb')."""
        return cls('{}/block/{}/bcache'.format(SYSFS, devname))

    def __str__(self):
        return self.cachepath

    def get_stats(self, interval):
        """Get cache stats

        :param interval: interval name suffix ('day', 'five_minute', 'hour'
            or 'total'), mapped to the stats_<interval> sysfs directory.
        :returns: dict of stat-file name -> stripped file contents.
        """
        intervaldir = 'stats_{}'.format(interval)
        path = "{}/{}".format(self.cachepath, intervaldir)
        out = dict()
        for elem in os.listdir(path):
            # Use a context manager so each sysfs file handle is closed;
            # the previous open(...).read() leaked one handle per stat.
            with open('{}/{}'.format(path, elem)) as statfile:
                out[elem] = statfile.read().strip()
        return out
48
49
def get_bcache_fs():
    """Return all cache sets

    :returns: set of Bcache objects, or an empty list when the bcache
        sysfs root does not exist (no bcache on this host).
    """
    root = "{}/fs/bcache".format(SYSFS)
    try:
        entries = os.listdir(root)
    except OSError:
        log("No bcache fs found")
        return []
    # 'register'/'register_quiet' are control files, not cache sets.
    return {Bcache('{}/{}'.format(root, entry))
            for entry in entries if not entry.startswith('register')}
61
62
def get_stats_action(cachespec, interval):
    """Action for getting bcache statistics for a given cachespec.

    Cachespec can either be a device name, eg. 'sdb', which will retrieve
    cache stats for the given device, or 'global', which will retrieve stats
    for all cachesets.

    :returns: JSON string mapping cache path -> {stat name: value}.
    """
    if cachespec == 'global':
        targets = get_bcache_fs()
    else:
        targets = [Bcache.fromdevice(cachespec)]
    stats = {cache.cachepath: cache.get_stats(interval) for cache in targets}
    return json.dumps(stats, indent=4, separators=(',', ': '))
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 9417d68..e5a01b1 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -63,6 +63,7 @@ from charmhelpers.core.host import (
63from charmhelpers.fetch import ( 63from charmhelpers.fetch import (
64 apt_install, 64 apt_install,
65) 65)
66from charmhelpers.core.unitdata import kv
66 67
67from charmhelpers.core.kernel import modprobe 68from charmhelpers.core.kernel import modprobe
68from charmhelpers.contrib.openstack.utils import config_flags_parser 69from charmhelpers.contrib.openstack.utils import config_flags_parser
@@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'):
1314 relation_set(relation_id=rid, broker_req=request.request) 1315 relation_set(relation_id=rid, broker_req=request.request)
1315 1316
1316 1317
def is_broker_action_done(action, rid=None, unit=None):
    """Check whether broker action has completed yet.

    @param action: name of action to be performed
    @param rid: optional relation id to read the broker response from
    @param unit: optional remote unit to read the broker response from
    @returns True if action complete otherwise False
    """
    response = (relation_get(rid, unit) or {}).get(get_broker_rsp_key())
    if not response:
        # Broker has not answered yet, so the action cannot be done.
        return False

    rsp = CephBrokerRsp(response)
    unit_number = local_unit().partition('/')[2]
    key = "unit_{}_ceph_broker_action.{}".format(unit_number, action)
    # Done iff the locally recorded request id matches the broker response.
    recorded = kv().get(key=key)
    return bool(recorded and recorded == rsp.request_id)
1338
1339
def mark_broker_action_done(action, rid=None, unit=None):
    """Mark action as having been completed.

    @param action: name of action to be performed
    @param rid: optional relation id to read the broker response from
    @param unit: optional remote unit to read the broker response from
    @returns None
    """
    response = (relation_get(rid, unit) or {}).get(get_broker_rsp_key())
    if not response:
        # Nothing to record until the broker has actually responded.
        return

    rsp = CephBrokerRsp(response)
    unit_number = local_unit().partition('/')[2]
    key = "unit_{}_ceph_broker_action.{}".format(unit_number, action)
    # Persist the request id so is_broker_action_done() can match it later.
    store = kv()
    store.set(key=key, value=rsp.request_id)
    store.flush()
1358
1317class CephConfContext(object): 1359class CephConfContext(object):
1318 """Ceph config (ceph.conf) context. 1360 """Ceph config (ceph.conf) context.
1319 1361
@@ -1330,7 +1372,7 @@ class CephConfContext(object):
1330 return {} 1372 return {}
1331 1373
1332 conf = config_flags_parser(conf) 1374 conf = config_flags_parser(conf)
1333 if type(conf) != dict: 1375 if not isinstance(conf, dict):
1334 log("Provided config-flags is not a dictionary - ignoring", 1376 log("Provided config-flags is not a dictionary - ignoring",
1335 level=WARNING) 1377 level=WARNING)
1336 return {} 1378 return {}
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index e44e22b..12f37b2 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -43,6 +43,7 @@ ERROR = "ERROR"
43WARNING = "WARNING" 43WARNING = "WARNING"
44INFO = "INFO" 44INFO = "INFO"
45DEBUG = "DEBUG" 45DEBUG = "DEBUG"
46TRACE = "TRACE"
46MARKER = object() 47MARKER = object()
47 48
48cache = {} 49cache = {}
@@ -202,6 +203,27 @@ def service_name():
202 return local_unit().split('/')[0] 203 return local_unit().split('/')[0]
203 204
204 205
def principal_unit():
    """Returns the principal unit of this unit, otherwise None"""
    # Juju 2.2 and above exports JUJU_PRINCIPAL_UNIT directly.
    pu = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
    if pu is not None:
        # An empty value means this unit *is* the principal.
        return pu if pu else os.environ['JUJU_UNIT_NAME']
    # Juju 2.1 and below: walk co-located units' metadata.yaml and pick
    # the first one that is not a subordinate.
    for reltype in relation_types():
        for rid in relation_ids(reltype):
            for unit in related_units(rid):
                if not _metadata_unit(unit).pop('subordinate', None):
                    return unit
    return None
225
226
205@cached 227@cached
206def remote_service_name(relid=None): 228def remote_service_name(relid=None):
207 """The remote service name for a given relation-id (or the current relation)""" 229 """The remote service name for a given relation-id (or the current relation)"""
@@ -478,6 +500,21 @@ def metadata():
478 return yaml.safe_load(md) 500 return yaml.safe_load(md)
479 501
480 502
def _metadata_unit(unit):
    """Given the name of a unit (e.g. apache2/0), get the unit charm's
    metadata.yaml. Very similar to metadata() but allows us to inspect
    other units. Unit needs to be co-located, such as a subordinate or
    principal/primary.

    :returns: metadata.yaml as a python object.
    """
    # Unit dirs live two levels above this charm's dir, named e.g.
    # 'unit-apache2-0' ('/' in the unit name becomes '-').
    parts = charm_dir().split(os.sep)
    basedir = os.sep.join(parts[:-2])
    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
    path = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
    with open(path) as md:
        return yaml.safe_load(md)
516
517
481@cached 518@cached
482def relation_types(): 519def relation_types():
483 """Get a list of relation types supported by this charm""" 520 """Get a list of relation types supported by this charm"""
@@ -753,6 +790,9 @@ class Hooks(object):
753 790
def charm_dir():
    """Return the root directory of the current charm"""
    # Juju 2.x sets JUJU_CHARM_DIR; fall back to the legacy CHARM_DIR.
    juju_dir = os.environ.get('JUJU_CHARM_DIR')
    return juju_dir if juju_dir is not None else os.environ.get('CHARM_DIR')
757 797
758 798
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 88e80a4..5656e2f 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -34,7 +34,7 @@ import six
34 34
35from contextlib import contextmanager 35from contextlib import contextmanager
36from collections import OrderedDict 36from collections import OrderedDict
37from .hookenv import log 37from .hookenv import log, DEBUG
38from .fstab import Fstab 38from .fstab import Fstab
39from charmhelpers.osplatform import get_platform 39from charmhelpers.osplatform import get_platform
40 40
@@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
191 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) 191 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
192 sysv_file = os.path.join(initd_dir, service_name) 192 sysv_file = os.path.join(initd_dir, service_name)
193 if init_is_systemd(): 193 if init_is_systemd():
194 service('disable', service_name)
194 service('mask', service_name) 195 service('mask', service_name)
195 elif os.path.exists(upstart_file): 196 elif os.path.exists(upstart_file):
196 override_path = os.path.join( 197 override_path = os.path.join(
@@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init",
225 sysv_file = os.path.join(initd_dir, service_name) 226 sysv_file = os.path.join(initd_dir, service_name)
226 if init_is_systemd(): 227 if init_is_systemd():
227 service('unmask', service_name) 228 service('unmask', service_name)
229 service('enable', service_name)
228 elif os.path.exists(upstart_file): 230 elif os.path.exists(upstart_file):
229 override_path = os.path.join( 231 override_path = os.path.join(
230 init_dir, '{}.override'.format(service_name)) 232 init_dir, '{}.override'.format(service_name))
@@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
485 487
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string.

    The write is skipped when the file already holds exactly `content`;
    in that case only the ownership is fixed up if it differs.

    :param path: target file path
    :param content: byte string to write
    :param owner: owning user name
    :param group: owning group name
    :param perms: file mode applied when the file is (re)written
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # lets see if we can grab the file and compare the context, to avoid doing
    # a write.
    existing_content = None
    existing_uid, existing_gid = None, None
    try:
        with open(path, 'rb') as target:
            existing_content = target.read()
        stat = os.stat(path)
        existing_uid, existing_gid = stat.st_uid, stat.st_gid
    except (IOError, OSError):
        # File missing/unreadable: fall through and (re)create it. Was a
        # bare 'except:', which also swallowed e.g. KeyboardInterrupt.
        pass
    if content != existing_content:
        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
            level=DEBUG)
        with open(path, 'wb') as target:
            os.fchown(target.fileno(), uid, gid)
            os.fchmod(target.fileno(), perms)
            target.write(content)
        return
    # the contents were the same, but we might still need to change the
    # ownership.
    if existing_uid != uid:
        log("Changing uid on already existing content: {} -> {}"
            .format(existing_uid, uid), level=DEBUG)
        os.chown(path, uid, -1)
    if existing_gid != gid:
        log("Changing gid on already existing content: {} -> {}"
            .format(existing_gid, gid), level=DEBUG)
        os.chown(path, -1, gid)
495 521
496 522
497def fstab_remove(mp): 523def fstab_remove(mp):
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index ec5e0fe..480a627 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -48,6 +48,13 @@ class AptLockError(Exception):
48 pass 48 pass
49 49
50 50
51class GPGKeyError(Exception):
52 """Exception occurs when a GPG key cannot be fetched or used. The message
53 indicates what the problem is.
54 """
55 pass
56
57
51class BaseFetchHandler(object): 58class BaseFetchHandler(object):
52 59
53 """Base class for FetchHandler implementations in fetch plugins""" 60 """Base class for FetchHandler implementations in fetch plugins"""
@@ -77,21 +84,22 @@ module = "charmhelpers.fetch.%s" % __platform__
77fetch = importlib.import_module(module) 84fetch = importlib.import_module(module)
78 85
79filter_installed_packages = fetch.filter_installed_packages 86filter_installed_packages = fetch.filter_installed_packages
80install = fetch.install 87install = fetch.apt_install
81upgrade = fetch.upgrade 88upgrade = fetch.apt_upgrade
82update = fetch.update 89update = _fetch_update = fetch.apt_update
83purge = fetch.purge 90purge = fetch.apt_purge
84add_source = fetch.add_source 91add_source = fetch.add_source
85 92
86if __platform__ == "ubuntu": 93if __platform__ == "ubuntu":
87 apt_cache = fetch.apt_cache 94 apt_cache = fetch.apt_cache
88 apt_install = fetch.install 95 apt_install = fetch.apt_install
89 apt_update = fetch.update 96 apt_update = fetch.apt_update
90 apt_upgrade = fetch.upgrade 97 apt_upgrade = fetch.apt_upgrade
91 apt_purge = fetch.purge 98 apt_purge = fetch.apt_purge
92 apt_mark = fetch.apt_mark 99 apt_mark = fetch.apt_mark
93 apt_hold = fetch.apt_hold 100 apt_hold = fetch.apt_hold
94 apt_unhold = fetch.apt_unhold 101 apt_unhold = fetch.apt_unhold
102 import_key = fetch.import_key
95 get_upstream_version = fetch.get_upstream_version 103 get_upstream_version = fetch.get_upstream_version
96elif __platform__ == "centos": 104elif __platform__ == "centos":
97 yum_search = fetch.yum_search 105 yum_search = fetch.yum_search
@@ -135,7 +143,7 @@ def configure_sources(update=False,
135 for source, key in zip(sources, keys): 143 for source, key in zip(sources, keys):
136 add_source(source, key) 144 add_source(source, key)
137 if update: 145 if update:
138 fetch.update(fatal=True) 146 _fetch_update(fatal=True)
139 147
140 148
141def install_remote(source, *args, **kwargs): 149def install_remote(source, *args, **kwargs):
diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py
index 604bbfb..a91dcff 100644
--- a/hooks/charmhelpers/fetch/centos.py
+++ b/hooks/charmhelpers/fetch/centos.py
@@ -132,7 +132,7 @@ def add_source(source, key=None):
132 key_file.write(key) 132 key_file.write(key)
133 key_file.flush() 133 key_file.flush()
134 key_file.seek(0) 134 key_file.seek(0)
135 subprocess.check_call(['rpm', '--import', key_file]) 135 subprocess.check_call(['rpm', '--import', key_file.name])
136 else: 136 else:
137 subprocess.check_call(['rpm', '--import', key]) 137 subprocess.check_call(['rpm', '--import', key])
138 138
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
index 23c707b..112a54c 100644
--- a/hooks/charmhelpers/fetch/snap.py
+++ b/hooks/charmhelpers/fetch/snap.py
@@ -18,15 +18,23 @@ If writing reactive charms, use the snap layer:
18https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html 18https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
19""" 19"""
20import subprocess 20import subprocess
21from os import environ 21import os
22from time import sleep 22from time import sleep
23from charmhelpers.core.hookenv import log 23from charmhelpers.core.hookenv import log
24 24
25__author__ = 'Joseph Borg <joseph.borg@canonical.com>' 25__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
26 26
27SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). 27# The return code for "couldn't acquire lock" in Snap
28# (hopefully this will be improved).
29SNAP_NO_LOCK = 1
28SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. 30SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
29SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 31SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
32SNAP_CHANNELS = [
33 'edge',
34 'beta',
35 'candidate',
36 'stable',
37]
30 38
31 39
32class CouldNotAcquireLockException(Exception): 40class CouldNotAcquireLockException(Exception):
@@ -47,13 +55,17 @@ def _snap_exec(commands):
47 55
48 while return_code is None or return_code == SNAP_NO_LOCK: 56 while return_code is None or return_code == SNAP_NO_LOCK:
49 try: 57 try:
50 return_code = subprocess.check_call(['snap'] + commands, env=environ) 58 return_code = subprocess.check_call(['snap'] + commands,
59 env=os.environ)
51 except subprocess.CalledProcessError as e: 60 except subprocess.CalledProcessError as e:
52 retry_count += + 1 61 retry_count += + 1
53 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: 62 if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
54 raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) 63 raise CouldNotAcquireLockException(
64 'Could not aquire lock after {} attempts'
65 .format(SNAP_NO_LOCK_RETRY_COUNT))
55 return_code = e.returncode 66 return_code = e.returncode
56 log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') 67 log('Snap failed to acquire lock, trying again in {} seconds.'
68 .format(SNAP_NO_LOCK_RETRY_DELAY, level='WARN'))
57 sleep(SNAP_NO_LOCK_RETRY_DELAY) 69 sleep(SNAP_NO_LOCK_RETRY_DELAY)
58 70
59 return return_code 71 return return_code
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 7bc6cc7..40e1cb5 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -12,29 +12,48 @@
12# See the License for the specific language governing permissions and 12# See the License for the specific language governing permissions and
13# limitations under the License. 13# limitations under the License.
14 14
15from collections import OrderedDict
15import os 16import os
17import platform
18import re
16import six 19import six
17import time 20import time
18import subprocess 21import subprocess
19
20from tempfile import NamedTemporaryFile 22from tempfile import NamedTemporaryFile
23
21from charmhelpers.core.host import ( 24from charmhelpers.core.host import (
22 lsb_release 25 lsb_release
23) 26)
24from charmhelpers.core.hookenv import log 27from charmhelpers.core.hookenv import (
25from charmhelpers.fetch import SourceConfigError 28 log,
26 29 DEBUG,
30 WARNING,
31)
32from charmhelpers.fetch import SourceConfigError, GPGKeyError
33
34PROPOSED_POCKET = (
35 "# Proposed\n"
36 "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
37 "multiverse restricted\n")
38PROPOSED_PORTS_POCKET = (
39 "# Proposed\n"
40 "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
41 "multiverse restricted\n")
42# Only supports 64bit and ppc64 at the moment.
43ARCH_TO_PROPOSED_POCKET = {
44 'x86_64': PROPOSED_POCKET,
45 'ppc64le': PROPOSED_PORTS_POCKET,
46 'aarch64': PROPOSED_PORTS_POCKET,
47}
48CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
49CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
27CLOUD_ARCHIVE = """# Ubuntu Cloud Archive 50CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
28deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main 51deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
29""" 52"""
30
31PROPOSED_POCKET = """# Proposed
32deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
33"""
34
35CLOUD_ARCHIVE_POCKETS = { 53CLOUD_ARCHIVE_POCKETS = {
36 # Folsom 54 # Folsom
37 'folsom': 'precise-updates/folsom', 55 'folsom': 'precise-updates/folsom',
56 'folsom/updates': 'precise-updates/folsom',
38 'precise-folsom': 'precise-updates/folsom', 57 'precise-folsom': 'precise-updates/folsom',
39 'precise-folsom/updates': 'precise-updates/folsom', 58 'precise-folsom/updates': 'precise-updates/folsom',
40 'precise-updates/folsom': 'precise-updates/folsom', 59 'precise-updates/folsom': 'precise-updates/folsom',
@@ -43,6 +62,7 @@ CLOUD_ARCHIVE_POCKETS = {
43 'precise-proposed/folsom': 'precise-proposed/folsom', 62 'precise-proposed/folsom': 'precise-proposed/folsom',
44 # Grizzly 63 # Grizzly
45 'grizzly': 'precise-updates/grizzly', 64 'grizzly': 'precise-updates/grizzly',
65 'grizzly/updates': 'precise-updates/grizzly',
46 'precise-grizzly': 'precise-updates/grizzly', 66 'precise-grizzly': 'precise-updates/grizzly',
47 'precise-grizzly/updates': 'precise-updates/grizzly', 67 'precise-grizzly/updates': 'precise-updates/grizzly',
48 'precise-updates/grizzly': 'precise-updates/grizzly', 68 'precise-updates/grizzly': 'precise-updates/grizzly',
@@ -51,6 +71,7 @@ CLOUD_ARCHIVE_POCKETS = {
51 'precise-proposed/grizzly': 'precise-proposed/grizzly', 71 'precise-proposed/grizzly': 'precise-proposed/grizzly',
52 # Havana 72 # Havana
53 'havana': 'precise-updates/havana', 73 'havana': 'precise-updates/havana',
74 'havana/updates': 'precise-updates/havana',
54 'precise-havana': 'precise-updates/havana', 75 'precise-havana': 'precise-updates/havana',
55 'precise-havana/updates': 'precise-updates/havana', 76 'precise-havana/updates': 'precise-updates/havana',
56 'precise-updates/havana': 'precise-updates/havana', 77 'precise-updates/havana': 'precise-updates/havana',
@@ -59,6 +80,7 @@ CLOUD_ARCHIVE_POCKETS = {
59 'precise-proposed/havana': 'precise-proposed/havana', 80 'precise-proposed/havana': 'precise-proposed/havana',
60 # Icehouse 81 # Icehouse
61 'icehouse': 'precise-updates/icehouse', 82 'icehouse': 'precise-updates/icehouse',
83 'icehouse/updates': 'precise-updates/icehouse',
62 'precise-icehouse': 'precise-updates/icehouse', 84 'precise-icehouse': 'precise-updates/icehouse',
63 'precise-icehouse/updates': 'precise-updates/icehouse', 85 'precise-icehouse/updates': 'precise-updates/icehouse',
64 'precise-updates/icehouse': 'precise-updates/icehouse', 86 'precise-updates/icehouse': 'precise-updates/icehouse',
@@ -67,6 +89,7 @@ CLOUD_ARCHIVE_POCKETS = {
67 'precise-proposed/icehouse': 'precise-proposed/icehouse', 89 'precise-proposed/icehouse': 'precise-proposed/icehouse',
68 # Juno 90 # Juno
69 'juno': 'trusty-updates/juno', 91 'juno': 'trusty-updates/juno',
92 'juno/updates': 'trusty-updates/juno',
70 'trusty-juno': 'trusty-updates/juno', 93 'trusty-juno': 'trusty-updates/juno',
71 'trusty-juno/updates': 'trusty-updates/juno', 94 'trusty-juno/updates': 'trusty-updates/juno',
72 'trusty-updates/juno': 'trusty-updates/juno', 95 'trusty-updates/juno': 'trusty-updates/juno',
@@ -75,6 +98,7 @@ CLOUD_ARCHIVE_POCKETS = {
75 'trusty-proposed/juno': 'trusty-proposed/juno', 98 'trusty-proposed/juno': 'trusty-proposed/juno',
76 # Kilo 99 # Kilo
77 'kilo': 'trusty-updates/kilo', 100 'kilo': 'trusty-updates/kilo',
101 'kilo/updates': 'trusty-updates/kilo',
78 'trusty-kilo': 'trusty-updates/kilo', 102 'trusty-kilo': 'trusty-updates/kilo',
79 'trusty-kilo/updates': 'trusty-updates/kilo', 103 'trusty-kilo/updates': 'trusty-updates/kilo',
80 'trusty-updates/kilo': 'trusty-updates/kilo', 104 'trusty-updates/kilo': 'trusty-updates/kilo',
@@ -83,6 +107,7 @@ CLOUD_ARCHIVE_POCKETS = {
83 'trusty-proposed/kilo': 'trusty-proposed/kilo', 107 'trusty-proposed/kilo': 'trusty-proposed/kilo',
84 # Liberty 108 # Liberty
85 'liberty': 'trusty-updates/liberty', 109 'liberty': 'trusty-updates/liberty',
110 'liberty/updates': 'trusty-updates/liberty',
86 'trusty-liberty': 'trusty-updates/liberty', 111 'trusty-liberty': 'trusty-updates/liberty',
87 'trusty-liberty/updates': 'trusty-updates/liberty', 112 'trusty-liberty/updates': 'trusty-updates/liberty',
88 'trusty-updates/liberty': 'trusty-updates/liberty', 113 'trusty-updates/liberty': 'trusty-updates/liberty',
@@ -91,6 +116,7 @@ CLOUD_ARCHIVE_POCKETS = {
91 'trusty-proposed/liberty': 'trusty-proposed/liberty', 116 'trusty-proposed/liberty': 'trusty-proposed/liberty',
92 # Mitaka 117 # Mitaka
93 'mitaka': 'trusty-updates/mitaka', 118 'mitaka': 'trusty-updates/mitaka',
119 'mitaka/updates': 'trusty-updates/mitaka',
94 'trusty-mitaka': 'trusty-updates/mitaka', 120 'trusty-mitaka': 'trusty-updates/mitaka',
95 'trusty-mitaka/updates': 'trusty-updates/mitaka', 121 'trusty-mitaka/updates': 'trusty-updates/mitaka',
96 'trusty-updates/mitaka': 'trusty-updates/mitaka', 122 'trusty-updates/mitaka': 'trusty-updates/mitaka',
@@ -99,6 +125,7 @@ CLOUD_ARCHIVE_POCKETS = {
99 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', 125 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
100 # Newton 126 # Newton
101 'newton': 'xenial-updates/newton', 127 'newton': 'xenial-updates/newton',
128 'newton/updates': 'xenial-updates/newton',
102 'xenial-newton': 'xenial-updates/newton', 129 'xenial-newton': 'xenial-updates/newton',
103 'xenial-newton/updates': 'xenial-updates/newton', 130 'xenial-newton/updates': 'xenial-updates/newton',
104 'xenial-updates/newton': 'xenial-updates/newton', 131 'xenial-updates/newton': 'xenial-updates/newton',
@@ -107,12 +134,13 @@ CLOUD_ARCHIVE_POCKETS = {
107 'xenial-proposed/newton': 'xenial-proposed/newton', 134 'xenial-proposed/newton': 'xenial-proposed/newton',
108 # Ocata 135 # Ocata
109 'ocata': 'xenial-updates/ocata', 136 'ocata': 'xenial-updates/ocata',
137 'ocata/updates': 'xenial-updates/ocata',
110 'xenial-ocata': 'xenial-updates/ocata', 138 'xenial-ocata': 'xenial-updates/ocata',
111 'xenial-ocata/updates': 'xenial-updates/ocata', 139 'xenial-ocata/updates': 'xenial-updates/ocata',
112 'xenial-updates/ocata': 'xenial-updates/ocata', 140 'xenial-updates/ocata': 'xenial-updates/ocata',
113 'ocata/proposed': 'xenial-proposed/ocata', 141 'ocata/proposed': 'xenial-proposed/ocata',
114 'xenial-ocata/proposed': 'xenial-proposed/ocata', 142 'xenial-ocata/proposed': 'xenial-proposed/ocata',
115 'xenial-ocata/newton': 'xenial-proposed/ocata', 143 'xenial-proposed/ocata': 'xenial-proposed/ocata',
116 # Pike 144 # Pike
117 'pike': 'xenial-updates/pike', 145 'pike': 'xenial-updates/pike',
118 'xenial-pike': 'xenial-updates/pike', 146 'xenial-pike': 'xenial-updates/pike',
@@ -120,7 +148,7 @@ CLOUD_ARCHIVE_POCKETS = {
120 'xenial-updates/pike': 'xenial-updates/pike', 148 'xenial-updates/pike': 'xenial-updates/pike',
121 'pike/proposed': 'xenial-proposed/pike', 149 'pike/proposed': 'xenial-proposed/pike',
122 'xenial-pike/proposed': 'xenial-proposed/pike', 150 'xenial-pike/proposed': 'xenial-proposed/pike',
123 'xenial-pike/newton': 'xenial-proposed/pike', 151 'xenial-proposed/pike': 'xenial-proposed/pike',
124 # Queens 152 # Queens
125 'queens': 'xenial-updates/queens', 153 'queens': 'xenial-updates/queens',
126 'xenial-queens': 'xenial-updates/queens', 154 'xenial-queens': 'xenial-updates/queens',
@@ -128,12 +156,13 @@ CLOUD_ARCHIVE_POCKETS = {
128 'xenial-updates/queens': 'xenial-updates/queens', 156 'xenial-updates/queens': 'xenial-updates/queens',
129 'queens/proposed': 'xenial-proposed/queens', 157 'queens/proposed': 'xenial-proposed/queens',
130 'xenial-queens/proposed': 'xenial-proposed/queens', 158 'xenial-queens/proposed': 'xenial-proposed/queens',
131 'xenial-queens/newton': 'xenial-proposed/queens', 159 'xenial-proposed/queens': 'xenial-proposed/queens',
132} 160}
133 161
162
134APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 163APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
135CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. 164CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
136CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. 165CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times.
137 166
138 167
139def filter_installed_packages(packages): 168def filter_installed_packages(packages):
@@ -161,7 +190,7 @@ def apt_cache(in_memory=True, progress=None):
161 return apt_pkg.Cache(progress) 190 return apt_pkg.Cache(progress)
162 191
163 192
164def install(packages, options=None, fatal=False): 193def apt_install(packages, options=None, fatal=False):
165 """Install one or more packages.""" 194 """Install one or more packages."""
166 if options is None: 195 if options is None:
167 options = ['--option=Dpkg::Options::=--force-confold'] 196 options = ['--option=Dpkg::Options::=--force-confold']
@@ -178,7 +207,7 @@ def install(packages, options=None, fatal=False):
178 _run_apt_command(cmd, fatal) 207 _run_apt_command(cmd, fatal)
179 208
180 209
181def upgrade(options=None, fatal=False, dist=False): 210def apt_upgrade(options=None, fatal=False, dist=False):
182 """Upgrade all packages.""" 211 """Upgrade all packages."""
183 if options is None: 212 if options is None:
184 options = ['--option=Dpkg::Options::=--force-confold'] 213 options = ['--option=Dpkg::Options::=--force-confold']
@@ -193,13 +222,13 @@ def upgrade(options=None, fatal=False, dist=False):
193 _run_apt_command(cmd, fatal) 222 _run_apt_command(cmd, fatal)
194 223
195 224
196def update(fatal=False): 225def apt_update(fatal=False):
197 """Update local apt cache.""" 226 """Update local apt cache."""
198 cmd = ['apt-get', 'update'] 227 cmd = ['apt-get', 'update']
199 _run_apt_command(cmd, fatal) 228 _run_apt_command(cmd, fatal)
200 229
201 230
202def purge(packages, fatal=False): 231def apt_purge(packages, fatal=False):
203 """Purge one or more packages.""" 232 """Purge one or more packages."""
204 cmd = ['apt-get', '--assume-yes', 'purge'] 233 cmd = ['apt-get', '--assume-yes', 'purge']
205 if isinstance(packages, six.string_types): 234 if isinstance(packages, six.string_types):
@@ -233,7 +262,58 @@ def apt_unhold(packages, fatal=False):
233 return apt_mark(packages, 'unhold', fatal=fatal) 262 return apt_mark(packages, 'unhold', fatal=fatal)
234 263
235 264
236def add_source(source, key=None): 265def import_key(key):
266 """Import an ASCII Armor key.
267
268 /!\ A Radix64 format keyid is also supported for backwards
269 compatibility, but should never be used; the key retrieval
270 mechanism is insecure and subject to man-in-the-middle attacks
271 voiding all signature checks using that key.
272
273 :param keyid: The key in ASCII armor format,
274 including BEGIN and END markers.
275 :raises: GPGKeyError if the key could not be imported
276 """
277 key = key.strip()
278 if '-' in key or '\n' in key:
279 # Send everything not obviously a keyid to GPG to import, as
280 # we trust its validation better than our own. eg. handling
281 # comments before the key.
282 log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
283 if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
284 '-----END PGP PUBLIC KEY BLOCK-----' in key):
285 log("Importing ASCII Armor PGP key", level=DEBUG)
286 with NamedTemporaryFile() as keyfile:
287 with open(keyfile.name, 'w') as fd:
288 fd.write(key)
289 fd.write("\n")
290 cmd = ['apt-key', 'add', keyfile.name]
291 try:
292 subprocess.check_call(cmd)
293 except subprocess.CalledProcessError:
294 error = "Error importing PGP key '{}'".format(key)
295 log(error)
296 raise GPGKeyError(error)
297 else:
298 raise GPGKeyError("ASCII armor markers missing from GPG key")
299 else:
300 # We should only send things obviously not a keyid offsite
301 # via this unsecured protocol, as it may be a secret or part
302 # of one.
303 log("PGP key found (looks like Radix64 format)", level=WARNING)
304 log("INSECURLY importing PGP key from keyserver; "
305 "full key not provided.", level=WARNING)
306 cmd = ['apt-key', 'adv', '--keyserver',
307 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
308 try:
309 subprocess.check_call(cmd)
310 except subprocess.CalledProcessError:
311 error = "Error importing PGP key '{}'".format(key)
312 log(error)
313 raise GPGKeyError(error)
314
315
316def add_source(source, key=None, fail_invalid=False):
237 """Add a package source to this system. 317 """Add a package source to this system.
238 318
239 @param source: a URL or sources.list entry, as supported by 319 @param source: a URL or sources.list entry, as supported by
@@ -249,6 +329,33 @@ def add_source(source, key=None):
249 such as 'cloud:icehouse' 329 such as 'cloud:icehouse'
250 'distro' may be used as a noop 330 'distro' may be used as a noop
251 331
332 Full list of source specifications supported by the function are:
333
334 'distro': A NOP; i.e. it has no effect.
335 'proposed': the proposed deb spec [2] is written to
336 /etc/apt/sources.list/proposed
337 'distro-proposed': adds <version>-proposed to the debs [2]
338 'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
339 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
340 'http://....': add-apt-repository --yes http://...
341 'cloud-archive:<spec>': add-apt-repository -yes cloud-archive:<spec>
342 'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
343 optional staging version. If staging is used then the staging PPA [2]
344 with be used. If staging is NOT used then the cloud archive [3] will be
345 added, and the 'ubuntu-cloud-keyring' package will be added for the
346 current distro.
347
348 Otherwise the source is not recognised and this is logged to the juju log.
349 However, no error is raised, unless fail_invalid is True.
350
351 [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
352 where {} is replaced with the derived pocket name.
353 [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
354 main universe multiverse restricted
355 where {} is replaced with the lsb_release codename (e.g. xenial)
356 [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
357 to /etc/apt/sources.list.d/cloud-archive-list
358
252 @param key: A key to be added to the system's APT keyring and used 359 @param key: A key to be added to the system's APT keyring and used
253 to verify the signatures on packages. Ideally, this should be an 360 to verify the signatures on packages. Ideally, this should be an
254 ASCII format GPG public key including the block headers. A GPG key 361 ASCII format GPG public key including the block headers. A GPG key
@@ -256,51 +363,142 @@ def add_source(source, key=None):
256 available to retrieve the actual public key from a public keyserver 363 available to retrieve the actual public key from a public keyserver
257 placing your Juju environment at risk. ppa and cloud archive keys 364 placing your Juju environment at risk. ppa and cloud archive keys
258 are securely added automtically, so sould not be provided. 365 are securely added automtically, so sould not be provided.
366
367 @param fail_invalid: (boolean) if True, then the function raises a
368 SourceConfigError if there is no matching installation source.
369
370 @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
371 valid pocket in CLOUD_ARCHIVE_POCKETS
259 """ 372 """
373 _mapping = OrderedDict([
374 (r"^distro$", lambda: None), # This is a NOP
375 (r"^(?:proposed|distro-proposed)$", _add_proposed),
376 (r"^cloud-archive:(.*)$", _add_apt_repository),
377 (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
378 (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
379 (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
380 (r"^cloud:(.*)$", _add_cloud_pocket),
381 (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
382 ])
260 if source is None: 383 if source is None:
261 log('Source is not present. Skipping') 384 source = ''
262 return 385 for r, fn in six.iteritems(_mapping):
263 386 m = re.match(r, source)
264 if (source.startswith('ppa:') or 387 if m:
265 source.startswith('http') or 388 # call the associated function with the captured groups
266 source.startswith('deb ') or 389 # raises SourceConfigError on error.
267 source.startswith('cloud-archive:')): 390 fn(*m.groups())
268 cmd = ['add-apt-repository', '--yes', source] 391 if key:
269 _run_with_retries(cmd) 392 try:
270 elif source.startswith('cloud:'): 393 import_key(key)
271 install(filter_installed_packages(['ubuntu-cloud-keyring']), 394 except GPGKeyError as e:
272 fatal=True) 395 raise SourceConfigError(str(e))
273 pocket = source.split(':')[-1] 396 break
274 if pocket not in CLOUD_ARCHIVE_POCKETS:
275 raise SourceConfigError(
276 'Unsupported cloud: source option %s' %
277 pocket)
278 actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
279 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
280 apt.write(CLOUD_ARCHIVE.format(actual_pocket))
281 elif source == 'proposed':
282 release = lsb_release()['DISTRIB_CODENAME']
283 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
284 apt.write(PROPOSED_POCKET.format(release))
285 elif source == 'distro':
286 pass
287 else: 397 else:
288 log("Unknown source: {!r}".format(source)) 398 # nothing matched. log an error and maybe sys.exit
289 399 err = "Unknown source: {!r}".format(source)
290 if key: 400 log(err)
291 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: 401 if fail_invalid:
292 with NamedTemporaryFile('w+') as key_file: 402 raise SourceConfigError(err)
293 key_file.write(key) 403
294 key_file.flush() 404
295 key_file.seek(0) 405def _add_proposed():
296 subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) 406 """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
297 else: 407
298 # Note that hkp: is in no way a secure protocol. Using a 408 Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
299 # GPG key id is pointless from a security POV unless you 409 the deb line.
300 # absolutely trust your network and DNS. 410
301 subprocess.check_call(['apt-key', 'adv', '--keyserver', 411 For intel architectures PROPOSED_POCKET is used for the release, but for
302 'hkp://keyserver.ubuntu.com:80', '--recv', 412 other architectures PROPOSED_PORTS_POCKET is used for the release.
303 key]) 413 """
414 release = lsb_release()['DISTRIB_CODENAME']
415 arch = platform.machine()
416 if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
417 raise SourceConfigError("Arch {} not supported for (distro-)proposed"
418 .format(arch))
419 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
420 apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
421
422
423def _add_apt_repository(spec):
424 """Add the spec using add_apt_repository
425
426 :param spec: the parameter to pass to add_apt_repository
427 """
428 _run_with_retries(['add-apt-repository', '--yes', spec])
429
430
431def _add_cloud_pocket(pocket):
432 """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
433
434 Note that this overwrites the existing file if there is one.
435
436 This function also converts the simple pocket in to the actual pocket using
437 the CLOUD_ARCHIVE_POCKETS mapping.
438
439 :param pocket: string representing the pocket to add a deb spec for.
440 :raises: SourceConfigError if the cloud pocket doesn't exist or the
441 requested release doesn't match the current distro version.
442 """
443 apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
444 fatal=True)
445 if pocket not in CLOUD_ARCHIVE_POCKETS:
446 raise SourceConfigError(
447 'Unsupported cloud: source option %s' %
448 pocket)
449 actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
450 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
451 apt.write(CLOUD_ARCHIVE.format(actual_pocket))
452
453
454def _add_cloud_staging(cloud_archive_release, openstack_release):
455 """Add the cloud staging repository which is in
456 ppa:ubuntu-cloud-archive/<openstack_release>-staging
457
458 This function checks that the cloud_archive_release matches the current
459 codename for the distro that charm is being installed on.
460
461 :param cloud_archive_release: string, codename for the release.
462 :param openstack_release: String, codename for the openstack release.
463 :raises: SourceConfigError if the cloud_archive_release doesn't match the
464 current version of the os.
465 """
466 _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
467 ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
468 cmd = 'add-apt-repository -y {}'.format(ppa)
469 _run_with_retries(cmd.split(' '))
470
471
472def _add_cloud_distro_check(cloud_archive_release, openstack_release):
473 """Add the cloud pocket, but also check the cloud_archive_release against
474 the current distro, and use the openstack_release as the full lookup.
475
476 This just calls _add_cloud_pocket() with the openstack_release as pocket
477 to get the correct cloud-archive.list for dpkg to work with.
478
479 :param cloud_archive_release:String, codename for the distro release.
480 :param openstack_release: String, spec for the release to look up in the
481 CLOUD_ARCHIVE_POCKETS
482 :raises: SourceConfigError if this is the wrong distro, or the pocket spec
483 doesn't exist.
484 """
485 _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
486 _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
487
488
489def _verify_is_ubuntu_rel(release, os_release):
490 """Verify that the release is in the same as the current ubuntu release.
491
492 :param release: String, lowercase for the release.
493 :param os_release: String, the os_release being asked for
494 :raises: SourceConfigError if the release is not the same as the ubuntu
495 release.
496 """
497 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
498 if release != ubuntu_rel:
499 raise SourceConfigError(
500 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
501 'version ({})'.format(release, os_release, ubuntu_rel))
304 502
305 503
306def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), 504def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
@@ -316,9 +514,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
316 :param: cmd_env: dict: Environment variables to add to the command run. 514 :param: cmd_env: dict: Environment variables to add to the command run.
317 """ 515 """
318 516
319 env = os.environ.copy() 517 env = None
518 kwargs = {}
320 if cmd_env: 519 if cmd_env:
520 env = os.environ.copy()
321 env.update(cmd_env) 521 env.update(cmd_env)
522 kwargs['env'] = env
322 523
323 if not retry_message: 524 if not retry_message:
324 retry_message = "Failed executing '{}'".format(" ".join(cmd)) 525 retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -330,7 +531,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
330 retry_results = (None,) + retry_exitcodes 531 retry_results = (None,) + retry_exitcodes
331 while result in retry_results: 532 while result in retry_results:
332 try: 533 try:
333 result = subprocess.check_call(cmd, env=env) 534 # result = subprocess.check_call(cmd, env=env)
535 result = subprocess.check_call(cmd, **kwargs)
334 except subprocess.CalledProcessError as e: 536 except subprocess.CalledProcessError as e:
335 retry_count = retry_count + 1 537 retry_count = retry_count + 1
336 if retry_count > max_retries: 538 if retry_count > max_retries:
@@ -343,6 +545,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
343def _run_apt_command(cmd, fatal=False): 545def _run_apt_command(cmd, fatal=False):
344 """Run an apt command with optional retries. 546 """Run an apt command with optional retries.
345 547
548 :param: cmd: str: The apt command to run.
346 :param: fatal: bool: Whether the command's output should be checked and 549 :param: fatal: bool: Whether the command's output should be checked and
347 retried. 550 retried.
348 """ 551 """
diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py
index 4886788..e7aa471 100644
--- a/tests/charmhelpers/__init__.py
+++ b/tests/charmhelpers/__init__.py
@@ -14,6 +14,11 @@
14 14
15# Bootstrap charm-helpers, installing its dependencies if necessary using 15# Bootstrap charm-helpers, installing its dependencies if necessary using
16# only standard libraries. 16# only standard libraries.
17from __future__ import print_function
18from __future__ import absolute_import
19
20import functools
21import inspect
17import subprocess 22import subprocess
18import sys 23import sys
19 24
@@ -34,3 +39,59 @@ except ImportError:
34 else: 39 else:
35 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) 40 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
36 import yaml # flake8: noqa 41 import yaml # flake8: noqa
42
43
44# Holds a list of mapping of mangled function names that have been deprecated
45# using the @deprecate decorator below. This is so that the warning is only
46# printed once for each usage of the function.
47__deprecated_functions = {}
48
49
50def deprecate(warning, date=None, log=None):
51 """Add a deprecation warning the first time the function is used.
52 The date, which is a string in semi-ISO8601 format indicates the year-month
53 that the function is officially going to be removed.
54
55 usage:
56
57 @deprecate('use core/fetch/add_source() instead', '2017-04')
58 def contributed_add_source_thing(...):
59 ...
60
61 And it then prints to the log ONCE that the function is deprecated.
62 The reason for passing the logging function (log) is so that hookenv.log
63 can be used for a charm if needed.
64
65 :param warning: String to indicate where it has moved to.
66 :param date: optional string, in YYYY-MM format to indicate when the
67 function will definitely (probably) be removed.
68 :param log: The log function to call to log. If not, logs to stdout
69 """
70 def wrap(f):
71
72 @functools.wraps(f)
73 def wrapped_f(*args, **kwargs):
74 try:
75 module = inspect.getmodule(f)
76 file = inspect.getsourcefile(f)
77 lines = inspect.getsourcelines(f)
78 f_name = "{}-{}-{}..{}-{}".format(
79 module.__name__, file, lines[0], lines[-1], f.__name__)
80 except (IOError, TypeError):
81 # assume it was local, so just use the name of the function
82 f_name = f.__name__
83 if f_name not in __deprecated_functions:
84 __deprecated_functions[f_name] = True
85 s = "DEPRECATION WARNING: Function {} is being removed".format(
86 f.__name__)
87 if date:
88 s = "{} on/around {}".format(s, date)
89 if warning:
90 s = "{} : {}".format(s, warning)
91 if log:
92 log(s)
93 else:
94 print(s)
95 return f(*args, **kwargs)
96 return wrapped_f
97 return wrap
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index bcef4cd..c8edbf6 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -25,9 +25,12 @@ import urlparse
25import cinderclient.v1.client as cinder_client 25import cinderclient.v1.client as cinder_client
26import glanceclient.v1.client as glance_client 26import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client 27import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client 28from keystoneclient.v2_0 import client as keystone_client
29from keystoneclient.auth.identity import v3 as keystone_id_v3 29from keystoneauth1.identity import (
30from keystoneclient import session as keystone_session 30 v3,
31 v2,
32)
33from keystoneauth1 import session as keystone_session
31from keystoneclient.v3 import client as keystone_client_v3 34from keystoneclient.v3 import client as keystone_client_v3
32from novaclient import exceptions 35from novaclient import exceptions
33 36
@@ -368,12 +371,20 @@ class OpenStackAmuletUtils(AmuletUtils):
368 port) 371 port)
369 if not api_version or api_version == 2: 372 if not api_version or api_version == 2:
370 ep = base_ep + "/v2.0" 373 ep = base_ep + "/v2.0"
371 return keystone_client.Client(username=username, password=password, 374 auth = v2.Password(
372 tenant_name=project_name, 375 username=username,
373 auth_url=ep) 376 password=password,
377 tenant_name=project_name,
378 auth_url=ep
379 )
380 sess = keystone_session.Session(auth=auth)
381 client = keystone_client.Client(session=sess)
382 # This populates the client.service_catalog
383 client.auth_ref = auth.get_access(sess)
384 return client
374 else: 385 else:
375 ep = base_ep + "/v3" 386 ep = base_ep + "/v3"
376 auth = keystone_id_v3.Password( 387 auth = v3.Password(
377 user_domain_name=user_domain_name, 388 user_domain_name=user_domain_name,
378 username=username, 389 username=username,
379 password=password, 390 password=password,
@@ -382,36 +393,45 @@ class OpenStackAmuletUtils(AmuletUtils):
382 project_name=project_name, 393 project_name=project_name,
383 auth_url=ep 394 auth_url=ep
384 ) 395 )
385 return keystone_client_v3.Client( 396 sess = keystone_session.Session(auth=auth)
386 session=keystone_session.Session(auth=auth) 397 client = keystone_client_v3.Client(session=sess)
387 ) 398 # This populates the client.service_catalog
399 client.auth_ref = auth.get_access(sess)
400 return client
388 401
389 def authenticate_keystone_admin(self, keystone_sentry, user, password, 402 def authenticate_keystone_admin(self, keystone_sentry, user, password,
390 tenant=None, api_version=None, 403 tenant=None, api_version=None,
391 keystone_ip=None): 404 keystone_ip=None, user_domain_name=None,
405 project_domain_name=None,
406 project_name=None):
392 """Authenticates admin user with the keystone admin endpoint.""" 407 """Authenticates admin user with the keystone admin endpoint."""
393 self.log.debug('Authenticating keystone admin...') 408 self.log.debug('Authenticating keystone admin...')
394 if not keystone_ip: 409 if not keystone_ip:
395 keystone_ip = keystone_sentry.info['public-address'] 410 keystone_ip = keystone_sentry.info['public-address']
396 411
397 user_domain_name = None 412 # To support backward compatibility usage of this function
398 domain_name = None 413 if not project_name:
399 if api_version == 3: 414 project_name = tenant
415 if api_version == 3 and not user_domain_name:
400 user_domain_name = 'admin_domain' 416 user_domain_name = 'admin_domain'
401 domain_name = user_domain_name 417 if api_version == 3 and not project_domain_name:
402 418 project_domain_name = 'admin_domain'
403 return self.authenticate_keystone(keystone_ip, user, password, 419 if api_version == 3 and not project_name:
404 project_name=tenant, 420 project_name = 'admin'
405 api_version=api_version, 421
406 user_domain_name=user_domain_name, 422 return self.authenticate_keystone(
407 domain_name=domain_name, 423 keystone_ip, user, password,
408 admin_port=True) 424 api_version=api_version,
425 user_domain_name=user_domain_name,
426 project_domain_name=project_domain_name,
427 project_name=project_name,
428 admin_port=True)
409 429
410 def authenticate_keystone_user(self, keystone, user, password, tenant): 430 def authenticate_keystone_user(self, keystone, user, password, tenant):
411 """Authenticates a regular user with the keystone public endpoint.""" 431 """Authenticates a regular user with the keystone public endpoint."""
412 self.log.debug('Authenticating keystone user ({})...'.format(user)) 432 self.log.debug('Authenticating keystone user ({})...'.format(user))
413 ep = keystone.service_catalog.url_for(service_type='identity', 433 ep = keystone.service_catalog.url_for(service_type='identity',
414 endpoint_type='publicURL') 434 interface='publicURL')
415 keystone_ip = urlparse.urlparse(ep).hostname 435 keystone_ip = urlparse.urlparse(ep).hostname
416 436
417 return self.authenticate_keystone(keystone_ip, user, password, 437 return self.authenticate_keystone(keystone_ip, user, password,
@@ -421,22 +441,32 @@ class OpenStackAmuletUtils(AmuletUtils):
421 """Authenticates admin user with glance.""" 441 """Authenticates admin user with glance."""
422 self.log.debug('Authenticating glance admin...') 442 self.log.debug('Authenticating glance admin...')
423 ep = keystone.service_catalog.url_for(service_type='image', 443 ep = keystone.service_catalog.url_for(service_type='image',
424 endpoint_type='adminURL') 444 interface='adminURL')
425 return glance_client.Client(ep, token=keystone.auth_token) 445 if keystone.session:
446 return glance_client.Client(ep, session=keystone.session)
447 else:
448 return glance_client.Client(ep, token=keystone.auth_token)
426 449
427 def authenticate_heat_admin(self, keystone): 450 def authenticate_heat_admin(self, keystone):
428 """Authenticates the admin user with heat.""" 451 """Authenticates the admin user with heat."""
429 self.log.debug('Authenticating heat admin...') 452 self.log.debug('Authenticating heat admin...')
430 ep = keystone.service_catalog.url_for(service_type='orchestration', 453 ep = keystone.service_catalog.url_for(service_type='orchestration',
431 endpoint_type='publicURL') 454 interface='publicURL')
432 return heat_client.Client(endpoint=ep, token=keystone.auth_token) 455 if keystone.session:
456 return heat_client.Client(endpoint=ep, session=keystone.session)
457 else:
458 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
433 459
434 def authenticate_nova_user(self, keystone, user, password, tenant): 460 def authenticate_nova_user(self, keystone, user, password, tenant):
435 """Authenticates a regular user with nova-api.""" 461 """Authenticates a regular user with nova-api."""
436 self.log.debug('Authenticating nova user ({})...'.format(user)) 462 self.log.debug('Authenticating nova user ({})...'.format(user))
437 ep = keystone.service_catalog.url_for(service_type='identity', 463 ep = keystone.service_catalog.url_for(service_type='identity',
438 endpoint_type='publicURL') 464 interface='publicURL')
439 if novaclient.__version__[0] >= "7": 465 if keystone.session:
466 return nova_client.Client(NOVA_CLIENT_VERSION,
467 session=keystone.session,
468 auth_url=ep)
469 elif novaclient.__version__[0] >= "7":
440 return nova_client.Client(NOVA_CLIENT_VERSION, 470 return nova_client.Client(NOVA_CLIENT_VERSION,
441 username=user, password=password, 471 username=user, password=password,
442 project_name=tenant, auth_url=ep) 472 project_name=tenant, auth_url=ep)
@@ -449,12 +479,15 @@ class OpenStackAmuletUtils(AmuletUtils):
449 """Authenticates a regular user with swift api.""" 479 """Authenticates a regular user with swift api."""
450 self.log.debug('Authenticating swift user ({})...'.format(user)) 480 self.log.debug('Authenticating swift user ({})...'.format(user))
451 ep = keystone.service_catalog.url_for(service_type='identity', 481 ep = keystone.service_catalog.url_for(service_type='identity',
452 endpoint_type='publicURL') 482 interface='publicURL')
453 return swiftclient.Connection(authurl=ep, 483 if keystone.session:
454 user=user, 484 return swiftclient.Connection(session=keystone.session)
455 key=password, 485 else:
456 tenant_name=tenant, 486 return swiftclient.Connection(authurl=ep,
457 auth_version='2.0') 487 user=user,
488 key=password,
489 tenant_name=tenant,
490 auth_version='2.0')
458 491
459 def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", 492 def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
460 ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): 493 ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index e44e22b..12f37b2 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -43,6 +43,7 @@ ERROR = "ERROR"
43WARNING = "WARNING" 43WARNING = "WARNING"
44INFO = "INFO" 44INFO = "INFO"
45DEBUG = "DEBUG" 45DEBUG = "DEBUG"
46TRACE = "TRACE"
46MARKER = object() 47MARKER = object()
47 48
48cache = {} 49cache = {}
@@ -202,6 +203,27 @@ def service_name():
202 return local_unit().split('/')[0] 203 return local_unit().split('/')[0]
203 204
204 205
206def principal_unit():
207 """Returns the principal unit of this unit, otherwise None"""
208 # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
209 principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
210 # If it's empty, then this unit is the principal
211 if principal_unit == '':
212 return os.environ['JUJU_UNIT_NAME']
213 elif principal_unit is not None:
214 return principal_unit
215 # For Juju 2.1 and below, let's try work out the principle unit by
216 # the various charms' metadata.yaml.
217 for reltype in relation_types():
218 for rid in relation_ids(reltype):
219 for unit in related_units(rid):
220 md = _metadata_unit(unit)
221 subordinate = md.pop('subordinate', None)
222 if not subordinate:
223 return unit
224 return None
225
226
205@cached 227@cached
206def remote_service_name(relid=None): 228def remote_service_name(relid=None):
207 """The remote service name for a given relation-id (or the current relation)""" 229 """The remote service name for a given relation-id (or the current relation)"""
@@ -478,6 +500,21 @@ def metadata():
478 return yaml.safe_load(md) 500 return yaml.safe_load(md)
479 501
480 502
503def _metadata_unit(unit):
504 """Given the name of a unit (e.g. apache2/0), get the unit charm's
505 metadata.yaml. Very similar to metadata() but allows us to inspect
506 other units. Unit needs to be co-located, such as a subordinate or
507 principal/primary.
508
509 :returns: metadata.yaml as a python object.
510
511 """
512 basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
513 unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
514 with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
515 return yaml.safe_load(md)
516
517
481@cached 518@cached
482def relation_types(): 519def relation_types():
483 """Get a list of relation types supported by this charm""" 520 """Get a list of relation types supported by this charm"""
@@ -753,6 +790,9 @@ class Hooks(object):
753 790
754def charm_dir(): 791def charm_dir():
755 """Return the root directory of the current charm""" 792 """Return the root directory of the current charm"""
793 d = os.environ.get('JUJU_CHARM_DIR')
794 if d is not None:
795 return d
756 return os.environ.get('CHARM_DIR') 796 return os.environ.get('CHARM_DIR')
757 797
758 798
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 88e80a4..5656e2f 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -34,7 +34,7 @@ import six
34 34
35from contextlib import contextmanager 35from contextlib import contextmanager
36from collections import OrderedDict 36from collections import OrderedDict
37from .hookenv import log 37from .hookenv import log, DEBUG
38from .fstab import Fstab 38from .fstab import Fstab
39from charmhelpers.osplatform import get_platform 39from charmhelpers.osplatform import get_platform
40 40
@@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
191 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) 191 upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
192 sysv_file = os.path.join(initd_dir, service_name) 192 sysv_file = os.path.join(initd_dir, service_name)
193 if init_is_systemd(): 193 if init_is_systemd():
194 service('disable', service_name)
194 service('mask', service_name) 195 service('mask', service_name)
195 elif os.path.exists(upstart_file): 196 elif os.path.exists(upstart_file):
196 override_path = os.path.join( 197 override_path = os.path.join(
@@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init",
225 sysv_file = os.path.join(initd_dir, service_name) 226 sysv_file = os.path.join(initd_dir, service_name)
226 if init_is_systemd(): 227 if init_is_systemd():
227 service('unmask', service_name) 228 service('unmask', service_name)
229 service('enable', service_name)
228 elif os.path.exists(upstart_file): 230 elif os.path.exists(upstart_file):
229 override_path = os.path.join( 231 override_path = os.path.join(
230 init_dir, '{}.override'.format(service_name)) 232 init_dir, '{}.override'.format(service_name))
@@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
485 487
486def write_file(path, content, owner='root', group='root', perms=0o444): 488def write_file(path, content, owner='root', group='root', perms=0o444):
487 """Create or overwrite a file with the contents of a byte string.""" 489 """Create or overwrite a file with the contents of a byte string."""
488 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
489 uid = pwd.getpwnam(owner).pw_uid 490 uid = pwd.getpwnam(owner).pw_uid
490 gid = grp.getgrnam(group).gr_gid 491 gid = grp.getgrnam(group).gr_gid
491 with open(path, 'wb') as target: 492 # lets see if we can grab the file and compare the context, to avoid doing
492 os.fchown(target.fileno(), uid, gid) 493 # a write.
493 os.fchmod(target.fileno(), perms) 494 existing_content = None
494 target.write(content) 495 existing_uid, existing_gid = None, None
496 try:
497 with open(path, 'rb') as target:
498 existing_content = target.read()
499 stat = os.stat(path)
500 existing_uid, existing_gid = stat.st_uid, stat.st_gid
501 except:
502 pass
503 if content != existing_content:
504 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
505 level=DEBUG)
506 with open(path, 'wb') as target:
507 os.fchown(target.fileno(), uid, gid)
508 os.fchmod(target.fileno(), perms)
509 target.write(content)
510 return
511 # the contents were the same, but we might still need to change the
512 # ownership.
513 if existing_uid != uid:
514 log("Changing uid on already existing content: {} -> {}"
515 .format(existing_uid, uid), level=DEBUG)
516 os.chown(path, uid, -1)
517 if existing_gid != gid:
518 log("Changing gid on already existing content: {} -> {}"
519 .format(existing_gid, gid), level=DEBUG)
520 os.chown(path, -1, gid)
495 521
496 522
497def fstab_remove(mp): 523def fstab_remove(mp):