summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Ames <david.ames@canonical.com>2018-05-08 11:52:58 -0700
committerDavid Ames <david.ames@canonical.com>2018-05-17 23:29:16 +0000
commitbb4e4912c7ac0d42ac23ed871b9d171c0df0a434 (patch)
tree2d5c4e0f5e65d2cfb2b342211f9ed7c68674c682
parent3301c5f57e9e4c6a878a178294db034239161491 (diff)
Enable Bionic as a gate test
Change bionic test from dev to gate for 18.05. Change-Id: I4d82f73b7b83d5bb597147663d424554d34d7e76
Notes
Notes (review): Verified+1: Canonical CI <uosci-testing-bot@ubuntu.com> Code-Review+1: David Ames <david.ames@canonical.com> Code-Review+2: Frode Nordahl <frode.nordahl@canonical.com> Workflow+1: Frode Nordahl <frode.nordahl@canonical.com> Verified+2: Zuul Submitted-by: Zuul Submitted-at: Sun, 20 May 2018 07:11:42 +0000 Reviewed-on: https://review.openstack.org/566983 Project: openstack/charm-cinder-backup Branch: refs/heads/master
-rw-r--r--.gitignore1
-rw-r--r--hooks/charmhelpers/contrib/hahelpers/apache.py5
-rw-r--r--hooks/charmhelpers/contrib/hahelpers/cluster.py14
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py10
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py225
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py10
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware5
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications3
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py4
-rw-r--r--hooks/charmhelpers/contrib/openstack/vaultlocker.py126
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py43
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/lvm.py29
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/utils.py16
-rw-r--r--hooks/charmhelpers/core/hookenv.py138
-rw-r--r--hooks/charmhelpers/core/host.py11
-rw-r--r--hooks/charmhelpers/core/services/base.py25
-rw-r--r--hooks/charmhelpers/core/sysctl.py18
-rw-r--r--hooks/charmhelpers/core/unitdata.py9
-rw-r--r--hooks/charmhelpers/fetch/ubuntu.py1
-rw-r--r--tests/basic_deployment.py259
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/deployment.py10
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/utils.py225
-rw-r--r--tests/charmhelpers/core/hookenv.py138
-rw-r--r--tests/charmhelpers/core/host.py11
-rw-r--r--tests/charmhelpers/core/services/base.py25
-rw-r--r--tests/charmhelpers/core/sysctl.py18
-rw-r--r--tests/charmhelpers/core/unitdata.py9
-rwxr-xr-xtests/gate-basic-bionic-queens (renamed from tests/dev-basic-bionic-queens)0
-rwxr-xr-xtests/gate-basic-xenial-queens25
-rw-r--r--tox.ini2
30 files changed, 1078 insertions, 337 deletions
diff --git a/.gitignore b/.gitignore
index 813d956..7947e2f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ bin
5*.sw[nop] 5*.sw[nop]
6*.py[oc] 6*.py[oc]
7*.pyc 7*.pyc
8func-results.json
diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py
index 22acb68..605a1be 100644
--- a/hooks/charmhelpers/contrib/hahelpers/apache.py
+++ b/hooks/charmhelpers/contrib/hahelpers/apache.py
@@ -65,7 +65,8 @@ def get_ca_cert():
65 if ca_cert is None: 65 if ca_cert is None:
66 log("Inspecting identity-service relations for CA SSL certificate.", 66 log("Inspecting identity-service relations for CA SSL certificate.",
67 level=INFO) 67 level=INFO)
68 for r_id in relation_ids('identity-service'): 68 for r_id in (relation_ids('identity-service') +
69 relation_ids('identity-credentials')):
69 for unit in relation_list(r_id): 70 for unit in relation_list(r_id):
70 if ca_cert is None: 71 if ca_cert is None:
71 ca_cert = relation_get('ca_cert', 72 ca_cert = relation_get('ca_cert',
@@ -76,7 +77,7 @@ def get_ca_cert():
76def retrieve_ca_cert(cert_file): 77def retrieve_ca_cert(cert_file):
77 cert = None 78 cert = None
78 if os.path.isfile(cert_file): 79 if os.path.isfile(cert_file):
79 with open(cert_file, 'r') as crt: 80 with open(cert_file, 'rb') as crt:
80 cert = crt.read() 81 cert = crt.read()
81 return cert 82 return cert
82 83
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 4207e42..47facd9 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
371 ''' Distribute operations by waiting based on modulo_distribution 371 ''' Distribute operations by waiting based on modulo_distribution
372 372
373 If modulo and or wait are not set, check config_get for those values. 373 If modulo and or wait are not set, check config_get for those values.
374 If config values are not set, default to modulo=3 and wait=30.
374 375
375 :param modulo: int The modulo number creates the group distribution 376 :param modulo: int The modulo number creates the group distribution
376 :param wait: int The constant time wait value 377 :param wait: int The constant time wait value
@@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
382 :side effect: Calls time.sleep() 383 :side effect: Calls time.sleep()
383 ''' 384 '''
384 if modulo is None: 385 if modulo is None:
385 modulo = config_get('modulo-nodes') 386 modulo = config_get('modulo-nodes') or 3
386 if wait is None: 387 if wait is None:
387 wait = config_get('known-wait') 388 wait = config_get('known-wait') or 30
388 calculated_wait = modulo_distribution(modulo=modulo, wait=wait) 389 if juju_is_leader():
390 # The leader should never wait
391 calculated_wait = 0
392 else:
393 # non_zero_wait=True guarantees the non-leader who gets modulo 0
394 # will still wait
395 calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
396 non_zero_wait=True)
389 msg = "Waiting {} seconds for {} ...".format(calculated_wait, 397 msg = "Waiting {} seconds for {} ...".format(calculated_wait,
390 operation_name) 398 operation_name)
391 log(msg, DEBUG) 399 log(msg, DEBUG)
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index 5afbbd8..66beeda 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,6 +21,9 @@ from collections import OrderedDict
21from charmhelpers.contrib.amulet.deployment import ( 21from charmhelpers.contrib.amulet.deployment import (
22 AmuletDeployment 22 AmuletDeployment
23) 23)
24from charmhelpers.contrib.openstack.amulet.utils import (
25 OPENSTACK_RELEASES_PAIRS
26)
24 27
25DEBUG = logging.DEBUG 28DEBUG = logging.DEBUG
26ERROR = logging.ERROR 29ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
271 release. 274 release.
272 """ 275 """
273 # Must be ordered by OpenStack release (not by Ubuntu release): 276 # Must be ordered by OpenStack release (not by Ubuntu release):
274 (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, 277 for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
275 self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, 278 setattr(self, os_pair, i)
276 self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
277 self.xenial_pike, self.artful_pike, self.xenial_queens,
278 self.bionic_queens,) = range(13)
279 279
280 releases = { 280 releases = {
281 ('trusty', None): self.trusty_icehouse, 281 ('trusty', None): self.trusty_icehouse,
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index d93cff3..84e87f5 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -50,6 +50,13 @@ ERROR = logging.ERROR
50 50
51NOVA_CLIENT_VERSION = "2" 51NOVA_CLIENT_VERSION = "2"
52 52
53OPENSTACK_RELEASES_PAIRS = [
54 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
55 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
56 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
57 'xenial_pike', 'artful_pike', 'xenial_queens',
58 'bionic_queens']
59
53 60
54class OpenStackAmuletUtils(AmuletUtils): 61class OpenStackAmuletUtils(AmuletUtils):
55 """OpenStack amulet utilities. 62 """OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
63 super(OpenStackAmuletUtils, self).__init__(log_level) 70 super(OpenStackAmuletUtils, self).__init__(log_level)
64 71
65 def validate_endpoint_data(self, endpoints, admin_port, internal_port, 72 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
66 public_port, expected): 73 public_port, expected, openstack_release=None):
74 """Validate endpoint data. Pick the correct validator based on
75 OpenStack release. Expected data should be in the v2 format:
76 {
77 'id': id,
78 'region': region,
79 'adminurl': adminurl,
80 'internalurl': internalurl,
81 'publicurl': publicurl,
82 'service_id': service_id}
83
84 """
85 validation_function = self.validate_v2_endpoint_data
86 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
87 if openstack_release and openstack_release >= xenial_queens:
88 validation_function = self.validate_v3_endpoint_data
89 expected = {
90 'id': expected['id'],
91 'region': expected['region'],
92 'region_id': 'RegionOne',
93 'url': self.valid_url,
94 'interface': self.not_null,
95 'service_id': expected['service_id']}
96 return validation_function(endpoints, admin_port, internal_port,
97 public_port, expected)
98
99 def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
100 public_port, expected):
67 """Validate endpoint data. 101 """Validate endpoint data.
68 102
69 Validate actual endpoint data vs expected endpoint data. The ports 103 Validate actual endpoint data vs expected endpoint data. The ports
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
141 if len(found) != expected_num_eps: 175 if len(found) != expected_num_eps:
142 return 'Unexpected number of endpoints found' 176 return 'Unexpected number of endpoints found'
143 177
144 def validate_svc_catalog_endpoint_data(self, expected, actual): 178 def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
179 """Convert v2 endpoint data into v3.
180
181 {
182 'service_name1': [
183 {
184 'adminURL': adminURL,
185 'id': id,
186 'region': region.
187 'publicURL': publicURL,
188 'internalURL': internalURL
189 }],
190 'service_name2': [
191 {
192 'adminURL': adminURL,
193 'id': id,
194 'region': region.
195 'publicURL': publicURL,
196 'internalURL': internalURL
197 }],
198 }
199 """
200 self.log.warn("Endpoint ID and Region ID validation is limited to not "
201 "null checks after v2 to v3 conversion")
202 for svc in ep_data.keys():
203 assert len(ep_data[svc]) == 1, "Unknown data format"
204 svc_ep_data = ep_data[svc][0]
205 ep_data[svc] = [
206 {
207 'url': svc_ep_data['adminURL'],
208 'interface': 'admin',
209 'region': svc_ep_data['region'],
210 'region_id': self.not_null,
211 'id': self.not_null},
212 {
213 'url': svc_ep_data['publicURL'],
214 'interface': 'public',
215 'region': svc_ep_data['region'],
216 'region_id': self.not_null,
217 'id': self.not_null},
218 {
219 'url': svc_ep_data['internalURL'],
220 'interface': 'internal',
221 'region': svc_ep_data['region'],
222 'region_id': self.not_null,
223 'id': self.not_null}]
224 return ep_data
225
226 def validate_svc_catalog_endpoint_data(self, expected, actual,
227 openstack_release=None):
228 """Validate service catalog endpoint data. Pick the correct validator
229 for the OpenStack version. Expected data should be in the v2 format:
230 {
231 'service_name1': [
232 {
233 'adminURL': adminURL,
234 'id': id,
235 'region': region.
236 'publicURL': publicURL,
237 'internalURL': internalURL
238 }],
239 'service_name2': [
240 {
241 'adminURL': adminURL,
242 'id': id,
243 'region': region.
244 'publicURL': publicURL,
245 'internalURL': internalURL
246 }],
247 }
248
249 """
250 validation_function = self.validate_v2_svc_catalog_endpoint_data
251 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
252 if openstack_release and openstack_release >= xenial_queens:
253 validation_function = self.validate_v3_svc_catalog_endpoint_data
254 expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
255 return validation_function(expected, actual)
256
257 def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
145 """Validate service catalog endpoint data. 258 """Validate service catalog endpoint data.
146 259
147 Validate a list of actual service catalog endpoints vs a list of 260 Validate a list of actual service catalog endpoints vs a list of
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
328 if rel.get('api_version') != str(api_version): 441 if rel.get('api_version') != str(api_version):
329 raise Exception("api_version not propagated through relation" 442 raise Exception("api_version not propagated through relation"
330 " data yet ('{}' != '{}')." 443 " data yet ('{}' != '{}')."
331 "".format(rel['api_version'], api_version)) 444 "".format(rel.get('api_version'), api_version))
332 445
333 def keystone_configure_api_version(self, sentry_relation_pairs, deployment, 446 def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
334 api_version): 447 api_version):
@@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
350 deployment._auto_wait_for_status() 463 deployment._auto_wait_for_status()
351 self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) 464 self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
352 465
353 def authenticate_cinder_admin(self, keystone_sentry, username, 466 def authenticate_cinder_admin(self, keystone, api_version=2):
354 password, tenant, api_version=2):
355 """Authenticates admin user with cinder.""" 467 """Authenticates admin user with cinder."""
356 # NOTE(beisner): cinder python client doesn't accept tokens. 468 self.log.debug('Authenticating cinder admin...')
357 keystone_ip = keystone_sentry.info['public-address']
358 ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
359 _clients = { 469 _clients = {
360 1: cinder_client.Client, 470 1: cinder_client.Client,
361 2: cinder_clientv2.Client} 471 2: cinder_clientv2.Client}
362 return _clients[api_version](username, password, tenant, ept) 472 return _clients[api_version](session=keystone.session)
363 473
364 def authenticate_keystone(self, keystone_ip, username, password, 474 def authenticate_keystone(self, keystone_ip, username, password,
365 api_version=False, admin_port=False, 475 api_version=False, admin_port=False,
@@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
367 project_domain_name=None, project_name=None): 477 project_domain_name=None, project_name=None):
368 """Authenticate with Keystone""" 478 """Authenticate with Keystone"""
369 self.log.debug('Authenticating with keystone...') 479 self.log.debug('Authenticating with keystone...')
370 port = 5000 480 if not api_version:
371 if admin_port: 481 api_version = 2
372 port = 35357 482 sess, auth = self.get_keystone_session(
373 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), 483 keystone_ip=keystone_ip,
374 port) 484 username=username,
375 if not api_version or api_version == 2: 485 password=password,
376 ep = base_ep + "/v2.0" 486 api_version=api_version,
487 admin_port=admin_port,
488 user_domain_name=user_domain_name,
489 domain_name=domain_name,
490 project_domain_name=project_domain_name,
491 project_name=project_name
492 )
493 if api_version == 2:
494 client = keystone_client.Client(session=sess)
495 else:
496 client = keystone_client_v3.Client(session=sess)
497 # This populates the client.service_catalog
498 client.auth_ref = auth.get_access(sess)
499 return client
500
501 def get_keystone_session(self, keystone_ip, username, password,
502 api_version=False, admin_port=False,
503 user_domain_name=None, domain_name=None,
504 project_domain_name=None, project_name=None):
505 """Return a keystone session object"""
506 ep = self.get_keystone_endpoint(keystone_ip,
507 api_version=api_version,
508 admin_port=admin_port)
509 if api_version == 2:
377 auth = v2.Password( 510 auth = v2.Password(
378 username=username, 511 username=username,
379 password=password, 512 password=password,
@@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
381 auth_url=ep 514 auth_url=ep
382 ) 515 )
383 sess = keystone_session.Session(auth=auth) 516 sess = keystone_session.Session(auth=auth)
384 client = keystone_client.Client(session=sess)
385 # This populates the client.service_catalog
386 client.auth_ref = auth.get_access(sess)
387 return client
388 else: 517 else:
389 ep = base_ep + "/v3"
390 auth = v3.Password( 518 auth = v3.Password(
391 user_domain_name=user_domain_name, 519 user_domain_name=user_domain_name,
392 username=username, 520 username=username,
@@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
397 auth_url=ep 525 auth_url=ep
398 ) 526 )
399 sess = keystone_session.Session(auth=auth) 527 sess = keystone_session.Session(auth=auth)
400 client = keystone_client_v3.Client(session=sess) 528 return (sess, auth)
401 # This populates the client.service_catalog 529
402 client.auth_ref = auth.get_access(sess) 530 def get_keystone_endpoint(self, keystone_ip, api_version=None,
403 return client 531 admin_port=False):
532 """Return keystone endpoint"""
533 port = 5000
534 if admin_port:
535 port = 35357
536 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
537 port)
538 if api_version == 2:
539 ep = base_ep + "/v2.0"
540 else:
541 ep = base_ep + "/v3"
542 return ep
543
544 def get_default_keystone_session(self, keystone_sentry,
545 openstack_release=None):
546 """Return a keystone session object and client object assuming standard
547 default settings
548
549 Example call in amulet tests:
550 self.keystone_session, self.keystone = u.get_default_keystone_session(
551 self.keystone_sentry,
552 openstack_release=self._get_openstack_release())
553
554 The session can then be used to auth other clients:
555 neutronclient.Client(session=session)
556 aodh_client.Client(session=session)
557 eyc
558 """
559 self.log.debug('Authenticating keystone admin...')
560 api_version = 2
561 client_class = keystone_client.Client
562 # 11 => xenial_queens
563 if openstack_release and openstack_release >= 11:
564 api_version = 3
565 client_class = keystone_client_v3.Client
566 keystone_ip = keystone_sentry.info['public-address']
567 session, auth = self.get_keystone_session(
568 keystone_ip,
569 api_version=api_version,
570 username='admin',
571 password='openstack',
572 project_name='admin',
573 user_domain_name='admin_domain',
574 project_domain_name='admin_domain')
575 client = client_class(session=session)
576 # This populates the client.service_catalog
577 client.auth_ref = auth.get_access(session)
578 return session, client
404 579
405 def authenticate_keystone_admin(self, keystone_sentry, user, password, 580 def authenticate_keystone_admin(self, keystone_sentry, user, password,
406 tenant=None, api_version=None, 581 tenant=None, api_version=None,
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 36cf32f..2d91f0a 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -384,6 +384,7 @@ class IdentityServiceContext(OSContextGenerator):
384 # so a missing value just indicates keystone needs 384 # so a missing value just indicates keystone needs
385 # upgrading 385 # upgrading
386 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') 386 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
387 ctxt['admin_domain_id'] = rdata.get('service_domain_id')
387 return ctxt 388 return ctxt
388 389
389 return {} 390 return {}
@@ -796,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator):
796 key_filename = 'key' 797 key_filename = 'key'
797 798
798 write_file(path=os.path.join(ssl_dir, cert_filename), 799 write_file(path=os.path.join(ssl_dir, cert_filename),
799 content=b64decode(cert)) 800 content=b64decode(cert), perms=0o640)
800 write_file(path=os.path.join(ssl_dir, key_filename), 801 write_file(path=os.path.join(ssl_dir, key_filename),
801 content=b64decode(key)) 802 content=b64decode(key), perms=0o640)
802 803
803 def configure_ca(self): 804 def configure_ca(self):
804 ca_cert = get_ca_cert() 805 ca_cert = get_ca_cert()
@@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator):
1872 context is needed to do that before rendering a template. 1873 context is needed to do that before rendering a template.
1873 ''' 1874 '''
1874 1875
1875 def __init__(self, dirname): 1876 def __init__(self, dirname, **kwargs):
1876 '''Used merely to ensure that a given directory exists.''' 1877 '''Used merely to ensure that a given directory exists.'''
1877 self.dirname = dirname 1878 self.dirname = dirname
1879 self.kwargs = kwargs
1878 1880
1879 def __call__(self): 1881 def __call__(self):
1880 mkdir(self.dirname) 1882 mkdir(self.dirname, **self.kwargs)
1881 return {} 1883 return {}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware
new file mode 100644
index 0000000..dd73230
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware
@@ -0,0 +1,5 @@
1[oslo_middleware]
2
3# Bug #1758675
4enable_proxy_headers_parsing = true
5
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
index 5dccd4b..021a3c2 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications
@@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
5{% if notification_topics -%} 5{% if notification_topics -%}
6topics = {{ notification_topics }} 6topics = {{ notification_topics }}
7{% endif -%} 7{% endif -%}
8{% if notification_format -%}
9notification_format = {{ notification_format }}
10{% endif -%}
8{% endif -%} 11{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index b753275..6184abd 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -182,7 +182,7 @@ SWIFT_CODENAMES = OrderedDict([
182 ('pike', 182 ('pike',
183 ['2.13.0', '2.15.0']), 183 ['2.13.0', '2.15.0']),
184 ('queens', 184 ('queens',
185 ['2.16.0']), 185 ['2.16.0', '2.17.0']),
186]) 186])
187 187
188# >= Liberty version->codename mapping 188# >= Liberty version->codename mapping
@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):
306 306
307 if src.startswith('cloud:'): 307 if src.startswith('cloud:'):
308 ca_rel = src.split(':')[1] 308 ca_rel = src.split(':')[1]
309 ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] 309 ca_rel = ca_rel.split('-')[1].split('/')[0]
310 return ca_rel 310 return ca_rel
311 311
312 # Best guess match based on deb string provided 312 # Best guess match based on deb string provided
diff --git a/hooks/charmhelpers/contrib/openstack/vaultlocker.py b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
new file mode 100644
index 0000000..a8e4bf8
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/vaultlocker.py
@@ -0,0 +1,126 @@
1# Copyright 2018 Canonical Limited.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import json
16import os
17
18import charmhelpers.contrib.openstack.alternatives as alternatives
19import charmhelpers.contrib.openstack.context as context
20
21import charmhelpers.core.hookenv as hookenv
22import charmhelpers.core.host as host
23import charmhelpers.core.templating as templating
24import charmhelpers.core.unitdata as unitdata
25
26VAULTLOCKER_BACKEND = 'charm-vaultlocker'
27
28
29class VaultKVContext(context.OSContextGenerator):
30 """Vault KV context for interaction with vault-kv interfaces"""
31 interfaces = ['secrets-storage']
32
33 def __init__(self, secret_backend=None):
34 super(context.OSContextGenerator, self).__init__()
35 self.secret_backend = (
36 secret_backend or 'charm-{}'.format(hookenv.service_name())
37 )
38
39 def __call__(self):
40 db = unitdata.kv()
41 last_token = db.get('last-token')
42 secret_id = db.get('secret-id')
43 for relation_id in hookenv.relation_ids(self.interfaces[0]):
44 for unit in hookenv.related_units(relation_id):
45 data = hookenv.relation_get(unit=unit,
46 rid=relation_id)
47 vault_url = data.get('vault_url')
48 role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
49 token = data.get('{}_token'.format(hookenv.local_unit()))
50
51 if all([vault_url, role_id, token]):
52 token = json.loads(token)
53 vault_url = json.loads(vault_url)
54
55 # Tokens may change when secret_id's are being
56 # reissued - if so use token to get new secret_id
57 if token != last_token:
58 secret_id = retrieve_secret_id(
59 url=vault_url,
60 token=token
61 )
62 db.set('secret-id', secret_id)
63 db.set('last-token', token)
64 db.flush()
65
66 ctxt = {
67 'vault_url': vault_url,
68 'role_id': json.loads(role_id),
69 'secret_id': secret_id,
70 'secret_backend': self.secret_backend,
71 }
72 vault_ca = data.get('vault_ca')
73 if vault_ca:
74 ctxt['vault_ca'] = json.loads(vault_ca)
75 self.complete = True
76 return ctxt
77 return {}
78
79
80def write_vaultlocker_conf(context, priority=100):
81 """Write vaultlocker configuration to disk and install alternative
82
83 :param context: Dict of data from vault-kv relation
84 :ptype: context: dict
85 :param priority: Priority of alternative configuration
86 :ptype: priority: int"""
87 charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
88 hookenv.service_name()
89 )
90 host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
91 templating.render(source='vaultlocker.conf.j2',
92 target=charm_vl_path,
93 context=context, perms=0o600),
94 alternatives.install_alternative('vaultlocker.conf',
95 '/etc/vaultlocker/vaultlocker.conf',
96 charm_vl_path, priority)
97
98
99def vault_relation_complete(backend=None):
100 """Determine whether vault relation is complete
101
102 :param backend: Name of secrets backend requested
103 :ptype backend: string
104 :returns: whether the relation to vault is complete
105 :rtype: bool"""
106 vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
107 vault_kv()
108 return vault_kv.complete
109
110
111# TODO: contrib a high level unwrap method to hvac that works
112def retrieve_secret_id(url, token):
113 """Retrieve a response-wrapped secret_id from Vault
114
115 :param url: URL to Vault Server
116 :ptype url: str
117 :param token: One shot Token to use
118 :ptype token: str
119 :returns: secret_id to use for Vault Access
120 :rtype: str"""
121 import hvac
122 client = hvac.Client(url=url, token=token)
123 response = client._post('/v1/sys/wrapping/unwrap')
124 if response.status_code == 200:
125 data = response.json()
126 return data['data']['secret_id']
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index e13e60a..7682820 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -291,7 +291,7 @@ class Pool(object):
291 291
292class ReplicatedPool(Pool): 292class ReplicatedPool(Pool):
293 def __init__(self, service, name, pg_num=None, replicas=2, 293 def __init__(self, service, name, pg_num=None, replicas=2,
294 percent_data=10.0): 294 percent_data=10.0, app_name=None):
295 super(ReplicatedPool, self).__init__(service=service, name=name) 295 super(ReplicatedPool, self).__init__(service=service, name=name)
296 self.replicas = replicas 296 self.replicas = replicas
297 if pg_num: 297 if pg_num:
@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
301 self.pg_num = min(pg_num, max_pgs) 301 self.pg_num = min(pg_num, max_pgs)
302 else: 302 else:
303 self.pg_num = self.get_pgs(self.replicas, percent_data) 303 self.pg_num = self.get_pgs(self.replicas, percent_data)
304 if app_name:
305 self.app_name = app_name
306 else:
307 self.app_name = 'unknown'
304 308
305 def create(self): 309 def create(self):
306 if not pool_exists(self.service, self.name): 310 if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
313 update_pool(client=self.service, 317 update_pool(client=self.service,
314 pool=self.name, 318 pool=self.name,
315 settings={'size': str(self.replicas)}) 319 settings={'size': str(self.replicas)})
320 try:
321 set_app_name_for_pool(client=self.service,
322 pool=self.name,
323 name=self.app_name)
324 except CalledProcessError:
325 log('Could not set app name for pool {}'.format(self.name, level=WARNING))
316 except CalledProcessError: 326 except CalledProcessError:
317 raise 327 raise
318 328
@@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
320# Default jerasure erasure coded pool 330# Default jerasure erasure coded pool
321class ErasurePool(Pool): 331class ErasurePool(Pool):
322 def __init__(self, service, name, erasure_code_profile="default", 332 def __init__(self, service, name, erasure_code_profile="default",
323 percent_data=10.0): 333 percent_data=10.0, app_name=None):
324 super(ErasurePool, self).__init__(service=service, name=name) 334 super(ErasurePool, self).__init__(service=service, name=name)
325 self.erasure_code_profile = erasure_code_profile 335 self.erasure_code_profile = erasure_code_profile
326 self.percent_data = percent_data 336 self.percent_data = percent_data
337 if app_name:
338 self.app_name = app_name
339 else:
340 self.app_name = 'unknown'
327 341
328 def create(self): 342 def create(self):
329 if not pool_exists(self.service, self.name): 343 if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ class ErasurePool(Pool):
355 'erasure', self.erasure_code_profile] 369 'erasure', self.erasure_code_profile]
356 try: 370 try:
357 check_call(cmd) 371 check_call(cmd)
372 try:
373 set_app_name_for_pool(client=self.service,
374 pool=self.name,
375 name=self.app_name)
376 except CalledProcessError:
377 log('Could not set app name for pool {}'.format(self.name, level=WARNING))
358 except CalledProcessError: 378 except CalledProcessError:
359 raise 379 raise
360 380
@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
778 check_call(cmd) 798 check_call(cmd)
779 799
780 800
801def set_app_name_for_pool(client, pool, name):
802 """
803 Calls `osd pool application enable` for the specified pool name
804
805 :param client: Name of the ceph client to use
806 :type client: str
807 :param pool: Pool to set app name for
808 :type pool: str
809 :param name: app name for the specified pool
810 :type name: str
811
812 :raises: CalledProcessError if ceph call fails
813 """
814 if ceph_version() >= '12.0.0':
815 cmd = ['ceph', '--id', client, 'osd', 'pool',
816 'application', 'enable', pool, name]
817 check_call(cmd)
818
819
781def create_pool(service, name, replicas=3, pg_num=None): 820def create_pool(service, name, replicas=3, pg_num=None):
782 """Create a new RADOS pool.""" 821 """Create a new RADOS pool."""
783 if pool_exists(service, name): 822 if pool_exists(service, name):
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 79a7a24..c8bde69 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
151 ''' 151 '''
152 cmd = ['lvextend', lv_name, block_device] 152 cmd = ['lvextend', lv_name, block_device]
153 check_call(cmd) 153 check_call(cmd)
154
155
156def create_logical_volume(lv_name, volume_group, size=None):
157 '''
158 Create a new logical volume in an existing volume group
159
160 :param lv_name: str: name of logical volume to be created.
161 :param volume_group: str: Name of volume group to use for the new volume.
162 :param size: str: Size of logical volume to create (100% if not supplied)
163 :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
164 '''
165 if size:
166 check_call([
167 'lvcreate',
168 '--yes',
169 '-L',
170 '{}'.format(size),
171 '-n', lv_name, volume_group
172 ])
173 # create the lv with all the space available, this is needed because the
174 # system call is different for LVM
175 else:
176 check_call([
177 'lvcreate',
178 '--yes',
179 '-l',
180 '100%FREE',
181 '-n', lv_name, volume_group
182 ])
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
index c942889..6f846b0 100644
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -67,3 +67,19 @@ def is_device_mounted(device):
67 except Exception: 67 except Exception:
68 return False 68 return False
69 return bool(re.search(r'MOUNTPOINT=".+"', out)) 69 return bool(re.search(r'MOUNTPOINT=".+"', out))
70
71
72def mkfs_xfs(device, force=False):
73 """Format device with XFS filesystem.
74
75 By default this should fail if the device already has a filesystem on it.
76 :param device: Full path to device to format
77 :ptype device: str
78 :param force: Force operation
79 :ptype force: boolean"""
80 cmd = ['mkfs.xfs']
81 if force:
82 cmd.append("-f")
83
84 cmd += ['-i', 'size=1024', device]
85 check_call(cmd)
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 7ed1cc4..627d8f7 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
27import os 27import os
28import json 28import json
29import yaml 29import yaml
30import re
30import subprocess 31import subprocess
31import sys 32import sys
32import errno 33import errno
@@ -67,7 +68,7 @@ def cached(func):
67 @wraps(func) 68 @wraps(func)
68 def wrapper(*args, **kwargs): 69 def wrapper(*args, **kwargs):
69 global cache 70 global cache
70 key = str((func, args, kwargs)) 71 key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
71 try: 72 try:
72 return cache[key] 73 return cache[key]
73 except KeyError: 74 except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
289 self.implicit_save = True 290 self.implicit_save = True
290 self._prev_dict = None 291 self._prev_dict = None
291 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) 292 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
292 if os.path.exists(self.path): 293 if os.path.exists(self.path) and os.stat(self.path).st_size:
293 self.load_previous() 294 self.load_previous()
294 atexit(self._implicit_save) 295 atexit(self._implicit_save)
295 296
@@ -309,7 +310,11 @@ class Config(dict):
309 """ 310 """
310 self.path = path or self.path 311 self.path = path or self.path
311 with open(self.path) as f: 312 with open(self.path) as f:
312 self._prev_dict = json.load(f) 313 try:
314 self._prev_dict = json.load(f)
315 except ValueError as e:
316 log('Unable to parse previous config data - {}'.format(str(e)),
317 level=ERROR)
313 for k, v in copy.deepcopy(self._prev_dict).items(): 318 for k, v in copy.deepcopy(self._prev_dict).items():
314 if k not in self: 319 if k not in self:
315 self[k] = v 320 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
353 self.save() 358 self.save()
354 359
355 360
356@cached 361_cache_config = None
362
363
357def config(scope=None): 364def config(scope=None):
358 """Juju charm configuration""" 365 """
359 config_cmd_line = ['config-get'] 366 Get the juju charm configuration (scope==None) or individual key,
360 if scope is not None: 367 (scope=str). The returned value is a Python data structure loaded as
361 config_cmd_line.append(scope) 368 JSON from the Juju config command.
362 else: 369
363 config_cmd_line.append('--all') 370 :param scope: If set, return the value for the specified key.
364 config_cmd_line.append('--format=json') 371 :type scope: Optional[str]
372 :returns: Either the whole config as a Config, or a key from it.
373 :rtype: Any
374 """
375 global _cache_config
376 config_cmd_line = ['config-get', '--all', '--format=json']
365 try: 377 try:
366 config_data = json.loads( 378 # JSON Decode Exception for Python3.5+
367 subprocess.check_output(config_cmd_line).decode('UTF-8')) 379 exc_json = json.decoder.JSONDecodeError
380 except AttributeError:
381 # JSON Decode Exception for Python2.7 through Python3.4
382 exc_json = ValueError
383 try:
384 if _cache_config is None:
385 config_data = json.loads(
386 subprocess.check_output(config_cmd_line).decode('UTF-8'))
387 _cache_config = Config(config_data)
368 if scope is not None: 388 if scope is not None:
369 return config_data 389 return _cache_config.get(scope)
370 return Config(config_data) 390 return _cache_config
371 except ValueError: 391 except (exc_json, UnicodeDecodeError) as e:
392 log('Unable to parse output from config-get: config_cmd_line="{}" '
393 'message="{}"'
394 .format(config_cmd_line, str(e)), level=ERROR)
372 return None 395 return None
373 396
374 397
@@ -1043,7 +1066,6 @@ def juju_version():
1043 universal_newlines=True).strip() 1066 universal_newlines=True).strip()
1044 1067
1045 1068
1046@cached
1047def has_juju_version(minimum_version): 1069def has_juju_version(minimum_version):
1048 """Return True if the Juju version is at least the provided version""" 1070 """Return True if the Juju version is at least the provided version"""
1049 return LooseVersion(juju_version()) >= LooseVersion(minimum_version) 1071 return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
1103@translate_exc(from_exc=OSError, to_exc=NotImplementedError) 1125@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1104def network_get_primary_address(binding): 1126def network_get_primary_address(binding):
1105 ''' 1127 '''
1128 Deprecated since Juju 2.3; use network_get()
1129
1106 Retrieve the primary network address for a named binding 1130 Retrieve the primary network address for a named binding
1107 1131
1108 :param binding: string. The name of a relation of extra-binding 1132 :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
1123 return response 1147 return response
1124 1148
1125 1149
1126@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1127def network_get(endpoint, relation_id=None): 1150def network_get(endpoint, relation_id=None):
1128 """ 1151 """
1129 Retrieve the network details for a relation endpoint 1152 Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
1131 :param endpoint: string. The name of a relation endpoint 1154 :param endpoint: string. The name of a relation endpoint
1132 :param relation_id: int. The ID of the relation for the current context. 1155 :param relation_id: int. The ID of the relation for the current context.
1133 :return: dict. The loaded YAML output of the network-get query. 1156 :return: dict. The loaded YAML output of the network-get query.
1134 :raise: NotImplementedError if run on Juju < 2.1 1157 :raise: NotImplementedError if request not supported by the Juju version.
1135 """ 1158 """
1159 if not has_juju_version('2.2'):
1160 raise NotImplementedError(juju_version()) # earlier versions require --primary-address
1161 if relation_id and not has_juju_version('2.3'):
1162 raise NotImplementedError # 2.3 added the -r option
1163
1136 cmd = ['network-get', endpoint, '--format', 'yaml'] 1164 cmd = ['network-get', endpoint, '--format', 'yaml']
1137 if relation_id: 1165 if relation_id:
1138 cmd.append('-r') 1166 cmd.append('-r')
1139 cmd.append(relation_id) 1167 cmd.append(relation_id)
1140 try: 1168 response = subprocess.check_output(
1141 response = subprocess.check_output( 1169 cmd,
1142 cmd, 1170 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1143 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1144 except CalledProcessError as e:
1145 # Early versions of Juju 2.0.x required the --primary-address argument.
1146 # We catch that condition here and raise NotImplementedError since
1147 # the requested semantics are not available - the caller can then
1148 # use the network_get_primary_address() method instead.
1149 if '--primary-address is currently required' in e.output.decode('UTF-8'):
1150 raise NotImplementedError
1151 raise
1152 return yaml.safe_load(response) 1171 return yaml.safe_load(response)
1153 1172
1154 1173
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
1204 1223
1205def ingress_address(rid=None, unit=None): 1224def ingress_address(rid=None, unit=None):
1206 """ 1225 """
1207 Retrieve the ingress-address from a relation when available. Otherwise, 1226 Retrieve the ingress-address from a relation when available.
1208 return the private-address. This function is to be used on the consuming 1227 Otherwise, return the private-address.
1209 side of the relation. 1228
1229 When used on the consuming side of the relation (unit is a remote
1230 unit), the ingress-address is the IP address that this unit needs
1231 to use to reach the provided service on the remote unit.
1232
1233 When used on the providing side of the relation (unit == local_unit()),
1234 the ingress-address is the IP address that is advertised to remote
1235 units on this relation. Remote units need to use this address to
1236 reach the local provided service on this unit.
1237
1238 Note that charms may document some other method to use in
1239 preference to the ingress_address(), such as an address provided
1240 on a different relation attribute or a service discovery mechanism.
1241 This allows charms to redirect inbound connections to their peers
1242 or different applications such as load balancers.
1210 1243
1211 Usage: 1244 Usage:
1212 addresses = [ingress_address(rid=u.rid, unit=u.unit) 1245 addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
1220 settings = relation_get(rid=rid, unit=unit) 1253 settings = relation_get(rid=rid, unit=unit)
1221 return (settings.get('ingress-address') or 1254 return (settings.get('ingress-address') or
1222 settings.get('private-address')) 1255 settings.get('private-address'))
1256
1257
1258def egress_subnets(rid=None, unit=None):
1259 """
1260 Retrieve the egress-subnets from a relation.
1261
1262 This function is to be used on the providing side of the
1263 relation, and provides the ranges of addresses that client
1264 connections may come from. The result is uninteresting on
1265 the consuming side of a relation (unit == local_unit()).
1266
1267 Returns a stable list of subnets in CIDR format.
1268 eg. ['192.168.1.0/24', '2001::F00F/128']
1269
1270 If egress-subnets is not available, falls back to using the published
1271 ingress-address, or finally private-address.
1272
1273 :param rid: string relation id
1274 :param unit: string unit name
1275 :side effect: calls relation_get
1276 :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
1277 """
1278 def _to_range(addr):
1279 if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
1280 addr += '/32'
1281 elif ':' in addr and '/' not in addr: # IPv6
1282 addr += '/128'
1283 return addr
1284
1285 settings = relation_get(rid=rid, unit=unit)
1286 if 'egress-subnets' in settings:
1287 return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
1288 if 'ingress-address' in settings:
1289 return [_to_range(settings['ingress-address'])]
1290 if 'private-address' in settings:
1291 return [_to_range(settings['private-address'])]
1292 return [] # Should never happen
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index fd14d60..322ab2a 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
993 return output 993 return output
994 994
995 995
996def modulo_distribution(modulo=3, wait=30): 996def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
997 """ Modulo distribution 997 """ Modulo distribution
998 998
999 This helper uses the unit number, a modulo value and a constant wait time 999 This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
1015 1015
1016 @param modulo: int The modulo number creates the group distribution 1016 @param modulo: int The modulo number creates the group distribution
1017 @param wait: int The constant time wait value 1017 @param wait: int The constant time wait value
1018 @param non_zero_wait: boolean Override unit % modulo == 0,
1019 return modulo * wait. Used to avoid collisions with
1020 leader nodes which are often given priority.
1018 @return: int Calculated time to wait for unit operation 1021 @return: int Calculated time to wait for unit operation
1019 """ 1022 """
1020 unit_number = int(local_unit().split('/')[1]) 1023 unit_number = int(local_unit().split('/')[1])
1021 return (unit_number % modulo) * wait 1024 calculated_wait_time = (unit_number % modulo) * wait
1025 if non_zero_wait and calculated_wait_time == 0:
1026 return modulo * wait
1027 else:
1028 return calculated_wait_time
diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/hooks/charmhelpers/core/services/base.py
+++ b/hooks/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
307 """ 307 """
308 def __call__(self, manager, service_name, event_name): 308 def __call__(self, manager, service_name, event_name):
309 service = manager.get_service(service_name) 309 service = manager.get_service(service_name)
310 new_ports = service.get('ports', []) 310 # turn this generator into a list,
311 # as we'll be going over it multiple times
312 new_ports = list(service.get('ports', []))
311 port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) 313 port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
312 if os.path.exists(port_file): 314 if os.path.exists(port_file):
313 with open(port_file) as fp: 315 with open(port_file) as fp:
314 old_ports = fp.read().split(',') 316 old_ports = fp.read().split(',')
315 for old_port in old_ports: 317 for old_port in old_ports:
316 if bool(old_port): 318 if bool(old_port) and not self.ports_contains(old_port, new_ports):
317 old_port = int(old_port) 319 hookenv.close_port(old_port)
318 if old_port not in new_ports:
319 hookenv.close_port(old_port)
320 with open(port_file, 'w') as fp: 320 with open(port_file, 'w') as fp:
321 fp.write(','.join(str(port) for port in new_ports)) 321 fp.write(','.join(str(port) for port in new_ports))
322 for port in new_ports: 322 for port in new_ports:
323 # A port is either a number or 'ICMP'
324 protocol = 'TCP'
325 if str(port).upper() == 'ICMP':
326 protocol = 'ICMP'
323 if event_name == 'start': 327 if event_name == 'start':
324 hookenv.open_port(port) 328 hookenv.open_port(port, protocol)
325 elif event_name == 'stop': 329 elif event_name == 'stop':
326 hookenv.close_port(port) 330 hookenv.close_port(port, protocol)
331
332 def ports_contains(self, port, ports):
333 if not bool(port):
334 return False
335 if str(port).upper() != 'ICMP':
336 port = int(port)
337 return port in ports
327 338
328 339
329def service_stop(service_name): 340def service_stop(service_name):
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
index 6e413e3..1f188d8 100644
--- a/hooks/charmhelpers/core/sysctl.py
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
31def create(sysctl_dict, sysctl_file): 31def create(sysctl_dict, sysctl_file):
32 """Creates a sysctl.conf file from a YAML associative array 32 """Creates a sysctl.conf file from a YAML associative array
33 33
34 :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" 34 :param sysctl_dict: a dict or YAML-formatted string of sysctl
35 options eg "{ 'kernel.max_pid': 1337 }"
35 :type sysctl_dict: str 36 :type sysctl_dict: str
36 :param sysctl_file: path to the sysctl file to be saved 37 :param sysctl_file: path to the sysctl file to be saved
37 :type sysctl_file: str or unicode 38 :type sysctl_file: str or unicode
38 :returns: None 39 :returns: None
39 """ 40 """
40 try: 41 if type(sysctl_dict) is not dict:
41 sysctl_dict_parsed = yaml.safe_load(sysctl_dict) 42 try:
42 except yaml.YAMLError: 43 sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
43 log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), 44 except yaml.YAMLError:
44 level=ERROR) 45 log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
45 return 46 level=ERROR)
47 return
48 else:
49 sysctl_dict_parsed = sysctl_dict
46 50
47 with open(sysctl_file, "w") as fd: 51 with open(sysctl_file, "w") as fd:
48 for key, value in sysctl_dict_parsed.items(): 52 for key, value in sysctl_dict_parsed.items():
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index 6d7b494..ab55432 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
166 166
167 To support dicts, lists, integer, floats, and booleans values 167 To support dicts, lists, integer, floats, and booleans values
168 are automatically json encoded/decoded. 168 are automatically json encoded/decoded.
169
170 Note: to facilitate unit testing, ':memory:' can be passed as the
171 path parameter which causes sqlite3 to only build the db in memory.
172 This should only be used for testing purposes.
169 """ 173 """
170 def __init__(self, path=None): 174 def __init__(self, path=None):
171 self.db_path = path 175 self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
175 else: 179 else:
176 self.db_path = os.path.join( 180 self.db_path = os.path.join(
177 os.environ.get('CHARM_DIR', ''), '.unit-state.db') 181 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
178 with open(self.db_path, 'a') as f: 182 if self.db_path != ':memory:':
179 os.fchmod(f.fileno(), 0o600) 183 with open(self.db_path, 'a') as f:
184 os.fchmod(f.fileno(), 0o600)
180 self.conn = sqlite3.connect('%s' % self.db_path) 185 self.conn = sqlite3.connect('%s' % self.db_path)
181 self.cursor = self.conn.cursor() 186 self.cursor = self.conn.cursor()
182 self.revision = None 187 self.revision = None
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 910e96a..653d58f 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
44 'x86_64': PROPOSED_POCKET, 44 'x86_64': PROPOSED_POCKET,
45 'ppc64le': PROPOSED_PORTS_POCKET, 45 'ppc64le': PROPOSED_PORTS_POCKET,
46 'aarch64': PROPOSED_PORTS_POCKET, 46 'aarch64': PROPOSED_PORTS_POCKET,
47 's390x': PROPOSED_PORTS_POCKET,
47} 48}
48CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" 49CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
49CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' 50CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 337bc98..88504bc 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -67,7 +67,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
67 {'name': 'percona-cluster'}, 67 {'name': 'percona-cluster'},
68 {'name': 'keystone'}, 68 {'name': 'keystone'},
69 {'name': 'rabbitmq-server'}, 69 {'name': 'rabbitmq-server'},
70 {'name': 'ceph', 'units': 3}, 70 {'name': 'ceph-mon', 'units': 3},
71 {'name': 'ceph-osd', 'units': 3},
71 {'name': 'cinder'}, 72 {'name': 'cinder'},
72 {'name': 'cinder-ceph'}, 73 {'name': 'cinder-ceph'},
73 ] 74 ]
@@ -78,8 +79,9 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
78 """Add all of the relations for the services.""" 79 """Add all of the relations for the services."""
79 80
80 relations = { 81 relations = {
81 'cinder-backup:ceph': 'ceph:client', 82 'cinder-backup:ceph': 'ceph-mon:client',
82 'cinder-ceph:ceph': 'ceph:client', 83 'cinder-ceph:ceph': 'ceph-mon:client',
84 'ceph-osd:mon': 'ceph-mon:osd',
83 'cinder:storage-backend': 'cinder-ceph:storage-backend', 85 'cinder:storage-backend': 'cinder-ceph:storage-backend',
84 'cinder:backup-backend': 'cinder-backup:backup-backend', 86 'cinder:backup-backend': 'cinder-backup:backup-backend',
85 'keystone:shared-db': 'percona-cluster:shared-db', 87 'keystone:shared-db': 'percona-cluster:shared-db',
@@ -108,10 +110,16 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
108 'auth-supported': 'none', 110 'auth-supported': 'none',
109 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc', 111 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
110 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==', 112 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
111 'osd-reformat': 'yes', 113 }
114
115 # Include a non-existent device as osd-devices is a whitelist,
116 # and this will catch cases where proposals attempt to change that.
117 ceph_osd_config = {
118 'osd-reformat': True,
112 'ephemeral-unmount': '/mnt', 119 'ephemeral-unmount': '/mnt',
113 'osd-devices': '/dev/vdb /srv/ceph' 120 'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent'
114 } 121 }
122
115 cinder_ceph_config = { 123 cinder_ceph_config = {
116 'ceph-osd-replication-count': '3', 124 'ceph-osd-replication-count': '3',
117 } 125 }
@@ -119,7 +127,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
119 'keystone': keystone_config, 127 'keystone': keystone_config,
120 'percona-cluster': pxc_config, 128 'percona-cluster': pxc_config,
121 'cinder': cinder_config, 129 'cinder': cinder_config,
122 'ceph': ceph_config, 130 'ceph-mon': ceph_config,
131 'ceph-osd': ceph_osd_config,
123 'cinder-ceph': cinder_ceph_config, 132 'cinder-ceph': cinder_ceph_config,
124 'cinder-backup': cinder_ceph_config, 133 'cinder-backup': cinder_ceph_config,
125 } 134 }
@@ -132,9 +141,12 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
132 self.keystone_sentry = self.d.sentry['keystone'][0] 141 self.keystone_sentry = self.d.sentry['keystone'][0]
133 self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] 142 self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
134 self.cinder_sentry = self.d.sentry['cinder'][0] 143 self.cinder_sentry = self.d.sentry['cinder'][0]
135 self.ceph0_sentry = self.d.sentry['ceph'][0] 144 self.ceph0_sentry = self.d.sentry['ceph-mon'][0]
136 self.ceph1_sentry = self.d.sentry['ceph'][1] 145 self.ceph1_sentry = self.d.sentry['ceph-mon'][1]
137 self.ceph2_sentry = self.d.sentry['ceph'][2] 146 self.ceph2_sentry = self.d.sentry['ceph-mon'][2]
147 self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0]
148 self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1]
149 self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2]
138 self.cinder_backup_sentry = self.d.sentry['cinder-backup'][0] 150 self.cinder_backup_sentry = self.d.sentry['cinder-backup'][0]
139 u.log.debug('openstack release val: {}'.format( 151 u.log.debug('openstack release val: {}'.format(
140 self._get_openstack_release())) 152 self._get_openstack_release()))
@@ -142,147 +154,46 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
142 self._get_openstack_release_string())) 154 self._get_openstack_release_string()))
143 155
144 # Authenticate admin with keystone 156 # Authenticate admin with keystone
145 self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, 157 self.keystone_session, self.keystone = u.get_default_keystone_session(
146 user='admin', 158 self.keystone_sentry,
147 password='openstack', 159 openstack_release=self._get_openstack_release())
148 tenant='admin')
149 # Authenticate admin with cinder endpoint
150 self.cinder = u.authenticate_cinder_admin(self.keystone_sentry,
151 username='admin',
152 password='openstack',
153 tenant='admin')
154
155 # Create a demo tenant/role/user
156 self.demo_tenant = 'demoTenant'
157 self.demo_role = 'demoRole'
158 self.demo_user = 'demoUser'
159 if not u.tenant_exists(self.keystone, self.demo_tenant):
160 tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
161 description='demo tenant',
162 enabled=True)
163 self.keystone.roles.create(name=self.demo_role)
164 self.keystone.users.create(name=self.demo_user,
165 password='password',
166 tenant_id=tenant.id,
167 email='demo@demo.com')
168
169 # Authenticate demo user with keystone
170 self.keystone_demo = u.authenticate_keystone_user(self.keystone,
171 self.demo_user,
172 'password',
173 self.demo_tenant)
174
175 # Authenticate demo user with nova-api
176 self.nova_demo = u.authenticate_nova_user(self.keystone,
177 self.demo_user,
178 'password',
179 self.demo_tenant)
180
181 def test_100_ceph_processes(self):
182 """Verify that the expected service processes are running
183 on each ceph unit."""
184
185 # Process name and quantity of processes to expect on each unit
186 ceph_processes = {
187 'ceph-mon': 1,
188 'ceph-osd': 2
189 }
190 160
191 # Units with process names and PID quantities expected 161 # Authenticate admin with cinder endpoint
192 expected_processes = { 162 if self._get_openstack_release() >= self.xenial_pike:
193 self.ceph0_sentry: ceph_processes, 163 api_version = 2
194 self.ceph1_sentry: ceph_processes, 164 else:
195 self.ceph2_sentry: ceph_processes 165 api_version = 1
196 } 166 self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)
197
198 actual_pids = u.get_unit_process_ids(expected_processes)
199 ret = u.validate_unit_process_ids(expected_processes, actual_pids)
200 if ret:
201 amulet.raise_status(amulet.FAIL, msg=ret)
202 167
203 def test_102_services(self): 168 def test_102_services(self):
204 """Verify the expected services are running on the service units.""" 169 """Verify the expected services are running on the service units."""
170 if self._get_openstack_release() >= self.xenial_ocata:
171 cinder_services = ['apache2',
172 'cinder-scheduler',
173 'cinder-volume']
174 else:
175 cinder_services = ['cinder-api',
176 'cinder-scheduler',
177 'cinder-volume']
205 services = { 178 services = {
206 self.rabbitmq_sentry: ['rabbitmq-server'], 179 self.cinder_sentry: cinder_services,
207 self.keystone_sentry: ['keystone'],
208 self.cinder_sentry: ['cinder-api',
209 'cinder-scheduler',
210 'cinder-volume'],
211 } 180 }
212 181
213 if self._get_openstack_release() < self.xenial_mitaka:
214 # For upstart systems only. Ceph services under systemd
215 # are checked by process name instead.
216 ceph_services = [
217 'ceph-mon-all',
218 'ceph-mon id=`hostname`',
219 'ceph-osd-all',
220 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
221 'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
222 ]
223 services[self.ceph0_sentry] = ceph_services
224 services[self.ceph1_sentry] = ceph_services
225 services[self.ceph2_sentry] = ceph_services
226
227 if self._get_openstack_release() >= self.trusty_liberty:
228 services[self.keystone_sentry] = ['apache2']
229
230 if self._get_openstack_release() >= self.xenial_ocata:
231 services[self.cinder_sentry].remove('cinder-api')
232
233 ret = u.validate_services_by_name(services) 182 ret = u.validate_services_by_name(services)
234 if ret: 183 if ret:
235 amulet.raise_status(amulet.FAIL, msg=ret) 184 amulet.raise_status(amulet.FAIL, msg=ret)
236 185
237 def test_110_users(self):
238 """Verify expected users."""
239 u.log.debug('Checking keystone users...')
240
241 if self._get_openstack_release() < self.xenial_pike:
242 expected = [{
243 'name': 'cinder_cinderv2',
244 'enabled': True,
245 'tenantId': u.not_null,
246 'id': u.not_null,
247 'email': 'juju@localhost',
248 }]
249 else:
250 expected = [{
251 'name': 'cinderv2_cinderv3',
252 'enabled': True,
253 'tenantId': u.not_null,
254 'id': u.not_null,
255 'email': 'juju@localhost',
256 }]
257
258 expected.append({
259 'name': 'admin',
260 'enabled': True,
261 'tenantId': u.not_null,
262 'id': u.not_null,
263 'email': 'juju@localhost',
264 })
265
266 actual = self.keystone.users.list()
267 ret = u.validate_user_data(expected, actual)
268 if ret:
269 amulet.raise_status(amulet.FAIL, msg=ret)
270
271 def test_112_service_catalog(self): 186 def test_112_service_catalog(self):
272 """Verify that the service catalog endpoint data""" 187 """Verify that the service catalog endpoint data"""
273 u.log.debug('Checking keystone service catalog...') 188 u.log.debug('Checking keystone service catalog...')
274 endpoint_vol = { 189 endpoint_vol = {'adminURL': u.valid_url,
275 'adminURL': u.valid_url, 190 'region': 'RegionOne',
276 'region': 'RegionOne', 191 'publicURL': u.valid_url,
277 'publicURL': u.valid_url, 192 'internalURL': u.valid_url}
278 'internalURL': u.valid_url 193 endpoint_id = {'adminURL': u.valid_url,
279 } 194 'region': 'RegionOne',
280 endpoint_id = { 195 'publicURL': u.valid_url,
281 'adminURL': u.valid_url, 196 'internalURL': u.valid_url}
282 'region': 'RegionOne',
283 'publicURL': u.valid_url,
284 'internalURL': u.valid_url
285 }
286 if self._get_openstack_release() >= self.trusty_icehouse: 197 if self._get_openstack_release() >= self.trusty_icehouse:
287 endpoint_vol['id'] = u.not_null 198 endpoint_vol['id'] = u.not_null
288 endpoint_id['id'] = u.not_null 199 endpoint_id['id'] = u.not_null
@@ -295,29 +206,49 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
295 # Ocata and prior 206 # Ocata and prior
296 expected = {'identity': [endpoint_id], 207 expected = {'identity': [endpoint_id],
297 'volume': [endpoint_id]} 208 'volume': [endpoint_id]}
298
299 actual = self.keystone.service_catalog.get_endpoints() 209 actual = self.keystone.service_catalog.get_endpoints()
300 210
301 ret = u.validate_svc_catalog_endpoint_data(expected, actual) 211 ret = u.validate_svc_catalog_endpoint_data(
212 expected,
213 actual,
214 openstack_release=self._get_openstack_release())
302 if ret: 215 if ret:
303 amulet.raise_status(amulet.FAIL, msg=ret) 216 amulet.raise_status(amulet.FAIL, msg=ret)
304 217
305 def test_114_cinder_endpoint(self): 218 def test_114_cinder_endpoint(self):
306 """Verify the cinder endpoint data.""" 219 """Verify the cinder endpoint data."""
307 u.log.debug('Checking cinder api endpoint data...') 220 u.log.debug('Checking cinder endpoint...')
308 endpoints = self.keystone.endpoints.list() 221 endpoints = self.keystone.endpoints.list()
309 admin_port = internal_port = public_port = '8776' 222 admin_port = internal_port = public_port = '8776'
310 expected = { 223 if self._get_openstack_release() >= self.xenial_queens:
311 'id': u.not_null, 224 expected = {
312 'region': 'RegionOne', 225 'id': u.not_null,
313 'adminurl': u.valid_url, 226 'region': 'RegionOne',
314 'internalurl': u.valid_url, 227 'region_id': 'RegionOne',
315 'publicurl': u.valid_url, 228 'url': u.valid_url,
316 'service_id': u.not_null 229 'interface': u.not_null,
317 } 230 'service_id': u.not_null}
318 231 ret = u.validate_v3_endpoint_data(
319 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, 232 endpoints,
320 public_port, expected) 233 admin_port,
234 internal_port,
235 public_port,
236 expected,
237 6)
238 else:
239 expected = {
240 'id': u.not_null,
241 'region': 'RegionOne',
242 'adminurl': u.valid_url,
243 'internalurl': u.valid_url,
244 'publicurl': u.valid_url,
245 'service_id': u.not_null}
246 ret = u.validate_v2_endpoint_data(
247 endpoints,
248 admin_port,
249 internal_port,
250 public_port,
251 expected)
321 if ret: 252 if ret:
322 amulet.raise_status(amulet.FAIL, 253 amulet.raise_status(amulet.FAIL,
323 msg='cinder endpoint: {}'.format(ret)) 254 msg='cinder endpoint: {}'.format(ret))
@@ -342,7 +273,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
342 client_unit = self.cinder_backup_sentry 273 client_unit = self.cinder_backup_sentry
343 broker_req = json.loads(client_unit.relation( 274 broker_req = json.loads(client_unit.relation(
344 'ceph', 275 'ceph',
345 'ceph:client')['broker_req']) 276 'ceph-mon:client')['broker_req'])
346 return broker_req 277 return broker_req
347 278
348 def get_broker_response(self): 279 def get_broker_response(self):
@@ -371,7 +302,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
371 u.log.debug('Checking cinder-backup:ceph to ceph:client ' 302 u.log.debug('Checking cinder-backup:ceph to ceph:client '
372 'relation data...') 303 'relation data...')
373 unit = self.cinder_backup_sentry 304 unit = self.cinder_backup_sentry
374 relation = ['ceph', 'ceph:client'] 305 relation = ['ceph', 'ceph-mon:client']
375 306
376 req = { 307 req = {
377 "api-version": 1, 308 "api-version": 1,
@@ -385,15 +316,15 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
385 } 316 }
386 ret = u.validate_relation_data(unit, relation, expected) 317 ret = u.validate_relation_data(unit, relation, expected)
387 if ret: 318 if ret:
388 msg = u.relation_error('cinder-backup ceph', ret) 319 msg = u.relation_error('cinder-backup ceph-mon', ret)
389 amulet.raise_status(amulet.FAIL, msg=msg) 320 amulet.raise_status(amulet.FAIL, msg=msg)
390 ret = self.validate_broker_req(unit, relation, req) 321 ret = self.validate_broker_req(unit, relation, req)
391 if ret: 322 if ret:
392 msg = u.relation_error('cinder-backup ceph', ret) 323 msg = u.relation_error('cinder-backup ceph-mon', ret)
393 amulet.raise_status(amulet.FAIL, msg=msg) 324 amulet.raise_status(amulet.FAIL, msg=msg)
394 325
395 def test_201_ceph_cinderbackup_ceph_relation(self): 326 def test_201_ceph_cinderbackup_ceph_relation(self):
396 u.log.debug('Checking ceph:client to cinder-backup:ceph ' 327 u.log.debug('Checking ceph-mon:client to cinder-backup:ceph '
397 'relation data...') 328 'relation data...')
398 ceph_unit = self.ceph0_sentry 329 ceph_unit = self.ceph0_sentry
399 relation = ['client', 'cinder-backup:ceph'] 330 relation = ['client', 'cinder-backup:ceph']
@@ -816,17 +747,27 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
816 747
817 name = "demo-vol" 748 name = "demo-vol"
818 vols = self.cinder.volumes.list() 749 vols = self.cinder.volumes.list()
819 cinder_vols = [v for v in vols if v.name == name] 750 try:
751 cinder_vols = [v for v in vols if v.name == name]
752 except AttributeError:
753 cinder_vols = [v for v in vols if v.display_name == name]
820 if not cinder_vols: 754 if not cinder_vols:
821 # NOTE(hopem): it appears that at some point cinder-backup stopped 755 # NOTE(hopem): it appears that at some point cinder-backup stopped
822 # restoring volume metadata properly so revert to default name if 756 # restoring volume metadata properly so revert to default name if
823 # original is not found 757 # original is not found
824 name = "restore_backup_%s" % (vol_backup.id) 758 name = "restore_backup_%s" % (vol_backup.id)
825 cinder_vols = [v for v in vols if v.name == name] 759 try:
760 cinder_vols = [v for v in vols if v.name == name]
761 except AttributeError:
762 cinder_vols = [v for v in vols if v.display_name == name]
826 763
827 if not cinder_vols: 764 if not cinder_vols:
828 msg = ("Could not find restore vol '%s' in %s" % 765 try:
829 (name, [v.name for v in vols])) 766 msg = ("Could not find restore vol '%s' in %s" %
767 (name, [v.name for v in vols]))
768 except AttributeError:
769 msg = ("Could not find restore vol '%s' in %s" %
770 (name, [v.display_name for v in vols]))
830 u.log.error(msg) 771 u.log.error(msg)
831 amulet.raise_status(amulet.FAIL, msg=msg) 772 amulet.raise_status(amulet.FAIL, msg=msg)
832 773
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 5afbbd8..66beeda 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -21,6 +21,9 @@ from collections import OrderedDict
21from charmhelpers.contrib.amulet.deployment import ( 21from charmhelpers.contrib.amulet.deployment import (
22 AmuletDeployment 22 AmuletDeployment
23) 23)
24from charmhelpers.contrib.openstack.amulet.utils import (
25 OPENSTACK_RELEASES_PAIRS
26)
24 27
25DEBUG = logging.DEBUG 28DEBUG = logging.DEBUG
26ERROR = logging.ERROR 29ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
271 release. 274 release.
272 """ 275 """
273 # Must be ordered by OpenStack release (not by Ubuntu release): 276 # Must be ordered by OpenStack release (not by Ubuntu release):
274 (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty, 277 for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
275 self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton, 278 setattr(self, os_pair, i)
276 self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
277 self.xenial_pike, self.artful_pike, self.xenial_queens,
278 self.bionic_queens,) = range(13)
279 279
280 releases = { 280 releases = {
281 ('trusty', None): self.trusty_icehouse, 281 ('trusty', None): self.trusty_icehouse,
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index d93cff3..84e87f5 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -50,6 +50,13 @@ ERROR = logging.ERROR
50 50
51NOVA_CLIENT_VERSION = "2" 51NOVA_CLIENT_VERSION = "2"
52 52
53OPENSTACK_RELEASES_PAIRS = [
54 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
55 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
56 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
57 'xenial_pike', 'artful_pike', 'xenial_queens',
58 'bionic_queens']
59
53 60
54class OpenStackAmuletUtils(AmuletUtils): 61class OpenStackAmuletUtils(AmuletUtils):
55 """OpenStack amulet utilities. 62 """OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
63 super(OpenStackAmuletUtils, self).__init__(log_level) 70 super(OpenStackAmuletUtils, self).__init__(log_level)
64 71
65 def validate_endpoint_data(self, endpoints, admin_port, internal_port, 72 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
66 public_port, expected): 73 public_port, expected, openstack_release=None):
74 """Validate endpoint data. Pick the correct validator based on
75 OpenStack release. Expected data should be in the v2 format:
76 {
77 'id': id,
78 'region': region,
79 'adminurl': adminurl,
80 'internalurl': internalurl,
81 'publicurl': publicurl,
82 'service_id': service_id}
83
84 """
85 validation_function = self.validate_v2_endpoint_data
86 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
87 if openstack_release and openstack_release >= xenial_queens:
88 validation_function = self.validate_v3_endpoint_data
89 expected = {
90 'id': expected['id'],
91 'region': expected['region'],
92 'region_id': 'RegionOne',
93 'url': self.valid_url,
94 'interface': self.not_null,
95 'service_id': expected['service_id']}
96 return validation_function(endpoints, admin_port, internal_port,
97 public_port, expected)
98
99 def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
100 public_port, expected):
67 """Validate endpoint data. 101 """Validate endpoint data.
68 102
69 Validate actual endpoint data vs expected endpoint data. The ports 103 Validate actual endpoint data vs expected endpoint data. The ports
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
141 if len(found) != expected_num_eps: 175 if len(found) != expected_num_eps:
142 return 'Unexpected number of endpoints found' 176 return 'Unexpected number of endpoints found'
143 177
144 def validate_svc_catalog_endpoint_data(self, expected, actual): 178 def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
179 """Convert v2 endpoint data into v3.
180
181 {
182 'service_name1': [
183 {
184 'adminURL': adminURL,
185 'id': id,
186 'region': region,
187 'publicURL': publicURL,
188 'internalURL': internalURL
189 }],
190 'service_name2': [
191 {
192 'adminURL': adminURL,
193 'id': id,
194 'region': region,
195 'publicURL': publicURL,
196 'internalURL': internalURL
197 }],
198 }
199 """
200 self.log.warn("Endpoint ID and Region ID validation is limited to not "
201 "null checks after v2 to v3 conversion")
202 for svc in ep_data.keys():
203 assert len(ep_data[svc]) == 1, "Unknown data format"
204 svc_ep_data = ep_data[svc][0]
205 ep_data[svc] = [
206 {
207 'url': svc_ep_data['adminURL'],
208 'interface': 'admin',
209 'region': svc_ep_data['region'],
210 'region_id': self.not_null,
211 'id': self.not_null},
212 {
213 'url': svc_ep_data['publicURL'],
214 'interface': 'public',
215 'region': svc_ep_data['region'],
216 'region_id': self.not_null,
217 'id': self.not_null},
218 {
219 'url': svc_ep_data['internalURL'],
220 'interface': 'internal',
221 'region': svc_ep_data['region'],
222 'region_id': self.not_null,
223 'id': self.not_null}]
224 return ep_data
225
226 def validate_svc_catalog_endpoint_data(self, expected, actual,
227 openstack_release=None):
228 """Validate service catalog endpoint data. Pick the correct validator
229 for the OpenStack version. Expected data should be in the v2 format:
230 {
231 'service_name1': [
232 {
233 'adminURL': adminURL,
234 'id': id,
235 'region': region,
236 'publicURL': publicURL,
237 'internalURL': internalURL
238 }],
239 'service_name2': [
240 {
241 'adminURL': adminURL,
242 'id': id,
243 'region': region,
244 'publicURL': publicURL,
245 'internalURL': internalURL
246 }],
247 }
248
249 """
250 validation_function = self.validate_v2_svc_catalog_endpoint_data
251 xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
252 if openstack_release and openstack_release >= xenial_queens:
253 validation_function = self.validate_v3_svc_catalog_endpoint_data
254 expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
255 return validation_function(expected, actual)
256
257 def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
145 """Validate service catalog endpoint data. 258 """Validate service catalog endpoint data.
146 259
147 Validate a list of actual service catalog endpoints vs a list of 260 Validate a list of actual service catalog endpoints vs a list of
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
328 if rel.get('api_version') != str(api_version): 441 if rel.get('api_version') != str(api_version):
329 raise Exception("api_version not propagated through relation" 442 raise Exception("api_version not propagated through relation"
330 " data yet ('{}' != '{}')." 443 " data yet ('{}' != '{}')."
331 "".format(rel['api_version'], api_version)) 444 "".format(rel.get('api_version'), api_version))
332 445
333 def keystone_configure_api_version(self, sentry_relation_pairs, deployment, 446 def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
334 api_version): 447 api_version):
@@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
350 deployment._auto_wait_for_status() 463 deployment._auto_wait_for_status()
351 self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) 464 self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
352 465
353 def authenticate_cinder_admin(self, keystone_sentry, username, 466 def authenticate_cinder_admin(self, keystone, api_version=2):
354 password, tenant, api_version=2):
355 """Authenticates admin user with cinder.""" 467 """Authenticates admin user with cinder."""
356 # NOTE(beisner): cinder python client doesn't accept tokens. 468 self.log.debug('Authenticating cinder admin...')
357 keystone_ip = keystone_sentry.info['public-address']
358 ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
359 _clients = { 469 _clients = {
360 1: cinder_client.Client, 470 1: cinder_client.Client,
361 2: cinder_clientv2.Client} 471 2: cinder_clientv2.Client}
362 return _clients[api_version](username, password, tenant, ept) 472 return _clients[api_version](session=keystone.session)
363 473
364 def authenticate_keystone(self, keystone_ip, username, password, 474 def authenticate_keystone(self, keystone_ip, username, password,
365 api_version=False, admin_port=False, 475 api_version=False, admin_port=False,
@@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
367 project_domain_name=None, project_name=None): 477 project_domain_name=None, project_name=None):
368 """Authenticate with Keystone""" 478 """Authenticate with Keystone"""
369 self.log.debug('Authenticating with keystone...') 479 self.log.debug('Authenticating with keystone...')
370 port = 5000 480 if not api_version:
371 if admin_port: 481 api_version = 2
372 port = 35357 482 sess, auth = self.get_keystone_session(
373 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'), 483 keystone_ip=keystone_ip,
374 port) 484 username=username,
375 if not api_version or api_version == 2: 485 password=password,
376 ep = base_ep + "/v2.0" 486 api_version=api_version,
487 admin_port=admin_port,
488 user_domain_name=user_domain_name,
489 domain_name=domain_name,
490 project_domain_name=project_domain_name,
491 project_name=project_name
492 )
493 if api_version == 2:
494 client = keystone_client.Client(session=sess)
495 else:
496 client = keystone_client_v3.Client(session=sess)
497 # This populates the client.service_catalog
498 client.auth_ref = auth.get_access(sess)
499 return client
500
501 def get_keystone_session(self, keystone_ip, username, password,
502 api_version=False, admin_port=False,
503 user_domain_name=None, domain_name=None,
504 project_domain_name=None, project_name=None):
505 """Return a keystone session object"""
506 ep = self.get_keystone_endpoint(keystone_ip,
507 api_version=api_version,
508 admin_port=admin_port)
509 if api_version == 2:
377 auth = v2.Password( 510 auth = v2.Password(
378 username=username, 511 username=username,
379 password=password, 512 password=password,
@@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
381 auth_url=ep 514 auth_url=ep
382 ) 515 )
383 sess = keystone_session.Session(auth=auth) 516 sess = keystone_session.Session(auth=auth)
384 client = keystone_client.Client(session=sess)
385 # This populates the client.service_catalog
386 client.auth_ref = auth.get_access(sess)
387 return client
388 else: 517 else:
389 ep = base_ep + "/v3"
390 auth = v3.Password( 518 auth = v3.Password(
391 user_domain_name=user_domain_name, 519 user_domain_name=user_domain_name,
392 username=username, 520 username=username,
@@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
397 auth_url=ep 525 auth_url=ep
398 ) 526 )
399 sess = keystone_session.Session(auth=auth) 527 sess = keystone_session.Session(auth=auth)
400 client = keystone_client_v3.Client(session=sess) 528 return (sess, auth)
401 # This populates the client.service_catalog 529
402 client.auth_ref = auth.get_access(sess) 530 def get_keystone_endpoint(self, keystone_ip, api_version=None,
403 return client 531 admin_port=False):
532 """Return keystone endpoint"""
533 port = 5000
534 if admin_port:
535 port = 35357
536 base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
537 port)
538 if api_version == 2:
539 ep = base_ep + "/v2.0"
540 else:
541 ep = base_ep + "/v3"
542 return ep
543
544 def get_default_keystone_session(self, keystone_sentry,
545 openstack_release=None):
546 """Return a keystone session object and client object assuming standard
547 default settings
548
549 Example call in amulet tests:
550 self.keystone_session, self.keystone = u.get_default_keystone_session(
551 self.keystone_sentry,
552 openstack_release=self._get_openstack_release())
553
554 The session can then be used to auth other clients:
555 neutronclient.Client(session=session)
556 aodh_client.Client(session=session)
557 etc.
558 """
559 self.log.debug('Authenticating keystone admin...')
560 api_version = 2
561 client_class = keystone_client.Client
562 # 11 => xenial_queens
563 if openstack_release and openstack_release >= 11:
564 api_version = 3
565 client_class = keystone_client_v3.Client
566 keystone_ip = keystone_sentry.info['public-address']
567 session, auth = self.get_keystone_session(
568 keystone_ip,
569 api_version=api_version,
570 username='admin',
571 password='openstack',
572 project_name='admin',
573 user_domain_name='admin_domain',
574 project_domain_name='admin_domain')
575 client = client_class(session=session)
576 # This populates the client.service_catalog
577 client.auth_ref = auth.get_access(session)
578 return session, client
404 579
405 def authenticate_keystone_admin(self, keystone_sentry, user, password, 580 def authenticate_keystone_admin(self, keystone_sentry, user, password,
406 tenant=None, api_version=None, 581 tenant=None, api_version=None,
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 7ed1cc4..627d8f7 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -27,6 +27,7 @@ import glob
27import os 27import os
28import json 28import json
29import yaml 29import yaml
30import re
30import subprocess 31import subprocess
31import sys 32import sys
32import errno 33import errno
@@ -67,7 +68,7 @@ def cached(func):
67 @wraps(func) 68 @wraps(func)
68 def wrapper(*args, **kwargs): 69 def wrapper(*args, **kwargs):
69 global cache 70 global cache
70 key = str((func, args, kwargs)) 71 key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
71 try: 72 try:
72 return cache[key] 73 return cache[key]
73 except KeyError: 74 except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
289 self.implicit_save = True 290 self.implicit_save = True
290 self._prev_dict = None 291 self._prev_dict = None
291 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) 292 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
292 if os.path.exists(self.path): 293 if os.path.exists(self.path) and os.stat(self.path).st_size:
293 self.load_previous() 294 self.load_previous()
294 atexit(self._implicit_save) 295 atexit(self._implicit_save)
295 296
@@ -309,7 +310,11 @@ class Config(dict):
309 """ 310 """
310 self.path = path or self.path 311 self.path = path or self.path
311 with open(self.path) as f: 312 with open(self.path) as f:
312 self._prev_dict = json.load(f) 313 try:
314 self._prev_dict = json.load(f)
315 except ValueError as e:
316 log('Unable to parse previous config data - {}'.format(str(e)),
317 level=ERROR)
313 for k, v in copy.deepcopy(self._prev_dict).items(): 318 for k, v in copy.deepcopy(self._prev_dict).items():
314 if k not in self: 319 if k not in self:
315 self[k] = v 320 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
353 self.save() 358 self.save()
354 359
355 360
356@cached 361_cache_config = None
362
363
357def config(scope=None): 364def config(scope=None):
358 """Juju charm configuration""" 365 """
359 config_cmd_line = ['config-get'] 366 Get the juju charm configuration (scope==None) or individual key,
360 if scope is not None: 367 (scope=str). The returned value is a Python data structure loaded as
361 config_cmd_line.append(scope) 368 JSON from the Juju config command.
362 else: 369
363 config_cmd_line.append('--all') 370 :param scope: If set, return the value for the specified key.
364 config_cmd_line.append('--format=json') 371 :type scope: Optional[str]
372 :returns: Either the whole config as a Config, or a key from it.
373 :rtype: Any
374 """
375 global _cache_config
376 config_cmd_line = ['config-get', '--all', '--format=json']
365 try: 377 try:
366 config_data = json.loads( 378 # JSON Decode Exception for Python3.5+
367 subprocess.check_output(config_cmd_line).decode('UTF-8')) 379 exc_json = json.decoder.JSONDecodeError
380 except AttributeError:
381 # JSON Decode Exception for Python2.7 through Python3.4
382 exc_json = ValueError
383 try:
384 if _cache_config is None:
385 config_data = json.loads(
386 subprocess.check_output(config_cmd_line).decode('UTF-8'))
387 _cache_config = Config(config_data)
368 if scope is not None: 388 if scope is not None:
369 return config_data 389 return _cache_config.get(scope)
370 return Config(config_data) 390 return _cache_config
371 except ValueError: 391 except (exc_json, UnicodeDecodeError) as e:
392 log('Unable to parse output from config-get: config_cmd_line="{}" '
393 'message="{}"'
394 .format(config_cmd_line, str(e)), level=ERROR)
372 return None 395 return None
373 396
374 397
@@ -1043,7 +1066,6 @@ def juju_version():
1043 universal_newlines=True).strip() 1066 universal_newlines=True).strip()
1044 1067
1045 1068
1046@cached
1047def has_juju_version(minimum_version): 1069def has_juju_version(minimum_version):
1048 """Return True if the Juju version is at least the provided version""" 1070 """Return True if the Juju version is at least the provided version"""
1049 return LooseVersion(juju_version()) >= LooseVersion(minimum_version) 1071 return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
1103@translate_exc(from_exc=OSError, to_exc=NotImplementedError) 1125@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1104def network_get_primary_address(binding): 1126def network_get_primary_address(binding):
1105 ''' 1127 '''
1128 Deprecated since Juju 2.3; use network_get()
1129
1106 Retrieve the primary network address for a named binding 1130 Retrieve the primary network address for a named binding
1107 1131
1108 :param binding: string. The name of a relation of extra-binding 1132 :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
1123 return response 1147 return response
1124 1148
1125 1149
1126@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1127def network_get(endpoint, relation_id=None): 1150def network_get(endpoint, relation_id=None):
1128 """ 1151 """
1129 Retrieve the network details for a relation endpoint 1152 Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
1131 :param endpoint: string. The name of a relation endpoint 1154 :param endpoint: string. The name of a relation endpoint
1132 :param relation_id: int. The ID of the relation for the current context. 1155 :param relation_id: int. The ID of the relation for the current context.
1133 :return: dict. The loaded YAML output of the network-get query. 1156 :return: dict. The loaded YAML output of the network-get query.
1134 :raise: NotImplementedError if run on Juju < 2.1 1157 :raise: NotImplementedError if request not supported by the Juju version.
1135 """ 1158 """
1159 if not has_juju_version('2.2'):
1160 raise NotImplementedError(juju_version()) # earlier versions require --primary-address
1161 if relation_id and not has_juju_version('2.3'):
1162 raise NotImplementedError # 2.3 added the -r option
1163
1136 cmd = ['network-get', endpoint, '--format', 'yaml'] 1164 cmd = ['network-get', endpoint, '--format', 'yaml']
1137 if relation_id: 1165 if relation_id:
1138 cmd.append('-r') 1166 cmd.append('-r')
1139 cmd.append(relation_id) 1167 cmd.append(relation_id)
1140 try: 1168 response = subprocess.check_output(
1141 response = subprocess.check_output( 1169 cmd,
1142 cmd, 1170 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1143 stderr=subprocess.STDOUT).decode('UTF-8').strip()
1144 except CalledProcessError as e:
1145 # Early versions of Juju 2.0.x required the --primary-address argument.
1146 # We catch that condition here and raise NotImplementedError since
1147 # the requested semantics are not available - the caller can then
1148 # use the network_get_primary_address() method instead.
1149 if '--primary-address is currently required' in e.output.decode('UTF-8'):
1150 raise NotImplementedError
1151 raise
1152 return yaml.safe_load(response) 1171 return yaml.safe_load(response)
1153 1172
1154 1173
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
1204 1223
1205def ingress_address(rid=None, unit=None): 1224def ingress_address(rid=None, unit=None):
1206 """ 1225 """
1207 Retrieve the ingress-address from a relation when available. Otherwise, 1226 Retrieve the ingress-address from a relation when available.
1208 return the private-address. This function is to be used on the consuming 1227 Otherwise, return the private-address.
1209 side of the relation. 1228
1229 When used on the consuming side of the relation (unit is a remote
1230 unit), the ingress-address is the IP address that this unit needs
1231 to use to reach the provided service on the remote unit.
1232
1233 When used on the providing side of the relation (unit == local_unit()),
1234 the ingress-address is the IP address that is advertised to remote
1235 units on this relation. Remote units need to use this address to
1236 reach the local provided service on this unit.
1237
1238 Note that charms may document some other method to use in
1239 preference to the ingress_address(), such as an address provided
1240 on a different relation attribute or a service discovery mechanism.
1241 This allows charms to redirect inbound connections to their peers
1242 or different applications such as load balancers.
1210 1243
1211 Usage: 1244 Usage:
1212 addresses = [ingress_address(rid=u.rid, unit=u.unit) 1245 addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
1220 settings = relation_get(rid=rid, unit=unit) 1253 settings = relation_get(rid=rid, unit=unit)
1221 return (settings.get('ingress-address') or 1254 return (settings.get('ingress-address') or
1222 settings.get('private-address')) 1255 settings.get('private-address'))
1256
1257
1258def egress_subnets(rid=None, unit=None):
1259 """
1260 Retrieve the egress-subnets from a relation.
1261
1262 This function is to be used on the providing side of the
1263 relation, and provides the ranges of addresses that client
1264 connections may come from. The result is uninteresting on
1265 the consuming side of a relation (unit == local_unit()).
1266
1267 Returns a stable list of subnets in CIDR format.
1268 eg. ['192.168.1.0/24', '2001::F00F/128']
1269
1270 If egress-subnets is not available, falls back to using the published
1271 ingress-address, or finally private-address.
1272
1273 :param rid: string relation id
1274 :param unit: string unit name
1275 :side effect: calls relation_get
1276 :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
1277 """
1278 def _to_range(addr):
1279 if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
1280 addr += '/32'
1281 elif ':' in addr and '/' not in addr: # IPv6
1282 addr += '/128'
1283 return addr
1284
1285 settings = relation_get(rid=rid, unit=unit)
1286 if 'egress-subnets' in settings:
1287 return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
1288 if 'ingress-address' in settings:
1289 return [_to_range(settings['ingress-address'])]
1290 if 'private-address' in settings:
1291 return [_to_range(settings['private-address'])]
1292 return [] # Should never happen
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index fd14d60..322ab2a 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
993 return output 993 return output
994 994
995 995
996def modulo_distribution(modulo=3, wait=30): 996def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
997 """ Modulo distribution 997 """ Modulo distribution
998 998
999 This helper uses the unit number, a modulo value and a constant wait time 999 This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
1015 1015
1016 @param modulo: int The modulo number creates the group distribution 1016 @param modulo: int The modulo number creates the group distribution
1017 @param wait: int The constant time wait value 1017 @param wait: int The constant time wait value
1018 @param non_zero_wait: boolean Override unit % modulo == 0,
1019 return modulo * wait. Used to avoid collisions with
1020 leader nodes which are often given priority.
1018 @return: int Calculated time to wait for unit operation 1021 @return: int Calculated time to wait for unit operation
1019 """ 1022 """
1020 unit_number = int(local_unit().split('/')[1]) 1023 unit_number = int(local_unit().split('/')[1])
1021 return (unit_number % modulo) * wait 1024 calculated_wait_time = (unit_number % modulo) * wait
1025 if non_zero_wait and calculated_wait_time == 0:
1026 return modulo * wait
1027 else:
1028 return calculated_wait_time
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/tests/charmhelpers/core/services/base.py
+++ b/tests/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
307 """ 307 """
308 def __call__(self, manager, service_name, event_name): 308 def __call__(self, manager, service_name, event_name):
309 service = manager.get_service(service_name) 309 service = manager.get_service(service_name)
310 new_ports = service.get('ports', []) 310 # turn this generator into a list,
311 # as we'll be going over it multiple times
312 new_ports = list(service.get('ports', []))
311 port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) 313 port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
312 if os.path.exists(port_file): 314 if os.path.exists(port_file):
313 with open(port_file) as fp: 315 with open(port_file) as fp:
314 old_ports = fp.read().split(',') 316 old_ports = fp.read().split(',')
315 for old_port in old_ports: 317 for old_port in old_ports:
316 if bool(old_port): 318 if bool(old_port) and not self.ports_contains(old_port, new_ports):
317 old_port = int(old_port) 319 hookenv.close_port(old_port)
318 if old_port not in new_ports:
319 hookenv.close_port(old_port)
320 with open(port_file, 'w') as fp: 320 with open(port_file, 'w') as fp:
321 fp.write(','.join(str(port) for port in new_ports)) 321 fp.write(','.join(str(port) for port in new_ports))
322 for port in new_ports: 322 for port in new_ports:
323 # A port is either a number or 'ICMP'
324 protocol = 'TCP'
325 if str(port).upper() == 'ICMP':
326 protocol = 'ICMP'
323 if event_name == 'start': 327 if event_name == 'start':
324 hookenv.open_port(port) 328 hookenv.open_port(port, protocol)
325 elif event_name == 'stop': 329 elif event_name == 'stop':
326 hookenv.close_port(port) 330 hookenv.close_port(port, protocol)
331
332 def ports_contains(self, port, ports):
333 if not bool(port):
334 return False
335 if str(port).upper() != 'ICMP':
336 port = int(port)
337 return port in ports
327 338
328 339
329def service_stop(service_name): 340def service_stop(service_name):
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
index 6e413e3..1f188d8 100644
--- a/tests/charmhelpers/core/sysctl.py
+++ b/tests/charmhelpers/core/sysctl.py
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
31def create(sysctl_dict, sysctl_file): 31def create(sysctl_dict, sysctl_file):
32 """Creates a sysctl.conf file from a YAML associative array 32 """Creates a sysctl.conf file from a YAML associative array
33 33
34 :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" 34 :param sysctl_dict: a dict or YAML-formatted string of sysctl
35 options eg "{ 'kernel.max_pid': 1337 }"
35 :type sysctl_dict: str 36 :type sysctl_dict: str
36 :param sysctl_file: path to the sysctl file to be saved 37 :param sysctl_file: path to the sysctl file to be saved
37 :type sysctl_file: str or unicode 38 :type sysctl_file: str or unicode
38 :returns: None 39 :returns: None
39 """ 40 """
40 try: 41 if type(sysctl_dict) is not dict:
41 sysctl_dict_parsed = yaml.safe_load(sysctl_dict) 42 try:
42 except yaml.YAMLError: 43 sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
43 log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), 44 except yaml.YAMLError:
44 level=ERROR) 45 log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
45 return 46 level=ERROR)
47 return
48 else:
49 sysctl_dict_parsed = sysctl_dict
46 50
47 with open(sysctl_file, "w") as fd: 51 with open(sysctl_file, "w") as fd:
48 for key, value in sysctl_dict_parsed.items(): 52 for key, value in sysctl_dict_parsed.items():
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 6d7b494..ab55432 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -166,6 +166,10 @@ class Storage(object):
166 166
167 To support dicts, lists, integer, floats, and booleans values 167 To support dicts, lists, integer, floats, and booleans values
168 are automatically json encoded/decoded. 168 are automatically json encoded/decoded.
169
170 Note: to facilitate unit testing, ':memory:' can be passed as the
171 path parameter which causes sqlite3 to only build the db in memory.
172 This should only be used for testing purposes.
169 """ 173 """
170 def __init__(self, path=None): 174 def __init__(self, path=None):
171 self.db_path = path 175 self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
175 else: 179 else:
176 self.db_path = os.path.join( 180 self.db_path = os.path.join(
177 os.environ.get('CHARM_DIR', ''), '.unit-state.db') 181 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
178 with open(self.db_path, 'a') as f: 182 if self.db_path != ':memory:':
179 os.fchmod(f.fileno(), 0o600) 183 with open(self.db_path, 'a') as f:
184 os.fchmod(f.fileno(), 0o600)
180 self.conn = sqlite3.connect('%s' % self.db_path) 185 self.conn = sqlite3.connect('%s' % self.db_path)
181 self.cursor = self.conn.cursor() 186 self.cursor = self.conn.cursor()
182 self.revision = None 187 self.revision = None
diff --git a/tests/dev-basic-bionic-queens b/tests/gate-basic-bionic-queens
index fd50f59..fd50f59 100755
--- a/tests/dev-basic-bionic-queens
+++ b/tests/gate-basic-bionic-queens
diff --git a/tests/gate-basic-xenial-queens b/tests/gate-basic-xenial-queens
new file mode 100755
index 0000000..f0e8d5e
--- /dev/null
+++ b/tests/gate-basic-xenial-queens
@@ -0,0 +1,25 @@
1#!/usr/bin/env python
2#
3# Copyright 2016 Canonical Ltd
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17"""Amulet tests on a basic cinder backup deployment on xenial-queens."""
18
19from basic_deployment import CinderBackupBasicDeployment
20
21if __name__ == '__main__':
22 deployment = CinderBackupBasicDeployment(series='xenial',
23 openstack='cloud:xenial-queens',
24 source='cloud:xenial-updates/queens')
25 deployment.run_tests()
diff --git a/tox.ini b/tox.ini
index 4319064..09ca045 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
60deps = -r{toxinidir}/requirements.txt 60deps = -r{toxinidir}/requirements.txt
61 -r{toxinidir}/test-requirements.txt 61 -r{toxinidir}/test-requirements.txt
62commands = 62commands =
63 bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy 63 bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
64 64
65[testenv:func27-dfs] 65[testenv:func27-dfs]
66# Charm Functional Test 66# Charm Functional Test