summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJunaid Ali <junaidali@plumgrid.com>2016-04-18 05:43:41 -0700
committerJunaid Ali <junaidali@plumgrid.com>2016-04-18 05:43:41 -0700
commit1dee6f9daa01a6931191020465cc5968ab218fe2 (patch)
tree3a802ca14a699c466e5156e091acbbfb135c73a0
parentc9cefc745c79c975f205712cc9752a98d532615e (diff)
parent364ac8e1a02401c81c64a30150831a337f5f761f (diff)
Liberty changes
-rw-r--r--hooks/charmhelpers/contrib/amulet/deployment.py6
-rw-r--r--hooks/charmhelpers/contrib/amulet/utils.py449
-rw-r--r--hooks/charmhelpers/contrib/charmsupport/nrpe.py66
-rw-r--r--hooks/charmhelpers/contrib/mellanox/__init__.py0
-rw-r--r--hooks/charmhelpers/contrib/mellanox/infiniband.py151
-rw-r--r--hooks/charmhelpers/contrib/network/ip.py56
-rw-r--r--hooks/charmhelpers/contrib/network/ufw.py11
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py147
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py381
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py283
-rw-r--r--hooks/charmhelpers/contrib/openstack/neutron.py75
-rw-r--r--hooks/charmhelpers/contrib/openstack/templating.py32
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py448
-rw-r--r--hooks/charmhelpers/contrib/peerstorage/__init__.py9
-rw-r--r--hooks/charmhelpers/contrib/python/packages.py17
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py695
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/loopback.py10
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/utils.py5
-rw-r--r--hooks/charmhelpers/contrib/templating/jinja.py7
-rw-r--r--hooks/charmhelpers/core/hookenv.py200
-rw-r--r--hooks/charmhelpers/core/host.py301
-rw-r--r--hooks/charmhelpers/core/hugepage.py71
-rw-r--r--hooks/charmhelpers/core/kernel.py68
-rw-r--r--hooks/charmhelpers/core/services/helpers.py35
-rw-r--r--hooks/charmhelpers/core/strutils.py30
-rw-r--r--hooks/charmhelpers/core/templating.py29
-rw-r--r--hooks/charmhelpers/core/unitdata.py78
-rw-r--r--hooks/charmhelpers/fetch/__init__.py20
-rw-r--r--hooks/charmhelpers/fetch/archiveurl.py2
-rw-r--r--hooks/charmhelpers/fetch/bzrurl.py54
-rw-r--r--hooks/charmhelpers/fetch/giturl.py41
31 files changed, 3260 insertions, 517 deletions
diff --git a/hooks/charmhelpers/contrib/amulet/deployment.py b/hooks/charmhelpers/contrib/amulet/deployment.py
index 367d6b4..d451698 100644
--- a/hooks/charmhelpers/contrib/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/amulet/deployment.py
@@ -51,7 +51,8 @@ class AmuletDeployment(object):
51 if 'units' not in this_service: 51 if 'units' not in this_service:
52 this_service['units'] = 1 52 this_service['units'] = 1
53 53
54 self.d.add(this_service['name'], units=this_service['units']) 54 self.d.add(this_service['name'], units=this_service['units'],
55 constraints=this_service.get('constraints'))
55 56
56 for svc in other_services: 57 for svc in other_services:
57 if 'location' in svc: 58 if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
64 if 'units' not in svc: 65 if 'units' not in svc:
65 svc['units'] = 1 66 svc['units'] = 1
66 67
67 self.d.add(svc['name'], charm=branch_location, units=svc['units']) 68 self.d.add(svc['name'], charm=branch_location, units=svc['units'],
69 constraints=svc.get('constraints'))
68 70
69 def _add_relations(self, relations): 71 def _add_relations(self, relations):
70 """Add all of the relations for the services.""" 72 """Add all of the relations for the services."""
diff --git a/hooks/charmhelpers/contrib/amulet/utils.py b/hooks/charmhelpers/contrib/amulet/utils.py
index 3de26af..2591a9b 100644
--- a/hooks/charmhelpers/contrib/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/amulet/utils.py
@@ -14,17 +14,25 @@
14# You should have received a copy of the GNU Lesser General Public License 14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16 16
17import amulet
18import ConfigParser
19import distro_info
20import io 17import io
18import json
21import logging 19import logging
22import os 20import os
23import re 21import re
24import six 22import socket
23import subprocess
25import sys 24import sys
26import time 25import time
27import urlparse 26import uuid
27
28import amulet
29import distro_info
30import six
31from six.moves import configparser
32if six.PY3:
33 from urllib import parse as urlparse
34else:
35 import urlparse
28 36
29 37
30class AmuletUtils(object): 38class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
108 # /!\ DEPRECATION WARNING (beisner): 116 # /!\ DEPRECATION WARNING (beisner):
109 # New and existing tests should be rewritten to use 117 # New and existing tests should be rewritten to use
110 # validate_services_by_name() as it is aware of init systems. 118 # validate_services_by_name() as it is aware of init systems.
111 self.log.warn('/!\\ DEPRECATION WARNING: use ' 119 self.log.warn('DEPRECATION WARNING: use '
112 'validate_services_by_name instead of validate_services ' 120 'validate_services_by_name instead of validate_services '
113 'due to init system differences.') 121 'due to init system differences.')
114 122
@@ -142,19 +150,23 @@ class AmuletUtils(object):
142 150
143 for service_name in services_list: 151 for service_name in services_list:
144 if (self.ubuntu_releases.index(release) >= systemd_switch or 152 if (self.ubuntu_releases.index(release) >= systemd_switch or
145 service_name == "rabbitmq-server"): 153 service_name in ['rabbitmq-server', 'apache2']):
146 # init is systemd 154 # init is systemd (or regular sysv)
147 cmd = 'sudo service {} status'.format(service_name) 155 cmd = 'sudo service {} status'.format(service_name)
156 output, code = sentry_unit.run(cmd)
157 service_running = code == 0
148 elif self.ubuntu_releases.index(release) < systemd_switch: 158 elif self.ubuntu_releases.index(release) < systemd_switch:
149 # init is upstart 159 # init is upstart
150 cmd = 'sudo status {}'.format(service_name) 160 cmd = 'sudo status {}'.format(service_name)
161 output, code = sentry_unit.run(cmd)
162 service_running = code == 0 and "start/running" in output
151 163
152 output, code = sentry_unit.run(cmd)
153 self.log.debug('{} `{}` returned ' 164 self.log.debug('{} `{}` returned '
154 '{}'.format(sentry_unit.info['unit_name'], 165 '{}'.format(sentry_unit.info['unit_name'],
155 cmd, code)) 166 cmd, code))
156 if code != 0: 167 if not service_running:
157 return "command `{}` returned {}".format(cmd, str(code)) 168 return u"command `{}` returned {} {}".format(
169 cmd, output, str(code))
158 return None 170 return None
159 171
160 def _get_config(self, unit, filename): 172 def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
164 # NOTE(beisner): by default, ConfigParser does not handle options 176 # NOTE(beisner): by default, ConfigParser does not handle options
165 # with no value, such as the flags used in the mysql my.cnf file. 177 # with no value, such as the flags used in the mysql my.cnf file.
166 # https://bugs.python.org/issue7005 178 # https://bugs.python.org/issue7005
167 config = ConfigParser.ConfigParser(allow_no_value=True) 179 config = configparser.ConfigParser(allow_no_value=True)
168 config.readfp(io.StringIO(file_contents)) 180 config.readfp(io.StringIO(file_contents))
169 return config 181 return config
170 182
@@ -259,33 +271,52 @@ class AmuletUtils(object):
259 """Get last modification time of directory.""" 271 """Get last modification time of directory."""
260 return sentry_unit.directory_stat(directory)['mtime'] 272 return sentry_unit.directory_stat(directory)['mtime']
261 273
262 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): 274 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
263 """Get process' start time. 275 """Get start time of a process based on the last modification time
276 of the /proc/pid directory.
264 277
265 Determine start time of the process based on the last modification 278 :sentry_unit: The sentry unit to check for the service on
266 time of the /proc/pid directory. If pgrep_full is True, the process 279 :service: service name to look for in process table
267 name is matched against the full command line. 280 :pgrep_full: [Deprecated] Use full command line search mode with pgrep
268 """ 281 :returns: epoch time of service process start
269 if pgrep_full: 282 :param commands: list of bash commands
270 cmd = 'pgrep -o -f {}'.format(service) 283 :param sentry_units: list of sentry unit pointers
271 else: 284 :returns: None if successful; Failure message otherwise
272 cmd = 'pgrep -o {}'.format(service) 285 """
273 cmd = cmd + ' | grep -v pgrep || exit 0' 286 if pgrep_full is not None:
274 cmd_out = sentry_unit.run(cmd) 287 # /!\ DEPRECATION WARNING (beisner):
275 self.log.debug('CMDout: ' + str(cmd_out)) 288 # No longer implemented, as pidof is now used instead of pgrep.
276 if cmd_out[0]: 289 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
277 self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) 290 self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
278 proc_dir = '/proc/{}'.format(cmd_out[0].strip()) 291 'longer implemented re: lp 1474030.')
279 return self._get_dir_mtime(sentry_unit, proc_dir) 292
293 pid_list = self.get_process_id_list(sentry_unit, service)
294 pid = pid_list[0]
295 proc_dir = '/proc/{}'.format(pid)
296 self.log.debug('Pid for {} on {}: {}'.format(
297 service, sentry_unit.info['unit_name'], pid))
298
299 return self._get_dir_mtime(sentry_unit, proc_dir)
280 300
281 def service_restarted(self, sentry_unit, service, filename, 301 def service_restarted(self, sentry_unit, service, filename,
282 pgrep_full=False, sleep_time=20): 302 pgrep_full=None, sleep_time=20):
283 """Check if service was restarted. 303 """Check if service was restarted.
284 304
285 Compare a service's start time vs a file's last modification time 305 Compare a service's start time vs a file's last modification time
286 (such as a config file for that service) to determine if the service 306 (such as a config file for that service) to determine if the service
287 has been restarted. 307 has been restarted.
288 """ 308 """
309 # /!\ DEPRECATION WARNING (beisner):
310 # This method is prone to races in that no before-time is known.
311 # Use validate_service_config_changed instead.
312
313 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
314 # used instead of pgrep. pgrep_full is still passed through to ensure
315 # deprecation WARNS. lp1474030
316 self.log.warn('DEPRECATION WARNING: use '
317 'validate_service_config_changed instead of '
318 'service_restarted due to known races.')
319
289 time.sleep(sleep_time) 320 time.sleep(sleep_time)
290 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= 321 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
291 self._get_file_mtime(sentry_unit, filename)): 322 self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
294 return False 325 return False
295 326
296 def service_restarted_since(self, sentry_unit, mtime, service, 327 def service_restarted_since(self, sentry_unit, mtime, service,
297 pgrep_full=False, sleep_time=20, 328 pgrep_full=None, sleep_time=20,
298 retry_count=2): 329 retry_count=30, retry_sleep_time=10):
299 """Check if service was been started after a given time. 330 """Check if service was been started after a given time.
300 331
301 Args: 332 Args:
302 sentry_unit (sentry): The sentry unit to check for the service on 333 sentry_unit (sentry): The sentry unit to check for the service on
303 mtime (float): The epoch time to check against 334 mtime (float): The epoch time to check against
304 service (string): service name to look for in process table 335 service (string): service name to look for in process table
305 pgrep_full (boolean): Use full command line search mode with pgrep 336 pgrep_full: [Deprecated] Use full command line search mode with pgrep
306 sleep_time (int): Seconds to sleep before looking for process 337 sleep_time (int): Initial sleep time (s) before looking for file
307 retry_count (int): If service is not found, how many times to retry 338 retry_sleep_time (int): Time (s) to sleep between retries
339 retry_count (int): If file is not found, how many times to retry
308 340
309 Returns: 341 Returns:
310 bool: True if service found and its start time it newer than mtime, 342 bool: True if service found and its start time it newer than mtime,
311 False if service is older than mtime or if service was 343 False if service is older than mtime or if service was
312 not found. 344 not found.
313 """ 345 """
314 self.log.debug('Checking %s restarted since %s' % (service, mtime)) 346 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
347 # used instead of pgrep. pgrep_full is still passed through to ensure
348 # deprecation WARNS. lp1474030
349
350 unit_name = sentry_unit.info['unit_name']
351 self.log.debug('Checking that %s service restarted since %s on '
352 '%s' % (service, mtime, unit_name))
315 time.sleep(sleep_time) 353 time.sleep(sleep_time)
316 proc_start_time = self._get_proc_start_time(sentry_unit, service, 354 proc_start_time = None
317 pgrep_full) 355 tries = 0
318 while retry_count > 0 and not proc_start_time: 356 while tries <= retry_count and not proc_start_time:
319 self.log.debug('No pid file found for service %s, will retry %i ' 357 try:
320 'more times' % (service, retry_count)) 358 proc_start_time = self._get_proc_start_time(sentry_unit,
321 time.sleep(30) 359 service,
322 proc_start_time = self._get_proc_start_time(sentry_unit, service, 360 pgrep_full)
323 pgrep_full) 361 self.log.debug('Attempt {} to get {} proc start time on {} '
324 retry_count = retry_count - 1 362 'OK'.format(tries, service, unit_name))
363 except IOError as e:
364 # NOTE(beisner) - race avoidance, proc may not exist yet.
365 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
366 self.log.debug('Attempt {} to get {} proc start time on {} '
367 'failed\n{}'.format(tries, service,
368 unit_name, e))
369 time.sleep(retry_sleep_time)
370 tries += 1
325 371
326 if not proc_start_time: 372 if not proc_start_time:
327 self.log.warn('No proc start time found, assuming service did ' 373 self.log.warn('No proc start time found, assuming service did '
328 'not start') 374 'not start')
329 return False 375 return False
330 if proc_start_time >= mtime: 376 if proc_start_time >= mtime:
331 self.log.debug('proc start time is newer than provided mtime' 377 self.log.debug('Proc start time is newer than provided mtime'
332 '(%s >= %s)' % (proc_start_time, mtime)) 378 '(%s >= %s) on %s (OK)' % (proc_start_time,
379 mtime, unit_name))
333 return True 380 return True
334 else: 381 else:
335 self.log.warn('proc start time (%s) is older than provided mtime ' 382 self.log.warn('Proc start time (%s) is older than provided mtime '
336 '(%s), service did not restart' % (proc_start_time, 383 '(%s) on %s, service did not '
337 mtime)) 384 'restart' % (proc_start_time, mtime, unit_name))
338 return False 385 return False
339 386
340 def config_updated_since(self, sentry_unit, filename, mtime, 387 def config_updated_since(self, sentry_unit, filename, mtime,
341 sleep_time=20): 388 sleep_time=20, retry_count=30,
389 retry_sleep_time=10):
342 """Check if file was modified after a given time. 390 """Check if file was modified after a given time.
343 391
344 Args: 392 Args:
345 sentry_unit (sentry): The sentry unit to check the file mtime on 393 sentry_unit (sentry): The sentry unit to check the file mtime on
346 filename (string): The file to check mtime of 394 filename (string): The file to check mtime of
347 mtime (float): The epoch time to check against 395 mtime (float): The epoch time to check against
348 sleep_time (int): Seconds to sleep before looking for process 396 sleep_time (int): Initial sleep time (s) before looking for file
397 retry_sleep_time (int): Time (s) to sleep between retries
398 retry_count (int): If file is not found, how many times to retry
349 399
350 Returns: 400 Returns:
351 bool: True if file was modified more recently than mtime, False if 401 bool: True if file was modified more recently than mtime, False if
352 file was modified before mtime, 402 file was modified before mtime, or if file not found.
353 """ 403 """
354 self.log.debug('Checking %s updated since %s' % (filename, mtime)) 404 unit_name = sentry_unit.info['unit_name']
405 self.log.debug('Checking that %s updated since %s on '
406 '%s' % (filename, mtime, unit_name))
355 time.sleep(sleep_time) 407 time.sleep(sleep_time)
356 file_mtime = self._get_file_mtime(sentry_unit, filename) 408 file_mtime = None
409 tries = 0
410 while tries <= retry_count and not file_mtime:
411 try:
412 file_mtime = self._get_file_mtime(sentry_unit, filename)
413 self.log.debug('Attempt {} to get {} file mtime on {} '
414 'OK'.format(tries, filename, unit_name))
415 except IOError as e:
416 # NOTE(beisner) - race avoidance, file may not exist yet.
417 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
418 self.log.debug('Attempt {} to get {} file mtime on {} '
419 'failed\n{}'.format(tries, filename,
420 unit_name, e))
421 time.sleep(retry_sleep_time)
422 tries += 1
423
424 if not file_mtime:
425 self.log.warn('Could not determine file mtime, assuming '
426 'file does not exist')
427 return False
428
357 if file_mtime >= mtime: 429 if file_mtime >= mtime:
358 self.log.debug('File mtime is newer than provided mtime ' 430 self.log.debug('File mtime is newer than provided mtime '
359 '(%s >= %s)' % (file_mtime, mtime)) 431 '(%s >= %s) on %s (OK)' % (file_mtime,
432 mtime, unit_name))
360 return True 433 return True
361 else: 434 else:
362 self.log.warn('File mtime %s is older than provided mtime %s' 435 self.log.warn('File mtime is older than provided mtime'
363 % (file_mtime, mtime)) 436 '(%s < on %s) on %s' % (file_mtime,
437 mtime, unit_name))
364 return False 438 return False
365 439
366 def validate_service_config_changed(self, sentry_unit, mtime, service, 440 def validate_service_config_changed(self, sentry_unit, mtime, service,
367 filename, pgrep_full=False, 441 filename, pgrep_full=None,
368 sleep_time=20, retry_count=2): 442 sleep_time=20, retry_count=30,
443 retry_sleep_time=10):
369 """Check service and file were updated after mtime 444 """Check service and file were updated after mtime
370 445
371 Args: 446 Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
373 mtime (float): The epoch time to check against 448 mtime (float): The epoch time to check against
374 service (string): service name to look for in process table 449 service (string): service name to look for in process table
375 filename (string): The file to check mtime of 450 filename (string): The file to check mtime of
376 pgrep_full (boolean): Use full command line search mode with pgrep 451 pgrep_full: [Deprecated] Use full command line search mode with pgrep
377 sleep_time (int): Seconds to sleep before looking for process 452 sleep_time (int): Initial sleep in seconds to pass to test helpers
378 retry_count (int): If service is not found, how many times to retry 453 retry_count (int): If service is not found, how many times to retry
454 retry_sleep_time (int): Time in seconds to wait between retries
379 455
380 Typical Usage: 456 Typical Usage:
381 u = OpenStackAmuletUtils(ERROR) 457 u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
392 mtime, False if service is older than mtime or if service was 468 mtime, False if service is older than mtime or if service was
393 not found or if filename was modified before mtime. 469 not found or if filename was modified before mtime.
394 """ 470 """
395 self.log.debug('Checking %s restarted since %s' % (service, mtime)) 471
396 time.sleep(sleep_time) 472 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
397 service_restart = self.service_restarted_since(sentry_unit, mtime, 473 # used instead of pgrep. pgrep_full is still passed through to ensure
398 service, 474 # deprecation WARNS. lp1474030
399 pgrep_full=pgrep_full, 475
400 sleep_time=0, 476 service_restart = self.service_restarted_since(
401 retry_count=retry_count) 477 sentry_unit, mtime,
402 config_update = self.config_updated_since(sentry_unit, filename, mtime, 478 service,
403 sleep_time=0) 479 pgrep_full=pgrep_full,
480 sleep_time=sleep_time,
481 retry_count=retry_count,
482 retry_sleep_time=retry_sleep_time)
483
484 config_update = self.config_updated_since(
485 sentry_unit,
486 filename,
487 mtime,
488 sleep_time=sleep_time,
489 retry_count=retry_count,
490 retry_sleep_time=retry_sleep_time)
491
404 return service_restart and config_update 492 return service_restart and config_update
405 493
406 def get_sentry_time(self, sentry_unit): 494 def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@ class AmuletUtils(object):
418 """Return a list of all Ubuntu releases in order of release.""" 506 """Return a list of all Ubuntu releases in order of release."""
419 _d = distro_info.UbuntuDistroInfo() 507 _d = distro_info.UbuntuDistroInfo()
420 _release_list = _d.all 508 _release_list = _d.all
421 self.log.debug('Ubuntu release list: {}'.format(_release_list))
422 return _release_list 509 return _release_list
423 510
424 def file_to_url(self, file_rel_path): 511 def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
450 cmd, code, output)) 537 cmd, code, output))
451 return None 538 return None
452 539
453 def get_process_id_list(self, sentry_unit, process_name): 540 def get_process_id_list(self, sentry_unit, process_name,
541 expect_success=True):
454 """Get a list of process ID(s) from a single sentry juju unit 542 """Get a list of process ID(s) from a single sentry juju unit
455 for a single process name. 543 for a single process name.
456 544
457 :param sentry_unit: Pointer to amulet sentry instance (juju unit) 545 :param sentry_unit: Amulet sentry instance (juju unit)
458 :param process_name: Process name 546 :param process_name: Process name
547 :param expect_success: If False, expect the PID to be missing,
548 raise if it is present.
459 :returns: List of process IDs 549 :returns: List of process IDs
460 """ 550 """
461 cmd = 'pidof {}'.format(process_name) 551 cmd = 'pidof -x {}'.format(process_name)
552 if not expect_success:
553 cmd += " || exit 0 && exit 1"
462 output, code = sentry_unit.run(cmd) 554 output, code = sentry_unit.run(cmd)
463 if code != 0: 555 if code != 0:
464 msg = ('{} `{}` returned {} ' 556 msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
467 amulet.raise_status(amulet.FAIL, msg=msg) 559 amulet.raise_status(amulet.FAIL, msg=msg)
468 return str(output).split() 560 return str(output).split()
469 561
470 def get_unit_process_ids(self, unit_processes): 562 def get_unit_process_ids(self, unit_processes, expect_success=True):
471 """Construct a dict containing unit sentries, process names, and 563 """Construct a dict containing unit sentries, process names, and
472 process IDs.""" 564 process IDs.
565
566 :param unit_processes: A dictionary of Amulet sentry instance
567 to list of process names.
568 :param expect_success: if False expect the processes to not be
569 running, raise if they are.
570 :returns: Dictionary of Amulet sentry instance to dictionary
571 of process names to PIDs.
572 """
473 pid_dict = {} 573 pid_dict = {}
474 for sentry_unit, process_list in unit_processes.iteritems(): 574 for sentry_unit, process_list in six.iteritems(unit_processes):
475 pid_dict[sentry_unit] = {} 575 pid_dict[sentry_unit] = {}
476 for process in process_list: 576 for process in process_list:
477 pids = self.get_process_id_list(sentry_unit, process) 577 pids = self.get_process_id_list(
578 sentry_unit, process, expect_success=expect_success)
478 pid_dict[sentry_unit].update({process: pids}) 579 pid_dict[sentry_unit].update({process: pids})
479 return pid_dict 580 return pid_dict
480 581
@@ -488,7 +589,7 @@ class AmuletUtils(object):
488 return ('Unit count mismatch. expected, actual: {}, ' 589 return ('Unit count mismatch. expected, actual: {}, '
489 '{} '.format(len(expected), len(actual))) 590 '{} '.format(len(expected), len(actual)))
490 591
491 for (e_sentry, e_proc_names) in expected.iteritems(): 592 for (e_sentry, e_proc_names) in six.iteritems(expected):
492 e_sentry_name = e_sentry.info['unit_name'] 593 e_sentry_name = e_sentry.info['unit_name']
493 if e_sentry in actual.keys(): 594 if e_sentry in actual.keys():
494 a_proc_names = actual[e_sentry] 595 a_proc_names = actual[e_sentry]
@@ -507,11 +608,23 @@ class AmuletUtils(object):
507 '{}'.format(e_proc_name, a_proc_name)) 608 '{}'.format(e_proc_name, a_proc_name))
508 609
509 a_pids_length = len(a_pids) 610 a_pids_length = len(a_pids)
510 if e_pids_length != a_pids_length: 611 fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
511 return ('PID count mismatch. {} ({}) expected, actual: '
512 '{}, {} ({})'.format(e_sentry_name, e_proc_name, 612 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
513 e_pids_length, a_pids_length, 613 e_pids_length, a_pids_length,
514 a_pids)) 614 a_pids))
615
616 # If expected is not bool, ensure PID quantities match
617 if not isinstance(e_pids_length, bool) and \
618 a_pids_length != e_pids_length:
619 return fail_msg
620 # If expected is bool True, ensure 1 or more PIDs exist
621 elif isinstance(e_pids_length, bool) and \
622 e_pids_length is True and a_pids_length < 1:
623 return fail_msg
624 # If expected is bool False, ensure 0 PIDs exist
625 elif isinstance(e_pids_length, bool) and \
626 e_pids_length is False and a_pids_length != 0:
627 return fail_msg
515 else: 628 else:
516 self.log.debug('PID check OK: {} {} {}: ' 629 self.log.debug('PID check OK: {} {} {}: '
517 '{}'.format(e_sentry_name, e_proc_name, 630 '{}'.format(e_sentry_name, e_proc_name,
@@ -531,3 +644,175 @@ class AmuletUtils(object):
531 return 'Dicts within list are not identical' 644 return 'Dicts within list are not identical'
532 645
533 return None 646 return None
647
648 def validate_sectionless_conf(self, file_contents, expected):
649 """A crude conf parser. Useful to inspect configuration files which
650 do not have section headers (as would be necessary in order to use
651 the configparser). Such as openstack-dashboard or rabbitmq confs."""
652 for line in file_contents.split('\n'):
653 if '=' in line:
654 args = line.split('=')
655 if len(args) <= 1:
656 continue
657 key = args[0].strip()
658 value = args[1].strip()
659 if key in expected.keys():
660 if expected[key] != value:
661 msg = ('Config mismatch. Expected, actual: {}, '
662 '{}'.format(expected[key], value))
663 amulet.raise_status(amulet.FAIL, msg=msg)
664
665 def get_unit_hostnames(self, units):
666 """Return a dict of juju unit names to hostnames."""
667 host_names = {}
668 for unit in units:
669 host_names[unit.info['unit_name']] = \
670 str(unit.file_contents('/etc/hostname').strip())
671 self.log.debug('Unit host names: {}'.format(host_names))
672 return host_names
673
674 def run_cmd_unit(self, sentry_unit, cmd):
675 """Run a command on a unit, return the output and exit code."""
676 output, code = sentry_unit.run(cmd)
677 if code == 0:
678 self.log.debug('{} `{}` command returned {} '
679 '(OK)'.format(sentry_unit.info['unit_name'],
680 cmd, code))
681 else:
682 msg = ('{} `{}` command returned {} '
683 '{}'.format(sentry_unit.info['unit_name'],
684 cmd, code, output))
685 amulet.raise_status(amulet.FAIL, msg=msg)
686 return str(output), code
687
688 def file_exists_on_unit(self, sentry_unit, file_name):
689 """Check if a file exists on a unit."""
690 try:
691 sentry_unit.file_stat(file_name)
692 return True
693 except IOError:
694 return False
695 except Exception as e:
696 msg = 'Error checking file {}: {}'.format(file_name, e)
697 amulet.raise_status(amulet.FAIL, msg=msg)
698
699 def file_contents_safe(self, sentry_unit, file_name,
700 max_wait=60, fatal=False):
701 """Get file contents from a sentry unit. Wrap amulet file_contents
702 with retry logic to address races where a file checks as existing,
703 but no longer exists by the time file_contents is called.
704 Return None if file not found. Optionally raise if fatal is True."""
705 unit_name = sentry_unit.info['unit_name']
706 file_contents = False
707 tries = 0
708 while not file_contents and tries < (max_wait / 4):
709 try:
710 file_contents = sentry_unit.file_contents(file_name)
711 except IOError:
712 self.log.debug('Attempt {} to open file {} from {} '
713 'failed'.format(tries, file_name,
714 unit_name))
715 time.sleep(4)
716 tries += 1
717
718 if file_contents:
719 return file_contents
720 elif not fatal:
721 return None
722 elif fatal:
723 msg = 'Failed to get file contents from unit.'
724 amulet.raise_status(amulet.FAIL, msg)
725
726 def port_knock_tcp(self, host="localhost", port=22, timeout=15):
727 """Open a TCP socket to check for a listening sevice on a host.
728
729 :param host: host name or IP address, default to localhost
730 :param port: TCP port number, default to 22
731 :param timeout: Connect timeout, default to 15 seconds
732 :returns: True if successful, False if connect failed
733 """
734
735 # Resolve host name if possible
736 try:
737 connect_host = socket.gethostbyname(host)
738 host_human = "{} ({})".format(connect_host, host)
739 except socket.error as e:
740 self.log.warn('Unable to resolve address: '
741 '{} ({}) Trying anyway!'.format(host, e))
742 connect_host = host
743 host_human = connect_host
744
745 # Attempt socket connection
746 try:
747 knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
748 knock.settimeout(timeout)
749 knock.connect((connect_host, port))
750 knock.close()
751 self.log.debug('Socket connect OK for host '
752 '{} on port {}.'.format(host_human, port))
753 return True
754 except socket.error as e:
755 self.log.debug('Socket connect FAIL for'
756 ' {} port {} ({})'.format(host_human, port, e))
757 return False
758
759 def port_knock_units(self, sentry_units, port=22,
760 timeout=15, expect_success=True):
761 """Open a TCP socket to check for a listening sevice on each
762 listed juju unit.
763
764 :param sentry_units: list of sentry unit pointers
765 :param port: TCP port number, default to 22
766 :param timeout: Connect timeout, default to 15 seconds
767 :expect_success: True by default, set False to invert logic
768 :returns: None if successful, Failure message otherwise
769 """
770 for unit in sentry_units:
771 host = unit.info['public-address']
772 connected = self.port_knock_tcp(host, port, timeout)
773 if not connected and expect_success:
774 return 'Socket connect failed.'
775 elif connected and not expect_success:
776 return 'Socket connected unexpectedly.'
777
778 def get_uuid_epoch_stamp(self):
779 """Returns a stamp string based on uuid4 and epoch time. Useful in
780 generating test messages which need to be unique-ish."""
781 return '[{}-{}]'.format(uuid.uuid4(), time.time())
782
783# amulet juju action helpers:
784 def run_action(self, unit_sentry, action,
785 _check_output=subprocess.check_output):
786 """Run the named action on a given unit sentry.
787
788 _check_output parameter is used for dependency injection.
789
790 @return action_id.
791 """
792 unit_id = unit_sentry.info["unit_name"]
793 command = ["juju", "action", "do", "--format=json", unit_id, action]
794 self.log.info("Running command: %s\n" % " ".join(command))
795 output = _check_output(command, universal_newlines=True)
796 data = json.loads(output)
797 action_id = data[u'Action queued with id']
798 return action_id
799
800 def wait_on_action(self, action_id, _check_output=subprocess.check_output):
801 """Wait for a given action, returning if it completed or not.
802
803 _check_output parameter is used for dependency injection.
804 """
805 command = ["juju", "action", "fetch", "--format=json", "--wait=0",
806 action_id]
807 output = _check_output(command, universal_newlines=True)
808 data = json.loads(output)
809 return data.get(u"status") == "completed"
810
811 def status_get(self, unit):
812 """Return the current service status of this unit."""
813 raw_status, return_code = unit.run(
814 "status-get --format=json --include-data")
815 if return_code != 0:
816 return ("unknown", "")
817 status = json.loads(raw_status)
818 return (status["status"], status["message"])
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 95a79c2..2f24642 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -148,6 +148,13 @@ define service {{
148 self.description = description 148 self.description = description
149 self.check_cmd = self._locate_cmd(check_cmd) 149 self.check_cmd = self._locate_cmd(check_cmd)
150 150
151 def _get_check_filename(self):
152 return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
153
154 def _get_service_filename(self, hostname):
155 return os.path.join(NRPE.nagios_exportdir,
156 'service__{}_{}.cfg'.format(hostname, self.command))
157
151 def _locate_cmd(self, check_cmd): 158 def _locate_cmd(self, check_cmd):
152 search_path = ( 159 search_path = (
153 '/usr/lib/nagios/plugins', 160 '/usr/lib/nagios/plugins',
@@ -163,9 +170,21 @@ define service {{
163 log('Check command not found: {}'.format(parts[0])) 170 log('Check command not found: {}'.format(parts[0]))
164 return '' 171 return ''
165 172
173 def _remove_service_files(self):
174 if not os.path.exists(NRPE.nagios_exportdir):
175 return
176 for f in os.listdir(NRPE.nagios_exportdir):
177 if f.endswith('_{}.cfg'.format(self.command)):
178 os.remove(os.path.join(NRPE.nagios_exportdir, f))
179
180 def remove(self, hostname):
181 nrpe_check_file = self._get_check_filename()
182 if os.path.exists(nrpe_check_file):
183 os.remove(nrpe_check_file)
184 self._remove_service_files()
185
166 def write(self, nagios_context, hostname, nagios_servicegroups): 186 def write(self, nagios_context, hostname, nagios_servicegroups):
167 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( 187 nrpe_check_file = self._get_check_filename()
168 self.command)
169 with open(nrpe_check_file, 'w') as nrpe_check_config: 188 with open(nrpe_check_file, 'w') as nrpe_check_config:
170 nrpe_check_config.write("# check {}\n".format(self.shortname)) 189 nrpe_check_config.write("# check {}\n".format(self.shortname))
171 nrpe_check_config.write("command[{}]={}\n".format( 190 nrpe_check_config.write("command[{}]={}\n".format(
@@ -180,9 +199,7 @@ define service {{
180 199
181 def write_service_config(self, nagios_context, hostname, 200 def write_service_config(self, nagios_context, hostname,
182 nagios_servicegroups): 201 nagios_servicegroups):
183 for f in os.listdir(NRPE.nagios_exportdir): 202 self._remove_service_files()
184 if re.search('.*{}.cfg'.format(self.command), f):
185 os.remove(os.path.join(NRPE.nagios_exportdir, f))
186 203
187 templ_vars = { 204 templ_vars = {
188 'nagios_hostname': hostname, 205 'nagios_hostname': hostname,
@@ -192,8 +209,7 @@ define service {{
192 'command': self.command, 209 'command': self.command,
193 } 210 }
194 nrpe_service_text = Check.service_template.format(**templ_vars) 211 nrpe_service_text = Check.service_template.format(**templ_vars)
195 nrpe_service_file = '{}/service__{}_{}.cfg'.format( 212 nrpe_service_file = self._get_service_filename(hostname)
196 NRPE.nagios_exportdir, hostname, self.command)
197 with open(nrpe_service_file, 'w') as nrpe_service_config: 213 with open(nrpe_service_file, 'w') as nrpe_service_config:
198 nrpe_service_config.write(str(nrpe_service_text)) 214 nrpe_service_config.write(str(nrpe_service_text))
199 215
@@ -218,12 +234,32 @@ class NRPE(object):
218 if hostname: 234 if hostname:
219 self.hostname = hostname 235 self.hostname = hostname
220 else: 236 else:
221 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) 237 nagios_hostname = get_nagios_hostname()
238 if nagios_hostname:
239 self.hostname = nagios_hostname
240 else:
241 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
222 self.checks = [] 242 self.checks = []
223 243
224 def add_check(self, *args, **kwargs): 244 def add_check(self, *args, **kwargs):
225 self.checks.append(Check(*args, **kwargs)) 245 self.checks.append(Check(*args, **kwargs))
226 246
247 def remove_check(self, *args, **kwargs):
248 if kwargs.get('shortname') is None:
249 raise ValueError('shortname of check must be specified')
250
251 # Use sensible defaults if they're not specified - these are not
252 # actually used during removal, but they're required for constructing
253 # the Check object; check_disk is chosen because it's part of the
254 # nagios-plugins-basic package.
255 if kwargs.get('check_cmd') is None:
256 kwargs['check_cmd'] = 'check_disk'
257 if kwargs.get('description') is None:
258 kwargs['description'] = ''
259
260 check = Check(*args, **kwargs)
261 check.remove(self.hostname)
262
227 def write(self): 263 def write(self):
228 try: 264 try:
229 nagios_uid = pwd.getpwnam('nagios').pw_uid 265 nagios_uid = pwd.getpwnam('nagios').pw_uid
@@ -260,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'):
260 :param str relation_name: Name of relation nrpe sub joined to 296 :param str relation_name: Name of relation nrpe sub joined to
261 """ 297 """
262 for rel in relations_of_type(relation_name): 298 for rel in relations_of_type(relation_name):
263 if 'nagios_hostname' in rel: 299 if 'nagios_host_context' in rel:
264 return rel['nagios_host_context'] 300 return rel['nagios_host_context']
265 301
266 302
@@ -301,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name):
301 upstart_init = '/etc/init/%s.conf' % svc 337 upstart_init = '/etc/init/%s.conf' % svc
302 sysv_init = '/etc/init.d/%s' % svc 338 sysv_init = '/etc/init.d/%s' % svc
303 if os.path.exists(upstart_init): 339 if os.path.exists(upstart_init):
304 nrpe.add_check( 340 # Don't add a check for these services from neutron-gateway
305 shortname=svc, 341 if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
306 description='process check {%s}' % unit_name, 342 nrpe.add_check(
307 check_cmd='check_upstart_job %s' % svc 343 shortname=svc,
308 ) 344 description='process check {%s}' % unit_name,
345 check_cmd='check_upstart_job %s' % svc
346 )
309 elif os.path.exists(sysv_init): 347 elif os.path.exists(sysv_init):
310 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc 348 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
311 cron_file = ('*/5 * * * * root ' 349 cron_file = ('*/5 * * * * root '
diff --git a/hooks/charmhelpers/contrib/mellanox/__init__.py b/hooks/charmhelpers/contrib/mellanox/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/hooks/charmhelpers/contrib/mellanox/__init__.py
diff --git a/hooks/charmhelpers/contrib/mellanox/infiniband.py b/hooks/charmhelpers/contrib/mellanox/infiniband.py
new file mode 100644
index 0000000..8ff2f71
--- /dev/null
+++ b/hooks/charmhelpers/contrib/mellanox/infiniband.py
@@ -0,0 +1,151 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4# Copyright 2014-2015 Canonical Limited.
5#
6# This file is part of charm-helpers.
7#
8# charm-helpers is free software: you can redistribute it and/or modify
9# it under the terms of the GNU Lesser General Public License version 3 as
10# published by the Free Software Foundation.
11#
12# charm-helpers is distributed in the hope that it will be useful,
13# but WITHOUT ANY WARRANTY; without even the implied warranty of
14# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15# GNU Lesser General Public License for more details.
16#
17# You should have received a copy of the GNU Lesser General Public License
18# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
19
20
21__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
22
23from charmhelpers.fetch import (
24 apt_install,
25 apt_update,
26)
27
28from charmhelpers.core.hookenv import (
29 log,
30 INFO,
31)
32
33try:
34 from netifaces import interfaces as network_interfaces
35except ImportError:
36 apt_install('python-netifaces')
37 from netifaces import interfaces as network_interfaces
38
39import os
40import re
41import subprocess
42
43from charmhelpers.core.kernel import modprobe
44
45REQUIRED_MODULES = (
46 "mlx4_ib",
47 "mlx4_en",
48 "mlx4_core",
49 "ib_ipath",
50 "ib_mthca",
51 "ib_srpt",
52 "ib_srp",
53 "ib_ucm",
54 "ib_isert",
55 "ib_iser",
56 "ib_ipoib",
57 "ib_cm",
58 "ib_uverbs",
59 "ib_umad",
60 "ib_sa",
61 "ib_mad",
62 "ib_core",
63 "ib_addr",
64 "rdma_ucm",
65)
66
67REQUIRED_PACKAGES = (
68 "ibutils",
69 "infiniband-diags",
70 "ibverbs-utils",
71)
72
73IPOIB_DRIVERS = (
74 "ib_ipoib",
75)
76
77ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
78
79
80class DeviceInfo(object):
81 pass
82
83
84def install_packages():
85 apt_update()
86 apt_install(REQUIRED_PACKAGES, fatal=True)
87
88
89def load_modules():
90 for module in REQUIRED_MODULES:
91 modprobe(module, persist=True)
92
93
94def is_enabled():
95 """Check if infiniband is loaded on the system"""
96 return os.path.exists(ABI_VERSION_FILE)
97
98
99def stat():
100 """Return full output of ibstat"""
101 return subprocess.check_output(["ibstat"])
102
103
104def devices():
105 """Returns a list of IB enabled devices"""
106 return subprocess.check_output(['ibstat', '-l']).splitlines()
107
108
109def device_info(device):
110 """Returns a DeviceInfo object with the current device settings"""
111
112 status = subprocess.check_output([
113 'ibstat', device, '-s']).splitlines()
114
115 regexes = {
116 "CA type: (.*)": "device_type",
117 "Number of ports: (.*)": "num_ports",
118 "Firmware version: (.*)": "fw_ver",
119 "Hardware version: (.*)": "hw_ver",
120 "Node GUID: (.*)": "node_guid",
121 "System image GUID: (.*)": "sys_guid",
122 }
123
124 device = DeviceInfo()
125
126 for line in status:
127 for expression, key in regexes.items():
128 matches = re.search(expression, line)
129 if matches:
130 setattr(device, key, matches.group(1))
131
132 return device
133
134
135def ipoib_interfaces():
136 """Return a list of IPOIB capable ethernet interfaces"""
137 interfaces = []
138
139 for interface in network_interfaces():
140 try:
141 driver = re.search('^driver: (.+)$', subprocess.check_output([
142 'ethtool', '-i',
143 interface]), re.M).group(1)
144
145 if driver in IPOIB_DRIVERS:
146 interfaces.append(interface)
147 except:
148 log("Skipping interface %s" % interface, level=INFO)
149 continue
150
151 return interfaces
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index fff6d5c..998f00c 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -23,7 +23,7 @@ import socket
23from functools import partial 23from functools import partial
24 24
25from charmhelpers.core.hookenv import unit_get 25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install 26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import ( 27from charmhelpers.core.hookenv import (
28 log, 28 log,
29 WARNING, 29 WARNING,
@@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
32try: 32try:
33 import netifaces 33 import netifaces
34except ImportError: 34except ImportError:
35 apt_install('python-netifaces') 35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
36 import netifaces 37 import netifaces
37 38
38try: 39try:
39 import netaddr 40 import netaddr
40except ImportError: 41except ImportError:
41 apt_install('python-netaddr') 42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
42 import netaddr 44 import netaddr
43 45
44 46
@@ -51,7 +53,7 @@ def _validate_cidr(network):
51 53
52 54
53def no_ip_found_error_out(network): 55def no_ip_found_error_out(network):
54 errmsg = ("No IP address found in network: %s" % network) 56 errmsg = ("No IP address found in network(s): %s" % network)
55 raise ValueError(errmsg) 57 raise ValueError(errmsg)
56 58
57 59
@@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
59 """Get an IPv4 or IPv6 address within the network from the host. 61 """Get an IPv4 or IPv6 address within the network from the host.
60 62
61 :param network (str): CIDR presentation format. For example, 63 :param network (str): CIDR presentation format. For example,
62 '192.168.1.0/24'. 64 '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
63 :param fallback (str): If no address is found, return fallback. 65 :param fallback (str): If no address is found, return fallback.
64 :param fatal (boolean): If no address is found, fallback is not 66 :param fatal (boolean): If no address is found, fallback is not
65 set and fatal is True then exit(1). 67 set and fatal is True then exit(1).
@@ -73,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
73 else: 75 else:
74 return None 76 return None
75 77
76 _validate_cidr(network) 78 networks = network.split() or [network]
77 network = netaddr.IPNetwork(network) 79 for network in networks:
78 for iface in netifaces.interfaces(): 80 _validate_cidr(network)
79 addresses = netifaces.ifaddresses(iface) 81 network = netaddr.IPNetwork(network)
80 if network.version == 4 and netifaces.AF_INET in addresses: 82 for iface in netifaces.interfaces():
81 addr = addresses[netifaces.AF_INET][0]['addr'] 83 addresses = netifaces.ifaddresses(iface)
82 netmask = addresses[netifaces.AF_INET][0]['netmask'] 84 if network.version == 4 and netifaces.AF_INET in addresses:
83 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) 85 addr = addresses[netifaces.AF_INET][0]['addr']
84 if cidr in network: 86 netmask = addresses[netifaces.AF_INET][0]['netmask']
85 return str(cidr.ip) 87 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
86 88 if cidr in network:
87 if network.version == 6 and netifaces.AF_INET6 in addresses: 89 return str(cidr.ip)
88 for addr in addresses[netifaces.AF_INET6]: 90
89 if not addr['addr'].startswith('fe80'): 91 if network.version == 6 and netifaces.AF_INET6 in addresses:
90 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], 92 for addr in addresses[netifaces.AF_INET6]:
91 addr['netmask'])) 93 if not addr['addr'].startswith('fe80'):
92 if cidr in network: 94 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
93 return str(cidr.ip) 95 addr['netmask']))
96 if cidr in network:
97 return str(cidr.ip)
94 98
95 if fallback is not None: 99 if fallback is not None:
96 return fallback 100 return fallback
@@ -435,8 +439,12 @@ def get_hostname(address, fqdn=True):
435 439
436 rev = dns.reversename.from_address(address) 440 rev = dns.reversename.from_address(address)
437 result = ns_query(rev) 441 result = ns_query(rev)
442
438 if not result: 443 if not result:
439 return None 444 try:
445 result = socket.gethostbyaddr(address)[0]
446 except:
447 return None
440 else: 448 else:
441 result = address 449 result = address
442 450
diff --git a/hooks/charmhelpers/contrib/network/ufw.py b/hooks/charmhelpers/contrib/network/ufw.py
index d40110d..b65d963 100644
--- a/hooks/charmhelpers/contrib/network/ufw.py
+++ b/hooks/charmhelpers/contrib/network/ufw.py
@@ -40,7 +40,9 @@ Examples:
40import re 40import re
41import os 41import os
42import subprocess 42import subprocess
43
43from charmhelpers.core import hookenv 44from charmhelpers.core import hookenv
45from charmhelpers.core.kernel import modprobe, is_module_loaded
44 46
45__author__ = "Felipe Reyes <felipe.reyes@canonical.com>" 47__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
46 48
@@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
82 # do we have IPv6 in the machine? 84 # do we have IPv6 in the machine?
83 if os.path.isdir('/proc/sys/net/ipv6'): 85 if os.path.isdir('/proc/sys/net/ipv6'):
84 # is ip6tables kernel module loaded? 86 # is ip6tables kernel module loaded?
85 lsmod = subprocess.check_output(['lsmod'], universal_newlines=True) 87 if not is_module_loaded('ip6_tables'):
86 matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
87 if len(matches) == 0:
88 # ip6tables support isn't complete, let's try to load it 88 # ip6tables support isn't complete, let's try to load it
89 try: 89 try:
90 subprocess.check_output(['modprobe', 'ip6_tables'], 90 modprobe('ip6_tables')
91 universal_newlines=True) 91 # great, we can load the module
92 # great, we could load the module
93 return True 92 return True
94 except subprocess.CalledProcessError as ex: 93 except subprocess.CalledProcessError as ex:
95 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, 94 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index b01e6cb..d2ede32 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -14,12 +14,18 @@
14# You should have received a copy of the GNU Lesser General Public License 14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16 16
17import logging
18import re
19import sys
17import six 20import six
18from collections import OrderedDict 21from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import ( 22from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment 23 AmuletDeployment
21) 24)
22 25
26DEBUG = logging.DEBUG
27ERROR = logging.ERROR
28
23 29
24class OpenStackAmuletDeployment(AmuletDeployment): 30class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment. 31 """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
28 that is specifically for use by OpenStack charms. 34 that is specifically for use by OpenStack charms.
29 """ 35 """
30 36
31 def __init__(self, series=None, openstack=None, source=None, stable=True): 37 def __init__(self, series=None, openstack=None, source=None,
38 stable=True, log_level=DEBUG):
32 """Initialize the deployment environment.""" 39 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series) 40 super(OpenStackAmuletDeployment, self).__init__(series)
41 self.log = self.get_logger(level=log_level)
42 self.log.info('OpenStackAmuletDeployment: init')
34 self.openstack = openstack 43 self.openstack = openstack
35 self.source = source 44 self.source = source
36 self.stable = stable 45 self.stable = stable
@@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
38 # out. 47 # out.
39 self.current_next = "trusty" 48 self.current_next = "trusty"
40 49
50 def get_logger(self, name="deployment-logger", level=logging.DEBUG):
51 """Get a logger object that will log to stdout."""
52 log = logging
53 logger = log.getLogger(name)
54 fmt = log.Formatter("%(asctime)s %(funcName)s "
55 "%(levelname)s: %(message)s")
56
57 handler = log.StreamHandler(stream=sys.stdout)
58 handler.setLevel(level)
59 handler.setFormatter(fmt)
60
61 logger.addHandler(handler)
62 logger.setLevel(level)
63
64 return logger
65
41 def _determine_branch_locations(self, other_services): 66 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services. 67 """Determine the branch locations for the other services.
43 68
44 Determine if the local branch being tested is derived from its 69 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding 70 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services.""" 71 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb'] 72
73 self.log.info('OpenStackAmuletDeployment: determine branch locations')
74
75 # Charms outside the lp:~openstack-charmers namespace
76 base_charms = ['mysql', 'mongodb', 'nrpe']
77
78 # Force these charms to current series even when using an older series.
79 # ie. Use trusty/nrpe even when series is precise, as the P charm
80 # does not possess the necessary external master config and hooks.
81 force_series_current = ['nrpe']
48 82
49 if self.series in ['precise', 'trusty']: 83 if self.series in ['precise', 'trusty']:
50 base_series = self.series 84 base_series = self.series
51 else: 85 else:
52 base_series = self.current_next 86 base_series = self.current_next
53 87
54 if self.stable: 88 for svc in other_services:
55 for svc in other_services: 89 if svc['name'] in force_series_current:
90 base_series = self.current_next
91 # If a location has been explicitly set, use it
92 if svc.get('location'):
93 continue
94 if self.stable:
56 temp = 'lp:charms/{}/{}' 95 temp = 'lp:charms/{}/{}'
57 svc['location'] = temp.format(base_series, 96 svc['location'] = temp.format(base_series,
58 svc['name']) 97 svc['name'])
59 else: 98 else:
60 for svc in other_services:
61 if svc['name'] in base_charms: 99 if svc['name'] in base_charms:
62 temp = 'lp:charms/{}/{}' 100 temp = 'lp:charms/{}/{}'
63 svc['location'] = temp.format(base_series, 101 svc['location'] = temp.format(base_series,
@@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
66 temp = 'lp:~openstack-charmers/charms/{}/{}/next' 104 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
67 svc['location'] = temp.format(self.current_next, 105 svc['location'] = temp.format(self.current_next,
68 svc['name']) 106 svc['name'])
107
69 return other_services 108 return other_services
70 109
71 def _add_services(self, this_service, other_services): 110 def _add_services(self, this_service, other_services):
72 """Add services to the deployment and set openstack-origin/source.""" 111 """Add services to the deployment and set openstack-origin/source."""
112 self.log.info('OpenStackAmuletDeployment: adding services')
113
73 other_services = self._determine_branch_locations(other_services) 114 other_services = self._determine_branch_locations(other_services)
74 115
75 super(OpenStackAmuletDeployment, self)._add_services(this_service, 116 super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -77,29 +118,103 @@ class OpenStackAmuletDeployment(AmuletDeployment):
77 118
78 services = other_services 119 services = other_services
79 services.append(this_service) 120 services.append(this_service)
121
122 # Charms which should use the source config option
80 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 123 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
81 'ceph-osd', 'ceph-radosgw'] 124 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
82 # Most OpenStack subordinate charms do not expose an origin option 125
83 # as that is controlled by the principle. 126 # Charms which can not use openstack-origin, ie. many subordinates
84 ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] 127 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
128 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
129 'cinder-backup']
85 130
86 if self.openstack: 131 if self.openstack:
87 for svc in services: 132 for svc in services:
88 if svc['name'] not in use_source + ignore: 133 if svc['name'] not in use_source + no_origin:
89 config = {'openstack-origin': self.openstack} 134 config = {'openstack-origin': self.openstack}
90 self.d.configure(svc['name'], config) 135 self.d.configure(svc['name'], config)
91 136
92 if self.source: 137 if self.source:
93 for svc in services: 138 for svc in services:
94 if svc['name'] in use_source and svc['name'] not in ignore: 139 if svc['name'] in use_source and svc['name'] not in no_origin:
95 config = {'source': self.source} 140 config = {'source': self.source}
96 self.d.configure(svc['name'], config) 141 self.d.configure(svc['name'], config)
97 142
98 def _configure_services(self, configs): 143 def _configure_services(self, configs):
99 """Configure all of the services.""" 144 """Configure all of the services."""
145 self.log.info('OpenStackAmuletDeployment: configure services')
100 for service, config in six.iteritems(configs): 146 for service, config in six.iteritems(configs):
101 self.d.configure(service, config) 147 self.d.configure(service, config)
102 148
149 def _auto_wait_for_status(self, message=None, exclude_services=None,
150 include_only=None, timeout=1800):
151 """Wait for all units to have a specific extended status, except
152 for any defined as excluded. Unless specified via message, any
153 status containing any case of 'ready' will be considered a match.
154
155 Examples of message usage:
156
157 Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
158 message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
159
160 Wait for all units to reach this status (exact match):
161 message = re.compile('^Unit is ready and clustered$')
162
163 Wait for all units to reach any one of these (exact match):
164 message = re.compile('Unit is ready|OK|Ready')
165
166 Wait for at least one unit to reach this status (exact match):
167 message = {'ready'}
168
169 See Amulet's sentry.wait_for_messages() for message usage detail.
170 https://github.com/juju/amulet/blob/master/amulet/sentry.py
171
172 :param message: Expected status match
173 :param exclude_services: List of juju service names to ignore,
174 not to be used in conjunction with include_only.
175 :param include_only: List of juju service names to exclusively check,
176 not to be used in conjunction with exclude_services.
177 :param timeout: Maximum time in seconds to wait for status match
178 :returns: None. Raises if timeout is hit.
179 """
180 self.log.info('Waiting for extended status on units...')
181
182 all_services = self.d.services.keys()
183
184 if exclude_services and include_only:
185 raise ValueError('exclude_services can not be used '
186 'with include_only')
187
188 if message:
189 if isinstance(message, re._pattern_type):
190 match = message.pattern
191 else:
192 match = message
193
194 self.log.debug('Custom extended status wait match: '
195 '{}'.format(match))
196 else:
197 self.log.debug('Default extended status wait match: contains '
198 'READY (case-insensitive)')
199 message = re.compile('.*ready.*', re.IGNORECASE)
200
201 if exclude_services:
202 self.log.debug('Excluding services from extended status match: '
203 '{}'.format(exclude_services))
204 else:
205 exclude_services = []
206
207 if include_only:
208 services = include_only
209 else:
210 services = list(set(all_services) - set(exclude_services))
211
212 self.log.debug('Waiting up to {}s for extended status on services: '
213 '{}'.format(timeout, services))
214 service_messages = {service: message for service in services}
215 self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
216 self.log.info('OK')
217
103 def _get_openstack_release(self): 218 def _get_openstack_release(self):
104 """Get openstack release. 219 """Get openstack release.
105 220
@@ -111,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
111 self.precise_havana, self.precise_icehouse, 226 self.precise_havana, self.precise_icehouse,
112 self.trusty_icehouse, self.trusty_juno, self.utopic_juno, 227 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
113 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, 228 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
114 self.wily_liberty) = range(12) 229 self.wily_liberty, self.trusty_mitaka,
230 self.xenial_mitaka) = range(14)
115 231
116 releases = { 232 releases = {
117 ('precise', None): self.precise_essex, 233 ('precise', None): self.precise_essex,
@@ -123,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
123 ('trusty', 'cloud:trusty-juno'): self.trusty_juno, 239 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
124 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, 240 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
125 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, 241 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
242 ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
126 ('utopic', None): self.utopic_juno, 243 ('utopic', None): self.utopic_juno,
127 ('vivid', None): self.vivid_kilo, 244 ('vivid', None): self.vivid_kilo,
128 ('wily', None): self.wily_liberty} 245 ('wily', None): self.wily_liberty,
246 ('xenial', None): self.xenial_mitaka}
129 return releases[(self.series, self.openstack)] 247 return releases[(self.series, self.openstack)]
130 248
131 def _get_openstack_release_string(self): 249 def _get_openstack_release_string(self):
@@ -142,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
142 ('utopic', 'juno'), 260 ('utopic', 'juno'),
143 ('vivid', 'kilo'), 261 ('vivid', 'kilo'),
144 ('wily', 'liberty'), 262 ('wily', 'liberty'),
263 ('xenial', 'mitaka'),
145 ]) 264 ])
146 if self.openstack: 265 if self.openstack:
147 os_origin = self.openstack.split(':')[1] 266 os_origin = self.openstack.split(':')[1]
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 03f7927..388b60e 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -18,6 +18,7 @@ import amulet
18import json 18import json
19import logging 19import logging
20import os 20import os
21import re
21import six 22import six
22import time 23import time
23import urllib 24import urllib
@@ -27,6 +28,7 @@ import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client 28import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client 29import keystoneclient.v2_0 as keystone_client
29import novaclient.v1_1.client as nova_client 30import novaclient.v1_1.client as nova_client
31import pika
30import swiftclient 32import swiftclient
31 33
32from charmhelpers.contrib.amulet.utils import ( 34from charmhelpers.contrib.amulet.utils import (
@@ -602,3 +604,382 @@ class OpenStackAmuletUtils(AmuletUtils):
602 self.log.debug('Ceph {} samples (OK): ' 604 self.log.debug('Ceph {} samples (OK): '
603 '{}'.format(sample_type, samples)) 605 '{}'.format(sample_type, samples))
604 return None 606 return None
607
608 # rabbitmq/amqp specific helpers:
609
610 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
611 """Wait for rmq units extended status to show cluster readiness,
612 after an optional initial sleep period. Initial sleep is likely
613 necessary to be effective following a config change, as status
614 message may not instantly update to non-ready."""
615
616 if init_sleep:
617 time.sleep(init_sleep)
618
619 message = re.compile('^Unit is ready and clustered$')
620 deployment._auto_wait_for_status(message=message,
621 timeout=timeout,
622 include_only=['rabbitmq-server'])
623
624 def add_rmq_test_user(self, sentry_units,
625 username="testuser1", password="changeme"):
626 """Add a test user via the first rmq juju unit, check connection as
627 the new user against all sentry units.
628
629 :param sentry_units: list of sentry unit pointers
630 :param username: amqp user name, default to testuser1
631 :param password: amqp user password
632 :returns: None if successful. Raise on error.
633 """
634 self.log.debug('Adding rmq user ({})...'.format(username))
635
636 # Check that user does not already exist
637 cmd_user_list = 'rabbitmqctl list_users'
638 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
639 if username in output:
640 self.log.warning('User ({}) already exists, returning '
641 'gracefully.'.format(username))
642 return
643
644 perms = '".*" ".*" ".*"'
645 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
646 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
647
648 # Add user via first unit
649 for cmd in cmds:
650 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
651
652 # Check connection against the other sentry_units
653 self.log.debug('Checking user connect against units...')
654 for sentry_unit in sentry_units:
655 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
656 username=username,
657 password=password)
658 connection.close()
659
660 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
661 """Delete a rabbitmq user via the first rmq juju unit.
662
663 :param sentry_units: list of sentry unit pointers
664 :param username: amqp user name, default to testuser1
665 :param password: amqp user password
666 :returns: None if successful or no such user.
667 """
668 self.log.debug('Deleting rmq user ({})...'.format(username))
669
670 # Check that the user exists
671 cmd_user_list = 'rabbitmqctl list_users'
672 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
673
674 if username not in output:
675 self.log.warning('User ({}) does not exist, returning '
676 'gracefully.'.format(username))
677 return
678
679 # Delete the user
680 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
681 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
682
683 def get_rmq_cluster_status(self, sentry_unit):
684 """Execute rabbitmq cluster status command on a unit and return
685 the full output.
686
687 :param sentry_unit: sentry unit pointer
688 :returns: String containing console output of cluster status command
689 """
690 cmd = 'rabbitmqctl cluster_status'
691 output, _ = self.run_cmd_unit(sentry_unit, cmd)
692 self.log.debug('{} cluster_status:\n{}'.format(
693 sentry_unit.info['unit_name'], output))
694 return str(output)
695
696 def get_rmq_cluster_running_nodes(self, sentry_unit):
697 """Parse rabbitmqctl cluster_status output string, return list of
698 running rabbitmq cluster nodes.
699
700 :param sentry_unit: sentry unit pointer
701 :returns: List containing node names of running nodes
702 """
703 # NOTE(beisner): rabbitmqctl cluster_status output is not
704 # json-parsable, do string chop foo, then json.loads that.
705 str_stat = self.get_rmq_cluster_status(sentry_unit)
706 if 'running_nodes' in str_stat:
707 pos_start = str_stat.find("{running_nodes,") + 15
708 pos_end = str_stat.find("]},", pos_start) + 1
709 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
710 run_nodes = json.loads(str_run_nodes)
711 return run_nodes
712 else:
713 return []
714
715 def validate_rmq_cluster_running_nodes(self, sentry_units):
716 """Check that all rmq unit hostnames are represented in the
717 cluster_status output of all units.
718
719 :param sentry_units: list of sentry unit pointers
720 (all rmq units)
721 :returns: None if successful, otherwise return error message
722 """
723 host_names = self.get_unit_hostnames(sentry_units)
724 errors = []
725
726 # Query every unit for cluster_status running nodes
727 for query_unit in sentry_units:
728 query_unit_name = query_unit.info['unit_name']
729 running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
730
731 # Confirm that every unit is represented in the queried unit's
732 # cluster_status running nodes output.
733 for validate_unit in sentry_units:
734 val_host_name = host_names[validate_unit.info['unit_name']]
735 val_node_name = 'rabbit@{}'.format(val_host_name)
736
737 if val_node_name not in running_nodes:
738 errors.append('Cluster member check failed on {}: {} not '
739 'in {}\n'.format(query_unit_name,
740 val_node_name,
741 running_nodes))
742 if errors:
743 return ''.join(errors)
744
745 def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
746 """Check a single juju rmq unit for ssl and port in the config file."""
747 host = sentry_unit.info['public-address']
748 unit_name = sentry_unit.info['unit_name']
749
750 conf_file = '/etc/rabbitmq/rabbitmq.config'
751 conf_contents = str(self.file_contents_safe(sentry_unit,
752 conf_file, max_wait=16))
753 # Checks
754 conf_ssl = 'ssl' in conf_contents
755 conf_port = str(port) in conf_contents
756
757 # Port explicitly checked in config
758 if port and conf_port and conf_ssl:
759 self.log.debug('SSL is enabled @{}:{} '
760 '({})'.format(host, port, unit_name))
761 return True
762 elif port and not conf_port and conf_ssl:
763 self.log.debug('SSL is enabled @{} but not on port {} '
764 '({})'.format(host, port, unit_name))
765 return False
766 # Port not checked (useful when checking that ssl is disabled)
767 elif not port and conf_ssl:
768 self.log.debug('SSL is enabled @{}:{} '
769 '({})'.format(host, port, unit_name))
770 return True
771 elif not conf_ssl:
772 self.log.debug('SSL not enabled @{}:{} '
773 '({})'.format(host, port, unit_name))
774 return False
775 else:
776 msg = ('Unknown condition when checking SSL status @{}:{} '
777 '({})'.format(host, port, unit_name))
778 amulet.raise_status(amulet.FAIL, msg)
779
780 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
781 """Check that ssl is enabled on rmq juju sentry units.
782
783 :param sentry_units: list of all rmq sentry units
784 :param port: optional ssl port override to validate
785 :returns: None if successful, otherwise return error message
786 """
787 for sentry_unit in sentry_units:
788 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
789 return ('Unexpected condition: ssl is disabled on unit '
790 '({})'.format(sentry_unit.info['unit_name']))
791 return None
792
793 def validate_rmq_ssl_disabled_units(self, sentry_units):
794 """Check that ssl is enabled on listed rmq juju sentry units.
795
796 :param sentry_units: list of all rmq sentry units
797 :returns: None if successful, otherwise return error message
798 """
799 for sentry_unit in sentry_units:
800 if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
801 return ('Unexpected condition: ssl is enabled on unit '
802 '({})'.format(sentry_unit.info['unit_name']))
803 return None
804
805 def configure_rmq_ssl_on(self, sentry_units, deployment,
806 port=None, max_wait=60):
807 """Turn ssl charm config option on, with optional non-default
808 ssl port specification. Confirm that it is enabled on every
809 unit.
810
811 :param sentry_units: list of sentry units
812 :param deployment: amulet deployment object pointer
813 :param port: amqp port, use defaults if None
814 :param max_wait: maximum time to wait in seconds to confirm
815 :returns: None if successful. Raise on error.
816 """
817 self.log.debug('Setting ssl charm config option: on')
818
819 # Enable RMQ SSL
820 config = {'ssl': 'on'}
821 if port:
822 config['ssl_port'] = port
823
824 deployment.d.configure('rabbitmq-server', config)
825
826 # Wait for unit status
827 self.rmq_wait_for_cluster(deployment)
828
829 # Confirm
830 tries = 0
831 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
832 while ret and tries < (max_wait / 4):
833 time.sleep(4)
834 self.log.debug('Attempt {}: {}'.format(tries, ret))
835 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
836 tries += 1
837
838 if ret:
839 amulet.raise_status(amulet.FAIL, ret)
840
841 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
842 """Turn ssl charm config option off, confirm that it is disabled
843 on every unit.
844
845 :param sentry_units: list of sentry units
846 :param deployment: amulet deployment object pointer
847 :param max_wait: maximum time to wait in seconds to confirm
848 :returns: None if successful. Raise on error.
849 """
850 self.log.debug('Setting ssl charm config option: off')
851
852 # Disable RMQ SSL
853 config = {'ssl': 'off'}
854 deployment.d.configure('rabbitmq-server', config)
855
856 # Wait for unit status
857 self.rmq_wait_for_cluster(deployment)
858
859 # Confirm
860 tries = 0
861 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
862 while ret and tries < (max_wait / 4):
863 time.sleep(4)
864 self.log.debug('Attempt {}: {}'.format(tries, ret))
865 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
866 tries += 1
867
868 if ret:
869 amulet.raise_status(amulet.FAIL, ret)
870
871 def connect_amqp_by_unit(self, sentry_unit, ssl=False,
872 port=None, fatal=True,
873 username="testuser1", password="changeme"):
874 """Establish and return a pika amqp connection to the rabbitmq service
875 running on a rmq juju unit.
876
877 :param sentry_unit: sentry unit pointer
878 :param ssl: boolean, default to False
879 :param port: amqp port, use defaults if None
880 :param fatal: boolean, default to True (raises on connect error)
881 :param username: amqp user name, default to testuser1
882 :param password: amqp user password
883 :returns: pika amqp connection pointer or None if failed and non-fatal
884 """
885 host = sentry_unit.info['public-address']
886 unit_name = sentry_unit.info['unit_name']
887
888 # Default port logic if port is not specified
889 if ssl and not port:
890 port = 5671
891 elif not ssl and not port:
892 port = 5672
893
894 self.log.debug('Connecting to amqp on {}:{} ({}) as '
895 '{}...'.format(host, port, unit_name, username))
896
897 try:
898 credentials = pika.PlainCredentials(username, password)
899 parameters = pika.ConnectionParameters(host=host, port=port,
900 credentials=credentials,
901 ssl=ssl,
902 connection_attempts=3,
903 retry_delay=5,
904 socket_timeout=1)
905 connection = pika.BlockingConnection(parameters)
906 assert connection.server_properties['product'] == 'RabbitMQ'
907 self.log.debug('Connect OK')
908 return connection
909 except Exception as e:
910 msg = ('amqp connection failed to {}:{} as '
911 '{} ({})'.format(host, port, username, str(e)))
912 if fatal:
913 amulet.raise_status(amulet.FAIL, msg)
914 else:
915 self.log.warn(msg)
916 return None
917
918 def publish_amqp_message_by_unit(self, sentry_unit, message,
919 queue="test", ssl=False,
920 username="testuser1",
921 password="changeme",
922 port=None):
923 """Publish an amqp message to a rmq juju unit.
924
925 :param sentry_unit: sentry unit pointer
926 :param message: amqp message string
927 :param queue: message queue, default to test
928 :param username: amqp user name, default to testuser1
929 :param password: amqp user password
930 :param ssl: boolean, default to False
931 :param port: amqp port, use defaults if None
932 :returns: None. Raises exception if publish failed.
933 """
934 self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
935 message))
936 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
937 port=port,
938 username=username,
939 password=password)
940
941 # NOTE(beisner): extra debug here re: pika hang potential:
942 # https://github.com/pika/pika/issues/297
943 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
944 self.log.debug('Defining channel...')
945 channel = connection.channel()
946 self.log.debug('Declaring queue...')
947 channel.queue_declare(queue=queue, auto_delete=False, durable=True)
948 self.log.debug('Publishing message...')
949 channel.basic_publish(exchange='', routing_key=queue, body=message)
950 self.log.debug('Closing channel...')
951 channel.close()
952 self.log.debug('Closing connection...')
953 connection.close()
954
955 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
956 username="testuser1",
957 password="changeme",
958 ssl=False, port=None):
959 """Get an amqp message from a rmq juju unit.
960
961 :param sentry_unit: sentry unit pointer
962 :param queue: message queue, default to test
963 :param username: amqp user name, default to testuser1
964 :param password: amqp user password
965 :param ssl: boolean, default to False
966 :param port: amqp port, use defaults if None
967 :returns: amqp message body as string. Raise if get fails.
968 """
969 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
970 port=port,
971 username=username,
972 password=password)
973 channel = connection.channel()
974 method_frame, _, body = channel.basic_get(queue)
975
976 if method_frame:
977 self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
978 body))
979 channel.basic_ack(method_frame.delivery_tag)
980 channel.close()
981 connection.close()
982 return body
983 else:
984 msg = 'No message retrieved.'
985 amulet.raise_status(amulet.FAIL, msg)
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index b213fd7..ff597c9 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -14,6 +14,7 @@
14# You should have received a copy of the GNU Lesser General Public License 14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16 16
17import glob
17import json 18import json
18import os 19import os
19import re 20import re
@@ -50,10 +51,13 @@ from charmhelpers.core.sysctl import create as sysctl_create
50from charmhelpers.core.strutils import bool_from_string 51from charmhelpers.core.strutils import bool_from_string
51 52
52from charmhelpers.core.host import ( 53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
53 list_nics, 56 list_nics,
54 get_nic_hwaddr, 57 get_nic_hwaddr,
55 mkdir, 58 mkdir,
56 write_file, 59 write_file,
60 pwgen,
57) 61)
58from charmhelpers.contrib.hahelpers.cluster import ( 62from charmhelpers.contrib.hahelpers.cluster import (
59 determine_apache_port, 63 determine_apache_port,
@@ -84,6 +88,14 @@ from charmhelpers.contrib.network.ip import (
84 is_bridge_member, 88 is_bridge_member,
85) 89)
86from charmhelpers.contrib.openstack.utils import get_host_ip 90from charmhelpers.contrib.openstack.utils import get_host_ip
91from charmhelpers.core.unitdata import kv
92
93try:
94 import psutil
95except ImportError:
96 apt_install('python-psutil', fatal=True)
97 import psutil
98
87CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' 99CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
88ADDRESS_TYPES = ['admin', 'internal', 'public'] 100ADDRESS_TYPES = ['admin', 'internal', 'public']
89 101
@@ -192,10 +204,50 @@ def config_flags_parser(config_flags):
192class OSContextGenerator(object): 204class OSContextGenerator(object):
193 """Base class for all context generators.""" 205 """Base class for all context generators."""
194 interfaces = [] 206 interfaces = []
207 related = False
208 complete = False
209 missing_data = []
195 210
196 def __call__(self): 211 def __call__(self):
197 raise NotImplementedError 212 raise NotImplementedError
198 213
214 def context_complete(self, ctxt):
215 """Check for missing data for the required context data.
216 Set self.missing_data if it exists and return False.
217 Set self.complete if no missing data and return True.
218 """
219 # Fresh start
220 self.complete = False
221 self.missing_data = []
222 for k, v in six.iteritems(ctxt):
223 if v is None or v == '':
224 if k not in self.missing_data:
225 self.missing_data.append(k)
226
227 if self.missing_data:
228 self.complete = False
229 log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
230 else:
231 self.complete = True
232 return self.complete
233
234 def get_related(self):
235 """Check if any of the context interfaces have relation ids.
236 Set self.related and return True if one of the interfaces
237 has relation ids.
238 """
239 # Fresh start
240 self.related = False
241 try:
242 for interface in self.interfaces:
243 if relation_ids(interface):
244 self.related = True
245 return self.related
246 except AttributeError as e:
247 log("{} {}"
248 "".format(self, e), 'INFO')
249 return self.related
250
199 251
200class SharedDBContext(OSContextGenerator): 252class SharedDBContext(OSContextGenerator):
201 interfaces = ['shared-db'] 253 interfaces = ['shared-db']
@@ -211,6 +263,7 @@ class SharedDBContext(OSContextGenerator):
211 self.database = database 263 self.database = database
212 self.user = user 264 self.user = user
213 self.ssl_dir = ssl_dir 265 self.ssl_dir = ssl_dir
266 self.rel_name = self.interfaces[0]
214 267
215 def __call__(self): 268 def __call__(self):
216 self.database = self.database or config('database') 269 self.database = self.database or config('database')
@@ -244,6 +297,7 @@ class SharedDBContext(OSContextGenerator):
244 password_setting = self.relation_prefix + '_password' 297 password_setting = self.relation_prefix + '_password'
245 298
246 for rid in relation_ids(self.interfaces[0]): 299 for rid in relation_ids(self.interfaces[0]):
300 self.related = True
247 for unit in related_units(rid): 301 for unit in related_units(rid):
248 rdata = relation_get(rid=rid, unit=unit) 302 rdata = relation_get(rid=rid, unit=unit)
249 host = rdata.get('db_host') 303 host = rdata.get('db_host')
@@ -255,7 +309,7 @@ class SharedDBContext(OSContextGenerator):
255 'database_password': rdata.get(password_setting), 309 'database_password': rdata.get(password_setting),
256 'database_type': 'mysql' 310 'database_type': 'mysql'
257 } 311 }
258 if context_complete(ctxt): 312 if self.context_complete(ctxt):
259 db_ssl(rdata, ctxt, self.ssl_dir) 313 db_ssl(rdata, ctxt, self.ssl_dir)
260 return ctxt 314 return ctxt
261 return {} 315 return {}
@@ -276,6 +330,7 @@ class PostgresqlDBContext(OSContextGenerator):
276 330
277 ctxt = {} 331 ctxt = {}
278 for rid in relation_ids(self.interfaces[0]): 332 for rid in relation_ids(self.interfaces[0]):
333 self.related = True
279 for unit in related_units(rid): 334 for unit in related_units(rid):
280 rel_host = relation_get('host', rid=rid, unit=unit) 335 rel_host = relation_get('host', rid=rid, unit=unit)
281 rel_user = relation_get('user', rid=rid, unit=unit) 336 rel_user = relation_get('user', rid=rid, unit=unit)
@@ -285,7 +340,7 @@ class PostgresqlDBContext(OSContextGenerator):
285 'database_user': rel_user, 340 'database_user': rel_user,
286 'database_password': rel_passwd, 341 'database_password': rel_passwd,
287 'database_type': 'postgresql'} 342 'database_type': 'postgresql'}
288 if context_complete(ctxt): 343 if self.context_complete(ctxt):
289 return ctxt 344 return ctxt
290 345
291 return {} 346 return {}
@@ -346,6 +401,7 @@ class IdentityServiceContext(OSContextGenerator):
346 ctxt['signing_dir'] = cachedir 401 ctxt['signing_dir'] = cachedir
347 402
348 for rid in relation_ids(self.rel_name): 403 for rid in relation_ids(self.rel_name):
404 self.related = True
349 for unit in related_units(rid): 405 for unit in related_units(rid):
350 rdata = relation_get(rid=rid, unit=unit) 406 rdata = relation_get(rid=rid, unit=unit)
351 serv_host = rdata.get('service_host') 407 serv_host = rdata.get('service_host')
@@ -364,7 +420,7 @@ class IdentityServiceContext(OSContextGenerator):
364 'service_protocol': svc_protocol, 420 'service_protocol': svc_protocol,
365 'auth_protocol': auth_protocol}) 421 'auth_protocol': auth_protocol})
366 422
367 if context_complete(ctxt): 423 if self.context_complete(ctxt):
368 # NOTE(jamespage) this is required for >= icehouse 424 # NOTE(jamespage) this is required for >= icehouse
369 # so a missing value just indicates keystone needs 425 # so a missing value just indicates keystone needs
370 # upgrading 426 # upgrading
@@ -403,6 +459,7 @@ class AMQPContext(OSContextGenerator):
403 ctxt = {} 459 ctxt = {}
404 for rid in relation_ids(self.rel_name): 460 for rid in relation_ids(self.rel_name):
405 ha_vip_only = False 461 ha_vip_only = False
462 self.related = True
406 for unit in related_units(rid): 463 for unit in related_units(rid):
407 if relation_get('clustered', rid=rid, unit=unit): 464 if relation_get('clustered', rid=rid, unit=unit):
408 ctxt['clustered'] = True 465 ctxt['clustered'] = True
@@ -435,7 +492,7 @@ class AMQPContext(OSContextGenerator):
435 ha_vip_only = relation_get('ha-vip-only', 492 ha_vip_only = relation_get('ha-vip-only',
436 rid=rid, unit=unit) is not None 493 rid=rid, unit=unit) is not None
437 494
438 if context_complete(ctxt): 495 if self.context_complete(ctxt):
439 if 'rabbit_ssl_ca' in ctxt: 496 if 'rabbit_ssl_ca' in ctxt:
440 if not self.ssl_dir: 497 if not self.ssl_dir:
441 log("Charm not setup for ssl support but ssl ca " 498 log("Charm not setup for ssl support but ssl ca "
@@ -467,7 +524,7 @@ class AMQPContext(OSContextGenerator):
467 ctxt['oslo_messaging_flags'] = config_flags_parser( 524 ctxt['oslo_messaging_flags'] = config_flags_parser(
468 oslo_messaging_flags) 525 oslo_messaging_flags)
469 526
470 if not context_complete(ctxt): 527 if not self.complete:
471 return {} 528 return {}
472 529
473 return ctxt 530 return ctxt
@@ -483,13 +540,15 @@ class CephContext(OSContextGenerator):
483 540
484 log('Generating template context for ceph', level=DEBUG) 541 log('Generating template context for ceph', level=DEBUG)
485 mon_hosts = [] 542 mon_hosts = []
486 auth = None 543 ctxt = {
487 key = None 544 'use_syslog': str(config('use-syslog')).lower()
488 use_syslog = str(config('use-syslog')).lower() 545 }
489 for rid in relation_ids('ceph'): 546 for rid in relation_ids('ceph'):
490 for unit in related_units(rid): 547 for unit in related_units(rid):
491 auth = relation_get('auth', rid=rid, unit=unit) 548 if not ctxt.get('auth'):
492 key = relation_get('key', rid=rid, unit=unit) 549 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
550 if not ctxt.get('key'):
551 ctxt['key'] = relation_get('key', rid=rid, unit=unit)
493 ceph_pub_addr = relation_get('ceph-public-address', rid=rid, 552 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
494 unit=unit) 553 unit=unit)
495 unit_priv_addr = relation_get('private-address', rid=rid, 554 unit_priv_addr = relation_get('private-address', rid=rid,
@@ -498,15 +557,12 @@ class CephContext(OSContextGenerator):
498 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr 557 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
499 mon_hosts.append(ceph_addr) 558 mon_hosts.append(ceph_addr)
500 559
501 ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), 560 ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
502 'auth': auth,
503 'key': key,
504 'use_syslog': use_syslog}
505 561
506 if not os.path.isdir('/etc/ceph'): 562 if not os.path.isdir('/etc/ceph'):
507 os.mkdir('/etc/ceph') 563 os.mkdir('/etc/ceph')
508 564
509 if not context_complete(ctxt): 565 if not self.context_complete(ctxt):
510 return {} 566 return {}
511 567
512 ensure_packages(['ceph-common']) 568 ensure_packages(['ceph-common'])
@@ -579,15 +635,28 @@ class HAProxyContext(OSContextGenerator):
579 if config('haproxy-client-timeout'): 635 if config('haproxy-client-timeout'):
580 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') 636 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
581 637
638 if config('haproxy-queue-timeout'):
639 ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
640
641 if config('haproxy-connect-timeout'):
642 ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
643
582 if config('prefer-ipv6'): 644 if config('prefer-ipv6'):
583 ctxt['ipv6'] = True 645 ctxt['ipv6'] = True
584 ctxt['local_host'] = 'ip6-localhost' 646 ctxt['local_host'] = 'ip6-localhost'
585 ctxt['haproxy_host'] = '::' 647 ctxt['haproxy_host'] = '::'
586 ctxt['stat_port'] = ':::8888'
587 else: 648 else:
588 ctxt['local_host'] = '127.0.0.1' 649 ctxt['local_host'] = '127.0.0.1'
589 ctxt['haproxy_host'] = '0.0.0.0' 650 ctxt['haproxy_host'] = '0.0.0.0'
590 ctxt['stat_port'] = ':8888' 651
652 ctxt['stat_port'] = '8888'
653
654 db = kv()
655 ctxt['stat_password'] = db.get('stat-password')
656 if not ctxt['stat_password']:
657 ctxt['stat_password'] = db.set('stat-password',
658 pwgen(32))
659 db.flush()
591 660
592 for frontend in cluster_hosts: 661 for frontend in cluster_hosts:
593 if (len(cluster_hosts[frontend]['backends']) > 1 or 662 if (len(cluster_hosts[frontend]['backends']) > 1 or
@@ -878,19 +947,6 @@ class NeutronContext(OSContextGenerator):
878 947
879 return calico_ctxt 948 return calico_ctxt
880 949
881 def pg_ctxt(self):
882 driver = neutron_plugin_attribute(self.plugin, 'driver',
883 self.network_manager)
884 config = neutron_plugin_attribute(self.plugin, 'config',
885 self.network_manager)
886 ovs_ctxt = {'core_plugin': driver,
887 'neutron_plugin': 'plumgrid',
888 'neutron_security_groups': self.neutron_security_groups,
889 'local_ip': unit_private_ip(),
890 'config': config}
891
892 return ovs_ctxt
893
894 def neutron_ctxt(self): 950 def neutron_ctxt(self):
895 if https(): 951 if https():
896 proto = 'https' 952 proto = 'https'
@@ -906,6 +962,31 @@ class NeutronContext(OSContextGenerator):
906 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} 962 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
907 return ctxt 963 return ctxt
908 964
965 def pg_ctxt(self):
966 driver = neutron_plugin_attribute(self.plugin, 'driver',
967 self.network_manager)
968 config = neutron_plugin_attribute(self.plugin, 'config',
969 self.network_manager)
970 ovs_ctxt = {'core_plugin': driver,
971 'neutron_plugin': 'plumgrid',
972 'neutron_security_groups': self.neutron_security_groups,
973 'local_ip': unit_private_ip(),
974 'config': config}
975 return ovs_ctxt
976
977 def midonet_ctxt(self):
978 driver = neutron_plugin_attribute(self.plugin, 'driver',
979 self.network_manager)
980 midonet_config = neutron_plugin_attribute(self.plugin, 'config',
981 self.network_manager)
982 mido_ctxt = {'core_plugin': driver,
983 'neutron_plugin': 'midonet',
984 'neutron_security_groups': self.neutron_security_groups,
985 'local_ip': unit_private_ip(),
986 'config': midonet_config}
987
988 return mido_ctxt
989
909 def __call__(self): 990 def __call__(self):
910 if self.network_manager not in ['quantum', 'neutron']: 991 if self.network_manager not in ['quantum', 'neutron']:
911 return {} 992 return {}
@@ -927,6 +1008,8 @@ class NeutronContext(OSContextGenerator):
927 ctxt.update(self.nuage_ctxt()) 1008 ctxt.update(self.nuage_ctxt())
928 elif self.plugin == 'plumgrid': 1009 elif self.plugin == 'plumgrid':
929 ctxt.update(self.pg_ctxt()) 1010 ctxt.update(self.pg_ctxt())
1011 elif self.plugin == 'midonet':
1012 ctxt.update(self.midonet_ctxt())
930 1013
931 alchemy_flags = config('neutron-alchemy-flags') 1014 alchemy_flags = config('neutron-alchemy-flags')
932 if alchemy_flags: 1015 if alchemy_flags:
@@ -938,7 +1021,6 @@ class NeutronContext(OSContextGenerator):
938 1021
939 1022
940class NeutronPortContext(OSContextGenerator): 1023class NeutronPortContext(OSContextGenerator):
941 NIC_PREFIXES = ['eth', 'bond']
942 1024
943 def resolve_ports(self, ports): 1025 def resolve_ports(self, ports):
944 """Resolve NICs not yet bound to bridge(s) 1026 """Resolve NICs not yet bound to bridge(s)
@@ -950,7 +1032,18 @@ class NeutronPortContext(OSContextGenerator):
950 1032
951 hwaddr_to_nic = {} 1033 hwaddr_to_nic = {}
952 hwaddr_to_ip = {} 1034 hwaddr_to_ip = {}
953 for nic in list_nics(self.NIC_PREFIXES): 1035 for nic in list_nics():
1036 # Ignore virtual interfaces (bond masters will be identified from
1037 # their slaves)
1038 if not is_phy_iface(nic):
1039 continue
1040
1041 _nic = get_bond_master(nic)
1042 if _nic:
1043 log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1044 level=DEBUG)
1045 nic = _nic
1046
954 hwaddr = get_nic_hwaddr(nic) 1047 hwaddr = get_nic_hwaddr(nic)
955 hwaddr_to_nic[hwaddr] = nic 1048 hwaddr_to_nic[hwaddr] = nic
956 addresses = get_ipv4_addr(nic, fatal=False) 1049 addresses = get_ipv4_addr(nic, fatal=False)
@@ -976,7 +1069,8 @@ class NeutronPortContext(OSContextGenerator):
976 # trust it to be the real external network). 1069 # trust it to be the real external network).
977 resolved.append(entry) 1070 resolved.append(entry)
978 1071
979 return resolved 1072 # Ensure no duplicates
1073 return list(set(resolved))
980 1074
981 1075
982class OSConfigFlagContext(OSContextGenerator): 1076class OSConfigFlagContext(OSContextGenerator):
@@ -1016,6 +1110,20 @@ class OSConfigFlagContext(OSContextGenerator):
1016 config_flags_parser(config_flags)} 1110 config_flags_parser(config_flags)}
1017 1111
1018 1112
1113class LibvirtConfigFlagsContext(OSContextGenerator):
1114 """
1115 This context provides support for extending
1116 the libvirt section through user-defined flags.
1117 """
1118 def __call__(self):
1119 ctxt = {}
1120 libvirt_flags = config('libvirt-flags')
1121 if libvirt_flags:
1122 ctxt['libvirt_flags'] = config_flags_parser(
1123 libvirt_flags)
1124 return ctxt
1125
1126
1019class SubordinateConfigContext(OSContextGenerator): 1127class SubordinateConfigContext(OSContextGenerator):
1020 1128
1021 """ 1129 """
@@ -1048,7 +1156,7 @@ class SubordinateConfigContext(OSContextGenerator):
1048 1156
1049 ctxt = { 1157 ctxt = {
1050 ... other context ... 1158 ... other context ...
1051 'subordinate_config': { 1159 'subordinate_configuration': {
1052 'DEFAULT': { 1160 'DEFAULT': {
1053 'key1': 'value1', 1161 'key1': 'value1',
1054 }, 1162 },
@@ -1066,13 +1174,22 @@ class SubordinateConfigContext(OSContextGenerator):
1066 :param config_file : Service's config file to query sections 1174 :param config_file : Service's config file to query sections
1067 :param interface : Subordinate interface to inspect 1175 :param interface : Subordinate interface to inspect
1068 """ 1176 """
1069 self.service = service
1070 self.config_file = config_file 1177 self.config_file = config_file
1071 self.interface = interface 1178 if isinstance(service, list):
1179 self.services = service
1180 else:
1181 self.services = [service]
1182 if isinstance(interface, list):
1183 self.interfaces = interface
1184 else:
1185 self.interfaces = [interface]
1072 1186
1073 def __call__(self): 1187 def __call__(self):
1074 ctxt = {'sections': {}} 1188 ctxt = {'sections': {}}
1075 for rid in relation_ids(self.interface): 1189 rids = []
1190 for interface in self.interfaces:
1191 rids.extend(relation_ids(interface))
1192 for rid in rids:
1076 for unit in related_units(rid): 1193 for unit in related_units(rid):
1077 sub_config = relation_get('subordinate_configuration', 1194 sub_config = relation_get('subordinate_configuration',
1078 rid=rid, unit=unit) 1195 rid=rid, unit=unit)
@@ -1080,33 +1197,37 @@ class SubordinateConfigContext(OSContextGenerator):
1080 try: 1197 try:
1081 sub_config = json.loads(sub_config) 1198 sub_config = json.loads(sub_config)
1082 except: 1199 except:
1083 log('Could not parse JSON from subordinate_config ' 1200 log('Could not parse JSON from '
1084 'setting from %s' % rid, level=ERROR) 1201 'subordinate_configuration setting from %s'
1202 % rid, level=ERROR)
1085 continue 1203 continue
1086 1204
1087 if self.service not in sub_config: 1205 for service in self.services:
1088 log('Found subordinate_config on %s but it contained' 1206 if service not in sub_config:
1089 'nothing for %s service' % (rid, self.service), 1207 log('Found subordinate_configuration on %s but it '
1090 level=INFO) 1208 'contained nothing for %s service'
1091 continue 1209 % (rid, service), level=INFO)
1092 1210 continue
1093 sub_config = sub_config[self.service] 1211
1094 if self.config_file not in sub_config: 1212 sub_config = sub_config[service]
1095 log('Found subordinate_config on %s but it contained' 1213 if self.config_file not in sub_config:
1096 'nothing for %s' % (rid, self.config_file), 1214 log('Found subordinate_configuration on %s but it '
1097 level=INFO) 1215 'contained nothing for %s'
1098 continue 1216 % (rid, self.config_file), level=INFO)
1099 1217 continue
1100 sub_config = sub_config[self.config_file] 1218
1101 for k, v in six.iteritems(sub_config): 1219 sub_config = sub_config[self.config_file]
1102 if k == 'sections': 1220 for k, v in six.iteritems(sub_config):
1103 for section, config_dict in six.iteritems(v): 1221 if k == 'sections':
1104 log("adding section '%s'" % (section), 1222 for section, config_list in six.iteritems(v):
1105 level=DEBUG) 1223 log("adding section '%s'" % (section),
1106 ctxt[k][section] = config_dict 1224 level=DEBUG)
1107 else: 1225 if ctxt[k].get(section):
1108 ctxt[k] = v 1226 ctxt[k][section].extend(config_list)
1109 1227 else:
1228 ctxt[k][section] = config_list
1229 else:
1230 ctxt[k] = v
1110 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) 1231 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1111 return ctxt 1232 return ctxt
1112 1233
@@ -1143,13 +1264,11 @@ class WorkerConfigContext(OSContextGenerator):
1143 1264
1144 @property 1265 @property
1145 def num_cpus(self): 1266 def num_cpus(self):
1146 try: 1267 # NOTE: use cpu_count if present (16.04 support)
1147 from psutil import NUM_CPUS 1268 if hasattr(psutil, 'cpu_count'):
1148 except ImportError: 1269 return psutil.cpu_count()
1149 apt_install('python-psutil', fatal=True) 1270 else:
1150 from psutil import NUM_CPUS 1271 return psutil.NUM_CPUS
1151
1152 return NUM_CPUS
1153 1272
1154 def __call__(self): 1273 def __call__(self):
1155 multiplier = config('worker-multiplier') or 0 1274 multiplier = config('worker-multiplier') or 0
@@ -1283,15 +1402,19 @@ class DataPortContext(NeutronPortContext):
1283 def __call__(self): 1402 def __call__(self):
1284 ports = config('data-port') 1403 ports = config('data-port')
1285 if ports: 1404 if ports:
1405 # Map of {port/mac:bridge}
1286 portmap = parse_data_port_mappings(ports) 1406 portmap = parse_data_port_mappings(ports)
1287 ports = portmap.values() 1407 ports = portmap.keys()
1408 # Resolve provided ports or mac addresses and filter out those
1409 # already attached to a bridge.
1288 resolved = self.resolve_ports(ports) 1410 resolved = self.resolve_ports(ports)
1411 # FIXME: is this necessary?
1289 normalized = {get_nic_hwaddr(port): port for port in resolved 1412 normalized = {get_nic_hwaddr(port): port for port in resolved
1290 if port not in ports} 1413 if port not in ports}
1291 normalized.update({port: port for port in resolved 1414 normalized.update({port: port for port in resolved
1292 if port in ports}) 1415 if port in ports})
1293 if resolved: 1416 if resolved:
1294 return {bridge: normalized[port] for bridge, port in 1417 return {normalized[port]: bridge for port, bridge in
1295 six.iteritems(portmap) if port in normalized.keys()} 1418 six.iteritems(portmap) if port in normalized.keys()}
1296 1419
1297 return None 1420 return None
@@ -1302,12 +1425,22 @@ class PhyNICMTUContext(DataPortContext):
1302 def __call__(self): 1425 def __call__(self):
1303 ctxt = {} 1426 ctxt = {}
1304 mappings = super(PhyNICMTUContext, self).__call__() 1427 mappings = super(PhyNICMTUContext, self).__call__()
1305 if mappings and mappings.values(): 1428 if mappings and mappings.keys():
1306 ports = mappings.values() 1429 ports = sorted(mappings.keys())
1307 napi_settings = NeutronAPIContext()() 1430 napi_settings = NeutronAPIContext()()
1308 mtu = napi_settings.get('network_device_mtu') 1431 mtu = napi_settings.get('network_device_mtu')
1432 all_ports = set()
1433 # If any of ports is a vlan device, its underlying device must have
1434 # mtu applied first.
1435 for port in ports:
1436 for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1437 lport = os.path.basename(lport)
1438 all_ports.add(lport.split('_')[1])
1439
1440 all_ports = list(all_ports)
1441 all_ports.extend(ports)
1309 if mtu: 1442 if mtu:
1310 ctxt["devs"] = '\\n'.join(ports) 1443 ctxt["devs"] = '\\n'.join(all_ports)
1311 ctxt['mtu'] = mtu 1444 ctxt['mtu'] = mtu
1312 1445
1313 return ctxt 1446 return ctxt
@@ -1339,6 +1472,6 @@ class NetworkServiceContext(OSContextGenerator):
1339 'auth_protocol': 1472 'auth_protocol':
1340 rdata.get('auth_protocol') or 'http', 1473 rdata.get('auth_protocol') or 'http',
1341 } 1474 }
1342 if context_complete(ctxt): 1475 if self.context_complete(ctxt):
1343 return ctxt 1476 return ctxt
1344 return {} 1477 return {}
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
index c43d857..9a8e0ef 100644
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ b/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -50,7 +50,7 @@ def determine_dkms_package():
50 if kernel_version() >= (3, 13): 50 if kernel_version() >= (3, 13):
51 return [] 51 return []
52 else: 52 else:
53 return ['openvswitch-datapath-dkms'] 53 return [headers_package(), 'openvswitch-datapath-dkms']
54 54
55 55
56# legacy 56# legacy
@@ -70,7 +70,7 @@ def quantum_plugins():
70 relation_prefix='neutron', 70 relation_prefix='neutron',
71 ssl_dir=QUANTUM_CONF_DIR)], 71 ssl_dir=QUANTUM_CONF_DIR)],
72 'services': ['quantum-plugin-openvswitch-agent'], 72 'services': ['quantum-plugin-openvswitch-agent'],
73 'packages': [[headers_package()] + determine_dkms_package(), 73 'packages': [determine_dkms_package(),
74 ['quantum-plugin-openvswitch-agent']], 74 ['quantum-plugin-openvswitch-agent']],
75 'server_packages': ['quantum-server', 75 'server_packages': ['quantum-server',
76 'quantum-plugin-openvswitch'], 76 'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
111 relation_prefix='neutron', 111 relation_prefix='neutron',
112 ssl_dir=NEUTRON_CONF_DIR)], 112 ssl_dir=NEUTRON_CONF_DIR)],
113 'services': ['neutron-plugin-openvswitch-agent'], 113 'services': ['neutron-plugin-openvswitch-agent'],
114 'packages': [[headers_package()] + determine_dkms_package(), 114 'packages': [determine_dkms_package(),
115 ['neutron-plugin-openvswitch-agent']], 115 ['neutron-plugin-openvswitch-agent']],
116 'server_packages': ['neutron-server', 116 'server_packages': ['neutron-server',
117 'neutron-plugin-openvswitch'], 117 'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
155 relation_prefix='neutron', 155 relation_prefix='neutron',
156 ssl_dir=NEUTRON_CONF_DIR)], 156 ssl_dir=NEUTRON_CONF_DIR)],
157 'services': [], 157 'services': [],
158 'packages': [[headers_package()] + determine_dkms_package(), 158 'packages': [determine_dkms_package(),
159 ['neutron-plugin-cisco']], 159 ['neutron-plugin-cisco']],
160 'server_packages': ['neutron-server', 160 'server_packages': ['neutron-server',
161 'neutron-plugin-cisco'], 161 'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
174 'neutron-dhcp-agent', 174 'neutron-dhcp-agent',
175 'nova-api-metadata', 175 'nova-api-metadata',
176 'etcd'], 176 'etcd'],
177 'packages': [[headers_package()] + determine_dkms_package(), 177 'packages': [determine_dkms_package(),
178 ['calico-compute', 178 ['calico-compute',
179 'bird', 179 'bird',
180 'neutron-dhcp-agent', 180 'neutron-dhcp-agent',
@@ -209,6 +209,20 @@ def neutron_plugins():
209 'server_packages': ['neutron-server', 209 'server_packages': ['neutron-server',
210 'neutron-plugin-plumgrid'], 210 'neutron-plugin-plumgrid'],
211 'server_services': ['neutron-server'] 211 'server_services': ['neutron-server']
212 },
213 'midonet': {
214 'config': '/etc/neutron/plugins/midonet/midonet.ini',
215 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
216 'contexts': [
217 context.SharedDBContext(user=config('neutron-database-user'),
218 database=config('neutron-database'),
219 relation_prefix='neutron',
220 ssl_dir=NEUTRON_CONF_DIR)],
221 'services': [],
222 'packages': [determine_dkms_package()],
223 'server_packages': ['neutron-server',
224 'python-neutron-plugin-midonet'],
225 'server_services': ['neutron-server']
212 } 226 }
213 } 227 }
214 if release >= 'icehouse': 228 if release >= 'icehouse':
@@ -219,6 +233,14 @@ def neutron_plugins():
219 'neutron-plugin-ml2'] 233 'neutron-plugin-ml2']
220 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards 234 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
221 plugins['nvp'] = plugins['nsx'] 235 plugins['nvp'] = plugins['nsx']
236 if release >= 'kilo':
237 plugins['midonet']['driver'] = (
238 'neutron.plugins.midonet.plugin.MidonetPluginV2')
239 if release >= 'liberty':
240 midonet_origin = config('midonet-origin')
241 if midonet_origin is not None and midonet_origin[4:5] == '1':
242 plugins['midonet']['driver'] = (
243 'midonet.neutron.plugin_v1.MidonetPluginV2')
222 return plugins 244 return plugins
223 245
224 246
@@ -269,17 +291,30 @@ def network_manager():
269 return 'neutron' 291 return 'neutron'
270 292
271 293
272def parse_mappings(mappings): 294def parse_mappings(mappings, key_rvalue=False):
295 """By default mappings are lvalue keyed.
296
297 If key_rvalue is True, the mapping will be reversed to allow multiple
298 configs for the same lvalue.
299 """
273 parsed = {} 300 parsed = {}
274 if mappings: 301 if mappings:
275 mappings = mappings.split() 302 mappings = mappings.split()
276 for m in mappings: 303 for m in mappings:
277 p = m.partition(':') 304 p = m.partition(':')
278 key = p[0].strip() 305
279 if p[1]: 306 if key_rvalue:
280 parsed[key] = p[2].strip() 307 key_index = 2
308 val_index = 0
309 # if there is no rvalue skip to next
310 if not p[1]:
311 continue
281 else: 312 else:
282 parsed[key] = '' 313 key_index = 0
314 val_index = 2
315
316 key = p[key_index].strip()
317 parsed[key] = p[val_index].strip()
283 318
284 return parsed 319 return parsed
285 320
@@ -297,25 +332,25 @@ def parse_bridge_mappings(mappings):
297def parse_data_port_mappings(mappings, default_bridge='br-data'): 332def parse_data_port_mappings(mappings, default_bridge='br-data'):
298 """Parse data port mappings. 333 """Parse data port mappings.
299 334
300 Mappings must be a space-delimited list of bridge:port mappings. 335 Mappings must be a space-delimited list of bridge:port.
301 336
302 Returns dict of the form {bridge:port}. 337 Returns dict of the form {port:bridge} where ports may be mac addresses or
338 interface names.
303 """ 339 """
304 _mappings = parse_mappings(mappings) 340
341 # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
342 # proposed for <port> since it may be a mac address which will differ
343 # across units this allowing first-known-good to be chosen.
344 _mappings = parse_mappings(mappings, key_rvalue=True)
305 if not _mappings or list(_mappings.values()) == ['']: 345 if not _mappings or list(_mappings.values()) == ['']:
306 if not mappings: 346 if not mappings:
307 return {} 347 return {}
308 348
309 # For backwards-compatibility we need to support port-only provided in 349 # For backwards-compatibility we need to support port-only provided in
310 # config. 350 # config.
311 _mappings = {default_bridge: mappings.split()[0]} 351 _mappings = {mappings.split()[0]: default_bridge}
312
313 bridges = _mappings.keys()
314 ports = _mappings.values()
315 if len(set(bridges)) != len(bridges):
316 raise Exception("It is not allowed to have more than one port "
317 "configured on the same bridge")
318 352
353 ports = _mappings.keys()
319 if len(set(ports)) != len(ports): 354 if len(set(ports)) != len(ports):
320 raise Exception("It is not allowed to have the same port configured " 355 raise Exception("It is not allowed to have the same port configured "
321 "on more than one bridge") 356 "on more than one bridge")
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
index 021d8cf..e5e3cb1 100644
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -18,7 +18,7 @@ import os
18 18
19import six 19import six
20 20
21from charmhelpers.fetch import apt_install 21from charmhelpers.fetch import apt_install, apt_update
22from charmhelpers.core.hookenv import ( 22from charmhelpers.core.hookenv import (
23 log, 23 log,
24 ERROR, 24 ERROR,
@@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
29try: 29try:
30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions 30 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
31except ImportError: 31except ImportError:
32 apt_update(fatal=True)
32 apt_install('python-jinja2', fatal=True) 33 apt_install('python-jinja2', fatal=True)
33 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions 34 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
34 35
@@ -112,7 +113,7 @@ class OSConfigTemplate(object):
112 113
113 def complete_contexts(self): 114 def complete_contexts(self):
114 ''' 115 '''
115 Return a list of interfaces that have atisfied contexts. 116 Return a list of interfaces that have satisfied contexts.
116 ''' 117 '''
117 if self._complete_contexts: 118 if self._complete_contexts:
118 return self._complete_contexts 119 return self._complete_contexts
@@ -293,3 +294,30 @@ class OSConfigRenderer(object):
293 [interfaces.extend(i.complete_contexts()) 294 [interfaces.extend(i.complete_contexts())
294 for i in six.itervalues(self.templates)] 295 for i in six.itervalues(self.templates)]
295 return interfaces 296 return interfaces
297
298 def get_incomplete_context_data(self, interfaces):
299 '''
300 Return dictionary of relation status of interfaces and any missing
301 required context data. Example:
302 {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
303 'zeromq-configuration': {'related': False}}
304 '''
305 incomplete_context_data = {}
306
307 for i in six.itervalues(self.templates):
308 for context in i.contexts:
309 for interface in interfaces:
310 related = False
311 if interface in context.interfaces:
312 related = context.get_related()
313 missing_data = context.missing_data
314 if missing_data:
315 incomplete_context_data[interface] = {'missing_data': missing_data}
316 if related:
317 if incomplete_context_data.get(interface):
318 incomplete_context_data[interface].update({'related': True})
319 else:
320 incomplete_context_data[interface] = {'related': True}
321 else:
322 incomplete_context_data[interface] = {'related': False}
323 return incomplete_context_data
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 4dd000c..2ed7955 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -1,5 +1,3 @@
1#!/usr/bin/python
2
3# Copyright 2014-2015 Canonical Limited. 1# Copyright 2014-2015 Canonical Limited.
4# 2#
5# This file is part of charm-helpers. 3# This file is part of charm-helpers.
@@ -24,8 +22,11 @@ import subprocess
24import json 22import json
25import os 23import os
26import sys 24import sys
25import re
27 26
28import six 27import six
28import traceback
29import uuid
29import yaml 30import yaml
30 31
31from charmhelpers.contrib.network import ip 32from charmhelpers.contrib.network import ip
@@ -35,12 +36,17 @@ from charmhelpers.core import (
35) 36)
36 37
37from charmhelpers.core.hookenv import ( 38from charmhelpers.core.hookenv import (
39 action_fail,
40 action_set,
38 config, 41 config,
39 log as juju_log, 42 log as juju_log,
40 charm_dir, 43 charm_dir,
41 INFO, 44 INFO,
45 related_units,
42 relation_ids, 46 relation_ids,
43 relation_set 47 relation_set,
48 status_set,
49 hook_name
44) 50)
45 51
46from charmhelpers.contrib.storage.linux.lvm import ( 52from charmhelpers.contrib.storage.linux.lvm import (
@@ -50,7 +56,8 @@ from charmhelpers.contrib.storage.linux.lvm import (
50) 56)
51 57
52from charmhelpers.contrib.network.ip import ( 58from charmhelpers.contrib.network.ip import (
53 get_ipv6_addr 59 get_ipv6_addr,
60 is_ipv6,
54) 61)
55 62
56from charmhelpers.contrib.python.packages import ( 63from charmhelpers.contrib.python.packages import (
@@ -69,7 +76,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
69DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 76DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
70 'restricted main multiverse universe') 77 'restricted main multiverse universe')
71 78
72
73UBUNTU_OPENSTACK_RELEASE = OrderedDict([ 79UBUNTU_OPENSTACK_RELEASE = OrderedDict([
74 ('oneiric', 'diablo'), 80 ('oneiric', 'diablo'),
75 ('precise', 'essex'), 81 ('precise', 'essex'),
@@ -80,6 +86,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
80 ('utopic', 'juno'), 86 ('utopic', 'juno'),
81 ('vivid', 'kilo'), 87 ('vivid', 'kilo'),
82 ('wily', 'liberty'), 88 ('wily', 'liberty'),
89 ('xenial', 'mitaka'),
83]) 90])
84 91
85 92
@@ -93,31 +100,73 @@ OPENSTACK_CODENAMES = OrderedDict([
93 ('2014.2', 'juno'), 100 ('2014.2', 'juno'),
94 ('2015.1', 'kilo'), 101 ('2015.1', 'kilo'),
95 ('2015.2', 'liberty'), 102 ('2015.2', 'liberty'),
103 ('2016.1', 'mitaka'),
96]) 104])
97 105
98# The ugly duckling 106# The ugly duckling - must list releases oldest to newest
99SWIFT_CODENAMES = OrderedDict([ 107SWIFT_CODENAMES = OrderedDict([
100 ('1.4.3', 'diablo'), 108 ('diablo',
101 ('1.4.8', 'essex'), 109 ['1.4.3']),
102 ('1.7.4', 'folsom'), 110 ('essex',
103 ('1.8.0', 'grizzly'), 111 ['1.4.8']),
104 ('1.7.7', 'grizzly'), 112 ('folsom',
105 ('1.7.6', 'grizzly'), 113 ['1.7.4']),
106 ('1.10.0', 'havana'), 114 ('grizzly',
107 ('1.9.1', 'havana'), 115 ['1.7.6', '1.7.7', '1.8.0']),
108 ('1.9.0', 'havana'), 116 ('havana',
109 ('1.13.1', 'icehouse'), 117 ['1.9.0', '1.9.1', '1.10.0']),
110 ('1.13.0', 'icehouse'), 118 ('icehouse',
111 ('1.12.0', 'icehouse'), 119 ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
112 ('1.11.0', 'icehouse'), 120 ('juno',
113 ('2.0.0', 'juno'), 121 ['2.0.0', '2.1.0', '2.2.0']),
114 ('2.1.0', 'juno'), 122 ('kilo',
115 ('2.2.0', 'juno'), 123 ['2.2.1', '2.2.2']),
116 ('2.2.1', 'kilo'), 124 ('liberty',
117 ('2.2.2', 'kilo'), 125 ['2.3.0', '2.4.0', '2.5.0']),
118 ('2.3.0', 'liberty'), 126 ('mitaka',
127 ['2.5.0']),
119]) 128])
120 129
130# >= Liberty version->codename mapping
131PACKAGE_CODENAMES = {
132 'nova-common': OrderedDict([
133 ('12.0', 'liberty'),
134 ('13.0', 'mitaka'),
135 ]),
136 'neutron-common': OrderedDict([
137 ('7.0', 'liberty'),
138 ('8.0', 'mitaka'),
139 ]),
140 'cinder-common': OrderedDict([
141 ('7.0', 'liberty'),
142 ('8.0', 'mitaka'),
143 ]),
144 'keystone': OrderedDict([
145 ('8.0', 'liberty'),
146 ('9.0', 'mitaka'),
147 ]),
148 'horizon-common': OrderedDict([
149 ('8.0', 'liberty'),
150 ('9.0', 'mitaka'),
151 ]),
152 'ceilometer-common': OrderedDict([
153 ('5.0', 'liberty'),
154 ('6.0', 'mitaka'),
155 ]),
156 'heat-common': OrderedDict([
157 ('5.0', 'liberty'),
158 ('6.0', 'mitaka'),
159 ]),
160 'glance-common': OrderedDict([
161 ('11.0', 'liberty'),
162 ('12.0', 'mitaka'),
163 ]),
164 'openstack-dashboard': OrderedDict([
165 ('8.0', 'liberty'),
166 ('9.0', 'mitaka'),
167 ]),
168}
169
121DEFAULT_LOOPBACK_SIZE = '5G' 170DEFAULT_LOOPBACK_SIZE = '5G'
122 171
123 172
@@ -167,9 +216,9 @@ def get_os_codename_version(vers):
167 error_out(e) 216 error_out(e)
168 217
169 218
170def get_os_version_codename(codename): 219def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
171 '''Determine OpenStack version number from codename.''' 220 '''Determine OpenStack version number from codename.'''
172 for k, v in six.iteritems(OPENSTACK_CODENAMES): 221 for k, v in six.iteritems(version_map):
173 if v == codename: 222 if v == codename:
174 return k 223 return k
175 e = 'Could not derive OpenStack version for '\ 224 e = 'Could not derive OpenStack version for '\
@@ -177,6 +226,33 @@ def get_os_version_codename(codename):
177 error_out(e) 226 error_out(e)
178 227
179 228
229def get_os_version_codename_swift(codename):
230 '''Determine OpenStack version number of swift from codename.'''
231 for k, v in six.iteritems(SWIFT_CODENAMES):
232 if k == codename:
233 return v[-1]
234 e = 'Could not derive swift version for '\
235 'codename: %s' % codename
236 error_out(e)
237
238
239def get_swift_codename(version):
240 '''Determine OpenStack codename that corresponds to swift version.'''
241 codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
242 if len(codenames) > 1:
243 # If more than one release codename contains this version we determine
244 # the actual codename based on the highest available install source.
245 for codename in reversed(codenames):
246 releases = UBUNTU_OPENSTACK_RELEASE
247 release = [k for k, v in six.iteritems(releases) if codename in v]
248 ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
249 if codename in ret or release[0] in ret:
250 return codename
251 elif len(codenames) == 1:
252 return codenames[0]
253 return None
254
255
180def get_os_codename_package(package, fatal=True): 256def get_os_codename_package(package, fatal=True):
181 '''Derive OpenStack release codename from an installed package.''' 257 '''Derive OpenStack release codename from an installed package.'''
182 import apt_pkg as apt 258 import apt_pkg as apt
@@ -201,20 +277,33 @@ def get_os_codename_package(package, fatal=True):
201 error_out(e) 277 error_out(e)
202 278
203 vers = apt.upstream_version(pkg.current_ver.ver_str) 279 vers = apt.upstream_version(pkg.current_ver.ver_str)
280 if 'swift' in pkg.name:
281 # Fully x.y.z match for swift versions
282 match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
283 else:
284 # x.y match only for 20XX.X
285 # and ignore patch level for other packages
286 match = re.match('^(\d+)\.(\d+)', vers)
204 287
205 try: 288 if match:
206 if 'swift' in pkg.name: 289 vers = match.group(0)
207 swift_vers = vers[:5] 290
208 if swift_vers not in SWIFT_CODENAMES: 291 # >= Liberty independent project versions
209 # Deal with 1.10.0 upward 292 if (package in PACKAGE_CODENAMES and
210 swift_vers = vers[:6] 293 vers in PACKAGE_CODENAMES[package]):
211 return SWIFT_CODENAMES[swift_vers] 294 return PACKAGE_CODENAMES[package][vers]
212 else: 295 else:
213 vers = vers[:6] 296 # < Liberty co-ordinated project versions
214 return OPENSTACK_CODENAMES[vers] 297 try:
215 except KeyError: 298 if 'swift' in pkg.name:
216 e = 'Could not determine OpenStack codename for version %s' % vers 299 return get_swift_codename(vers)
217 error_out(e) 300 else:
301 return OPENSTACK_CODENAMES[vers]
302 except KeyError:
303 if not fatal:
304 return None
305 e = 'Could not determine OpenStack codename for version %s' % vers
306 error_out(e)
218 307
219 308
220def get_os_version_package(pkg, fatal=True): 309def get_os_version_package(pkg, fatal=True):
@@ -226,12 +315,14 @@ def get_os_version_package(pkg, fatal=True):
226 315
227 if 'swift' in pkg: 316 if 'swift' in pkg:
228 vers_map = SWIFT_CODENAMES 317 vers_map = SWIFT_CODENAMES
318 for cname, version in six.iteritems(vers_map):
319 if cname == codename:
320 return version[-1]
229 else: 321 else:
230 vers_map = OPENSTACK_CODENAMES 322 vers_map = OPENSTACK_CODENAMES
231 323 for version, cname in six.iteritems(vers_map):
232 for version, cname in six.iteritems(vers_map): 324 if cname == codename:
233 if cname == codename: 325 return version
234 return version
235 # e = "Could not determine OpenStack version for package: %s" % pkg 326 # e = "Could not determine OpenStack version for package: %s" % pkg
236 # error_out(e) 327 # error_out(e)
237 328
@@ -327,6 +418,9 @@ def configure_installation_source(rel):
327 'liberty': 'trusty-updates/liberty', 418 'liberty': 'trusty-updates/liberty',
328 'liberty/updates': 'trusty-updates/liberty', 419 'liberty/updates': 'trusty-updates/liberty',
329 'liberty/proposed': 'trusty-proposed/liberty', 420 'liberty/proposed': 'trusty-proposed/liberty',
421 'mitaka': 'trusty-updates/mitaka',
422 'mitaka/updates': 'trusty-updates/mitaka',
423 'mitaka/proposed': 'trusty-proposed/mitaka',
330 } 424 }
331 425
332 try: 426 try:
@@ -392,9 +486,18 @@ def openstack_upgrade_available(package):
392 import apt_pkg as apt 486 import apt_pkg as apt
393 src = config('openstack-origin') 487 src = config('openstack-origin')
394 cur_vers = get_os_version_package(package) 488 cur_vers = get_os_version_package(package)
395 available_vers = get_os_version_install_source(src) 489 if "swift" in package:
490 codename = get_os_codename_install_source(src)
491 avail_vers = get_os_version_codename_swift(codename)
492 else:
493 avail_vers = get_os_version_install_source(src)
396 apt.init() 494 apt.init()
397 return apt.version_compare(available_vers, cur_vers) == 1 495 if "swift" in package:
496 major_cur_vers = cur_vers.split('.', 1)[0]
497 major_avail_vers = avail_vers.split('.', 1)[0]
498 major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
499 return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
500 return apt.version_compare(avail_vers, cur_vers) == 1
398 501
399 502
400def ensure_block_device(block_device): 503def ensure_block_device(block_device):
@@ -469,6 +572,12 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
469 relation_prefix=None): 572 relation_prefix=None):
470 hosts = get_ipv6_addr(dynamic_only=False) 573 hosts = get_ipv6_addr(dynamic_only=False)
471 574
575 if config('vip'):
576 vips = config('vip').split()
577 for vip in vips:
578 if vip and is_ipv6(vip):
579 hosts.append(vip)
580
472 kwargs = {'database': database, 581 kwargs = {'database': database,
473 'username': database_user, 582 'username': database_user,
474 'hostname': json.dumps(hosts)} 583 'hostname': json.dumps(hosts)}
@@ -517,7 +626,7 @@ def _git_yaml_load(projects_yaml):
517 return yaml.load(projects_yaml) 626 return yaml.load(projects_yaml)
518 627
519 628
520def git_clone_and_install(projects_yaml, core_project, depth=1): 629def git_clone_and_install(projects_yaml, core_project):
521 """ 630 """
522 Clone/install all specified OpenStack repositories. 631 Clone/install all specified OpenStack repositories.
523 632
@@ -567,6 +676,9 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
567 for p in projects['repositories']: 676 for p in projects['repositories']:
568 repo = p['repository'] 677 repo = p['repository']
569 branch = p['branch'] 678 branch = p['branch']
679 depth = '1'
680 if 'depth' in p.keys():
681 depth = p['depth']
570 if p['name'] == 'requirements': 682 if p['name'] == 'requirements':
571 repo_dir = _git_clone_and_install_single(repo, branch, depth, 683 repo_dir = _git_clone_and_install_single(repo, branch, depth,
572 parent_dir, http_proxy, 684 parent_dir, http_proxy,
@@ -611,19 +723,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
611 """ 723 """
612 Clone and install a single git repository. 724 Clone and install a single git repository.
613 """ 725 """
614 dest_dir = os.path.join(parent_dir, os.path.basename(repo))
615
616 if not os.path.exists(parent_dir): 726 if not os.path.exists(parent_dir):
617 juju_log('Directory already exists at {}. ' 727 juju_log('Directory already exists at {}. '
618 'No need to create directory.'.format(parent_dir)) 728 'No need to create directory.'.format(parent_dir))
619 os.mkdir(parent_dir) 729 os.mkdir(parent_dir)
620 730
621 if not os.path.exists(dest_dir): 731 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
622 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) 732 repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
623 repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
624 depth=depth)
625 else:
626 repo_dir = dest_dir
627 733
628 venv = os.path.join(parent_dir, 'venv') 734 venv = os.path.join(parent_dir, 'venv')
629 735
@@ -704,3 +810,235 @@ def git_yaml_value(projects_yaml, key):
704 return projects[key] 810 return projects[key]
705 811
706 return None 812 return None
813
814
def os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Decorator to set workload status based on complete contexts.

    The decorated function runs first; once it has finished (and has had
    the chance to act on its contexts), set_os_workload_status() is
    invoked so the unit's workload status reflects the new state.

    :param configs: config/context object passed through unchanged to
                    set_os_workload_status()
    :param required_interfaces: mapping of generic interface names to
                                concrete interfaces, passed through
                                unchanged
    :param charm_func: optional charm-specific callable for extra status
                       checks (e.g. a VIP setting), passed through
    :returns: decorator wrapping the target function
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Run the original function first so any context changes it
            # makes are visible to the status assessment below.
            result = f(*args, **kwargs)
            # Set workload status now that contexts have been acted on.
            set_os_workload_status(configs, required_interfaces, charm_func)
            # Bug fix: propagate the wrapped function's return value —
            # previously it was silently discarded, so the decorator was
            # lossy for any hook function that returned something.
            return result
        return wrapped_f
    return wrap
829
830
831def set_os_workload_status(configs, required_interfaces, charm_func=None):
832 """
833 Set workload status based on complete contexts.
834 status-set missing or incomplete contexts
835 and juju-log details of missing required data.
836 charm_func is a charm specific function to run checking
837 for charm specific requirements such as a VIP setting.
838 """
839 incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
840 state = 'active'
841 missing_relations = []
842 incomplete_relations = []
843 message = None
844 charm_state = None
845 charm_message = None
846
847 for generic_interface in incomplete_rel_data.keys():
848 related_interface = None
849 missing_data = {}
850 # Related or not?
851 for interface in incomplete_rel_data[generic_interface]:
852 if incomplete_rel_data[generic_interface][interface].get('related'):
853 related_interface = interface
854 missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
855 # No relation ID for the generic_interface
856 if not related_interface:
857 juju_log("{} relation is missing and must be related for "
858 "functionality. ".format(generic_interface), 'WARN')
859 state = 'blocked'
860 if generic_interface not in missing_relations:
861 missing_relations.append(generic_interface)
862 else:
863 # Relation ID exists but no related unit
864 if not missing_data:
865 # Edge case relation ID exists but departing
866 if ('departed' in hook_name() or 'broken' in hook_name()) \
867 and related_interface in hook_name():
868 state = 'blocked'
869 if generic_interface not in missing_relations:
870 missing_relations.append(generic_interface)
871 juju_log("{} relation's interface, {}, "
872 "relationship is departed or broken "
873 "and is required for functionality."
874 "".format(generic_interface, related_interface), "WARN")
875 # Normal case relation ID exists but no related unit
876 # (joining)
877 else:
878 juju_log("{} relations's interface, {}, is related but has "
879 "no units in the relation."
880 "".format(generic_interface, related_interface), "INFO")
881 # Related unit exists and data missing on the relation
882 else:
883 juju_log("{} relation's interface, {}, is related awaiting "
884 "the following data from the relationship: {}. "
885 "".format(generic_interface, related_interface,
886 ", ".join(missing_data)), "INFO")
887 if state != 'blocked':
888 state = 'waiting'
889 if generic_interface not in incomplete_relations \
890 and generic_interface not in missing_relations:
891 incomplete_relations.append(generic_interface)
892
893 if missing_relations:
894 message = "Missing relations: {}".format(", ".join(missing_relations))
895 if incomplete_relations:
896 message += "; incomplete relations: {}" \
897 "".format(", ".join(incomplete_relations))
898 state = 'blocked'
899 elif incomplete_relations:
900 message = "Incomplete relations: {}" \
901 "".format(", ".join(incomplete_relations))
902 state = 'waiting'