summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJunaid Ali <junaidali@plumgrid.com>2016-05-19 11:35:31 +0500
committerJunaid Ali <junaidali@plumgrid.com>2016-05-19 11:35:31 +0500
commit728031666748cfd6ba2c73047fc49aff7f35e74b (patch)
tree8a7437d2b3e7d5aec2ae779d6e8c4b7cdc2d4f06
parent95868fa97bfd31f00c0815d8684a7c8546cf4435 (diff)
parent86037a00c80417be68860620d1beef3314a4f549 (diff)
Merge: Liberty/Mitaka changes
-rw-r--r--Makefile2
-rw-r--r--bin/charm_helpers_sync.py253
-rw-r--r--charm-helpers-sync.yaml7
-rw-r--r--hooks/charmhelpers/contrib/amulet/deployment.py6
-rw-r--r--hooks/charmhelpers/contrib/amulet/utils.py466
-rw-r--r--hooks/charmhelpers/contrib/ansible/__init__.py254
-rw-r--r--hooks/charmhelpers/contrib/benchmark/__init__.py126
-rw-r--r--hooks/charmhelpers/contrib/charmhelpers/__init__.py208
-rw-r--r--hooks/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--hooks/charmhelpers/contrib/charmsupport/nrpe.py360
-rw-r--r--hooks/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--hooks/charmhelpers/contrib/database/__init__.py0
-rw-r--r--hooks/charmhelpers/contrib/database/mysql.py412
-rw-r--r--hooks/charmhelpers/contrib/network/ip.py80
-rw-r--r--hooks/charmhelpers/contrib/network/ovs/__init__.py8
-rw-r--r--hooks/charmhelpers/contrib/network/ufw.py11
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py149
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py434
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py393
-rw-r--r--hooks/charmhelpers/contrib/openstack/ip.py42
-rw-r--r--hooks/charmhelpers/contrib/openstack/neutron.py81
-rw-r--r--hooks/charmhelpers/contrib/openstack/templating.py32
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py1007
-rw-r--r--hooks/charmhelpers/contrib/peerstorage/__init__.py268
-rw-r--r--hooks/charmhelpers/contrib/python/packages.py46
-rw-r--r--hooks/charmhelpers/contrib/saltstack/__init__.py118
-rw-r--r--hooks/charmhelpers/contrib/ssl/__init__.py94
-rw-r--r--hooks/charmhelpers/contrib/ssl/service.py279
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py864
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/loopback.py10
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/utils.py15
-rw-r--r--hooks/charmhelpers/contrib/templating/__init__.py15
-rw-r--r--hooks/charmhelpers/contrib/templating/contexts.py139
-rw-r--r--hooks/charmhelpers/contrib/templating/jinja.py39
-rw-r--r--hooks/charmhelpers/contrib/templating/pyformat.py29
-rw-r--r--hooks/charmhelpers/contrib/unison/__init__.py313
-rw-r--r--hooks/charmhelpers/core/hookenv.py231
-rw-r--r--hooks/charmhelpers/core/host.py373
-rw-r--r--hooks/charmhelpers/core/hugepage.py71
-rw-r--r--hooks/charmhelpers/core/kernel.py68
-rw-r--r--hooks/charmhelpers/core/services/helpers.py35
-rw-r--r--hooks/charmhelpers/core/strutils.py30
-rw-r--r--hooks/charmhelpers/core/templating.py29
-rw-r--r--hooks/charmhelpers/core/unitdata.py78
-rw-r--r--hooks/charmhelpers/fetch/__init__.py20
-rw-r--r--hooks/charmhelpers/fetch/archiveurl.py2
-rw-r--r--hooks/charmhelpers/fetch/bzrurl.py54
-rw-r--r--hooks/charmhelpers/fetch/giturl.py41
-rw-r--r--hooks/pg_gw_utils.py5
-rw-r--r--unit_tests/test_pg_gw_hooks.py3
50 files changed, 4377 insertions, 3413 deletions
diff --git a/Makefile b/Makefile
index b4bfdbf..2a833ad 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ PYTHON := /usr/bin/env python
4virtualenv: 4virtualenv:
5 virtualenv .venv 5 virtualenv .venv
6 .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \ 6 .venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \
7 netaddr jinja2 7 netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil
8 8
9lint: virtualenv 9lint: virtualenv
10 .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402 10 .venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402
diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py
new file mode 100644
index 0000000..f67fdb9
--- /dev/null
+++ b/bin/charm_helpers_sync.py
@@ -0,0 +1,253 @@
1#!/usr/bin/python
2
3# Copyright 2014-2015 Canonical Limited.
4#
5# This file is part of charm-helpers.
6#
7# charm-helpers is free software: you can redistribute it and/or modify
8# it under the terms of the GNU Lesser General Public License version 3 as
9# published by the Free Software Foundation.
10#
11# charm-helpers is distributed in the hope that it will be useful,
12# but WITHOUT ANY WARRANTY; without even the implied warranty of
13# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14# GNU Lesser General Public License for more details.
15#
16# You should have received a copy of the GNU Lesser General Public License
17# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
18
19# Authors:
20# Adam Gandelman <adamg@ubuntu.com>
21
22import logging
23import optparse
24import os
25import subprocess
26import shutil
27import sys
28import tempfile
29import yaml
30from fnmatch import fnmatch
31
32import six
33
34CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
35
36
37def parse_config(conf_file):
38 if not os.path.isfile(conf_file):
39 logging.error('Invalid config file: %s.' % conf_file)
40 return False
41 return yaml.load(open(conf_file).read())
42
43
44def clone_helpers(work_dir, branch):
45 dest = os.path.join(work_dir, 'charm-helpers')
46 logging.info('Checking out %s to %s.' % (branch, dest))
47 cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
48 subprocess.check_call(cmd)
49 return dest
50
51
52def _module_path(module):
53 return os.path.join(*module.split('.'))
54
55
56def _src_path(src, module):
57 return os.path.join(src, 'charmhelpers', _module_path(module))
58
59
60def _dest_path(dest, module):
61 return os.path.join(dest, _module_path(module))
62
63
64def _is_pyfile(path):
65 return os.path.isfile(path + '.py')
66
67
68def ensure_init(path):
69 '''
70 ensure directories leading up to path are importable, omitting
71 parent directory, eg path='/hooks/helpers/foo'/:
72 hooks/
73 hooks/helpers/__init__.py
74 hooks/helpers/foo/__init__.py
75 '''
76 for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
77 _i = os.path.join(d, '__init__.py')
78 if not os.path.exists(_i):
79 logging.info('Adding missing __init__.py: %s' % _i)
80 open(_i, 'wb').close()
81
82
83def sync_pyfile(src, dest):
84 src = src + '.py'
85 src_dir = os.path.dirname(src)
86 logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
87 if not os.path.exists(dest):
88 os.makedirs(dest)
89 shutil.copy(src, dest)
90 if os.path.isfile(os.path.join(src_dir, '__init__.py')):
91 shutil.copy(os.path.join(src_dir, '__init__.py'),
92 dest)
93 ensure_init(dest)
94
95
96def get_filter(opts=None):
97 opts = opts or []
98 if 'inc=*' in opts:
99 # do not filter any files, include everything
100 return None
101
102 def _filter(dir, ls):
103 incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
104 _filter = []
105 for f in ls:
106 _f = os.path.join(dir, f)
107
108 if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
109 if True not in [fnmatch(_f, inc) for inc in incs]:
110 logging.debug('Not syncing %s, does not match include '
111 'filters (%s)' % (_f, incs))
112 _filter.append(f)
113 else:
114 logging.debug('Including file, which matches include '
115 'filters (%s): %s' % (incs, _f))
116 elif (os.path.isfile(_f) and not _f.endswith('.py')):
117 logging.debug('Not syncing file: %s' % f)
118 _filter.append(f)
119 elif (os.path.isdir(_f) and not
120 os.path.isfile(os.path.join(_f, '__init__.py'))):
121 logging.debug('Not syncing directory: %s' % f)
122 _filter.append(f)
123 return _filter
124 return _filter
125
126
127def sync_directory(src, dest, opts=None):
128 if os.path.exists(dest):
129 logging.debug('Removing existing directory: %s' % dest)
130 shutil.rmtree(dest)
131 logging.info('Syncing directory: %s -> %s.' % (src, dest))
132
133 shutil.copytree(src, dest, ignore=get_filter(opts))
134 ensure_init(dest)
135
136
137def sync(src, dest, module, opts=None):
138
139 # Sync charmhelpers/__init__.py for bootstrap code.
140 sync_pyfile(_src_path(src, '__init__'), dest)
141
142 # Sync other __init__.py files in the path leading to module.
143 m = []
144 steps = module.split('.')[:-1]
145 while steps:
146 m.append(steps.pop(0))
147 init = '.'.join(m + ['__init__'])
148 sync_pyfile(_src_path(src, init),
149 os.path.dirname(_dest_path(dest, init)))
150
151 # Sync the module, or maybe a .py file.
152 if os.path.isdir(_src_path(src, module)):
153 sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
154 elif _is_pyfile(_src_path(src, module)):
155 sync_pyfile(_src_path(src, module),
156 os.path.dirname(_dest_path(dest, module)))
157 else:
158 logging.warn('Could not sync: %s. Neither a pyfile or directory, '
159 'does it even exist?' % module)
160
161
162def parse_sync_options(options):
163 if not options:
164 return []
165 return options.split(',')
166
167
168def extract_options(inc, global_options=None):
169 global_options = global_options or []
170 if global_options and isinstance(global_options, six.string_types):
171 global_options = [global_options]
172 if '|' not in inc:
173 return (inc, global_options)
174 inc, opts = inc.split('|')
175 return (inc, parse_sync_options(opts) + global_options)
176
177
178def sync_helpers(include, src, dest, options=None):
179 if not os.path.isdir(dest):
180 os.makedirs(dest)
181
182 global_options = parse_sync_options(options)
183
184 for inc in include:
185 if isinstance(inc, str):
186 inc, opts = extract_options(inc, global_options)
187 sync(src, dest, inc, opts)
188 elif isinstance(inc, dict):
189 # could also do nested dicts here.
190 for k, v in six.iteritems(inc):
191 if isinstance(v, list):
192 for m in v:
193 inc, opts = extract_options(m, global_options)
194 sync(src, dest, '%s.%s' % (k, inc), opts)
195
196if __name__ == '__main__':
197 parser = optparse.OptionParser()
198 parser.add_option('-c', '--config', action='store', dest='config',
199 default=None, help='helper config file')
200 parser.add_option('-D', '--debug', action='store_true', dest='debug',
201 default=False, help='debug')
202 parser.add_option('-b', '--branch', action='store', dest='branch',
203 help='charm-helpers bzr branch (overrides config)')
204 parser.add_option('-d', '--destination', action='store', dest='dest_dir',
205 help='sync destination dir (overrides config)')
206 (opts, args) = parser.parse_args()
207
208 if opts.debug:
209 logging.basicConfig(level=logging.DEBUG)
210 else:
211 logging.basicConfig(level=logging.INFO)
212
213 if opts.config:
214 logging.info('Loading charm helper config from %s.' % opts.config)
215 config = parse_config(opts.config)
216 if not config:
217 logging.error('Could not parse config from %s.' % opts.config)
218 sys.exit(1)
219 else:
220 config = {}
221
222 if 'branch' not in config:
223 config['branch'] = CHARM_HELPERS_BRANCH
224 if opts.branch:
225 config['branch'] = opts.branch
226 if opts.dest_dir:
227 config['destination'] = opts.dest_dir
228
229 if 'destination' not in config:
230 logging.error('No destination dir. specified as option or config.')
231 sys.exit(1)
232
233 if 'include' not in config:
234 if not args:
235 logging.error('No modules to sync specified as option or config.')
236 sys.exit(1)
237 config['include'] = []
238 [config['include'].append(a) for a in args]
239
240 sync_options = None
241 if 'options' in config:
242 sync_options = config['options']
243 tmpd = tempfile.mkdtemp()
244 try:
245 checkout = clone_helpers(tmpd, config['branch'])
246 sync_helpers(config['include'], checkout, config['destination'],
247 options=sync_options)
248 except Exception as e:
249 logging.error("Could not sync: %s" % e)
250 raise e
251 finally:
252 logging.debug('Cleaning up %s' % tmpd)
253 shutil.rmtree(tmpd)
diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml
index 390163e..4d4cfa5 100644
--- a/charm-helpers-sync.yaml
+++ b/charm-helpers-sync.yaml
@@ -3,5 +3,10 @@ destination: hooks/charmhelpers
3include: 3include:
4 - core 4 - core
5 - fetch 5 - fetch
6 - contrib 6 - contrib.amulet
7 - contrib.hahelpers
8 - contrib.network
9 - contrib.openstack
10 - contrib.python
11 - contrib.storage
7 - payload 12 - payload
diff --git a/hooks/charmhelpers/contrib/amulet/deployment.py b/hooks/charmhelpers/contrib/amulet/deployment.py
index 367d6b4..d451698 100644
--- a/hooks/charmhelpers/contrib/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/amulet/deployment.py
@@ -51,7 +51,8 @@ class AmuletDeployment(object):
51 if 'units' not in this_service: 51 if 'units' not in this_service:
52 this_service['units'] = 1 52 this_service['units'] = 1
53 53
54 self.d.add(this_service['name'], units=this_service['units']) 54 self.d.add(this_service['name'], units=this_service['units'],
55 constraints=this_service.get('constraints'))
55 56
56 for svc in other_services: 57 for svc in other_services:
57 if 'location' in svc: 58 if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
64 if 'units' not in svc: 65 if 'units' not in svc:
65 svc['units'] = 1 66 svc['units'] = 1
66 67
67 self.d.add(svc['name'], charm=branch_location, units=svc['units']) 68 self.d.add(svc['name'], charm=branch_location, units=svc['units'],
69 constraints=svc.get('constraints'))
68 70
69 def _add_relations(self, relations): 71 def _add_relations(self, relations):
70 """Add all of the relations for the services.""" 72 """Add all of the relations for the services."""
diff --git a/hooks/charmhelpers/contrib/amulet/utils.py b/hooks/charmhelpers/contrib/amulet/utils.py
index 3de26af..7e5c25a 100644
--- a/hooks/charmhelpers/contrib/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/amulet/utils.py
@@ -14,17 +14,25 @@
14# You should have received a copy of the GNU Lesser General Public License 14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16 16
17import amulet
18import ConfigParser
19import distro_info
20import io 17import io
18import json
21import logging 19import logging
22import os 20import os
23import re 21import re
24import six 22import socket
23import subprocess
25import sys 24import sys
26import time 25import time
27import urlparse 26import uuid
27
28import amulet
29import distro_info
30import six
31from six.moves import configparser
32if six.PY3:
33 from urllib import parse as urlparse
34else:
35 import urlparse
28 36
29 37
30class AmuletUtils(object): 38class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
108 # /!\ DEPRECATION WARNING (beisner): 116 # /!\ DEPRECATION WARNING (beisner):
109 # New and existing tests should be rewritten to use 117 # New and existing tests should be rewritten to use
110 # validate_services_by_name() as it is aware of init systems. 118 # validate_services_by_name() as it is aware of init systems.
111 self.log.warn('/!\\ DEPRECATION WARNING: use ' 119 self.log.warn('DEPRECATION WARNING: use '
112 'validate_services_by_name instead of validate_services ' 120 'validate_services_by_name instead of validate_services '
113 'due to init system differences.') 121 'due to init system differences.')
114 122
@@ -142,19 +150,23 @@ class AmuletUtils(object):
142 150
143 for service_name in services_list: 151 for service_name in services_list:
144 if (self.ubuntu_releases.index(release) >= systemd_switch or 152 if (self.ubuntu_releases.index(release) >= systemd_switch or
145 service_name == "rabbitmq-server"): 153 service_name in ['rabbitmq-server', 'apache2']):
146 # init is systemd 154 # init is systemd (or regular sysv)
147 cmd = 'sudo service {} status'.format(service_name) 155 cmd = 'sudo service {} status'.format(service_name)
156 output, code = sentry_unit.run(cmd)
157 service_running = code == 0
148 elif self.ubuntu_releases.index(release) < systemd_switch: 158 elif self.ubuntu_releases.index(release) < systemd_switch:
149 # init is upstart 159 # init is upstart
150 cmd = 'sudo status {}'.format(service_name) 160 cmd = 'sudo status {}'.format(service_name)
161 output, code = sentry_unit.run(cmd)
162 service_running = code == 0 and "start/running" in output
151 163
152 output, code = sentry_unit.run(cmd)
153 self.log.debug('{} `{}` returned ' 164 self.log.debug('{} `{}` returned '
154 '{}'.format(sentry_unit.info['unit_name'], 165 '{}'.format(sentry_unit.info['unit_name'],
155 cmd, code)) 166 cmd, code))
156 if code != 0: 167 if not service_running:
157 return "command `{}` returned {}".format(cmd, str(code)) 168 return u"command `{}` returned {} {}".format(
169 cmd, output, str(code))
158 return None 170 return None
159 171
160 def _get_config(self, unit, filename): 172 def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
164 # NOTE(beisner): by default, ConfigParser does not handle options 176 # NOTE(beisner): by default, ConfigParser does not handle options
165 # with no value, such as the flags used in the mysql my.cnf file. 177 # with no value, such as the flags used in the mysql my.cnf file.
166 # https://bugs.python.org/issue7005 178 # https://bugs.python.org/issue7005
167 config = ConfigParser.ConfigParser(allow_no_value=True) 179 config = configparser.ConfigParser(allow_no_value=True)
168 config.readfp(io.StringIO(file_contents)) 180 config.readfp(io.StringIO(file_contents))
169 return config 181 return config
170 182
@@ -259,33 +271,52 @@ class AmuletUtils(object):
259 """Get last modification time of directory.""" 271 """Get last modification time of directory."""
260 return sentry_unit.directory_stat(directory)['mtime'] 272 return sentry_unit.directory_stat(directory)['mtime']
261 273
262 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): 274 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
263 """Get process' start time. 275 """Get start time of a process based on the last modification time
276 of the /proc/pid directory.
264 277
265 Determine start time of the process based on the last modification 278 :sentry_unit: The sentry unit to check for the service on
266 time of the /proc/pid directory. If pgrep_full is True, the process 279 :service: service name to look for in process table
267 name is matched against the full command line. 280 :pgrep_full: [Deprecated] Use full command line search mode with pgrep
268 """ 281 :returns: epoch time of service process start
269 if pgrep_full: 282 :param commands: list of bash commands
270 cmd = 'pgrep -o -f {}'.format(service) 283 :param sentry_units: list of sentry unit pointers
271 else: 284 :returns: None if successful; Failure message otherwise
272 cmd = 'pgrep -o {}'.format(service) 285 """
273 cmd = cmd + ' | grep -v pgrep || exit 0' 286 if pgrep_full is not None:
274 cmd_out = sentry_unit.run(cmd) 287 # /!\ DEPRECATION WARNING (beisner):
275 self.log.debug('CMDout: ' + str(cmd_out)) 288 # No longer implemented, as pidof is now used instead of pgrep.
276 if cmd_out[0]: 289 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
277 self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) 290 self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
278 proc_dir = '/proc/{}'.format(cmd_out[0].strip()) 291 'longer implemented re: lp 1474030.')
279 return self._get_dir_mtime(sentry_unit, proc_dir) 292
293 pid_list = self.get_process_id_list(sentry_unit, service)
294 pid = pid_list[0]
295 proc_dir = '/proc/{}'.format(pid)
296 self.log.debug('Pid for {} on {}: {}'.format(
297 service, sentry_unit.info['unit_name'], pid))
298
299 return self._get_dir_mtime(sentry_unit, proc_dir)
280 300
281 def service_restarted(self, sentry_unit, service, filename, 301 def service_restarted(self, sentry_unit, service, filename,
282 pgrep_full=False, sleep_time=20): 302 pgrep_full=None, sleep_time=20):
283 """Check if service was restarted. 303 """Check if service was restarted.
284 304
285 Compare a service's start time vs a file's last modification time 305 Compare a service's start time vs a file's last modification time
286 (such as a config file for that service) to determine if the service 306 (such as a config file for that service) to determine if the service
287 has been restarted. 307 has been restarted.
288 """ 308 """
309 # /!\ DEPRECATION WARNING (beisner):
310 # This method is prone to races in that no before-time is known.
311 # Use validate_service_config_changed instead.
312
313 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
314 # used instead of pgrep. pgrep_full is still passed through to ensure
315 # deprecation WARNS. lp1474030
316 self.log.warn('DEPRECATION WARNING: use '
317 'validate_service_config_changed instead of '
318 'service_restarted due to known races.')
319
289 time.sleep(sleep_time) 320 time.sleep(sleep_time)
290 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= 321 if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
291 self._get_file_mtime(sentry_unit, filename)): 322 self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
294 return False 325 return False
295 326
296 def service_restarted_since(self, sentry_unit, mtime, service, 327 def service_restarted_since(self, sentry_unit, mtime, service,
297 pgrep_full=False, sleep_time=20, 328 pgrep_full=None, sleep_time=20,
298 retry_count=2): 329 retry_count=30, retry_sleep_time=10):
299 """Check if service was been started after a given time. 330 """Check if service was been started after a given time.
300 331
301 Args: 332 Args:
302 sentry_unit (sentry): The sentry unit to check for the service on 333 sentry_unit (sentry): The sentry unit to check for the service on
303 mtime (float): The epoch time to check against 334 mtime (float): The epoch time to check against
304 service (string): service name to look for in process table 335 service (string): service name to look for in process table
305 pgrep_full (boolean): Use full command line search mode with pgrep 336 pgrep_full: [Deprecated] Use full command line search mode with pgrep
306 sleep_time (int): Seconds to sleep before looking for process 337 sleep_time (int): Initial sleep time (s) before looking for file
307 retry_count (int): If service is not found, how many times to retry 338 retry_sleep_time (int): Time (s) to sleep between retries
339 retry_count (int): If file is not found, how many times to retry
308 340
309 Returns: 341 Returns:
310 bool: True if service found and its start time it newer than mtime, 342 bool: True if service found and its start time it newer than mtime,
311 False if service is older than mtime or if service was 343 False if service is older than mtime or if service was
312 not found. 344 not found.
313 """ 345 """
314 self.log.debug('Checking %s restarted since %s' % (service, mtime)) 346 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
347 # used instead of pgrep. pgrep_full is still passed through to ensure
348 # deprecation WARNS. lp1474030
349
350 unit_name = sentry_unit.info['unit_name']
351 self.log.debug('Checking that %s service restarted since %s on '
352 '%s' % (service, mtime, unit_name))
315 time.sleep(sleep_time) 353 time.sleep(sleep_time)
316 proc_start_time = self._get_proc_start_time(sentry_unit, service, 354 proc_start_time = None
317 pgrep_full) 355 tries = 0
318 while retry_count > 0 and not proc_start_time: 356 while tries <= retry_count and not proc_start_time:
319 self.log.debug('No pid file found for service %s, will retry %i ' 357 try:
320 'more times' % (service, retry_count)) 358 proc_start_time = self._get_proc_start_time(sentry_unit,
321 time.sleep(30) 359 service,
322 proc_start_time = self._get_proc_start_time(sentry_unit, service, 360 pgrep_full)
323 pgrep_full) 361 self.log.debug('Attempt {} to get {} proc start time on {} '
324 retry_count = retry_count - 1 362 'OK'.format(tries, service, unit_name))
363 except IOError as e:
364 # NOTE(beisner) - race avoidance, proc may not exist yet.
365 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
366 self.log.debug('Attempt {} to get {} proc start time on {} '
367 'failed\n{}'.format(tries, service,
368 unit_name, e))
369 time.sleep(retry_sleep_time)
370 tries += 1
325 371
326 if not proc_start_time: 372 if not proc_start_time:
327 self.log.warn('No proc start time found, assuming service did ' 373 self.log.warn('No proc start time found, assuming service did '
328 'not start') 374 'not start')
329 return False 375 return False
330 if proc_start_time >= mtime: 376 if proc_start_time >= mtime:
331 self.log.debug('proc start time is newer than provided mtime' 377 self.log.debug('Proc start time is newer than provided mtime'
332 '(%s >= %s)' % (proc_start_time, mtime)) 378 '(%s >= %s) on %s (OK)' % (proc_start_time,
379 mtime, unit_name))
333 return True 380 return True
334 else: 381 else:
335 self.log.warn('proc start time (%s) is older than provided mtime ' 382 self.log.warn('Proc start time (%s) is older than provided mtime '
336 '(%s), service did not restart' % (proc_start_time, 383 '(%s) on %s, service did not '
337 mtime)) 384 'restart' % (proc_start_time, mtime, unit_name))
338 return False 385 return False
339 386
340 def config_updated_since(self, sentry_unit, filename, mtime, 387 def config_updated_since(self, sentry_unit, filename, mtime,
341 sleep_time=20): 388 sleep_time=20, retry_count=30,
389 retry_sleep_time=10):
342 """Check if file was modified after a given time. 390 """Check if file was modified after a given time.
343 391
344 Args: 392 Args:
345 sentry_unit (sentry): The sentry unit to check the file mtime on 393 sentry_unit (sentry): The sentry unit to check the file mtime on
346 filename (string): The file to check mtime of 394 filename (string): The file to check mtime of
347 mtime (float): The epoch time to check against 395 mtime (float): The epoch time to check against
348 sleep_time (int): Seconds to sleep before looking for process 396 sleep_time (int): Initial sleep time (s) before looking for file
397 retry_sleep_time (int): Time (s) to sleep between retries
398 retry_count (int): If file is not found, how many times to retry
349 399
350 Returns: 400 Returns:
351 bool: True if file was modified more recently than mtime, False if 401 bool: True if file was modified more recently than mtime, False if
352 file was modified before mtime, 402 file was modified before mtime, or if file not found.
353 """ 403 """
354 self.log.debug('Checking %s updated since %s' % (filename, mtime)) 404 unit_name = sentry_unit.info['unit_name']
405 self.log.debug('Checking that %s updated since %s on '
406 '%s' % (filename, mtime, unit_name))
355 time.sleep(sleep_time) 407 time.sleep(sleep_time)
356 file_mtime = self._get_file_mtime(sentry_unit, filename) 408 file_mtime = None
409 tries = 0
410 while tries <= retry_count and not file_mtime:
411 try:
412 file_mtime = self._get_file_mtime(sentry_unit, filename)
413 self.log.debug('Attempt {} to get {} file mtime on {} '
414 'OK'.format(tries, filename, unit_name))
415 except IOError as e:
416 # NOTE(beisner) - race avoidance, file may not exist yet.
417 # https://bugs.launchpad.net/charm-helpers/+bug/1474030
418 self.log.debug('Attempt {} to get {} file mtime on {} '
419 'failed\n{}'.format(tries, filename,
420 unit_name, e))
421 time.sleep(retry_sleep_time)
422 tries += 1
423
424 if not file_mtime:
425 self.log.warn('Could not determine file mtime, assuming '
426 'file does not exist')
427 return False
428
357 if file_mtime >= mtime: 429 if file_mtime >= mtime:
358 self.log.debug('File mtime is newer than provided mtime ' 430 self.log.debug('File mtime is newer than provided mtime '
359 '(%s >= %s)' % (file_mtime, mtime)) 431 '(%s >= %s) on %s (OK)' % (file_mtime,
432 mtime, unit_name))
360 return True 433 return True
361 else: 434 else:
362 self.log.warn('File mtime %s is older than provided mtime %s' 435 self.log.warn('File mtime is older than provided mtime'
363 % (file_mtime, mtime)) 436 '(%s < on %s) on %s' % (file_mtime,
437 mtime, unit_name))
364 return False 438 return False
365 439
366 def validate_service_config_changed(self, sentry_unit, mtime, service, 440 def validate_service_config_changed(self, sentry_unit, mtime, service,
367 filename, pgrep_full=False, 441 filename, pgrep_full=None,
368 sleep_time=20, retry_count=2): 442 sleep_time=20, retry_count=30,
443 retry_sleep_time=10):
369 """Check service and file were updated after mtime 444 """Check service and file were updated after mtime
370 445
371 Args: 446 Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
373 mtime (float): The epoch time to check against 448 mtime (float): The epoch time to check against
374 service (string): service name to look for in process table 449 service (string): service name to look for in process table
375 filename (string): The file to check mtime of 450 filename (string): The file to check mtime of
376 pgrep_full (boolean): Use full command line search mode with pgrep 451 pgrep_full: [Deprecated] Use full command line search mode with pgrep
377 sleep_time (int): Seconds to sleep before looking for process 452 sleep_time (int): Initial sleep in seconds to pass to test helpers
378 retry_count (int): If service is not found, how many times to retry 453 retry_count (int): If service is not found, how many times to retry
454 retry_sleep_time (int): Time in seconds to wait between retries
379 455
380 Typical Usage: 456 Typical Usage:
381 u = OpenStackAmuletUtils(ERROR) 457 u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
392 mtime, False if service is older than mtime or if service was 468 mtime, False if service is older than mtime or if service was
393 not found or if filename was modified before mtime. 469 not found or if filename was modified before mtime.
394 """ 470 """
395 self.log.debug('Checking %s restarted since %s' % (service, mtime)) 471
396 time.sleep(sleep_time) 472 # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
397 service_restart = self.service_restarted_since(sentry_unit, mtime, 473 # used instead of pgrep. pgrep_full is still passed through to ensure
398 service, 474 # deprecation WARNS. lp1474030
399 pgrep_full=pgrep_full, 475
400 sleep_time=0, 476 service_restart = self.service_restarted_since(
401 retry_count=retry_count) 477 sentry_unit, mtime,
402 config_update = self.config_updated_since(sentry_unit, filename, mtime, 478 service,
403 sleep_time=0) 479 pgrep_full=pgrep_full,
480 sleep_time=sleep_time,
481 retry_count=retry_count,
482 retry_sleep_time=retry_sleep_time)
483
484 config_update = self.config_updated_since(
485 sentry_unit,
486 filename,
487 mtime,
488 sleep_time=sleep_time,
489 retry_count=retry_count,
490 retry_sleep_time=retry_sleep_time)
491
404 return service_restart and config_update 492 return service_restart and config_update
405 493
406 def get_sentry_time(self, sentry_unit): 494 def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@ class AmuletUtils(object):
418 """Return a list of all Ubuntu releases in order of release.""" 506 """Return a list of all Ubuntu releases in order of release."""
419 _d = distro_info.UbuntuDistroInfo() 507 _d = distro_info.UbuntuDistroInfo()
420 _release_list = _d.all 508 _release_list = _d.all
421 self.log.debug('Ubuntu release list: {}'.format(_release_list))
422 return _release_list 509 return _release_list
423 510
424 def file_to_url(self, file_rel_path): 511 def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
450 cmd, code, output)) 537 cmd, code, output))
451 return None 538 return None
452 539
453 def get_process_id_list(self, sentry_unit, process_name): 540 def get_process_id_list(self, sentry_unit, process_name,
541 expect_success=True):
454 """Get a list of process ID(s) from a single sentry juju unit 542 """Get a list of process ID(s) from a single sentry juju unit
455 for a single process name. 543 for a single process name.
456 544
457 :param sentry_unit: Pointer to amulet sentry instance (juju unit) 545 :param sentry_unit: Amulet sentry instance (juju unit)
458 :param process_name: Process name 546 :param process_name: Process name
547 :param expect_success: If False, expect the PID to be missing,
548 raise if it is present.
459 :returns: List of process IDs 549 :returns: List of process IDs
460 """ 550 """
461 cmd = 'pidof {}'.format(process_name) 551 cmd = 'pidof -x {}'.format(process_name)
552 if not expect_success:
553 cmd += " || exit 0 && exit 1"
462 output, code = sentry_unit.run(cmd) 554 output, code = sentry_unit.run(cmd)
463 if code != 0: 555 if code != 0:
464 msg = ('{} `{}` returned {} ' 556 msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
467 amulet.raise_status(amulet.FAIL, msg=msg) 559 amulet.raise_status(amulet.FAIL, msg=msg)
468 return str(output).split() 560 return str(output).split()
469 561
470 def get_unit_process_ids(self, unit_processes): 562 def get_unit_process_ids(self, unit_processes, expect_success=True):
471 """Construct a dict containing unit sentries, process names, and 563 """Construct a dict containing unit sentries, process names, and
472 process IDs.""" 564 process IDs.
565
566 :param unit_processes: A dictionary of Amulet sentry instance
567 to list of process names.
568 :param expect_success: if False expect the processes to not be
569 running, raise if they are.
570 :returns: Dictionary of Amulet sentry instance to dictionary
571 of process names to PIDs.
572 """
473 pid_dict = {} 573 pid_dict = {}
474 for sentry_unit, process_list in unit_processes.iteritems(): 574 for sentry_unit, process_list in six.iteritems(unit_processes):
475 pid_dict[sentry_unit] = {} 575 pid_dict[sentry_unit] = {}
476 for process in process_list: 576 for process in process_list:
477 pids = self.get_process_id_list(sentry_unit, process) 577 pids = self.get_process_id_list(
578 sentry_unit, process, expect_success=expect_success)
478 pid_dict[sentry_unit].update({process: pids}) 579 pid_dict[sentry_unit].update({process: pids})
479 return pid_dict 580 return pid_dict
480 581
@@ -488,7 +589,7 @@ class AmuletUtils(object):
488 return ('Unit count mismatch. expected, actual: {}, ' 589 return ('Unit count mismatch. expected, actual: {}, '
489 '{} '.format(len(expected), len(actual))) 590 '{} '.format(len(expected), len(actual)))
490 591
491 for (e_sentry, e_proc_names) in expected.iteritems(): 592 for (e_sentry, e_proc_names) in six.iteritems(expected):
492 e_sentry_name = e_sentry.info['unit_name'] 593 e_sentry_name = e_sentry.info['unit_name']
493 if e_sentry in actual.keys(): 594 if e_sentry in actual.keys():
494 a_proc_names = actual[e_sentry] 595 a_proc_names = actual[e_sentry]
@@ -500,22 +601,40 @@ class AmuletUtils(object):
500 return ('Process name count mismatch. expected, actual: {}, ' 601 return ('Process name count mismatch. expected, actual: {}, '
501 '{}'.format(len(expected), len(actual))) 602 '{}'.format(len(expected), len(actual)))
502 603
503 for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ 604 for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
504 zip(e_proc_names.items(), a_proc_names.items()): 605 zip(e_proc_names.items(), a_proc_names.items()):
505 if e_proc_name != a_proc_name: 606 if e_proc_name != a_proc_name:
506 return ('Process name mismatch. expected, actual: {}, ' 607 return ('Process name mismatch. expected, actual: {}, '
507 '{}'.format(e_proc_name, a_proc_name)) 608 '{}'.format(e_proc_name, a_proc_name))
508 609
509 a_pids_length = len(a_pids) 610 a_pids_length = len(a_pids)
510 if e_pids_length != a_pids_length: 611 fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
511 return ('PID count mismatch. {} ({}) expected, actual: '
512 '{}, {} ({})'.format(e_sentry_name, e_proc_name, 612 '{}, {} ({})'.format(e_sentry_name, e_proc_name,
513 e_pids_length, a_pids_length, 613 e_pids, a_pids_length,
514 a_pids)) 614 a_pids))
615
616 # If expected is a list, ensure at least one PID quantity match
617 if isinstance(e_pids, list) and \
618 a_pids_length not in e_pids:
619 return fail_msg
620 # If expected is not bool and not list,
621 # ensure PID quantities match
622 elif not isinstance(e_pids, bool) and \
623 not isinstance(e_pids, list) and \
624 a_pids_length != e_pids:
625 return fail_msg
626 # If expected is bool True, ensure 1 or more PIDs exist
627 elif isinstance(e_pids, bool) and \
628 e_pids is True and a_pids_length < 1:
629 return fail_msg
630 # If expected is bool False, ensure 0 PIDs exist
631 elif isinstance(e_pids, bool) and \
632 e_pids is False and a_pids_length != 0:
633 return fail_msg
515 else: 634 else:
516 self.log.debug('PID check OK: {} {} {}: ' 635 self.log.debug('PID check OK: {} {} {}: '
517 '{}'.format(e_sentry_name, e_proc_name, 636 '{}'.format(e_sentry_name, e_proc_name,
518 e_pids_length, a_pids)) 637 e_pids, a_pids))
519 return None 638 return None
520 639
521 def validate_list_of_identical_dicts(self, list_of_dicts): 640 def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -531,3 +650,180 @@ class AmuletUtils(object):
531 return 'Dicts within list are not identical' 650 return 'Dicts within list are not identical'
532 651
533 return None 652 return None
653
654 def validate_sectionless_conf(self, file_contents, expected):
655 """A crude conf parser. Useful to inspect configuration files which
656 do not have section headers (as would be necessary in order to use
657 the configparser). Such as openstack-dashboard or rabbitmq confs."""
658 for line in file_contents.split('\n'):
659 if '=' in line:
660 args = line.split('=')
661 if len(args) <= 1:
662 continue
663 key = args[0].strip()
664 value = args[1].strip()
665 if key in expected.keys():
666 if expected[key] != value:
667 msg = ('Config mismatch. Expected, actual: {}, '
668 '{}'.format(expected[key], value))
669 amulet.raise_status(amulet.FAIL, msg=msg)
670
671 def get_unit_hostnames(self, units):
672 """Return a dict of juju unit names to hostnames."""
673 host_names = {}
674 for unit in units:
675 host_names[unit.info['unit_name']] = \
676 str(unit.file_contents('/etc/hostname').strip())
677 self.log.debug('Unit host names: {}'.format(host_names))
678 return host_names
679
680 def run_cmd_unit(self, sentry_unit, cmd):
681 """Run a command on a unit, return the output and exit code."""
682 output, code = sentry_unit.run(cmd)
683 if code == 0:
684 self.log.debug('{} `{}` command returned {} '
685 '(OK)'.format(sentry_unit.info['unit_name'],
686 cmd, code))
687 else:
688 msg = ('{} `{}` command returned {} '
689 '{}'.format(sentry_unit.info['unit_name'],
690 cmd, code, output))
691 amulet.raise_status(amulet.FAIL, msg=msg)
692 return str(output), code
693
694 def file_exists_on_unit(self, sentry_unit, file_name):
695 """Check if a file exists on a unit."""
696 try:
697 sentry_unit.file_stat(file_name)
698 return True
699 except IOError:
700 return False
701 except Exception as e:
702 msg = 'Error checking file {}: {}'.format(file_name, e)
703 amulet.raise_status(amulet.FAIL, msg=msg)
704
705 def file_contents_safe(self, sentry_unit, file_name,
706 max_wait=60, fatal=False):
707 """Get file contents from a sentry unit. Wrap amulet file_contents
708 with retry logic to address races where a file checks as existing,
709 but no longer exists by the time file_contents is called.
710 Return None if file not found. Optionally raise if fatal is True."""
711 unit_name = sentry_unit.info['unit_name']
712 file_contents = False
713 tries = 0
714 while not file_contents and tries < (max_wait / 4):
715 try:
716 file_contents = sentry_unit.file_contents(file_name)
717 except IOError:
718 self.log.debug('Attempt {} to open file {} from {} '
719 'failed'.format(tries, file_name,
720 unit_name))
721 time.sleep(4)
722 tries += 1
723
724 if file_contents:
725 return file_contents
726 elif not fatal:
727 return None
728 elif fatal:
729 msg = 'Failed to get file contents from unit.'
730 amulet.raise_status(amulet.FAIL, msg)
731
732 def port_knock_tcp(self, host="localhost", port=22, timeout=15):
 733 """Open a TCP socket to check for a listening service on a host.
734
735 :param host: host name or IP address, default to localhost
736 :param port: TCP port number, default to 22
737 :param timeout: Connect timeout, default to 15 seconds
738 :returns: True if successful, False if connect failed
739 """
740
741 # Resolve host name if possible
742 try:
743 connect_host = socket.gethostbyname(host)
744 host_human = "{} ({})".format(connect_host, host)
745 except socket.error as e:
746 self.log.warn('Unable to resolve address: '
747 '{} ({}) Trying anyway!'.format(host, e))
748 connect_host = host
749 host_human = connect_host
750
751 # Attempt socket connection
752 try:
753 knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
754 knock.settimeout(timeout)
755 knock.connect((connect_host, port))
756 knock.close()
757 self.log.debug('Socket connect OK for host '
758 '{} on port {}.'.format(host_human, port))
759 return True
760 except socket.error as e:
761 self.log.debug('Socket connect FAIL for'
762 ' {} port {} ({})'.format(host_human, port, e))
763 return False
764
765 def port_knock_units(self, sentry_units, port=22,
766 timeout=15, expect_success=True):
 767 """Open a TCP socket to check for a listening service on each
768 listed juju unit.
769
770 :param sentry_units: list of sentry unit pointers
771 :param port: TCP port number, default to 22
772 :param timeout: Connect timeout, default to 15 seconds
773 :expect_success: True by default, set False to invert logic
774 :returns: None if successful, Failure message otherwise
775 """
776 for unit in sentry_units:
777 host = unit.info['public-address']
778 connected = self.port_knock_tcp(host, port, timeout)
779 if not connected and expect_success:
780 return 'Socket connect failed.'
781 elif connected and not expect_success:
782 return 'Socket connected unexpectedly.'
783
784 def get_uuid_epoch_stamp(self):
785 """Returns a stamp string based on uuid4 and epoch time. Useful in
786 generating test messages which need to be unique-ish."""
787 return '[{}-{}]'.format(uuid.uuid4(), time.time())
788
789# amulet juju action helpers:
790 def run_action(self, unit_sentry, action,
791 _check_output=subprocess.check_output,
792 params=None):
793 """Run the named action on a given unit sentry.
794
795 params a dict of parameters to use
796 _check_output parameter is used for dependency injection.
797
798 @return action_id.
799 """
800 unit_id = unit_sentry.info["unit_name"]
801 command = ["juju", "action", "do", "--format=json", unit_id, action]
802 if params is not None:
803 for key, value in params.iteritems():
804 command.append("{}={}".format(key, value))
805 self.log.info("Running command: %s\n" % " ".join(command))
806 output = _check_output(command, universal_newlines=True)
807 data = json.loads(output)
808 action_id = data[u'Action queued with id']
809 return action_id
810
811 def wait_on_action(self, action_id, _check_output=subprocess.check_output):
812 """Wait for a given action, returning if it completed or not.
813
814 _check_output parameter is used for dependency injection.
815 """
816 command = ["juju", "action", "fetch", "--format=json", "--wait=0",
817 action_id]
818 output = _check_output(command, universal_newlines=True)
819 data = json.loads(output)
820 return data.get(u"status") == "completed"
821
822 def status_get(self, unit):
823 """Return the current service status of this unit."""
824 raw_status, return_code = unit.run(
825 "status-get --format=json --include-data")
826 if return_code != 0:
827 return ("unknown", "")
828 status = json.loads(raw_status)
829 return (status["status"], status["message"])
diff --git a/hooks/charmhelpers/contrib/ansible/__init__.py b/hooks/charmhelpers/contrib/ansible/__init__.py
deleted file mode 100644
index 944f406..0000000
--- a/hooks/charmhelpers/contrib/ansible/__init__.py
+++ /dev/null
@@ -1,254 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# Copyright 2013 Canonical Ltd.
18#
19# Authors:
20# Charm Helpers Developers <juju@lists.ubuntu.com>
21"""Charm Helpers ansible - declare the state of your machines.
22
23This helper enables you to declare your machine state, rather than
24program it procedurally (and have to test each change to your procedures).
25Your install hook can be as simple as::
26
27 {{{
28 import charmhelpers.contrib.ansible
29
30
31 def install():
32 charmhelpers.contrib.ansible.install_ansible_support()
33 charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
34 }}}
35
36and won't need to change (nor will its tests) when you change the machine
37state.
38
39All of your juju config and relation-data are available as template
40variables within your playbooks and templates. An install playbook looks
41something like::
42
43 {{{
44 ---
45 - hosts: localhost
46 user: root
47
48 tasks:
49 - name: Add private repositories.
50 template:
51 src: ../templates/private-repositories.list.jinja2
52 dest: /etc/apt/sources.list.d/private.list
53
54 - name: Update the cache.
55 apt: update_cache=yes
56
57 - name: Install dependencies.
58 apt: pkg={{ item }}
59 with_items:
60 - python-mimeparse
61 - python-webob
62 - sunburnt
63
64 - name: Setup groups.
65 group: name={{ item.name }} gid={{ item.gid }}
66 with_items:
67 - { name: 'deploy_user', gid: 1800 }
68 - { name: 'service_user', gid: 1500 }
69
70 ...
71 }}}
72
73Read more online about `playbooks`_ and standard ansible `modules`_.
74
75.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
76.. _modules: http://www.ansibleworks.com/docs/modules.html
77
 78A further feature of the ansible hooks is to provide a lightweight "action"
79scripting tool. This is a decorator that you apply to a function, and that
80function can now receive cli args, and can pass extra args to the playbook.
81
82e.g.
83
84
85@hooks.action()
86def some_action(amount, force="False"):
87 "Usage: some-action AMOUNT [force=True]" # <-- shown on error
88 # process the arguments
89 # do some calls
90 # return extra-vars to be passed to ansible-playbook
91 return {
92 'amount': int(amount),
93 'type': force,
94 }
95
96You can now create a symlink to hooks.py that can be invoked like a hook, but
97with cli params:
98
99# link actions/some-action to hooks/hooks.py
100
101actions/some-action amount=10 force=true
102
103"""
104import os
105import stat
106import subprocess
107import functools
108
109import charmhelpers.contrib.templating.contexts
110import charmhelpers.core.host
111import charmhelpers.core.hookenv
112import charmhelpers.fetch
113
114
115charm_dir = os.environ.get('CHARM_DIR', '')
116ansible_hosts_path = '/etc/ansible/hosts'
117# Ansible will automatically include any vars in the following
118# file in its inventory when run locally.
119ansible_vars_path = '/etc/ansible/host_vars/localhost'
120
121
122def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
123 """Installs the ansible package.
124
125 By default it is installed from the `PPA`_ linked from
 126 the ansible `website`_ or from a ppa specified by a charm config.
127
128 .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
129 .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
130
131 If from_ppa is empty, you must ensure that the package is available
132 from a configured repository.
133 """
134 if from_ppa:
135 charmhelpers.fetch.add_source(ppa_location)
136 charmhelpers.fetch.apt_update(fatal=True)
137 charmhelpers.fetch.apt_install('ansible')
138 with open(ansible_hosts_path, 'w+') as hosts_file:
139 hosts_file.write('localhost ansible_connection=local')
140
141
142def apply_playbook(playbook, tags=None, extra_vars=None):
143 tags = tags or []
144 tags = ",".join(tags)
145 charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
146 ansible_vars_path, namespace_separator='__',
147 allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))
148
149 # we want ansible's log output to be unbuffered
150 env = os.environ.copy()
151 env['PYTHONUNBUFFERED'] = "1"
152 call = [
153 'ansible-playbook',
154 '-c',
155 'local',
156 playbook,
157 ]
158 if tags:
159 call.extend(['--tags', '{}'.format(tags)])
160 if extra_vars:
161 extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
162 call.extend(['--extra-vars', " ".join(extra)])
163 subprocess.check_call(call, env=env)
164
165
166class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
167 """Run a playbook with the hook-name as the tag.
168
169 This helper builds on the standard hookenv.Hooks helper,
170 but additionally runs the playbook with the hook-name specified
171 using --tags (ie. running all the tasks tagged with the hook-name).
172
173 Example::
174
175 hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
176
177 # All the tasks within my_machine_state.yaml tagged with 'install'
178 # will be run automatically after do_custom_work()
179 @hooks.hook()
180 def install():
181 do_custom_work()
182
183 # For most of your hooks, you won't need to do anything other
184 # than run the tagged tasks for the hook:
185 @hooks.hook('config-changed', 'start', 'stop')
186 def just_use_playbook():
187 pass
188
189 # As a convenience, you can avoid the above noop function by specifying
190 # the hooks which are handled by ansible-only and they'll be registered
191 # for you:
192 # hooks = AnsibleHooks(
193 # 'playbooks/my_machine_state.yaml',
194 # default_hooks=['config-changed', 'start', 'stop'])
195
196 if __name__ == "__main__":
197 # execute a hook based on the name the program is called by
198 hooks.execute(sys.argv)
199
200 """
201
202 def __init__(self, playbook_path, default_hooks=None):
203 """Register any hooks handled by ansible."""
204 super(AnsibleHooks, self).__init__()
205
206 self._actions = {}
207 self.playbook_path = playbook_path
208
209 default_hooks = default_hooks or []
210
211 def noop(*args, **kwargs):
212 pass
213
214 for hook in default_hooks:
215 self.register(hook, noop)
216
217 def register_action(self, name, function):
218 """Register a hook"""
219 self._actions[name] = function
220
221 def execute(self, args):
222 """Execute the hook followed by the playbook using the hook as tag."""
223 hook_name = os.path.basename(args[0])
224 extra_vars = None
225 if hook_name in self._actions:
226 extra_vars = self._actions[hook_name](args[1:])
227 else:
228 super(AnsibleHooks, self).execute(args)
229
230 charmhelpers.contrib.ansible.apply_playbook(
231 self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
232
233 def action(self, *action_names):
234 """Decorator, registering them as actions"""
235 def action_wrapper(decorated):
236
237 @functools.wraps(decorated)
238 def wrapper(argv):
239 kwargs = dict(arg.split('=') for arg in argv)
240 try:
241 return decorated(**kwargs)
242 except TypeError as e:
243 if decorated.__doc__:
244 e.args += (decorated.__doc__,)
245 raise
246
247 self.register_action(decorated.__name__, wrapper)
248 if '_' in decorated.__name__:
249 self.register_action(
250 decorated.__name__.replace('_', '-'), wrapper)
251
252 return wrapper
253
254 return action_wrapper
diff --git a/hooks/charmhelpers/contrib/benchmark/__init__.py b/hooks/charmhelpers/contrib/benchmark/__init__.py
deleted file mode 100644
index 1d039ea..0000000
--- a/hooks/charmhelpers/contrib/benchmark/__init__.py
+++ /dev/null
@@ -1,126 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import subprocess
18import time
19import os
20from distutils.spawn import find_executable
21
22from charmhelpers.core.hookenv import (
23 in_relation_hook,
24 relation_ids,
25 relation_set,
26 relation_get,
27)
28
29
30def action_set(key, val):
31 if find_executable('action-set'):
32 action_cmd = ['action-set']
33
34 if isinstance(val, dict):
35 for k, v in iter(val.items()):
36 action_set('%s.%s' % (key, k), v)
37 return True
38
39 action_cmd.append('%s=%s' % (key, val))
40 subprocess.check_call(action_cmd)
41 return True
42 return False
43
44
45class Benchmark():
46 """
47 Helper class for the `benchmark` interface.
48
49 :param list actions: Define the actions that are also benchmarks
50
51 From inside the benchmark-relation-changed hook, you would
52 Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
53
54 Examples:
55
56 siege = Benchmark(['siege'])
57 siege.start()
58 [... run siege ...]
59 # The higher the score, the better the benchmark
60 siege.set_composite_score(16.70, 'trans/sec', 'desc')
61 siege.finish()
62
63
64 """
65
66 BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
67
68 required_keys = [
69 'hostname',
70 'port',
71 'graphite_port',
72 'graphite_endpoint',
73 'api_port'
74 ]
75
76 def __init__(self, benchmarks=None):
77 if in_relation_hook():
78 if benchmarks is not None:
79 for rid in sorted(relation_ids('benchmark')):
80 relation_set(relation_id=rid, relation_settings={
81 'benchmarks': ",".join(benchmarks)
82 })
83
84 # Check the relation data
85 config = {}
86 for key in self.required_keys:
87 val = relation_get(key)
88 if val is not None:
89 config[key] = val
90 else:
91 # We don't have all of the required keys
92 config = {}
93 break
94
95 if len(config):
96 with open(self.BENCHMARK_CONF, 'w') as f:
97 for key, val in iter(config.items()):
98 f.write("%s=%s\n" % (key, val))
99
100 @staticmethod
101 def start():
102 action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
103
104 """
105 If the collectd charm is also installed, tell it to send a snapshot
106 of the current profile data.
107 """
108 COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
109 if os.path.exists(COLLECT_PROFILE_DATA):
110 subprocess.check_output([COLLECT_PROFILE_DATA])
111
112 @staticmethod
113 def finish():
114 action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
115
116 @staticmethod
117 def set_composite_score(value, units, direction='asc'):
118 """
119 Set the composite score for a benchmark run. This is a single number
120 representative of the benchmark results. This could be the most
121 important metric, or an amalgamation of metric scores.
122 """
123 return action_set(
124 "meta.composite",
125 {'value': value, 'units': units, 'direction': direction}
126 )
diff --git a/hooks/charmhelpers/contrib/charmhelpers/__init__.py b/hooks/charmhelpers/contrib/charmhelpers/__init__.py
deleted file mode 100644
index edba750..0000000
--- a/hooks/charmhelpers/contrib/charmhelpers/__init__.py
+++ /dev/null
@@ -1,208 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# Copyright 2012 Canonical Ltd. This software is licensed under the
18# GNU Affero General Public License version 3 (see the file LICENSE).
19
20import warnings
21warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
22
23import operator
24import tempfile
25import time
26import yaml
27import subprocess
28
29import six
30if six.PY3:
31 from urllib.request import urlopen
32 from urllib.error import (HTTPError, URLError)
33else:
34 from urllib2 import (urlopen, HTTPError, URLError)
35
36"""Helper functions for writing Juju charms in Python."""
37
38__metaclass__ = type
39__all__ = [
40 # 'get_config', # core.hookenv.config()
41 # 'log', # core.hookenv.log()
42 # 'log_entry', # core.hookenv.log()
43 # 'log_exit', # core.hookenv.log()
44 # 'relation_get', # core.hookenv.relation_get()
45 # 'relation_set', # core.hookenv.relation_set()
46 # 'relation_ids', # core.hookenv.relation_ids()
47 # 'relation_list', # core.hookenv.relation_units()
48 # 'config_get', # core.hookenv.config()
49 # 'unit_get', # core.hookenv.unit_get()
50 # 'open_port', # core.hookenv.open_port()
51 # 'close_port', # core.hookenv.close_port()
52 # 'service_control', # core.host.service()
53 'unit_info', # client-side, NOT IMPLEMENTED
54 'wait_for_machine', # client-side, NOT IMPLEMENTED
55 'wait_for_page_contents', # client-side, NOT IMPLEMENTED
56 'wait_for_relation', # client-side, NOT IMPLEMENTED
57 'wait_for_unit', # client-side, NOT IMPLEMENTED
58]
59
60
61SLEEP_AMOUNT = 0.1
62
63
64# We create a juju_status Command here because it makes testing much,
65# much easier.
66def juju_status():
67 subprocess.check_call(['juju', 'status'])
68
69# re-implemented as charmhelpers.fetch.configure_sources()
70# def configure_source(update=False):
71# source = config_get('source')
72# if ((source.startswith('ppa:') or
73# source.startswith('cloud:') or
74# source.startswith('http:'))):
75# run('add-apt-repository', source)
76# if source.startswith("http:"):
77# run('apt-key', 'import', config_get('key'))
78# if update:
79# run('apt-get', 'update')
80
81
82# DEPRECATED: client-side only
83def make_charm_config_file(charm_config):
84 charm_config_file = tempfile.NamedTemporaryFile(mode='w+')
85 charm_config_file.write(yaml.dump(charm_config))
86 charm_config_file.flush()
87 # The NamedTemporaryFile instance is returned instead of just the name
88 # because we want to take advantage of garbage collection-triggered
89 # deletion of the temp file when it goes out of scope in the caller.
90 return charm_config_file
91
92
93# DEPRECATED: client-side only
94def unit_info(service_name, item_name, data=None, unit=None):
95 if data is None:
96 data = yaml.safe_load(juju_status())
97 service = data['services'].get(service_name)
98 if service is None:
99 # XXX 2012-02-08 gmb:
100 # This allows us to cope with the race condition that we
101 # have between deploying a service and having it come up in
102 # `juju status`. We could probably do with cleaning it up so
103 # that it fails a bit more noisily after a while.
104 return ''
105 units = service['units']
106 if unit is not None:
107 item = units[unit][item_name]
108 else:
109 # It might seem odd to sort the units here, but we do it to
110 # ensure that when no unit is specified, the first unit for the
111 # service (or at least the one with the lowest number) is the
112 # one whose data gets returned.
113 sorted_unit_names = sorted(units.keys())
114 item = units[sorted_unit_names[0]][item_name]
115 return item
116
117
118# DEPRECATED: client-side only
119def get_machine_data():
120 return yaml.safe_load(juju_status())['machines']
121
122
123# DEPRECATED: client-side only
124def wait_for_machine(num_machines=1, timeout=300):
125 """Wait `timeout` seconds for `num_machines` machines to come up.
126
127 This wait_for... function can be called by other wait_for functions
128 whose timeouts might be too short in situations where only a bare
129 Juju setup has been bootstrapped.
130
131 :return: A tuple of (num_machines, time_taken). This is used for
132 testing.
133 """
134 # You may think this is a hack, and you'd be right. The easiest way
135 # to tell what environment we're working in (LXC vs EC2) is to check
136 # the dns-name of the first machine. If it's localhost we're in LXC
137 # and we can just return here.
138 if get_machine_data()[0]['dns-name'] == 'localhost':
139 return 1, 0
140 start_time = time.time()
141 while True:
142 # Drop the first machine, since it's the Zookeeper and that's
143 # not a machine that we need to wait for. This will only work
144 # for EC2 environments, which is why we return early above if
145 # we're in LXC.
146 machine_data = get_machine_data()
147 non_zookeeper_machines = [
148 machine_data[key] for key in list(machine_data.keys())[1:]]
149 if len(non_zookeeper_machines) >= num_machines:
150 all_machines_running = True
151 for machine in non_zookeeper_machines:
152 if machine.get('instance-state') != 'running':
153 all_machines_running = False
154 break
155 if all_machines_running:
156 break
157 if time.time() - start_time >= timeout:
158 raise RuntimeError('timeout waiting for service to start')
159 time.sleep(SLEEP_AMOUNT)
160 return num_machines, time.time() - start_time
161
162
163# DEPRECATED: client-side only
164def wait_for_unit(service_name, timeout=480):
165 """Wait `timeout` seconds for a given service name to come up."""
166 wait_for_machine(num_machines=1)
167 start_time = time.time()
168 while True:
169 state = unit_info(service_name, 'agent-state')
170 if 'error' in state or state == 'started':
171 break
172 if time.time() - start_time >= timeout:
173 raise RuntimeError('timeout waiting for service to start')
174 time.sleep(SLEEP_AMOUNT)
175 if state != 'started':
176 raise RuntimeError('unit did not start, agent-state: ' + state)
177
178
179# DEPRECATED: client-side only
180def wait_for_relation(service_name, relation_name, timeout=120):
181 """Wait `timeout` seconds for a given relation to come up."""
182 start_time = time.time()
183 while True:
184 relation = unit_info(service_name, 'relations').get(relation_name)
185 if relation is not None and relation['state'] == 'up':
186 break
187 if time.time() - start_time >= timeout:
188 raise RuntimeError('timeout waiting for relation to be up')
189 time.sleep(SLEEP_AMOUNT)
190
191
192# DEPRECATED: client-side only
193def wait_for_page_contents(url, contents, timeout=120, validate=None):
194 if validate is None:
195 validate = operator.contains
196 start_time = time.time()
197 while True:
198 try:
199 stream = urlopen(url)
200 except (HTTPError, URLError):
201 pass
202 else:
203 page = stream.read()
204 if validate(page, contents):
205 return page
206 if time.time() - start_time >= timeout:
207 raise RuntimeError('timeout waiting for contents of ' + url)
208 time.sleep(SLEEP_AMOUNT)
diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 95a79c2..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,360 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""Compatibility with the nrpe-external-master charm"""
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
23import subprocess
24import pwd
25import grp
26import os
27import glob
28import shutil
29import re
30import shlex
31import yaml
32
33from charmhelpers.core.hookenv import (
34 config,
35 local_unit,
36 log,
37 relation_ids,
38 relation_set,
39 relations_of_type,
40)
41
42from charmhelpers.core.host import service
43
44# This module adds compatibility with the nrpe-external-master and plain nrpe
45# subordinate charms. To use it in your charm:
46#
47# 1. Update metadata.yaml
48#
49# provides:
50# (...)
51# nrpe-external-master:
52# interface: nrpe-external-master
53# scope: container
54#
55# and/or
56#
57# provides:
58# (...)
59# local-monitors:
60# interface: local-monitors
61# scope: container
62
63#
64# 2. Add the following to config.yaml
65#
66# nagios_context:
67# default: "juju"
68# type: string
69# description: |
70# Used by the nrpe subordinate charms.
71# A string that will be prepended to instance name to set the host name
72# in nagios. So for instance the hostname would be something like:
73# juju-myservice-0
74# If you're running multiple environments with the same services in them
75# this allows you to differentiate between them.
76# nagios_servicegroups:
77# default: ""
78# type: string
79# description: |
80# A comma-separated list of nagios servicegroups.
81# If left empty, the nagios_context will be used as the servicegroup
82#
83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84#
85# 4. Update your hooks.py with something like this:
86#
87# from charmsupport.nrpe import NRPE
88# (...)
89# def update_nrpe_config():
90# nrpe_compat = NRPE()
91# nrpe_compat.add_check(
92# shortname = "myservice",
93# description = "Check MyService",
94# check_cmd = "check_http -w 2 -c 10 http://localhost"
95# )
96# nrpe_compat.add_check(
97# "myservice_other",
98# "Check for widget failures",
99# check_cmd = "/srv/myapp/scripts/widget_check"
100# )
101# nrpe_compat.write()
102#
103# def config_changed():
104# (...)
105# update_nrpe_config()
106#
107# def nrpe_external_master_relation_changed():
108# update_nrpe_config()
109#
110# def local_monitors_relation_changed():
111# update_nrpe_config()
112#
113# 5. ln -s hooks.py nrpe-external-master-relation-changed
114# ln -s hooks.py local-monitors-relation-changed
115
116
class CheckException(Exception):
    """Raised when an NRPE check definition is invalid."""
119
120
class Check(object):
    """A single NRPE check: the nrpe command definition plus the exported
    Nagios service definition that invokes it via check_nrpe.
    """

    # shortname ends up in file names and Nagios object names, so restrict
    # it to a conservative character set ($ anchors the end of the string).
    shortname_re = '[A-Za-z0-9-_]+$'
    # Nagios service definition written to the export dir. Doubled braces
    # are literal braces; single-brace fields are filled in by str.format()
    # in write_service_config().
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use                             active-service
    host_name                       {nagios_hostname}
    service_description             {nagios_hostname}[{shortname}] """
                        """{description}
    check_command                   check_nrpe!{command}
    servicegroups                   {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """Validate the shortname and resolve check_cmd to a full path.

        :param shortname: identifier used in file and command names.
        :param description: free-text description for the Nagios service.
        :param check_cmd: plugin invocation; its executable is looked up
            in the standard Nagios plugin directories.
        :raises CheckException: if shortname has illegal characters.
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _locate_cmd(self, check_cmd):
        """Return check_cmd with its executable expanded to an absolute
        path under one of the known plugin directories; log and return ''
        when the executable cannot be found.
        """
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    # Re-append the original arguments after the resolved path.
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe command file and, when the export dir exists,
        the exported Nagios service definition."""
        nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
            self.command)
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Replace any existing exported service definitions for this
        command with a freshly rendered one."""
        # Remove stale service files for this command before re-rendering.
        for f in os.listdir(NRPE.nagios_exportdir):
            if re.search('.*{}.cfg'.format(self.command), f):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = '{}/service__{}_{}.cfg'.format(
            NRPE.nagios_exportdir, hostname, self.command)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the resolved check command locally."""
        subprocess.call(self.check_cmd)
202
203
class NRPE(object):
    """Registry of NRPE checks for this unit.

    Checks are queued with add_check() and persisted with write(): nrpe
    command files, exported Nagios service definitions, an nrpe service
    restart, and the monitors document on any monitoring relations.
    """
    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """
        :param hostname: nagios hostname to report as; defaults to
            "<nagios_context>-<unit-name>".
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Fall back to the context when no servicegroups are configured.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Queue a Check (same arguments as Check.__init__)."""
        self.checks.append(Check(*args, **kwargs))

    def write(self):
        """Write out all queued checks and notify monitoring relations."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # Narrowed from a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt); getpwnam/getgrnam raise
            # KeyError when the nagios user/group does not exist.
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
254
255
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    Returns None when no related unit advertises one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        # Bug fix: this previously tested for 'nagios_hostname' but then
        # returned rel['nagios_host_context'], raising KeyError whenever a
        # unit carried the first key without the second. Test the key we
        # actually return.
        if 'nagios_host_context' in rel:
            return rel['nagios_host_context']
265
266
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """Return the nagios_hostname advertised by the nrpe subordinate,
    or None when no related unit provides one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    advertised = (rel['nagios_hostname']
                  for rel in relations_of_type(relation_name)
                  if 'nagios_hostname' in rel)
    return next(advertised, None)
276
277
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """Return the local unit name, prefixed with the nagios host context
    when the nrpe subordinate advertises one.

    :param str relation_name: Name of relation nrpe sub joined to
    """
    unit = local_unit()
    host_context = get_nagios_hostcontext(relation_name)
    return "%s:%s" % (host_context, unit) if host_context else unit
290
291
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add a process check for each service in the list.

    Upstart-managed services get a check_upstart_job check; SysV-managed
    services get a cron job that records the init script's status to a
    file, plus a check that reads that status file.

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # Use a context manager so the file handle is closed even when
            # the write fails (the original open/write/close leaked it).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
326
327
def copy_nrpe_checks():
    """Install the charm's bundled check_* Nagios plugin scripts.

    Copies every check_* file from the charm's
    hooks/charmhelpers/contrib/openstack/files directory into
    /usr/local/lib/nagios/plugins, creating that directory if needed.
    """
    plugins_dir = '/usr/local/lib/nagios/plugins'
    src_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                           'charmhelpers', 'contrib', 'openstack',
                           'files')

    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    for src in glob.glob(os.path.join(src_dir, "check_*")):
        if os.path.isfile(src):
            dest = os.path.join(plugins_dir, os.path.basename(src))
            shutil.copy2(src, dest)
344
345
def add_haproxy_checks(nrpe, unit_name):
    """Register the standard HAProxy health checks on ``nrpe``.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description_fmt, cmd in checks:
        nrpe.add_check(shortname=shortname,
                       description=description_fmt % unit_name,
                       check_cmd=cmd)
diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 320961b..0000000
--- a/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,175 +0,0 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17'''
18Functions for managing volumes in juju units. One volume is supported per unit.
19Subordinates may have their own storage, provided it is on its own partition.
20
21Configuration stanzas::
22
23 volume-ephemeral:
24 type: boolean
25 default: true
26 description: >
 27 If false, a volume is mounted as specified in "volume-map"
28 If true, ephemeral storage will be used, meaning that log data
29 will only exist as long as the machine. YOU HAVE BEEN WARNED.
30 volume-map:
31 type: string
32 default: {}
33 description: >
34 YAML map of units to device names, e.g:
35 "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
36 Service units will raise a configure-error if volume-ephemeral
37 is 'true' and no volume-map value is set. Use 'juju set' to set a
38 value and 'juju resolved' to complete configuration.
39
40Usage::
41
42 from charmsupport.volumes import configure_volume, VolumeConfigurationError
43 from charmsupport.hookenv import log, ERROR
 44 def pre_mount_hook():
 45 stop_service('myservice')
46 def post_mount_hook():
47 start_service('myservice')
48
49 if __name__ == '__main__':
50 try:
51 configure_volume(before_change=pre_mount_hook,
52 after_change=post_mount_hook)
53 except VolumeConfigurationError:
54 log('Storage could not be configured', ERROR)
55
56'''
57
58# XXX: Known limitations
59# - fstab is neither consulted nor updated
60
61import os
62from charmhelpers.core import hookenv
63from charmhelpers.core import host
64import yaml
65
66
67MOUNT_BASE = '/srv/juju/volumes'
68
69
class VolumeConfigurationError(Exception):
    """Raised when volume configuration data is missing or invalid."""
73
74
def get_config():
    '''Gather and sanity-check volume configuration data.

    Returns a dict with keys 'ephemeral', 'device' and 'mountpoint', or
    None when the configuration is inconsistent (problems are logged).
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    # Accept the common truthy spellings for volume-ephemeral.
    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # Bug fix: volume_map must be bound even when YAML parsing fails,
    # otherwise the 'volume_map is None' test below raised NameError.
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)))
        errors = True
        # Reset so the .get() lookup below cannot crash on a non-dict
        # value; the error above already forces a None return.
        volume_map = {}

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for permanent storage but did not define volume ID
        hookenv.log('Ephemeral storage was requested, but there is no volume '
                    'defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
119
120
def mount_volume(config):
    """Mount config['device'] at config['mountpoint'] (persisted in fstab).

    Creates the mountpoint when missing, remounts when already mounted,
    and raises VolumeConfigurationError when the mountpoint is not a
    directory or the mount fails.
    """
    mountpoint = config['mountpoint']
    if not os.path.exists(mountpoint):
        host.mkdir(mountpoint)
    elif not os.path.isdir(mountpoint):
        hookenv.log('Not a directory: {}'.format(mountpoint))
        raise VolumeConfigurationError()
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    if not host.mount(config['device'], mountpoint, persist=True):
        raise VolumeConfigurationError()
132
133
def unmount_volume(config):
    """Unmount the managed volume when it is currently mounted.

    Raises VolumeConfigurationError when the unmount fails.
    """
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
138
139
def managed_mounts():
    '''List of all mounted managed volumes'''
    def _is_managed(mount):
        # host.mounts() entries are (mountpoint, device) pairs.
        return mount[0].startswith(MOUNT_BASE)
    return filter(_is_managed, host.mounts())
143
144
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.
    '''

    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    if config['ephemeral']:
        # Ephemeral storage requested: ensure nothing remains mounted at
        # the managed mountpoint. Hooks only fire when a change is made.
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            mounts = dict(managed_mounts())
            if mounts.get(config['mountpoint']) != config['device']:
                # A different device is mounted there: swap it for the
                # configured one, bracketing the change with the hooks.
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            # Nothing mounted yet: mount the configured device.
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
diff --git a/hooks/charmhelpers/contrib/database/__init__.py b/hooks/charmhelpers/contrib/database/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/hooks/charmhelpers/contrib/database/__init__.py
+++ /dev/null
diff --git a/hooks/charmhelpers/contrib/database/mysql.py b/hooks/charmhelpers/contrib/database/mysql.py
deleted file mode 100644
index 20f6141..0000000
--- a/hooks/charmhelpers/contrib/database/mysql.py
+++ /dev/null
@@ -1,412 +0,0 @@
1"""Helper for working with a MySQL database"""
2import json
3import re
4import sys
5import platform
6import os
7import glob
8
9# from string import upper
10
11from charmhelpers.core.host import (
12 mkdir,
13 pwgen,
14 write_file
15)
16from charmhelpers.core.hookenv import (
17 config as config_get,
18 relation_get,
19 related_units,
20 unit_get,
21 log,
22 DEBUG,
23 INFO,
24 WARNING,
25)
26from charmhelpers.fetch import (
27 apt_install,
28 apt_update,
29 filter_installed_packages,
30)
31from charmhelpers.contrib.peerstorage import (
32 peer_store,
33 peer_retrieve,
34)
35from charmhelpers.contrib.network.ip import get_host_ip
36
37try:
38 import MySQLdb
39except ImportError:
40 apt_update(fatal=True)
41 apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
42 import MySQLdb
43
44
class MySQLHelper(object):
    """Manage databases, users and grants on a MySQL server.

    Passwords for the root and service users are generated on demand and
    persisted either on disk (password files) or in the cluster peer
    relation, with optional migration from disk to the peer relation.
    """

    def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
                 migrate_passwd_to_peer_relation=True,
                 delete_ondisk_passwd_file=True):
        """
        :param rpasswdf_template: path of the root password file.
        :param upasswdf_template: path template for per-user password
            files (str.format()-ed with the username).
        :param host: MySQL server address.
        :param migrate_passwd_to_peer_relation: copy on-disk passwords to
            the peer relation when available.
        :param delete_ondisk_passwd_file: delete the local password file
            once it has been migrated to the peer relation.
        """
        self.host = host
        # Password file path templates
        self.root_passwd_file_template = rpasswdf_template
        self.user_passwd_file_template = upasswdf_template

        self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
        # If we migrate we have the option to delete local copy of root passwd
        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file

    def connect(self, user='root', password=None):
        """Open a connection as ``user`` and keep it on self.connection."""
        log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
        self.connection = MySQLdb.connect(user=user, host=self.host,
                                          passwd=password)

    def database_exists(self, db_name):
        """Return True if database ``db_name`` exists on the server."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("SHOW DATABASES")
            databases = [i[0] for i in cursor.fetchall()]
        finally:
            cursor.close()

        return db_name in databases

    # NOTE(review): the statements below interpolate identifiers and
    # values directly into SQL. Inputs originate from Juju config and
    # relations rather than end users, but parameterized queries would be
    # safer where MySQLdb supports them.

    def create_database(self, db_name):
        """Create database ``db_name`` using the UTF8 character set."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
                           .format(db_name))
        finally:
            cursor.close()

    def grant_exists(self, db_name, db_user, remote_ip):
        """Return True when an all-privileges grant for db_user@remote_ip
        on ``db_name`` already exists; False when the user is unknown."""
        cursor = self.connection.cursor()
        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
        try:
            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
                                                              remote_ip))
            grants = [i[0] for i in cursor.fetchall()]
        except MySQLdb.OperationalError:
            # SHOW GRANTS fails when the user does not exist yet.
            return False
        finally:
            cursor.close()

        # TODO: review for different grants
        return priv_string in grants

    def create_grant(self, db_name, db_user, remote_ip, password):
        """Grant all privileges on ``db_name`` to db_user@remote_ip."""
        cursor = self.connection.cursor()
        try:
            # TODO: review for different grants
            cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_name,
                                                       db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()

    def create_admin_grant(self, db_user, remote_ip, password):
        """Grant all privileges on every database to db_user@remote_ip."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()

    def cleanup_grant(self, db_user, remote_ip):
        """Remove the mysql.user entry for db_user@remote_ip.

        Bug fix: the original statement used ``DROP FROM mysql.user``,
        which is not valid SQL and always raised a ProgrammingError; the
        correct statement is ``DELETE FROM``.
        """
        cursor = self.connection.cursor()
        try:
            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
                           "AND HOST='{}'".format(db_user,
                                                  remote_ip))
        finally:
            cursor.close()

    def execute(self, sql):
        """Execute arbitrary SQL against the database."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(sql)
        finally:
            cursor.close()

    def migrate_passwords_to_peer_relation(self, excludes=None):
        """Migrate any passwords stored on disk to the cluster peer
        relation, deleting the local copies when configured to do so.

        :param excludes: iterable of file paths to leave untouched.
        """
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from peer migration" % (f), level=DEBUG)
                continue

            key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                peer_store(key, _value)

                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass

    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk (root password when username is None)."""
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template

        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)

            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)

        return _password

    def passwd_keys(self, username):
        """Generator to return keys used to store passwords in peer store.

        NOTE: we support both legacy and new format to support mysql
        charm prior to refactor. This is necessary to avoid LP 1451890.
        """
        keys = []
        if username == 'mysql':
            log("Bad username '%s'" % (username), level=WARNING)

        if username:
            # IMPORTANT: *newer* format must be returned first
            keys.append('mysql-%s.passwd' % (username))
            keys.append('%s.passwd' % (username))
        else:
            keys.append('mysql.passwd')

        for key in keys:
            yield key

    def get_mysql_password(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username using peer relation cluster, falling back to on-disk
        storage when the cluster relation is not ready."""
        excludes = []

        # First check peer relation.
        try:
            for key in self.passwd_keys(username):
                _password = peer_retrieve(key)
                if _password:
                    break

            # If root password available don't update peer relation from local
            if _password and not username:
                excludes.append(self.root_passwd_file_template)

        except ValueError:
            # cluster relation is not yet started; use on-disk
            _password = None

        # If none available, generate new one
        if not _password:
            _password = self.get_mysql_password_on_disk(username, password)

        # Put on wire if required
        if self.migrate_passwd_to_peer_relation:
            self.migrate_passwords_to_peer_relation(excludes=excludes)

        return _password

    def get_mysql_root_password(self, password=None):
        """Retrieve or generate mysql root password for service units."""
        return self.get_mysql_password(username=None, password=password)

    def normalize_address(self, hostname):
        """Ensure that address returned is an IP address (i.e. not fqdn)"""
        if config_get('prefer-ipv6'):
            # TODO: add support for ipv6 dns
            return hostname

        if hostname != unit_get('private-address'):
            return get_host_ip(hostname, fallback=hostname)

        # Otherwise assume localhost
        return '127.0.0.1'

    def get_allowed_units(self, database, username, relation_id=None):
        """Get list of units with access grants for database with username.

        This is typically used to provide shared-db relations with a list of
        which units have been granted access to the given database.
        """
        self.connect(password=self.get_mysql_root_password())
        allowed_units = set()
        for unit in related_units(relation_id):
            settings = relation_get(rid=relation_id, unit=unit)
            # First check for setting with prefix, then without
            for attr in ["%s_hostname" % (database), 'hostname']:
                hosts = settings.get(attr, None)
                if hosts:
                    break

            if hosts:
                # hostname can be json-encoded list of hostnames
                try:
                    hosts = json.loads(hosts)
                except ValueError:
                    hosts = [hosts]
            else:
                hosts = [settings['private-address']]

            if hosts:
                for host in hosts:
                    host = self.normalize_address(host)
                    if self.grant_exists(database, username, host):
                        log("Grant exists for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
                        if unit not in allowed_units:
                            allowed_units.add(unit)
                    else:
                        log("Grant does NOT exist for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
            else:
                log("No hosts found for grant check", level=INFO)

        return allowed_units

    def configure_db(self, hostname, database, username, admin=False):
        """Configure access to database for username from hostname,
        creating the database and grant as needed; returns the password."""
        self.connect(password=self.get_mysql_root_password())
        if not self.database_exists(database):
            self.create_database(database)

        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        if not self.grant_exists(database, username, remote_ip):
            if not admin:
                self.create_grant(database, username, remote_ip, password)
            else:
                self.create_admin_grant(username, remote_ip, password)

        return password
312
313
class PerconaClusterHelper(object):
    """Helpers for sizing Percona XtraDB Cluster configuration."""

    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB

    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50

    def human_to_bytes(self, human):
        """Convert a human readable size to an int number of bytes.

        A bare number is taken as bytes; a K/M/G/T suffix scales by the
        usual binary factors; '%' means a percentage of total system
        memory (capped on 32-bit systems), rounded down to a multiple of
        DEFAULT_PAGE_SIZE.

        :param human: size string, e.g. '1024', '32M', '80%%'.
        :raises ValueError: for an unrecognised suffix.
        """
        num_re = re.compile('^[0-9]+$')
        if num_re.match(human):
            # Bug fix: this branch previously returned the *string* while
            # every other branch returns an int, breaking arithmetic and
            # comparisons in parse_config(); normalise to int.
            return int(human)

        factors = {
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }
        modifier = human[-1]
        if modifier in factors:
            return int(human[:-1]) * factors[modifier]

        if modifier == '%':
            total_ram = self.human_to_bytes(self.get_mem_total())
            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
                total_ram = self.sys_mem_limit()
            factor = int(human[:-1]) * 0.01
            pctram = total_ram * factor
            # Round down to a whole number of InnoDB pages.
            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))

        raise ValueError("Can only convert K,M,G, or T")

    def is_32bit_system(self):
        """Return True when running on a 32-bit interpreter."""
        try:
            return sys.maxsize < 2 ** 32
        except OverflowError:
            return False

    def sys_mem_limit(self):
        """Determine the default memory limit for the current service unit."""
        if platform.machine() in ['armv7l']:
            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
        else:
            # Limit for x86 based 32bit systems
            _mem_limit = self.human_to_bytes('4G')

        return _mem_limit

    def get_mem_total(self):
        """Return total system memory from /proc/meminfo as a human
        string, e.g. '16384K'; None when MemTotal is not found."""
        with open('/proc/meminfo') as meminfo_file:
            for line in meminfo_file:
                key, mem = line.split(':', 2)
                if key == 'MemTotal':
                    mtot, modifier = mem.strip().split(' ')
                    return '%s%s' % (mtot, modifier[0].upper())

    def parse_config(self):
        """Parse charm configuration and calculate values for config files."""
        config = config_get()
        mysql_config = {}
        if 'max-connections' in config:
            mysql_config['max_connections'] = config['max-connections']

        if 'wait-timeout' in config:
            mysql_config['wait_timeout'] = config['wait-timeout']

        if 'innodb-flush-log-at-trx-commit' in config:
            mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']

        # Set a sane default key_buffer size
        mysql_config['key_buffer'] = self.human_to_bytes('32M')
        total_memory = self.human_to_bytes(self.get_mem_total())

        dataset_bytes = config.get('dataset-size', None)
        innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

        if innodb_buffer_pool_size:
            innodb_buffer_pool_size = self.human_to_bytes(
                innodb_buffer_pool_size)
        elif dataset_bytes:
            log("Option 'dataset-size' has been deprecated, please use"
                "innodb_buffer_pool_size option instead", level="WARN")
            innodb_buffer_pool_size = self.human_to_bytes(
                dataset_bytes)
        else:
            # Default to a fixed fraction of total memory.
            innodb_buffer_pool_size = int(
                total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')

        mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
        return mysql_config
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index fff6d5c..b9c7900 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -23,7 +23,7 @@ import socket
23from functools import partial 23from functools import partial
24 24
25from charmhelpers.core.hookenv import unit_get 25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install 26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import ( 27from charmhelpers.core.hookenv import (
28 log, 28 log,
29 WARNING, 29 WARNING,
@@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
32try: 32try:
33 import netifaces 33 import netifaces
34except ImportError: 34except ImportError:
35 apt_install('python-netifaces') 35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
36 import netifaces 37 import netifaces
37 38
38try: 39try:
39 import netaddr 40 import netaddr
40except ImportError: 41except ImportError:
41 apt_install('python-netaddr') 42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
42 import netaddr 44 import netaddr
43 45
44 46
@@ -51,7 +53,7 @@ def _validate_cidr(network):
51 53
52 54
53def no_ip_found_error_out(network): 55def no_ip_found_error_out(network):
54 errmsg = ("No IP address found in network: %s" % network) 56 errmsg = ("No IP address found in network(s): %s" % network)
55 raise ValueError(errmsg) 57 raise ValueError(errmsg)
56 58
57 59
@@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
59 """Get an IPv4 or IPv6 address within the network from the host. 61 """Get an IPv4 or IPv6 address within the network from the host.
60 62
61 :param network (str): CIDR presentation format. For example, 63 :param network (str): CIDR presentation format. For example,
62 '192.168.1.0/24'. 64 '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
63 :param fallback (str): If no address is found, return fallback. 65 :param fallback (str): If no address is found, return fallback.
64 :param fatal (boolean): If no address is found, fallback is not 66 :param fatal (boolean): If no address is found, fallback is not
65 set and fatal is True then exit(1). 67 set and fatal is True then exit(1).
@@ -73,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
73 else: 75 else:
74 return None 76 return None
75 77
76 _validate_cidr(network) 78 networks = network.split() or [network]
77 network = netaddr.IPNetwork(network) 79 for network in networks:
78 for iface in netifaces.interfaces(): 80 _validate_cidr(network)
79 addresses = netifaces.ifaddresses(iface) 81 network = netaddr.IPNetwork(network)
80 if network.version == 4 and netifaces.AF_INET in addresses: 82 for iface in netifaces.interfaces():
81 addr = addresses[netifaces.AF_INET][0]['addr'] 83 addresses = netifaces.ifaddresses(iface)
82 netmask = addresses[netifaces.AF_INET][0]['netmask'] 84 if network.version == 4 and netifaces.AF_INET in addresses:
83 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) 85 addr = addresses[netifaces.AF_INET][0]['addr']
84 if cidr in network: 86 netmask = addresses[netifaces.AF_INET][0]['netmask']
85 return str(cidr.ip) 87 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
86 88 if cidr in network:
87 if network.version == 6 and netifaces.AF_INET6 in addresses: 89 return str(cidr.ip)
88 for addr in addresses[netifaces.AF_INET6]: 90
89 if not addr['addr'].startswith('fe80'): 91 if network.version == 6 and netifaces.AF_INET6 in addresses:
90 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], 92 for addr in addresses[netifaces.AF_INET6]:
91 addr['netmask'])) 93 if not addr['addr'].startswith('fe80'):
92 if cidr in network: 94 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
93 return str(cidr.ip) 95 addr['netmask']))
96 if cidr in network:
97 return str(cidr.ip)
94 98
95 if fallback is not None: 99 if fallback is not None:
96 return fallback 100 return fallback
@@ -187,6 +191,15 @@ get_iface_for_address = partial(_get_for_address, key='iface')
187get_netmask_for_address = partial(_get_for_address, key='netmask') 191get_netmask_for_address = partial(_get_for_address, key='netmask')
188 192
189 193
194def resolve_network_cidr(ip_address):
195 '''
196 Resolves the full address cidr of an ip_address based on
197 configured network interfaces
198 '''
199 netmask = get_netmask_for_address(ip_address)
200 return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
201
202
190def format_ipv6_addr(address): 203def format_ipv6_addr(address):
191 """If address is IPv6, wrap it in '[]' otherwise return None. 204 """If address is IPv6, wrap it in '[]' otherwise return None.
192 205
@@ -435,8 +448,12 @@ def get_hostname(address, fqdn=True):
435 448
436 rev = dns.reversename.from_address(address) 449 rev = dns.reversename.from_address(address)
437 result = ns_query(rev) 450 result = ns_query(rev)
451
438 if not result: 452 if not result:
439 return None 453 try:
454 result = socket.gethostbyaddr(address)[0]
455 except:
456 return None
440 else: 457 else:
441 result = address 458 result = address
442 459
@@ -448,3 +465,18 @@ def get_hostname(address, fqdn=True):
448 return result 465 return result
449 else: 466 else:
450 return result.split('.')[0] 467 return result.split('.')[0]
468
469
470def port_has_listener(address, port):
471 """
472 Returns True if the address:port is open and being listened to,
473 else False.
474
475 @param address: an IP address or hostname
476 @param port: integer port
477
478 Note calls 'zc' via a subprocess shell
479 """
480 cmd = ['nc', '-z', address, str(port)]
481 result = subprocess.call(cmd)
482 return not(bool(result))
diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py
index 77e2db7..83d1813 100644
--- a/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ b/hooks/charmhelpers/contrib/network/ovs/__init__.py
@@ -25,10 +25,14 @@ from charmhelpers.core.host import (
25) 25)
26 26
27 27
28def add_bridge(name): 28def add_bridge(name, datapath_type=None):
29 ''' Add the named bridge to openvswitch ''' 29 ''' Add the named bridge to openvswitch '''
30 log('Creating bridge {}'.format(name)) 30 log('Creating bridge {}'.format(name))
31 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) 31 cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
32 if datapath_type is not None:
33 cmd += ['--', 'set', 'bridge', name,
34 'datapath_type={}'.format(datapath_type)]
35 subprocess.check_call(cmd)
32 36
33 37
34def del_bridge(name): 38def del_bridge(name):
diff --git a/hooks/charmhelpers/contrib/network/ufw.py b/hooks/charmhelpers/contrib/network/ufw.py
index d40110d..b65d963 100644
--- a/hooks/charmhelpers/contrib/network/ufw.py
+++ b/hooks/charmhelpers/contrib/network/ufw.py
@@ -40,7 +40,9 @@ Examples:
40import re 40import re
41import os 41import os
42import subprocess 42import subprocess
43
43from charmhelpers.core import hookenv 44from charmhelpers.core import hookenv
45from charmhelpers.core.kernel import modprobe, is_module_loaded
44 46
45__author__ = "Felipe Reyes <felipe.reyes@canonical.com>" 47__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
46 48
@@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
82 # do we have IPv6 in the machine? 84 # do we have IPv6 in the machine?
83 if os.path.isdir('/proc/sys/net/ipv6'): 85 if os.path.isdir('/proc/sys/net/ipv6'):
84 # is ip6tables kernel module loaded? 86 # is ip6tables kernel module loaded?
85 lsmod = subprocess.check_output(['lsmod'], universal_newlines=True) 87 if not is_module_loaded('ip6_tables'):
86 matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
87 if len(matches) == 0:
88 # ip6tables support isn't complete, let's try to load it 88 # ip6tables support isn't complete, let's try to load it
89 try: 89 try:
90 subprocess.check_output(['modprobe', 'ip6_tables'], 90 modprobe('ip6_tables')
91 universal_newlines=True) 91 # great, we can load the module
92 # great, we could load the module
93 return True 92 return True
94 except subprocess.CalledProcessError as ex: 93 except subprocess.CalledProcessError as ex:
95 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, 94 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index b01e6cb..d21c9c7 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -14,12 +14,18 @@
14# You should have received a copy of the GNU Lesser General Public License 14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16 16
17import logging
18import re
19import sys
17import six 20import six
18from collections import OrderedDict 21from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import ( 22from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment 23 AmuletDeployment
21) 24)
22 25
26DEBUG = logging.DEBUG
27ERROR = logging.ERROR
28
23 29
24class OpenStackAmuletDeployment(AmuletDeployment): 30class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment. 31 """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
28 that is specifically for use by OpenStack charms. 34 that is specifically for use by OpenStack charms.
29 """ 35 """
30 36
31 def __init__(self, series=None, openstack=None, source=None, stable=True): 37 def __init__(self, series=None, openstack=None, source=None,
38 stable=True, log_level=DEBUG):
32 """Initialize the deployment environment.""" 39 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series) 40 super(OpenStackAmuletDeployment, self).__init__(series)
41 self.log = self.get_logger(level=log_level)
42 self.log.info('OpenStackAmuletDeployment: init')
34 self.openstack = openstack 43 self.openstack = openstack
35 self.source = source 44 self.source = source
36 self.stable = stable 45 self.stable = stable
@@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
38 # out. 47 # out.
39 self.current_next = "trusty" 48 self.current_next = "trusty"
40 49
50 def get_logger(self, name="deployment-logger", level=logging.DEBUG):
51 """Get a logger object that will log to stdout."""
52 log = logging
53 logger = log.getLogger(name)
54 fmt = log.Formatter("%(asctime)s %(funcName)s "
55 "%(levelname)s: %(message)s")
56
57 handler = log.StreamHandler(stream=sys.stdout)
58 handler.setLevel(level)
59 handler.setFormatter(fmt)
60
61 logger.addHandler(handler)
62 logger.setLevel(level)
63
64 return logger
65
41 def _determine_branch_locations(self, other_services): 66 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services. 67 """Determine the branch locations for the other services.
43 68
44 Determine if the local branch being tested is derived from its 69 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresonding 70 stable or next (dev) branch, and based on this, use the corresonding
46 stable or next branches for the other_services.""" 71 stable or next branches for the other_services."""
47 base_charms = ['mysql', 'mongodb'] 72
73 self.log.info('OpenStackAmuletDeployment: determine branch locations')
74
75 # Charms outside the lp:~openstack-charmers namespace
76 base_charms = ['mysql', 'mongodb', 'nrpe']
77
78 # Force these charms to current series even when using an older series.
79 # ie. Use trusty/nrpe even when series is precise, as the P charm
80 # does not possess the necessary external master config and hooks.
81 force_series_current = ['nrpe']
48 82
49 if self.series in ['precise', 'trusty']: 83 if self.series in ['precise', 'trusty']:
50 base_series = self.series 84 base_series = self.series
51 else: 85 else:
52 base_series = self.current_next 86 base_series = self.current_next
53 87
54 if self.stable: 88 for svc in other_services:
55 for svc in other_services: 89 if svc['name'] in force_series_current:
90 base_series = self.current_next
91 # If a location has been explicitly set, use it
92 if svc.get('location'):
93 continue
94 if self.stable:
56 temp = 'lp:charms/{}/{}' 95 temp = 'lp:charms/{}/{}'
57 svc['location'] = temp.format(base_series, 96 svc['location'] = temp.format(base_series,
58 svc['name']) 97 svc['name'])
59 else: 98 else:
60 for svc in other_services:
61 if svc['name'] in base_charms: 99 if svc['name'] in base_charms:
62 temp = 'lp:charms/{}/{}' 100 temp = 'lp:charms/{}/{}'
63 svc['location'] = temp.format(base_series, 101 svc['location'] = temp.format(base_series,
@@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
66 temp = 'lp:~openstack-charmers/charms/{}/{}/next' 104 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
67 svc['location'] = temp.format(self.current_next, 105 svc['location'] = temp.format(self.current_next,
68 svc['name']) 106 svc['name'])
107
69 return other_services 108 return other_services
70 109
71 def _add_services(self, this_service, other_services): 110 def _add_services(self, this_service, other_services):
72 """Add services to the deployment and set openstack-origin/source.""" 111 """Add services to the deployment and set openstack-origin/source."""
112 self.log.info('OpenStackAmuletDeployment: adding services')
113
73 other_services = self._determine_branch_locations(other_services) 114 other_services = self._determine_branch_locations(other_services)
74 115
75 super(OpenStackAmuletDeployment, self)._add_services(this_service, 116 super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -77,29 +118,105 @@ class OpenStackAmuletDeployment(AmuletDeployment):
77 118
78 services = other_services 119 services = other_services
79 services.append(this_service) 120 services.append(this_service)
121
122 # Charms which should use the source config option
80 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 123 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
81 'ceph-osd', 'ceph-radosgw'] 124 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
82 # Most OpenStack subordinate charms do not expose an origin option 125
83 # as that is controlled by the principle. 126 # Charms which can not use openstack-origin, ie. many subordinates
84 ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] 127 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
128 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
129 'cinder-backup', 'nexentaedge-data',
130 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
131 'cinder-nexentaedge', 'nexentaedge-mgmt']
85 132
86 if self.openstack: 133 if self.openstack:
87 for svc in services: 134 for svc in services:
88 if svc['name'] not in use_source + ignore: 135 if svc['name'] not in use_source + no_origin:
89 config = {'openstack-origin': self.openstack} 136 config = {'openstack-origin': self.openstack}
90 self.d.configure(svc['name'], config) 137 self.d.configure(svc['name'], config)
91 138
92 if self.source: 139 if self.source:
93 for svc in services: 140 for svc in services:
94 if svc['name'] in use_source and svc['name'] not in ignore: 141 if svc['name'] in use_source and svc['name'] not in no_origin:
95 config = {'source': self.source} 142 config = {'source': self.source}
96 self.d.configure(svc['name'], config) 143 self.d.configure(svc['name'], config)
97 144
98 def _configure_services(self, configs): 145 def _configure_services(self, configs):
99 """Configure all of the services.""" 146 """Configure all of the services."""
147 self.log.info('OpenStackAmuletDeployment: configure services')
100 for service, config in six.iteritems(configs): 148 for service, config in six.iteritems(configs):
101 self.d.configure(service, config) 149 self.d.configure(service, config)
102 150
151 def _auto_wait_for_status(self, message=None, exclude_services=None,
152 include_only=None, timeout=1800):
153 """Wait for all units to have a specific extended status, except
154 for any defined as excluded. Unless specified via message, any
155 status containing any case of 'ready' will be considered a match.
156
157 Examples of message usage:
158
159 Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
160 message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
161
162 Wait for all units to reach this status (exact match):
163 message = re.compile('^Unit is ready and clustered$')
164
165 Wait for all units to reach any one of these (exact match):
166 message = re.compile('Unit is ready|OK|Ready')
167
168 Wait for at least one unit to reach this status (exact match):
169 message = {'ready'}
170
171 See Amulet's sentry.wait_for_messages() for message usage detail.
172 https://github.com/juju/amulet/blob/master/amulet/sentry.py
173
174 :param message: Expected status match
175 :param exclude_services: List of juju service names to ignore,
176 not to be used in conjuction with include_only.
177 :param include_only: List of juju service names to exclusively check,
178 not to be used in conjuction with exclude_services.
179 :param timeout: Maximum time in seconds to wait for status match
180 :returns: None. Raises if timeout is hit.
181 """
182 self.log.info('Waiting for extended status on units...')
183
184 all_services = self.d.services.keys()
185
186 if exclude_services and include_only:
187 raise ValueError('exclude_services can not be used '
188 'with include_only')
189
190 if message:
191 if isinstance(message, re._pattern_type):
192 match = message.pattern
193 else:
194 match = message
195
196 self.log.debug('Custom extended status wait match: '
197 '{}'.format(match))
198 else:
199 self.log.debug('Default extended status wait match: contains '
200 'READY (case-insensitive)')
201 message = re.compile('.*ready.*', re.IGNORECASE)
202
203 if exclude_services:
204 self.log.debug('Excluding services from extended status match: '
205 '{}'.format(exclude_services))
206 else:
207 exclude_services = []
208
209 if include_only:
210 services = include_only
211 else:
212 services = list(set(all_services) - set(exclude_services))
213
214 self.log.debug('Waiting up to {}s for extended status on services: '
215 '{}'.format(timeout, services))
216 service_messages = {service: message for service in services}
217 self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
218 self.log.info('OK')
219
103 def _get_openstack_release(self): 220 def _get_openstack_release(self):
104 """Get openstack release. 221 """Get openstack release.
105 222
@@ -111,7 +228,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
111 self.precise_havana, self.precise_icehouse, 228 self.precise_havana, self.precise_icehouse,
112 self.trusty_icehouse, self.trusty_juno, self.utopic_juno, 229 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
113 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, 230 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
114 self.wily_liberty) = range(12) 231 self.wily_liberty, self.trusty_mitaka,
232 self.xenial_mitaka) = range(14)
115 233
116 releases = { 234 releases = {
117 ('precise', None): self.precise_essex, 235 ('precise', None): self.precise_essex,
@@ -123,9 +241,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
123 ('trusty', 'cloud:trusty-juno'): self.trusty_juno, 241 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
124 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, 242 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
125 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, 243 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
244 ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
126 ('utopic', None): self.utopic_juno, 245 ('utopic', None): self.utopic_juno,
127 ('vivid', None): self.vivid_kilo, 246 ('vivid', None): self.vivid_kilo,
128 ('wily', None): self.wily_liberty} 247 ('wily', None): self.wily_liberty,
248 ('xenial', None): self.xenial_mitaka}
129 return releases[(self.series, self.openstack)] 249 return releases[(self.series, self.openstack)]
130 250
131 def _get_openstack_release_string(self): 251 def _get_openstack_release_string(self):
@@ -142,6 +262,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
142 ('utopic', 'juno'), 262 ('utopic', 'juno'),
143 ('vivid', 'kilo'), 263 ('vivid', 'kilo'),
144 ('wily', 'liberty'), 264 ('wily', 'liberty'),
265 ('xenial', 'mitaka'),
145 ]) 266 ])
146 if self.openstack: 267 if self.openstack:
147 os_origin = self.openstack.split(':')[1] 268 os_origin = self.openstack.split(':')[1]
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 03f7927..ef3bdcc 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -18,6 +18,7 @@ import amulet
18import json 18import json
19import logging 19import logging
20import os 20import os
21import re
21import six 22import six
22import time 23import time
23import urllib 24import urllib
@@ -26,7 +27,12 @@ import cinderclient.v1.client as cinder_client
26import glanceclient.v1.client as glance_client 27import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client 28import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client 29import keystoneclient.v2_0 as keystone_client
29import novaclient.v1_1.client as nova_client 30from keystoneclient.auth.identity import v3 as keystone_id_v3
31from keystoneclient import session as keystone_session
32from keystoneclient.v3 import client as keystone_client_v3
33
34import novaclient.client as nova_client
35import pika
30import swiftclient 36import swiftclient
31 37
32from charmhelpers.contrib.amulet.utils import ( 38from charmhelpers.contrib.amulet.utils import (
@@ -36,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
36DEBUG = logging.DEBUG 42DEBUG = logging.DEBUG
37ERROR = logging.ERROR 43ERROR = logging.ERROR
38 44
45NOVA_CLIENT_VERSION = "2"
46
39 47
40class OpenStackAmuletUtils(AmuletUtils): 48class OpenStackAmuletUtils(AmuletUtils):
41 """OpenStack amulet utilities. 49 """OpenStack amulet utilities.
@@ -137,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
137 return "role {} does not exist".format(e['name']) 145 return "role {} does not exist".format(e['name'])
138 return ret 146 return ret
139 147
140 def validate_user_data(self, expected, actual): 148 def validate_user_data(self, expected, actual, api_version=None):
141 """Validate user data. 149 """Validate user data.
142 150
143 Validate a list of actual user data vs a list of expected user 151 Validate a list of actual user data vs a list of expected user
@@ -148,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
148 for e in expected: 156 for e in expected:
149 found = False 157 found = False
150 for act in actual: 158 for act in actual:
151 a = {'enabled': act.enabled, 'name': act.name, 159 if e['name'] == act.name:
152 'email': act.email, 'tenantId': act.tenantId, 160 a = {'enabled': act.enabled, 'name': act.name,
153 'id': act.id} 161 'email': act.email, 'id': act.id}
154 if e['name'] == a['name']: 162 if api_version == 3:
163 a['default_project_id'] = getattr(act,
164 'default_project_id',
165 'none')
166 else:
167 a['tenantId'] = act.tenantId
155 found = True 168 found = True
156 ret = self._validate_dict_data(e, a) 169 ret = self._validate_dict_data(e, a)
157 if ret: 170 if ret:
@@ -186,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
186 return cinder_client.Client(username, password, tenant, ept) 199 return cinder_client.Client(username, password, tenant, ept)
187 200
188 def authenticate_keystone_admin(self, keystone_sentry, user, password, 201 def authenticate_keystone_admin(self, keystone_sentry, user, password,
189 tenant): 202 tenant=None, api_version=None,
203 keystone_ip=None):
190 """Authenticates admin user with the keystone admin endpoint.""" 204 """Authenticates admin user with the keystone admin endpoint."""
191 self.log.debug('Authenticating keystone admin...') 205 self.log.debug('Authenticating keystone admin...')
192 unit = keystone_sentry 206 unit = keystone_sentry
193 service_ip = unit.relation('shared-db', 207 if not keystone_ip:
194 'mysql:shared-db')['private-address'] 208 keystone_ip = unit.relation('shared-db',
195 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) 209 'mysql:shared-db')['private-address']
196 return keystone_client.Client(username=user, password=password, 210 base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
197 tenant_name=tenant, auth_url=ep) 211 if not api_version or api_version == 2:
212 ep = base_ep + "/v2.0"
213 return keystone_client.Client(username=user, password=password,
214 tenant_name=tenant, auth_url=ep)
215 else:
216 ep = base_ep + "/v3"
217 auth = keystone_id_v3.Password(
218 user_domain_name='admin_domain',
219 username=user,
220 password=password,
221 domain_name='admin_domain',
222 auth_url=ep,
223 )
224 sess = keystone_session.Session(auth=auth)
225 return keystone_client_v3.Client(session=sess)
198 226
199 def authenticate_keystone_user(self, keystone, user, password, tenant): 227 def authenticate_keystone_user(self, keystone, user, password, tenant):
200 """Authenticates a regular user with the keystone public endpoint.""" 228 """Authenticates a regular user with the keystone public endpoint."""
@@ -223,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
223 self.log.debug('Authenticating nova user ({})...'.format(user)) 251 self.log.debug('Authenticating nova user ({})...'.format(user))
224 ep = keystone.service_catalog.url_for(service_type='identity', 252 ep = keystone.service_catalog.url_for(service_type='identity',
225 endpoint_type='publicURL') 253 endpoint_type='publicURL')
226 return nova_client.Client(username=user, api_key=password, 254 return nova_client.Client(NOVA_CLIENT_VERSION,
255 username=user, api_key=password,
227 project_id=tenant, auth_url=ep) 256 project_id=tenant, auth_url=ep)
228 257
229 def authenticate_swift_user(self, keystone, user, password, tenant): 258 def authenticate_swift_user(self, keystone, user, password, tenant):
@@ -602,3 +631,382 @@ class OpenStackAmuletUtils(AmuletUtils):
602 self.log.debug('Ceph {} samples (OK): ' 631 self.log.debug('Ceph {} samples (OK): '
603 '{}'.format(sample_type, samples)) 632 '{}'.format(sample_type, samples))
604 return None 633 return None
634
635 # rabbitmq/amqp specific helpers:
636
637 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
638 """Wait for rmq units extended status to show cluster readiness,
639 after an optional initial sleep period. Initial sleep is likely
640 necessary to be effective following a config change, as status
641 message may not instantly update to non-ready."""
642
643 if init_sleep:
644 time.sleep(init_sleep)
645
646 message = re.compile('^Unit is ready and clustered$')
647 deployment._auto_wait_for_status(message=message,
648 timeout=timeout,
649 include_only=['rabbitmq-server'])
650
651 def add_rmq_test_user(self, sentry_units,
652 username="testuser1", password="changeme"):
653 """Add a test user via the first rmq juju unit, check connection as
654 the new user against all sentry units.
655
656 :param sentry_units: list of sentry unit pointers
657 :param username: amqp user name, default to testuser1
658 :param password: amqp user password
659 :returns: None if successful. Raise on error.
660 """
661 self.log.debug('Adding rmq user ({})...'.format(username))
662
663 # Check that user does not already exist
664 cmd_user_list = 'rabbitmqctl list_users'
665 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
666 if username in output:
667 self.log.warning('User ({}) already exists, returning '
668 'gracefully.'.format(username))
669 return
670
671 perms = '".*" ".*" ".*"'
672 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
673 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
674
675 # Add user via first unit
676 for cmd in cmds:
677 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
678
679 # Check connection against the other sentry_units