Sync charmhelpers to pull in Queens, KSv3 and other updates

Additional verification against this charm will be necessary.
Ryan Beisner 2018-04-06 22:11:21 +00:00
parent 718676787a
commit 626ff0b063
30 changed files with 1359 additions and 791 deletions

View File

@ -30,6 +30,7 @@ import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_ids,
@ -285,7 +286,7 @@ class NRPE(object):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except:
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
@ -302,7 +303,12 @@ class NRPE(object):
"command": nrpecheck.command,
}
service('restart', 'nagios-nrpe-server')
# update-status hooks are configured to fire every 5 minutes by
# default. When nagios-nrpe-server is restarted, the nagios server
# reports checks failing, causing unnecessary alerts. Let's not restart
# on update-status hooks.
if not hook_name() == 'update-status':
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")

View File

@ -65,7 +65,8 @@ def get_ca_cert():
if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in relation_ids('identity-service'):
for r_id in (relation_ids('identity-service') +
relation_ids('identity-credentials')):
for unit in relation_list(r_id):
if ca_cert is None:
ca_cert = relation_get('ca_cert',
@ -90,6 +91,6 @@ def install_ca_cert(ca_cert):
log("CA cert is the same as installed version", level=INFO)
else:
log("Installing new CA cert", level=INFO)
with open(cert_file, 'w') as crt:
with open(cert_file, 'wb') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
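
With the file now opened in binary mode, install_ca_cert() expects bytes rather than str; a hedged sketch of a typical call site, assuming the certificate arrives base64-encoded over relation data as in get_ca_cert() above:

    from base64 import b64decode

    ca_cert = relation_get('ca_cert', rid=r_id, unit=unit)
    if ca_cert:
        # b64decode() returns bytes, matching the 'wb' open() above
        install_ca_cert(b64decode(ca_cert))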

View File

@ -27,6 +27,7 @@ clustering-related helpers.
import subprocess
import os
import time
from socket import gethostname as get_unit_hostname
@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import (
is_leader as juju_is_leader,
status_set,
)
from charmhelpers.core.host import (
modulo_distribution,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
@ -361,3 +365,37 @@ def canonical_url(configs, vip_setting='vip'):
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
def distributed_wait(modulo=None, wait=None, operation_name='operation'):
''' Distribute operations by waiting based on modulo_distribution
If modulo and/or wait are not set, check config_get for those values.
If config values are not set, default to modulo=3 and wait=30.
:param modulo: int The modulo number creates the group distribution
:param wait: int The constant time wait value
:param operation_name: string Operation name for status message
i.e. 'restart'
:side effect: Calls config_get()
:side effect: Calls log()
:side effect: Calls status_set()
:side effect: Calls time.sleep()
'''
if modulo is None:
modulo = config_get('modulo-nodes') or 3
if wait is None:
wait = config_get('known-wait') or 30
if juju_is_leader():
# The leader should never wait
calculated_wait = 0
else:
# non_zero_wait=True guarantees the non-leader who gets modulo 0
# will still wait
calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
non_zero_wait=True)
msg = "Waiting {} seconds for {} ...".format(calculated_wait,
operation_name)
log(msg, DEBUG)
status_set('maintenance', msg)
time.sleep(calculated_wait)
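
A hedged sketch of how a charm might use this to stagger restarts across peer units ('mysql' and the restart helper stand in for whatever the charm manages):

    from charmhelpers.contrib.hahelpers.cluster import distributed_wait
    from charmhelpers.core.host import service_restart

    def cluster_wide_restart():
        # The leader restarts immediately; peers wait modulo-spaced
        # intervals, falling back to modulo=3 / wait=30 when the
        # config options are unset.
        distributed_wait(operation_name='restart')
        service_restart('mysql')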

View File

@ -27,6 +27,7 @@ from charmhelpers.core.hookenv import (
network_get_primary_address,
unit_get,
WARNING,
NoNetworkBinding,
)
from charmhelpers.core.host import (
@ -109,7 +110,12 @@ def get_address_in_network(network, fallback=None, fatal=False):
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
try:
addresses = netifaces.ifaddresses(iface)
except ValueError:
# If an instance was deleted between the
# netifaces.interfaces() call and now, its interfaces are gone
continue
if network.version == 4 and netifaces.AF_INET in addresses:
for addr in addresses[netifaces.AF_INET]:
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
@ -490,7 +496,7 @@ def get_host_ip(hostname, fallback=None):
if not ip_addr:
try:
ip_addr = socket.gethostbyname(hostname)
except:
except Exception:
log("Failed to resolve hostname '%s'" % (hostname),
level=WARNING)
return fallback
@ -518,7 +524,7 @@ def get_hostname(address, fqdn=True):
if not result:
try:
result = socket.gethostbyaddr(address)[0]
except:
except Exception:
return None
else:
result = address
@ -578,6 +584,9 @@ def get_relation_ip(interface, cidr_network=None):
except NotImplementedError:
# If network-get is not available
address = get_host_ip(unit_get('private-address'))
except NoNetworkBinding:
log("No network binding for {}".format(interface), WARNING)
address = get_host_ip(unit_get('private-address'))
if config('prefer-ipv6'):
# Currently IPv6 has priority, eventually we want IPv6 to just be

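The net effect is that callers of get_relation_ip() no longer need to guard against missing bindings themselves; a minimal sketch:

    from charmhelpers.contrib.network.ip import get_relation_ip

    # Falls back to the unit's private-address when the 'cluster'
    # binding is absent (NoNetworkBinding) or network-get is
    # unavailable (NotImplementedError).
    addr = get_relation_ip('cluster')
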
View File

@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50):
target, name, source, str(priority)
]
subprocess.check_call(cmd)
def remove_alternative(name, source):
"""Remove an installed alternative configuration file
:param name: string name of the alternative to remove
:param source: string full path to alternative to remove
"""
cmd = [
'update-alternatives', '--remove',
name, source
]
subprocess.check_call(cmd)
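
A hedged sketch pairing the existing install helper with the new removal helper (the paths are illustrative):

    from charmhelpers.contrib.openstack.alternatives import (
        install_alternative,
        remove_alternative,
    )

    # Point /etc/keystone/keystone.conf at a charm-managed copy...
    install_alternative('keystone.conf', '/etc/keystone/keystone.conf',
                        '/etc/keystone/keystone.conf.juju')
    # ...and drop the alternative again when it is no longer wanted.
    remove_alternative('keystone.conf', '/etc/keystone/keystone.conf.juju')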

View File

@ -13,6 +13,7 @@
# limitations under the License.
import logging
import os
import re
import sys
import six
@ -20,6 +21,9 @@ from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@ -185,7 +189,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=1800):
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
@ -215,7 +219,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
self.log.info('Waiting for extended status on units...')
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))
all_services = self.d.services.keys()
@ -250,7 +257,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
# Check for idleness
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
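
Since timeout now defaults to None, the effective wait can be tuned per run through the environment; for example:

    import os
    # 1800s remains the default; a long-running gate job could
    # extend it like this before the deployment starts.
    os.environ['AMULET_SETUP_TIMEOUT'] = '3600'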
def _get_openstack_release(self):
@ -260,10 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
(self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
self.xenial_pike, self.artful_pike) = range(11)
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,
@ -274,9 +286,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
}
return releases[(self.series, self.openstack)]
@ -291,6 +305,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
@ -303,20 +318,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
pools = [
'rbd',
'cinder',
'glance'
]
else:
# Juno or earlier
if self._get_openstack_release() == self.trusty_icehouse:
# Icehouse
pools = [
'data',
'metadata',
'rbd',
'cinder',
'cinder-ceph',
'glance'
]
elif (self.trusty_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance'
]

View File

@ -23,6 +23,7 @@ import urllib
import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
@ -42,7 +43,6 @@ import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
@ -50,6 +50,13 @@ ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
OPENSTACK_RELEASES_PAIRS = [
'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike', 'xenial_queens',
'bionic_queens']
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
public_port, expected, openstack_release=None):
"""Validate endpoint data. Pick the correct validator based on
OpenStack release. Expected data should be in the v2 format:
{
'id': id,
'region': region,
'adminurl': adminurl,
'internalurl': internalurl,
'publicurl': publicurl,
'service_id': service_id}
"""
validation_function = self.validate_v2_endpoint_data
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
if openstack_release and openstack_release >= xenial_queens:
validation_function = self.validate_v3_endpoint_data
expected = {
'id': expected['id'],
'region': expected['region'],
'region_id': 'RegionOne',
'url': self.valid_url,
'interface': self.not_null,
'service_id': expected['service_id']}
return validation_function(endpoints, admin_port, internal_port,
public_port, expected)
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
@ -92,7 +126,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return 'endpoint not found'
def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
public_port, expected, expected_num_eps=3):
"""Validate keystone v3 endpoint data.
Validate the v3 endpoint data which has changed from v2. The
@ -138,10 +172,89 @@ class OpenStackAmuletUtils(AmuletUtils):
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if len(found) != 3:
if len(found) != expected_num_eps:
return 'Unexpected number of endpoints found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
"""Convert v2 endpoint data into v3.
{
'service_name1': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
'service_name2': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
}
"""
self.log.warn("Endpoint ID and Region ID validation is limited to not "
"null checks after v2 to v3 conversion")
for svc in ep_data.keys():
assert len(ep_data[svc]) == 1, "Unknown data format"
svc_ep_data = ep_data[svc][0]
ep_data[svc] = [
{
'url': svc_ep_data['adminURL'],
'interface': 'admin',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null},
{
'url': svc_ep_data['publicURL'],
'interface': 'public',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null},
{
'url': svc_ep_data['internalURL'],
'interface': 'internal',
'region': svc_ep_data['region'],
'region_id': self.not_null,
'id': self.not_null}]
return ep_data
def validate_svc_catalog_endpoint_data(self, expected, actual,
openstack_release=None):
"""Validate service catalog endpoint data. Pick the correct validator
for the OpenStack version. Expected data should be in the v2 format:
{
'service_name1': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
'service_name2': [
{
'adminURL': adminURL,
'id': id,
'region': region,
'publicURL': publicURL,
'internalURL': internalURL
}],
}
"""
validation_function = self.validate_v2_svc_catalog_endpoint_data
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
if openstack_release and openstack_release >= xenial_queens:
validation_function = self.validate_v3_svc_catalog_endpoint_data
expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
return validation_function(expected, actual)
def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
@ -310,7 +423,6 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
@retry_on_exception(5, base_delay=10)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@ -326,10 +438,10 @@ class OpenStackAmuletUtils(AmuletUtils):
rel = sentry.relation('identity-service',
relation_name)
self.log.debug('keystone relation data: {}'.format(rel))
if rel['api_version'] != str(api_version):
if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')."
"".format(rel['api_version'], api_version))
"".format(rel.get('api_version'), api_version))
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version):
@ -348,15 +460,16 @@ class OpenStackAmuletUtils(AmuletUtils):
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username,
password, tenant):
def authenticate_cinder_admin(self, keystone, api_version=2):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
return cinder_client.Client(username, password, tenant, ept)
self.log.debug('Authenticating cinder admin...')
_clients = {
1: cinder_client.Client,
2: cinder_clientv2.Client}
return _clients[api_version](session=keystone.session)
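
The new signature drops the sentry/credential arguments in favour of an already-authenticated keystone client whose session is reused; a sketch of the new call pattern, following the get_default_keystone_session() example later in this file (u being an OpenStackAmuletUtils instance):

    session, keystone = u.get_default_keystone_session(
        self.keystone_sentry,
        openstack_release=self._get_openstack_release())
    cinder = u.authenticate_cinder_admin(keystone, api_version=2)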
def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
@ -364,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
project_domain_name=None, project_name=None):
"""Authenticate with Keystone"""
self.log.debug('Authenticating with keystone...')
port = 5000
if admin_port:
port = 35357
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
port)
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
if not api_version:
api_version = 2
sess, auth = self.get_keystone_session(
keystone_ip=keystone_ip,
username=username,
password=password,
api_version=api_version,
admin_port=admin_port,
user_domain_name=user_domain_name,
domain_name=domain_name,
project_domain_name=project_domain_name,
project_name=project_name
)
if api_version == 2:
client = keystone_client.Client(session=sess)
else:
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
def get_keystone_session(self, keystone_ip, username, password,
api_version=False, admin_port=False,
user_domain_name=None, domain_name=None,
project_domain_name=None, project_name=None):
"""Return a keystone session object"""
ep = self.get_keystone_endpoint(keystone_ip,
api_version=api_version,
admin_port=admin_port)
if api_version == 2:
auth = v2.Password(
username=username,
password=password,
@ -378,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
client = keystone_client.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
else:
ep = base_ep + "/v3"
auth = v3.Password(
user_domain_name=user_domain_name,
username=username,
@ -394,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
auth_url=ep
)
sess = keystone_session.Session(auth=auth)
client = keystone_client_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
return (sess, auth)
def get_keystone_endpoint(self, keystone_ip, api_version=None,
admin_port=False):
"""Return keystone endpoint"""
port = 5000
if admin_port:
port = 35357
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
port)
if api_version == 2:
ep = base_ep + "/v2.0"
else:
ep = base_ep + "/v3"
return ep
def get_default_keystone_session(self, keystone_sentry,
openstack_release=None):
"""Return a keystone session object and client object assuming standard
default settings
Example call in amulet tests:
self.keystone_session, self.keystone = u.get_default_keystone_session(
self.keystone_sentry,
openstack_release=self._get_openstack_release())
The session can then be used to auth other clients:
neutronclient.Client(session=session)
aodh_client.Client(session=session)
etc.
"""
self.log.debug('Authenticating keystone admin...')
api_version = 2
client_class = keystone_client.Client
# 11 => xenial_queens
if openstack_release and openstack_release >= 11:
api_version = 3
client_class = keystone_client_v3.Client
keystone_ip = keystone_sentry.info['public-address']
session, auth = self.get_keystone_session(
keystone_ip,
api_version=api_version,
username='admin',
password='openstack',
project_name='admin',
user_domain_name='admin_domain',
project_domain_name='admin_domain')
client = client_class(session=session)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(session)
return session, client
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant=None, api_version=None,
@ -617,13 +795,25 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
except:
except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
def _get_cinder_obj_name(self, cinder_object):
"""Retrieve name of cinder object.
:param cinder_object: cinder snapshot or volume object
:returns: str cinder object name
"""
# v1 objects store name in 'display_name' attr but v2+ use 'name'
try:
return cinder_object.display_name
except AttributeError:
return cinder_object.name
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
@ -674,6 +864,13 @@ class OpenStackAmuletUtils(AmuletUtils):
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
except TypeError:
vol_new = cinder.volumes.create(name=vol_name,
imageRef=img_id,
size=vol_size,
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
@ -688,7 +885,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# Re-validate new volume
self.log.debug('Validating volume attributes...')
val_vol_name = cinder.volumes.get(vol_id).display_name
val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
@ -836,9 +1033,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import glob
import json
import math
@ -92,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
format_ipv6_addr,
is_bridge_member,
is_ipv6_disabled,
get_relation_ip,
)
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
get_host_ip,
git_determine_usr_bin,
git_determine_python_path,
enable_memcache,
snap_install_requested,
CompareOpenStackReleases,
os_release,
)
from charmhelpers.core.unitdata import kv
@ -292,7 +293,7 @@ class PostgresqlDBContext(OSContextGenerator):
def db_ssl(rdata, ctxt, ssl_dir):
if 'ssl_ca' in rdata and ssl_dir:
ca_path = os.path.join(ssl_dir, 'db-client.ca')
with open(ca_path, 'w') as fh:
with open(ca_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_ca']))
ctxt['database_ssl_ca'] = ca_path
@ -307,12 +308,12 @@ def db_ssl(rdata, ctxt, ssl_dir):
log("Waiting 1m for ssl client cert validity", level=INFO)
time.sleep(60)
with open(cert_path, 'w') as fh:
with open(cert_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_cert']))
ctxt['database_ssl_cert'] = cert_path
key_path = os.path.join(ssl_dir, 'db-client.key')
with open(key_path, 'w') as fh:
with open(key_path, 'wb') as fh:
fh.write(b64decode(rdata['ssl_key']))
ctxt['database_ssl_key'] = key_path
@ -331,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
self.rel_name = rel_name
self.interfaces = [self.rel_name]
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
def _setup_pki_cache(self):
if self.service and self.service_user:
# This is required for pki token signing if we don't want /tmp to
# be used.
@ -344,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
mkdir(path=cachedir, owner=self.service_user,
group=self.service_user, perms=0o700)
return cachedir
return None
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
@ -377,6 +384,63 @@ class IdentityServiceContext(OSContextGenerator):
# so a missing value just indicates keystone needs
# upgrading
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
ctxt['admin_domain_id'] = rdata.get('service_domain_id')
return ctxt
return {}
class IdentityCredentialsContext(IdentityServiceContext):
'''Context for identity-credentials interface type'''
def __init__(self,
service=None,
service_user=None,
rel_name='identity-credentials'):
super(IdentityCredentialsContext, self).__init__(service,
service_user,
rel_name)
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
credentials_host = rdata.get('credentials_host')
credentials_host = (
format_ipv6_addr(credentials_host) or credentials_host
)
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('credentials_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({
'service_port': rdata.get('credentials_port'),
'service_host': credentials_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('credentials_project'),
'admin_tenant_id': rdata.get('credentials_project_id'),
'admin_user': rdata.get('credentials_username'),
'admin_password': rdata.get('credentials_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'api_version': api_version
})
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
if self.context_complete(ctxt):
return ctxt
return {}
@ -458,7 +522,7 @@ class AMQPContext(OSContextGenerator):
ca_path = os.path.join(
self.ssl_dir, 'rabbit-client-ca.pem')
with open(ca_path, 'w') as fh:
with open(ca_path, 'wb') as fh:
fh.write(b64decode(ctxt['rabbit_ssl_ca']))
ctxt['rabbit_ssl_ca'] = ca_path
@ -554,7 +618,9 @@ class HAProxyContext(OSContextGenerator):
"""
interfaces = ['cluster']
def __init__(self, singlenode_mode=False):
def __init__(self, singlenode_mode=False,
address_types=ADDRESS_TYPES):
self.address_types = address_types
self.singlenode_mode = singlenode_mode
def __call__(self):
@ -563,41 +629,56 @@ class HAProxyContext(OSContextGenerator):
if not relation_ids('cluster') and not self.singlenode_mode:
return {}
if config('prefer-ipv6'):
addr = get_ipv6_addr(exc_list=[config('vip')])[0]
else:
addr = get_host_ip(unit_get('private-address'))
l_unit = local_unit().replace('/', '-')
cluster_hosts = {}
# NOTE(jamespage): build out map of configured network endpoints
# and associated backends
for addr_type in ADDRESS_TYPES:
for addr_type in self.address_types:
cfg_opt = 'os-{}-network'.format(addr_type)
laddr = get_address_in_network(config(cfg_opt))
# NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
# than 'internal'
if addr_type == 'internal':
_addr_map_type = INTERNAL
else:
_addr_map_type = addr_type
# Network spaces aware
laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
config(cfg_opt))
if laddr:
netmask = get_netmask_for_address(laddr)
cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
netmask),
'backends': {l_unit: laddr}}
cluster_hosts[laddr] = {
'network': "{}/{}".format(laddr,
netmask),
'backends': collections.OrderedDict([(l_unit,
laddr)])
}
for rid in relation_ids('cluster'):
for unit in related_units(rid):
for unit in sorted(related_units(rid)):
# API Charms will need to set {addr_type}-address with
# get_relation_ip(addr_type)
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
_unit = unit.replace('/', '-')
cluster_hosts[laddr]['backends'][_unit] = _laddr
# NOTE(jamespage) add backend based on private address - this
# with either be the only backend or the fallback if no acls
# NOTE(jamespage) add backend based on get_relation_ip - this
# will either be the only backend or the fallback if no acls
# match in the frontend
# Network spaces aware
addr = get_relation_ip('cluster')
cluster_hosts[addr] = {}
netmask = get_netmask_for_address(addr)
cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
'backends': {l_unit: addr}}
cluster_hosts[addr] = {
'network': "{}/{}".format(addr, netmask),
'backends': collections.OrderedDict([(l_unit,
addr)])
}
for rid in relation_ids('cluster'):
for unit in related_units(rid):
for unit in sorted(related_units(rid)):
# API Charms will need to set their private-address with
# get_relation_ip('cluster')
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
@ -628,6 +709,8 @@ class HAProxyContext(OSContextGenerator):
ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0'
ctxt['ipv6_enabled'] = not is_ipv6_disabled()
ctxt['stat_port'] = '8888'
db = kv()
@ -802,8 +885,9 @@ class ApacheSSLContext(OSContextGenerator):
else:
# Expect cert/key provided in config (currently assumed that ca
# uses ip for cn)
cn = resolve_address(endpoint_type=INTERNAL)
self.configure_cert(cn)
for net_type in (INTERNAL, ADMIN, PUBLIC):
cn = resolve_address(endpoint_type=net_type)
self.configure_cert(cn)
addresses = self.get_network_addresses()
for address, endpoint in addresses:
@ -843,15 +927,6 @@ class NeutronContext(OSContextGenerator):
for pkgs in self.packages:
ensure_packages(pkgs)
def _save_flag_file(self):
if self.network_manager == 'quantum':
_file = '/etc/nova/quantum_plugin.conf'
else:
_file = '/etc/nova/neutron_plugin.conf'
with open(_file, 'wb') as out:
out.write(self.plugin + '\n')
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
@ -996,7 +1071,6 @@ class NeutronContext(OSContextGenerator):
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
self._save_flag_file()
return ctxt
@ -1176,7 +1250,7 @@ class SubordinateConfigContext(OSContextGenerator):
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
except:
except Exception:
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
@ -1321,8 +1395,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
"usr_bin": git_determine_usr_bin(),
"python_path": git_determine_python_path(),
}
return ctxt
@ -1570,6 +1642,82 @@ class InternalEndpointContext(OSContextGenerator):
return {'use_internal_endpoints': config('use-internal-endpoints')}
class VolumeAPIContext(InternalEndpointContext):
"""Volume API context.
This context provides information regarding the volume endpoint to use
when communicating between services. It determines which version of the
API is appropriate for use.
This value will be determined in the resulting context dictionary
returned from calling the VolumeAPIContext object. Information provided
by this context is as follows:
volume_api_version: the volume api version to use, currently
'v2' or 'v3'
volume_catalog_info: the information to use for a cinder client
configuration that consumes API endpoints from the keystone
catalog. This is defined as the type:name:endpoint_type string.
"""
# FIXME(wolsen) This implementation is based on the provider being able
# to specify the package version to check but does not guarantee that the
# volume service api version selected is available. In practice, it is
# quite likely the volume service *is* providing the v3 volume service.
# This should be resolved when the service-discovery spec is implemented.
def __init__(self, pkg):
"""
Creates a new VolumeAPIContext for use in determining which version
of the Volume API should be used for communication. A package codename
should be supplied for determining the currently installed OpenStack
version.
:param pkg: the package codename to use in order to determine the
component version (e.g. nova-common). See
charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
"""
super(VolumeAPIContext, self).__init__()
self._ctxt = None
if not pkg:
raise ValueError('package name must be provided in order to '
'determine current OpenStack version.')
self.pkg = pkg
@property
def ctxt(self):
if self._ctxt is not None:
return self._ctxt
self._ctxt = self._determine_ctxt()
return self._ctxt
def _determine_ctxt(self):
"""Determines the Volume API endpoint information.
Determines the appropriate version of the API that should be used
as well as the catalog_info string that would be supplied. Returns
a dict containing the volume_api_version and the volume_catalog_info.
"""
rel = os_release(self.pkg, base='icehouse')
version = '2'
if CompareOpenStackReleases(rel) >= 'pike':
version = '3'
service_type = 'volumev{version}'.format(version=version)
service_name = 'cinderv{version}'.format(version=version)
endpoint_type = 'publicURL'
if config('use-internal-endpoints'):
endpoint_type = 'internalURL'
catalog_info = '{type}:{name}:{endpoint}'.format(
type=service_type, name=service_name, endpoint=endpoint_type)
return {
'volume_api_version': version,
'volume_catalog_info': catalog_info,
}
def __call__(self):
return self.ctxt
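
For example, instantiated with a hypothetical package and called on a Pike-or-later deployment, the context would resolve to v3 settings:

    # 'nova-common' is illustrative; any package tracked in
    # PACKAGE_CODENAMES that reflects the installed OpenStack
    # version works here.
    ctxt = VolumeAPIContext('nova-common')()
    # => {'volume_api_version': '3',
    #     'volume_catalog_info': 'volumev3:cinderv3:publicURL'}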
class AppArmorContext(OSContextGenerator):
"""Base class for apparmor contexts."""
@ -1705,3 +1853,30 @@ class MemcacheContext(OSContextGenerator):
ctxt['memcache_server_formatted'],
ctxt['memcache_port'])
return ctxt
class EnsureDirContext(OSContextGenerator):
'''
Serves as a generic context to create a directory as a side-effect.
Useful for software that supports drop-in files (.d) in conjunction
with config option-based templates. Examples include:
* OpenStack oslo.policy drop-in files;
* systemd drop-in config files;
* other software that supports overriding defaults with .d files
Another use-case is when a subordinate generates a configuration for
the primary to render in a separate directory.
Some software requires the target directory to exist before it can be
scanned for drop-in files in a specific format, which is why this
context creates it before a template is rendered.
'''
def __init__(self, dirname):
'''Used merely to ensure that a given directory exists.'''
self.dirname = dirname
def __call__(self):
mkdir(self.dirname)
return {}
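
A hedged sketch of registering it alongside other contexts so the drop-in directory exists before the template is written (paths are illustrative, and configs is assumed to be an OSConfigRenderer):

    configs.register(
        config_file='/etc/keystone/policy.d/override.json',
        contexts=[EnsureDirContext('/etc/keystone/policy.d/')])
    configs.write('/etc/keystone/policy.d/override.json')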

View File

@ -9,7 +9,7 @@
CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)

View File

@ -10,7 +10,7 @@
CURRQthrsh=0
MAXQthrsh=100
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)

View File

@ -23,6 +23,8 @@
Helpers for high availability.
"""
import json
import re
from charmhelpers.core.hookenv import (
@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
WARNING,
)
from charmhelpers.core.host import (
@ -40,6 +43,23 @@ from charmhelpers.core.host import (
from charmhelpers.contrib.openstack.ip import (
resolve_address,
is_ipv6,
)
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config
)
JSON_ENCODE_OPTIONS = dict(
sort_keys=True,
allow_nan=False,
indent=None,
separators=(',', ':'),
)
@ -53,8 +73,8 @@ class DNSHAException(Exception):
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
""" Check for os-*-hostname settings and update resource dictionaries for
the HA relation.
""" Configure DNS-HA resources based on provided configuration and
update resource dictionaries for the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@ -64,61 +84,20 @@ def update_dns_ha_resource_params(resources, resource_params,
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
# Validate the charm environment for DNS HA
assert_charm_supports_dns_ha()
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname', 'os-access-hostname']
# Check which DNS settings are set and update dictionaries
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log('DNS HA: Hostname setting {} is None. Ignoring.'
''.format(setting),
DEBUG)
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
networkspace = m.group(1)
else:
msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine network space name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
DEBUG)
continue
hostname_group.append(hostname_key)
resources[hostname_key] = crm_ocf
resource_params[hostname_key] = (
'params fqdn="{}" ip_address="{}" '
''.format(hostname, resolve_address(endpoint_type=networkspace,
override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
relation_set(relation_id=relation_id, groups={
'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
_relation_data = {'resources': {}, 'resource_params': {}}
update_hacluster_dns_ha(charm_name(),
_relation_data,
crm_ocf)
resources.update(_relation_data['resources'])
resource_params.update(_relation_data['resource_params'])
relation_set(relation_id=relation_id, groups=_relation_data['groups'])
def assert_charm_supports_dns_ha():
"""Validate prerequisites for DNS HA
The MAAS client is only available on Xenial or greater
:raises DNSHAException: if release is < 16.04
"""
if lsb_release().get('DISTRIB_RELEASE') < '16.04':
msg = ('DNS HA is only supported on 16.04 and greater '
@ -137,3 +116,150 @@ def expect_ha():
@returns boolean
"""
return config('vip') or config('dns-ha')
def generate_ha_relation_data(service):
""" Generate relation data for ha relation
Based on configuration options and unit interfaces, generate a json
encoded dict of relation data items for the hacluster relation,
providing configuration for DNS HA or VIPs + haproxy clone sets.
@returns dict: json encoded data for use with relation_set
"""
_haproxy_res = 'res_{}_haproxy'.format(service)
_relation_data = {
'resources': {
_haproxy_res: 'lsb:haproxy',
},
'resource_params': {
_haproxy_res: 'op monitor interval="5s"'
},
'init_services': {
_haproxy_res: 'haproxy'
},
'clones': {
'cl_{}_haproxy'.format(service): _haproxy_res
},
}
if config('dns-ha'):
update_hacluster_dns_ha(service, _relation_data)
else:
update_hacluster_vip(service, _relation_data)
return {
'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
for k, v in _relation_data.items() if v
}
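
A hedged sketch of an ha-relation-joined hook built on this helper; the hacluster charm decodes the json_-prefixed keys on the other side (the service name is illustrative):

    from charmhelpers.contrib.openstack.ha.utils import (
        generate_ha_relation_data,
    )
    from charmhelpers.core.hookenv import relation_set

    def ha_joined(relation_id=None):
        settings = generate_ha_relation_data('keystone')
        relation_set(relation_id=relation_id, **settings)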
def update_hacluster_dns_ha(service, relation_data,
crm_ocf='ocf:maas:dns'):
""" Configure DNS-HA resources based on provided configuration
@param service: Name of the service being configured
@param relation_data: Pointer to dictionary of relation data.
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
# Validate the charm environment for DNS HA
assert_charm_supports_dns_ha()
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname', 'os-access-hostname']
# Check which DNS settings are set and update dictionaries
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log('DNS HA: Hostname setting {} is None. Ignoring.'
''.format(setting),
DEBUG)
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
endpoint_type = m.group(1)
# resolve_address's ADDRESS_MAP uses 'int' not 'internal'
if endpoint_type == 'internal':
endpoint_type = 'int'
else:
msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine endpoint_type name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
DEBUG)
continue
hostname_group.append(hostname_key)
relation_data['resources'][hostname_key] = crm_ocf
relation_data['resource_params'][hostname_key] = (
'params fqdn="{}" ip_address="{}"'
.format(hostname, resolve_address(endpoint_type=endpoint_type,
override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
relation_data['groups'] = {
'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
}
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
def update_hacluster_vip(service, relation_data):
""" Configure VIP resources based on provided configuration
@param service: Name of the service being configured
@param relation_data: Pointer to dictionary of relation data.
"""
cluster_config = get_hacluster_config()
vip_group = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_neutron_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_neutron_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = (get_iface_for_address(vip) or
config('vip_iface'))
netmask = (get_netmask_for_address(vip) or
config('vip_cidr'))
if iface is not None:
vip_key = 'res_{}_{}_vip'.format(service, iface)
if vip_key in vip_group:
if vip not in relation_data['resource_params'][vip_key]:
vip_key = '{}_{}'.format(vip_key, vip_params)
else:
log("Resource '%s' (vip='%s') already exists in "
"vip group - skipping" % (vip_key, vip), WARNING)
continue
relation_data['resources'][vip_key] = res_neutron_vip
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}" '
'nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
relation_data['groups'] = {
'grp_{}_vips'.format(service): ' '.join(vip_group)
}

View File

@ -59,18 +59,13 @@ def determine_dkms_package():
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'contexts': [],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
@ -82,11 +77,7 @@ def quantum_plugins():
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
@ -108,11 +98,7 @@ def neutron_plugins():
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
@ -124,11 +110,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@ -138,11 +120,7 @@ def neutron_plugins():
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@ -152,11 +130,7 @@ def neutron_plugins():
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [determine_dkms_package(),
['neutron-plugin-cisco']],
@ -167,11 +141,7 @@ def neutron_plugins():
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
@ -189,11 +159,7 @@ def neutron_plugins():
'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
@ -203,10 +169,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
'contexts': [
context.SharedDBContext(user=config('database-user'),
database=config('database'),
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': ['plumgrid-lxc',
'iovisor-dkms'],
@ -217,11 +180,7 @@ def neutron_plugins():
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'contexts': [],
'services': [],
'packages': [determine_dkms_package()],
'server_packages': ['neutron-server',

View File

@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }}
[client]
{% if rbd_client_cache_settings -%}
{% for key, value in rbd_client_cache_settings.iteritems() -%}
{% for key, value in rbd_client_cache_settings.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
{%- endif %}
{%- endif %}

View File

@ -17,22 +17,22 @@ defaults
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
timeout queue 5000
timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
timeout connect 5000
timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{%- else %}
timeout client 30000
timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{%- else %}
timeout server 30000
timeout server 90000
{%- endif %}
listen stats
@ -48,7 +48,9 @@ listen stats
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
{% if ipv6_enabled -%}
bind :::{{ ports[0] }}
{% endif -%}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}

View File

@ -0,0 +1,6 @@
[cache]
{% if memcache_url %}
enabled = true
backend = oslo_cache.memcache_pool
memcache_servers = {{ memcache_url }}
{% endif %}

View File

@ -15,9 +15,6 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
{% if python_path -%}
python-path={{ python_path }} \
{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@ -29,7 +26,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory {{ usr_bin }}>
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
@ -44,9 +41,6 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
{% if python_path -%}
python-path={{ python_path }} \
{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@ -58,7 +52,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory {{ usr_bin }}>
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
@ -73,9 +67,6 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
{% if python_path -%}
python-path={{ python_path }} \
{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
@ -87,7 +78,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory {{ usr_bin }}>
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>

View File

@ -93,7 +93,8 @@ class OSConfigTemplate(object):
Associates a config file template with a list of context generators.
Responsible for constructing a template context based on those generators.
"""
def __init__(self, config_file, contexts):
def __init__(self, config_file, contexts, config_template=None):
self.config_file = config_file
if hasattr(contexts, '__call__'):
@ -103,6 +104,8 @@ class OSConfigTemplate(object):
self._complete_contexts = []
self.config_template = config_template
def context(self):
ctxt = {}
for context in self.contexts:
@ -124,6 +127,11 @@ class OSConfigTemplate(object):
self.context()
return self._complete_contexts
@property
def is_string_template(self):
""":returns: Boolean if this instance is a template initialised with a string"""
return self.config_template is not None
class OSConfigRenderer(object):
"""
@ -148,6 +156,10 @@ class OSConfigRenderer(object):
contexts=[context.IdentityServiceContext()])
configs.register(config_file='/etc/haproxy/haproxy.conf',
contexts=[context.HAProxyContext()])
configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
contexts=[context.ExtraPolicyContext(),
context.KeystoneContext()],
config_template=hookenv.config('extra-policy'))
# write out a single config
configs.write('/etc/nova/nova.conf')
# write out all registered configs
@ -218,14 +230,23 @@ class OSConfigRenderer(object):
else:
apt_install('python3-jinja2')
def register(self, config_file, contexts):
def register(self, config_file, contexts, config_template=None):
"""
Register a config file with a list of context generators to be called
during rendering.
config_template can be used to load a template from a string instead of
using template loaders and template files.
:param config_file (str): a path where a config file will be rendered
:param contexts (list): a list of context dictionaries with kv pairs
:param config_template (str): an optional template string to use
"""
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
contexts=contexts)
log('Registered config file: %s' % config_file, level=INFO)
self.templates[config_file] = OSConfigTemplate(
config_file=config_file,
contexts=contexts,
config_template=config_template
)
log('Registered config file: {}'.format(config_file),
level=INFO)
def _get_tmpl_env(self):
if not self._tmpl_env:
@ -235,32 +256,58 @@ class OSConfigRenderer(object):
def _get_template(self, template):
self._get_tmpl_env()
template = self._tmpl_env.get_template(template)
log('Loaded template from %s' % template.filename, level=INFO)
log('Loaded template from {}'.format(template.filename),
level=INFO)
return template
def _get_template_from_string(self, ostmpl):
'''
Get a jinja2 template object from a string.
:param ostmpl: OSConfigTemplate to use as a data source.
'''
self._get_tmpl_env()
template = self._tmpl_env.from_string(ostmpl.config_template)
log('Loaded a template from a string for {}'.format(
ostmpl.config_file),
level=INFO)
return template
def render(self, config_file):
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
log('Config not registered: {}'.format(config_file), level=ERROR)
raise OSConfigException
ctxt = self.templates[config_file].context()
_tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound:
# if no template is found with basename, try looking for it
# using a munged full path, eg:
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
_tmpl = '_'.join(config_file.split('/')[1:])
ostmpl = self.templates[config_file]
ctxt = ostmpl.context()
if ostmpl.is_string_template:
template = self._get_template_from_string(ostmpl)
log('Rendering from a string template: '
'{}'.format(config_file),
level=INFO)
else:
_tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound as e:
log('Could not load template from %s by %s or %s.' %
(self.templates_dir, os.path.basename(config_file), _tmpl),
level=ERROR)
raise e
except exceptions.TemplateNotFound:
# if no template is found with basename, try looking
# for it using a munged full path, eg:
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
_tmpl = '_'.join(config_file.split('/')[1:])
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound as e:
log('Could not load template from {} by {} or {}.'
''.format(
self.templates_dir,
os.path.basename(config_file),
_tmpl
),
level=ERROR)
raise e
log('Rendering from template: %s' % _tmpl, level=INFO)
log('Rendering from template: {}'.format(config_file),
level=INFO)
return template.render(ctxt)
def write(self, config_file):
@ -272,6 +319,8 @@ class OSConfigRenderer(object):
raise OSConfigException
_out = self.render(config_file)
if six.PY3:
_out = _out.encode('UTF-8')
with open(config_file, 'wb') as out:
out.write(_out)
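
A hedged sketch of the string-template path end to end, mirroring the docstring example earlier in this file (the context classes and config option name come from that example and are illustrative):

    configs = OSConfigRenderer(templates_dir='templates/',
                               openstack_release='queens')
    configs.register(
        config_file='/etc/keystone/policy.d/extra.cfg',
        contexts=[context.KeystoneContext()],
        config_template=hookenv.config('extra-policy'))
    # render()/write() use _get_template_from_string() for this entry
    # instead of the template loaders.
    configs.write('/etc/keystone/policy.d/extra.cfg')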

View File

@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
import shutil
import six
import traceback
@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
service_name,
status_set,
hook_name,
application_version_set,
@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import (
lsb_release,
mounts,
@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@ -95,7 +87,7 @@ from charmhelpers.fetch import (
from charmhelpers.fetch.snap import (
snap_install,
snap_refresh,
SNAP_CHANNELS,
valid_snap_channel,
)
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@ -140,6 +132,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
])
@ -157,6 +150,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2016.2', 'newton'),
('2017.1', 'ocata'),
('2017.2', 'pike'),
('2018.1', 'queens'),
])
# The ugly duckling - must list releases oldest to newest
@ -187,6 +181,8 @@ SWIFT_CODENAMES = OrderedDict([
['2.11.0', '2.12.0', '2.13.0']),
('pike',
['2.13.0', '2.15.0']),
('queens',
['2.16.0', '2.17.0']),
])
# >= Liberty version->codename mapping
@ -274,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'newton': 'stable/newton',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -388,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@ -412,6 +389,8 @@ def get_os_codename_package(package, fatal=True):
cmd = ['snap', 'list', package]
try:
out = subprocess.check_output(cmd)
if six.PY3:
out = out.decode('UTF-8')
except subprocess.CalledProcessError as e:
return None
lines = out.split('\n')
@ -426,7 +405,7 @@ def get_os_codename_package(package, fatal=True):
try:
pkg = cache[package]
except:
except Exception:
if not fatal:
return None
# the package is unknown to the current apt cache.
@ -522,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@ -579,6 +557,9 @@ def configure_installation_source(source_plus_key):
Note that the behaviour on error is to log the error to the juju log and
then call sys.exit(1).
"""
if source_plus_key.startswith('snap'):
# Do nothing for snap installs
return
# extract the key if there is one, denoted by a '|' in the rel
source, key = get_source_and_pgp_key(source_plus_key)
@ -615,7 +596,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
with open(juju_rc_path, 'wt') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
@ -645,11 +626,6 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
if "swift" in package:
major_cur_vers = cur_vers.split('.', 1)[0]
major_avail_vers = avail_vers.split('.', 1)[0]
major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
return apt.version_compare(avail_vers, cur_vers) == 1
@ -760,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
def git_install_requested():
"""
Returns true if openstack-origin-git is specified.
"""
return config('openstack-origin-git') is not None
def git_os_codename_install_source(projects_yaml):
"""
Returns OpenStack codename of release being installed from source.
"""
if git_install_requested():
projects = _git_yaml_load(projects_yaml)
if projects in GIT_DEFAULT_BRANCHES.keys():
if projects == 'master':
return 'ocata'
return projects
if 'release' in projects:
if projects['release'] == 'master':
return 'ocata'
return projects['release']
return None
def git_default_repos(projects_yaml):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
core_project = service
for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
if projects_yaml == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron-* and nova-* charms require some additional repos
if service in ['neutron-api', 'neutron-gateway',
'neutron-openvswitch']:
core_project = 'neutron'
if service == 'neutron-api':
repo = {
'name': 'networking-hyperv',
'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
'branch': branch,
}
repos.append(repo)
for project in ['neutron-fwaas', 'neutron-lbaas',
'neutron-vpnaas', 'nova']:
repo = {
'name': project,
'repository': GIT_DEFAULT_REPOS[project],
'branch': branch,
}
repos.append(repo)
elif service in ['nova-cloud-controller', 'nova-compute']:
core_project = 'nova'
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
elif service == 'openstack-dashboard':
core_project = 'horizon'
# finally add the current service's core project repo
repo = {
'name': core_project,
'repository': GIT_DEFAULT_REPOS[core_project],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos, release=default))
return projects_yaml
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
The expected format of projects_yaml is:
repositories:
- {name: keystone,
repository: 'git://git.openstack.org/openstack/keystone.git',
branch: 'stable/icehouse'}
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
# Upgrade setuptools and pip from default virtualenv versions. The default
# versions in trusty break master OpenStack branch deployments.
for p in ['pip', 'setuptools']:
pip_install(p, upgrade=True, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
constraints = None
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
depth = '1'
if 'depth' in p.keys():
depth = p['depth']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
constraints = os.path.join(repo_dir, "upper-constraints.txt")
# upper-constraints didn't exist until after icehouse
if not os.path.isfile(constraints):
constraints = None
# use constraints unless project yaml sets use_constraints to false
if 'use_constraints' in projects.keys():
if not projects['use_constraints']:
constraints = None
else:
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True,
constraints=constraints)
os.environ = old_environ
def _git_validate_projects_yaml(projects, core_project):
"""
Validate the projects yaml.
"""
_git_ensure_key_exists('repositories', projects)
for project in projects['repositories']:
_git_ensure_key_exists('name', project.keys())
_git_ensure_key_exists('repository', project.keys())
_git_ensure_key_exists('branch', project.keys())
if projects['repositories'][0]['name'] != 'requirements':
error_out('{} git repo must be specified first'.format('requirements'))
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
_git_ensure_key_exists('release', projects)
def _git_ensure_key_exists(key, keys):
"""
Ensure that key exists in keys.
"""
if key not in keys:
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements, constraints=None):
"""
Clone and install a single git repository.
"""
if not os.path.exists(parent_dir):
juju_log('Directory {} does not exist. '
'Creating it now.'.format(parent_dir))
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(venv, repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
if http_proxy:
pip_install(repo_dir, proxy=http_proxy, venv=venv,
constraints=constraints)
else:
pip_install(repo_dir, venv=venv, constraints=constraints)
return repo_dir
def _git_update_requirements(venv, package_dir, reqs_dir):
"""
Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt.
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
python = os.path.join(venv, 'bin/python')
cmd = [python, 'update.py', package_dir]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from "
"global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
for p in projects['repositories']:
if p['name'] == project:
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
# Create the init script and systemd unit file from the template
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = ('/usr/share/openstack-pkg-tools/'
'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(init_source, init_dest)
shutil.copyfile(service_source, service_dest)
os.chmod(init_dest, 0o755)
for f in os.listdir(templates_dir):
# If there's a service.in file, use it instead of the generated one
if f.endswith(".service.in"):
service_in_file = f
service_file = f[:-3]
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(service_in_source, service_source)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
for f in os.listdir(templates_dir):
# Generate the systemd unit if there's no existing .service.in
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_in_file = "{}.service.in".format(init_file)
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
if not os.path.exists(service_in_source):
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
def git_determine_usr_bin():
"""Return the /usr/bin path for Apache2 config.
The /usr/bin path will be located in the virtualenv if the charm
is configured to deploy from source.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
else:
return '/usr/bin'
def git_determine_python_path():
"""Return the python-path for Apache2 config.
Returns 'None' unless the charm is configured to deploy from source,
in which case the path of the virtualenv's site-packages is returned.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml),
'lib/python2.7/site-packages')
else:
return None
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@ -1604,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
if git_install_requested():
action_set({'outcome': 'installed from source, skipped upgrade.'})
else:
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'no upgrade available.'})
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
@ -1720,7 +1282,7 @@ def is_unit_paused_set():
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
except:
except Exception:
return False
@ -2034,21 +1596,32 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
:param filename: json filename (i.e.: /etc/glance/policy.json)
:param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
if not items:
return
with open(filename) as fd:
policy = json.load(fd)
# Compare before and after and if nothing has changed don't write the file
# since that could cause unnecessary service restarts.
before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
after = json.dumps(policy, indent=4, sort_keys=True)
if before == after:
return
with open(filename, "w") as fd:
fd.write(json.dumps(policy, indent=4))
fd.write(after)
@cached
def snap_install_requested():
""" Determine if installing from snaps
If openstack-origin is of the form snap:channel-series-release
If openstack-origin is of the form snap:track/channel[/branch]
and channel is in SNAP_CHANNELS, return True.
"""
origin = config('openstack-origin') or ""
@ -2056,10 +1629,12 @@ def snap_install_requested():
return False
_src = origin[5:]
channel, series, release = _src.split('-')
if channel.lower() in SNAP_CHANNELS:
return True
return False
if '/' in _src:
channel = _src.split('/')[1]
else:
# Handle snap:track with no channel
channel = 'stable'
return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@ -2067,7 +1642,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:channel-series-track
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
@ -2077,8 +1652,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
return {}
_src = src[5:]
_channel, _series, _release = _src.split('-')
channel = '--channel={}/{}'.format(_release, _channel)
channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode}
for snap in snaps}
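The origin format changes from snap:channel-series-release to snap:track/channel, and the helper above now passes the track/channel string straight through to --channel. A standalone sketch of the parsing, with an illustrative origin value:

src = 'snap:ocata/stable'  # illustrative openstack-origin value
_src = src[5:]             # strip 'snap:' -> 'ocata/stable'
if '/' in _src:
    channel = _src.split('/')[1]
else:
    channel = 'stable'     # snap:track with no channel
# channel -> 'stable' (checked via valid_snap_channel)
# the install helper builds: '--channel=ocata/stable'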
@ -2090,8 +1664,8 @@ def install_os_snaps(snaps, refresh=False):
@param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}}
Where channel a snapstore channel and mode is --classic, --devmode or
--jailmode.
Where channel is a snapstore channel and mode is --classic, --devmode
or --jailmode.
@param post_snap_install: Callback function to run after snaps have been
installed
"""

View File

@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
@ -370,18 +370,19 @@ def get_mon_map(service):
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(
['ceph', '--id', service,
'mon_status', '--format=json'])
mon_status = check_output(['ceph', '--id', service,
'mon_status', '--format=json'])
if six.PY3:
mon_status = mon_status.decode('UTF-8')
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
log("Unable to parse mon_status json: {}. Error: {}"
.format(mon_status, str(v)))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
log("mon_status command failed with message: {}"
.format(str(e)))
raise
@ -457,7 +458,7 @@ def monitor_key_get(service, key):
try:
output = check_output(
['ceph', '--id', service,
'config-key', 'get', str(key)])
'config-key', 'get', str(key)]).decode('UTF-8')
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None
@ -514,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@ -618,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# failure_domain changed in luminous
if version and version >= '12.0.0':
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# Add plugin specific information
if locality is not None:
# For local erasure codes
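The hunk above gates the failure-domain flag on the Ceph release, since the option was renamed in luminous. A standalone sketch of the same gating, using the same lexicographic version comparison as the helper (version strings illustrative):

def failure_domain_flag(version, failure_domain='host'):
    # 'crush-failure-domain' replaced 'ruleset-failure-domain' in
    # luminous (12.x); older releases keep the ruleset- spelling.
    if version and version >= '12.0.0':
        return 'crush-failure-domain=' + failure_domain
    return 'ruleset-failure-domain=' + failure_domain

# failure_domain_flag('12.2.0') -> 'crush-failure-domain=host'
# failure_domain_flag('10.2.9') -> 'ruleset-failure-domain=host'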
@ -686,7 +698,10 @@ def get_cache_mode(service, pool_name):
"""
validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types)
out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
out = check_output(['ceph', '--id', service,
'osd', 'dump', '--format=json'])
if six.PY3:
out = out.decode('UTF-8')
try:
osd_json = json.loads(out)
for pool in osd_json['pools']:
@ -700,8 +715,9 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
out = check_output(['rados', '--id', service, 'lspools'])
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError:
return False
@ -714,9 +730,12 @@ def get_osds(service):
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
out = check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
return None
@ -734,7 +753,9 @@ def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]).decode('UTF-8')
service, '--pool', pool])
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError:
return False
@ -859,7 +880,9 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally."""
try:
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
out = check_output(['rbd', 'showmapped'])
if six.PY3:
out = out.decode('UTF-8')
except CalledProcessError:
return False
@ -1018,7 +1041,9 @@ def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd).decode('US-ASCII')
output = check_output(cmd)
if six.PY3:
output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
@ -1048,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None):
permission=None, key_name=None,
object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']}
"""
self.ops.append({'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace, 'name': key_name or service_name(),
'group-permission': permission})
self.ops.append({
'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace,
'name': key_name or service_name(),
'group-permission': permission,
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
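add_op_request_access_to_group() can now scope permissions to object prefixes as well as pools. A hedged usage sketch (the group name and prefixes are illustrative):

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
rq.add_op_request_access_to_group(
    name='rbd',                      # illustrative group name
    permission='rwx',
    object_prefix_permissions={
        'rwx': ['prefix1', 'prefix2'],
        'class-read': ['prefix3'],
    })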
@ -1091,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
for key in [
'replicas', 'name', 'op', 'pg_num', 'weight',
'group', 'group-namespace', 'group-permission',
'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from subprocess import (
CalledProcessError,
check_call,
@ -74,10 +75,10 @@ def list_lvm_volume_group(block_device):
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
l = l.decode('UTF-8')
if l.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:])
for lvm in pvd:
lvm = lvm.decode('UTF-8')
if lvm.strip().startswith('VG Name'):
vg = ' '.join(lvm.strip().split()[2:])
return vg
@ -101,3 +102,81 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
def list_logical_volumes(select_criteria=None, path_mode=False):
'''
List logical volumes
:param select_criteria: str: Limit list to those volumes matching this
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
format is required for some commands like lvextend
:returns: [str]: List of logical volumes
'''
lv_display_attr = 'lv_name'
if path_mode:
# Parsing output logic relies on the column order
lv_display_attr = 'vg_name,' + lv_display_attr
cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
if select_criteria:
cmd.extend(['--select', select_criteria])
lvs = []
for lv in check_output(cmd).decode('UTF-8').splitlines():
if not lv:
continue
if path_mode:
lvs.append('/'.join(lv.strip().split()))
else:
lvs.append(lv.strip())
return lvs
list_thin_logical_volume_pools = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^t')
list_thin_logical_volumes = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^V')
def extend_logical_volume_by_device(lv_name, block_device):
'''
Extends the size of logical volume lv_name by the amount of free space on
physical volume block_device.
:param lv_name: str: name of logical volume to be extended (vg/lv format)
:param block_device: str: name of block_device to be allocated to lv_name
'''
cmd = ['lvextend', lv_name, block_device]
check_call(cmd)
def create_logical_volume(lv_name, volume_group, size=None):
'''
Create a new logical volume in an existing volume group
:param lv_name: str: name of logical volume to be created.
:param volume_group: str: Name of volume group to use for the new volume.
:param size: str: Size of logical volume to create (100% if not supplied)
:raises subprocess.CalledProcessError: in the event that the lvcreate fails.
'''
if size:
check_call([
'lvcreate',
'--yes',
'-L',
'{}'.format(size),
'-n', lv_name, volume_group
])
# create the lv with all the space available; this uses lvcreate's
# -l 100%FREE form rather than -L with an explicit size
else:
check_call([
'lvcreate',
'--yes',
'-l',
'100%FREE',
'-n', lv_name, volume_group
])
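The new LVM helpers compose naturally. A hedged sketch of typical calls (volume group, LV names and device are illustrative, and the calls shell out to the lvm2 tools):

from charmhelpers.contrib.storage.linux.lvm import (
    create_logical_volume,
    extend_logical_volume_by_device,
    list_thin_logical_volume_pools,
)

create_logical_volume('mylv', 'myvg', size='10G')  # fixed-size LV
create_logical_volume('biglv', 'myvg')             # takes 100%FREE
extend_logical_volume_by_device('myvg/biglv', '/dev/vdc')
pools = list_thin_logical_volume_pools(path_mode=True)
# pools -> e.g. ['myvg/pool0']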

View File

@ -64,6 +64,6 @@ def is_device_mounted(device):
'''
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except:
except Exception:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))

View File

@ -22,10 +22,12 @@ from __future__ import print_function
import copy
from distutils.version import LooseVersion
from functools import wraps
from collections import namedtuple
import glob
import os
import json
import yaml
import re
import subprocess
import sys
import errno
@ -38,6 +40,7 @@ if not six.PY3:
else:
from collections import UserDict
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@ -65,7 +68,7 @@ def cached(func):
@wraps(func)
def wrapper(*args, **kwargs):
global cache
key = str((func, args, kwargs))
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try:
return cache[key]
except KeyError:
@ -343,6 +346,7 @@ class Config(dict):
"""
with open(self.path, 'w') as f:
os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
def _implicit_save(self):
@ -644,18 +648,31 @@ def is_relation_made(relation, keys='private-address'):
return False
def _port_op(op_name, port, protocol="TCP"):
"""Open or close a service network port"""
_args = [op_name]
icmp = protocol.upper() == "ICMP"
if icmp:
_args.append(protocol)
else:
_args.append('{}/{}'.format(port, protocol))
try:
subprocess.check_call(_args)
except subprocess.CalledProcessError:
# Older Juju pre 2.3 doesn't support ICMP
# so treat it as a no-op if it fails.
if not icmp:
raise
def open_port(port, protocol="TCP"):
"""Open a service network port"""
_args = ['open-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
_port_op('open-port', port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
_args = ['close-port']
_args.append('{}/{}'.format(port, protocol))
subprocess.check_call(_args)
_port_op('close-port', port, protocol)
def open_ports(start, end, protocol="TCP"):
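With _port_op() in place, ICMP is treated as a protocol-only port, and the failure on pre-2.3 Juju is swallowed for ICMP only. A hedged usage sketch (valid only inside a hook, where the open-port/close-port tools exist):

from charmhelpers.core.hookenv import open_port, close_port

open_port(8080)            # runs: open-port 8080/TCP
open_port(53, 'UDP')       # runs: open-port 53/UDP
open_port('ICMP', 'ICMP')  # runs: open-port ICMP (port value unused)
close_port(8080)           # runs: close-port 8080/TCP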
@ -672,6 +689,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args)
def opened_ports():
"""Get the opened ports
*Note that this will only show ports opened in a previous hook*
:returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
"""
_args = ['opened-ports', '--format=json']
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
@ -793,6 +821,10 @@ class Hooks(object):
return wrapper
class NoNetworkBinding(Exception):
pass
def charm_dir():
"""Return the root directory of the current charm"""
d = os.environ.get('JUJU_CHARM_DIR')
@ -1012,7 +1044,6 @@ def juju_version():
universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@ -1072,6 +1103,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding
@ -1079,7 +1112,41 @@ def network_get_primary_address(binding):
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
return subprocess.check_output(cmd).decode('UTF-8').strip()
try:
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
if 'no network config found for binding' in e.output.decode('UTF-8'):
raise NoNetworkBinding("No network binding for {}"
.format(binding))
else:
raise
return response
def network_get(endpoint, relation_id=None):
"""
Retrieve the network details for a relation endpoint
:param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if request not supported by the Juju version.
"""
if not has_juju_version('2.2'):
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
if relation_id and not has_juju_version('2.3'):
raise NotImplementedError # 2.3 added the -r option
cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id:
cmd.append('-r')
cmd.append(relation_id)
response = subprocess.check_output(
cmd,
stderr=subprocess.STDOUT).decode('UTF-8').strip()
return yaml.safe_load(response)
def add_metric(*args, **kwargs):
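network_get() wraps the Juju CLI and parses its YAML output; it needs Juju 2.2+, and 2.3+ for the relation-scoped form. A hedged sketch (the binding name and returned shape are illustrative and vary by Juju version):

from charmhelpers.core.hookenv import network_get

info = network_get('cluster')  # 'cluster' is an illustrative binding
# Plausible parsed shape:
# {'bind-addresses': [{'interface-name': 'eth0',
#                      'addresses': [{'address': '10.0.0.4',
#                                     'cidr': '10.0.0.0/24'}]}],
#  'ingress-addresses': ['10.0.0.4']}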
@ -1111,3 +1178,93 @@ def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
return os.environ.get('JUJU_METER_INFO')
def iter_units_for_relation_name(relation_name):
"""Iterate through all units in a relation
Generator that iterates through all the units in a relation and yields
a named tuple with rid and unit field names.
Usage:
data = [(u.rid, u.unit)
for u in iter_units_for_relation_name(relation_name)]
:param relation_name: string relation name
:yield: Named Tuple with rid and unit field names
"""
RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
for rid in relation_ids(relation_name):
for unit in related_units(rid):
yield RelatedUnit(rid, unit)
def ingress_address(rid=None, unit=None):
"""
Retrieve the ingress-address from a relation when available.
Otherwise, return the private-address.
When used on the consuming side of the relation (unit is a remote
unit), the ingress-address is the IP address that this unit needs
to use to reach the provided service on the remote unit.
When used on the providing side of the relation (unit == local_unit()),
the ingress-address is the IP address that is advertised to remote
units on this relation. Remote units need to use this address to
reach the local provided service on this unit.
Note that charms may document some other method to use in
preference to the ingress_address(), such as an address provided
on a different relation attribute or a service discovery mechanism.
This allows charms to redirect inbound connections to their peers
or different applications such as load balancers.
Usage:
addresses = [ingress_address(rid=u.rid, unit=u.unit)
for u in iter_units_for_relation_name(relation_name)]
:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: string IP address
"""
settings = relation_get(rid=rid, unit=unit)
return (settings.get('ingress-address') or
settings.get('private-address'))
def egress_subnets(rid=None, unit=None):
"""
Retrieve the egress-subnets from a relation.
This function is to be used on the providing side of the
relation, and provides the ranges of addresses that client
connections may come from. The result is uninteresting on
the consuming side of a relation (unit == local_unit()).
Returns a stable list of subnets in CIDR format.
eg. ['192.168.1.0/24', '2001::F00F/128']
If egress-subnets is not available, falls back to using the published
ingress-address, or finally private-address.
:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
"""
def _to_range(addr):
if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
addr += '/32'
elif ':' in addr and '/' not in addr: # IPv6
addr += '/128'
return addr
settings = relation_get(rid=rid, unit=unit)
if 'egress-subnets' in settings:
return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
if 'ingress-address' in settings:
return [_to_range(settings['ingress-address'])]
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
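egress_subnets() normalises bare addresses into CIDR form before returning them. A standalone restatement of that _to_range() step:

import re

def _to_range(addr):
    # Bare IPv4 -> /32, bare IPv6 -> /128, existing CIDRs untouched.
    if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
        addr += '/32'
    elif ':' in addr and '/' not in addr:
        addr += '/128'
    return addr

# _to_range('192.168.1.5')  -> '192.168.1.5/32'
# _to_range('2001::F00F')   -> '2001::F00F/128'
# _to_range('10.0.0.0/24')  -> '10.0.0.0/24'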

View File

@ -34,7 +34,7 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log, DEBUG
from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd)
def chage(username, lastday=None, expiredate=None, inactive=None,
mindays=None, maxdays=None, root=None, warndays=None):
"""Change user password expiry information
:param str username: User to update
:param str lastday: Set when password was changed in YYYY-MM-DD format
:param str expiredate: Set when user's account will no longer be
accessible in YYYY-MM-DD format.
-1 will remove an account expiration date.
:param str inactive: Set the number of days of inactivity after a password
has expired before the account is locked.
-1 will remove an account's inactivity.
:param str mindays: Set the minimum number of days between password
changes to MIN_DAYS.
0 indicates the password can be changed anytime.
:param str maxdays: Set the maximum number of days during which a
password is valid.
-1 as MAX_DAYS disables the maxdays check
:param str root: Apply changes in the CHROOT_DIR directory
:param str warndays: Set the number of days of warning before a password
change is required
:raises subprocess.CalledProcessError: if call to chage fails
"""
cmd = ['chage']
if root:
cmd.extend(['--root', root])
if lastday:
cmd.extend(['--lastday', lastday])
if expiredate:
cmd.extend(['--expiredate', expiredate])
if inactive:
cmd.extend(['--inactive', inactive])
if mindays:
cmd.extend(['--mindays', mindays])
if maxdays:
cmd.extend(['--maxdays', maxdays])
if warndays:
cmd.extend(['--warndays', warndays])
cmd.append(username)
subprocess.check_call(cmd)
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
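chage() maps its keyword arguments directly onto chage command-line flags, and remove_password_expiry is a partial over it. A hedged usage sketch (the account name is illustrative, and the calls require root):

from charmhelpers.core.host import chage, remove_password_expiry

chage('juju-user', expiredate='2025-12-31', maxdays='90')
# runs: chage --expiredate 2025-12-31 --maxdays 90 juju-user

remove_password_expiry('juju-user')
# runs: chage --expiredate -1 --inactive -1 --mindays 0 --maxdays -1 juju-user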
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
@ -506,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
if six.PY3 and isinstance(content, six.string_types):
content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
@ -946,3 +991,38 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
""" Modulo distribution
This helper uses the unit number, a modulo value and a constant wait time
to produce a calculated wait time distribution. This is useful in large
scale deployments to distribute load during an expensive operation such as
service restarts.
If you have 1000 nodes that need to restart, 100 at a time, 1 minute apart:
time.sleep(modulo_distribution(modulo=100, wait=60))
restart()
If you need restarts to happen serially, set modulo to the exact number of
nodes and set a high constant wait time:
time.sleep(modulo_distribution(modulo=10, wait=120))
restart()
@param modulo: int The modulo number creates the group distribution
@param wait: int The constant time wait value
@param non_zero_wait: boolean Override unit % modulo == 0,
return modulo * wait. Used to avoid collisions with
leader nodes which are often given priority.
@return: int Calculated time to wait for unit operation
"""
unit_number = int(local_unit().split('/')[1])
calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
else:
return calculated_wait_time
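A worked restatement of the arithmetic, assuming unit names of the form 'app/N':

def sketch_modulo_wait(unit_number, modulo=3, wait=30, non_zero_wait=False):
    # Same arithmetic as modulo_distribution, minus the local_unit() lookup.
    calculated = (unit_number % modulo) * wait
    if non_zero_wait and calculated == 0:
        return modulo * wait
    return calculated

# sketch_modulo_wait(7)                     -> 30   ((7 % 3) * 30)
# sketch_modulo_wait(6)                     -> 0    ((6 % 3) * 30)
# sketch_modulo_wait(6, non_zero_wait=True) -> 90   (3 * 30)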

View File

@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
'bionic',
)

View File

@ -313,17 +313,26 @@ class PortManagerCallback(ManagerCallback):
with open(port_file) as fp:
old_ports = fp.read().split(',')
for old_port in old_ports:
if bool(old_port):
old_port = int(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
if bool(old_port) and not self.ports_contains(old_port, new_ports):
hookenv.close_port(old_port)
with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports))
for port in new_ports:
# A port is either a number or 'ICMP'
protocol = 'TCP'
if str(port).upper() == 'ICMP':
protocol = 'ICMP'
if event_name == 'start':
hookenv.open_port(port)
hookenv.open_port(port, protocol)
elif event_name == 'stop':
hookenv.close_port(port)
hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
if not bool(port):
return False
if str(port).upper() != 'ICMP':
port = int(port)
return port in ports
def service_stop(service_name):

View File

@ -61,13 +61,19 @@ def bytes_from_string(value):
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
if not matches:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
if matches:
size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
else:
# Assume that value passed in is bytes
try:
size = int(value)
except ValueError:
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return size
class BasicStringComparator(object):
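With the fallback above, bytes_from_string() now accepts plain byte counts as well as unit-suffixed strings:

from charmhelpers.core.strutils import bytes_from_string

bytes_from_string('3K')    # -> 3072 (3 * 1024)
bytes_from_string('2M')    # -> 2097152
bytes_from_string('1024')  # -> 1024 (no suffix: taken as bytes)
bytes_from_string('junk')  # raises ValueError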

View File

@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root',
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
perms=0o444, templates_dir=None, encoding='UTF-8',
template_loader=None, config_template=None):
"""
Render a template.
@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
The context should be a dict containing the values to be replaced in the
template.
config_template may be provided to render from a provided template instead
of loading from a file.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
template_env = Environment(loader=FileSystemLoader(templates_dir))
try:
source = source
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
# load from a string if provided explicitly
if config_template is not None:
template = template_env.from_string(config_template)
else:
try:
source = source
template = template_env.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
if target is not None:
target_dir = os.path.dirname(target)
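When config_template is given, render() builds the template from that string and ignores the on-disk source. A hedged usage sketch (paths and context are illustrative; write_file chowns the target, so this runs as root inside a hook):

from charmhelpers.core.templating import render

render(source='unused.tmpl',  # ignored when config_template is set
       target='/etc/example/example.conf',
       context={'bind_port': 8080},
       config_template='port = {{ bind_port }}\n')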

View File

@ -175,6 +175,8 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
@ -358,7 +360,7 @@ class Storage(object):
try:
yield self.revision
self.revision = None
except:
except Exception:
self.flush(False)
self.revision = None
raise

View File

@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
pass
class InvalidSnapChannel(Exception):
pass
def _snap_exec(commands):
"""
Execute snap commands.
@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages)
def valid_snap_channel(channel):
""" Validate snap channel exists
:raises InvalidSnapChannel: When channel does not exist
:return: Boolean
"""
if channel.lower() in SNAP_CHANNELS:
return True
else:
raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
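valid_snap_channel() either confirms the channel or raises. A sketch assuming the usual snapstore risk levels in SNAP_CHANNELS:

from charmhelpers.fetch.snap import valid_snap_channel, InvalidSnapChannel

valid_snap_channel('stable')  # -> True
valid_snap_channel('Beta')    # -> True (compared lower-cased)
try:
    valid_snap_channel('nightly')
except InvalidSnapChannel as e:
    print(e)  # Invalid Snap Channel: nightly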

View File

@ -572,7 +572,7 @@ def get_upstream_version(package):
cache = apt_cache()
try:
pkg = cache[package]
except:
except Exception:
# the package is unknown to the current apt cache.
return None