Series Upgrade

Implement the series-upgrade feature, allowing units to move between Ubuntu
series.

Change-Id: I8a9a3e61613322a692389b8718f29a91aba65267
David Ames 2018-10-23 21:40:13 +00:00
parent 152d2f17cd
commit e9d7e291b7
18 changed files with 950 additions and 71 deletions

View File

@ -23,8 +23,8 @@
#
import os
import subprocess
from charmhelpers.core import host
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
@ -83,14 +83,4 @@ def retrieve_ca_cert(cert_file):
def install_ca_cert(ca_cert):
if ca_cert:
cert_file = ('/usr/local/share/ca-certificates/'
'keystone_juju_ca_cert.crt')
old_cert = retrieve_ca_cert(cert_file)
if old_cert and old_cert == ca_cert:
log("CA cert is the same as installed version", level=INFO)
else:
log("Installing new CA cert", level=INFO)
with open(cert_file, 'wb') as crt:
crt.write(ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert')

View File

@ -24,7 +24,8 @@ import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import glanceclient.v1 as glance_client
import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (
@ -617,13 +618,13 @@ class OpenStackAmuletUtils(AmuletUtils):
return self.authenticate_keystone(keystone_ip, user, password,
project_name=tenant)
def authenticate_glance_admin(self, keystone):
def authenticate_glance_admin(self, keystone, force_v1_client=False):
"""Authenticates admin user with glance."""
self.log.debug('Authenticating glance admin...')
ep = keystone.service_catalog.url_for(service_type='image',
interface='adminURL')
if keystone.session:
return glance_client.Client(ep, session=keystone.session)
if not force_v1_client and keystone.session:
return glance_clientv2.Client("2", session=keystone.session)
else:
return glance_client.Client(ep, token=keystone.auth_token)
@ -679,18 +680,30 @@ class OpenStackAmuletUtils(AmuletUtils):
nova.flavors.create(name, ram, vcpus, disk, flavorid,
ephemeral, swap, rxtx_factor, is_public)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance,
validate and return a resource pointer.
def glance_create_image(self, glance, image_name, image_url,
download_dir='tests',
hypervisor_type=None,
disk_format='qcow2',
architecture='x86_64',
container_format='bare'):
"""Download an image and upload it to glance, validate its status
and return an image object pointer. KVM defaults, can override for
LXD.
:param glance: pointer to authenticated glance connection
:param glance: pointer to authenticated glance api connection
:param image_name: display name for new image
:param image_url: url to retrieve
:param download_dir: directory to store downloaded image file
:param hypervisor_type: glance image hypervisor property
:param disk_format: glance image disk format
:param architecture: glance image architecture property
:param container_format: glance image container format
:returns: glance image pointer
"""
self.log.debug('Creating glance cirros image '
'({})...'.format(image_name))
self.log.debug('Creating glance image ({}) from '
'{}...'.format(image_name, image_url))
# Download cirros image
# Download image
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
@ -699,22 +712,34 @@ class OpenStackAmuletUtils(AmuletUtils):
else:
opener = urllib.FancyURLopener()
f = opener.open('http://download.cirros-cloud.net/version/released')
version = f.read().strip()
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
abs_file_name = os.path.join(download_dir, image_name)
if not os.path.exists(abs_file_name):
opener.retrieve(image_url, abs_file_name)
# Create glance image
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
glance_properties = {
'architecture': architecture,
}
if hypervisor_type:
glance_properties['hypervisor_type'] = hypervisor_type
# Create glance image
if float(glance.version) < 2.0:
with open(abs_file_name) as f:
image = glance.images.create(
name=image_name,
is_public=True,
disk_format=disk_format,
container_format=container_format,
properties=glance_properties,
data=f)
else:
image = glance.images.create(
name=image_name,
visibility="public",
disk_format=disk_format,
container_format=container_format)
glance.images.upload(image.id, open(abs_file_name, 'rb'))
glance.images.update(image.id, **glance_properties)
# Wait for image to reach active status
img_id = image.id
@ -729,24 +754,68 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
if float(glance.version) < 2.0:
val_img_pub = glance.images.get(img_id).is_public
else:
val_img_pub = glance.images.get(img_id).visibility == "public"
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
val_img_stat, val_img_cfmt, val_img_dfmt))
if val_img_name == image_name and val_img_stat == 'active' \
and val_img_pub is True and val_img_cfmt == 'bare' \
and val_img_dfmt == 'qcow2':
and val_img_pub is True and val_img_cfmt == container_format \
and val_img_dfmt == disk_format:
self.log.debug(msg_attr)
else:
msg = ('Volume validation failed, {}'.format(msg_attr))
msg = ('Image validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return image
def create_cirros_image(self, glance, image_name, hypervisor_type=None):
"""Download the latest cirros image and upload it to glance,
validate and return a resource pointer.
:param glance: pointer to authenticated glance connection
:param image_name: display name for new image
:param hypervisor_type: glance image hypervisor property
:returns: glance image pointer
"""
# /!\ DEPRECATION WARNING
self.log.warn('/!\\ DEPRECATION WARNING: use '
'glance_create_image instead of '
'create_cirros_image.')
self.log.debug('Creating glance cirros image '
'({})...'.format(image_name))
# Get cirros image URL
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open('http://download.cirros-cloud.net/version/released')
version = f.read().strip()
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
version, cirros_img)
f.close()
return self.glance_create_image(
glance,
image_name,
cirros_url,
hypervisor_type=hypervisor_type)
def delete_image(self, glance, image):
"""Delete the specified image."""
@ -998,6 +1067,9 @@ class OpenStackAmuletUtils(AmuletUtils):
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
# For mimic ceph osd lspools output
output = output.replace("\n", ",")
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
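For reference, a minimal usage sketch of the new glance_create_image helper from an Amulet test; the image URL, names and the `u`/`glance` objects are illustrative assumptions, not part of this change.

# Hypothetical Amulet test snippet: `u` is an OpenStackAmuletUtils
# instance and `glance` comes from u.authenticate_glance_admin(keystone).
image = u.glance_create_image(
    glance,
    'bionic-lxd-image',                            # illustrative name
    'http://cloud-images.example.com/bionic.img',  # illustrative URL
    hypervisor_type='lxc',                         # glance image property
    disk_format='qcow2',
    container_format='bare')
u.delete_image(glance, image)                      # clean up after the test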

View File

@ -1389,11 +1389,12 @@ class WorkerConfigContext(OSContextGenerator):
class WSGIWorkerConfigContext(WorkerConfigContext):
def __init__(self, name=None, script=None, admin_script=None,
public_script=None, process_weight=1.00,
public_script=None, user=None, group=None,
process_weight=1.00,
admin_process_weight=0.25, public_process_weight=0.75):
self.service_name = name
self.user = name
self.group = name
self.user = user or name
self.group = group or name
self.script = script
self.admin_script = admin_script
self.public_script = public_script
@ -1518,6 +1519,10 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-qos',
'default': False,
},
'enable_nsg_logging': {
'rel_key': 'enable-nsg-logging',
'default': False,
},
}
ctxt = self.get_neutron_options({})
for rid in relation_ids('neutron-plugin-api'):
@ -1529,10 +1534,15 @@ class NeutronAPIContext(OSContextGenerator):
if 'l2-population' in rdata:
ctxt.update(self.get_neutron_options(rdata))
extension_drivers = []
if ctxt['enable_qos']:
ctxt['extension_drivers'] = 'qos'
else:
ctxt['extension_drivers'] = ''
extension_drivers.append('qos')
if ctxt['enable_nsg_logging']:
extension_drivers.append('log')
ctxt['extension_drivers'] = ','.join(extension_drivers)
return ctxt
@ -1892,7 +1902,7 @@ class EnsureDirContext(OSContextGenerator):
Some software requires a user to create a target directory to be
scanned for drop-in files with a specific format. This is why this
context is needed to do that before rendering a template.
'''
'''
def __init__(self, dirname, **kwargs):
'''Used merely to ensure that a given directory exists.'''
@ -1902,3 +1912,23 @@ class EnsureDirContext(OSContextGenerator):
def __call__(self):
mkdir(self.dirname, **self.kwargs)
return {}
class VersionsContext(OSContextGenerator):
"""Context to return the openstack and operating system versions.
"""
def __init__(self, pkg='python-keystone'):
"""Initialise context.
:param pkg: Package to extrapolate openstack version from.
:type pkg: str
"""
self.pkg = pkg
def __call__(self):
ostack = os_release(self.pkg, base='icehouse')
osystem = lsb_release()['DISTRIB_CODENAME'].lower()
return {
'openstack_release': ostack,
'operating_system_release': osystem}
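A hedged sketch of how a charm could register the new VersionsContext with an OSConfigRenderer; the config file path and release values are illustrative assumptions.

# Illustrative wiring of VersionsContext into a charm's templating.
from charmhelpers.contrib.openstack import context, templating

configs = templating.OSConfigRenderer(templates_dir='templates',
                                      openstack_release='queens')
configs.register('/etc/example/example.conf',
                 [context.VersionsContext(pkg='python-keystone')])
# Templates can then use {{ openstack_release }} (e.g. 'queens') and
# {{ operating_system_release }} (e.g. 'bionic').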

View File

@ -28,6 +28,7 @@ import json
import re
from charmhelpers.core.hookenv import (
expected_related_units,
log,
relation_set,
charm_name,
@ -110,12 +111,17 @@ def assert_charm_supports_dns_ha():
def expect_ha():
""" Determine if the unit expects to be in HA
Check for VIP or dns-ha settings which indicate the unit should expect to
be related to hacluster.
Check juju goal-state if ha relation is expected, check for VIP or dns-ha
settings which indicate the unit should expect to be related to hacluster.
@returns boolean
"""
return config('vip') or config('dns-ha')
ha_related_units = []
try:
ha_related_units = list(expected_related_units(reltype='ha'))
except (NotImplementedError, KeyError):
pass
return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
def generate_ha_relation_data(service):
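For illustration, a hedged sketch of how a charm's status check might consume the goal-state-aware expect_ha(); the helper name check_ha_status is an assumption.

# Illustrative status check using the updated expect_ha().
from charmhelpers.contrib.hahelpers.cluster import expect_ha
from charmhelpers.core.hookenv import relation_ids, status_set

def check_ha_status():
    if expect_ha() and not relation_ids('ha'):
        # A VIP/dns-ha option is set or goal-state shows a pending 'ha'
        # relation, but hacluster has not joined yet.
        status_set('blocked', 'Unit expects HA but hacluster is not related')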

View File

@ -0,0 +1,412 @@
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from charmhelpers.core.hookenv import (
ERROR,
log,
relation_get,
)
from charmhelpers.contrib.network.ip import (
is_ipv6,
ns_query,
)
from charmhelpers.contrib.openstack.utils import (
get_hostname,
get_host_ip,
is_ip,
)
NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
def ssh_directory_for_unit(application_name, user=None):
"""Return the directory used to store ssh assets for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified directory path.
:rtype: str
"""
if user:
application_name = "{}_{}".format(application_name, user)
_dir = os.path.join(NOVA_SSH_DIR, application_name)
for d in [NOVA_SSH_DIR, _dir]:
if not os.path.isdir(d):
os.mkdir(d)
for f in ['authorized_keys', 'known_hosts']:
f = os.path.join(_dir, f)
if not os.path.isfile(f):
open(f, 'w').close()
return _dir
def known_hosts(application_name, user=None):
"""Return the known hosts file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'known_hosts')
def authorized_keys(application_name, user=None):
"""Return the authorized keys file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'authorized_keys')
def ssh_known_host_key(host, application_name, user=None):
"""Return the first entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Host key
:rtype: str or None
"""
cmd = [
'ssh-keygen',
'-f', known_hosts(application_name, user),
'-H',
'-F',
host]
try:
# The first line of output is like '# Host xx found: line 1 type RSA',
# which should be excluded.
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# RC of 1 seems to be legitimate for most ssh-keygen -F calls.
if e.returncode == 1:
output = e.output
else:
raise
output = output.strip()
if output:
# Bug #1500589 cmd has 0 rc on precise if entry not present
lines = output.split('\n')
if len(lines) >= 1:
return lines[0]
return None
def remove_known_host(host, application_name, user=None):
"""Remove the entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
subprocess.check_call(cmd)
def is_same_key(key_1, key_2):
"""Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
:type key_2: str
"""
# The keys are in a format like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
# 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB'; we only need to compare
# the part starting with 'ssh-rsa' after '= ', because the hashed
# hostname at the beginning changes each time.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2
def add_known_host(host, application_name, user=None):
"""Add the given host key to the known hosts file.
:param host: host name
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
try:
remote_key = subprocess.check_output(cmd).strip()
except Exception as e:
log('Could not obtain SSH host key from %s' % host, level=ERROR)
raise e
current_key = ssh_known_host_key(host, application_name, user)
if current_key and remote_key:
if is_same_key(remote_key, current_key):
log('Known host key for compute host %s up to date.' % host)
return
else:
remove_known_host(host, application_name, user)
log('Adding SSH host key to known hosts for compute node at %s.' % host)
with open(known_hosts(application_name, user), 'a') as out:
out.write("{}\n".format(remote_key))
def ssh_authorized_key_exists(public_key, application_name, user=None):
"""Check if given key is in the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Whether given key is in the authorized_key file.
:rtype: boolean
"""
with open(authorized_keys(application_name, user)) as keys:
return ('%s' % public_key) in keys.read()
def add_authorized_key(public_key, application_name, user=None):
"""Add given key to the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
with open(authorized_keys(application_name, user), 'a') as keys:
keys.write("{}\n".format(public_key))
def ssh_compute_add_host_and_key(public_key, hostname, private_address,
application_name, user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param hostname: Hostname to collect host keys from.
:type hostname: str
:param private_address: Corresponding private address for hostname
:type private_address: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
# If remote compute node hands us a hostname, ensure we have a
# known hosts entry for its IP, hostname and FQDN.
hosts = [private_address]
if not is_ipv6(private_address):
if hostname:
hosts.append(hostname)
if is_ip(private_address):
hn = get_hostname(private_address)
if hn:
hosts.append(hn)
short = hn.split('.')[0]
if ns_query(short):
hosts.append(short)
else:
hosts.append(get_host_ip(private_address))
short = private_address.split('.')[0]
if ns_query(short):
hosts.append(short)
for host in list(set(hosts)):
add_known_host(host, application_name, user)
if not ssh_authorized_key_exists(public_key, application_name, user):
log('Saving SSH authorized key for compute host at %s.' %
private_address)
add_authorized_key(public_key, application_name, user)
def ssh_compute_add(public_key, application_name, rid=None, unit=None,
user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param rid: Relation id of the relation between this charm and the app. If
none is supplied it is assumed it is the relation relating to
the current hook context.
:type rid: str
:param unit: Unit to add ssh assets for. If none is supplied it is assumed
it is the unit relating to the current hook context.
:type unit: str
:param user: The user that the ssh assets are for.
:type user: str
"""
relation_data = relation_get(rid=rid, unit=unit)
ssh_compute_add_host_and_key(
public_key,
relation_data.get('hostname'),
relation_data.get('private-address'),
application_name,
user=user)
def ssh_known_hosts_lines(application_name, user=None):
"""Return contents of known_hosts file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
known_hosts_list = []
with open(known_hosts(application_name, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
def ssh_authorized_keys_lines(application_name, user=None):
"""Return contents of authorized_keys file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
authorized_keys_list = []
with open(authorized_keys(application_name, user)) as keys:
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
def ssh_compute_remove(public_key, application_name, user=None):
"""Remove given public key from authorized_keys file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
"""
if not (os.path.isfile(authorized_keys(application_name, user)) or
os.path.isfile(known_hosts(application_name, user))):
return
keys = ssh_authorized_keys_lines(application_name, user=user)
keys = [k.strip() for k in keys]
if public_key not in keys:
return
[keys.remove(key) for key in keys if key == public_key]
with open(authorized_keys(application_name, user), 'w') as _keys:
keys = '\n'.join(keys)
if not keys.endswith('\n'):
keys += '\n'
_keys.write(keys)
def get_ssh_settings(application_name, user=None):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for application for all
units of the given application related to this application for the
app + user combination.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh assets are for.
:type user: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = {}
keys = {}
prefix = ''
if user:
prefix = '{}_'.format(user)
for i, line in enumerate(ssh_known_hosts_lines(
application_name=application_name, user=user)):
settings['{}known_hosts_{}'.format(prefix, i)] = line
if settings:
settings['{}known_hosts_max_index'.format(prefix)] = len(
settings.keys())
for i, line in enumerate(ssh_authorized_keys_lines(
application_name=application_name, user=user)):
keys['{}authorized_keys_{}'.format(prefix, i)] = line
if keys:
keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
settings.update(keys)
return settings
def get_all_user_ssh_settings(application_name):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for application for all
units of the given application related to this application for root user
and nova user.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = get_ssh_settings(application_name)
settings.update(get_ssh_settings(application_name, user='nova'))
return settings
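As a rough usage sketch (the relation key and application name are assumptions), a principal charm could drive the new module from its compute relation hook like this:

# Illustrative consumer of the new ssh_migrations helpers.
from charmhelpers.contrib.openstack.ssh_migrations import (
    ssh_compute_add,
    get_all_user_ssh_settings,
)
from charmhelpers.core.hookenv import relation_get, relation_set

def cloud_compute_relation_changed():
    public_key = relation_get('ssh_public_key')   # assumed relation key
    if public_key:
        ssh_compute_add(public_key, 'nova-compute-kvm')
        # Publish the accumulated known_hosts/authorized_keys entries for
        # the root and nova users back onto the relation.
        relation_set(relation_settings=get_all_user_ssh_settings(
            'nova-compute-kvm'))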

View File

@ -1,12 +1,14 @@
{% if auth_host -%}
[keystone_authtoken]
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
auth_type = password
{% if api_version == "3" -%}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3
project_domain_name = {{ admin_domain_name }}
user_domain_name = {{ admin_domain_name }}
{% else -%}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
project_domain_name = default
user_domain_name = default
{% endif -%}

View File

@ -14,7 +14,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@ -40,7 +40,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@ -66,7 +66,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}

View File

@ -0,0 +1,91 @@
# Configuration file maintained by Juju. Local changes may be overwritten.
{% if port -%}
Listen {{ port }}
{% endif -%}
{% if admin_port -%}
Listen {{ admin_port }}
{% endif -%}
{% if public_port -%}
Listen {{ public_port }}
{% endif -%}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}

View File

@ -186,7 +186,7 @@ SWIFT_CODENAMES = OrderedDict([
('queens',
['2.16.0', '2.17.0']),
('rocky',
['2.18.0']),
['2.18.0', '2.19.0']),
])
# >= Liberty version->codename mapping
@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
if the unit isn't supposed to be paused, just return None, None
If the unit isn't supposed to be paused, just return None, None
If the unit is performing a series upgrade, return a message indicating
this.
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
if is_unit_upgrading_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
# we're paused okay, so set maintenance and return
state = "blocked"
message = ("Ready for do-release-upgrade and reboot. "
"Set complete when finished.")
return state, message
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
message = assess_status_func()
if message:
messages.append(message)
if messages:
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
@ -1689,3 +1702,62 @@ def install_os_snaps(snaps, refresh=False):
snap_install(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
def set_unit_upgrading():
"""Set the unit to a upgrading state in the local kv() store.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', True)
def clear_unit_upgrading():
"""Clear the unit from a upgrading state in the local kv() store
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', False)
def is_unit_upgrading_set():
"""Return the state of the kv().get('unit-upgrading').
To help with units that don't have HookData() (e.g. in testing),
return False if it raises an exception.
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-upgrading')))
except Exception:
return False
def series_upgrade_prepare(pause_unit_helper=None, configs=None):
""" Run common series upgrade prepare tasks.
:param pause_unit_helper: function: Function to pause unit
:param configs: OSConfigRenderer object: Configurations
:returns None:
"""
set_unit_upgrading()
if pause_unit_helper and configs:
if not is_unit_paused_set():
pause_unit_helper(configs)
def series_upgrade_complete(resume_unit_helper=None, configs=None):
""" Run common series upgrade complete tasks.
:param resume_unit_helper: function: Function to resume unit
:param configs: OSConfigRenderer object: Configurations
:returns None:
"""
clear_unit_paused()
clear_unit_upgrading()
if configs:
configs.write_all()
if resume_unit_helper:
resume_unit_helper(configs)
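A minimal sketch of how an OpenStack charm is expected to call these helpers from the new hooks; `hooks`, `pause_unit_helper`, `resume_unit_helper` and `CONFIGS` stand in for the charm's own objects.

# Illustrative charm hooks using the new series-upgrade helpers.
# Assumes the charm already defines `hooks`, `pause_unit_helper`,
# `resume_unit_helper` and `CONFIGS` (an OSConfigRenderer).
from charmhelpers.contrib.openstack.utils import (
    series_upgrade_prepare,
    series_upgrade_complete,
)

@hooks.hook('pre-series-upgrade')
def pre_series_upgrade():
    # Pause services and flag the unit as upgrading before do-release-upgrade.
    series_upgrade_prepare(pause_unit_helper, CONFIGS)

@hooks.hook('post-series-upgrade')
def post_series_upgrade():
    # Rewrite configs for the new series and resume services.
    series_upgrade_complete(resume_unit_helper, CONFIGS)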

View File

@ -48,6 +48,7 @@ INFO = "INFO"
DEBUG = "DEBUG"
TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
cache = {}
@ -98,7 +99,7 @@ def log(message, level=None):
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
command += [message[:SH_MAX_ARG]]
# Missing juju-log should not cause failures in unit tests
# Send log output to stderr
try:
@ -201,11 +202,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
"""The name service group this unit belongs to"""
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
return local_unit().split('/')[0]
def service_name():
"""
.. deprecated:: 0.19.1
Alias for :func:`application_name`.
"""
return application_name()
def model_name():
"""
Name of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_NAME']
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_UUID']
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@ -485,6 +510,67 @@ def related_units(relid=None):
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
def expected_peer_units():
"""Get a generator for units we expect to join peer relation based on
goal-state.
The local unit is excluded from the result to make it easy to gauge
completion of all peers joining the relation with existing hook tools.
Example usage:
log('peer {} of {} joined peer relation'
.format(len(related_units()),
len(list(expected_peer_units()))))
This function will raise NotImplementedError if used with juju versions
without goal-state support.
:returns: iterator
:rtype: types.GeneratorType
:raises: NotImplementedError
"""
if not has_juju_version("2.4.0"):
# goal-state first appeared in 2.4.0.
raise NotImplementedError("goal-state")
_goal_state = goal_state()
return (key for key in _goal_state['units']
if '/' in key and key != local_unit())
def expected_related_units(reltype=None):
"""Get a generator for units we expect to join relation based on
goal-state.
Note that you can not use this function for the peer relation, take a look
at expected_peer_units() for that.
This function will raise KeyError if you request information for a
relation type for which juju goal-state does not have information. It will
raise NotImplementedError if used with juju versions without goal-state
support.
Example usage:
log('participant {} of {} joined relation {}'
.format(len(related_units()),
len(list(expected_related_units())),
relation_type()))
:param reltype: Relation type to list data for, default is to list data for
the relation type we are currently executing a hook for.
:type reltype: str
:returns: iterator
:rtype: types.GeneratorType
:raises: KeyError, NotImplementedError
"""
if not has_juju_version("2.4.4"):
# goal-state existed in 2.4.0, but did not list individual units to
# join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
raise NotImplementedError("goal-state relation unit count")
reltype = reltype or relation_type()
_goal_state = goal_state()
return (key for key in _goal_state['relations'][reltype] if '/' in key)
@cached
def relation_for_unit(unit=None, rid=None):
"""Get the json represenation of a unit's relation"""
@ -973,6 +1059,7 @@ def application_version_set(version):
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
@cached
def goal_state():
"""Juju goal state values"""
cmd = ['goal-state', '--format=json']
@ -1297,3 +1384,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
def unit_doomed(unit=None):
"""Determines if the unit is being removed from the model
Requires Juju 2.4.1.
:param unit: string unit name, defaults to local_unit
:side effect: calls goal_state
:side effect: calls local_unit
:side effect: calls has_juju_version
:return: True if the unit is being removed, already gone, or never existed
"""
if not has_juju_version("2.4.1"):
# We cannot risk blindly returning False for 'we don't know',
# because that could cause data loss; if call sites don't
# need an accurate answer, they likely don't need this helper
# at all.
# goal-state existed in 2.4.0, but did not handle removals
# correctly until 2.4.1.
raise NotImplementedError("is_doomed")
if unit is None:
unit = local_unit()
gs = goal_state()
units = gs.get('units', {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
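A hedged example of where unit_doomed() is useful: skipping departure-time data migration when the unit is being removed entirely (the hook and helper names are illustrative).

# Illustrative: avoid pointless work when the whole unit is going away.
from charmhelpers.core.hookenv import log, unit_doomed

def storage_detaching():
    try:
        if unit_doomed():
            log("Unit is being removed; skipping data migration")
            return
    except NotImplementedError:
        # Juju < 2.4.1 cannot answer reliably; fall through and migrate.
        pass
    migrate_data_off_this_unit()   # assumed charm-specific helper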

View File

@ -34,7 +34,7 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log, DEBUG, local_unit
from .hookenv import log, INFO, DEBUG, local_unit, charm_name
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
@ -535,12 +535,14 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
# lets see if we can grab the file and compare the context, to avoid doing
# a write.
existing_content = None
existing_uid, existing_gid = None, None
existing_uid, existing_gid, existing_perms = None, None, None
try:
with open(path, 'rb') as target:
existing_content = target.read()
stat = os.stat(path)
existing_uid, existing_gid = stat.st_uid, stat.st_gid
existing_uid, existing_gid, existing_perms = (
stat.st_uid, stat.st_gid, stat.st_mode
)
except:
pass
if content != existing_content:
@ -554,7 +556,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
target.write(content)
return
# the contents were the same, but we might still need to change the
# ownership.
# ownership or permissions.
if existing_uid != uid:
log("Changing uid on already existing content: {} -> {}"
.format(existing_uid, uid), level=DEBUG)
@ -563,6 +565,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
log("Changing gid on already existing content: {} -> {}"
.format(existing_gid, gid), level=DEBUG)
os.chown(path, -1, gid)
if existing_perms != perms:
log("Changing permissions on existing content: {} -> {}"
.format(existing_perms, perms), level=DEBUG)
os.chmod(path, perms)
def fstab_remove(mp):
@ -1040,3 +1046,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
return modulo * wait
else:
return calculated_wait_time
def install_ca_cert(ca_cert, name=None):
"""
Install the given cert as a trusted CA.
The ``name`` is the stem of the filename where the cert is written, and if
not provided, it will default to ``juju-{charm_name}``.
If the cert is empty or None, or is unchanged, nothing is done.
"""
if not ca_cert:
return
if not isinstance(ca_cert, bytes):
ca_cert = ca_cert.encode('utf8')
if not name:
name = 'juju-{}'.format(charm_name())
cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
new_hash = hashlib.md5(ca_cert).hexdigest()
if file_hash(cert_file) == new_hash:
return
log("Installing new CA cert at: {}".format(cert_file), level=INFO)
write_file(cert_file, ca_cert)
subprocess.check_call(['update-ca-certificates', '--fresh'])
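A short sketch of the new host.install_ca_cert helper, which the hahelpers change above now delegates to; the certificate bytes and name below are placeholders.

# Illustrative use of the new generic CA cert installer.
from charmhelpers.core.host import install_ca_cert

ca_cert = b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
# Writes /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt and
# only reruns update-ca-certificates when the content actually changes.
install_ca_cert(ca_cert, name='keystone_juju_ca_cert')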

View File

@ -84,6 +84,7 @@ module = "charmhelpers.fetch.%s" % __platform__
fetch = importlib.import_module(module)
filter_installed_packages = fetch.filter_installed_packages
filter_missing_packages = fetch.filter_missing_packages
install = fetch.apt_install
upgrade = fetch.apt_upgrade
update = _fetch_update = fetch.apt_update
@ -96,6 +97,7 @@ if __platform__ == "ubuntu":
apt_update = fetch.apt_update
apt_upgrade = fetch.apt_upgrade
apt_purge = fetch.apt_purge
apt_autoremove = fetch.apt_autoremove
apt_mark = fetch.apt_mark
apt_hold = fetch.apt_hold
apt_unhold = fetch.apt_unhold

View File

@ -13,7 +13,7 @@
# limitations under the License.
import os
from subprocess import check_call
from subprocess import STDOUT, check_output
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
cmd = ['bzr', 'branch']
cmd += cmd_opts
cmd += [source, dest]
check_call(cmd)
check_output(cmd, stderr=STDOUT)
def install(self, source, dest=None, revno=None):
url_parts = self.parse_url(source)

View File

@ -13,7 +13,7 @@
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from subprocess import check_output, CalledProcessError, STDOUT
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
check_call(cmd)
check_output(cmd, stderr=STDOUT)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)

View File

@ -189,6 +189,18 @@ def filter_installed_packages(packages):
return _pkgs
def filter_missing_packages(packages):
"""Return a list of packages that are installed.
:param packages: list of packages to evaluate.
:returns list: Packages that are installed.
"""
return list(
set(packages) -
set(filter_installed_packages(packages))
)
def apt_cache(in_memory=True, progress=None):
"""Build and return an apt cache."""
from apt import apt_pkg
@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False):
_run_apt_command(cmd, fatal)
def apt_autoremove(purge=True, fatal=False):
"""Purge one or more packages."""
cmd = ['apt-get', '--assume-yes', 'autoremove']
if purge:
cmd.append('--purge')
_run_apt_command(cmd, fatal)
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark))
@ -434,6 +454,9 @@ def _add_apt_repository(spec):
:param spec: the parameter to pass to add_apt_repository
"""
if '{series}' in spec:
series = lsb_release()['DISTRIB_CODENAME']
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec])
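A hedged sketch combining the new fetch helpers to drop packages that became obsolete after an upgrade; the package names are illustrative.

# Illustrative cleanup using the new fetch helpers.
from charmhelpers.fetch import (
    apt_purge,
    apt_autoremove,
    filter_missing_packages,
)

OBSOLETE_PACKAGES = ['python-example-old', 'python-example-legacy']

def purge_obsolete_packages():
    installed = filter_missing_packages(OBSOLETE_PACKAGES)
    if installed:
        apt_purge(installed, fatal=True)
        apt_autoremove(purge=True, fatal=True)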

View File

@ -26,7 +26,13 @@ from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.context import (AMQPContext,
IdentityServiceContext,
OSContextGenerator)
from charmhelpers.contrib.openstack.utils import get_os_codename_package
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package,
clear_unit_paused,
clear_unit_upgrading,
set_unit_paused,
set_unit_upgrading,
)
from charmhelpers.contrib.openstack.templating import OSConfigRenderer
from charmhelpers.contrib.charmsupport import nrpe
@ -290,6 +296,30 @@ def update_nrpe_config():
nrpe_setup.write()
@hooks.hook('pre-series-upgrade')
def pre_series_upgrade():
hookenv.log("Running prepare series upgrade hook", "INFO")
# NOTE: In order to indicate the step of the series upgrade process for
# administrators and automated scripts, the charm sets the paused and
# upgrading states.
set_unit_paused()
set_unit_upgrading()
hookenv.status_set("blocked",
"Ready for do-release-upgrade and reboot. "
"Set complete when finished.")
@hooks.hook('post-series-upgrade')
def post_series_upgrade():
hookenv.log("Running complete series upgrade hook", "INFO")
# In order to indicate the step of the series upgrade process for
# administrators and automated scripts, the charm clears the paused and
# upgrading states.
clear_unit_paused()
clear_unit_upgrading()
hookenv.status_set("active", "")
if __name__ == '__main__':
try:
hooks.execute(sys.argv)

hooks/post-series-upgrade Symbolic link
View File

@ -0,0 +1 @@
hooks.py

hooks/pre-series-upgrade Symbolic link
View File

@ -0,0 +1 @@
hooks.py