Support https under multiple networks

james.page@ubuntu.com 2014-09-22 15:23:26 +01:00
parent 58422dad6c
commit fcca20c248
17 changed files with 509 additions and 146 deletions

View File

@ -4,5 +4,6 @@
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/keystone/hooks</path>
<path>/keystone/unit_tests</path>
</pydev_pathproperty>
</pydev_project>

View File

@ -1,4 +1,4 @@
branch: lp:charm-helpers
branch: lp:~james-page/charm-helpers/multiple-https-networks
destination: hooks/charmhelpers
include:
- core

View File

@ -20,7 +20,8 @@ from charmhelpers.core.hookenv import (
)
def get_cert():
def get_cert(cn):
# TODO: deal with multiple https endpoints via charm config
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
@ -30,10 +31,10 @@ def get_cert():
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get('ssl_cert',
cert = relation_get('ssl_cert_{}'.format(cn),
rid=r_id, unit=unit)
if not key:
key = relation_get('ssl_key',
key = relation_get('ssl_key_{}'.format(cn),
rid=r_id, unit=unit)
return (cert, key)

View File

@ -1,10 +1,11 @@
import glob
import sys
from functools import partial
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
ERROR, log, config,
ERROR, log,
)
try:
@ -156,19 +157,102 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def get_ipv6_addr(iface="eth0"):
def format_ipv6_addr(address):
"""
IPv6 needs to be wrapped with [] in url link to parse correctly.
"""
if is_ipv6(address):
address = "[%s]" % address
else:
log("Not an valid ipv6 address: %s" % address,
level=ERROR)
address = None
return address
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None):
"""
Return the assigned IP address for a given interface, if any, or [].
"""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
if not exc_list:
exc_list = []
try:
iface_addrs = netifaces.ifaddresses(iface)
if netifaces.AF_INET6 not in iface_addrs:
raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
inet_num = getattr(netifaces, inet_type)
except AttributeError:
raise Exception('Unknown inet type ' + str(inet_type))
addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
and config('vip') != a['addr']]
if not ipv6_addr:
raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
interfaces = netifaces.interfaces()
if inc_aliases:
ifaces = []
for _iface in interfaces:
if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface)
if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface)
ifaces.sort()
else:
if iface not in interfaces:
if fatal:
raise Exception("%s not found " % (iface))
else:
return []
else:
ifaces = [iface]
return ipv6_addr[0]
addresses = []
for netiface in ifaces:
net_info = netifaces.ifaddresses(netiface)
if inet_num in net_info:
for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type))
return addresses
except ValueError:
raise ValueError("Invalid interface '%s'" % iface)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None):
"""
Return the assigned IPv6 address for a given interface, if any, or [].
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
exc_list=exc_list)
remotly_addressable = []
for address in addresses:
if not address.startswith('fe80'):
remotly_addressable.append(address)
if fatal and not remotly_addressable:
raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
return remotly_addressable
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
"""
Return a list of bridges on the system or []
"""
b_rgex = vnic_dir + '/*/bridge'
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
"""
Return a list of nics comprising a given bridge on the system or []
"""
brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
def is_bridge_member(nic):
"""
Check if a given nic is a member of a bridge
"""
for bridge in get_bridges():
if nic in get_bridge_nics(bridge):
return True
return False

View File

@ -8,7 +8,6 @@ from subprocess import (
check_call
)
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
@ -28,6 +27,11 @@ from charmhelpers.core.hookenv import (
INFO
)
from charmhelpers.core.host import (
mkdir,
write_file
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
@ -38,6 +42,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.contrib.hahelpers.apache import (
get_cert,
get_ca_cert,
install_ca_cert,
)
from charmhelpers.contrib.openstack.neutron import (
@ -47,6 +52,7 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr,
is_address_in_network
)
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@ -490,22 +496,28 @@ class ApacheSSLContext(OSContextGenerator):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
check_call(cmd)
def configure_cert(self):
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
def configure_cert(self, cn=None):
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
cert, key = get_cert()
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(b64decode(cert))
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(b64decode(key))
ca_cert = get_ca_cert()
if ca_cert:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(b64decode(ca_cert))
check_call(['update-ca-certificates'])
mkdir(path=ssl_dir)
cert, key = get_cert(cn)
write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
content=b64decode(cert))
write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
content=b64decode(key))
def configure_ca(self):
install_ca_cert(get_ca_cert())
def canonical_names(self):
'''Figure out which canonical names clients will access this service'''
cns = []
for r_id in relation_ids('identity-service'):
for unit in related_units(r_id):
rdata = relation_get(rid=r_id, unit=unit)
for k in rdata:
if k.startswith('ssl_key_'):
cns.append(k[len('ssl_key_'):])
return list(set(cns))
def __call__(self):
if isinstance(self.external_ports, basestring):
@ -513,21 +525,44 @@ class ApacheSSLContext(OSContextGenerator):
if (not self.external_ports or not https()):
return {}
self.configure_cert()
self.configure_ca()
self.enable_modules()
ctxt = {
'namespace': self.service_namespace,
'private_address': unit_get('private-address'),
'endpoints': []
}
if is_clustered():
ctxt['private_address'] = config('vip')
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
for cn in self.canonical_names():
self.configure_cert(cn)
addresses = []
vips = []
if config('vip'):
vips = config('vip').split()
for network_type in ['os-internal-network',
'os-admin-network',
'os-public-network']:
address = get_address_in_network(config(network_type),
unit_get('private-address'))
if len(vips) > 0 and is_clustered():
for vip in vips:
if is_address_in_network(config(network_type),
vip):
addresses.append((address, vip))
break
elif is_clustered():
addresses.append((address, config('vip')))
else:
addresses.append((address, address))
for address, endpoint in set(addresses):
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (address, endpoint, int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
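To illustrate the new context shape: canonical_names() collects the CNs advertised as ssl_key_* keys on the identity-service relation, configure_cert() is run once per CN, and every (address, vip) pair is expanded into one endpoint tuple per API port. A rough sketch of the result, with invented addresses, VIPs and ports:

# Shape only -- all values below are placeholders.
{
    'namespace': 'keystone',
    'endpoints': [
        # (listen address, ServerName/VIP, apache frontend port, backend port)
        ('10.10.0.5', '10.10.0.100', 34, 12),
        ('10.20.0.5', '10.20.0.100', 34, 12),
    ],
}

The bundled apache templates below unpack each tuple as address, endpoint, ext, int.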

View File

@ -1,16 +1,16 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
{% for address, endpoint, ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
<VirtualHost {{ address }}:{{ ext }}>
ServerName {{ endpoint }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
Order deny,allow
Allow from all
@ -19,5 +19,4 @@ NameVirtualHost *:{{ ext }}
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}

View File

@ -1,16 +1,16 @@
{% if endpoints -%}
{% for ext, int in endpoints -%}
{% for address, endpoint, ext, int in endpoints -%}
Listen {{ ext }}
NameVirtualHost *:{{ ext }}
<VirtualHost *:{{ ext }}>
ServerName {{ private_address }}
<VirtualHost {{ address }}:{{ ext }}>
ServerName {{ endpoint }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
Order deny,allow
Allow from all
@ -19,5 +19,4 @@ NameVirtualHost *:{{ ext }}
Order allow,deny
Allow from all
</Location>
{% endfor -%}
{% endif -%}

View File

@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
])
DEFAULT_LOOPBACK_SIZE = '5G'

View File

@ -7,40 +7,38 @@ from charmhelpers.core.hookenv import (
relation_set,
)
"""
This helper provides functions to support use of a peer relation
for basic key/value storage, with the added benefit that all storage
can be replicated across peer units, so this is really useful for
services that issue usernames/passwords to remote services.
can be replicated across peer units.
def shared_db_changed()
# Only the lead unit should create passwords
if not is_leader():
return
username = relation_get('username')
key = '{}.password'.format(username)
# Attempt to retrieve any existing password for this user
password = peer_retrieve(key)
if password is None:
# New user, create password and store
password = pwgen(length=64)
peer_store(key, password)
create_access(username, password)
relation_set(password=password)
Requirement to use:
To use this, the "peer_echo()" method has to be called from the peer
relation's relation-changed hook:
def cluster_changed()
# Echo any relation data other that *-address
# back onto the peer relation so all units have
# all *.password keys stored on their local relation
# for later retrieval.
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
def cluster_relation_changed():
peer_echo()
Once this is done, you can use peer storage from anywhere:
@hooks.hook("some-hook")
def some_hook():
# You can store and retrieve key/values this way:
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
# There are peers available so we can work with peer storage
peer_store("mykey", "myvalue")
value = peer_retrieve("mykey")
print value
else:
print "No peers joind the relation, cannot share key/values :("
"""
def peer_retrieve(key, relation_name='cluster'):
""" Retrieve a named key from peer relation relation_name """
"""Retrieve a named key from peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
@ -70,7 +68,7 @@ def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
def peer_store(key, value, relation_name='cluster'):
""" Store the key/value pair on the named peer relation relation_name """
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
@ -82,10 +80,10 @@ def peer_store(key, value, relation_name='cluster'):
def peer_echo(includes=None):
"""Echo filtered attributes back onto the same relation for storage
"""Echo filtered attributes back onto the same relation for storage.
Note that this helper must only be called within a peer relation
changed hook
This is a requirement to use the peerstorage module - it needs to be called
from the peer relation's changed hook.
"""
rdata = relation_get()
echo_data = {}

View File

@ -156,12 +156,15 @@ def hook_name():
class Config(dict):
"""A Juju charm config dictionary that can write itself to
disk (as json) and track which values have changed since
the previous hook invocation.
"""A dictionary representation of the charm's config.yaml, with some
extra features:
Do not instantiate this object directly - instead call
``hookenv.config()``
- See which values in the dictionary have changed since the previous hook.
- For values that have changed, see what the previous value was.
- Store arbitrary data for use in a later hook.
NOTE: Do not instantiate this object directly - instead call
``hookenv.config()``, which will return an instance of :class:`Config`.
Example usage::
@ -170,8 +173,8 @@ class Config(dict):
>>> config = hookenv.config()
>>> config['foo']
'bar'
>>> # store a new key/value for later use
>>> config['mykey'] = 'myval'
>>> config.save()
>>> # user runs `juju set mycharm foo=baz`
@ -188,22 +191,34 @@ class Config(dict):
>>> # keys/values that we add are preserved across hooks
>>> config['mykey']
'myval'
>>> # don't forget to save at the end of hook!
>>> config.save()
"""
CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def __getitem__(self, key):
"""For regular dict lookups, check the current juju config first,
then the previous (saved) copy. This ensures that user-saved values
will be returned by a dict lookup.
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return (self._prev_dict or {})[key]
def load_previous(self, path=None):
"""Load previous copy of config from disk so that current values
can be compared to previous values.
"""Load previous copy of config from disk.
In normal usage you don't need to call this method directly - it
is called automatically at object initialization.
:param path:
@ -218,8 +233,8 @@ class Config(dict):
self._prev_dict = json.load(f)
def changed(self, key):
"""Return true if the value for this key has changed since
the last save.
"""Return True if the current value for this key is different from
the previous value.
"""
if self._prev_dict is None:
@ -228,7 +243,7 @@ class Config(dict):
def previous(self, key):
"""Return previous value for this key, or None if there
is no "previous" value.
is no previous value.
"""
if self._prev_dict:
@ -238,7 +253,13 @@ class Config(dict):
def save(self):
"""Save this config to disk.
Preserves items in _prev_dict that do not exist in self.
If the charm is using the :mod:`Services Framework <services.base>`
or :meth:`@hook <Hooks.hook>` decorator, this
is called automatically at the end of successful hook execution.
Otherwise, it should be called directly by user code.
To disable automatic saves, set ``implicit_save=False`` on this
instance.
"""
if self._prev_dict:
@ -478,6 +499,9 @@ class Hooks(object):
hook_name = os.path.basename(args[0])
if hook_name in self._hooks:
self._hooks[hook_name]()
cfg = config()
if cfg.implicit_save:
cfg.save()
else:
raise UnregisteredHookError(hook_name)
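With implicit_save in place, charms using the @hook decorator get their config persisted automatically at the end of a successful hook, and a hook can still opt out and save explicitly. A small sketch, assuming hookenv.config() hands back the same cached Config instance for the lifetime of the hook (hook and key names are arbitrary):

import sys

from charmhelpers.core import hookenv

hooks = hookenv.Hooks()


@hooks.hook('config-changed')
def config_changed():
    cfg = hookenv.config()
    # Opt out of the save that Hooks.execute() would otherwise perform ...
    cfg.implicit_save = False
    # ... stash some state for a later hook, then persist explicitly.
    cfg['last-run'] = 'config-changed'
    cfg.save()


if __name__ == '__main__':
    # For hooks that leave implicit_save alone, execute() saves the config
    # automatically after the hook function returns.
    hooks.execute(sys.argv)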

View File

@ -209,10 +209,15 @@ def mounts():
return system_mounts
def file_hash(path):
"""Generate a md5 hash of the contents of 'path' or None if not found """
def file_hash(path, hash_type='md5'):
"""
Generate a hash checksum of the contents of 'path' or None if not found.
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path):
h = hashlib.md5()
h = getattr(hashlib, hash_type)()
with open(path, 'r') as source:
h.update(source.read()) # IGNORE:E1101 - it does have update
return h.hexdigest()
@ -220,6 +225,26 @@ def file_hash(path):
return None
def check_hash(path, checksum, hash_type='md5'):
"""
Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate :param:`checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
pass
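A brief sketch of how file_hash and check_hash combine; the path and checksum values below are placeholders:

from charmhelpers.core.host import ChecksumError, check_hash, file_hash

path = '/var/tmp/example.tgz'                       # placeholder path
expected_md5 = 'd41d8cd98f00b204e9800998ecf8427e'   # placeholder checksum

# file_hash returns None when the path does not exist.
print file_hash(path, hash_type='sha256')

try:
    check_hash(path, expected_md5, hash_type='md5')
except ChecksumError as exc:
    # The exception message carries both the expected and actual checksums.
    print 'checksum mismatch: %s' % exc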
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing

View File

@ -118,6 +118,9 @@ class ServiceManager(object):
else:
self.provide_data()
self.reconfigure_services()
cfg = hookenv.config()
if cfg.implicit_save:
cfg.save()
def provide_data(self):
"""

View File

@ -1,3 +1,5 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import templating
@ -19,15 +21,21 @@ class RelationContext(dict):
the `name` attribute that are complete will be used to populate the dictionary
values (see `get_data`, below).
The generated context will be namespaced under the interface type, to prevent
potential naming conflicts.
The generated context will be namespaced under the relation :attr:`name`,
to prevent potential naming conflicts.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = None
interface = None
required_keys = []
def __init__(self, *args, **kwargs):
super(RelationContext, self).__init__(*args, **kwargs)
def __init__(self, name=None, additional_required_keys=None):
if name is not None:
self.name = name
if additional_required_keys is not None:
self.required_keys.extend(additional_required_keys)
self.get_data()
def __bool__(self):
@ -101,9 +109,115 @@ class RelationContext(dict):
return {}
class MysqlRelation(RelationContext):
"""
Relation context for the `mysql` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'db'
interface = 'mysql'
required_keys = ['host', 'user', 'password', 'database']
class HttpRelation(RelationContext):
"""
Relation context for the `http` interface.
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
:param list additional_required_keys: Extend the list of :attr:`required_keys`
"""
name = 'website'
interface = 'http'
required_keys = ['host', 'port']
def provide_data(self):
return {
'host': hookenv.unit_get('private-address'),
'port': 80,
}
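A sketch of how a charm might use the new constructor arguments instead of subclassing for every variation; the 'amqp'/'rabbitmq'/'broker' names and keys here are invented for the example and are not part of this changeset:

from charmhelpers.core.services.helpers import RelationContext


class AmqpRelation(RelationContext):
    # Defaults a charm can override at instantiation time.
    name = 'amqp'
    interface = 'rabbitmq'
    required_keys = ['hostname', 'password']


# Override the relation name and require one extra key without subclassing.
amqp = AmqpRelation(name='broker', additional_required_keys=['vhost'])

if amqp:
    # Ready data is namespaced under the relation name, one dict per unit.
    hostname = amqp['broker'][0]['hostname']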
class RequiredConfig(dict):
"""
Data context that loads config options with one or more mandatory options.
Once the required options have been changed from their default values, all
config options will be available, namespaced under `config` to prevent
potential naming conflicts (for example, between a config option and a
relation property).
:param list *args: List of options that must be changed from their default values.
"""
def __init__(self, *args):
self.required_options = args
self['config'] = hookenv.config()
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
self.config = yaml.load(fp).get('options', {})
def __bool__(self):
for option in self.required_options:
if option not in self['config']:
return False
current_value = self['config'][option]
default_value = self.config[option].get('default')
if current_value == default_value:
return False
if current_value in (None, '') and default_value in (None, ''):
return False
return True
def __nonzero__(self):
return self.__bool__()
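A usage sketch, assuming the charm's config.yaml defines 'admin-password' and 'region' options (names invented for the example):

from charmhelpers.core.services.helpers import RequiredConfig

required = RequiredConfig('admin-password', 'region')

if required:
    # All charm config is available, namespaced under 'config'.
    password = required['config']['admin-password']
else:
    # At least one required option is still at its config.yaml default.
    pass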
class StoredContext(dict):
"""
A data context that always returns the data that it was first created with.
This is useful to do a one-time generation of things like passwords, that
will thereafter use the same value that was originally generated, instead
of generating a new value each time it is run.
"""
def __init__(self, file_name, config_data):
"""
If the file exists, populate `self` with the data from the file.
Otherwise, populate with the given data and persist it to the file.
"""
if os.path.exists(file_name):
self.update(self.read_context(file_name))
else:
self.store_context(file_name, config_data)
self.update(config_data)
def store_context(self, file_name, config_data):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'w') as file_stream:
os.fchmod(file_stream.fileno(), 0600)
yaml.dump(config_data, file_stream)
def read_context(self, file_name):
if not os.path.isabs(file_name):
file_name = os.path.join(hookenv.charm_dir(), file_name)
with open(file_name, 'r') as file_stream:
data = yaml.load(file_stream)
if not data:
raise OSError("%s is empty" % file_name)
return data
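A sketch of the one-time generation pattern StoredContext is meant for; the file name and the use of uuid for password material are illustrative choices, not prescribed by the module:

import uuid

from charmhelpers.core.services.helpers import StoredContext

# First run: generates a value and persists it to database.yml in the charm
# directory. Later runs: returns the stored value instead of a fresh one.
db_creds = StoredContext('database.yml',
                         {'db_password': uuid.uuid4().hex})
password = db_creds['db_password']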
class TemplateCallback(ManagerCallback):
"""
Callback class that will render a template, for use as a ready action.
Callback class that will render a Jinja2 template, for use as a ready action.
:param str source: The template source file, relative to `$CHARM_DIR/templates`
:param str target: The target to write the rendered template to
:param str owner: The owner of the rendered file
:param str group: The group of the rendered file
:param int perms: The permissions of the rendered file
"""
def __init__(self, source, target, owner='root', group='root', perms=0444):
self.source = source

View File

@ -311,22 +311,35 @@ def configure_sources(update=False,
apt_update(fatal=True)
def install_remote(source):
def install_remote(source, *args, **kwargs):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this modules submodules
Options supported are submodule-specific"""
Schemes supported are based on this module's submodules.
Options supported are submodule-specific.
Additional arguments are passed through to the submodule.
For example::
dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')
This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
installed_to = handler.install(source, *args, **kwargs)
except UnhandledSource:
pass
if not installed_to:

View File

@ -1,6 +1,8 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib
from charmhelpers.fetch import (
BaseFetchHandler,
@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""Handler for archives via generic URLs"""
"""
Handler to download archive files from arbitrary URLs.
Can fetch from http, https, ftp, and file URLs.
Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
return False
def download(self, source, dest):
"""
Download an archive file.
:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@ -48,7 +64,29 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
os.unlink(dest)
raise e
def install(self, source):
# Mandatory file validation via Sha1 or MD5 hashing.
def download_and_validate(self, url, hashsum, validate="sha1"):
tempfile, headers = urlretrieve(url)
check_hash(tempfile, hashsum, validate)
return tempfile
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
"""
Download and install an archive file, with optional checksum validation.
The checksum can also be given on the :param:`source` URL's fragment.
For example::
handler.install('http://example.com/file.tgz#sha1=deadbeef')
:param str source: URL pointing to an archive file.
:param str dest: Local destination path to install to. If not given,
installs to `$CHARM_DIR/archives/archive_file_name`.
:param str checksum: If given, validate the archive file after download.
:param str hash_type: Algorithm used to generate :param:`checksum`.
Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
@ -60,4 +98,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
return extract(dld_file)
options = urlparse.parse_qs(url_parts.fragment)
for key, value in options.items():
if key in hashlib.algorithms:
check_hash(dld_file, value, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
return extract(dld_file, dest)

View File

@ -1,17 +1,19 @@
from charmhelpers.core.hookenv import (
config, unit_private_ip)
from charmhelpers.core.hookenv import config, unit_get
from charmhelpers.core.host import mkdir, write_file
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
is_clustered,
is_clustered
)
from subprocess import (
check_call
)
from charmhelpers.contrib.hahelpers.apache import install_ca_cert
from charmhelpers.contrib.network.ip import (
get_address_in_network, is_address_in_network)
import os
@ -24,34 +26,50 @@ class ApacheSSLContext(context.ApacheSSLContext):
external_ports = []
service_namespace = 'keystone'
def __call__(self):
# late import to work around circular dependency
from keystone_utils import determine_ports
self.external_ports = determine_ports()
return super(ApacheSSLContext, self).__call__()
def configure_cert(self):
# import keystone_ssl as ssl
def configure_cert(self, cn):
from keystone_utils import SSH_USER, get_ca
if not os.path.isdir('/etc/apache2/ssl'):
os.mkdir('/etc/apache2/ssl')
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
if not os.path.isdir(ssl_dir):
os.mkdir(ssl_dir)
if is_clustered():
https_cn = config('vip')
else:
https_cn = unit_private_ip()
mkdir(path=ssl_dir)
ca = get_ca(user=SSH_USER)
cert, key = ca.get_cert_and_key(common_name=https_cn)
with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
cert_out.write(cert)
with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
key_out.write(key)
if ca:
with open(CA_CERT_PATH, 'w') as ca_out:
ca_out.write(ca.get_ca_bundle())
check_call(['update-ca-certificates'])
cert, key = ca.get_cert_and_key(common_name=cn)
write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)),
content=cert)
write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)),
content=key)
def configure_ca(self):
from keystone_utils import SSH_USER, get_ca
ca = get_ca(user=SSH_USER)
install_ca_cert(ca.get_ca_bundle())
def canonical_names(self):
addresses = []
vips = []
if config('vip'):
vips = config('vip').split()
for network_type in ['os-internal-network',
'os-admin-network',
'os-public-network']:
address = get_address_in_network(config(network_type),
unit_get('private-address'))
if len(vips) > 0 and is_clustered():
for vip in vips:
if is_address_in_network(config(network_type),
vip):
addresses.append(vip)
break
elif is_clustered():
addresses.append(config('vip'))
else:
addresses.append(address)
return list(set(addresses))
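A worked example of the mapping this method produces, with invented networks and VIPs:

# Hypothetical charm config:
#   os-internal-network: 10.10.0.0/24
#   os-admin-network:    10.10.0.0/24   (shared with internal)
#   os-public-network:   10.20.0.0/24
#   vip:                 "10.10.0.100 10.20.0.100"
#
# With the unit clustered, each network resolves to the VIP that falls inside
# it and duplicates collapse via set(), so (in no particular order):
#
#   canonical_names() -> ['10.10.0.100', '10.20.0.100']
#
# The parent ApacheSSLContext then calls configure_cert() once per name,
# writing /etc/apache2/ssl/keystone/cert_<cn> and key_<cn> for each VIP.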
class HAProxyContext(context.HAProxyContext):

View File

@ -1,5 +1,5 @@
import keystone_context as context
from mock import patch
from mock import patch, MagicMock
from test_utils import (
CharmTestCase
@ -16,6 +16,7 @@ class TestKeystoneContexts(CharmTestCase):
def setUp(self):
super(TestKeystoneContexts, self).setUp(context, TO_PATCH)
@patch('charmhelpers.contrib.openstack.context.config')
@patch('charmhelpers.contrib.openstack.context.is_clustered')
@patch('charmhelpers.contrib.openstack.context.determine_apache_port')
@patch('charmhelpers.contrib.openstack.context.determine_api_port')
@ -25,21 +26,24 @@ class TestKeystoneContexts(CharmTestCase):
mock_unit_get,
mock_determine_api_port,
mock_determine_apache_port,
mock_is_clustered):
mock_is_clustered,
mock_config):
mock_https.return_value = True
mock_unit_get.return_value = '1.2.3.4'
mock_determine_api_port.return_value = '12'
mock_determine_apache_port.return_value = '34'
mock_is_clustered.return_value = False
mock_config.return_value = None
ctxt = context.ApacheSSLContext()
with patch.object(ctxt, 'enable_modules'):
with patch.object(ctxt, 'configure_cert'):
self.assertEquals(ctxt(), {'endpoints': [(34, 12)],
'private_address': '1.2.3.4',
'namespace': 'keystone'})
self.assertTrue(mock_https.called)
mock_unit_get.assert_called_with('private-address')
ctxt.enable_modules = MagicMock()
ctxt.configure_cert = MagicMock()
ctxt.configure_ca = MagicMock()
ctxt.canonical_names = MagicMock()
self.assertEquals(ctxt(), {'endpoints': [('1.2.3.4', '1.2.3.4', 34, 12)],
'namespace': 'keystone'})
self.assertTrue(mock_https.called)
mock_unit_get.assert_called_with('private-address')
@patch('charmhelpers.contrib.openstack.context.config')
@patch('charmhelpers.contrib.openstack.context.relation_ids')