Enable haproxy

Liam Young 2015-01-14 16:48:07 +00:00
parent e7e6db7dc2
commit 7d14bf9ef0
32 changed files with 3561 additions and 34 deletions

View File

@ -9,6 +9,8 @@ include:
- apache
- cluster
- payload.execd
- contrib.openstack.alternatives
- contrib.openstack|inc=*
- contrib.network.ip
- contrib.openstack.ip
- contrib.storage.linux
- contrib.python.packages

View File

@ -0,0 +1,29 @@
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.hahelpers.cluster import (
determine_api_port,
determine_apache_port,
)
class HAProxyContext(context.HAProxyContext):
def __call__(self):
ctxt = super(HAProxyContext, self).__call__()
# Apache ports
a_cephradosgw_api = determine_apache_port(80,
singlenode_mode=True)
port_mapping = {
'cephradosgw-server': [
80, a_cephradosgw_api]
}
ctxt['cephradosgw_bind_port'] = determine_api_port(
80,
singlenode_mode=True,
)
# for haproxy.conf
ctxt['service_ports'] = port_mapping
return ctxt
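
For orientation, a minimal sketch of how this context would typically be wired up; the module name ceph_radosgw_context and the templates_dir value are assumptions for illustration, not part of this commit.

# Hypothetical wiring only: register haproxy.cfg with the new context so
# 'service_ports' and 'cephradosgw_bind_port' reach the template at render
# time. The module name below is assumed.
from charmhelpers.contrib.openstack.templating import OSConfigRenderer
import ceph_radosgw_context  # assumed name for the context module above

configs = OSConfigRenderer(templates_dir='templates/',
                           openstack_release='icehouse')
configs.register('/etc/haproxy/haproxy.cfg',
                 [ceph_radosgw_context.HAProxyContext()])
configs.write('/etc/haproxy/haproxy.cfg')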

View File

@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa

View File

@ -13,6 +13,7 @@ clustering-related helpers.
import subprocess
import os
from socket import gethostname as get_unit_hostname
import six
@ -28,12 +29,19 @@ from charmhelpers.core.hookenv import (
WARNING,
unit_get,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
class HAIncompleteConfig(Exception):
pass
class CRMResourceNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
@ -68,24 +76,30 @@ def is_clustered():
return False
def is_crm_leader(resource):
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
as returned by calling the external "crm" command.
We allow this operation to be retried to avoid the possibility of getting a
false negative. See LP #1396246 for more info.
"""
cmd = [
"crm", "resource",
"show", resource
]
cmd = ['crm', 'resource', 'show', resource]
try:
status = subprocess.check_output(cmd).decode('UTF-8')
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError:
return False
else:
if get_unit_hostname() in status:
return True
else:
return False
status = None
if status and get_unit_hostname() in status:
return True
if status and "resource %s is NOT running" % (resource) in status:
raise CRMResourceNotFound("CRM resource %s not found" % (resource))
return False
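
For readers unfamiliar with the decorator imported above, a rough approximation of what retry_on_exception does is sketched below; the real implementation lives in charmhelpers.core.decorators and differs in detail (logging, delay growth), so treat this as illustration only.

# Approximate behaviour of retry_on_exception, for illustration only.
import time
from functools import wraps

def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    def wrap(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            retries = num_retries
            delay = base_delay
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if retries <= 0:
                        raise
                    retries -= 1
                    if delay:
                        time.sleep(delay)
                        delay *= 2  # back off before the next attempt
        return wrapped
    return wrap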
def is_leader(resource):

View File

@ -0,0 +1,92 @@
import six
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None, stable=True):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
if self.stable:
for svc in other_services:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
for svc in other_services:
if svc['name'] in base_charms:
temp = 'lp:charms/{}'
svc['location'] = temp.format(svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
return other_services
def _add_services(self, this_service, other_services):
"""Add services to the deployment and set openstack-origin/source."""
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw']
if self.openstack:
for svc in services:
if svc['name'] not in use_source:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]
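
As a worked example of the branch-location logic above (the outcome follows directly from the method; the service names are arbitrary):

# With stable=False and current_next='trusty':
other_services = [{'name': 'mysql'}, {'name': 'keystone'}]
# _determine_branch_locations(other_services) returns:
# [{'name': 'mysql',    'location': 'lp:charms/mysql'},
#  {'name': 'keystone',
#   'location': 'lp:~openstack-charmers/charms/trusty/keystone/next'}]
# With stable=True, every service would point at lp:charms/<name>.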

View File

@ -0,0 +1,278 @@
import logging
import os
import time
import urllib
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import six
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities.
This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate endpoint data.
Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint.
"""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if (admin_port in ep.adminurl and
internal_port in ep.internalurl and
public_port in ep.publicurl):
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
Validate a list of actual tenant data vs list of expected tenant
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e['name'])
return ret
def validate_role_data(self, expected, actual):
"""Validate role data.
Validate a list of actual role data vs a list of expected role
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e['name'])
return ret
def validate_user_data(self, expected, actual):
"""Validate user data.
Validate a list of actual user data vs a list of expected user
data.
"""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e['name'])
return ret
def validate_flavor_data(self, expected, actual):
"""Validate flavor data.
Validate a list of actual flavors vs a list of expected flavors.
"""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists."""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
unit = keystone_sentry
service_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
def create_cirros_image(self, glance, image_name):
"""Download the latest cirros image and upload it to glance."""
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
proxies = {'http': http_proxy}
opener = urllib.FancyURLopener(proxies)
else:
opener = urllib.FancyURLopener()
f = opener.open("http://download.cirros-cloud.net/version/released")
version = f.read().strip()
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
local_path = os.path.join('tests', cirros_img)
if not os.path.exists(local_path):
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
version, cirros_img)
opener.retrieve(cirros_url, local_path)
f.close()
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
count = 1
status = image.status
while status != 'active' and count < 10:
time.sleep(3)
image = glance.images.get(image.id)
status = image.status
self.log.debug('image status: {}'.format(status))
count += 1
if status != 'active':
self.log.error('image creation timed out')
return None
return image
def delete_image(self, glance, image):
"""Delete the specified image."""
num_before = len(list(glance.images.list()))
glance.images.delete(image)
count = 1
num_after = len(list(glance.images.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(glance.images.list()))
self.log.debug('number of images: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('image deletion timed out')
return False
return True
def create_instance(self, nova, image_name, instance_name, flavor):
"""Create the specified instance."""
image = nova.images.find(name=image_name)
flavor = nova.flavors.find(name=flavor)
instance = nova.servers.create(name=instance_name, image=image,
flavor=flavor)
count = 1
status = instance.status
while status != 'ACTIVE' and count < 60:
time.sleep(3)
instance = nova.servers.get(instance.id)
status = instance.status
self.log.debug('instance status: {}'.format(status))
count += 1
if status != 'ACTIVE':
self.log.error('instance creation timed out')
return None
return instance
def delete_instance(self, nova, instance):
"""Delete the specified instance."""
num_before = len(list(nova.servers.list()))
nova.servers.delete(instance)
count = 1
num_after = len(list(nova.servers.list()))
while num_after != (num_before - 1) and count < 10:
time.sleep(3)
num_after = len(list(nova.servers.list()))
self.log.debug('number of instances: {}'.format(num_after))
count += 1
if num_after != (num_before - 1):
self.log.error('instance deletion timed out')
return False
return True

File diff suppressed because it is too large

View File

@ -0,0 +1,223 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=QUANTUM_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
},
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata'],
'packages': [[headers_package()] + determine_dkms_package(),
['calico-compute',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata']],
'server_packages': ['neutron-server', 'calico-control'],
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log("Network manager '%s' does not support plugins." % (manager),
level=ERROR)
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatibility (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
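
As a usage note, neutron_plugin_attribute() is the entry point other helpers use to read the maps above; for example (the headers package name depends on the running kernel, so the values in comments are illustrative):

# Illustrative lookups against the plugin maps defined above.
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

pkgs = neutron_plugin_attribute('ovs', 'packages', net_manager='neutron')
# e.g. [['linux-headers-3.13.0-40-generic'],
#       ['neutron-plugin-openvswitch-agent']]
# (openvswitch-datapath-dkms is appended to the first list only on
#  kernels older than 3.13, per determine_dkms_package)

cfg = neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
# '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini', or the ml2
# config path from icehouse onwards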

View File

@ -0,0 +1,2 @@
# dummy __init__.py to fool syncer into thinking this is a syncable python
# module

View File

@ -0,0 +1,15 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}

View File

@ -0,0 +1,58 @@
global
log {{ local_host }} local0
log {{ local_host }} local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
defaults
log global
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
timeout connect 1000
{% if haproxy_client_timeout -%}
timeout client {{ haproxy_client_timeout }}
{% else -%}
timeout client 30000
{% endif -%}
{% if haproxy_server_timeout -%}
timeout server {{ haproxy_server_timeout }}
{% else -%}
timeout server 30000
{% endif -%}
listen stats {{ stat_port }}
mode http
stats enable
stats hide-version
stats realm Haproxy\ Statistics
stats uri /
stats auth admin:password
{% if frontends -%}
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
{% if ipv6 -%}
bind :::{{ ports[0] }}
{% endif -%}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
{% endfor -%}
default_backend {{ service }}_{{ default_backend }}
{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
balance leastconn
{% for unit, address in frontends[frontend]['backends'].items() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
{% endfor -%}
{% endfor -%}
{% endif -%}
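
For reference, a hedged sketch of the context this template consumes; the key names follow from the template variables and from HAProxyContext earlier in this commit, but every value below is made up.

# Illustrative haproxy.cfg template context (values are placeholders).
ctxt = {
    'local_host': '127.0.0.1',
    'stat_port': ':8888',
    'haproxy_client_timeout': None,   # template falls back to 30000 ms
    'haproxy_server_timeout': None,   # template falls back to 30000 ms
    'ipv6': False,
    'service_ports': {'cephradosgw-server': [80, 70]},  # [frontend, backend]
    'default_backend': '10.0.0.0/24',
    'frontends': {
        '10.0.0.0/24': {
            'network': '10.0.0.0/24',
            'backends': {'ceph-radosgw-0': '10.0.0.10',
                         'ceph-radosgw-1': '10.0.0.11'},
        },
    },
}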

View File

@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
ServerName {{ endpoint }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endif -%}

View File

@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
ServerName {{ endpoint }}
SSLEngine on
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
ProxyPass / http://localhost:{{ int }}/
ProxyPassReverse / http://localhost:{{ int }}/
ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
<Location />
Order allow,deny
Allow from all
</Location>
{% endif -%}
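
The two HTTPS frontend templates above are identical in content; each expects an endpoints context shaped roughly as follows (values are placeholders), where each tuple is the bind address, the ServerName, the external TLS port, and the internal plain-HTTP port being proxied to.

# Illustrative context for the HTTPS frontend templates (placeholders).
ctxt = {
    'namespace': 'ceph-radosgw',
    'ext_ports': [443],
    'endpoints': [
        ('10.0.0.10', '10.0.0.10', 443, 70),
    ],
}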

View File

@ -0,0 +1,279 @@
import os
import six
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log,
ERROR,
INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
try:
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
# python-jinja2 may not be installed yet, or we're running unittests.
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
class OSConfigException(Exception):
pass
def get_loader(templates_dir, os_release):
"""
Create a jinja2.ChoiceLoader containing template dirs up to
and including os_release. If a release's template directory
is missing under templates_dir, it will be omitted from the loader.
templates_dir is added to the bottom of the search list as a base
loading dir.
A charm may also ship a templates dir with this module
and it will be appended to the bottom of the search list, eg::
hooks/charmhelpers/contrib/openstack/templates
:param templates_dir (str): Base template directory containing release
sub-directories.
:param os_release (str): OpenStack release codename to construct template
loader.
:returns: jinja2.ChoiceLoader constructed with a list of
jinja2.FilesystemLoaders, ordered in descending
order by OpenStack release.
"""
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
for rel in six.itervalues(OPENSTACK_CODENAMES)]
if not os.path.isdir(templates_dir):
log('Templates directory not found @ %s.' % templates_dir,
level=ERROR)
raise OSConfigException
# the bottom contains templates_dir and possibly a common templates dir
# shipped with the helper.
loaders = [FileSystemLoader(templates_dir)]
helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
if os.path.isdir(helper_templates):
loaders.append(FileSystemLoader(helper_templates))
for rel, tmpl_dir in tmpl_dirs:
if os.path.isdir(tmpl_dir):
loaders.insert(0, FileSystemLoader(tmpl_dir))
if rel == os_release:
break
log('Creating choice loader with dirs: %s' %
[l.searchpath for l in loaders], level=INFO)
return ChoiceLoader(loaders)
class OSConfigTemplate(object):
"""
Associates a config file template with a list of context generators.
Responsible for constructing a template context based on those generators.
"""
def __init__(self, config_file, contexts):
self.config_file = config_file
if hasattr(contexts, '__call__'):
self.contexts = [contexts]
else:
self.contexts = contexts
self._complete_contexts = []
def context(self):
ctxt = {}
for context in self.contexts:
_ctxt = context()
if _ctxt:
ctxt.update(_ctxt)
# track interfaces for every complete context.
[self._complete_contexts.append(interface)
for interface in context.interfaces
if interface not in self._complete_contexts]
return ctxt
def complete_contexts(self):
'''
Return a list of interfaces that have satisfied contexts.
'''
if self._complete_contexts:
return self._complete_contexts
self.context()
return self._complete_contexts
class OSConfigRenderer(object):
"""
This class provides a common templating system to be used by OpenStack
charms. It is intended to help charms share common code and templates,
and ease the burden of managing config templates across multiple OpenStack
releases.
Basic usage::
# import some common context generators from charmhelpers
from charmhelpers.contrib.openstack import context
# Create a renderer object for a specific OS release.
configs = OSConfigRenderer(templates_dir='/tmp/templates',
openstack_release='folsom')
# register some config files with context generators.
configs.register(config_file='/etc/nova/nova.conf',
contexts=[context.SharedDBContext(),
context.AMQPContext()])
configs.register(config_file='/etc/nova/api-paste.ini',
contexts=[context.IdentityServiceContext()])
configs.register(config_file='/etc/haproxy/haproxy.conf',
contexts=[context.HAProxyContext()])
# write out a single config
configs.write('/etc/nova/nova.conf')
# write out all registered configs
configs.write_all()
**OpenStack Releases and template loading**
When the object is instantiated, it is associated with a specific OS
release. This dictates how the template loader will be constructed.
The constructed loader attempts to load the template from several places
in the following order:
- from the most recent OS release-specific template dir (if one exists)
- the base templates_dir
- a template directory shipped in the charm with this helper file.
For the example above, '/tmp/templates' contains the following structure::
/tmp/templates/nova.conf
/tmp/templates/api-paste.ini
/tmp/templates/grizzly/api-paste.ini
/tmp/templates/havana/api-paste.ini
Since it was registered with the grizzly release, it first searches
the grizzly directory for nova.conf, then the templates dir.
When writing api-paste.ini, it will find the template in the grizzly
directory.
If the object were created with folsom, it would fall back to the
base templates dir for its api-paste.ini template.
This system should help manage changes in config files through
openstack releases, allowing charms to fall back to the most recently
updated config template for a given release.
The haproxy.conf, since it is not shipped in the templates dir, will
be loaded from the module directory's template directory, eg
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
us to ship common templates (haproxy, apache) with the helpers.
**Context generators**
Context generators are used to generate template contexts during hook
execution. Doing so may require inspecting service relations, charm
config, etc. When registered, a config file is associated with a list
of generators. When a template is rendered and written, all context
generators are called in a chain to generate the context dictionary
passed to the jinja2 template. See context.py for more info.
"""
def __init__(self, templates_dir, openstack_release):
if not os.path.isdir(templates_dir):
log('Could not locate templates dir %s' % templates_dir,
level=ERROR)
raise OSConfigException
self.templates_dir = templates_dir
self.openstack_release = openstack_release
self.templates = {}
self._tmpl_env = None
if None in [Environment, ChoiceLoader, FileSystemLoader]:
# if this code is running, the object is created pre-install hook.
# jinja2 shouldn't get touched until the module is reloaded on next
# hook execution, with proper jinja2 bits successfully imported.
apt_install('python-jinja2')
def register(self, config_file, contexts):
"""
Register a config file with a list of context generators to be called
during rendering.
"""
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
contexts=contexts)
log('Registered config file: %s' % config_file, level=INFO)
def _get_tmpl_env(self):
if not self._tmpl_env:
loader = get_loader(self.templates_dir, self.openstack_release)
self._tmpl_env = Environment(loader=loader)
def _get_template(self, template):
self._get_tmpl_env()
template = self._tmpl_env.get_template(template)
log('Loaded template from %s' % template.filename, level=INFO)
return template
def render(self, config_file):
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
ctxt = self.templates[config_file].context()
_tmpl = os.path.basename(config_file)
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound:
# if no template is found with basename, try looking for it
# using a munged full path, eg:
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
_tmpl = '_'.join(config_file.split('/')[1:])
try:
template = self._get_template(_tmpl)
except exceptions.TemplateNotFound as e:
log('Could not load template from %s by %s or %s.' %
(self.templates_dir, os.path.basename(config_file), _tmpl),
level=ERROR)
raise e
log('Rendering from template: %s' % _tmpl, level=INFO)
return template.render(ctxt)
def write(self, config_file):
"""
Write a single config file, raises if config file is not registered.
"""
if config_file not in self.templates:
log('Config not registered: %s' % config_file, level=ERROR)
raise OSConfigException
_out = self.render(config_file)
with open(config_file, 'wb') as out:
out.write(_out)
log('Wrote template %s.' % config_file, level=INFO)
def write_all(self):
"""
Write out all registered config files.
"""
[self.write(k) for k in six.iterkeys(self.templates)]
def set_release(self, openstack_release):
"""
Resets the template environment and generates a new template loader
based on the new openstack release.
"""
self._tmpl_env = None
self.openstack_release = openstack_release
self._get_tmpl_env()
def complete_contexts(self):
'''
Returns a list of context interfaces that yield a complete context.
'''
interfaces = []
[interfaces.extend(i.complete_contexts())
for i in six.itervalues(self.templates)]
return interfaces

View File

@ -0,0 +1,625 @@
#!/usr/bin/python
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from functools import wraps
import subprocess
import json
import os
import socket
import sys
import six
import yaml
from charmhelpers.core.hookenv import (
config,
log as juju_log,
charm_dir,
INFO,
relation_ids,
relation_set
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.contrib.network.ip import (
get_ipv6_addr
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
('2014.2', 'juno'),
('2015.1', 'kilo'),
])
# The ugly duckling
SWIFT_CODENAMES = OrderedDict([
('1.4.3', 'diablo'),
('1.4.8', 'essex'),
('1.7.4', 'folsom'),
('1.8.0', 'grizzly'),
('1.7.7', 'grizzly'),
('1.7.6', 'grizzly'),
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.1', 'icehouse'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
('2.0.0', 'juno'),
('2.1.0', 'juno'),
('2.2.0', 'juno'),
('2.2.1', 'kilo'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = ''
if src is None:
return rel
if src in ['distro', 'distro-proposed']:
try:
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
except KeyError:
e = 'Could not derive openstack release for '\
'this Ubuntu release: %s' % ubuntu_rel
error_out(e)
return rel
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
if src.startswith('deb') or src.startswith('ppa'):
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v in src:
return v
def get_os_version_install_source(src):
codename = get_os_codename_install_source(src)
return get_os_version_codename(codename)
def get_os_codename_version(vers):
'''Determine OpenStack codename from version number.'''
try:
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_codename(codename):
'''Determine OpenStack version number from codename.'''
for k, v in six.iteritems(OPENSTACK_CODENAMES):
if v == codename:
return k
e = 'Could not derive OpenStack version for '\
'codename: %s' % codename
error_out(e)
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
import apt_pkg as apt
cache = apt_cache()
try:
pkg = cache[package]
except:
if not fatal:
return None
# the package is unknown to the current apt cache.
e = 'Could not determine version of package with no installation '\
'candidate: %s' % package
error_out(e)
if not pkg.current_ver:
if not fatal:
return None
# package is known, but no version is currently installed.
e = 'Could not determine version of uninstalled package: %s' % package
error_out(e)
vers = apt.upstream_version(pkg.current_ver.ver_str)
try:
if 'swift' in pkg.name:
swift_vers = vers[:5]
if swift_vers not in SWIFT_CODENAMES:
# Deal with 1.10.0 upward
swift_vers = vers[:6]
return SWIFT_CODENAMES[swift_vers]
else:
vers = vers[:6]
return OPENSTACK_CODENAMES[vers]
except KeyError:
e = 'Could not determine OpenStack codename for version %s' % vers
error_out(e)
def get_os_version_package(pkg, fatal=True):
'''Derive OpenStack version number from an installed package.'''
codename = get_os_codename_package(pkg, fatal=fatal)
if not codename:
return None
if 'swift' in pkg:
vers_map = SWIFT_CODENAMES
else:
vers_map = OPENSTACK_CODENAMES
for version, cname in six.iteritems(vers_map):
if cname == codename:
return version
# e = "Could not determine OpenStack version for package: %s" % pkg
# error_out(e)
os_rel = None
def os_release(package, base='essex'):
'''
Returns OpenStack release codename from a cached global.
If the codename can not be determined from either an installed package or
the installation source, the earliest release supported by the charm should
be returned.
'''
global os_rel
if os_rel:
return os_rel
os_rel = (get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
return os_rel
def import_key(keyid):
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
error_out("Error importing repo key %s" % keyid)
def configure_installation_source(rel):
'''Configure apt installation source.'''
if rel == 'distro':
return
elif rel == 'distro-proposed':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(DISTRO_PROPOSED % ubuntu_rel)
elif rel[:4] == "ppa:":
src = rel
subprocess.check_call(["add-apt-repository", "-y", src])
elif rel[:3] == "deb":
l = len(rel.split('|'))
if l == 2:
src, key = rel.split('|')
juju_log("Importing PPA key from keyserver for %s" % src)
import_key(key)
elif l == 1:
src = rel
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
f.write(src)
elif rel[:6] == 'cloud:':
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
rel = rel.split(':')[1]
u_rel = rel.split('-')[0]
ca_rel = rel.split('-')[1]
if u_rel != ubuntu_rel:
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
'version (%s)' % (ca_rel, ubuntu_rel)
error_out(e)
if 'staging' in ca_rel:
# staging is just a regular PPA.
os_rel = ca_rel.split('/')[0]
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
cmd = 'add-apt-repository -y %s' % ppa
subprocess.check_call(cmd.split(' '))
return
# map charm config options to actual archive pockets.
pockets = {
'folsom': 'precise-updates/folsom',
'folsom/updates': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'grizzly': 'precise-updates/grizzly',
'grizzly/updates': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'juno': 'trusty-updates/juno',
'juno/updates': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'kilo': 'trusty-updates/kilo',
'kilo/updates': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
}
try:
pocket = pockets[ca_rel]
except KeyError:
e = 'Invalid Cloud Archive release specified: %s' % rel
error_out(e)
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
apt_install('ubuntu-cloud-keyring', fatal=True)
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
f.write(src)
else:
error_out("Invalid openstack-release specified: %s" % rel)
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
exported environment variables provided by env_vars. Any charm scripts run
outside the juju hook environment can source this scriptrc to obtain
updated config information necessary to perform health checks or
service changes.
"""
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
with open(juju_rc_path, 'wb') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
for u, p in six.iteritems(env_vars) if u != "script_path"]
def openstack_upgrade_available(package):
"""
Determines if an OpenStack upgrade is available from installation
source, based on version of installed package.
:param package: str: Name of installed package.
:returns: bool: Returns True if the configured installation source offers
a newer version of package.
"""
import apt_pkg as apt
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(available_vers, cur_vers) == 1
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
if (block_device in _none):
error_out('prepare_storage(): Missing required input: block_device=%s.'
% block_device)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
if not is_block_device(bdev):
error_out('Failed to locate valid block device at %s' % bdev)
return bdev
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
def is_ip(address):
"""
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4 address
socket.inet_aton(address)
return True
except socket.error:
return False
def ns_query(address):
try:
import dns.resolver
except ImportError:
apt_install('python-dnspython')
import dns.resolver
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, six.string_types):
rtype = 'A'
else:
return None
answers = dns.resolver.query(address, rtype)
if answers:
return str(answers[0])
return None
def get_host_ip(hostname):
"""
Resolves the IP for a given hostname, or returns
the input if it is already an IP.
"""
if is_ip(hostname):
return hostname
return ns_query(hostname)
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
"""
if is_ip(address):
try:
import dns.reversename
except ImportError:
apt_install('python-dnspython')
import dns.reversename
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
else:
result = address
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
mm_map = {}
if os.path.isfile(mm_file):
with open(mm_file, 'r') as f:
mm_map = json.load(f)
return mm_map
def sync_db_with_multi_ipv6_addresses(database, database_user,
relation_prefix=None):
hosts = get_ipv6_addr(dynamic_only=False)
kwargs = {'database': database,
'username': database_user,
'hostname': json.dumps(hosts)}
if relation_prefix:
for key in list(kwargs.keys()):
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
del kwargs[key]
for rid in relation_ids('shared-db'):
relation_set(relation_id=rid, **kwargs)
def os_requires_version(ostack_release, pkg):
"""
Decorator for hooks to specify the minimum supported OpenStack release
"""
def wrap(f):
@wraps(f)
def wrapped_f(*args):
if os_release(pkg) < ostack_release:
raise Exception("This hook is not supported on releases"
" before %s" % ostack_release)
f(*args)
return wrapped_f
return wrap
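
A hypothetical use of the decorator above, guarding a hook so that it raises on releases earlier than icehouse; the hook and package names are illustrative only.

# Hypothetical usage of os_requires_version (names are placeholders).
@os_requires_version('icehouse', 'radosgw')
def config_changed():
    pass  # body only runs when os_release('radosgw') >= 'icehouse'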
def git_install_requested():
"""Returns true if openstack-origin-git is specified."""
return config('openstack-origin-git') != "None"
requirements_dir = None
def git_clone_and_install(file_name, core_project):
"""Clone/install all OpenStack repos specified in yaml config file."""
global requirements_dir
if file_name == "None":
return
yaml_file = os.path.join(charm_dir(), file_name)
# clone/install the requirements project first
installed = _git_clone_and_install_subset(yaml_file,
whitelist=['requirements'])
if 'requirements' not in installed:
error_out('requirements git repository must be specified')
# clone/install all other projects except requirements and the core project
blacklist = ['requirements', core_project]
_git_clone_and_install_subset(yaml_file, blacklist=blacklist,
update_requirements=True)
# clone/install the core project
whitelist = [core_project]
installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
update_requirements=True)
if core_project not in installed:
error_out('{} git repository must be specified'.format(core_project))
def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
update_requirements=False):
"""Clone/install subset of OpenStack repos specified in yaml config file."""
global requirements_dir
installed = []
with open(yaml_file, 'r') as fd:
projects = yaml.load(fd)
for proj, val in projects.items():
# The project subset is chosen based on the following 3 rules:
# 1) If project is in blacklist, we don't clone/install it, period.
# 2) If whitelist is empty, we clone/install everything else.
# 3) If whitelist is not empty, we clone/install everything in the
# whitelist.
if proj in blacklist:
continue
if whitelist and proj not in whitelist:
continue
repo = val['repository']
branch = val['branch']
repo_dir = _git_clone_and_install_single(repo, branch,
update_requirements)
if proj == 'requirements':
requirements_dir = repo_dir
installed.append(proj)
return installed
def _git_clone_and_install_single(repo, branch, update_requirements=False):
"""Clone and install a single git repository."""
dest_parent_dir = "/mnt/openstack-git/"
dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
if not os.path.exists(dest_parent_dir):
juju_log('Host dir not mounted at {}. '
'Creating directory there instead.'.format(dest_parent_dir))
os.mkdir(dest_parent_dir)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
else:
repo_dir = dest_dir
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
pip_install(repo_dir)
return repo_dir
def _git_update_requirements(package_dir, reqs_dir):
"""Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt."""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
cmd = "python update.py {}".format(package_dir)
try:
subprocess.check_call(cmd.split(' '))
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from global-requirements.txt".format(package))
os.chdir(orig_dir)

View File

@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import log
try:
from pip import main as pip_execute
except ImportError:
apt_update()
apt_install('python-pip')
from pip import main as pip_execute
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
if key in available:
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, **options):
"""Install a requirements file """
command = ["install"]
available_options = ('proxy', 'src', 'log', )
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
def pip_install(package, fatal=False, **options):
"""Install a python package"""
command = ["install"]
available_options = ('proxy', 'src', 'log', "index-url", )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Installing {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
available_options = ('proxy', 'log', )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Uninstalling {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])

View File

@ -0,0 +1,428 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
import shutil
import json
import time
from subprocess import (
check_call,
check_output,
CalledProcessError,
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
DEBUG,
INFO,
WARNING,
ERROR,
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
service_running,
umount,
)
from charmhelpers.fetch import (
apt_install,
)
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
def install():
"""Basic Ceph client installation."""
ceph_dir = "/etc/ceph"
if not os.path.exists(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id',
service, '--pool', pool]).decode('UTF-8')
except CalledProcessError:
return False
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
"""Create a new RADOS block device."""
cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
'--pool', pool]
check_call(cmd)
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
out = check_output(['rados', '--id', service,
'lspools']).decode('UTF-8')
except CalledProcessError:
return False
return name in out
def get_osds(service):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
"""
version = ceph_version()
if version and version >= '0.56':
return json.loads(check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json']).decode('UTF-8'))
return None
def create_pool(service, name, replicas=3):
"""Create a new RADOS pool."""
if pool_exists(service, name):
log("Ceph pool {} already exists, skipping creation".format(name),
level=WARNING)
return
# Calculate the number of placement groups based
# on upstream recommended best practices.
osds = get_osds(service)
if osds:
pgnum = (len(osds) * 100 // replicas)
else:
# NOTE(james-page): Default to 200 for older ceph versions
# which don't support OSD query from cli
pgnum = 200
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
check_call(cmd)
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
str(replicas)]
check_call(cmd)
def delete_pool(service, name):
"""Delete a RADOS pool from ceph."""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
'--yes-i-really-really-mean-it']
check_call(cmd)
def _keyfile_path(service):
return KEYFILE.format(service)
def _keyring_path(service):
return KEYRING.format(service)
def create_keyring(service, key):
"""Create a new Ceph keyring containing key."""
keyring = _keyring_path(service)
if os.path.exists(keyring):
log('Ceph keyring exists at %s.' % keyring, level=WARNING)
return
cmd = ['ceph-authtool', keyring, '--create-keyring',
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
check_call(cmd)
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
def delete_keyring(service):
"""Delete an existing Ceph keyring."""
keyring = _keyring_path(service)
if not os.path.exists(keyring):
log('Keyring does not exist at %s' % keyring, level=WARNING)
return
os.remove(keyring)
log('Deleted ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
"""Create a file containing key."""
keyfile = _keyfile_path(service)
if os.path.exists(keyfile):
log('Keyfile exists at %s.' % keyfile, level=WARNING)
return
with open(keyfile, 'w') as fd:
fd.write(key)
log('Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
"""Query named relation 'ceph' to determine current nodes."""
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth, use_syslog):
"""Perform basic configuration of Ceph."""
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF.format(auth=auth,
keyring=_keyring_path(service),
mon_hosts=",".join(map(str, hosts)),
use_syslog=use_syslog))
modprobe('rbd')
def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally."""
try:
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
except CalledProcessError:
return False
return name in out
def map_block_storage(service, pool, image):
"""Map a RADOS block device for local use."""
cmd = [
'rbd',
'map',
'{}/{}'.format(pool, image),
'--user',
service,
'--secret',
_keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
"""Determine whether a filesytems is already mounted."""
return fs in [f for f, m in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
"""Make a new filesystem on the specified block device."""
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('Gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('Waiting for block device %s to appear' % blk_device,
level=DEBUG)
count += 1
time.sleep(1)
else:
log('Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_block_device(blk_device, data_src_dst):
"""Migrate data in data_src_dst to blk_device and then remount."""
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
copy_files(data_src_dst, '/mnt')
# umount block device
umount('/mnt')
# Grab user/group ID's from original source
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
# TODO: persist is currently a NO-OP in core.host
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
os.chown(data_src_dst, uid, gid)
# TODO: re-use
def modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
log('Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
with open('/etc/modules', 'r+') as modules:
if module not in modules.read():
modules.write(module)
def copy_files(src, dst, symlinks=False, ignore=None):
"""Copy files from src to dst."""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[],
replicas=3):
"""NOTE: This function must only be called from a single service unit for
the same rbd_img otherwise data loss will occur.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being re-mounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('Creating new pool {}.'.format(pool), level=INFO)
create_pool(service, pool, replicas=replicas)
if not rbd_exists(service, pool, rbd_img):
log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if it's executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if service_running(svc):
log('Stopping service {} prior to migrating data.'
.format(svc), level=DEBUG)
service_stop(svc)
place_data_on_block_device(blk_device, mount_point)
for svc in system_services:
log('Starting service {} after migrating data.'
.format(svc), level=DEBUG)
service_start(svc)
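As a hedged illustration of a typical call (all names, sizes and paths here are placeholders, not values from this change):
ensure_ceph_storage(service='mysql', pool='mysql', rbd_img='mysql',
                    sizemb=10240, mount_point='/var/lib/mysql',
                    blk_device='/dev/rbd/mysql/mysql', fstype='ext4',
                    system_services=['mysql'])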
def ensure_ceph_keyring(service, user=None, group=None):
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
"""
key = None
for rid in relation_ids('ceph'):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False
create_keyring(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
return True
def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd).decode('US-ASCII')
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
Multiple operations can be added to a request and sent to the Ceph broker
to be executed.
Request is json-encoded for sending over the wire.
The API is versioned and defaults to version 1.
"""
def __init__(self, api_version=1):
self.api_version = api_version
self.ops = []
def add_op_create_pool(self, name, replica_count=3):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count})
@property
def request(self):
return json.dumps({'api-version': self.api_version, 'ops': self.ops})
class CephBrokerRsp(object):
"""Ceph broker response.
Response is json-decoded and contents provided as methods/properties.
The API is versioned and defaults to version 1.
"""
def __init__(self, encoded_rsp):
self.api_version = None
self.rsp = json.loads(encoded_rsp)
@property
def exit_code(self):
return self.rsp.get('exit-code')
@property
def exit_msg(self):
return self.rsp.get('stderr')
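A sketch of the intended round trip for these broker classes; the relation key names 'broker_req' and 'broker_rsp' are assumptions for illustration, not defined in this file:
rq = CephBrokerRq()
rq.add_op_create_pool(name='glance', replica_count=3)
relation_set(broker_req=rq.request)          # hand the request to ceph-mon
# ...later, once the broker has replied on the relation...
rsp = CephBrokerRsp(relation_get('broker_rsp'))
if rsp.exit_code != 0:
    log('Ceph broker request failed: %s' % rsp.exit_msg, level=ERROR)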

View File

@ -0,0 +1,62 @@
import os
import re
from subprocess import (
check_call,
check_output,
)
import six
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
return loopbacks
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in six.iteritems(loopback_devices()):
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If a loopback device is not already mapped to the file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in six.iteritems(loopback_devices()):
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
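A quick sketch of the intended use; the backing file path and size are illustrative:
dev = ensure_loopback_device('/srv/test.img', '5G')
# dev is now something like '/dev/loop0', backed by a 5G sparse file,
# and can be handed to mkfs or the LVM helpers.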

View File

@ -0,0 +1,89 @@
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
'''
Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd)
def is_lvm_physical_volume(block_device):
'''
Determine whether a block device is initialized as an LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: boolean: True if block device is a PV, False if not.
'''
try:
check_output(['pvdisplay', block_device])
return True
except CalledProcessError:
return False
def remove_lvm_physical_volume(block_device):
'''
Remove LVM PV signatures from a given block device.
:param block_device: str: Full path of block device to scrub.
'''
p = Popen(['pvremove', '-ff', block_device],
stdin=PIPE)
p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
'''
List LVM volume group associated with a given block device.
Assumes block device is a valid LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: str: Name of volume group associated with block device or None
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
l = l.decode('UTF-8')
if l.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:])
return vg
def create_lvm_physical_volume(block_device):
'''
Initialize a block device as an LVM physical volume.
:param block_device: str: Full path of block device to initialize.
'''
check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
'''
Create an LVM volume group backed by a given block device.
Assumes block device has already been initialized as an LVM PV.
:param volume_group: str: Name of volume group to create.
:param block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
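A hedged end-to-end sketch of these helpers preparing a device for a volume group; '/dev/vdb' and 'cinder-volumes' are placeholders:
device = '/dev/vdb'
if is_lvm_physical_volume(device):
    # Scrub any previous LVM state before reusing the device.
    deactivate_lvm_volume_group(device)
    remove_lvm_physical_volume(device)
create_lvm_physical_volume(device)
create_lvm_volume_group('cinder-volumes', device)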

View File

@ -0,0 +1,41 @@
#
# Copyright 2014 Canonical Ltd.
#
# Authors:
# Edward Hope-Morley <opentastic@gmail.com>
#
import time
from charmhelpers.core.hookenv import (
log,
INFO,
)
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
"""If the decorated function raises exception exc_type, allow num_retries
retry attempts before re-raising the exception.
"""
def _retry_on_exception_inner_1(f):
def _retry_on_exception_inner_2(*args, **kwargs):
retries = num_retries
multiplier = 1
while True:
try:
return f(*args, **kwargs)
except exc_type:
if not retries:
raise
delay = base_delay * multiplier
multiplier += 1
log("Retrying '%s' %d more times (delay=%s)" %
(f.__name__, retries, delay), level=INFO)
retries -= 1
if delay:
time.sleep(delay)
return _retry_on_exception_inner_2
return _retry_on_exception_inner_1
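A small illustration of the decorator; the command and exception type are arbitrary, chosen only to show the retry and delay behaviour:
import subprocess

@retry_on_exception(3, base_delay=2, exc_type=subprocess.CalledProcessError)
def cluster_status():
    # Retried up to 3 times, sleeping 2s, 4s and 6s between attempts.
    return subprocess.check_output(['crm_mon', '-1'])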

View File

@ -68,6 +68,8 @@ def log(message, level=None):
command = ['juju-log']
if level:
command += ['-l', level]
if not isinstance(message, six.string_types):
message = repr(message)
command += [message]
subprocess.call(command)
@ -393,21 +395,31 @@ def relations_of_type(reltype=None):
return relation_data
@cached
def metadata():
"""Get the current charm metadata.yaml contents as a python object"""
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
return yaml.safe_load(md)
@cached
def relation_types():
"""Get a list of relation types supported by this charm"""
charmdir = os.environ.get('CHARM_DIR', '')
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
md = yaml.safe_load(mdf)
rel_types = []
md = metadata()
for key in ('provides', 'requires', 'peers'):
section = md.get(key)
if section:
rel_types.extend(section.keys())
mdf.close()
return rel_types
@cached
def charm_name():
"""Get the name of the current charm as is specified on metadata.yaml"""
return metadata().get('name')
@cached
def relations():
"""Get a nested dictionary of relation data for all related units"""

View File

@ -101,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
return user_info
def add_group(group_name, system_group=False):
"""Add a group to the system"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if system_group:
cmd.append('--system')
else:
cmd.extend([
'--group',
])
cmd.append(group_name)
subprocess.check_call(cmd)
group_info = grp.getgrnam(group_name)
return group_info
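A hedged example of the new helper together with add_user_to_group() below; the group and user names are placeholders:
add_group('rgw-admins')                        # ordinary group
add_group('haproxy-stats', system_group=True)  # system group (addgroup --system)
add_user_to_group('www-data', 'rgw-admins')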
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = [
@ -142,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
if os.path.exists(realpath):
if force and not os.path.isdir(realpath):
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
else:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chown(realpath, uid, gid)
def write_file(path, content, owner='root', group='root', perms=0o444):
@ -368,8 +391,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
'''
import apt_pkg
from charmhelpers.fetch import apt_cache
if not pkgcache:
from charmhelpers.fetch import apt_cache
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)

View File

@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root',
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target))
host.mkdir(os.path.dirname(target), owner, group)
host.write_file(target, content, owner, group, perms)

View File

@ -64,9 +64,16 @@ CLOUD_ARCHIVE_POCKETS = {
'trusty-juno/updates': 'trusty-updates/juno',
'trusty-updates/juno': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'juno/proposed': 'trusty-proposed/juno',
'trusty-juno/proposed': 'trusty-proposed/juno',
'trusty-proposed/juno': 'trusty-proposed/juno',
# Kilo
'kilo': 'trusty-updates/kilo',
'trusty-kilo': 'trusty-updates/kilo',
'trusty-kilo/updates': 'trusty-updates/kilo',
'trusty-updates/kilo': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
'trusty-kilo/proposed': 'trusty-proposed/kilo',
'trusty-proposed/kilo': 'trusty-proposed/kilo',
}
# The order of this list is very important. Handlers should be listed in from
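For context, these pocket keys are what add_source() elsewhere in charmhelpers.fetch resolves a charm's 'source' config against; an illustrative flow:
add_source('cloud:trusty-kilo')   # looked up as 'trusty-kilo' in CLOUD_ARCHIVE_POCKETS
apt_update(fatal=True)
apt_install(['radosgw', 'haproxy'], fatal=True)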

View File

@ -34,11 +34,14 @@ class GitUrlFetchHandler(BaseFetchHandler):
repo = Repo.clone_from(source, dest)
repo.git.checkout(branch)
def install(self, source, branch="master"):
def install(self, source, branch="master", dest=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
try:

View File

@ -41,6 +41,7 @@ from utils import (
enable_pocket,
is_apache_24,
CEPHRG_HA_RES,
register_configs,
)
from charmhelpers.payload.execd import execd_preinstall
@ -58,6 +59,7 @@ from charmhelpers.contrib.openstack.ip import (
)
hooks = Hooks()
CONFIGS = register_configs()
def install_www_scripts():
@ -86,6 +88,7 @@ def install_ceph_optimised_packages():
PACKAGES = [
'radosgw',
'ntp',
'haproxy',
]
APACHE_PACKAGES = [
@ -172,10 +175,12 @@ def apache_reload():
@hooks.hook('upgrade-charm',
'config-changed')
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw'],
'/etc/haproxy/haproxy.cfg': ['haproxy']})
def config_changed():
install_packages()
emit_cephconf()
CONFIGS.write_all()
if not config('use-embedded-webserver'):
emit_apacheconf()
install_www_scripts()
@ -236,6 +241,7 @@ def get_keystone_conf():
@hooks.hook('mon-relation-departed',
'mon-relation-changed')
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
def mon_relation():
emit_cephconf()
key = relation_get('radosgw_key')
@ -289,7 +295,6 @@ def identity_joined(relid=None):
(canonical_url(INTERNAL), port)
public_url = '%s:%s/swift/v1' % \
(canonical_url(PUBLIC), port)
log('LY identity_joined relation_set public_url=%s relation_id=%s' % (public_url, str(relid)), level=ERROR)
relation_set(service='swift',
region=config('region'),
public_url=public_url, internal_url=internal_url,
@ -299,6 +304,7 @@ def identity_joined(relid=None):
@hooks.hook('identity-service-relation-changed')
@restart_on_change({'/etc/ceph/ceph.conf': ['radosgw']})
def identity_changed():
emit_cephconf()
restart()
@ -306,10 +312,10 @@ def identity_changed():
@hooks.hook('cluster-relation-changed',
'cluster-relation-joined')
@restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']})
def cluster_changed():
log('LY In cluster_changed triggering identity_joined', level=ERROR)
CONFIGS.write_all()
for r_id in relation_ids('identity-service'):
log('LY In cluster_changed triggering identity_joined for relid: ' + r_id, level=ERROR)
identity_joined(relid=r_id)

View File

@ -10,12 +10,27 @@
import socket
import re
import os
from copy import deepcopy
from collections import OrderedDict
from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.openstack import context, templating
from charmhelpers.contrib.openstack.utils import os_release
import ceph_radosgw_context
CEPHRG_HA_RES = 'grp_cephrg_vips'
TEMPLATES_DIR = 'templates'
TEMPLATES = 'templates/'
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
BASE_RESOURCE_MAP = OrderedDict([
(HAPROXY_CONF, {
'contexts': [context.HAProxyContext(singlenode_mode=True),
ceph_radosgw_context.HAProxyContext()],
'services': ['haproxy'],
}),
])
try:
import jinja2
@ -30,6 +45,23 @@ except ImportError:
import dns.resolver
def resource_map():
'''
Dynamically generate a map of resources that will be managed for a single
hook execution.
'''
resource_map = deepcopy(BASE_RESOURCE_MAP)
return resource_map
def register_configs(release='icehouse'):
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
for cfg, rscs in resource_map().iteritems():
configs.register(cfg, rscs['contexts'])
return configs
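Not part of this change, but a common companion pattern is to derive the restart_on_change() mapping from the same resource map, roughly as follows:
def restart_map():
    '''Illustrative sketch: map each managed config file to the services
    that should restart when it changes.'''
    return dict((cfg, rscs['services'])
                for cfg, rscs in resource_map().items())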
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)

View File

@ -17,7 +17,7 @@ keyring = /etc/ceph/keyring.rados.gateway
rgw socket path = /tmp/radosgw.sock
log file = /var/log/ceph/radosgw.log
{% if embedded_webserver %}
rgw frontends = civetweb port=80
rgw frontends = civetweb port=70
{% else %}
# Turn off 100-continue optimization as stock mod_fastcgi
# does not support it

View File

@ -2,7 +2,7 @@
FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
</IfModule>
<VirtualHost *:80>
<VirtualHost *:70>
ServerName {{ hostname }}
ServerAdmin ceph@ubuntu.com
DocumentRoot /var/www

View File

@ -0,0 +1,22 @@
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys
try:
import six # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # flake8: noqa
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa