Merged upstream changes

Jacek Nykis 2014-03-31 16:24:56 +01:00
commit 1434f04c40
22 changed files with 345 additions and 82 deletions

View File

@ -26,6 +26,12 @@ options:
default: openstack
type: string
description: Rabbitmq vhost
use-syslog:
type: boolean
default: False
description: |
By default, all services will log into their corresponding log files.
Setting this to True will force all services to log to the syslog.
database-user:
default: nova
type: string

View File

@ -126,17 +126,17 @@ def determine_api_port(public_port):
return public_port - (i * 10)
def determine_haproxy_port(public_port):
def determine_apache_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
Description: Determine correct apache listening port based on public IP +
state of the cluster.
public_port: int: standard public port for given service
returns: int: the correct listening port for the service
'''
i = 0
if https():
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
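A brief usage sketch of the new helper (the port value is hypothetical, and it only makes sense inside a Juju hook environment where peer_units() and is_clustered() can be evaluated):

from charmhelpers.contrib.hahelpers.cluster import determine_apache_port

# hypothetical public port for an API service
public_port = 9292
# standalone unit: apache may listen on the public port itself -> 9292
# peer units present or hacluster configured: haproxy owns 9292, so apache
# steps down one slot -> 9282
apache_port = determine_apache_port(public_port)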

View File

@ -23,15 +23,13 @@ from charmhelpers.core.hookenv import (
unit_get,
unit_private_ip,
ERROR,
WARNING,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
determine_haproxy_port,
https,
is_clustered,
peer_units,
is_clustered
)
from charmhelpers.contrib.hahelpers.apache import (
@ -68,6 +66,43 @@ def context_complete(ctxt):
return True
def config_flags_parser(config_flags):
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
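To illustrate what the new parser accepts, a minimal sketch (the flag string is made up; the point is that a value may itself contain commas, and a new key is only recognised after the last comma preceding the next '='):

from charmhelpers.contrib.openstack.context import config_flags_parser

flags = config_flags_parser('enabled_backends=rbd,lvm,read_only=True')
# -> {'enabled_backends': 'rbd,lvm', 'read_only': 'True'}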
class OSContextGenerator(object):
interfaces = []
@ -164,6 +199,7 @@ class AMQPContext(OSContextGenerator):
ctxt = {}
for rid in relation_ids('amqp'):
ha_vip_only = False
for unit in related_units(rid):
if relation_get('clustered', rid=rid, unit=unit):
ctxt['clustered'] = True
@ -178,14 +214,23 @@ class AMQPContext(OSContextGenerator):
unit=unit),
'rabbitmq_virtual_host': vhost,
})
if relation_get('ha_queues', rid=rid, unit=unit) is not None:
ctxt['rabbitmq_ha_queues'] = True
ha_vip_only = relation_get('ha-vip-only',
rid=rid, unit=unit) is not None
if context_complete(ctxt):
# Sufficient information found = break out!
break
# Used for active/active rabbitmq >= grizzly
ctxt['rabbitmq_hosts'] = []
for unit in related_units(rid):
ctxt['rabbitmq_hosts'].append(relation_get('private-address',
rid=rid, unit=unit))
if ('clustered' not in ctxt or ha_vip_only) \
and len(related_units(rid)) > 1:
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
else:
@ -199,10 +244,13 @@ class CephContext(OSContextGenerator):
'''This generates context for /etc/ceph/ceph.conf templates'''
if not relation_ids('ceph'):
return {}
log('Generating template context for ceph')
mon_hosts = []
auth = None
key = None
use_syslog = str(config('use-syslog')).lower()
for rid in relation_ids('ceph'):
for unit in related_units(rid):
mon_hosts.append(relation_get('private-address', rid=rid,
@ -214,6 +262,7 @@ class CephContext(OSContextGenerator):
'mon_hosts': ' '.join(mon_hosts),
'auth': auth,
'key': key,
'use_syslog': use_syslog
}
if not os.path.isdir('/etc/ceph'):
@ -286,6 +335,7 @@ class ImageServiceContext(OSContextGenerator):
class ApacheSSLContext(OSContextGenerator):
"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context
@ -341,17 +391,15 @@ class ApacheSSLContext(OSContextGenerator):
'private_address': unit_get('private-address'),
'endpoints': []
}
for ext_port in self.external_ports:
if peer_units() or is_clustered():
int_port = determine_haproxy_port(ext_port)
else:
int_port = determine_api_port(ext_port)
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
class NeutronContext(object):
class NeutronContext(OSContextGenerator):
interfaces = []
@property
@ -412,6 +460,22 @@ class NeutronContext(object):
return nvp_ctxt
def neutron_ctxt(self):
if https():
proto = 'https'
else:
proto = 'http'
if is_clustered():
host = config('vip')
else:
host = unit_get('private-address')
url = '%s://%s:%s' % (proto, host, '9696')
ctxt = {
'network_manager': self.network_manager,
'neutron_url': url,
}
return ctxt
def __call__(self):
self._ensure_packages()
@ -421,40 +485,44 @@ class NeutronContext(object):
if not self.plugin:
return {}
ctxt = {'network_manager': self.network_manager}
ctxt = self.neutron_ctxt()
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
self._save_flag_file()
return ctxt
class OSConfigFlagContext(OSContextGenerator):
'''
Responsible adding user-defined config-flags in charm config to a
to a template context.
'''
"""
Responsible for adding user-defined config-flags in charm config to a
template context.
NOTE: the value of config-flags may be a comma-separated list of
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""
def __call__(self):
config_flags = config('config-flags')
if not config_flags or config_flags in ['None', '']:
if not config_flags:
return {}
config_flags = config_flags.split(',')
flags = {}
for flag in config_flags:
if '=' not in flag:
log('Improperly formatted config-flag, expected k=v '
'got %s' % flag, level=WARNING)
continue
k, v = flag.split('=')
flags[k.strip()] = v
ctxt = {'user_config_flags': flags}
return ctxt
flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}
class SubordinateConfigContext(OSContextGenerator):
"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
@ -495,6 +563,7 @@ class SubordinateConfigContext(OSContextGenerator):
}
"""
def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
@ -539,3 +608,12 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt['sections'] = {}
return ctxt
class SyslogContext(OSContextGenerator):
def __call__(self):
ctxt = {
'use_syslog': config('use-syslog')
}
return ctxt
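A sketch of how a charm might wire the new SyslogContext into its template contexts, mirroring the nova-compute resource map changes later in this commit (the file path and service name are illustrative):

from charmhelpers.contrib.openstack import context

CONFIG_FILES = {
    '/etc/nova/nova.conf': {
        'services': ['nova-compute'],
        'contexts': [context.AMQPContext(),
                     context.SyslogContext()],
    },
}
# the rendered template can then consume {{ use_syslog }}, as the template
# hunks further down do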

View File

@ -18,6 +18,22 @@ def headers_package():
return 'linux-headers-%s' % kver
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have native support for GRE and VXLAN
if kernel_version() >= (3, 13):
return []
else:
return ['openvswitch-datapath-dkms']
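A quick sketch of the effect (assuming this module is charmhelpers.contrib.openstack.neutron; the kernel string is an example):

from charmhelpers.contrib.openstack.neutron import (
    headers_package, kernel_version, determine_dkms_package)

# kernel_version() parses 'uname -r', e.g. '3.13.0-24-generic' -> (3, 13)
pkgs = [headers_package()] + determine_dkms_package()
# 3.13+ kernel  -> ['linux-headers-3.13.0-24-generic']
# older kernels -> ['linux-headers-...', 'openvswitch-datapath-dkms']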
# legacy
def quantum_plugins():
from charmhelpers.contrib.openstack import context
@ -32,7 +48,7 @@ def quantum_plugins():
database=config('neutron-database'),
relation_prefix='neutron')],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
'packages': [[headers_package()] + determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
@ -57,7 +73,8 @@ def quantum_plugins():
def neutron_plugins():
from charmhelpers.contrib.openstack import context
return {
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
@ -68,8 +85,8 @@ def neutron_plugins():
database=config('neutron-database'),
relation_prefix='neutron')],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
['quantum-plugin-openvswitch-agent']],
'packages': [[headers_package()] + determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
@ -89,6 +106,13 @@ def neutron_plugins():
'server_services': ['neutron-server']
}
}
# NOTE: patch in ml2 plugin for icehouse onwards
if release >= 'icehouse':
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
return plugins
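For example, once nova-common from icehouse or later is installed, the ovs entry is rewired to ml2 (a sketch; it relies on os_release() and config() resolving inside a hook environment):

from charmhelpers.contrib.openstack.neutron import neutron_plugins

plugins = neutron_plugins()
plugins['ovs']['config']           # '/etc/neutron/plugins/ml2/ml2_conf.ini' on icehouse
plugins['ovs']['server_packages']  # ['neutron-server', 'neutron-plugin-ml2']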
def neutron_plugin_attribute(plugin, attr, net_manager=None):

View File

@ -3,9 +3,13 @@
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
{% if auth -%}
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}

View File

@ -8,8 +8,8 @@ global
defaults
log global
mode http
option httplog
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
@ -29,7 +29,6 @@ listen stats :8888
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
balance roundrobin
option tcplog
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}

View File

@ -41,6 +41,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse')
])
@ -64,6 +65,9 @@ SWIFT_CODENAMES = OrderedDict([
('1.10.0', 'havana'),
('1.9.1', 'havana'),
('1.9.0', 'havana'),
('1.13.0', 'icehouse'),
('1.12.0', 'icehouse'),
('1.11.0', 'icehouse'),
])
DEFAULT_LOOPBACK_SIZE = '5G'
@ -201,7 +205,7 @@ def os_release(package, base='essex'):
def import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
@ -260,6 +264,9 @@ def configure_installation_source(rel):
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}
try:
@ -411,26 +418,30 @@ def get_host_ip(hostname):
return ns_query(hostname)
def get_hostname(address):
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
"""
if not is_ip(address):
return address
if is_ip(address):
try:
import dns.reversename
except ImportError:
apt_install('python-dnspython')
import dns.reversename
try:
import dns.reversename
except ImportError:
apt_install('python-dnspython')
import dns.reversename
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
else:
result = address
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
return None
# strip trailing .
if result.endswith('.'):
return result[:-1]
return result
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
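A hedged usage sketch of the reworked helper (assuming this hunk belongs to charmhelpers.contrib.openstack.utils; addresses and names are made up, and the reverse lookup needs working DNS):

from charmhelpers.contrib.openstack.utils import get_hostname

get_hostname('node1.example.com')     # already a name -> returned unchanged
get_hostname('10.5.0.1')              # reverse lookup -> 'node1.example.com'
get_hostname('10.5.0.1', fqdn=False)  # short name     -> 'node1'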

View File

@ -49,6 +49,9 @@ CEPH_CONF = """[global]
auth supported = {auth}
keyring = {keyring}
mon host = {mon_hosts}
log to syslog = {use_syslog}
err to syslog = {use_syslog}
clog to syslog = {use_syslog}
"""
@ -194,7 +197,7 @@ def get_ceph_nodes():
return hosts
def configure(service, key, auth):
def configure(service, key, auth, use_syslog):
''' Perform basic configuration of Ceph '''
create_keyring(service, key)
create_key_file(service, key)
@ -202,7 +205,8 @@ def configure(service, key, auth):
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF.format(auth=auth,
keyring=_keyring_path(service),
mon_hosts=",".join(map(str, hosts))))
mon_hosts=",".join(map(str, hosts)),
use_syslog=use_syslog))
modprobe('rbd')

View File

@ -22,4 +22,5 @@ def zap_disk(block_device):
:param block_device: str: Full path of block device to clean.
'''
check_call(['sgdisk', '--zap-all', block_device])
check_call(['sgdisk', '--zap-all', '--clear',
'--mbrtogpt', block_device])

View File

@ -8,6 +8,7 @@ import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError
@ -149,6 +150,11 @@ def service_name():
return local_unit().split('/')[0]
def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])
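Since Juju invokes each hook as an executable named after the hook, the new helper simply reports which hook is running (illustrative):

from charmhelpers.core.hookenv import hook_name

# inside hooks/config-changed, sys.argv[0] ends in 'config-changed', so:
hook_name()  # -> 'config-changed'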
@cached
def config(scope=None):
"""Juju charm configuration"""

View File

@ -194,7 +194,7 @@ def file_hash(path):
return None
def restart_on_change(restart_map):
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
This function is used as a decorator, for example
@ -219,8 +219,14 @@ def restart_on_change(restart_map):
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(OrderedDict.fromkeys(restarts)):
service('restart', service_name)
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f
return wrap
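A usage sketch of the extended decorator (the path and service name are illustrative); stopstart=True suits services that do not cope well with a plain restart:

from charmhelpers.core.host import restart_on_change

@restart_on_change({'/etc/nova/nova.conf': ['nova-compute']}, stopstart=True)
def config_changed():
    # render /etc/nova/nova.conf here; if its hash changes, nova-compute is
    # stopped and then started rather than restarted
    pass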
@ -245,3 +251,47 @@ def pwgen(length=None):
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", ""))
return interfaces
def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
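A short sketch exercising the new NIC helpers (interface name prefixes are examples; the helpers shell out to iproute2, which must be installed):

from charmhelpers.core.host import list_nics, get_nic_mtu, get_nic_hwaddr

for nic in list_nics(['eth', 'bond']):
    mtu = get_nic_mtu(nic)     # e.g. '1500'
    mac = get_nic_hwaddr(nic)  # e.g. 'fa:16:3e:12:34:56'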

View File

@ -13,6 +13,7 @@ from charmhelpers.core.hookenv import (
log,
)
import apt_pkg
import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@ -43,8 +44,16 @@ CLOUD_ARCHIVE_POCKETS = {
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precies-havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}
@ -66,8 +75,10 @@ def filter_installed_packages(packages):
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
options = options or []
cmd = ['apt-get', '-y']
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
@ -76,10 +87,37 @@ def apt_install(packages, options=None, fatal=False):
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd)
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd)
subprocess.call(cmd, env=env)
def apt_upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd, env=env)
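A usage sketch of the reworked package helpers (the package name is illustrative); both now default to dpkg's --force-confold and export DEBIAN_FRONTEND=noninteractive unless the hook environment already sets it:

from charmhelpers.fetch import apt_update, apt_upgrade, apt_install

apt_update(fatal=True)
apt_upgrade(dist=True, fatal=True)
apt_install(['nova-compute'], fatal=True)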
def apt_update(fatal=False):
@ -93,7 +131,7 @@ def apt_update(fatal=False):
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '-y', 'purge']
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
@ -120,17 +158,23 @@ def apt_hold(packages, fatal=False):
def add_source(source, key=None):
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
@ -139,7 +183,9 @@ def add_source(source, key=None):
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'import', key])
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])
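A sketch of how the extended add_source behaves (the pocket, PPA and key ID are examples only):

from charmhelpers.fetch import add_source

# 'cloud:precise-icehouse' resolves via CLOUD_ARCHIVE_POCKETS to
# precise-updates/icehouse
add_source('cloud:precise-icehouse')
# keys are now fetched from keyserver.ubuntu.com rather than imported
# from a local file
add_source('ppa:openstack-ubuntu-testing/icehouse', key='5EDB1B62EC4926EA')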
class SourceConfigError(Exception):
@ -220,7 +266,9 @@ def install_from_config(config_var_name):
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
@ -248,10 +296,13 @@ def plugins(fetch_handlers=None):
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(importlib.import_module(package), classname)
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(handler_name))
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list

View File

@ -1,5 +1,7 @@
import os
import urllib2
import urlparse
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
@ -24,6 +26,19 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
def download(self, source, dest):
# propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
if proto in ('http', 'https'):
auth, barehost = urllib2.splituser(netloc)
if auth is not None:
source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
username, password = urllib2.splitpasswd(auth)
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password
# to be used regardless of the realm
passman.add_password(None, source, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
response = urllib2.urlopen(source)
try:
with open(dest, 'w') as dest_file:
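A usage sketch of the new credential handling (URL and credentials are made up); credentials embedded in the URL are peeled off and installed as an HTTP basic-auth handler before the download proceeds:

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

handler = ArchiveUrlFetchHandler()
handler.download('https://user:secret@example.com/payload.tar.gz',
                 '/tmp/payload.tar.gz')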

View File

@ -70,6 +70,7 @@ BASE_RESOURCE_MAP = {
CloudComputeContext(),
NovaComputeLibvirtContext(),
NovaComputeCephContext(),
context.SyslogContext(),
context.SubordinateConfigContext(
interface='nova-ceilometer',
service='nova',
@ -94,7 +95,9 @@ QUANTUM_CONF = '/etc/quantum/quantum.conf'
QUANTUM_RESOURCES = {
QUANTUM_CONF: {
'services': [],
'contexts': [context.AMQPContext(), NeutronComputeContext()],
'contexts': [context.AMQPContext(),
NeutronComputeContext(),
context.SyslogContext()],
}
}
@ -103,7 +106,9 @@ NEUTRON_CONF = '/etc/neutron/neutron.conf'
NEUTRON_RESOURCES = {
NEUTRON_CONF: {
'services': [],
'contexts': [context.AMQPContext(), NeutronComputeContext()],
'contexts': [context.AMQPContext(),
NeutronComputeContext(),
context.SyslogContext()],
}
}

View File

@ -1 +1 @@
130
131

View File

@ -16,6 +16,7 @@
--connection_type=libvirt
--root_helper=sudo nova-rootwrap
--verbose
--use_syslog={{ use_syslog }}
--ec2_private_dns_show_ip
{% if database_host -%}
--sql_connection=mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}

View File

@ -18,6 +18,7 @@ libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
use_syslog = {{ use_syslog }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes

View File

@ -3,6 +3,9 @@
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
use_syslog = {{ use_syslog }}
[OVS]
tunnel_id_ranges = 1:1000
tenant_network_type = gre

View File

@ -11,6 +11,7 @@ bind_port = 9696
{% if core_plugin -%}
core_plugin = {{ core_plugin }}
{% endif -%}
use_syslog = {{ use_syslog }}
api_paste_config = /etc/quantum/api-paste.ini
auth_strategy = keystone
notification_driver = quantum.openstack.common.notifier.rpc_notifier

View File

@ -18,6 +18,7 @@ libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
use_syslog = {{ use_syslog }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes

View File

@ -16,6 +16,7 @@ core_plugin = {{ core_plugin }}
api_paste_config = /etc/neutron/api-paste.ini
auth_strategy = keystone
notification_driver = neutron.openstack.common.notifier.rpc_notifier
use_syslog = {{ use_syslog }}
default_notification_level = INFO
notification_topics = notifications

View File

@ -18,6 +18,7 @@ libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
use_syslog = {{ use_syslog }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes