Finish up: new templates, ssh key creation, Makefile.

Author: Adam Gandelman, 2013-08-01 16:21:58 -07:00
parent 3088213f27
commit 9446e951af
21 changed files with 555 additions and 55 deletions

.coveragerc (new file, +6 lines)

@@ -0,0 +1,6 @@
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
    if __name__ == .__main__.:
include=
    hooks/nova_*
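
The exclude_lines regex strips `if __name__ == '__main__':` guards from the coverage math, so hook entry points don't drag the percentage down. A minimal hook module showing the excluded pattern (hypothetical file name, for illustration only):

def main():
    pass  # hook dispatch would happen here

if __name__ == '__main__':  # matched by the exclude_lines regex above
    main()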

Makefile (new file, +14 lines)

@@ -0,0 +1,14 @@
#!/usr/bin/make
PYTHON := /usr/bin/env python

lint:
	@flake8 --exclude hooks/charmhelpers hooks
	@flake8 --exclude hooks/charmhelpers tests
	@charm proof

test:
	@echo Starting tests...
	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage tests

sync:
	@charm-helper-sync -c charm-helpers-sync.yaml
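
The test target relies on nosetests' default discovery under tests/, with coverage scoped by the .coveragerc above. A minimal module it would collect could look like this (hypothetical name):

import unittest


class SmokeTest(unittest.TestCase):
    def test_truth(self):
        self.assertTrue(True)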


@@ -153,7 +153,7 @@ class CephContext(OSContextGenerator):
 
     def __call__(self):
         '''This generates context for /etc/ceph/ceph.conf templates'''
-        log('Generating tmeplate context for ceph')
+        log('Generating template context for ceph')
         mon_hosts = []
         auth = None
         for rid in relation_ids('ceph'):


@@ -12,11 +12,13 @@ from charmhelpers.contrib.hahelpers.ceph import (
 )
 
 
-# This was pulled from cinder redux. It should go somewhere common, charmhelpers.hahelpers.ceph?
+# This was pulled from cinder redux. It should go somewhere common,
+# charmhelpers.hahelpers.ceph?
 def ensure_ceph_keyring(service):
     '''Ensures a ceph keyring exists. Returns True if so, False otherwise'''
-    # TODO: This can be shared between cinder + glance, find a home for it.
+    # TODO: This can be shared between nova + glance + cinder, find a home for
+    # it.
     key = None
     for rid in relation_ids('ceph'):
         for unit in related_units(rid):
@@ -27,5 +29,5 @@ def ensure_ceph_keyring(service):
         return False
     ceph_create_keyring(service=service, key=key)
     keyring = ceph_keyring_path(service)
-    subprocess.check_call(['chown', 'cinder.cinder', keyring])
+    subprocess.check_call(['chown', 'nova.nova', keyring])
     return True
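
Pieced together, the helper after these two hunks reads roughly as below; the relation_get lookup and the import aliases are assumed from context rather than shown verbatim in this diff:

def ensure_ceph_keyring(service):
    '''Ensures a ceph keyring exists. Returns True if so, False otherwise'''
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)  # assumed lookup
    if not key:
        return False
    ceph_create_keyring(service=service, key=key)
    keyring = ceph_keyring_path(service)
    # nova owns the keyring now that this helper lives in the compute charm
    subprocess.check_call(['chown', 'nova.nova', keyring])
    return True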


@@ -7,6 +7,7 @@ from charmhelpers.core.hookenv import (
     log,
     relation_get,
     relation_ids,
+    service_name,
     unit_private_ip,
     ERROR,
     WARNING,
@@ -37,19 +38,27 @@ class NovaComputeLibvirtContext(context.OSContextGenerator):
     interfaces = []
 
     def __call__(self):
+        # distro defaults
+        ctxt = {
+            # /etc/default/libvirt-bin
+            'libvirtd_opts': '-d',
+            # /etc/libvirt/libvirtd.conf (
+            'listen_tls': 1,
+        }
+
         # enable tcp listening if configured for live migration.
         if config('enable-live-migration'):
-            opts = '-d -l'
-        else:
-            opts = '-d'
-        return {
-            'libvirtd_opts': opts,
-        }
+            ctxt['libvirtd_opts'] += ' -l'
+
+        if config('migration-auth-type') in ['none', 'None', 'ssh']:
+            ctxt['listen_tls'] = 0
+
+        return ctxt
 
 
 class NovaComputeVirtContext(context.OSContextGenerator):
     interfaces = []
 
     def __call__(self):
         return {}
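
Condensed, the new logic: -l (listen) is appended to libvirtd's options only when live migration is enabled, and the TLS listener is switched off when migration auth is 'none' or tunnelled over ssh. A standalone sketch with config() stubbed as a plain dict (not the charm's hookenv-backed config):

def libvirt_context(config):
    ctxt = {'libvirtd_opts': '-d', 'listen_tls': 1}
    if config.get('enable-live-migration'):
        ctxt['libvirtd_opts'] += ' -l'   # tell libvirtd to listen
    if config.get('migration-auth-type') in ['none', 'None', 'ssh']:
        ctxt['listen_tls'] = 0           # ssh tunnelling needs no TLS socket
    return ctxt

# e.g. ssh-tunnelled migration disables the TLS listener:
assert libvirt_context({'enable-live-migration': True,
                        'migration-auth-type': 'ssh'}) == \
    {'libvirtd_opts': '-d -l', 'listen_tls': 0}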
@@ -59,9 +68,17 @@ class NovaComputeCephContext(context.CephContext):
         ctxt = super(NovaComputeCephContext, self).__call__()
         if not ctxt:
             return {}
+        svc = service_name()
+
         # secret.xml
         ctxt['ceph_secret_uuid'] = CEPH_SECRET_UUID
+        # nova.conf
+        ctxt['service_name'] = svc
+        ctxt['rbd_user'] = svc
+        ctxt['rbd_secret_uuid'] = CEPH_SECRET_UUID
+        ctxt['rbd_pool'] = 'nova'
+
         return ctxt
 
 
 class CloudComputeContext(context.OSContextGenerator):
     '''
     Generates main context for writing nova.conf and quantum.conf templates
@@ -105,7 +122,7 @@ class CloudComputeContext(context.OSContextGenerator):
             'quantum_security_groups': relation_get('quantum_security_groups'),
             'quantum_plugin': relation_get('quantum_plugin'),
         }
-        missing = [k for k, v in quantum_ctxt.iteritems() if v == None]
+        missing = [k for k, v in quantum_ctxt.iteritems() if v is None]
         if missing:
             log('Missing required relation settings for Quantum: ' +
                 ' '.join(missing))
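
The == None to is None change is the PEP 8 identity test flake8 flags as E711; the two are not interchangeable once equality is overloaded:

class AlwaysEqual(object):
    def __eq__(self, other):
        return True     # pathological, but legal

v = AlwaysEqual()
print(v == None)        # True -- __eq__ can lie
print(v is None)        # False -- identity cannot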
@@ -133,7 +150,6 @@ class CloudComputeContext(context.OSContextGenerator):
                 raise
         return vol_ctxt
 
-
     def __call__(self):
         rids = relation_ids('cloud-compute')
         if not rids:
@@ -182,6 +198,7 @@ class OSConfigFlagContext(context.OSContextGenerator):
         ctxt = {'user_config_flags': flags}
         return ctxt
 
+
 class QuantumPluginContext(context.OSContextGenerator):
     interfaces = []
 
@@ -194,8 +211,8 @@ class QuantumPluginContext(context.OSContextGenerator):
     def ovs_context(self):
         q_driver = 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\
                    'OVSQuantumPluginV2'
-        q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\
-            'OVSHybridIptablesFirewallDriver'
+        q_fw_driver = 'quantum.agent.linux.iptables_firewall.'\
+                      'OVSHybridIptablesFirewallDriver'
 
         if get_os_codename_package('nova-common') in ['essex', 'folsom']:
             n_driver = 'nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver'
@@ -243,5 +260,4 @@ class QuantumPluginContext(context.OSContextGenerator):
         _save_flag_file(path='/etc/nova/quantum_plugin.conf', data=plugin)
 
         return ctxt
-


@@ -143,7 +143,7 @@ def compute_changed():
 def ceph_joined():
     if not os.path.isdir('/etc/ceph'):
         os.mkdir('/etc/ceph')
-    apt_install(filter_installed_packages('ceph-common'))
+    apt_install(filter_installed_packages(['ceph-common']))
 
 
 @hooks.hook('ceph-relation-changed')
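
The new list wrapping matters because filter_installed_packages iterates whatever it is given; a bare string would be consumed as one-character package names:

>>> [pkg for pkg in 'ceph-common'][:4]
['c', 'e', 'p', 'h']
>>> [pkg for pkg in ['ceph-common']]
['ceph-common']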


@@ -3,7 +3,7 @@ import pwd
 
 from base64 import b64decode
 from copy import deepcopy
-from subprocess import check_call
+from subprocess import check_call, check_output
 
 from charmhelpers.core.hookenv import (
     config,
@@ -27,7 +27,7 @@ from nova_compute_context import (
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
-TEMPLATES='templates/'
+TEMPLATES = 'templates/'
 
 BASE_PACKAGES = [
     'nova-compute',
@@ -39,6 +39,10 @@ BASE_RESOURCE_MAP = {
         'services': ['libvirt-bin'],
         'contexts': [],
     },
+    '/etc/libvirt/libvirtd.conf': {
+        'services': ['libvirt-bin'],
+        'contexts': [NovaComputeLibvirtContext()],
+    },
     '/etc/default/libvirt-bin': {
         'services': ['libvirt-bin'],
         'contexts': [NovaComputeLibvirtContext()],
@@ -129,6 +133,7 @@ def resource_map():
 
     return resource_map
 
+
 def restart_map():
     '''
     Constructs a restart map based on charm config settings and relation
@@ -136,6 +141,7 @@ def restart_map():
     '''
     return {k: v['services'] for k, v in resource_map().iteritems()}
 
+
 def register_configs():
     '''
     Returns an OSTemplateRenderer object with all required configs registered.
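
restart_map() is just a projection of the resource map down to {config file: services to restart on change}. With the entries added above it evaluates to something like this (illustration only; the charm uses iteritems() under Python 2):

resource_map = {
    '/etc/libvirt/libvirtd.conf': {'services': ['libvirt-bin'],
                                   'contexts': ['...']},
    '/etc/default/libvirt-bin': {'services': ['libvirt-bin'],
                                 'contexts': ['...']},
}
print({k: v['services'] for k, v in resource_map.items()})
# {'/etc/libvirt/libvirtd.conf': ['libvirt-bin'],
#  '/etc/default/libvirt-bin': ['libvirt-bin']}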
@@ -224,6 +230,7 @@ def quantum_attribute(plugin, attr):
     except KeyError:
         return None
 
+
 def public_ssh_key(user='root'):
     home = pwd.getpwnam(user).pw_dir
     try:
@@ -233,8 +240,27 @@ def public_ssh_key(user='root'):
         return None
 
 
-def initialize_ssh_keys():
-    pass
+def initialize_ssh_keys(user='root'):
+    home_dir = pwd.getpwnam(user).pw_dir
+    ssh_dir = os.path.join(home_dir, '.ssh')
+    if not os.path.isdir(ssh_dir):
+        os.mkdir(ssh_dir)
+
+    priv_key = os.path.join(ssh_dir, 'id_rsa')
+    if not os.path.isfile(priv_key):
+        log('Generating new ssh key for user %s.' % user)
+        cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
+               '-f', priv_key]
+        check_output(cmd)
+
+    pub_key = '%s.pub' % priv_key
+    if not os.path.isfile(pub_key):
+        log('Generating missing ssh public key @ %s.' % pub_key)
+        cmd = ['ssh-keygen', '-y', '-f', priv_key]
+        p = check_output(cmd).strip()
+        with open(pub_key, 'wb') as out:
+            out.write(p)
+    check_output(['chown', '-R', user, ssh_dir])
 
 
 def import_authorized_keys(user='root'):
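
A detail worth calling out in initialize_ssh_keys: `ssh-keygen -y -f <private key>` prints the public key derived from an existing private key, so a lost id_rsa.pub is reconstituted without rotating the keypair. Typical call sites would look like this (assumed; the actual hook call sites are not shown in this hunk):

initialize_ssh_keys()              # root's /root/.ssh/id_rsa{,.pub}
initialize_ssh_keys(user='nova')   # hypothetical: same for a service user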
@@ -244,17 +270,20 @@ def import_authorized_keys(user='root'):
     # XXX: Should this be managed via templates + contexts?
     hosts = relation_get('known_hosts')
     auth_keys = relation_get('authorized_keys')
-    if None in [hosts, auth_keys]:
+    # XXX: Need to fix charm-helpers to return None for empty settings,
+    # in all cases.
+    if not hosts or not auth_keys:
         return
 
     dest = os.path.join(pwd.getpwnam(user).pw_dir, '.ssh')
     log('Saving new known_hosts and authorized_keys file to: %s.' % dest)
-    with open(os.path.join(dest, 'authorized_keys')) as _keys:
+
+    with open(os.path.join(dest, 'authorized_keys'), 'wb') as _keys:
         _keys.write(b64decode(auth_keys))
-    with open(os.path.join(dest, 'known_hosts')) as _hosts:
+    with open(os.path.join(dest, 'known_hosts'), 'wb') as _hosts:
         _hosts.write(b64decode(hosts))
 
 
 def configure_live_migration(configs=None):
     """
     Ensure libvirt live migration is properly configured or disabled,
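
The b64decode calls are the receiving half of the transport encoding: key material is base64-encoded before being set on the relation so embedded newlines survive. Round trip in brief (Python 2, matching the charm code):

from base64 import b64decode, b64encode

keys = 'ssh-rsa AAAA... nova@compute-1\n'
assert b64decode(b64encode(keys)) == keys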

hooks/start (new symbolic link)

@@ -0,0 +1 @@
nova_compute_relations.py

hooks/stop (new symbolic link)

@@ -0,0 +1 @@
nova_compute_relations.py


@@ -5,6 +5,8 @@ description: |
   OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In
   addition to its "native" API (the OpenStack API), it also supports the Amazon
   EC2 API.
+categories:
+  - openstack
 provides:
   cloud-compute:
     interface: nova-compute


@@ -1 +1 @@
-105
+110


@@ -1,3 +1,7 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
 --dhcpbridge_flagfile=/etc/nova/nova.conf
 --dhcpbridge=/usr/bin/nova-dhcpbridge
 --logdir=/var/log/nova


@@ -1,8 +1,7 @@
 ###############################################################################
-###############################################################################
 # [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
 [DEFAULT]
 dhcpbridge_flagfile=/etc/nova/nova.conf
 dhcpbridge=/usr/bin/nova-dhcpbridge


@@ -1,3 +1,8 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
 # Defaults for libvirt-bin initscript (/etc/init.d/libvirt-bin)
 # This is a POSIX shell fragment

templates/libvirtd.conf (new file, +400 lines)

@@ -0,0 +1,400 @@
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
# Master libvirt daemon configuration file
#
# For further information consult http://libvirt.org/format.html
#
# NOTE: the tests/daemon-conf regression test script requires
# that each "PARAMETER = VALUE" line in this file have the parameter
# name just after a leading "#".
#################################################################
#
# Network connectivity controls
#
# Flag listening for secure TLS connections on the public TCP/IP port.
# NB, must pass the --listen flag to the libvirtd process for this to
# have any effect.
#
# It is necessary to setup a CA and issue server certificates before
# using this capability.
#
# This is enabled by default, uncomment this to disable it
listen_tls = {{ listen_tls }}
# Listen for unencrypted TCP connections on the public TCP/IP port.
# NB, must pass the --listen flag to the libvirtd process for this to
# have any effect.
#
# Using the TCP socket requires SASL authentication by default. Only
# SASL mechanisms which support data encryption are allowed. This is
# DIGEST_MD5 and GSSAPI (Kerberos5)
#
# This is disabled by default, uncomment this to enable it.
#listen_tcp = 1
# Override the port for accepting secure TLS connections
# This can be a port number, or service name
#
#tls_port = "16514"
# Override the port for accepting insecure TCP connections
# This can be a port number, or service name
#
#tcp_port = "16509"
# Override the default configuration which binds to all network
# interfaces. This can be a numeric IPv4/6 address, or hostname
#
#listen_addr = "192.168.0.1"
# Flag toggling mDNS advertizement of the libvirt service.
#
# Alternatively can disable for all services on a host by
# stopping the Avahi daemon
#
# This is disabled by default, uncomment this to enable it
#mdns_adv = 1
# Override the default mDNS advertizement name. This must be
# unique on the immediate broadcast network.
#
# The default is "Virtualization Host HOSTNAME", where HOSTNAME
# is subsituted for the short hostname of the machine (without domain)
#
#mdns_name = "Virtualization Host Joe Demo"
#################################################################
#
# UNIX socket access controls
#
# Set the UNIX domain socket group ownership. This can be used to
# allow a 'trusted' set of users access to management capabilities
# without becoming root.
#
# This is restricted to 'root' by default.
unix_sock_group = "libvirtd"
# Set the UNIX socket permissions for the R/O socket. This is used
# for monitoring VM status only
#
# Default allows any user. If setting group ownership may want to
# restrict this to:
#unix_sock_ro_perms = "0777"
# Set the UNIX socket permissions for the R/W socket. This is used
# for full management of VMs
#
# Default allows only root. If PolicyKit is enabled on the socket,
# the default will change to allow everyone (eg, 0777)
#
# If not using PolicyKit and setting group ownership for access
# control then you may want to relax this to:
unix_sock_rw_perms = "0770"
# Set the name of the directory in which sockets will be found/created.
#unix_sock_dir = "/var/run/libvirt"
#################################################################
#
# Authentication.
#
# - none: do not perform auth checks. If you can connect to the
# socket you are allowed. This is suitable if there are
# restrictions on connecting to the socket (eg, UNIX
# socket permissions), or if there is a lower layer in
# the network providing auth (eg, TLS/x509 certificates)
#
# - sasl: use SASL infrastructure. The actual auth scheme is then
# controlled from /etc/sasl2/libvirt.conf. For the TCP
# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
# For non-TCP or TLS sockets, any scheme is allowed.
#
# - polkit: use PolicyKit to authenticate. This is only suitable
# for use on the UNIX sockets. The default policy will
# require a user to supply their own password to gain
# full read/write access (aka sudo like), while anyone
# is allowed read/only access.
#
# Set an authentication scheme for UNIX read-only sockets
# By default socket permissions allow anyone to connect
#
# To restrict monitoring of domains you may wish to enable
# an authentication mechanism here
auth_unix_ro = "none"
# Set an authentication scheme for UNIX read-write sockets
# By default socket permissions only allow root. If PolicyKit
# support was compiled into libvirt, the default will be to
# use 'polkit' auth.
#
# If the unix_sock_rw_perms are changed you may wish to enable
# an authentication mechanism here
auth_unix_rw = "none"
# Change the authentication scheme for TCP sockets.
#
# If you don't enable SASL, then all TCP traffic is cleartext.
# Don't do this outside of a dev/test scenario. For real world
# use, always enable SASL and use the GSSAPI or DIGEST-MD5
# mechanism in /etc/sasl2/libvirt.conf
#auth_tcp = "sasl"
# Change the authentication scheme for TLS sockets.
#
# TLS sockets already have encryption provided by the TLS
# layer, and limited authentication is done by certificates
#
# It is possible to make use of any SASL authentication
# mechanism as well, by using 'sasl' for this option
#auth_tls = "none"
#################################################################
#
# TLS x509 certificate configuration
#
# Override the default server key file path
#
#key_file = "/etc/pki/libvirt/private/serverkey.pem"
# Override the default server certificate file path
#
#cert_file = "/etc/pki/libvirt/servercert.pem"
# Override the default CA certificate path
#
#ca_file = "/etc/pki/CA/cacert.pem"
# Specify a certificate revocation list.
#
# Defaults to not using a CRL, uncomment to enable it
#crl_file = "/etc/pki/CA/crl.pem"
#################################################################
#
# Authorization controls
#
# Flag to disable verification of our own server certificates
#
# When libvirtd starts it performs some sanity checks against
# its own certificates.
#
# Default is to always run sanity checks. Uncommenting this
# will disable sanity checks which is not a good idea
#tls_no_sanity_certificate = 1
# Flag to disable verification of client certificates
#
# Client certificate verification is the primary authentication mechanism.
# Any client which does not present a certificate signed by the CA
# will be rejected.
#
# Default is to always verify. Uncommenting this will disable
# verification - make sure an IP whitelist is set
#tls_no_verify_certificate = 1
# A whitelist of allowed x509 Distinguished Names
# This list may contain wildcards such as
#
# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
#
# See the POSIX fnmatch function for the format of the wildcards.
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using empty list to disable these checks
#
# By default, no DN's are checked
#tls_allowed_dn_list = ["DN1", "DN2"]
# A whitelist of allowed SASL usernames. The format for usernames
# depends on the SASL authentication mechanism. Kerberos usernames
# look like username@REALM
#
# This list may contain wildcards such as
#
# "*@EXAMPLE.COM"
#
# See the POSIX fnmatch function for the format of the wildcards.
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using empty list to disable these checks
#
# By default, no Username's are checked
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
#################################################################
#
# Processing controls
#
# The maximum number of concurrent client connections to allow
# over all sockets combined.
#max_clients = 20
# The minimum limit sets the number of workers to start up
# initially. If the number of active clients exceeds this,
# then more threads are spawned, up to max_workers limit.
# Typically you'd want max_workers to equal maximum number
# of clients allowed
#min_workers = 5
#max_workers = 20
# The number of priority workers. If all workers from above
# pool will stuck, some calls marked as high priority
# (notably domainDestroy) can be executed in this pool.
#prio_workers = 5
# Total global limit on concurrent RPC calls. Should be
# at least as large as max_workers. Beyond this, RPC requests
# will be read into memory and queued. This directly impact
# memory usage, currently each request requires 256 KB of
# memory. So by default up to 5 MB of memory is used
#
# XXX this isn't actually enforced yet, only the per-client
# limit is used so far
#max_requests = 20
# Limit on concurrent requests from a single client
# connection. To avoid one client monopolizing the server
# this should be a small fraction of the global max_requests
# and max_workers parameter
#max_client_requests = 5
#################################################################
#
# Logging controls
#
# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
# basically 1 will log everything possible
#log_level = 3
# Logging filters:
# A filter allows to select a different logging level for a given category
# of logs
# The format for a filter is one of:
# x:name
# x:+name
# where name is a string which is matched against source file name,
# e.g., "remote", "qemu", or "util/json", the optional "+" prefix
# tells libvirt to log stack trace for each message matching name,
# and x is the minimal level where matching messages should be logged:
# 1: DEBUG
# 2: INFO
# 3: WARNING
# 4: ERROR
#
# Multiple filter can be defined in a single @filters, they just need to be
# separated by spaces.
#
# e.g. to only get warning or errors from the remote layer and only errors
# from the event layer:
#log_filters="3:remote 4:event"
# Logging outputs:
# An output is one of the places to save logging information
# The format for an output can be:
# x:stderr
# output goes to stderr
# x:syslog:name
# use syslog for the output and use the given name as the ident
# x:file:file_path
# output to a file, with the given filepath
# In all case the x prefix is the minimal level, acting as a filter
# 1: DEBUG
# 2: INFO
# 3: WARNING
# 4: ERROR
#
# Multiple output can be defined, they just need to be separated by spaces.
# e.g. to log all warnings and errors to syslog under the libvirtd ident:
#log_outputs="3:syslog:libvirtd"
#
# Log debug buffer size: default 64
# The daemon keeps an internal debug log buffer which will be dumped in case
# of crash or upon receiving a SIGUSR2 signal. This setting allows to override
# the default buffer size in kilobytes.
# If value is 0 or less the debug log buffer is deactivated
#log_buffer_size = 64
##################################################################
#
# Auditing
#
# This setting allows usage of the auditing subsystem to be altered:
#
# audit_level == 0 -> disable all auditing
# audit_level == 1 -> enable auditing, only if enabled on host (default)
# audit_level == 2 -> enable auditing, and exit if disabled on host
#
#audit_level = 2
#
# If set to 1, then audit messages will also be sent
# via libvirt logging infrastructure. Defaults to 0
#
#audit_logging = 1
###################################################################
# UUID of the host:
# Provide the UUID of the host here in case the command
# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
# 'dmidecode' does not provide a valid UUID and none is provided here, a
# temporary UUID will be generated.
# Keep the format of the example UUID below. UUID must not have all digits
# be the same.
# NB This default all-zeros UUID will not work. Replace
# it with the output of the 'uuidgen' command and then
# uncomment this entry
#host_uuid = "00000000-0000-0000-0000-000000000000"
###################################################################
# Keepalive protocol:
# This allows libvirtd to detect broken client connections or even
# dead client. A keepalive message is sent to a client after
# keepalive_interval seconds of inactivity to check if the client is
# still responding; keepalive_count is a maximum number of keepalive
# messages that are allowed to be sent to the client without getting
# any response before the connection is considered broken. In other
# words, the connection is automatically closed approximately after
# keepalive_interval * (keepalive_count + 1) seconds since the last
# message received from the client. If keepalive_interval is set to
# -1, libvirtd will never send keepalive requests; however clients
# can still send them and the deamon will send responses. When
# keepalive_count is set to 0, connections will be automatically
# closed after keepalive_interval seconds of inactivity without
# sending any keepalive messages.
#
#keepalive_interval = 5
#keepalive_count = 5
#
# If set to 1, libvirtd will refuse to talk to clients that do not
# support keepalive protocol. Defaults to 0.
#
#keepalive_required = 1


@@ -1,3 +1,8 @@
+###############################################################################
+# [ WARNING ]
+# Configuration file maintained by Juju. Local changes may be overwritten.
+###############################################################################
+
 # File installed by Juju nova-compute charm
 cgroup_device_acl = [
     "/dev/null", "/dev/full", "/dev/zero",

templates/secret.xml (new file, +8 lines)

@@ -0,0 +1,8 @@
{% if ceph_secret_uuid -%}
<secret ephemeral='no' private='no'>
  <uuid>{{ ceph_secret_uuid }}</uuid>
  <usage type='ceph'>
    <name>client.{{ service_name }} secret</name>
  </usage>
</secret>
{% endif -%}
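
For context, libvirt consumes such XML via `virsh secret-define`, after which the secret's value is set so nova's rbd_secret_uuid resolves to the actual Ceph key. A hedged sketch of that workflow (assumed; the charm's hook code handling this is not part of this commit):

from subprocess import check_call

def define_libvirt_secret(xml_path, uuid, b64_key):
    # hypothetical helper; shows the standard virsh sequence only
    check_call(['virsh', 'secret-define', '--file', xml_path])
    check_call(['virsh', 'secret-set-value', '--secret', uuid,
                '--base64', b64_key])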


@@ -32,13 +32,13 @@ QUANTUM_CONTEXT = {
 # Context for an OVS plugin contains at least the following. Other bits
 # (driver names) are dependent on OS release.
 BASE_QUANTUM_OVS_PLUGIN_CONTEXT = {
-    'core_plugin': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'\
+    'core_plugin': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                    'OVSQuantumPluginV2',
     'enable_tunneling': True,
     'libvirt_use_virtio_for_bridges': True,
     'local_ip': '10.0.0.1',
     'nova_firewall_driver': 'nova.virt.firewall.NoopFirewallDriver',
-    'ovs_firewall_driver': 'quantum.agent.linux.iptables_firewall.'\
+    'ovs_firewall_driver': 'quantum.agent.linux.iptables_firewall.'
                            'OVSHybridIptablesFirewallDriver',
     'tenant_network_type': 'gre',
     'tunnel_id_ranges': '1:1000',

@@ -46,10 +46,12 @@ BASE_QUANTUM_OVS_PLUGIN_CONTEXT = {
     'quantum_security_groups': True,
 }
 
+
 def fake_log(msg, level=None):
     level = level or 'INFO'
     print '[juju test log (%s)] %s' % (level, msg)
 
+
 class NovaComputeContextTests(CharmTestCase):
     def setUp(self):
         super(NovaComputeContextTests, self).setUp(context, TO_PATCH)
@@ -88,7 +90,6 @@ class NovaComputeContextTests(CharmTestCase):
         }
         self.assertEquals(ex_ctxt, result)
 
-
     def test_cloud_compute_volume_context_nova_vol_unsupported(self):
         self.relation_ids.return_value = 'cloud-compute:0'
         cloud_compute = context.CloudComputeContext()
@@ -115,18 +116,19 @@ class NovaComputeContextTests(CharmTestCase):
     def test_cloud_compute_quantum_context(self):
         self.test_relation.set(QUANTUM_CONTEXT)
         cloud_compute = context.CloudComputeContext()
-        ex_ctxt = { 'network_manager_config': {
-            'auth_port': '5000',
-            'keystone_host': 'keystone_host',
-            'network_api_class': 'nova.network.quantumv2.api.API',
-            'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0',
-            'quantum_admin_password': 'openstack',
-            'quantum_admin_tenant_name': 'admin',
-            'quantum_admin_username': 'admin',
-            'quantum_auth_strategy': 'keystone',
-            'quantum_plugin': 'ovs',
-            'quantum_security_groups': 'yes',
-            'quantum_url': 'http://quantum_url'
+        ex_ctxt = {
+            'network_manager_config': {
+                'auth_port': '5000',
+                'keystone_host': 'keystone_host',
+                'network_api_class': 'nova.network.quantumv2.api.API',
+                'quantum_admin_auth_url': 'http://keystone_host:5000/v2.0',
+                'quantum_admin_password': 'openstack',
+                'quantum_admin_tenant_name': 'admin',
+                'quantum_admin_username': 'admin',
+                'quantum_auth_strategy': 'keystone',
+                'quantum_plugin': 'ovs',
+                'quantum_security_groups': 'yes',
+                'quantum_url': 'http://quantum_url'
             }
         }
         self.assertEquals(ex_ctxt, cloud_compute())
@@ -163,15 +165,15 @@ class NovaComputeContextTests(CharmTestCase):
             path='/etc/nova/quantum_plugin.conf', data='ovs')
 
     def test_libvirt_bin_context_no_migration(self):
-        self.test_config.set('enable-live-migration', 'false')
+        self.test_config.set('enable-live-migration', False)
         libvirt = context.NovaComputeLibvirtContext()
-        self.assertEquals({'libvirtd_opts': '-d'}, libvirt())
+        self.assertEquals({'libvirtd_opts': '-d', 'listen_tls': 1}, libvirt())
 
     def test_libvirt_bin_context_migration_tcp_listen(self):
-        self.test_config.set('enable-live-migration', 'true')
+        self.test_config.set('enable-live-migration', True)
         libvirt = context.NovaComputeLibvirtContext()
-        self.assertEquals({'libvirtd_opts': '-d -l'}, libvirt())
+        self.assertEquals(
+            {'libvirtd_opts': '-d -l', 'listen_tls': 1}, libvirt())
 
     def test_config_flag_context_none_set_in_config(self):
         flags = context.OSConfigFlagContext()


@@ -27,6 +27,7 @@ TO_PATCH = [
     # charmhelpers.core.host
     'apt_install',
     'apt_update',
+    'filter_installed_packages',
     'restart_on_change',
     #charmhelpers.contrib.openstack.utils
     'configure_installation_source',
@@ -37,6 +38,7 @@ TO_PATCH = [
     'determine_packages',
     'import_authorized_keys',
     'import_keystone_ca_cert',
+    'initialize_ssh_keys',
     'migration_enabled',
     'do_openstack_upgrade',
     'quantum_attribute',
@@ -49,11 +51,16 @@ TO_PATCH = [
 ]
 
 
+def fake_filter(packages):
+    return packages
+
+
 class NovaComputeRelationsTests(CharmTestCase):
     def setUp(self):
         super(NovaComputeRelationsTests, self).setUp(relations,
                                                      TO_PATCH)
         self.config.side_effect = self.test_config.get
+        self.filter_installed_packages.side_effect = fake_filter
 
     def test_install_hook(self):
         repo = 'cloud:precise-grizzly'
@@ -215,7 +222,7 @@ class NovaComputeRelationsTests(CharmTestCase):
         isdir.return_value = False
         relations.ceph_joined()
         mkdir.assert_called_with('/etc/ceph')
-        self.apt_install.assert_called_with('ceph-common')
+        self.apt_install.assert_called_with(['ceph-common'])
 
     @patch.object(relations, 'CONFIGS')
     def test_ceph_changed_missing_relation_data(self, configs):


@@ -182,8 +182,8 @@ class NovaComputeUtilsTests(CharmTestCase):
         ]
         ex_open = [
-            call('/home/foo/.ssh/authorized_keys'),
-            call('/home/foo/.ssh/known_hosts')
+            call('/home/foo/.ssh/authorized_keys', 'wb'),
+            call('/home/foo/.ssh/known_hosts', 'wb')
         ]
         ex_write = [
             call('foo_host\n'),
@@ -195,7 +195,6 @@ class NovaComputeUtilsTests(CharmTestCase):
         self.assertEquals(ex_open, _open.call_args_list)
         self.assertEquals(ex_write, _file.write.call_args_list)
 
-
     @patch('subprocess.check_call')
     def test_import_keystone_cert_missing_data(self, check_call):
         self.relation_get.return_value = None
@@ -236,8 +235,8 @@ class NovaComputeUtilsTests(CharmTestCase):
         }
         resource_map.return_value = rsc_map
         utils.register_configs()
-        renderer.assert_called_with(openstack_release='havana',
-                                    templates_dir='templates/')
+        renderer.assert_called_with(
+            openstack_release='havana', templates_dir='templates/')
         ex_reg = [
             call('/etc/nova/nova-compute.conf', [ctxt2]),
             call('/etc/nova/nova.conf', [ctxt1])


@@ -93,7 +93,7 @@ class TestRelation(object):
         self.relation_data = relation_data
 
     def get(self, attr=None, unit=None, rid=None):
-        if attr == None:
+        if attr is None:
             return self.relation_data
         elif attr in self.relation_data:
             return self.relation_data[attr]