Second commit:

Bilal Baqar 2016-03-07 12:29:25 -08:00
parent 127e2a07a2
commit 458f98446b
7 changed files with 233 additions and 70 deletions

View File

@@ -7,7 +7,7 @@ virtualenv:
netaddr jinja2
lint: virtualenv
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402
@charm proof
unit_test: virtualenv

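The lint target now passes --ignore E402 ("module level import not at top of file") to flake8. A minimal sketch of the pattern that commonly triggers E402 in charm hooks, which extend sys.path before importing charm-local code (the 'lib' path below is hypothetical):

# Sketch only: code of this shape trips flake8's E402 check.
import os
import sys

# Hooks often have to extend sys.path before charm-local imports resolve,
# which pushes later imports below the first executable statement.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))  # hypothetical layout

import json  # flagged as E402 unless the check is ignored
print(json.dumps({'e402': 'illustration'}))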
View File

@@ -21,3 +21,22 @@ options:
type: string
description: |
Provide the version of networking-plumgrid package that needs to be installed
plumgrid-username:
default: plumgrid
type: string
description: Username to access PLUMgrid Director
plumgrid-password:
default: plumgrid
type: string
description: Password to access PLUMgrid Director
plumgrid-virtual-ip:
default:
type: string
description: IP address of PLUMgrid Director
# end of PLUMgrid configuration
manage-neutron-plugin-legacy-mode:
type: boolean
default: False
description: |
If True neutron-api charm will install neutron packages for the plugin
configured. Also needs to be set in neutron-api charm

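The new PLUMgrid options are plain charm config; a minimal sketch (hypothetical helper, using charmhelpers' config()) of how they are read, mirroring the plugin-context changes later in this commit:

# Sketch: read the options added above; 'plumgrid-virtual-ip' has no default,
# so it can come back empty if the operator never sets it.
from charmhelpers.core.hookenv import config, log

def director_settings():
    conf = config()
    settings = {
        'pg_username': conf['plumgrid-username'],
        'pg_password': conf['plumgrid-password'],
        'virtual_ip': conf['plumgrid-virtual-ip'],
    }
    if not settings['virtual_ip']:
        log('plumgrid-virtual-ip is not set; cannot reach the PLUMgrid Director')
    return settings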
View File

@@ -243,6 +243,10 @@ def neutron_plugins():
'python-neutron-plugin-midonet')
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
return plugins

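This change makes charmhelpers' plugin metadata return the networking_plumgrid driver and drops neutron-plugin-plumgrid from the server packages. A short sketch of the lookup that consumes this dict (the same call set_neutron_relation() below relies on):

# Sketch: neutron_plugin_attribute() reads the plugins dict patched above.
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

core_plugin = neutron_plugin_attribute('plumgrid', 'driver', 'neutron')
server_pkgs = neutron_plugin_attribute('plumgrid', 'server_packages', 'neutron')
# core_plugin -> 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2'
# server_pkgs -> no longer includes 'neutron-plugin-plumgrid' after this commit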
View File

@@ -24,6 +24,8 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import bisect
import errno
import hashlib
import six
import os
@@ -163,7 +165,7 @@ class Pool(object):
:return: None
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(cache_pool)
mode = get_cache_mode(self.service, cache_pool)
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -259,6 +261,134 @@ class ErasurePool(Pool):
Returns json formatted output"""
def get_mon_map(service):
"""
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: json string. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(
['ceph', '--id', service,
'ceph', 'mon_status', '--format=json'])
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
raise
def hash_monitor_names(service):
"""
Uses the get_mon_map() function to get information about the monitor
cluster.
Hash the name of each monitor. Return a sorted list of monitor hashes
in an ascending order.
:param service: six.string_types. The Ceph user name to run the command under
:rtype : dict. json dict of monitor name, ip address and rank
example: {
'name': 'ip-172-31-13-165',
'rank': 0,
'addr': '172.31.13.165:6789/0'}
"""
try:
hash_list = []
monitor_list = get_mon_map(service=service)
if monitor_list['monmap']['mons']:
for mon in monitor_list['monmap']['mons']:
hash_list.append(
hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
return sorted(hash_list)
else:
return None
except (ValueError, CalledProcessError):
raise
def monitor_key_delete(service, key):
"""
Delete a key and value pair from the monitor cluster
:param service: six.string_types. The Ceph user name to run the command under
Deletes a key value pair on the monitor cluster.
:param key: six.string_types. The key to delete.
"""
try:
check_output(
['ceph', '--id', service,
'ceph', 'config-key', 'del', str(key)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_set(service, key, value):
"""
Sets a key value pair on the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to set.
:param value: The value to set. This will be converted to a string
before setting
"""
try:
check_output(
['ceph', '--id', service,
'ceph', 'config-key', 'put', str(key), str(value)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(
['ceph', '--id', service,
'ceph', 'config-key', 'get', str(key)])
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
e.output))
return None
def monitor_key_exists(service, key):
"""
Searches for the existence of a key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for
:return: Returns True if the key exists, False if not and raises an
exception if an unknown error occurs. :raise: CalledProcessError if
an unknown error occurs
"""
try:
check_call(
['ceph', '--id', service,
'config-key', 'exists', str(key)])
# I can return true here regardless because Ceph returns
# ENOENT if the key wasn't found
return True
except CalledProcessError as e:
if e.returncode == errno.ENOENT:
return False
else:
log("Unknown error from ceph config-get exists: {} {}".format(
e.returncode, e.output))
raise
def get_erasure_profile(service, name):
"""
:param service: six.string_types. The Ceph user name to run the command under

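Taken together, the new monitor_key_* helpers give charms a small key/value store backed by ceph config-key. A minimal usage sketch, assuming these helpers live in charmhelpers.contrib.storage.linux.ceph (where Pool and ErasurePool are defined) and that a cephx user named 'admin' exists:

# Sketch only: round-trip a value through the monitors' config-key store.
from charmhelpers.core.hookenv import log
from charmhelpers.contrib.storage.linux.ceph import (
    monitor_key_set,
    monitor_key_get,
    monitor_key_exists,
    monitor_key_delete,
)

service = 'admin'               # assumed cephx id; real charms pass their own
key = 'charm-migration-state'   # hypothetical key name

monitor_key_set(service, key, 'in-progress')
if monitor_key_exists(service, key):
    log("stored value: {}".format(monitor_key_get(service, key)))
monitor_key_delete(service, key)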
View File

@@ -10,7 +10,7 @@ from charmhelpers.core.hookenv import (
relation_get,
)
from charmhelpers.contrib.openstack import context
from socket import gethostbyname
def _edge_settings():
'''
@@ -28,39 +28,19 @@ def _edge_settings():
return ctxt
def _container_settings():
'''
Inspects current container relation to get keystone context.
'''
container_settings = {
'auth_host': '10.0.0.1',
'auth_port': '35357',
'auth_protocol': 'http',
'service_protocol': 'http',
'service_host': '10.0.0.1',
'service_port': '35357',
'service_tenant': 'admin',
'service_username': 'admin',
'service_password': 'admin',
}
for rid in relation_ids('container'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
if 'auth_host' not in rdata:
continue
container_settings = {
'auth_host': rdata['auth_host'],
'auth_port': rdata['auth_port'],
'auth_protocol': rdata['auth_protocol'],
'service_protocol': rdata['service_protocol'],
'service_host': rdata['service_host'],
'service_port': rdata['service_port'],
'service_tenant': rdata['service_tenant'],
'service_username': rdata['service_username'],
'service_password': rdata['service_password'],
}
return container_settings
return container_settings
def _identity_context():
ctxs = [ { "auth_host": gethostbyname(hostname),
"auth_port": relation_get("service_port", unit, rid),
"admin_user": relation_get("service_username", unit, rid),
"admin_password": relation_get("service_password", unit, rid),
"service_protocol": relation_get("service_protocol", unit, rid),
"admin_tenant_name": relation_get("service_tenant_name", unit, rid) }
for rid in relation_ids("identity-admin")
for unit, hostname in
((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
if hostname ]
print ctxs
return ctxs[0] if ctxs else {}
class NeutronPGPluginContext(context.NeutronContext):
@@ -106,6 +86,10 @@ class NeutronPGPluginContext(context.NeutronContext):
conf = config()
enable_metadata = conf['enable-metadata']
# (TODO) get this information from director
pg_ctxt['pg_username'] = conf['plumgrid-username']
pg_ctxt['pg_password'] = conf['plumgrid-password']
pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
pg_ctxt['enable_metadata'] = enable_metadata
pg_ctxt['pg_metadata_ip'] = '169.254.169.254'
pg_ctxt['pg_metadata_port'] = '8775'
@@ -117,12 +101,12 @@ class NeutronPGPluginContext(context.NeutronContext):
else:
pg_ctxt['nova_metadata_proxy_secret'] = 'plumgrid'
#neutron_api_settings = _container_settings()
#pg_ctxt['admin_user'] = neutron_api_settings['service_username']
#pg_ctxt['admin_password'] = neutron_api_settings['service_password']
#pg_ctxt['admin_tenant_name'] = neutron_api_settings['service_tenant']
#pg_ctxt['service_protocol'] = neutron_api_settings['auth_protocol']
#pg_ctxt['auth_port'] = neutron_api_settings['auth_port']
#pg_ctxt['auth_host'] = neutron_api_settings['auth_host']
identity_context = _identity_context()
pg_ctxt['admin_user'] = identity_context['admin_user']
pg_ctxt['admin_password'] = identity_context['admin_password']
pg_ctxt['admin_tenant_name'] = identity_context['admin_tenant_name']
pg_ctxt['service_protocol'] = identity_context['service_protocol']
pg_ctxt['auth_port'] = identity_context['auth_port']
pg_ctxt['auth_host'] = identity_context['auth_host']
return pg_ctxt

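_identity_context() scans the identity-admin relation and returns the first unit that has published a resolvable service_hostname. The same selection logic, restated over plain dicts as a hypothetical stand-in so it can be run outside a hook environment; note the real code additionally resolves the hostname with gethostbyname():

# Sketch: hypothetical stand-in for _identity_context() over plain dicts.
def pick_identity(units):
    ctxs = [{'auth_host': u['service_hostname'],  # real code resolves via gethostbyname()
             'auth_port': u['service_port'],
             'admin_user': u['service_username'],
             'admin_password': u['service_password'],
             'service_protocol': u['service_protocol'],
             'admin_tenant_name': u['service_tenant_name']}
            for u in units if u.get('service_hostname')]
    return ctxs[0] if ctxs else {}

print(pick_identity([{},  # unit that has not published its data yet
                     {'service_hostname': 'keystone.example',
                      'service_port': '35357',
                      'service_username': 'admin',
                      'service_password': 'secret',
                      'service_protocol': 'http',
                      'service_tenant_name': 'admin'}]))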
View File

@@ -11,8 +11,8 @@ from charmhelpers.contrib.python.packages import pip_uninstall
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
log,
relation_set
log,\
relation_get
)
from charmhelpers.core.host import (
@@ -31,10 +31,7 @@ from neutron_plumgrid_utils import (
register_configs,
restart_map,
ensure_files,
)
from charmhelpers.contrib.openstack.utils import (
os_release,
set_neutron_relation,
)
hooks = Hooks()
@@ -85,19 +82,22 @@ def relation_changed():
@hooks.hook("neutron-plugin-api-subordinate-relation-joined")
def neutron_plugin_joined():
# create plugin config
release = os_release('neutron-server', base='kilo')
print "#############"
print release
plugin = "neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" \
if release == 'kilo'\
else "networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2"
settings = { "neutron-plugin": "plumgrid",
"core-plugin": plugin,
"neutron-plugin-config": "/etc/neutron/plugins/plumgrid/plumgrid.ini",
"service-plugins": " ",
"quota-driver": " "}
relation_set(relation_settings=settings)
set_neutron_relation()
@hooks.hook("identity-admin-relation-changed")
def identity_admin_changed():
if not relation_get("service_hostname"):
log("Relation not ready")
return
identity_admin_relation()
@hooks.hook("identity-admin-relation-departed")
@hooks.hook("identity-admin-relation-broken")
@restart_on_change(restart_map())
def identity_admin_relation():
CONFIGS.write_all()
@hooks.hook('stop')
@@ -105,10 +105,11 @@ def stop():
'''
This hook is run when the charm is destroyed.
'''
pkgs = determine_packages()
for pkg in pkgs:
apt_purge(pkg, fatal=False)
pip_uninstall('networking-plumgrid')
print "exiting"
#pkgs = determine_packages()
#for pkg in pkgs:
# apt_purge(pkg, fatal=False)
#pip_uninstall('networking-plumgrid')
def main():

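neutron_plugin_joined() picks the Neutron core plugin path from the detected OpenStack release; the same branch pulled out as a plain, hypothetical helper so the two driver strings are easy to compare:

# Sketch: release -> core_plugin selection used in neutron_plugin_joined().
def core_plugin_for(release):
    if release == 'kilo':
        return ('neutron.plugins.plumgrid.plumgrid_plugin.'
                'plumgrid_plugin.NeutronPluginPLUMgridV2')
    return 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2'

assert core_plugin_for('kilo').startswith('neutron.plugins.plumgrid')
assert core_plugin_for('liberty').startswith('networking_plumgrid')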
View File

@@ -6,7 +6,9 @@
from collections import OrderedDict
from copy import deepcopy
import os
from subprocess import check_call
from charmhelpers.contrib.openstack import templating
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
from charmhelpers.contrib.python.packages import pip_install
from charmhelpers.fetch import (
apt_cache
@@ -17,6 +19,9 @@ from charmhelpers.core.hookenv import (
from charmhelpers.contrib.openstack.utils import (
os_release,
)
from charmhelpers.core.hookenv import (
relation_set,
)
import neutron_plumgrid_context
@@ -24,7 +29,7 @@ TEMPLATES = 'templates/'
PG_PACKAGES = [
'plumgrid-pythonlib',
'neutron-plugin-plumgrid'
#'neutron-plugin-plumgrid'
]
NEUTRON_CONF_DIR = "/etc/neutron"
@@ -72,7 +77,12 @@ def determine_packages():
"Build version '%s' for package '%s' not available" \
% (tag, pkg)
raise ValueError(error_msg)
# return list(set(PG_PACKAGES))
# if subordinate
#pkgs.append('neutron-plugin-plumgrid')
cmd = ['mkdir', '-p', '/etc/neutron/plugins/plumgrid']
check_call(cmd)
cmd = ['touch', '/etc/neutron/plugins/plumgrid/plumgrid.ini']
check_call(cmd)
return pkgs
@@ -90,7 +100,7 @@ def register_configs(release=None):
Returns an object of the Openstack Tempating Class which contains the
the context required for all templates of this charm.
'''
release = release or os_release('neutron-server', base='kilo')
release = release or os_release('neutron-common', base='kilo')
if release < 'kilo':
raise ValueError('OpenStack %s release not supported' % release)
@@ -123,10 +133,25 @@ def install_networking_plumgrid():
'''
Installs networking-plumgrid package
'''
release = os_release('neutron-server', base='kilo')
release = os_release('neutron-common', base='kilo')
if config('networking-plumgrid-version') is None:
package_version = NETWORKING_PLUMGRID_VERSION[release]
else:
package_version = config('networking-plumgrid-version')
package_name = 'networking-plumgrid==%s' % package_version
pip_install(package_name, fatal=True)
def set_neutron_relation():
#release = os_release('neutron-common', base='kilo')
#plugin = "neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" \
# if release == 'kilo'\
# else "networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2"
print "#### core-plugin: %s" % neutron_plugin_attribute('plumgrid','driver','neutron')
print "#### neutron-plugin-config %s" % neutron_plugin_attribute('plumgrid','config','neutron')
settings = { "neutron-plugin": "plumgrid",
"core-plugin": neutron_plugin_attribute('plumgrid','driver','neutron'),
"neutron-plugin-config": neutron_plugin_attribute('plumgrid','config','neutron'),
"service-plugins": " ",
"quota-driver": " "}
relation_set(relation_settings=settings)
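install_networking_plumgrid() resolves the pip requirement from either the networking-plumgrid-version config option or the charm's per-release map. A small sketch of that resolution; the NETWORKING_PLUMGRID_VERSION values below are placeholders, not the charm's actual pins:

# Sketch: how the pip requirement string is assembled (hypothetical versions).
NETWORKING_PLUMGRID_VERSION = {'kilo': '2015.1.1', 'liberty': '2015.2.1'}

def plumgrid_pip_spec(release, configured_version=None):
    version = configured_version or NETWORKING_PLUMGRID_VERSION[release]
    return 'networking-plumgrid==%s' % version

print(plumgrid_pip_spec('kilo'))                                    # networking-plumgrid==2015.1.1
print(plumgrid_pip_spec('liberty', configured_version='2015.2.2'))  # networking-plumgrid==2015.2.2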