From 3ffc850f342aab5996fd6ecfde1c06224d7863ad Mon Sep 17 00:00:00 2001
From: Chris MacNaughton
Date: Thu, 15 Sep 2016 09:33:40 -0700
Subject: [PATCH] nearly complete

---
 src/layer.yaml                |   2 +-
 src/metadata.yaml             |  11 +--
 src/reactive/ceph_fs.py       | 140 ++++++++++++++++++++++++++++++++++
 src/reactive/charm_ceph_fs.py |  71 -----------------
 src/templates/ceph.conf       |  19 +----
 5 files changed, 151 insertions(+), 92 deletions(-)
 create mode 100644 src/reactive/ceph_fs.py
 delete mode 100644 src/reactive/charm_ceph_fs.py

diff --git a/src/layer.yaml b/src/layer.yaml
index c58e112..07b0417 100644
--- a/src/layer.yaml
+++ b/src/layer.yaml
@@ -1 +1 @@
-includes: ['layer:ceph-base'] # if you use any interfaces, add them here
+includes: ['layer:ceph-base', 'interface:ceph-admin'] # if you use any interfaces, add them here
diff --git a/src/metadata.yaml b/src/metadata.yaml
index bbe5656..5f12fda 100644
--- a/src/metadata.yaml
+++ b/src/metadata.yaml
@@ -1,4 +1,4 @@
-name: charm-ceph-fs
+name: ceph-fs
 summary: Highly scalable distributed storage
 maintainer: OpenStack Charmers
 description: |
@@ -9,8 +9,9 @@ tags:
   - storage
   - file-servers
   - misc
+series:
+  - xenial
 subordinate: false
-#provides:
-#requires:
-  #ceph-mon-cephfs:
-    #interface: ceph-mon-cephfs
+requires:
+  ceph-admin:
+    interface: ceph-admin
diff --git a/src/reactive/ceph_fs.py b/src/reactive/ceph_fs.py
new file mode 100644
index 0000000..a4b6283
--- /dev/null
+++ b/src/reactive/ceph_fs.py
@@ -0,0 +1,140 @@
+import os
+import subprocess
+import json
+
+from charms.reactive import when, when_not, set_state
+
+from charms.apt import queue_install
+
+from charmhelpers.core.hookenv import (
+    config, charm_name,
+    log, INFO, ERROR)
+
+from charmhelpers.core.host import service_restart
+
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network
+)
+
+import jinja2
+
+TEMPLATES_DIR = 'templates'
+
+
+def render_template(template_name, context, template_dir=TEMPLATES_DIR):
+    templates = jinja2.Environment(
+        loader=jinja2.FileSystemLoader(template_dir))
+    template = templates.get_template(template_name)
+    return template.render(context)
+
+@when_not('apt.installed.ceph-mds')
+def install_cephfs():
+    queue_install(['ceph-mds'])
+
+
+@when('apt.installed.ceph-mds', 'ceph.installed')
+@when_not('cephfs.started')
+def setup_mds():
+    log("I'm in setup_mds()")
+    # try:
+    #     from rados import Error as RadosError
+    #     from ceph_api import ceph_command
+    # except ImportError as err:
+    #     log("rados is not installed yet: {}".format(err))
+    #     return
+    # TODO: Monitor needs a new CephFS relation
+    # TODO: Update with the conf file location
+    # osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf')
+    # mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf')
+
+    try:
+        name = charm_name()
+        log("Creating cephfs_data pool", level=INFO)
+        data_pool = "{}_data".format(name)
+        # TODO: Update with better pg values
+        try:
+            subprocess.check_call(["ceph", "osd", "pool", "create", data_pool, "256"])
+        except subprocess.CalledProcessError as err:
+            log("Creating data pool failed!")
+            raise(err)
+        # osd.osd_pool_create('cephfs_data', 256)
+
+        log("Creating cephfs_metadata pool", level=INFO)
+        metadata_pool = "{}_metadata".format(name)
+        # TODO: Update with better pg values
+        try:
+            subprocess.check_call(["ceph", "osd", "pool", "create", metadata_pool, "256"])
+        except subprocess.CalledProcessError as err:
+            log("Creating metadata pool failed!")
+            raise(err)
+        # osd.osd_pool_create('cephfs_metadata', 256)
+
+        log("Creating ceph fs", level=INFO)
+        try:
subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool]) + except subprocess.CalledProcessError as err: + log("Creating metadata pool failed!") + raise(err) + service_restart('ceph-mds') + set_state('cephfs.started') + # mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) + except subprocess.CalledProcessError as err: + log(message='Error: {}'.format(err), level=ERROR) + + +@when('ceph-admin.available') +# @when_not('cephfs.configured') +def config_changed(ceph_client): + charm_ceph_conf = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.conf') + cephx_key = os.path.join(os.sep, + 'etc', + 'ceph', + 'ceph.client.admin.keyring') + + networks = get_networks('ceph-public-network') + public_network = ', '.join(networks) + + networks = get_networks('ceph-cluster-network') + cluster_network = ', '.join(networks) + + ceph_context = { + 'mon_hosts': ceph_client.mon_hosts(), + 'fsid': ceph_client.fsid(), + 'auth_supported': ceph_client.auth(), + 'use_syslog': str(config('use-syslog')).lower(), + 'ceph_public_network': public_network, + 'ceph_cluster_network': cluster_network, + 'loglevel': config('loglevel'), + } + + try: + with open(charm_ceph_conf, 'w') as ceph_conf: + ceph_conf.write(render_template('ceph.conf', ceph_context)) + except IOError as err: + log("IOError writing ceph.conf: {}".format(err)) + + try: + with open(cephx_key, 'w') as key_file: + key_file.write("[client.admin]\n\tkey = {}\n".format( + ceph_client.key() + )) + except IOError as err: + log("IOError writing ceph.client.admin.keyring: {}".format(err)) + set_state('cephfs.configured') + + +def get_networks(config_opt='ceph-public-network'): + """Get all configured networks from provided config option. + + If public network(s) are provided, go through them and return those for + which we have an address configured. 
+ """ + networks = config(config_opt) + if networks: + networks = networks.split() + return [n for n in networks if get_address_in_network(n)] + + return [] diff --git a/src/reactive/charm_ceph_fs.py b/src/reactive/charm_ceph_fs.py deleted file mode 100644 index b661fe7..0000000 --- a/src/reactive/charm_ceph_fs.py +++ /dev/null @@ -1,71 +0,0 @@ -from charms.reactive import when - -from charmhelpers.core.hookenv import ( - config, - log, INFO, ERROR) - -from charmhelpers.contrib.network.ip import ( - get_address_in_network -) - -@when('ceph.installed') -# @when('ceph-mon.available') -def setup_mds(mon): - log("I'm in setup_mds()") - try: - from rados import Error as RadosError - from ceph_api import ceph_command - except ImportError as err: - log("rados is not installed yet: {}".format(err)) - return - # TODO: Monitor needs a new CephFS relation - # TODO: Update with the conf file location - osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf') - mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf') - - try: - log("Creating cephfs_data pool", level=INFO) - # TODO: Update with better pg values - osd.osd_pool_create('cephfs_data', 256) - - log("Creating cephfs_metadata pool", level=INFO) - # TODO: Update with better pg values - osd.osd_pool_create('cephfs_metadata', 256) - - log("Creating ceph fs", level=INFO) - mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"]) - except RadosError as err: - log(message='Error: {}'.format(err.message), level=ERROR) - - -@when('config.changed', 'ceph-mon.available') -def config_changed(): - networks = get_networks('ceph-public-network') - public_network = ', '.join(networks) - - networks = get_networks('ceph-cluster-network') - cluster_network = ', '.join(networks) - - cephcontext = { - # 'mon_hosts': ' '.join(get_mon_hosts()), - # 'fsid': leader_get('fsid'), - 'osd_journal_size': config('osd-journal-size'), - 'use_syslog': str(config('use-syslog')).lower(), - 'ceph_public_network': public_network, - 'ceph_cluster_network': cluster_network, - 'loglevel': config('loglevel'), - } - - -def get_networks(config_opt='ceph-public-network'): - """Get all configured networks from provided config option. - - If public network(s) are provided, go through them and return those for - which we have an address configured. - """ - networks = config(config_opt) - if networks: - networks = networks.split() - return [n for n in networks if get_address_in_network(n)] - - return [] diff --git a/src/templates/ceph.conf b/src/templates/ceph.conf index ea9b733..cd62c5a 100644 --- a/src/templates/ceph.conf +++ b/src/templates/ceph.conf @@ -1,8 +1,8 @@ + [global] auth cluster required = {{ auth_supported }} auth service required = {{ auth_supported }} auth client required = {{ auth_supported }} -{% endif %} keyring = /etc/ceph/$cluster.$name.keyring mon host = {{ mon_hosts }} fsid = {{ fsid }} @@ -14,20 +14,9 @@ mon cluster log to syslog = {{ use_syslog }} debug mon = {{ loglevel }}/5 debug osd = {{ loglevel }}/5 -{%- if ceph_public_network is string %} -public network = {{ ceph_public_network }} -{%- endif %} -{%- if ceph_cluster_network is string %} -cluster network = {{ ceph_cluster_network }} -{%- endif %} - -{% if public_addr %} -public addr = {{ public_addr }} -{% endif %} -{% if cluster_addr %} -cluster addr = {{ cluster_addr }} -{%- endif %} +[client] +log file = /var/log/ceph.log [mds] -keyring = /var/lib/ceph/mds/$cluster-$id/keyring +keyring = /etc/ceph/ceph.client.admin.keyring