Service crashes with wrong mds name but everything else works
parent 3ffc850f34
commit e2d4555e49
@@ -1 +1,2 @@
-includes: ['layer:ceph-base', 'interface:ceph-admin']  # if you use any interfaces, add them here
+includes: ['layer:ceph-base', 'interface:/home/chris/repos/juju-interface-ceph-mds', 'interface:/home/chris/repos/juju-interface-ceph-admin']  # if you use any interfaces, add them here
+repo: git@github.com:cholcombe973/charm-ceph-fs.git
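A side note on the includes change: pointing the 'interface:' entries at absolute paths under /home/chris ties `charm build` to this machine's checkouts. Once the two interface layers are published, the portable form would presumably be the registry names (a sketch, not part of this commit):

    includes: ['layer:ceph-base', 'interface:ceph-mds', 'interface:ceph-admin']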
@@ -13,5 +13,7 @@ series:
   - xenial
 subordinate: false
 requires:
+  ceph-mds:
+    interface: ceph-mds
   ceph-admin:
     interface: ceph-admin
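These relation names are what drive the reactive states used in the handlers below: by charms.reactive convention, an interface layer raises '<relation>.available' once the relation delivers its data. A sketch of the plumbing this metadata enables (the state and method names assume the conventions of the two local interface layers named in layer.yaml):

    from charmhelpers.core.hookenv import log
    from charms.reactive import when

    # Sketch only: 'ceph-admin.available' / 'ceph-mds.available' are expected
    # to be set by the interface layers once the monitors hand over their
    # data; ceph_client is the endpoint object those layers construct.
    @when('ceph-admin.available')
    def example_handler(ceph_client):
        log("admin key received: {}".format(bool(ceph_client.key())))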
@@ -1,23 +1,20 @@
 import os
 import socket
 import subprocess
 import json
 
 from charms.reactive import when, when_not, set_state
 
+from charms.apt import queue_install
+
 from charmhelpers.core.hookenv import (
     config, charm_name,
     log, INFO, ERROR)
 
 from charmhelpers.core.host import service_restart
 
 from charmhelpers.contrib.storage.linux import ceph
 from charmhelpers.contrib.network.ip import (
     get_address_in_network
 )
 
 import jinja2
 
-from charms.apt import queue_install
-
 TEMPLATES_DIR = 'templates'
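The next hunk opens inside render_template(); only its last two lines are visible in the diff. For reference, the whole helper is presumably the standard jinja2 loader pattern (a sketch reconstructed from those two lines):

    import jinja2

    TEMPLATES_DIR = 'templates'

    def render_template(template_name, context, template_dir=TEMPLATES_DIR):
        # Load templates/ from the charm directory and render with the
        # context dict built in config_changed().
        templates = jinja2.Environment(
            loader=jinja2.FileSystemLoader(template_dir))
        template = templates.get_template(template_name)
        return template.render(context)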
@@ -27,72 +24,73 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR):
     template = templates.get_template(template_name)
     return template.render(context)
 
 
 @when_not('apt.installed.ceph-mds')
 def install_cephfs():
     queue_install(['ceph-mds'])
 
 
-@when('apt.installed.ceph-mds', 'ceph.installed')
+@when('cephfs.configured')
+@when('admin_key.saved')
 @when_not('cephfs.started')
 def setup_mds():
     log("I'm in setup_mds()")
     # try:
     #     from rados import Error as RadosError
     #     from ceph_api import ceph_command
     # except ImportError as err:
     #     log("rados is not installed yet: {}".format(err))
     #     return
     # TODO: Monitor needs a new CephFS relation
     # TODO: Update with the conf file location
     # osd = ceph_command.OsdCommand('/etc/ceph/ceph.conf')
     # mds = ceph_command.MdsCommand('/etc/ceph/ceph.conf')
 
     try:
         name = charm_name()
         log("Creating cephfs_data pool", level=INFO)
         data_pool = "{}_data".format(name)
         # TODO: Update with better pg values
         try:
-            subprocess.check_call(["ceph", "osd", "pool", "create", data_pool, "256"])
+            ceph.ReplicatedPool(name=data_pool, service='admin').create()
         except subprocess.CalledProcessError as err:
             log("Creating data pool failed!")
-            raise(err)
-        # osd.osd_pool_create('cephfs_data', 256)
+            raise err
 
         log("Creating cephfs_metadata pool", level=INFO)
         metadata_pool = "{}_metadata".format(name)
         # TODO: Update with better pg values
         try:
-            subprocess.check_call(["ceph", "osd", "pool", "create", metadata_pool, "256"])
+            ceph.ReplicatedPool(name=metadata_pool, service='admin').create()
         except subprocess.CalledProcessError as err:
             log("Creating metadata pool failed!")
-            raise(err)
-        # osd.osd_pool_create('cephfs_metadata', 256)
+            raise err
 
         log("Creating ceph fs", level=INFO)
         try:
             subprocess.check_call(["ceph", "fs", "new", name, metadata_pool, data_pool])
         except subprocess.CalledProcessError as err:
             log("Creating metadata pool failed!")
-            raise(err)
+            raise err
+        service_restart('ceph-mds')
+        set_state('cephfs.started')
         # mds.mds_newfs(metadata='cephfs_metadata', data='cephfs_data', sure=["--yes-i-really-mean-it"])
     except subprocess.CalledProcessError as err:
         log(message='Error: {}'.format(err), level=ERROR)
 
 
 @when('ceph-admin.available')
+# @when_not('cephfs.configured')
+def handle_admin_key(ceph_client):
+    cephx_key = os.path.join(os.sep,
+                             'etc',
+                             'ceph',
+                             'ceph.client.admin.keyring')
+    try:
+        with open(cephx_key, 'w') as key_file:
+            key_file.write("[client.admin]\n\tkey = {}\n".format(
+                ceph_client.key()
+            ))
+    except IOError as err:
+        log("IOError writing mds-a.keyring: {}".format(err))
+    set_state('admin_key.saved')
+
+
+@when('ceph-mds.available')
 def config_changed(ceph_client):
     charm_ceph_conf = os.path.join(os.sep,
                                    'etc',
                                    'ceph',
                                    'ceph.conf')
     cephx_key = os.path.join(os.sep,
                              'etc',
                              'ceph',
                              'ceph.client.admin.keyring')
+    key_path = os.path.join(os.sep, 'var', 'lib', 'ceph', 'mds', 'ceph-a')
+    if not os.path.exists(key_path):
+        os.makedirs(key_path)
+    cephx_key = os.path.join(key_path,
+                             'keyring')
 
     networks = get_networks('ceph-public-network')
     public_network = ', '.join(networks)
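The pool creation here moves from shelling out with a hard-coded pg count of 256 to charmhelpers' ReplicatedPool, which issues the same 'ceph osd pool create' as the named cephx user and works out a pg_num when none is supplied. A sketch of the equivalent call (the pool name is illustrative):

    from charmhelpers.contrib.storage.linux import ceph

    # Roughly: ceph --id admin osd pool create ceph-fs_data <calculated pg_num>
    ceph.ReplicatedPool(name='ceph-fs_data', service='admin').create()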
@@ -108,6 +106,8 @@ def config_changed(ceph_client):
         'ceph_public_network': public_network,
         'ceph_cluster_network': cluster_network,
         'loglevel': config('loglevel'),
+        'hostname': socket.gethostname(),
+        'mds_name': 'a',
     }
 
     try:
@@ -118,11 +118,11 @@ def config_changed(ceph_client):
 
     try:
         with open(cephx_key, 'w') as key_file:
-            key_file.write("[client.admin]\n\tkey = {}\n".format(
+            key_file.write("[mds.a]\n\tkey = {}\n".format(
                 ceph_client.key()
             ))
     except IOError as err:
-        log("IOError writing ceph.client.admin.keyring: {}".format(err))
+        log("IOError writing mds-a.keyring: {}".format(err))
     set_state('cephfs.configured')
 
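On the commit title: cephx only lets a daemon authenticate as mds.a if a matching mds.a entity exists on the monitors, but the hunk above writes the *admin* key under an [mds.a] heading, a name/key pair the cluster never issued. That mismatch is consistent with the service still crashing on its mds name. A sketch of minting a real mds key instead (the capability profile is an assumption, not something this commit does):

    import os
    import subprocess

    def create_mds_key(mds_id='a', cluster='ceph'):
        # Matches the keyring path the template below hands to ceph-mds.
        key_dir = '/var/lib/ceph/mds/{}-{}'.format(cluster, mds_id)
        os.makedirs(key_dir, exist_ok=True)
        # get-or-create mints (or fetches) the entity and writes it out in
        # keyring format, [mds.a] section included.
        subprocess.check_call([
            'ceph', 'auth', 'get-or-create', 'mds.{}'.format(mds_id),
            'mon', 'allow profile mds',
            'osd', 'allow rwx',
            'mds', 'allow',
            '-o', os.path.join(key_dir, 'keyring'),
        ])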
@@ -18,5 +18,9 @@ debug osd = {{ loglevel }}/5
 log file = /var/log/ceph.log
 
 [mds]
-keyring = /etc/ceph/ceph.client.admin.keyring
+keyring = /var/lib/ceph/mds/$cluster-$id/keyring
+
+[mds.{{ mds_name }}]
+host = {{ hostname }}
+
 
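For reference, with the 'mds_name': 'a' context value added above and an example hostname, the new template lines render as below; $cluster-$id expands to ceph-a for the default cluster name, which is exactly the key_path that config_changed() now creates:

    [mds]
    keyring = /var/lib/ceph/mds/$cluster-$id/keyring

    [mds.a]
    host = ceph-fs-0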