Add flake8 and charm proof lint targets to the Makefile

This commit is contained in:
James Page 2013-06-24 10:13:52 +01:00
parent bc84ae2559
commit 068b8b05a4
5 changed files with 158 additions and 163 deletions

5
Makefile Normal file
View File

@ -0,0 +1,5 @@
#!/usr/bin/make
lint:
flake8 --exclude hooks/charmhelpers hooks
charm proof

View File

@ -12,9 +12,14 @@ import subprocess
import time
import os
import apt_pkg as apt
from charmhelpers.core.host import (
mkdir,
service_start,
log
)
from utils import (
get_unit_hostname
)
get_unit_hostname
)
LEADER = 'leader'
PEON = 'peon'
@ -30,7 +35,7 @@ def is_quorum():
"--admin-daemon",
asok,
"mon_status"
]
]
if os.path.exists(asok):
try:
result = json.loads(subprocess.check_output(cmd))
@ -54,7 +59,7 @@ def is_leader():
"--admin-daemon",
asok,
"mon_status"
]
]
if os.path.exists(asok):
try:
result = json.loads(subprocess.check_output(cmd))
@ -84,7 +89,7 @@ def add_bootstrap_hint(peer):
asok,
"add_bootstrap_peer_hint",
peer
]
]
if os.path.exists(asok):
# Ignore any errors for this call
subprocess.call(cmd)
@ -93,7 +98,7 @@ DISK_FORMATS = [
'xfs',
'ext4',
'btrfs'
]
]
def is_osd_disk(dev):
@ -103,7 +108,7 @@ def is_osd_disk(dev):
for line in info:
if line.startswith(
'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D'
):
):
return True
except subprocess.CalledProcessError:
pass
@ -114,7 +119,7 @@ def rescan_osd_devices():
cmd = [
'udevadm', 'trigger',
'--subsystem-match=block', '--action=add'
]
]
subprocess.call(cmd)
@ -144,7 +149,7 @@ def import_osd_bootstrap_key(key):
'--create-keyring',
'--name=client.bootstrap-osd',
'--add-key={}'.format(key)
]
]
subprocess.check_call(cmd)
# OSD caps taken from ceph-create-keys
@ -152,10 +157,10 @@ _osd_bootstrap_caps = {
'mon': [
'allow command osd create ...',
'allow command osd crush set ...',
r'allow command auth add * osd allow\ * mon allow\ rwx',
r'allow command auth add * osd allow\ * mon allow\ rwx',
'allow command mon getmap'
]
}
]
}
def get_osd_bootstrap_key():
@ -173,14 +178,14 @@ def import_radosgw_key(key):
'--create-keyring',
'--name=client.radosgw.gateway',
'--add-key={}'.format(key)
]
]
subprocess.check_call(cmd)
# OSD caps taken from ceph-create-keys
_radosgw_caps = {
'mon': ['allow r'],
'osd': ['allow rwx']
}
}
def get_radosgw_key():
@ -190,7 +195,7 @@ def get_radosgw_key():
_default_caps = {
'mon': ['allow r'],
'osd': ['allow rwx']
}
}
def get_named_key(name, caps=None):
@ -200,16 +205,16 @@ def get_named_key(name, caps=None):
'--name', 'mon.',
'--keyring',
'/var/lib/ceph/mon/ceph-{}/keyring'.format(
get_unit_hostname()
),
get_unit_hostname()
),
'auth', 'get-or-create', 'client.{}'.format(name),
]
]
# Add capabilities
for subsystem, subcaps in caps.iteritems():
cmd.extend([
subsystem,
'; '.join(subcaps),
])
])
output = subprocess.check_output(cmd).strip() # IGNORE:E1103
# get-or-create appears to have different output depending
# on whether its 'get' or 'create'
@ -225,6 +230,42 @@ def get_named_key(name, caps=None):
return key
def bootstrap_monitor_cluster(secret):
    """Initialise the ceph-mon filesystem for this unit and start the mon.

    Creates the mon keyring from *secret*, runs ``ceph-mon --mkfs``, drops
    the ``done`` and ``upstart`` marker files, and starts the
    ``ceph-mon-all`` service.  Idempotent: if the ``done`` marker already
    exists, nothing is changed.

    :param secret: the mon. key used to seed the monitor keyring.
    """
    hostname = get_unit_hostname()
    path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    done = '{}/done'.format(path)
    upstart = '{}/upstart'.format(path)
    keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
    if os.path.exists(done):
        log('bootstrap_monitor_cluster: mon already initialized.')
        return

    # Ceph >= 0.61.3 needs this for ceph-mon fs creation
    # (0o755 == old-style 0755; modern literal, same mode bits)
    mkdir('/var/run/ceph', perms=0o755)
    mkdir(path)
    # end changes for Ceph >= 0.61.3
    try:
        subprocess.check_call(['ceph-authtool', keyring,
                               '--create-keyring', '--name=mon.',
                               '--add-key={}'.format(secret),
                               '--cap', 'mon', 'allow *'])
        subprocess.check_call(['ceph-mon', '--mkfs',
                               '-i', hostname,
                               '--keyring', keyring])
        # Marker files: 'done' records successful init, 'upstart' tells
        # the packaging that this mon is managed by upstart.
        with open(done, 'w'):
            pass
        with open(upstart, 'w'):
            pass
        service_start('ceph-mon-all')
    finally:
        # The temporary keyring holds the mon secret - never leave it
        # behind, whether bootstrap succeeded or raised.
        # (The previous bare 'except: raise' was redundant: try/finally
        # already re-raises after cleanup.)
        os.unlink(keyring)
def get_ceph_version():
apt.init()
cache = apt.Cache()
@ -237,3 +278,51 @@ def get_ceph_version():
def version_compare(a, b):
    """Delegate comparison of two version strings to apt_pkg.

    Returns whatever ``apt_pkg.version_compare`` returns for (a, b)
    (negative / zero / positive, cmp-style).
    """
    result = apt.version_compare(a, b)
    return result
def update_monfs():
    """Flag an existing mon filesystem as upstart-managed.

    If this unit already has a mon data directory but no ``upstart``
    marker file, create the (empty) marker so the mon gets started
    correctly on reboots.  No-op otherwise.
    """
    hostname = get_unit_hostname()
    monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
    upstart = '{}/upstart'.format(monfs)
    needs_marker = os.path.exists(monfs) and not os.path.exists(upstart)
    if needs_marker:
        # Mark mon as managed by upstart so that it gets started
        # correctly on reboots
        open(upstart, 'w').close()
def osdize(dev, osd_format, osd_journal, reformat_osd=False):
    """Prepare block device *dev* as a ceph OSD via ceph-disk-prepare.

    :param dev: path to the block device to prepare.
    :param osd_format: filesystem type to pass as --fs-type (if truthy
        and ceph is new enough).
    :param osd_journal: optional journal device/path, appended when it
        exists on disk.
    :param reformat_osd: when True, prepare the device even if it
        already looks like an OSD.
    """
    # Guard clauses: skip anything we must not touch.
    if not os.path.exists(dev):
        log('Path {} does not exist - bailing'.format(dev))
        return
    if is_osd_disk(dev) and not reformat_osd:
        log('Looks like {} is already an OSD, skipping.'.format(dev))
        return
    if device_mounted(dev):
        log('Looks like {} is in use, skipping.'.format(dev))
        return

    prepare = ['ceph-disk-prepare']
    # Later versions of ceph support more options
    # NOTE(review): this is a plain string comparison against "0.48.3";
    # assumes get_ceph_version() output collates correctly - confirm.
    if get_ceph_version() >= "0.48.3":
        if osd_format:
            prepare.extend(['--fs-type', osd_format])
        prepare.append(dev)
        if osd_journal and os.path.exists(osd_journal):
            prepare.append(osd_journal)
    else:
        # Just provide the device - no other options
        # for older versions of ceph
        prepare.append(dev)
    subprocess.call(prepare)
def device_mounted(dev):
    """Report whether the first partition of *dev* appears in /proc/mounts.

    Checks for ``<dev>1`` (e.g. /dev/sdb -> /dev/sdb1) as a whole word
    via grep; True when grep finds a match (exit status 0).
    """
    partition = dev + '1'
    status = subprocess.call(['grep', '-wqs', partition, '/proc/mounts'])
    return status == 0
def filesystem_mounted(fs):
    """Report whether mount point *fs* appears (as a whole word) in /proc/mounts."""
    status = subprocess.call(['grep', '-wqs', fs, '/proc/mounts'])
    return status == 0

View File

@ -10,37 +10,36 @@
import glob
import os
import subprocess
import shutil
import sys
import ceph
#import utils
from charmhelpers.core.hookenv import (
log,
ERROR,
config,
relation_ids,
related_units,
relation_get,
relation_set,
remote_unit,
Hooks,
UnregisteredHookError
)
log,
ERROR,
config,
relation_ids,
related_units,
relation_get,
relation_set,
remote_unit,
Hooks,
UnregisteredHookError
)
from charmhelpers.core.host import (
apt_install,
apt_update,
filter_installed_packages,
mkdir
)
apt_install,
apt_update,
filter_installed_packages,
service_start,
umount
)
from utils import (
render_template,
configure_source,
get_host_ip,
get_unit_hostname
)
render_template,
configure_source,
get_host_ip,
)
hooks = Hooks()
@ -68,7 +67,7 @@ def emit_cephconf():
'mon_hosts': ' '.join(get_mon_hosts()),
'fsid': config('fsid'),
'version': ceph.get_ceph_version()
}
}
with open('/etc/ceph/ceph.conf', 'w') as cephconf:
cephconf.write(render_template('ceph.conf', cephcontext))
@ -96,25 +95,23 @@ def config_changed():
emit_cephconf()
e_mountpoint = config('ephemeral-unmount')
if (e_mountpoint and
filesystem_mounted(e_mountpoint)):
subprocess.call(['umount', e_mountpoint])
if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
umount(e_mountpoint)
osd_journal = config('osd-journal')
if (osd_journal and
not os.path.exists(JOURNAL_ZAPPED) and
os.path.exists(osd_journal)):
if (osd_journal and not os.path.exists(JOURNAL_ZAPPED)
and os.path.exists(osd_journal)):
ceph.zap_disk(osd_journal)
with open(JOURNAL_ZAPPED, 'w') as zapped:
zapped.write('DONE')
for dev in config('osd-devices').split(' '):
osdize(dev)
ceph.osdize(dev, config('osd-format'), config('osd-journal'),
reformat_osd())
# Support use of single node ceph
if (not ceph.is_bootstrapped() and
int(config('monitor-count')) == 1):
bootstrap_monitor_cluster()
if (not ceph.is_bootstrapped() and int(config('monitor-count')) == 1):
ceph.bootstrap_monitor_cluster(config('monitor-secret'))
ceph.wait_for_bootstrap()
if ceph.is_bootstrapped():
@ -130,64 +127,14 @@ def get_mon_hosts():
for relid in relation_ids('mon'):
for unit in related_units(relid):
hosts.append(
'{}:6789'.format(get_host_ip(
relation_get('private-address',
unit, relid)))
)
'{}:6789'.format(get_host_ip(relation_get('private-address',
unit, relid)))
)
hosts.sort()
return hosts
def update_monfs():
hostname = get_unit_hostname()
monfs = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
upstart = '{}/upstart'.format(monfs)
if (os.path.exists(monfs) and
not os.path.exists(upstart)):
# Mark mon as managed by upstart so that
# it gets start correctly on reboots
with open(upstart, 'w'):
pass
def bootstrap_monitor_cluster():
hostname = get_unit_hostname()
path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
done = '{}/done'.format(path)
upstart = '{}/upstart'.format(path)
secret = config('monitor-secret')
keyring = '/var/lib/ceph/tmp/{}.mon.keyring'.format(hostname)
if os.path.exists(done):
log('bootstrap_monitor_cluster: mon already initialized.')
else:
# Ceph >= 0.61.3 needs this for ceph-mon fs creation
mkdir('/var/run/ceph', perms=0755)
mkdir(path)
# end changes for Ceph >= 0.61.3
try:
subprocess.check_call(['ceph-authtool', keyring,
'--create-keyring', '--name=mon.',
'--add-key={}'.format(secret),
'--cap', 'mon', 'allow *'])
subprocess.check_call(['ceph-mon', '--mkfs',
'-i', hostname,
'--keyring', keyring])
with open(done, 'w'):
pass
with open(upstart, 'w'):
pass
subprocess.check_call(['start', 'ceph-mon-all-starter'])
except:
raise
finally:
os.unlink(keyring)
def reformat_osd():
if config('osd-reformat'):
return True
@ -195,48 +142,6 @@ def reformat_osd():
return False
def osdize(dev):
if not os.path.exists(dev):
log('Path {} does not exist - bailing'.format(dev))
return
if (ceph.is_osd_disk(dev) and not
reformat_osd()):
log('Looks like {} is already an OSD, skipping.'
.format(dev))
return
if device_mounted(dev):
log('Looks like {} is in use, skipping.'.format(dev))
return
cmd = ['ceph-disk-prepare']
# Later versions of ceph support more options
if ceph.get_ceph_version() >= "0.48.3":
osd_format = config('osd-format')
if osd_format:
cmd.append('--fs-type')
cmd.append(osd_format)
cmd.append(dev)
osd_journal = config('osd-journal')
if (osd_journal and
os.path.exists(osd_journal)):
cmd.append(osd_journal)
else:
# Just provide the device - no other options
# for older versions of ceph
cmd.append(dev)
subprocess.call(cmd)
def device_mounted(dev):
return subprocess.call(['grep', '-wqs', dev + '1', '/proc/mounts']) == 0
def filesystem_mounted(fs):
return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
@hooks.hook('mon-relation-departed',
'mon-relation-joined')
def mon_relation():
@ -245,15 +150,15 @@ def mon_relation():
moncount = int(config('monitor-count'))
if len(get_mon_hosts()) >= moncount:
bootstrap_monitor_cluster()
ceph.bootstrap_monitor_cluster(config('monitor-secret'))
ceph.wait_for_bootstrap()
ceph.rescan_osd_devices()
notify_osds()
notify_radosgws()
notify_client()
else:
log('Not enough mons ({}), punting.'.format(
len(get_mon_hosts())))
log('Not enough mons ({}), punting.'
.format(len(get_mon_hosts())))
log('End mon-relation hook.')
@ -316,7 +221,6 @@ def radosgw_relation():
# Install radosgw for admin tools
apt_install(packages=filter_installed_packages(['radosgw']))
if ceph.is_quorum():
log('mon cluster in quorum - providing radosgw with keys')
relation_set(radosgw_key=ceph.get_radosgw_key(),
@ -348,7 +252,7 @@ def upgrade_charm():
emit_cephconf()
apt_install(packages=filter_installed_packages(ceph.PACKAGES), fatal=True)
install_upstart_scripts()
update_monfs()
ceph.update_monfs()
log('End upgrade-charm hook.')
@ -356,7 +260,7 @@ def upgrade_charm():
def start():
# In case we're being redeployed to the same machines, try
# to make sure everything is running as soon as possible.
subprocess.call(['start', 'ceph-mon-all'])
service_start('ceph-mon-all')
ceph.rescan_osd_devices()

View File

@ -14,12 +14,12 @@ from charmhelpers.core.hookenv import (
config,
unit_get,
cached
)
)
from charmhelpers.core.host import (
apt_install,
apt_update,
filter_installed_packages
)
)
TEMPLATES_DIR = 'templates'
@ -40,14 +40,12 @@ except ImportError:
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
loader=jinja2.FileSystemLoader(template_dir))
template = templates.get_template(template_name)
return template.render(context)
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
CLOUD_ARCHIVE = """ # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@ -59,7 +57,7 @@ def configure_source(source=None):
cmd = [
'add-apt-repository',
source
]
]
subprocess.check_call(cmd)
if source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
@ -76,7 +74,7 @@ def configure_source(source=None):
'apt-key',
'adv', '--keyserver keyserver.ubuntu.com',
'--recv-keys', key
]
]
subprocess.check_call(cmd)
apt_update(fatal=True)

View File

@ -1,7 +1,6 @@
name: ceph
summary: Highly scalable distributed storage
maintainer: James Page <james.page@ubuntu.com>,
Paul Collins <paul.collins@canonical.com>
maintainer: James Page <james.page@ubuntu.com>
description: |
Ceph is a distributed storage and network file system designed to provide
excellent performance, reliability, and scalability.