Resync helpers, add standard targets to Makefile

James Page 2014-06-24 12:05:17 +01:00
parent ea727e8286
commit bcf76b01e3
20 changed files with 728 additions and 393 deletions

View File

@@ -1 +1,2 @@
bin
.coverage

View File

@@ -10,5 +10,14 @@ test:
	@echo Starting tests...
	@$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests
-sync:
-	@charm-helper-sync -c charm-helpers-sync.yaml
+bin/charm_helpers_sync.py:
+	@mkdir -p bin
+	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
+		> bin/charm_helpers_sync.py
+sync: bin/charm_helpers_sync.py
+	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+publish: lint test
+	bzr push lp:charms/neutron-api
+	bzr push lp:charms/trusty/neutron-api
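
The sync targets are now self-bootstrapping: `make sync` first builds bin/charm_helpers_sync.py, then runs it against charm-helpers-sync.yaml. A rough Python equivalent of what the two targets do (a sketch only; the helper name is illustrative, not part of the tool):

import os
import subprocess

def ensure_sync_script(path='bin/charm_helpers_sync.py'):
    # Mirrors `mkdir -p bin` and `bzr cat ... > bin/charm_helpers_sync.py`.
    if not os.path.isdir('bin'):
        os.makedirs('bin')
    if not os.path.exists(path):
        src = ('lp:charm-helpers/tools/charm_helpers_sync/'
               'charm_helpers_sync.py')
        with open(path, 'w') as f:
            subprocess.check_call(['bzr', 'cat', src], stdout=f)

ensure_sync_script()
subprocess.check_call(['python', 'bin/charm_helpers_sync.py',
                       '-c', 'charm-helpers-sync.yaml'])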

View File

@@ -3,7 +3,7 @@ destination: hooks/charmhelpers
include:
  - core
  - fetch
-  - contrib.openstack
+  - contrib.openstack|inc=*
  - contrib.hahelpers
  - contrib.network.ovs
  - contrib.storage.linux
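
The new `contrib.openstack|inc=*` entry uses the sync tool's inline option syntax: text after `|` carries per-module options, and `inc` lists include globs. A sketch of how such an entry can be split (an assumption about the format, not code copied from charm_helpers_sync.py):

def parse_sync_entry(entry):
    # 'contrib.openstack|inc=*' -> ('contrib.openstack', {'inc': ['*']})
    if '|' not in entry:
        return entry, {}
    module, _, optstr = entry.partition('|')
    opts = {}
    for opt in optstr.split(','):
        key, _, val = opt.partition('=')
        opts.setdefault(key, []).append(val)
    return module, opts

print(parse_sync_entry('contrib.openstack|inc=*'))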

View File

@@ -81,11 +81,6 @@ options:
    description: |
      Default multicast port number that will be used to communicate between
      HA Cluster nodes.
-  use-syslog:
-    type: boolean
-    default: False
-    description: |
-      If set to True, supporting services will log to syslog.
  debug:
    default: False
    type: boolean

View File

@@ -1,297 +0,0 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import commands
import os
import shutil
import time
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.core.hookenv import (
relation_get,
relation_ids,
related_units,
log,
INFO,
ERROR
)
from charmhelpers.fetch import (
apt_install,
)
from charmhelpers.core.host import (
mount,
mounts,
service_start,
service_stop,
umount,
)
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
KEYFILE = '/etc/ceph/ceph.client.%s.key'
CEPH_CONF = """[global]
auth supported = %(auth)s
keyring = %(keyring)s
mon host = %(mon_hosts)s
log to syslog = %(use_syslog)s
err to syslog = %(use_syslog)s
clog to syslog = %(use_syslog)s
"""
def running(service):
# this local util can be dropped as soon the following branch lands
# in lp:charm-helpers
# https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/
try:
output = check_output(['service', service, 'status'])
except CalledProcessError:
return False
else:
if ("start/running" in output or "is running" in output):
return True
else:
return False
def install():
ceph_dir = "/etc/ceph"
if not os.path.isdir(ceph_dir):
os.mkdir(ceph_dir)
apt_install('ceph-common', fatal=True)
def rbd_exists(service, pool, rbd_img):
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %
(service, pool))
return rbd_img in out
def create_rbd_image(service, pool, image, sizemb):
cmd = [
'rbd',
'create',
image,
'--size',
str(sizemb),
'--id',
service,
'--pool',
pool
]
check_call(cmd)
def pool_exists(service, name):
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
return name in out
def create_pool(service, name):
cmd = [
'rados',
'--id',
service,
'mkpool',
name
]
check_call(cmd)
def keyfile_path(service):
return KEYFILE % service
def keyring_path(service):
return KEYRING % service
def create_keyring(service, key):
keyring = keyring_path(service)
if os.path.exists(keyring):
log('ceph: Keyring exists at %s.' % keyring, level=INFO)
cmd = [
'ceph-authtool',
keyring,
'--create-keyring',
'--name=client.%s' % service,
'--add-key=%s' % key
]
check_call(cmd)
log('ceph: Created new ring at %s.' % keyring, level=INFO)
def create_key_file(service, key):
# create a file containing the key
keyfile = keyfile_path(service)
if os.path.exists(keyfile):
log('ceph: Keyfile exists at %s.' % keyfile, level=INFO)
fd = open(keyfile, 'w')
fd.write(key)
fd.close()
log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
def get_ceph_nodes():
hosts = []
for r_id in relation_ids('ceph'):
for unit in related_units(r_id):
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
return hosts
def configure(service, key, auth):
create_keyring(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
mon_hosts = ",".join(map(str, hosts))
keyring = keyring_path(service)
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
ceph_conf.write(CEPH_CONF % locals())
modprobe_kernel_module('rbd')
def image_mapped(image_name):
(rc, out) = commands.getstatusoutput('rbd showmapped')
return image_name in out
def map_block_storage(service, pool, image):
cmd = [
'rbd',
'map',
'%s/%s' % (pool, image),
'--user',
service,
'--secret',
keyfile_path(service),
]
check_call(cmd)
def filesystem_mounted(fs):
return fs in [f for m, f in mounts()]
def make_filesystem(blk_device, fstype='ext4', timeout=10):
count = 0
e_noent = os.errno.ENOENT
while not os.path.exists(blk_device):
if count >= timeout:
log('ceph: gave up waiting on block device %s' % blk_device,
level=ERROR)
raise IOError(e_noent, os.strerror(e_noent), blk_device)
log('ceph: waiting for block device %s to appear' % blk_device,
level=INFO)
count += 1
time.sleep(1)
else:
log('ceph: Formatting block device %s as filesystem %s.' %
(blk_device, fstype), level=INFO)
check_call(['mkfs', '-t', fstype, blk_device])
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
# mount block device into /mnt
mount(blk_device, '/mnt')
# copy data to /mnt
try:
copy_files(data_src_dst, '/mnt')
except:
pass
# umount block device
umount('/mnt')
_dir = os.stat(data_src_dst)
uid = _dir.st_uid
gid = _dir.st_gid
# re-mount where the data should originally be
mount(blk_device, data_src_dst, persist=True)
# ensure original ownership of new mount.
cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
check_call(cmd)
# TODO: re-use
def modprobe_kernel_module(module):
log('ceph: Loading kernel module', level=INFO)
cmd = ['modprobe', module]
check_call(cmd)
cmd = 'echo %s >> /etc/modules' % module
check_call(cmd, shell=True)
def copy_files(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
blk_device, fstype, system_services=[]):
"""
To be called from the current cluster leader.
Ensures given pool and RBD image exists, is mapped to a block device,
and the device is formatted and mounted at the given mount_point.
If formatting a device for the first time, data existing at mount_point
will be migrated to the RBD device before being remounted.
All services listed in system_services will be stopped prior to data
migration and restarted when complete.
"""
# Ensure pool, RBD image, RBD mappings are in place.
if not pool_exists(service, pool):
log('ceph: Creating new pool %s.' % pool, level=INFO)
create_pool(service, pool)
if not rbd_exists(service, pool, rbd_img):
log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO)
create_rbd_image(service, pool, rbd_img, sizemb)
if not image_mapped(rbd_img):
log('ceph: Mapping RBD Image as a Block Device.', level=INFO)
map_block_storage(service, pool, rbd_img)
# make file system
# TODO: What happens if for whatever reason this is run again and
# the data is already in the rbd device and/or is mounted??
# When it is mounted already, it will fail to make the fs
# XXX: This is really sketchy! Need to at least add an fstab entry
# otherwise this hook will blow away existing data if its executed
# after a reboot.
if not filesystem_mounted(mount_point):
make_filesystem(blk_device, fstype)
for svc in system_services:
if running(svc):
log('Stopping services %s prior to migrating data.' % svc,
level=INFO)
service_stop(svc)
place_data_on_ceph(service, blk_device, mount_point, fstype)
for svc in system_services:
service_start(svc)

View File

@@ -0,0 +1,75 @@
''' Helpers for interacting with OpenvSwitch '''
import subprocess
import os
from charmhelpers.core.hookenv import (
log, WARNING
)
from charmhelpers.core.host import (
service
)
def add_bridge(name):
''' Add the named bridge to openvswitch '''
log('Creating bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
def del_bridge(name):
''' Delete the named bridge from openvswitch '''
log('Deleting bridge {}'.format(name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
def add_bridge_port(name, port):
''' Add a port to the named openvswitch bridge '''
log('Adding port {} to bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
name, port])
subprocess.check_call(["ip", "link", "set", port, "up"])
def del_bridge_port(name, port):
''' Delete a port from the named openvswitch bridge '''
log('Deleting port {} from bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
name, port])
subprocess.check_call(["ip", "link", "set", port, "down"])
def set_manager(manager):
''' Set the controller for the local openvswitch '''
log('Setting manager for local ovs to {}'.format(manager))
subprocess.check_call(['ovs-vsctl', 'set-manager',
'ssl:{}'.format(manager)])
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
def get_certificate():
''' Read openvswitch certificate from disk '''
if os.path.exists(CERT_PATH):
log('Reading ovs certificate from {}'.format(CERT_PATH))
with open(CERT_PATH, 'r') as cert:
full_cert = cert.read()
begin_marker = "-----BEGIN CERTIFICATE-----"
end_marker = "-----END CERTIFICATE-----"
begin_index = full_cert.find(begin_marker)
end_index = full_cert.rfind(end_marker)
if end_index == -1 or begin_index == -1:
raise RuntimeError("Certificate does not contain valid begin"
" and end markers.")
full_cert = full_cert[begin_index:(end_index + len(end_marker))]
return full_cert
else:
log('Certificate not found', level=WARNING)
return None
def full_restart():
''' Full restart and reload of openvswitch '''
if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
service('start', 'openvswitch-force-reload-kmod')
else:
service('force-reload-kmod', 'openvswitch-switch')
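
These helpers lean on ovs-vsctl's --may-exist/--if-exists flags, so hooks can call them unconditionally on every run. A typical (hypothetical) hook snippet, assuming openvswitch is installed and the hook runs as root:

from charmhelpers.contrib.network.ovs import add_bridge, add_bridge_port

# Idempotent: both calls are no-ops if the bridge/port already exist.
add_bridge('br-int')
add_bridge_port('br-int', 'eth1')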

View File

@@ -0,0 +1,38 @@
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
class OpenStackAmuletDeployment(AmuletDeployment):
"""This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, series=None, openstack=None):
"""Initialize the deployment environment."""
self.openstack = None
super(OpenStackAmuletDeployment, self).__init__(series)
if openstack:
self.openstack = openstack
def _configure_services(self, configs):
"""Configure all of the services."""
for service, config in configs.iteritems():
if service == self.this_service:
config['openstack-origin'] = self.openstack
self.d.configure(service, config)
def _get_openstack_release(self):
"""Return an integer representing the enum value of the openstack
release."""
self.precise_essex, self.precise_folsom, self.precise_grizzly, \
self.precise_havana, self.precise_icehouse, \
self.trusty_icehouse = range(6)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
return releases[(self.series, self.openstack)]
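
Unpacking range(6) gives the releases ordered integer values, so tests can express "this release or later" as a simple comparison. A hypothetical test fragment:

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)

d = OpenStackAmuletDeployment(series='trusty')
if d._get_openstack_release() >= d.precise_icehouse:
    pass  # icehouse-or-later assertions would go here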

View File

@@ -0,0 +1,151 @@
import logging
import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletUtils(AmuletUtils):
"""This class inherits from AmuletUtils and has additional support
that is specifically for use by OpenStack charms."""
def __init__(self, log_level=ERROR):
"""Initialize the deployment environment."""
super(OpenStackAmuletUtils, self).__init__(log_level)
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate actual endpoint data vs expected endpoint data. The ports
are used to find the matching endpoint."""
found = False
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if admin_port in ep.adminurl and internal_port in ep.internalurl \
and public_port in ep.publicurl:
found = True
actual = {'id': ep.id,
'region': ep.region,
'adminurl': ep.adminurl,
'internalurl': ep.internalurl,
'publicurl': ep.publicurl,
'service_id': ep.service_id}
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)
if not found:
return 'endpoint not found'
def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate a list of actual service catalog endpoints vs a list of
expected service catalog endpoints."""
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in expected.iteritems():
if k in actual:
ret = self._validate_dict_data(expected[k][0], actual[k][0])
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
def validate_tenant_data(self, expected, actual):
"""Validate a list of actual tenant data vs list of expected tenant
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'description': act.description,
'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected tenant data - {}".format(ret)
if not found:
return "tenant {} does not exist".format(e.name)
return ret
def validate_role_data(self, expected, actual):
"""Validate a list of actual role data vs a list of expected role
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'name': act.name, 'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected role data - {}".format(ret)
if not found:
return "role {} does not exist".format(e.name)
return ret
def validate_user_data(self, expected, actual):
"""Validate a list of actual user data vs a list of expected user
data."""
self.log.debug('actual: {}'.format(repr(actual)))
for e in expected:
found = False
for act in actual:
a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId,
'id': act.id}
if e['name'] == a['name']:
found = True
ret = self._validate_dict_data(e, a)
if ret:
return "unexpected user data - {}".format(ret)
if not found:
return "user {} does not exist".format(e.name)
return ret
def validate_flavor_data(self, expected, actual):
"""Validate a list of actual flavors vs a list of expected flavors."""
self.log.debug('actual: {}'.format(repr(actual)))
act = [a.name for a in actual]
return self._validate_list_data(expected, act)
def tenant_exists(self, keystone, tenant):
"""Return True if tenant exists"""
return tenant in [t.name for t in keystone.tenants.list()]
def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant):
"""Authenticates admin user with the keystone admin endpoint."""
service_ip = \
keystone_sentry.relation('shared-db',
'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep)
def authenticate_glance_admin(self, keystone):
"""Authenticates admin user with glance."""
ep = keystone.service_catalog.url_for(service_type='image',
endpoint_type='adminURL')
return glance_client.Client(ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with nova-api."""
ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password,
project_id=tenant, auth_url=ep)
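
Each validate_* helper returns an error string on mismatch and None on success, so a test can raise on any truthy return. A sketch of the admin-auth flow (credentials are placeholders):

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

def check_admin_tenant(keystone_sentry):
    u = OpenStackAmuletUtils()
    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    return u.tenant_exists(keystone, 'admin')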

View File

@@ -507,7 +507,7 @@ class NeutronContext(OSContextGenerator):
if self.network_manager == 'quantum':
_file = '/etc/nova/quantum_plugin.conf'
else:
-_file = '/etc/neutron/neutron_plugin.conf'
+_file = '/etc/nova/neutron_plugin.conf'
with open(_file, 'wb') as out:
out.write(self.plugin + '\n')
@@ -541,6 +541,26 @@ class NeutronContext(OSContextGenerator):
return nvp_ctxt
def n1kv_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
self.network_manager)
n1kv_ctxt = {
'core_plugin': driver,
'neutron_plugin': 'n1kv',
'neutron_security_groups': self.neutron_security_groups,
'local_ip': unit_private_ip(),
'config': n1kv_config,
'vsm_ip': config('n1kv-vsm-ip'),
'vsm_username': config('n1kv-vsm-username'),
'vsm_password': config('n1kv-vsm-password'),
'restrict_policy_profiles': config(
'n1kv_restrict_policy_profiles'),
}
return n1kv_ctxt
def neutron_ctxt(self):
if https():
proto = 'https'
@@ -570,8 +590,10 @@ class NeutronContext(OSContextGenerator):
if self.plugin == 'ovs':
ctxt.update(self.ovs_ctxt())
-elif self.plugin == 'nvp':
+elif self.plugin in ['nvp', 'nsx']:
ctxt.update(self.nvp_ctxt())
+elif self.plugin == 'n1kv':
+ctxt.update(self.n1kv_ctxt())
alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:

View File

@@ -114,14 +114,44 @@ def neutron_plugins():
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [
context.SharedDBContext(user=config('neutron-database-user'),
database=config('neutron-database'),
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR)],
'services': [],
'packages': [['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
}
}
-# NOTE: patch in ml2 plugin for icehouse onwards
if release >= 'icehouse':
+# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
+# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
+plugins['nvp'] = plugins['nsx']
return plugins
@@ -154,7 +184,7 @@ def network_manager():
upgrading from G).
'''
release = os_release('nova-common')
-manager = 'neutron'
+manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
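
Because plugins['nvp'] is aliased to plugins['nsx'] from icehouse onwards, attribute lookups behave identically for either name. An illustrative check (the expected value assumes an icehouse or later install):

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

cfg = neutron_plugin_attribute('nvp', 'config', 'neutron')
print(cfg)  # expected: '/etc/neutron/plugins/vmware/nsx.ini'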

View File

@@ -3,7 +3,6 @@
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
-import apt_pkg as apt
import subprocess
import os
import socket
@@ -41,7 +40,8 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
-('trusty', 'icehouse')
+('trusty', 'icehouse'),
+('utopic', 'juno'),
])
@@ -52,6 +52,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
('2014.2', 'juno'),
])
# The ugly duckling
@@ -130,7 +131,13 @@ def get_os_version_codename(codename):
def get_os_codename_package(package, fatal=True):
'''Derive OpenStack release codename from an installed package.'''
+import apt_pkg as apt
apt.init()
+# Tell apt to build an in-memory cache to prevent race conditions (if
+# another process is already building the cache).
+apt.config.set("Dir::Cache::pkgcache", "")
cache = apt.Cache()
try:
@@ -182,8 +189,8 @@ def get_os_version_package(pkg, fatal=True):
for version, cname in vers_map.iteritems():
if cname == codename:
return version
-#e = "Could not determine OpenStack version for package: %s" % pkg
-#error_out(e)
+# e = "Could not determine OpenStack version for package: %s" % pkg
+# error_out(e)
os_rel = None
@@ -268,6 +275,9 @@ def configure_installation_source(rel):
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'juno': 'trusty-updates/juno',
'juno/updates': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
}
try:
@@ -315,6 +325,7 @@ def openstack_upgrade_available(package):
"""
+import apt_pkg as apt
src = config('openstack-origin')
cur_vers = get_os_version_package(package)
available_vers = get_os_version_install_source(src)
@@ -401,6 +412,8 @@ def ns_query(address):
rtype = 'PTR'
elif isinstance(address, basestring):
rtype = 'A'
+else:
+return None
answers = dns.resolver.query(address, rtype)
if answers:

View File

@@ -62,7 +62,7 @@ def list_lvm_volume_group(block_device):
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
if l.strip().startswith('VG Name'):
-vg = ' '.join(l.split()).split(' ').pop()
+vg = ' '.join(l.strip().split()[2:])
return vg
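
Both expressions agree on a normal pvdisplay line; they differ when the value column is empty, e.g. for a PV in no volume group (an assumption about pvdisplay output):

line = '  VG Name               vg0'
' '.join(line.split()).split(' ').pop()   # 'vg0'
' '.join(line.strip().split()[2:])        # 'vg0'

line = '  VG Name'                        # PV not in any VG
' '.join(line.split()).split(' ').pop()   # 'Name' -- wrong
' '.join(line.strip().split()[2:])        # ''     -- empty, as intended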

View File

@@ -1,4 +1,5 @@
-from os import stat
+import os
+import re
from stat import S_ISBLK
from subprocess import (
@@ -14,7 +15,9 @@ def is_block_device(path):
:returns: boolean: True if path is a block device, False if not.
'''
-return S_ISBLK(stat(path).st_mode)
+if not os.path.exists(path):
+return False
+return S_ISBLK(os.stat(path).st_mode)
def zap_disk(block_device):
@@ -29,7 +32,19 @@ def zap_disk(block_device):
'--clear', block_device])
dev_end = check_output(['blockdev', '--getsz', block_device])
gpt_end = int(dev_end.split()[0]) - 100
-check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
+check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=1M', 'count=1'])
-check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
-'bs=512', 'count=100', 'seek=%s'%(gpt_end)])
+check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
def is_device_mounted(device):
'''Given a device path, return True if that device is mounted, and False
if it isn't.
:param device: str: Full path of the device to check.
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
out = check_output(['mount'])
return bool(re.search(device + r"[0-9]+\b", out))
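
As written, is_device_mounted() matches the device name followed by a partition number, so it reports a disk as mounted when any of its partitions is; a bare whole-disk mount would not match. A quick illustration against made-up mount output:

import re

out = '/dev/vdb1 on /mnt type ext4 (rw)\n'
device = '/dev/vdb'
print(bool(re.search(device + r"[0-9]+\b", out)))  # True: /dev/vdb1 is mounted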

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
import os
class Fstab(file):
"""This class extends file in order to implement a file reader/writer
for file `/etc/fstab`
"""
class Entry(object):
"""Entry class represents a non-comment line on the `/etc/fstab` file
"""
def __init__(self, device, mountpoint, filesystem,
options, d=0, p=0):
self.device = device
self.mountpoint = mountpoint
self.filesystem = filesystem
if not options:
options = "defaults"
self.options = options
self.d = d
self.p = p
def __eq__(self, o):
return str(self) == str(o)
def __str__(self):
return "{} {} {} {} {} {}".format(self.device,
self.mountpoint,
self.filesystem,
self.options,
self.d,
self.p)
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
def __init__(self, path=None):
if path:
self._path = path
else:
self._path = self.DEFAULT_PATH
file.__init__(self, self._path, 'r+')
def _hydrate_entry(self, line):
# NOTE: use split with no arguments to split on any
# whitespace including tabs
return Fstab.Entry(*filter(
lambda x: x not in ('', None),
line.strip("\n").split()))
@property
def entries(self):
self.seek(0)
for line in self.readlines():
try:
if not line.startswith("#"):
yield self._hydrate_entry(line)
except ValueError:
pass
def get_entry_by_attr(self, attr, value):
for entry in self.entries:
e_attr = getattr(entry, attr)
if e_attr == value:
return entry
return None
def add_entry(self, entry):
if self.get_entry_by_attr('device', entry.device):
return False
self.write(str(entry) + '\n')
self.truncate()
return entry
def remove_entry(self, entry):
self.seek(0)
lines = self.readlines()
found = False
for index, line in enumerate(lines):
if not line.startswith("#"):
if self._hydrate_entry(line) == entry:
found = True
break
if not found:
return False
lines.remove(line)
self.seek(0)
self.write(''.join(lines))
self.truncate()
return True
@classmethod
def remove_by_mountpoint(cls, mountpoint, path=None):
fstab = cls(path=path)
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
if entry:
return fstab.remove_entry(entry)
return False
@classmethod
def add(cls, device, mountpoint, filesystem, options=None, path=None):
return cls(path=path).add_entry(Fstab.Entry(device,
mountpoint, filesystem,
options=options))
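
Typical use goes through the classmethods (paths are illustrative; writing /etc/fstab requires root):

from charmhelpers.core.fstab import Fstab

# add() returns the new entry, or False if the device already has one.
Fstab.add('/dev/vdb', '/srv/data', 'ext4', options='defaults,noatime')
Fstab.remove_by_mountpoint('/srv/data')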

View File

@@ -155,6 +155,100 @@
return os.path.basename(sys.argv[0])
class Config(dict):
"""A Juju charm config dictionary that can write itself to
disk (as json) and track which values have changed since
the previous hook invocation.
Do not instantiate this object directly - instead call
``hookenv.config()``
Example usage::
>>> # inside a hook
>>> from charmhelpers.core import hookenv
>>> config = hookenv.config()
>>> config['foo']
'bar'
>>> config['mykey'] = 'myval'
>>> config.save()
>>> # user runs `juju set mycharm foo=baz`
>>> # now we're inside subsequent config-changed hook
>>> config = hookenv.config()
>>> config['foo']
'baz'
>>> # test to see if this val has changed since last hook
>>> config.changed('foo')
True
>>> # what was the previous value?
>>> config.previous('foo')
'bar'
>>> # keys/values that we add are preserved across hooks
>>> config['mykey']
'myval'
>>> # don't forget to save at the end of hook!
>>> config.save()
"""
CONFIG_FILE_NAME = '.juju-persistent-config'
def __init__(self, *args, **kw):
super(Config, self).__init__(*args, **kw)
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
self.load_previous()
def load_previous(self, path=None):
"""Load previous copy of config from disk so that current values
can be compared to previous values.
:param path:
File path from which to load the previous config. If `None`,
config is loaded from the default location. If `path` is
specified, subsequent `save()` calls will write to the same
path.
"""
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
def changed(self, key):
"""Return true if the value for this key has changed since
the last save.
"""
if self._prev_dict is None:
return True
return self.previous(key) != self.get(key)
def previous(self, key):
"""Return previous value for this key, or None if there
is no "previous" value.
"""
if self._prev_dict:
return self._prev_dict.get(key)
return None
def save(self):
"""Save this config to disk.
Preserves items in _prev_dict that do not exist in self.
"""
if self._prev_dict:
for k, v in self._prev_dict.iteritems():
if k not in self:
self[k] = v
with open(self.path, 'w') as f:
json.dump(self, f)
@cached
def config(scope=None):
"""Juju charm configuration"""
@@ -163,7 +257,10 @@
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
-return json.loads(subprocess.check_output(config_cmd_line))
+config_data = json.loads(subprocess.check_output(config_cmd_line))
+if scope is not None:
+return config_data
+return Config(config_data)
except ValueError:
return None
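
The new return path keeps the scoped behaviour intact while whole-config calls gain the Config wrapper. Hypothetical hook usage ('foo' is an example key):

from charmhelpers.core.hookenv import config

value = config('foo')   # scoped call: plain value, exactly as before
cfg = config()          # whole config: a Config with changed()/previous()
if cfg.changed('foo'):
    pass                # react to the changed value here
cfg.save()              # persist for the next hook's comparison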

View File

@@ -16,6 +16,7 @@ import hashlib
from collections import OrderedDict
from hookenv import log
from fstab import Fstab
def service_start(service_name):
@@ -34,7 +35,8 @@ def service_restart(service_name):
def service_reload(service_name, restart_on_failure=False):
-"""Reload a system service, optionally falling back to restart if reload fails"""
+"""Reload a system service, optionally falling back to restart if
+reload fails"""
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
@@ -143,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444):
target.write(content)
-def mount(device, mountpoint, options=None, persist=False):
+def fstab_remove(mp):
+"""Remove the given mountpoint entry from /etc/fstab
+"""
+return Fstab.remove_by_mountpoint(mp)
+def fstab_add(dev, mp, fs, options=None):
+"""Adds the given device entry to the /etc/fstab file
+"""
+return Fstab.add(dev, mp, fs, options=options)
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
@@ -154,9 +168,9 @@
except subprocess.CalledProcessError, e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
-# TODO: update fstab
-pass
+return fstab_add(device, mountpoint, filesystem, options=options)
return True
@@ -168,9 +182,9 @@
except subprocess.CalledProcessError, e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
-# TODO: update fstab
-pass
+return fstab_remove(mountpoint)
return True
@@ -295,3 +309,17 @@ def get_nic_hwaddr(nic):
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
def cmp_pkgrevno(package, revno, pkgcache=None):
'''Compare supplied revno with the revno of the installed package
1 => Installed revno is greater than supplied arg
0 => Installed revno is the same as supplied arg
-1 => Installed revno is less than supplied arg
'''
import apt_pkg
if not pkgcache:
apt_pkg.init()
pkgcache = apt_pkg.Cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
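
cmp_pkgrevno() follows cmp() semantics, which makes version gates one-liners. A hypothetical guard (package and version are examples):

from charmhelpers.core.host import cmp_pkgrevno

# Only take a code path that needs ceph >= 0.61 when the installed
# package is new enough.
if cmp_pkgrevno('ceph', '0.61') >= 0:
    pass  # newer-ceph code path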

View File

@@ -1,4 +1,5 @@
import importlib
+import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
@@ -12,9 +13,9 @@ from charmhelpers.core.hookenv import (
config,
log,
)
-import apt_pkg
+import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@@ -54,12 +55,74 @@
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
# Juno
'juno': 'trusty-updates/juno',
'trusty-juno': 'trusty-updates/juno',
'trusty-juno/updates': 'trusty-updates/juno',
'trusty-updates/juno': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'trusty-juno/proposed': 'trusty-proposed/juno',
'trusty-proposed/juno': 'trusty-proposed/juno',
}
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
class SourceConfigError(Exception):
pass
class UnhandledSource(Exception):
pass
class AptLockError(Exception):
pass
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
import apt_pkg
apt_pkg.init()
# Tell apt to build an in-memory cache to prevent race conditions (if
# another process is already building the cache).
apt_pkg.config.set("Dir::Cache::pkgcache", "")
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
@@ -87,14 +150,7 @@ def apt_install(packages, options=None, fatal=False):
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
-env = os.environ.copy()
-if 'DEBIAN_FRONTEND' not in env:
-env['DEBIAN_FRONTEND'] = 'noninteractive'
-if fatal:
-subprocess.check_call(cmd, env=env)
-else:
-subprocess.call(cmd, env=env)
+_run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
@@ -109,24 +165,13 @@ def apt_upgrade(options=None, fatal=False, dist=False):
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
-env = os.environ.copy()
-if 'DEBIAN_FRONTEND' not in env:
-env['DEBIAN_FRONTEND'] = 'noninteractive'
-if fatal:
-subprocess.check_call(cmd, env=env)
-else:
-subprocess.call(cmd, env=env)
+_run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
-if fatal:
-subprocess.check_call(cmd)
-else:
-subprocess.call(cmd)
+_run_apt_command(cmd, fatal)
def apt_purge(packages, fatal=False):
@@ -137,10 +182,7 @@
else:
cmd.extend(packages)
log("Purging {}".format(packages))
-if fatal:
-subprocess.check_call(cmd)
-else:
-subprocess.call(cmd)
+_run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
@@ -151,6 +193,7 @@
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
@@ -184,14 +227,10 @@
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
-'keyserver.ubuntu.com', '--recv',
+'hkp://keyserver.ubuntu.com:80', '--recv',
key])
-class SourceConfigError(Exception):
-pass
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
@@ -224,17 +263,6 @@ def configure_sources(update=False,
if update:
apt_update(fatal=True)
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
-'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
-'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
-)
-class UnhandledSource(Exception):
-pass
def install_remote(source):
"""
@@ -265,30 +293,6 @@ def install_from_config(config_var_name):
return install_remote(source)
-class BaseFetchHandler(object):
-"""Base class for FetchHandler implementations in fetch plugins"""
-def can_handle(self, source):
-"""Returns True if the source can be handled. Otherwise returns
-a string explaining why it cannot"""
-return "Wrong source type"
-def install(self, source):
-"""Try to download and unpack the source. Return the path to the
-unpacked files or raise UnhandledSource."""
-raise UnhandledSource("Wrong source type {}".format(source))
-def parse_url(self, url):
-return urlparse(url)
-def base_url(self, url):
-"""Return url without querystring or fragment"""
-parts = list(self.parse_url(url))
-parts[4:] = ['' for i in parts[4:]]
-return urlunparse(parts)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
@@ -306,3 +310,40 @@ def plugins(fetch_handlers=None):
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
def _run_apt_command(cmd, fatal=False):
"""
Run an APT command, checking output and retrying if the fatal flag is set
to True.
:param: cmd: str: The apt command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the apt
# lock was not acquired.
while result is None or result == APT_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError, e:
retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire DPKG lock. Will retry in {} seconds."
"".format(APT_NO_LOCK_RETRY_DELAY))
time.sleep(APT_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env)
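
With the apt entry points consolidated onto _run_apt_command(), every call made with fatal=True now inherits the dpkg-lock retry: up to APT_NO_LOCK_RETRY_COUNT attempts, APT_NO_LOCK_RETRY_DELAY seconds apart, while apt exits with APT_NO_LOCK (100). Example (package name illustrative):

from charmhelpers.fetch import apt_update, apt_install

# Both calls retry through _run_apt_command() if another process
# (e.g. unattended-upgrades) holds the dpkg lock.
apt_update(fatal=True)
apt_install(['ceph-common'], fatal=True)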

View File

@@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler):
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
-dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
+dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try: