Sync charmhelpers to get fix for LP 1581598

Change-Id: Ie180c0d992c408fef0d5bb9143e706a4f195116e
Closes-Bug: 1581598
Edward Hope-Morley 2016-06-21 11:18:25 +01:00
parent 117b32387e
commit 53f67cecd0
11 changed files with 518 additions and 107 deletions


@ -9,5 +9,6 @@ include:
- contrib.storage
- contrib.network.ip
- contrib.openstack.utils
- contrib.openstack.exceptions
- contrib.python.packages
- contrib.charmsupport


@ -41,10 +41,11 @@ from charmhelpers.core.hookenv import (
relation_get,
config as config_get,
INFO,
ERROR,
DEBUG,
WARNING,
unit_get,
is_leader as juju_is_leader
is_leader as juju_is_leader,
status_set,
)
from charmhelpers.core.decorators import (
retry_on_exception,
@ -60,6 +61,10 @@ class HAIncompleteConfig(Exception):
pass
class HAIncorrectConfig(Exception):
pass
class CRMResourceNotFound(Exception):
pass
@ -274,27 +279,71 @@ def get_hacluster_config(exclude_keys=None):
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip
ha-bindiface, ha-mcastport, vip, os-internal-hostname,
os-admin-hostname, os-public-hostname
param: exclude_keys: list of setting key(s) to be excluded.
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncompleteConfig if settings are missing.
raises: HAIncompleteConfig if settings are missing or incorrect.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip']
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
'os-admin-hostname', 'os-public-hostname']
conf = {}
for setting in settings:
if exclude_keys and setting in exclude_keys:
continue
conf[setting] = config_get(setting)
missing = []
[missing.append(s) for s, v in six.iteritems(conf) if v is None]
if missing:
log('Insufficient config data to configure hacluster.', level=ERROR)
raise HAIncompleteConfig
if not valid_hacluster_config():
raise HAIncorrectConfig('Insufficient or incorrect config data to '
'configure hacluster.')
return conf
def valid_hacluster_config():
'''
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
must be set.
Note: ha-bindiface and ha-mcastport both have defaults and will always
be set. We only care that either vip or dns-ha is set.
:returns: boolean: valid config returns true.
raises: HAIncorrectConfig if settings conflict.
raises: HAIncompleteConfig if settings are missing.
'''
vip = config_get('vip')
dns = config_get('dns-ha')
if not(bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)
raise HAIncorrectConfig(msg)
# If dns-ha then one of os-*-hostname must be set
if dns:
dns_settings = ['os-internal-hostname', 'os-admin-hostname',
'os-public-hostname']
# At this point it is unknown if one or all of the possible
# network spaces are in HA. Validate at least one is set which is
# the minimum required.
for setting in dns_settings:
if config_get(setting):
log('DNS HA: At least one hostname is set {}: {}'
''.format(setting, config_get(setting)),
level=DEBUG)
return True
msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
'DNS HA')
status_set('blocked', msg)
raise HAIncompleteConfig(msg)
log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
return True
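To make the new validation easier to follow, here is a minimal, self-contained sketch (not part of the synced code; the helper name is hypothetical) of the exclusive-or rule that valid_hacluster_config() enforces before any hostname checks:

# Illustration only: exactly one of vip / dns-ha may be set.
def _ha_mode_is_valid(vip, dns_ha):
    return bool(vip) ^ bool(dns_ha)

assert _ha_mode_is_valid('10.0.0.10', False)      # VIP-based HA
assert _ha_mode_is_valid(None, True)              # DNS-based HA
assert not _ha_mode_is_valid(None, False)         # neither set -> blocked
assert not _ha_mode_is_valid('10.0.0.10', True)   # both set -> blocked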
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS


@ -214,7 +214,16 @@ def format_ipv6_addr(address):
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""Return the assigned IP address for a given interface, if any."""
"""Return the assigned IP address for a given interface, if any.
:param iface: network interface on which address(es) are expected to
be found.
:param inet_type: inet address family
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:return: list of ip addresses
"""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
We currently only support scope global IPv6 addresses i.e. non-temporary
addresses. If no global IPv6 address is found, return the first one found
in the ipv6 address list.
:param iface: network interface on which ipv6 address(es) are expected to
be found.
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:param dynamic_only: only recognise dynamic addresses
:return: list of ipv6 addresses
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd).decode('UTF-8')
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
else:
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
@ -388,10 +405,10 @@ def is_ip(address):
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4 address
socket.inet_aton(address)
# Test to see if already an IPv4/IPv6 address
address = netaddr.IPAddress(address)
return True
except socket.error:
except netaddr.AddrFormatError:
return False


@ -0,0 +1,6 @@
class OSContextError(Exception):
"""Raised when an error occurs during context generation.
This exception is principally used in contrib.openstack.context
"""
pass


@ -25,6 +25,7 @@ import sys
import re
import itertools
import functools
import shutil
import six
import tempfile
@ -46,9 +47,11 @@ from charmhelpers.core.hookenv import (
charm_dir,
DEBUG,
INFO,
ERROR,
related_units,
relation_ids,
relation_set,
service_name,
status_set,
hook_name
)
@ -82,6 +85,7 @@ from charmhelpers.core.host import (
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
from charmhelpers.contrib.openstack.exceptions import OSContextError
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@ -100,6 +104,8 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zebra', 'ocata'), # TODO: upload with real Z name
])
@ -114,6 +120,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2015.1', 'kilo'),
('2015.2', 'liberty'),
('2016.1', 'mitaka'),
('2016.2', 'newton'),
('2017.1', 'ocata'),
])
# The ugly duckling - must list releases oldest to newest
@ -138,49 +146,89 @@ SWIFT_CODENAMES = OrderedDict([
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0', '2.6.0', '2.7.0']),
('newton',
['2.8.0']),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12.0', 'liberty'),
('13.0', 'mitaka'),
('12', 'liberty'),
('13', 'mitaka'),
('14', 'newton'),
('15', 'ocata'),
]),
'neutron-common': OrderedDict([
('7.0', 'liberty'),
('8.0', 'mitaka'),
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
]),
'cinder-common': OrderedDict([
('7.0', 'liberty'),
('8.0', 'mitaka'),
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
]),
'keystone': OrderedDict([
('8.0', 'liberty'),
('8.1', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
'horizon-common': OrderedDict([
('8.0', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
'ceilometer-common': OrderedDict([
('5.0', 'liberty'),
('6.0', 'mitaka'),
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
]),
'heat-common': OrderedDict([
('5.0', 'liberty'),
('6.0', 'mitaka'),
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
]),
'glance-common': OrderedDict([
('11.0', 'liberty'),
('12.0', 'mitaka'),
('11', 'liberty'),
('12', 'mitaka'),
('13', 'newton'),
('14', 'ocata'),
]),
'openstack-dashboard': OrderedDict([
('8.0', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'icehouse': 'icehouse-eol',
'kilo': 'stable/kilo',
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -253,6 +301,7 @@ def get_os_version_codename_swift(codename):
def get_swift_codename(version):
'''Determine OpenStack codename that corresponds to swift version.'''
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
if len(codenames) > 1:
# If more than one release codename contains this version we determine
# the actual codename based on the highest available install source.
@ -264,6 +313,16 @@ def get_swift_codename(version):
return codename
elif len(codenames) == 1:
return codenames[0]
# NOTE: fallback - attempt to match with just major.minor version
match = re.match('^(\d+)\.(\d+)', version)
if match:
major_minor_version = match.group(0)
for codename, versions in six.iteritems(SWIFT_CODENAMES):
for release_version in versions:
if release_version.startswith(major_minor_version):
return codename
return None
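A small sketch of the major.minor fallback added above, using a hypothetical swift point release that is not listed verbatim in SWIFT_CODENAMES:

import re

version = '2.8.1'                       # hypothetical point release
match = re.match(r'^(\d+)\.(\d+)', version)
major_minor = match.group(0)            # '2.8'
# SWIFT_CODENAMES lists '2.8.0' under 'newton'; since
# '2.8.0'.startswith('2.8') is True, the fallback resolves to 'newton'.
print('2.8.0'.startswith(major_minor))  # True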
@ -302,10 +361,13 @@ def get_os_codename_package(package, fatal=True):
if match:
vers = match.group(0)
# Generate a major version number for newer semantic
# versions of openstack projects
major_vers = vers.split('.')[0]
# >= Liberty independent project versions
if (package in PACKAGE_CODENAMES and
vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][vers]
major_vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][major_vers]
else:
# < Liberty co-ordinated project versions
try:
@ -465,6 +527,9 @@ def configure_installation_source(rel):
'mitaka': 'trusty-updates/mitaka',
'mitaka/updates': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
'newton': 'xenial-updates/newton',
'newton/updates': 'xenial-updates/newton',
'newton/proposed': 'xenial-proposed/newton',
}
try:
@ -660,6 +725,53 @@ def git_install_requested():
requirements_dir = None
def git_default_repos(projects):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
if projects == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron and nova charms require some additional repos
if service == 'neutron':
for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
repo = {
'name': svc,
'repository': GIT_DEFAULT_REPOS[svc],
'branch': branch,
}
repos.append(repo)
elif service == 'nova':
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
# finally add the current service's repo
repo = {
'name': service,
'repository': GIT_DEFAULT_REPOS[service],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos))
return projects
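A usage sketch of the helper above; it assumes a Juju hook context in which service_name() returns 'keystone', so the bare branch alias expands to the requirements repo plus the charm's own repo:

# Inside a hook for a hypothetical keystone unit:
projects_yaml = git_default_repos('mitaka')
# yaml.safe_load(projects_yaml) would give roughly:
# {'repositories': [
#     {'name': 'requirements',
#      'repository': 'git://github.com/openstack/requirements',
#      'branch': 'stable/mitaka'},
#     {'name': 'keystone',
#      'repository': 'git://github.com/openstack/keystone',
#      'branch': 'stable/mitaka'}]}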
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
@ -857,6 +969,47 @@ def git_yaml_value(projects_yaml, key):
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = '/usr/share/openstack-pkg-tools/init-script-template'
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.move(init_source, init_dest)
shutil.move(service_source, service_dest)
os.chmod(init_dest, 0o755)
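The filename handling above is easiest to see with a hypothetical template name; the slicing below mirrors the f[:-8] / .service derivation used in the function:

f = 'cinder-api.init.in'                        # hypothetical file in templates_dir
init_file = f[:-8]                              # 'cinder-api' (strips '.init.in')
service_file = '{}.service'.format(init_file)   # 'cinder-api.service'
print(init_file, service_file)
# -> installed as /etc/init.d/cinder-api and /lib/systemd/system/cinder-api.service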
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@ -1573,3 +1726,82 @@ def pausable_restart_on_change(restart_map, stopstart=False,
restart_functions)
return wrapped_f
return wrap
def config_flags_parser(config_flags):
"""Parses config flags string into dict.
This parsing method supports a few different formats for the config
flag values to be parsed:
1. A string in the simple format of key=value pairs, with the possibility
of specifying multiple key value pairs within the same string. For
example, a string in the format of 'key1=value1, key2=value2' will
return a dict of:
{'key1': 'value1',
'key2': 'value2'}.
2. A string in the above format, but supporting a comma-delimited list
of values for the same key. For example, a string in the format of
'key1=value1, key2=value3,value4,value5' will return a dict of:
{'key1': 'value1',
'key2': 'value3,value4,value5'}
3. A string containing a colon character (:) prior to an equal
character (=) will be treated as yaml and parsed as such. This can be
used to specify more complex key value pairs. For example,
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
return a dict of:
{'key1': 'subkey1=value1, subkey2=value2'}
The provided config_flags string is expected to be a comma-separated list of
key=value pairs, and an individual value may itself be a comma-separated list.
"""
# If we find a colon before an equals sign then treat it as yaml.
# Note: limit it to finding the colon first since this indicates assignment
# for inline yaml.
colon = config_flags.find(':')
equals = config_flags.find('=')
if colon > 0:
if colon < equals or equals < 0:
return yaml.safe_load(config_flags)
if config_flags.find('==') >= 0:
juju_log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in range(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
juju_log("Invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
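A brief usage sketch of the parser documented above (assuming the synced module is importable from the charm); the inputs mirror the docstring examples:

from charmhelpers.contrib.openstack.utils import config_flags_parser

print(config_flags_parser('key1=value1, key2=value2'))
# {'key1': 'value1', 'key2': 'value2'}
print(config_flags_parser('key1=value1, key2=value3,value4,value5'))
# {'key1': 'value1', 'key2': 'value3,value4,value5'}
print(config_flags_parser('key1: subkey1=value1, subkey2=value2'))
# {'key1': 'subkey1=value1, subkey2=value2'}   (colon before '=' -> parsed as yaml)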


@ -40,6 +40,7 @@ from subprocess import (
CalledProcessError,
)
from charmhelpers.core.hookenv import (
config,
local_unit,
relation_get,
relation_ids,
@ -64,6 +65,7 @@ from charmhelpers.fetch import (
)
from charmhelpers.core.kernel import modprobe
from charmhelpers.contrib.openstack.utils import config_flags_parser
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
@ -166,12 +168,19 @@ class Pool(object):
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
check_call(pool_forward_cmd)
# Flush the cache and wait for it to return
check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
@ -221,6 +230,10 @@ class ReplicatedPool(Pool):
self.name, str(self.pg_num)]
try:
check_call(cmd)
# Set the pool replica size
update_pool(client=self.service,
pool=self.name,
settings={'size': str(self.replicas)})
except CalledProcessError:
raise
@ -604,7 +617,7 @@ def pool_exists(service, name):
except CalledProcessError:
return False
return name in out
return name in out.split()
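The switch to out.split() above avoids substring false positives when one pool name is a prefix of another; a minimal illustration:

out = 'foobar\nbaz\n'        # hypothetical pool listing output
print('foo' in out)          # True  -- old check wrongly reports pool 'foo' as existing
print('foo' in out.split())  # False -- new check matches whole names only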
def get_osds(service):
@ -1193,3 +1206,42 @@ def send_request_if_needed(request, relation='ceph'):
for rid in relation_ids(relation):
log('Sending request {}'.format(request.request_id), level=DEBUG)
relation_set(relation_id=rid, broker_req=request.request)
class CephConfContext(object):
"""Ceph config (ceph.conf) context.
Supports user-provided Ceph configuration settings. Users can provide a
dictionary as the value for the config-flags charm option containing
Ceph configuration settings keyed by their section in ceph.conf.
"""
def __init__(self, permitted_sections=None):
self.permitted_sections = permitted_sections or []
def __call__(self):
conf = config('config-flags')
if not conf:
return {}
conf = config_flags_parser(conf)
if type(conf) != dict:
log("Provided config-flags is not a dictionary - ignoring",
level=WARNING)
return {}
permitted = self.permitted_sections
if permitted:
diff = set(conf.keys()).difference(set(permitted))
if diff:
log("Config-flags contains invalid keys '%s' - they will be "
"ignored" % (', '.join(diff)), level=WARNING)
ceph_conf = {}
for key in conf:
if permitted and key not in permitted:
log("Ignoring key '%s'" % key, level=WARNING)
continue
ceph_conf[key] = conf[key]
return ceph_conf
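A hedged usage sketch (it assumes a hook context where config('config-flags') is readable) showing how a charm might restrict user-supplied sections:

# With config-flags set to 'global: {osd max object name len: 256}':
ctxt = CephConfContext(permitted_sections=['global', 'mon', 'osd'])()
# -> {'global': {'osd max object name len': 256}}; any non-permitted top-level
#    keys would be logged at WARNING level and dropped.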


@ -64,8 +64,8 @@ def is_device_mounted(device):
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
is_partition = bool(re.search(r".*[0-9]+\b", device))
out = check_output(['mount']).decode('UTF-8')
if is_partition:
return bool(re.search(device + r"\b", out))
return bool(re.search(device + r"[0-9]*\b", out))
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
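The lsblk-based check above keys off the MOUNTPOINT field in the key="value" pairs that `lsblk -P` prints; a self-contained illustration against sample lines of that shape (the sample strings are assumptions, not captured output):

import re

mounted = 'NAME="vdb1" RM="0" SIZE="10G" RO="0" TYPE="part" MOUNTPOINT="/srv/data"'
print(bool(re.search(r'MOUNTPOINT=".+"', mounted)))    # True  -> reported as mounted

unmounted = 'NAME="vdb" RM="0" SIZE="10G" RO="0" TYPE="disk" MOUNTPOINT=""'
print(bool(re.search(r'MOUNTPOINT=".+"', unmounted)))  # False -> not mounted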


@ -128,11 +128,8 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
def systemv_services_running():
output = subprocess.check_output(
['service', '--status-all'],
stderr=subprocess.STDOUT).decode('UTF-8')
return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name):
@ -140,22 +137,22 @@ def service_running(service_name):
if init_is_systemd():
return service('is-active', service_name)
else:
try:
output = subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
output = subprocess.check_output(
['status', service_name],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if "start/running" in output:
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
if service_name in systemv_services_running():
return True
return False
return service('status', service_name)
return False
def service_available(service_name):
@ -179,7 +176,7 @@ def init_is_systemd():
def adduser(username, password=None, shell='/bin/bash', system_user=False,
primary_group=None, secondary_groups=None):
primary_group=None, secondary_groups=None, uid=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
@ -190,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if system_user or password is None:
cmd.append('--system')
else:
@ -233,14 +236,58 @@ def user_exists(username):
return user_exists
def add_group(group_name, system_group=False):
"""Add a group to the system"""
def uid_exists(uid):
"""Check if a uid exists"""
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
"""Check if a group exists"""
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
"""Check if a gid exists"""
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for group being created
:returns: The group database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if gid:
cmd.extend(['--gid', str(gid)])
if system_group:
cmd.append('--system')
else:


@ -106,6 +106,14 @@ CLOUD_ARCHIVE_POCKETS = {
'mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
# Newton
'newton': 'xenial-updates/newton',
'xenial-newton': 'xenial-updates/newton',
'xenial-newton/updates': 'xenial-updates/newton',
'xenial-updates/newton': 'xenial-updates/newton',
'newton/proposed': 'xenial-proposed/newton',
'xenial-newton/proposed': 'xenial-proposed/newton',
'xenial-proposed/newton': 'xenial-proposed/newton',
}
# The order of this list is very important. Handlers should be listed in from
@ -390,16 +398,13 @@ def install_remote(source, *args, **kwargs):
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source, *args, **kwargs)
return handler.install(source, *args, **kwargs)
except UnhandledSource as e:
log('Install source attempt unsuccessful: {}'.format(e),
level='WARNING')
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
raise UnhandledSource("No handler found for source {}".format(source))
def install_from_config(config_var_name):


@ -42,15 +42,23 @@ class BzrUrlFetchHandler(BaseFetchHandler):
else:
return True
def branch(self, source, dest):
def branch(self, source, dest, revno=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
cmd_opts = []
if revno:
cmd_opts += ['-r', str(revno)]
if os.path.exists(dest):
check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
cmd = ['bzr', 'pull']
cmd += cmd_opts
cmd += ['--overwrite', '-d', dest, source]
else:
check_call(['bzr', 'branch', source, dest])
cmd = ['bzr', 'branch']
cmd += cmd_opts
cmd += [source, dest]
check_call(cmd)
def install(self, source, dest=None):
def install(self, source, dest=None, revno=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
@ -59,10 +67,11 @@ class BzrUrlFetchHandler(BaseFetchHandler):
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
if dest and not os.path.exists(dest):
mkdir(dest, perms=0o755)
try:
self.branch(source, dest_dir)
self.branch(source, dest_dir, revno)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir


@ -43,9 +43,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.openstack = openstack
self.source = source
self.stable = stable
# Note(coreycb): this needs to be changed when new next branches come
# out.
self.current_next = "trusty"
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
@ -72,38 +69,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the lp:~openstack-charmers namespace
base_charms = ['mysql', 'mongodb', 'nrpe']
# Force these charms to current series even when using an older series.
# ie. Use trusty/nrpe even when series is precise, as the P charm
# does not possess the necessary external master config and hooks.
force_series_current = ['nrpe']
if self.series in ['precise', 'trusty']:
base_series = self.series
else:
base_series = self.current_next
# Charms outside the ~openstack-charmers namespace
base_charms = {
'mysql': ['precise', 'trusty'],
'mongodb': ['precise', 'trusty'],
'nrpe': ['precise', 'trusty'],
}
for svc in other_services:
if svc['name'] in force_series_current:
base_series = self.current_next
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if self.stable:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
if svc['name'] in base_charms:
temp = 'lp:charms/{}/{}'
svc['location'] = temp.format(base_series,
svc['name'])
else:
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
svc['location'] = temp.format(self.current_next,
svc['name'])
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services