upgrade: Fix pike to queens upgrade

apt.version_compare changed behaviour to return >= 1 in the event
that a newer package version is detected.
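
A minimal sketch of the comparison semantics (using the python-apt
apt_pkg bindings that back the charmhelpers apt shim; versions are
illustrative): the result is only guaranteed to be negative, zero or
positive, so an exact == 1 test can miss genuinely newer versions.

import apt_pkg

apt_pkg.init()
# A positive result means the first version is newer; the magnitude
# carries no meaning, hence testing >= 1 (i.e. > 0) instead of == 1.
print(apt_pkg.version_compare('2:13.0.0-0ubuntu1', '2:12.0.0-0ubuntu1'))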

Resync charmhelpers code to pick up fixes for OpenStack upgrade
detection.

Change-Id: Iacf8db9bbacca782646584a4982b265937b63b9e
Closes-Bug: 1817384
James Page 2019-03-05 11:48:16 +00:00
parent b7c4897438
commit 976391951e
10 changed files with 889 additions and 99 deletions

View File

@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
nested = subparser.add_subparsers()
get_cmd = nested.add_parser('get', help='Retrieve data')
get_cmd.add_argument('key', help='Key to retrieve the value of')
get_cmd.set_defaults(action='get', value=None)
getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
getrange_cmd.add_argument('key', metavar='prefix',
help='Prefix of the keys to retrieve')
getrange_cmd.set_defaults(action='getrange', value=None)
set_cmd = nested.add_parser('set', help='Store data')
set_cmd.add_argument('key', help='Key to set')
set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
def _unitdata_cmd(action, key, value):
if action == 'get':
return unitdata.kv().get(key)
elif action == 'getrange':
return unitdata.kv().getrange(key)
elif action == 'set':
unitdata.kv().set(key, value)
unitdata.kv().flush()

View File

@@ -0,0 +1,212 @@
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenStack Security Audit code"""
import collections
from enum import Enum
import traceback
from charmhelpers.core.host import cmp_pkgrevno
import charmhelpers.contrib.openstack.utils as openstack_utils
import charmhelpers.core.hookenv as hookenv
class AuditType(Enum):
OpenStackSecurityGuide = 1
_audits = {}
Audit = collections.namedtuple('Audit', 'func filters')
def audit(*args):
"""Decorator to register an audit.
These are used to generate audits that can be run on a
deployed system that matches the given configuration.
:param args: List of functions to filter tests against
:type args: List[Callable[Dict]]
"""
def wrapper(f):
test_name = f.__name__
if _audits.get(test_name):
raise RuntimeError(
"Test name '{}' used more than once"
.format(test_name))
non_callables = [fn for fn in args if not callable(fn)]
if non_callables:
raise RuntimeError(
"Configuration includes non-callable filters: {}"
.format(non_callables))
_audits[test_name] = Audit(func=f, filters=args)
return f
return wrapper
def is_audit_type(*args):
"""This audit is included in the specified kinds of audits.
:param *args: List of AuditTypes to include this audit in
:type args: List[AuditType]
:rtype: Callable[Dict]
"""
def _is_audit_type(audit_options):
if audit_options.get('audit_type') in args:
return True
else:
return False
return _is_audit_type
def since_package(pkg, pkg_version):
"""This audit should be run after the specified package version (incl).
:param pkg: Package name to compare
:type pkg: str
:param pkg_version: The package version
:type pkg_version: str
:rtype: Callable[Dict]
"""
def _since_package(audit_options=None):
return cmp_pkgrevno(pkg, pkg_version) >= 0
return _since_package
def before_package(pkg, pkg_version):
"""This audit should be run before the specified package version (excl).
:param pkg: Package name to compare
:type pkg: str
:param pkg_version: The package version
:type pkg_version: str
:rtype: Callable[Dict]
"""
def _before_package(audit_options=None):
return not since_package(pkg, pkg_version)()
return _before_package
def since_openstack_release(pkg, release):
"""This audit should run after the specified OpenStack version (incl).
:param pkg: Package name to compare
:type pkg: str
:param release: The OpenStack release codename
:type release: str
:rtype: Callable[Dict]
"""
def _since_openstack_release(audit_options=None):
_release = openstack_utils.get_os_codename_package(pkg)
return openstack_utils.CompareOpenStackReleases(_release) >= release
return _since_openstack_release
def before_openstack_release(pkg, release):
"""This audit should run before the specified OpenStack version (excl).
:param pkg: Package name to compare
:type pkg: str
:param release: The OpenStack release codename
:type release: str
:rtype: Callable[Dict]
"""
def _before_openstack_release(audit_options=None):
return not since_openstack_release(pkg, release)()
return _before_openstack_release
def it_has_config(config_key):
"""This audit should be run based on specified config keys.
:param config_key: Config key to look for
:type config_key: str
:rtype: Callable[Dict]
"""
def _it_has_config(audit_options):
return audit_options.get(config_key) is not None
return _it_has_config
def run(audit_options):
"""Run the configured audits with the specified audit_options.
:param audit_options: Configuration for the audit
:type audit_options: Config
:rtype: Dict[str, str]
"""
errors = {}
results = {}
for name, audit in sorted(_audits.items()):
result_name = name.replace('_', '-')
if result_name in audit_options.get('excludes', []):
print(
"Skipping {} because it is"
"excluded in audit config"
.format(result_name))
continue
if all(p(audit_options) for p in audit.filters):
try:
audit.func(audit_options)
print("{}: PASS".format(name))
results[result_name] = {
'success': True,
}
except AssertionError as e:
print("{}: FAIL ({})".format(name, e))
results[result_name] = {
'success': False,
'message': e,
}
except Exception as e:
print("{}: ERROR ({})".format(name, e))
errors[name] = e
results[result_name] = {
'success': False,
'message': e,
}
for name, error in errors.items():
print("=" * 20)
print("Error in {}: ".format(name))
traceback.print_tb(error.__traceback__)
print()
return results
def action_parse_results(result):
"""Parse the result of `run` in the context of an action.
:param result: The result of running the security-checklist
action on a unit
:type result: Dict[str, Dict[str, str]]
:rtype: int
"""
passed = True
for test, result in result.items():
if result['success']:
hookenv.action_set({test: 'PASS'})
else:
hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
passed = False
if not passed:
hookenv.action_fail("One or more tests failed")
return 0 if passed else 1
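
A hypothetical audit built on the framework above (the audit body and the
options dict are invented for illustration):

from charmhelpers.contrib.openstack.audits import (
    AuditType,
    audit,
    is_audit_type,
    run,
)

@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def config_is_present(audit_options):
    # Audits signal failure by raising AssertionError.
    assert audit_options.get('config_path'), "config_path is not set"

results = run({'audit_type': AuditType.OpenStackSecurityGuide,
               'config_path': '/etc/keystone'})
# -> {'config-is-present': {'success': True}}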

View File

@@ -0,0 +1,303 @@
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import configparser
import glob
import os.path
import subprocess
from charmhelpers.contrib.openstack.audits import (
audit,
AuditType,
# filters
is_audit_type,
it_has_config,
)
from charmhelpers.core.hookenv import (
cached,
)
FILE_ASSERTIONS = {
'barbican': {
# From security guide
'/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
'/etc/barbican/barbican-api-paste.ini':
{'group': 'barbican', 'mode': '640'},
'/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
},
'ceph-mon': {
'/var/lib/charm/ceph-mon/ceph.conf':
{'owner': 'root', 'group': 'root', 'mode': '644'},
'/etc/ceph/ceph.client.admin.keyring':
{'owner': 'ceph', 'group': 'ceph'},
'/etc/ceph/rbdmap': {'mode': '644'},
'/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
'/var/lib/ceph/bootstrap-*/ceph.keyring':
{'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
},
'ceph-osd': {
'/var/lib/charm/ceph-osd/ceph.conf':
{'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
'/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
'/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
'/var/lib/ceph/bootstrap-*/ceph.keyring':
{'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
'/var/lib/ceph/radosgw':
{'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
},
'cinder': {
# From security guide
'/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
'/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
'/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
},
'glance': {
# From security guide
'/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-registry-paste.ini':
{'group': 'glance', 'mode': '640'},
'/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
'/etc/glance/glance-swift-store.conf':
{'group': 'glance', 'mode': '640'},
'/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
'/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
'/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
},
'keystone': {
# From security guide
'/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/keystone-paste.ini':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/certs/signing_cert.pem':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/private/signing_key.pem':
{'group': 'keystone', 'mode': '640'},
'/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
},
'manila': {
# From security guide
'/etc/manila/manila.conf': {'group': 'manila', 'mode': '640'},
'/etc/manila/api-paste.ini': {'group': 'manila', 'mode': '640'},
'/etc/manila/policy.json': {'group': 'manila', 'mode': '640'},
'/etc/manila/rootwrap.conf': {'group': 'manila', 'mode': '640'},
},
'neutron-gateway': {
'/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
'/etc/neutron/rootwrap.conf': {'mode': '640'},
'/etc/neutron/rootwrap.d': {'mode': '755'},
'/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
},
'neutron-api': {
# From security guide
'/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
'/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
'/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
# Additional validations
'/etc/neutron/rootwrap.d': {'mode': '755'},
'/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
'/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
'/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
},
'nova-cloud-controller': {
# From security guide
'/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
'/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
'/etc/nova/*': {'group': 'nova', 'mode': '640'},
# Additional validations
'/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
},
'nova-compute': {
# From security guide
'/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
'/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
# Additional Validations
'/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
'/etc/nova/nm.conf': {'mode': '644'},
'/etc/nova/*': {'group': 'nova', 'mode': '640'},
},
'openstack-dashboard': {
# From security guide
'/etc/openstack-dashboard/local_settings.py':
{'group': 'horizon', 'mode': '640'},
},
}
Ownership = collections.namedtuple('Ownership', 'owner group mode')
@cached
def _stat(file):
"""
Get the Ownership information from a file.
:param file: The path to a file to stat
:type file: str
:returns: owner, group, and mode of the specified file
:rtype: Ownership
:raises subprocess.CalledProcessError: If the underlying stat fails
"""
out = subprocess.check_output(
['stat', '-c', '%U %G %a', file]).decode('utf-8')
return Ownership(*out.strip().split(' '))
@cached
def _config_ini(path):
"""
Parse an INI file.
:param path: The path to a file to parse
:type path: str
:returns: Configuration contained in path
:rtype: Dict
"""
conf = configparser.ConfigParser()
conf.read(path)
return dict(conf)
def _validate_file_ownership(owner, group, file_name):
"""
Validate that a specified file is owned by `owner:group`.
:param owner: Name of the owner
:type owner: str
:param group: Name of the group
:type group: str
:param file_name: Path to the file to verify
:type file_name: str
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
assert False, "Specified file does not exist: {}".format(file_name)
assert owner == ownership.owner, \
"{} has an incorrect owner: {} should be {}".format(
file_name, ownership.owner, owner)
assert group == ownership.group, \
"{} has an incorrect group: {} should be {}".format(
file_name, ownership.group, group)
print("Validate ownership of {}: PASS".format(file_name))
def _validate_file_mode(mode, file_name):
"""
Validate that a specified file has the specified permissions.
:param mode: The desired file mode
:type mode: str
:param file_name: Path to the file to verify
:type file_name: str
"""
try:
ownership = _stat(file_name)
except subprocess.CalledProcessError as e:
print("Error reading file: {}".format(e))
assert False, "Specified file does not exist: {}".format(file_name)
assert mode == ownership.mode, \
"{} has an incorrect mode: {} should be {}".format(
file_name, ownership.mode, mode)
print("Validate mode of {}: PASS".format(file_name))
@cached
def _config_section(config, section):
"""Read the configuration file and return a section."""
path = os.path.join(config.get('config_path'), config.get('config_file'))
conf = _config_ini(path)
return conf.get(section)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
it_has_config('files'))
def validate_file_ownership(config):
"""Verify that configuration files are owned by the correct user/group."""
files = config.get('files', {})
for file_name, options in files.items():
for key in options.keys():
if key not in ["owner", "group", "mode"]:
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
owner = options.get('owner', config.get('owner', 'root'))
group = options.get('group', config.get('group', 'root'))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_ownership(owner, group, file)
else:
if os.path.isfile(file_name):
_validate_file_ownership(owner, group, file_name)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
it_has_config('files'))
def validate_file_permissions(config):
"""Verify that permissions on configuration files are secure enough."""
files = config.get('files', {})
for file_name, options in files.items():
for key in options.keys():
if key not in ["owner", "group", "mode"]:
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
mode = options.get('mode', config.get('permissions', '600'))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
if os.path.isfile(file):
_validate_file_mode(mode, file)
else:
if os.path.isfile(file_name):
_validate_file_mode(mode, file_name)
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_keystone(audit_options):
"""Validate that the service uses Keystone for authentication."""
section = _config_section(audit_options, 'DEFAULT')
assert section is not None, "Missing section 'DEFAULT'"
assert section.get('auth_strategy') == "keystone", \
"Application is not using Keystone"
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_keystone(audit_options):
"""Verify that TLS is used to communicate with Keystone."""
section = _config_section(audit_options, 'keystone_authtoken')
assert section is not None, "Missing section 'keystone_authtoken'"
assert not section.get('insecure') and \
"https://" in section.get("auth_uri"), \
"TLS is not used for Keystone"
@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_glance(audit_options):
"""Verify that TLS is used to communicate with Glance."""
section = _config_section(audit_options, 'glance')
assert section is not None, "Missing section 'glance'"
assert not section.get('insecure') and \
"https://" in section.get("api_servers"), \
"TLS is not used for Glance"

View File

@@ -29,6 +29,7 @@ from charmhelpers.fetch import (
filter_installed_packages,
)
from charmhelpers.core.hookenv import (
NoNetworkBinding,
config,
is_relation_made,
local_unit,
@@ -868,7 +869,7 @@ class ApacheSSLContext(OSContextGenerator):
addr = network_get_primary_address(
ADDRESS_MAP[net_type]['binding']
)
except NotImplementedError:
except (NotImplementedError, NoNetworkBinding):
addr = fallback
endpoint = resolve_address(net_type)

View File

@@ -13,6 +13,7 @@
# limitations under the License.
from charmhelpers.core.hookenv import (
NoNetworkBinding,
config,
unit_get,
service_name,
@@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except NotImplementedError:
except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:
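
The guard used above, sketched standalone (the binding name and fallback
value are placeholders):

from charmhelpers.core.hookenv import (
    NoNetworkBinding,
    network_get_primary_address,
)

try:
    addr = network_get_primary_address('public')  # hypothetical binding
except (NotImplementedError, NoNetworkBinding):
    # Older Juju without network-get, or a space with no binding bound.
    addr = '10.0.0.10'  # hypothetical fallback address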

View File

@@ -0,0 +1,10 @@
[oslo_messaging_rabbit]
{% if rabbitmq_ha_queues -%}
rabbit_ha_queues = True
{% endif -%}
{% if rabbit_ssl_port -%}
ssl = True
{% endif -%}
{% if rabbit_ssl_ca -%}
ssl_ca_file = {{ rabbit_ssl_ca }}
{% endif -%}
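
A render sketch of the section template above (the template file name,
loader path and context values are assumptions; charms normally render
these snippets via charmhelpers' templating layer):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
print(env.get_template('section-oslo-messaging-rabbit').render(
    rabbitmq_ha_queues=True,
    rabbit_ssl_port=5671,
    rabbit_ssl_ca='/etc/ssl/certs/rabbit-ca.pem'))
# [oslo_messaging_rabbit]
# rabbit_ha_queues = True
# ssl = True
# ssl_ca_file = /etc/ssl/certs/rabbit-ca.pem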

View File

@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
['2.19.0']),
['2.20.0']),
])
# >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
return apt.version_compare(avail_vers, cur_vers) == 1
return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):

View File

@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
service_stop,
service_running,
umount,
cmp_pkgrevno,
)
from charmhelpers.fetch import (
apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
if version >= '10.1':
if cmp_pkgrevno('ceph-common', '10.1') >= 0:
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
@@ -196,7 +196,8 @@ class Pool(object):
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
device_class=None):
"""Return the number of placement groups to use when creating the pool.
Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
increased. NOTE: the default is primarily to handle the scenario
where related charms requiring pools have not been upgraded to
include an update to indicate their relative usage of the pools.
:param device_class: str. class of storage to use for basis of pgs
calculation; ceph supports nvme, ssd and hdd by default based
on presence of devices of each type in the deployment.
:return: int. The number of pgs to use.
"""
@@ -243,17 +247,20 @@ class Pool(object):
# If the expected-osd-count is specified, then use the max between
# the expected-osd-count and the actual osd_count
osd_list = get_osds(self.service)
osd_list = get_osds(self.service, device_class)
expected = config('expected-osd-count') or 0
if osd_list:
osd_count = max(expected, len(osd_list))
if device_class:
osd_count = len(osd_list)
else:
osd_count = max(expected, len(osd_list))
# Log a message to provide some insight if the calculations claim
# to be off because someone is setting the expected count and
# there are more OSDs in reality. Try to make a proper guess
# based upon the cluster itself.
if expected and osd_count != expected:
if not device_class and expected and osd_count != expected:
log("Found more OSDs than provided expected count. "
"Using the actual count instead", INFO)
elif expected:
@@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
raise
# max_bytes should be an int or long
def set_pool_quota(service, pool_name, max_bytes):
def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
:param service: six.string_types. The Ceph user name to run the command under
:param pool_name: six.string_types
:param max_bytes: int or long
:return: None. Can raise CalledProcessError
:param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError
"""
# Set a byte quota on a RADOS pool in ceph.
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
'max_bytes', str(max_bytes)]
try:
check_call(cmd)
except CalledProcessError:
raise
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd)
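# Usage sketch for the new signature (service name, pool and quota values
# are illustrative):
#     set_pool_quota('admin', 'mypool', max_bytes=1024 ** 4)    # 1 TiB cap
#     set_pool_quota('admin', 'mypool', max_objects=1000000)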
def remove_pool_quota(service, pool_name):
@@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name):
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
locality=None, durability_estimator=None):
locality=None, durability_estimator=None,
device_class=None):
"""
Create a new erasure code profile if one does not already exist for it. Updates
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
:param device_class: six.string_types
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
# failure_domain changed in luminous
if version and version >= '12.0.0':
if luminous_or_later:
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# device class new in luminous
if luminous_or_later and device_class:
cmd.append('crush-device-class={}'.format(device_class))
else:
log('Skipping device class configuration (ceph < 12.0.0)',
level=DEBUG)
# Add plugin specific information
if locality is not None:
# For local erasure codes
@@ -744,20 +762,26 @@ def pool_exists(service, name):
return name in out.split()
def get_osds(service):
def get_osds(service, device_class=None):
"""Return a list of all Ceph Object Storage Daemons currently in the
cluster.
cluster (optionally filtered by storage device class).
:param device_class: Class of storage device for OSDs
:type device_class: str
"""
version = ceph_version()
if version and version >= '0.56':
luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
if luminous_or_later and device_class:
out = check_output(['ceph', '--id', service,
'osd', 'crush', 'class',
'ls-osd', device_class,
'--format=json'])
else:
out = check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json'])
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
return None
if six.PY3:
out = out.decode('UTF-8')
return json.loads(out)
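# Usage sketch (device class name illustrative): on Luminous or later,
#     get_osds('admin', device_class='ssd')
# returns only OSDs in the 'ssd' CRUSH device class, while
#     get_osds('admin')
# returns every OSD in the cluster.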
def install():
@@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
:raises: CalledProcessError if ceph call fails
"""
if ceph_version() >= '12.0.0':
if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
@@ -1091,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
return True
def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
output = check_output(cmd)
if six.PY3:
output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
else:
return None
else:
return None
class CephBrokerRq(object):
"""Ceph broker request.
@@ -1147,14 +1155,47 @@ class CephBrokerRq(object):
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
"""Adds an operation to create a pool.
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None, max_objects=None):
"""DEPRECATED: Use ``add_op_create_replicated_pool()`` or
``add_op_create_erasure_pool()`` instead.
"""
return self.add_op_create_replicated_pool(
name, replica_count=replica_count, pg_num=pg_num, weight=weight,
group=group, namespace=namespace, app_name=app_name,
max_bytes=max_bytes, max_objects=max_objects)
@param pg_num setting: optional setting. If not provided, this value
will be calculated by the broker based on how many OSDs are in the
cluster at the time of creation. Note that, if provided, this value
will be capped at the current available maximum.
@param weight: the percentage of data the pool makes up
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None,
app_name=None, max_bytes=None,
max_objects=None):
"""Adds an operation to create a replicated pool.
:param name: Name of pool to create
:type name: str
:param replica_count: Number of copies Ceph should keep of your data.
:type replica_count: int
:param pg_num: Request specific number of Placement Groups to create
for pool.
:type pg_num: int
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
Used to calculate number of Placement Groups to create
for pool.
:type weight: float
:param group: Group to add pool to
:type group: str
:param namespace: Group namespace
:type namespace: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1203,41 @@ class CephBrokerRq(object):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight, 'group': group,
'group-namespace': namespace})
'group-namespace': namespace, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def add_op_create_erasure_pool(self, name, erasure_profile=None,
weight=None, group=None, app_name=None,
max_bytes=None, max_objects=None):
"""Adds an operation to create a erasure coded pool.
:param name: Name of pool to create
:type name: str
:param erasure_profile: Name of erasure code profile to use. If not
set the ceph-mon unit handling the broker
request will set its default value.
:type erasure_profile: str
:param weight: The percentage of data that is expected to be contained
in the pool from the total available space on the OSDs.
:type weight: float
:param group: Group to add pool to
:type group: str
:param app_name: (Optional) Tag pool with application name. Note that
there are certain protocols emerging upstream with
regard to meaningful application names to use.
Examples are ``rbd`` and ``rgw``.
:type app_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
"""
self.ops.append({'op': 'create-pool', 'name': name,
'pool-type': 'erasure',
'erasure-profile': erasure_profile,
'weight': weight,
'group': group, 'app-name': app_name,
'max-bytes': max_bytes, 'max-objects': max_objects})
def set_ops(self, ops):
"""Set request ops to provided value.

View File

@ -50,6 +50,11 @@ TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
'This may not be compatible with software you are '
'running in your shell.')
cache = {}
@@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
def env_proxy_settings(selected_settings=None):
"""Get proxy settings from process environment variables.
Get charm proxy settings from environment variables that correspond to
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
see lp:1782236) in a format suitable for passing to an application that
reacts to proxy settings passed as environment variables. Some applications
support lowercase or uppercase notation (e.g. curl), some support only
lowercase (e.g. wget); there are also subjectively rare cases of only
uppercase notation support. no_proxy CIDR and wildcard support also varies
between runtimes and applications as there is no enforced standard.
Some applications may connect to multiple destinations and expose config
options that would affect only proxy settings for a specific destination;
these should be handled in charms in an application-specific manner.
:param selected_settings: format only a subset of possible settings
:type selected_settings: list
:rtype: Optional[Dict[str, str]]
"""
SUPPORTED_SETTINGS = {
'http': 'HTTP_PROXY',
'https': 'HTTPS_PROXY',
'no_proxy': 'NO_PROXY',
'ftp': 'FTP_PROXY'
}
if selected_settings is None:
selected_settings = SUPPORTED_SETTINGS
selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
if k in selected_settings]
proxy_settings = {}
for var in selected_vars:
var_val = os.getenv(var)
if var_val:
proxy_settings[var] = var_val
proxy_settings[var.lower()] = var_val
# Now handle juju-prefixed environment variables. The legacy vs new
# environment variable usage is mutually exclusive
charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
if charm_var_val:
proxy_settings[var] = charm_var_val
proxy_settings[var.lower()] = charm_var_val
if 'no_proxy' in proxy_settings:
if _contains_range(proxy_settings['no_proxy']):
log(RANGE_WARNING, level=WARNING)
return proxy_settings if proxy_settings else None
def _contains_range(addresses):
"""Check for cidr or wildcard domain in a string.
Given a string comprising a comma separated list of ip addresses
and domain names, determine whether the string contains IP ranges
or wildcard domains.
:param addresses: comma separated list of domains and ip addresses.
:type addresses: str
"""
return (
# Test for cidr (e.g. 10.20.20.0/24)
"/" in addresses or
# Test for wildcard domains (*.foo.com or .foo.com)
"*" in addresses or
addresses.startswith(".") or
",." in addresses or
" ." in addresses)

View File

@@ -19,15 +19,14 @@ import re
import six
import time
import subprocess
from tempfile import NamedTemporaryFile
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.core.host import get_distrib_codename
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
env_proxy_settings,
)
from charmhelpers.fetch import SourceConfigError, GPGKeyError
@@ -303,12 +302,17 @@ def import_key(key):
"""Import an ASCII Armor key.
A Radix64 format keyid is also supported for backwards
compatibility, but should never be used; the key retrieval
mechanism is insecure and subject to man-in-the-middle attacks
voiding all signature checks using that key.
compatibility. In this case the Ubuntu keyserver will be
queried for a key via HTTPS by its keyid. This method
is less preferable because HTTPS proxy servers may
require traffic decryption which is equivalent to a
man-in-the-middle attack (a proxy server impersonates
keyserver TLS certificates and has to be explicitly
trusted by the system).
:param keyid: The key in ASCII armor format,
including BEGIN and END markers.
:param key: A GPG key in ASCII armor format,
including BEGIN and END markers or a keyid.
:type key: (bytes, str)
:raises: GPGKeyError if the key could not be imported
"""
key = key.strip()
@@ -319,35 +323,131 @@ def import_key(key):
log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
'-----END PGP PUBLIC KEY BLOCK-----' in key):
log("Importing ASCII Armor PGP key", level=DEBUG)
with NamedTemporaryFile() as keyfile:
with open(keyfile.name, 'w') as fd:
fd.write(key)
fd.write("\n")
cmd = ['apt-key', 'add', keyfile.name]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
error = "Error importing PGP key '{}'".format(key)
log(error)
raise GPGKeyError(error)
log("Writing provided PGP key in the binary format", level=DEBUG)
if six.PY3:
key_bytes = key.encode('utf-8')
else:
key_bytes = key
key_name = _get_keyid_by_gpg_key(key_bytes)
key_gpg = _dearmor_gpg_key(key_bytes)
_write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
else:
raise GPGKeyError("ASCII armor markers missing from GPG key")
else:
# We should only send things obviously not a keyid offsite
# via this unsecured protocol, as it may be a secret or part
# of one.
log("PGP key found (looks like Radix64 format)", level=WARNING)
log("INSECURLY importing PGP key from keyserver; "
log("SECURELY importing PGP key from keyserver; "
"full key not provided.", level=WARNING)
cmd = ['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
try:
_run_with_retries(cmd)
except subprocess.CalledProcessError:
error = "Error importing PGP key '{}'".format(key)
log(error)
raise GPGKeyError(error)
# as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
# to retrieve GPG keys. `apt-key adv` command is deprecated as is
# apt-key in general as noted in its manpage. See lp:1433761 for more
# history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
# gpg keys.
key_asc = _get_key_by_keyid(key)
# write the key in GPG format so that apt-key list shows it
key_gpg = _dearmor_gpg_key(key_asc)
_write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
def _get_keyid_by_gpg_key(key_material):
"""Get a GPG key fingerprint by GPG key material.
Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
or binary GPG key material. Can be used, for example, to generate file
names for keys passed via charm options.
:param key_material: ASCII armor-encoded or binary GPG key material
:type key_material: bytes
:raises: GPGKeyError if invalid key material has been provided
:returns: A GPG key fingerprint
:rtype: str
"""
# Use the same gpg command for both Xenial and Bionic
cmd = 'gpg --with-colons --with-fingerprint'
ps = subprocess.Popen(cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_material)
if six.PY3:
out = out.decode('utf-8')
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material provided')
# from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
def _get_key_by_keyid(keyid):
"""Get a key via HTTPS from the Ubuntu keyserver.
Different key ID formats are supported by SKS keyservers (the longer ones
are more secure, see "dead beef attack" and https://evil32.com/). Since
HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
impersonate keyserver.ubuntu.com and generate a certificate with
keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
certificate. If such proxy behavior is expected it is necessary to add the
CA certificate chain containing the intermediate CA of the SSLBump proxy to
every machine that this code runs on via ca-certs cloud-init directive (via
cloudinit-userdata model-config) or via other means (such as through a
custom charm option). Also note that DNS resolution for the hostname in a
URL is done at a proxy server - not at the client side.
8-digit (32 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
16-digit (64 bit) key ID
https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
40-digit key ID:
https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
:param keyid: An 8, 16 or 40 hex digit keyid to find a key for
:type keyid: (bytes, str)
:returns: A key material for the specified GPG key id
:rtype: (str, bytes)
:raises: subprocess.CalledProcessError
"""
# options=mr - machine-readable output (disables html wrappers)
keyserver_url = ('https://keyserver.ubuntu.com'
'/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
curl_cmd = ['curl', keyserver_url.format(keyid)]
# use proxy server settings in order to retrieve the key
return subprocess.check_output(curl_cmd,
env=env_proxy_settings(['https']))
def _dearmor_gpg_key(key_asc):
"""Converts a GPG key in the ASCII armor format to the binary format.
:param key_asc: A GPG key in ASCII armor format.
:type key_asc: (str, bytes)
:returns: A GPG key in binary format
:rtype: (str, bytes)
:raises: GPGKeyError
"""
ps = subprocess.Popen(['gpg', '--dearmor'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = ps.communicate(input=key_asc)
# no need to decode output as it is binary (invalid utf-8), only error
if six.PY3:
err = err.decode('utf-8')
if 'gpg: no valid OpenPGP data found.' in err:
raise GPGKeyError('Invalid GPG key material. Check your network setup'
' (MTU, routing, DNS) and/or proxy server settings'
' as well as destination keyserver status.')
else:
return out
def _write_apt_gpg_keyfile(key_name, key_material):
"""Writes GPG key material into a file at a provided path.
:param key_name: A key name to use for a key file (could be a fingerprint)
:type key_name: str
:param key_material: A GPG key material (binary)
:type key_material: (str, bytes)
"""
with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
'wb') as keyf:
keyf.write(key_material)
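# Pipeline sketch tying the helpers above together (the armored key text
# is a placeholder, not real key material):
#     key_bytes = armored_key_text.encode('utf-8')
#     fingerprint = _get_keyid_by_gpg_key(key_bytes)
#     key_gpg = _dearmor_gpg_key(key_bytes)
#     _write_apt_gpg_keyfile(key_name=fingerprint, key_material=key_gpg)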
def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False):
def _add_proposed():
"""Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
Uses get_distrib_codename to determine the correct stanza for
the deb line.
For Intel architectures PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
release = lsb_release()['DISTRIB_CODENAME']
release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +561,16 @@ def _add_apt_repository(spec):
"""Add the spec using add_apt_repository
:param spec: the parameter to pass to add_apt_repository
:type spec: str
"""
if '{series}' in spec:
series = lsb_release()['DISTRIB_CODENAME']
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec])
# software-properties package for bionic properly reacts to proxy settings
# passed as environment variables (See lp:1433761). This is not the case for
# LTS and non-LTS releases below bionic.
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https']))
def _add_cloud_pocket(pocket):
@@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release):
:raises: SourceConfigError if the release is not the same as the ubuntu
release.
"""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
ubuntu_rel = get_distrib_codename()
if release != ubuntu_rel:
raise SourceConfigError(
'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'