Pre-release charm-helpers sync 16.10

Get each charm up to date with lp:charm-helpers for release testing.

Change-Id: I93569f02dad733e75869df21dc125429b17ec2e5
David Ames 2016-09-30 08:49:39 -07:00
parent b311aedeb8
commit 6ba551b13e
5 changed files with 111 additions and 13 deletions


@@ -1421,9 +1421,9 @@ class InternalEndpointContext(OSContextGenerator):
 class AppArmorContext(OSContextGenerator):
     """Base class for apparmor contexts."""
 
-    def __init__(self):
+    def __init__(self, profile_name=None):
         self._ctxt = None
-        self.aa_profile = None
+        self.aa_profile = profile_name
         self.aa_utils_packages = ['apparmor-utils']
 
     @property
@@ -1442,6 +1442,8 @@ class AppArmorContext(OSContextGenerator):
         if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
            ctxt = {'aa_profile_mode': config('aa-profile-mode'),
                    'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
+            if self.aa_profile:
+                ctxt['aa_profile'] = self.aa_profile
         else:
             ctxt = None
         return ctxt
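AppArmorContext now accepts the profile name at construction time instead of requiring subclasses to assign aa_profile after instantiation. A minimal sketch of how a consuming charm might use this (the subclass and profile name are illustrative, not part of this sync):

    from charmhelpers.contrib.openstack.context import AppArmorContext


    class NovaComputeAppArmorContext(AppArmorContext):
        # Hand the profile name to the base class; the context code above
        # then adds 'aa_profile' alongside 'aa_profile_mode' and
        # 'ubuntu_release' whenever aa-profile-mode is a valid value.
        def __init__(self):
            super(NovaComputeAppArmorContext, self).__init__(
                profile_name='usr.bin.nova-compute')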


@@ -151,7 +151,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('mitaka',
         ['2.5.0', '2.6.0', '2.7.0']),
     ('newton',
-        ['2.8.0', '2.9.0']),
+        ['2.8.0', '2.9.0', '2.10.0']),
 ])
 
 # >= Liberty version->codename mapping
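SWIFT_CODENAMES maps each OpenStack release codename to the Swift point releases it ships, so adding 2.10.0 lets the helpers recognise newton-era Swift packages. A simplified sketch of the reverse lookup the helpers perform (not a verbatim copy of the charm-helpers function):

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('mitaka', ['2.5.0', '2.6.0', '2.7.0']),
        ('newton', ['2.8.0', '2.9.0', '2.10.0']),
    ])


    def swift_codename(version):
        # e.g. '2.10.0' -> 'newton' once this sync lands
        for codename, releases in SWIFT_CODENAMES.items():
            if version in releases:
                return codename
        return None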


@@ -936,6 +936,18 @@ def get_mds_key(name):
                          caps=mds_caps)
 
 
+_mds_bootstrap_caps_profile = {
+    'mon': [
+        'allow profile bootstrap-mds'
+    ]
+}
+
+
+def get_mds_bootstrap_key():
+    return get_named_key('bootstrap-mds',
+                         _mds_bootstrap_caps_profile)
+
+
 _default_caps = collections.OrderedDict([
     ('mon', ['allow rw']),
     ('osd', ['allow rwx']),
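This mirrors the existing bootstrap keys: the new helper asks the monitor cluster for a client.bootstrap-mds key limited to the bootstrap-mds mon profile, which an mds unit can then use to register itself. Roughly (assuming get_named_key wraps 'ceph auth get-or-create', as it does for the other named keys in this module):

    # Equivalent, by hand, to something like:
    #   ceph auth get-or-create client.bootstrap-mds \
    #       mon 'allow profile bootstrap-mds'
    bootstrap_key = get_mds_bootstrap_key()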


@@ -392,6 +392,44 @@ def handle_rgw_create_user(request, service):
         return {'exit-code': 1, 'stderr': err.output}
 
 
+def handle_create_cephfs(request, service):
+    """
+    Create a new cephfs.
+
+    :param request: The broker request
+    :param service: The cephx user to run this command under
+    :return:
+    """
+    cephfs_name = request.get('mds_name')
+    data_pool = request.get('data_pool')
+    metadata_pool = request.get('metadata_pool')
+    # Check if the user params were provided
+    if not cephfs_name or not data_pool or not metadata_pool:
+        msg = "Missing mds_name, data_pool or metadata_pool params"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+    # Sanity check that the required pools exist
+    if not pool_exists(service=service, name=data_pool):
+        msg = "CephFS data pool does not exist. Cannot create CephFS"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    if not pool_exists(service=service, name=metadata_pool):
+        msg = "CephFS metadata pool does not exist. Cannot create CephFS"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    # Finally create CephFS
+    try:
+        check_output(["ceph",
+                      '--id', service,
+                      "fs", "new", cephfs_name,
+                      metadata_pool,
+                      data_pool])
+    except CalledProcessError as err:
+        log(err.output, level=ERROR)
+        return {'exit-code': 1, 'stderr': err.output}
+
+
 def handle_rgw_region_set(request, service):
     # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1
     json_file = request.get('region-json')
@@ -448,7 +486,8 @@ def process_requests_v1(reqs):
                 ret = handle_erasure_pool(request=req, service=svc)
             else:
                 ret = handle_replicated_pool(request=req, service=svc)
+        elif op == "create-cephfs":
+            ret = handle_create_cephfs(request=req, service=svc)
         elif op == "create-cache-tier":
             ret = handle_create_cache_tier(request=req, service=svc)
         elif op == "remove-cache-tier":
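With the dispatch entry in place, a client charm can request a filesystem through the broker. A hedged example of the v1 request envelope (fs and pool names are illustrative; both pools must already exist or the handler returns exit-code 1):

    import json

    broker_req = json.dumps({
        'api-version': 1,
        'ops': [{
            'op': 'create-cephfs',
            'mds_name': 'cephfs1',
            'data_pool': 'cephfs_data',
            'metadata_pool': 'cephfs_metadata',
        }],
    })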


@@ -98,8 +98,47 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
         return other_services
 
-    def _add_services(self, this_service, other_services):
-        """Add services to the deployment and set openstack-origin/source."""
+    def _add_services(self, this_service, other_services, use_source=None,
+                      no_origin=None):
+        """Add services to the deployment and optionally set
+        openstack-origin/source.
+
+        :param this_service dict: Service dictionary describing the service
+            whose amulet tests are being run
+        :param other_services dict: List of service dictionaries describing
+            the services needed to support the target
+            service
+        :param use_source list: List of services which use the 'source' config
+            option rather than 'openstack-origin'
+        :param no_origin list: List of services which do not support setting
+            the Cloud Archive.
+        Service Dict:
+            {
+                'name': str charm-name,
+                'units': int number of units,
+                'constraints': dict of juju constraints,
+                'location': str location of charm,
+            }
+        eg
+        this_service = {
+            'name': 'openvswitch-odl',
+            'constraints': {'mem': '8G'},
+        }
+        other_services = [
+            {
+                'name': 'nova-compute',
+                'units': 2,
+                'constraints': {'mem': '4G'},
+                'location': cs:~bob/xenial/nova-compute
+            },
+            {
+                'name': 'mysql',
+                'constraints': {'mem': '2G'},
+            },
+            {'neutron-api-odl'}]
+        use_source = ['mysql']
+        no_origin = ['neutron-api-odl']
+        """
         self.log.info('OpenStackAmuletDeployment: adding services')
 
         other_services = self._determine_branch_locations(other_services)
@@ -110,16 +149,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         services = other_services
         services.append(this_service)
 
+        use_source = use_source or []
+        no_origin = no_origin or []
+
         # Charms which should use the source config option
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
+        use_source = list(set(
+            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
+                          'ceph-proxy']))
 
         # Charms which can not use openstack-origin, ie. many subordinates
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
-                     'cinder-backup', 'nexentaedge-data',
-                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
-                     'cinder-nexentaedge', 'nexentaedge-mgmt']
+        no_origin = list(set(
+            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
+                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
+                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
+                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
 
         if self.openstack:
             for svc in services:
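The built-in lists thus become defaults a test can extend rather than hard-coded policy. A sketch of a caller inside an OpenStackAmuletDeployment-based test, reusing the names from the docstring example above:

    this_service = {'name': 'openvswitch-odl', 'constraints': {'mem': '8G'}}
    other_services = [
        {'name': 'mysql', 'constraints': {'mem': '2G'}},
        {'name': 'neutron-api-odl'},
    ]
    # 'mysql' is merged into the built-in use_source set and
    # 'neutron-api-odl' into no_origin, so neither charm gets an
    # openstack-origin setting applied.
    self._add_services(this_service, other_services,
                       use_source=['mysql'],
                       no_origin=['neutron-api-odl'])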