Enable Bionic as a gate test

Change the Bionic functional test from dev to gate for the 18.05 release.

Change-Id: I40453750781f1d38e24177fb32de274f503db736
David Ames 2018-05-08 12:00:17 -07:00
parent 5229518702
commit 524a41d45e
19 changed files with 348 additions and 58 deletions

View File

@ -102,6 +102,8 @@ def add_ovsbridge_linuxbridge(name, bridge):
log('Interface {} already exists'.format(interface), level=INFO)
return
check_for_eni_source()
with open('/etc/network/interfaces.d/{}.cfg'.format(
linuxbridge_port), 'w') as config:
config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
@ -155,9 +157,40 @@ def get_certificate():
return None
def check_for_eni_source():
''' Juju removes the source line when setting up interfaces;
replace it if missing. '''
with open('/etc/network/interfaces', 'r') as eni:
for line in eni:
if line == 'source /etc/network/interfaces.d/*':
return
with open('/etc/network/interfaces', 'a') as eni:
eni.write('\nsource /etc/network/interfaces.d/*')
def full_restart():
''' Full restart and reload of openvswitch '''
if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
service('start', 'openvswitch-force-reload-kmod')
else:
service('force-reload-kmod', 'openvswitch-switch')
def enable_ipfix(bridge, target):
'''Enable IPfix on bridge to target.
:param bridge: Bridge to monitor
:param target: IPfix remote endpoint
'''
cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
'--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
log('Enabling IPfix on {}.'.format(bridge))
subprocess.check_call(cmd)
def disable_ipfix(bridge):
'''Disable IPfix on the target bridge.
:param bridge: Bridge to modify
'''
cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
subprocess.check_call(cmd)
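
A minimal sketch of how a charm hook might drive the new helpers above; the import path, the br-int bridge name and the collector endpoint are assumptions for illustration, not taken from this diff.

# Hypothetical hook snippet driving the IPfix helpers added above.
from charmhelpers.contrib.network.ovs import enable_ipfix, disable_ipfix

def configure_ipfix(target):
    # target is an illustrative "host:port" collector, e.g. "10.0.0.10:4739";
    # an empty value switches monitoring off again.
    if target:
        enable_ipfix('br-int', target)
    else:
        disable_ipfix('br-int')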

View File

@ -797,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator):
key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename),
content=b64decode(cert))
content=b64decode(cert), perms=0o640)
write_file(path=os.path.join(ssl_dir, key_filename),
content=b64decode(key))
content=b64decode(key), perms=0o640)
def configure_ca(self):
ca_cert = get_ca_cert()
@ -1873,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator):
context is needed to do that before rendering a template.
'''
def __init__(self, dirname):
def __init__(self, dirname, **kwargs):
'''Used merely to ensure that a given directory exists.'''
self.dirname = dirname
self.kwargs = kwargs
def __call__(self):
mkdir(self.dirname)
mkdir(self.dirname, **self.kwargs)
return {}
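
Illustrative use of the extended EnsureDirContext; the directory path and the ownership/permission keywords are assumptions, chosen only to show the kwargs being forwarded to mkdir.

# Hypothetical: ensure a config drop-in directory exists with tight perms.
from charmhelpers.contrib.openstack.context import EnsureDirContext

ctxt = EnsureDirContext('/etc/myservice/conf.d',
                        owner='myservice', group='myservice', perms=0o750)
ctxt()  # mkdir() now receives the extra keyword arguments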

View File

@ -0,0 +1,5 @@
[oslo_middleware]
# Bug #1758675
enable_proxy_headers_parsing = true

View File

@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
{% if notification_topics -%}
topics = {{ notification_topics }}
{% endif -%}
{% if notification_format -%}
notification_format = {{ notification_format }}
{% endif -%}
{% endif -%}

View File

@ -306,7 +306,7 @@ def get_os_codename_install_source(src):
if src.startswith('cloud:'):
ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
ca_rel = ca_rel.split('-')[1].split('/')[0]
return ca_rel
# Best guess match based on deb string provided
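
A standalone sketch of the parsing rule changed above: for 'cloud:' sources the OpenStack codename is now taken from whatever follows the first '-' in the pocket name, independent of the Ubuntu series. The helper name and sample source strings below are illustrative.

def _codename_from_cloud_source(src):
    # mirrors the new split logic shown in the hunk above
    ca_rel = src.split(':')[1]                 # e.g. 'bionic-queens/proposed'
    return ca_rel.split('-')[1].split('/')[0]  # -> 'queens'

assert _codename_from_cloud_source('cloud:xenial-queens') == 'queens'
assert _codename_from_cloud_source('cloud:bionic-queens/proposed') == 'queens'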

View File

@ -0,0 +1,126 @@
# Copyright 2018 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import charmhelpers.contrib.openstack.alternatives as alternatives
import charmhelpers.contrib.openstack.context as context
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as host
import charmhelpers.core.templating as templating
import charmhelpers.core.unitdata as unitdata
VAULTLOCKER_BACKEND = 'charm-vaultlocker'
class VaultKVContext(context.OSContextGenerator):
"""Vault KV context for interaction with vault-kv interfaces"""
interfaces = ['secrets-storage']
def __init__(self, secret_backend=None):
super(context.OSContextGenerator, self).__init__()
self.secret_backend = (
secret_backend or 'charm-{}'.format(hookenv.service_name())
)
def __call__(self):
db = unitdata.kv()
last_token = db.get('last-token')
secret_id = db.get('secret-id')
for relation_id in hookenv.relation_ids(self.interfaces[0]):
for unit in hookenv.related_units(relation_id):
data = hookenv.relation_get(unit=unit,
rid=relation_id)
vault_url = data.get('vault_url')
role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
token = data.get('{}_token'.format(hookenv.local_unit()))
if all([vault_url, role_id, token]):
token = json.loads(token)
vault_url = json.loads(vault_url)
# Tokens may change when secret_id's are being
# reissued - if so use token to get new secret_id
if token != last_token:
secret_id = retrieve_secret_id(
url=vault_url,
token=token
)
db.set('secret-id', secret_id)
db.set('last-token', token)
db.flush()
ctxt = {
'vault_url': vault_url,
'role_id': json.loads(role_id),
'secret_id': secret_id,
'secret_backend': self.secret_backend,
}
vault_ca = data.get('vault_ca')
if vault_ca:
ctxt['vault_ca'] = json.loads(vault_ca)
self.complete = True
return ctxt
return {}
def write_vaultlocker_conf(context, priority=100):
"""Write vaultlocker configuration to disk and install alternative
:param context: Dict of data from vault-kv relation
:ptype: context: dict
:param priority: Priority of alternative configuration
:ptype: priority: int"""
charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
hookenv.service_name()
)
host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
templating.render(source='vaultlocker.conf.j2',
target=charm_vl_path,
context=context, perms=0o600)
alternatives.install_alternative('vaultlocker.conf',
'/etc/vaultlocker/vaultlocker.conf',
charm_vl_path, priority)
def vault_relation_complete(backend=None):
"""Determine whether vault relation is complete
:param backend: Name of secrets backend requested
:ptype backend: string
:returns: whether the relation to vault is complete
:rtype: bool"""
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
# TODO: contrib a high level unwrap method to hvac that works
def retrieve_secret_id(url, token):
"""Retrieve a response-wrapped secret_id from Vault
:param url: URL to Vault Server
:ptype url: str
:param token: One shot Token to use
:ptype token: str
:returns: secret_id to use for Vault Access
:rtype: str"""
import hvac
client = hvac.Client(url=url, token=token)
response = client._post('/v1/sys/wrapping/unwrap')
if response.status_code == 200:
data = response.json()
return data['data']['secret_id']
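
A hypothetical charm-side use of the new vaultlocker helpers; the module path charmhelpers.contrib.openstack.vaultlocker is assumed (the file name is not shown above), and the flow is only a sketch of wiring the context into write_vaultlocker_conf.

import charmhelpers.contrib.openstack.vaultlocker as vaultlocker

def configure_encryption():
    # Gather relation data from the secrets-storage interface.
    ctxt = vaultlocker.VaultKVContext(
        secret_backend=vaultlocker.VAULTLOCKER_BACKEND)()
    if vaultlocker.vault_relation_complete():
        # Render /var/lib/charm/<service>/vaultlocker.conf and register it
        # as the /etc/vaultlocker/vaultlocker.conf alternative.
        vaultlocker.write_vaultlocker_conf(ctxt)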

View File

@ -291,7 +291,7 @@ class Pool(object):
class ReplicatedPool(Pool):
def __init__(self, service, name, pg_num=None, replicas=2,
percent_data=10.0):
percent_data=10.0, app_name=None):
super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas
if pg_num:
@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
self.pg_num = min(pg_num, max_pgs)
else:
self.pg_num = self.get_pgs(self.replicas, percent_data)
if app_name:
self.app_name = app_name
else:
self.app_name = 'unknown'
def create(self):
if not pool_exists(self.service, self.name):
@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
update_pool(client=self.service,
pool=self.name,
settings={'size': str(self.replicas)})
try:
set_app_name_for_pool(client=self.service,
pool=self.name,
name=self.app_name)
except CalledProcessError:
log('Could not set app name for pool {}'.format(self.name), level=WARNING)
except CalledProcessError:
raise
@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
# Default jerasure erasure coded pool
class ErasurePool(Pool):
def __init__(self, service, name, erasure_code_profile="default",
percent_data=10.0):
percent_data=10.0, app_name=None):
super(ErasurePool, self).__init__(service=service, name=name)
self.erasure_code_profile = erasure_code_profile
self.percent_data = percent_data
if app_name:
self.app_name = app_name
else:
self.app_name = 'unknown'
def create(self):
if not pool_exists(self.service, self.name):
@ -355,6 +369,12 @@ class ErasurePool(Pool):
'erasure', self.erasure_code_profile]
try:
check_call(cmd)
try:
set_app_name_for_pool(client=self.service,
pool=self.name,
name=self.app_name)
except CalledProcessError:
log('Could not set app name for pool {}'.format(self.name), level=WARNING)
except CalledProcessError:
raise
@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
check_call(cmd)
def set_app_name_for_pool(client, pool, name):
"""
Calls `osd pool application enable` for the specified pool name
:param client: Name of the ceph client to use
:type client: str
:param pool: Pool to set app name for
:type pool: str
:param name: app name for the specified pool
:type name: str
:raises: CalledProcessError if ceph call fails
"""
if ceph_version() >= '12.0.0':
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
def create_pool(service, name, replicas=3, pg_num=None):
"""Create a new RADOS pool."""
if pool_exists(service, name):
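
Illustrative creation of a pool with the new app_name argument; the client name, pool name and application tag are placeholders, and the module path charmhelpers.contrib.storage.linux.ceph is assumed.

from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

pool = ReplicatedPool(service='admin', name='glance',
                      replicas=3, app_name='rbd')
pool.create()  # on Ceph >= 12.0.0 the pool is also tagged with 'rbd'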

View File

@ -67,3 +67,19 @@ def is_device_mounted(device):
except Exception:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
:param device: Full path to device to format
:ptype device: str
:param force: Force operation
:ptype: force: boolean"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', 'size=1024', device]
check_call(cmd)
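
A minimal sketch pairing the new mkfs_xfs helper with the existing is_device_mounted check; the device path is a placeholder, and force=True will reformat a device that already carries a filesystem.

from charmhelpers.contrib.storage.linux.utils import is_device_mounted, mkfs_xfs

dev = '/dev/vdb'  # hypothetical block device
if not is_device_mounted(dev):
    mkfs_xfs(dev, force=True)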

View File

@ -290,7 +290,7 @@ class Config(dict):
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
if os.path.exists(self.path) and os.stat(self.path).st_size:
self.load_previous()
atexit(self._implicit_save)
@ -310,7 +310,11 @@ class Config(dict):
"""
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
try:
self._prev_dict = json.load(f)
except ValueError as e:
log('Unable to parse previous config data - {}'.format(str(e)),
level=ERROR)
for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self:
self[k] = v
@ -354,22 +358,40 @@ class Config(dict):
self.save()
@cached
_cache_config = None
def config(scope=None):
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
else:
config_cmd_line.append('--all')
config_cmd_line.append('--format=json')
"""
Get the juju charm configuration (scope==None) or individual key,
(scope=str). The returned value is a Python data structure loaded as
JSON from the Juju config command.
:param scope: If set, return the value for the specified key.
:type scope: Optional[str]
:returns: Either the whole config as a Config, or a key from it.
:rtype: Any
"""
global _cache_config
config_cmd_line = ['config-get', '--all', '--format=json']
try:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
# JSON Decode Exception for Python3.5+
exc_json = json.decoder.JSONDecodeError
except AttributeError:
# JSON Decode Exception for Python2.7 through Python3.4
exc_json = ValueError
try:
if _cache_config is None:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
_cache_config = Config(config_data)
if scope is not None:
return config_data
return Config(config_data)
except ValueError:
return _cache_config.get(scope)
return _cache_config
except (exc_json, UnicodeDecodeError) as e:
log('Unable to parse output from config-get: config_cmd_line="{}" '
'message="{}"'
.format(config_cmd_line, str(e)), level=ERROR)
return None
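
Sketch of how the rewritten accessor behaves from a charm's point of view; the 'debug' option name is an assumption for illustration.

from charmhelpers.core.hookenv import config

cfg = config()            # first call runs config-get --all once and caches it
debug = config('debug')   # scoped lookups are now answered from the cache
if cfg is None:
    pass  # config-get output could not be parsed; handle the error path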

View File

@ -307,7 +307,9 @@ class PortManagerCallback(ManagerCallback):
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
new_ports = service.get('ports', [])
# turn this generator into a list,
# as we'll be going over it multiple times
new_ports = list(service.get('ports', []))
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:
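
Why the list() wrap above matters: a generator can only be walked once, so a second pass over service['ports'] would silently yield nothing. A self-contained illustration:

ports = (p for p in (80, 443))      # a generator, as a charm might supply
first_pass = list(ports)            # [80, 443]
second_pass = list(ports)           # [] -- the generator is already exhausted
assert first_pass == [80, 443] and second_pass == []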

View File

@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
:param sysctl_dict: a dict or YAML-formatted string of sysctl
options eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:returns: None
"""
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
level=ERROR)
return
if type(sysctl_dict) is not dict:
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
level=ERROR)
return
else:
sysctl_dict_parsed = sysctl_dict
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items():
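
With the change above, both call styles below should be accepted; the sysctl key, value and target file are illustrative.

from charmhelpers.core.sysctl import create

create({'net.ipv4.ip_forward': 1}, '/etc/sysctl.d/90-charm.conf')       # dict
create("{ 'net.ipv4.ip_forward': 1 }", '/etc/sysctl.d/90-charm.conf')   # YAML string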

View File

@ -166,6 +166,10 @@ class Storage(object):
To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded.
Note: to facilitate unit testing, ':memory:' can be passed as the
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
"""
def __init__(self, path=None):
self.db_path = path
@ -175,8 +179,9 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
if self.db_path != ':memory:':
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
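
Hypothetical unit-test usage of the in-memory mode enabled above; no .unit-state.db file is created on disk.

from charmhelpers.core.unitdata import Storage

store = Storage(path=':memory:')
store.set('seen', True)
assert store.get('seen') is True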

View File

@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
'x86_64': PROPOSED_POCKET,
'ppc64le': PROPOSED_PORTS_POCKET,
'aarch64': PROPOSED_PORTS_POCKET,
's390x': PROPOSED_PORTS_POCKET,
}
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

View File

@ -290,7 +290,7 @@ class Config(dict):
self.implicit_save = True
self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path):
if os.path.exists(self.path) and os.stat(self.path).st_size:
self.load_previous()
atexit(self._implicit_save)
@ -310,7 +310,11 @@ class Config(dict):
"""
self.path = path or self.path
with open(self.path) as f:
self._prev_dict = json.load(f)
try:
self._prev_dict = json.load(f)
except ValueError as e:
log('Unable to parse previous config data - {}'.format(str(e)),
level=ERROR)
for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self:
self[k] = v
@ -354,22 +358,40 @@ class Config(dict):
self.save()
@cached
_cache_config = None
def config(scope=None):
"""Juju charm configuration"""
config_cmd_line = ['config-get']
if scope is not None:
config_cmd_line.append(scope)
else:
config_cmd_line.append('--all')
config_cmd_line.append('--format=json')
"""
Get the juju charm configuration (scope==None) or individual key,
(scope=str). The returned value is a Python data structure loaded as
JSON from the Juju config command.
:param scope: If set, return the value for the specified key.
:type scope: Optional[str]
:returns: Either the whole config as a Config, or a key from it.
:rtype: Any
"""
global _cache_config
config_cmd_line = ['config-get', '--all', '--format=json']
try:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
# JSON Decode Exception for Python3.5+
exc_json = json.decoder.JSONDecodeError
except AttributeError:
# JSON Decode Exception for Python2.7 through Python3.4
exc_json = ValueError
try:
if _cache_config is None:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
_cache_config = Config(config_data)
if scope is not None:
return config_data
return Config(config_data)
except ValueError:
return _cache_config.get(scope)
return _cache_config
except (exc_json, UnicodeDecodeError) as e:
log('Unable to parse output from config-get: config_cmd_line="{}" '
'message="{}"'
.format(config_cmd_line, str(e)), level=ERROR)
return None

View File

@ -307,7 +307,9 @@ class PortManagerCallback(ManagerCallback):
"""
def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name)
new_ports = service.get('ports', [])
# turn this generator into a list,
# as we'll be going over it multiple times
new_ports = list(service.get('ports', []))
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file):
with open(port_file) as fp:

View File

@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
:param sysctl_dict: a dict or YAML-formatted string of sysctl
options eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
:returns: None
"""
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
level=ERROR)
return
if type(sysctl_dict) is not dict:
try:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
except yaml.YAMLError:
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
level=ERROR)
return
else:
sysctl_dict_parsed = sysctl_dict
with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items():

View File

@ -166,6 +166,10 @@ class Storage(object):
To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded.
Note: to facilitate unit testing, ':memory:' can be passed as the
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
"""
def __init__(self, path=None):
self.db_path = path
@ -175,8 +179,9 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
if self.db_path != ':memory:':
with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None

View File

@ -61,7 +61,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
[testenv:func27-dfs]
# Charm Functional Test