From 558a95df94016a4982142f68d78146e4c2217ad7 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 14 Feb 2023 21:26:00 +0000 Subject: [PATCH] Add Antelope support * sync charm-helpers to classic charms * change openstack-origin/source default to antelope * align testing with antelope * add new antelope bundles * add antelope bundles to tests.yaml * add antelope tests to osci.yaml and .zuul.yaml * update build-on and run-on bases Change-Id: Ibceaa9d30d3d4a9a4e67fe5e45446b94f1d2e064 --- .zuul.yaml | 2 +- charmcraft.yaml | 3 + charmhelpers/contrib/hahelpers/cluster.py | 2 +- charmhelpers/contrib/network/ip.py | 2 +- charmhelpers/contrib/openstack/context.py | 149 +++++++-- charmhelpers/contrib/openstack/ha/utils.py | 29 ++ charmhelpers/contrib/openstack/ip.py | 25 ++ .../contrib/openstack/ssh_migrations.py | 4 +- .../contrib/openstack/templates/haproxy.cfg | 5 + .../templates/section-keystone-authtoken | 2 + .../openstack/templates/section-service-user | 11 + charmhelpers/contrib/openstack/utils.py | 7 +- charmhelpers/contrib/openstack/vaultlocker.py | 7 +- charmhelpers/contrib/storage/linux/utils.py | 21 +- charmhelpers/core/host.py | 2 +- charmhelpers/core/host_factory/ubuntu.py | 1 + charmhelpers/core/unitdata.py | 11 +- charmhelpers/fetch/ubuntu.py | 38 ++- metadata.yaml | 1 + osci.yaml | 32 +- test-requirements.txt | 1 + tests/bundles/jammy-antelope-ec.yaml | 303 ++++++++++++++++++ tests/bundles/jammy-antelope.yaml | 293 +++++++++++++++++ ...my-yoga-ec.yaml => lunar-antelope-ec.yaml} | 2 +- .../{jammy-yoga.yaml => lunar-antelope.yaml} | 2 +- tests/tests.yaml | 16 +- tox.ini | 2 +- 27 files changed, 897 insertions(+), 76 deletions(-) create mode 100644 charmhelpers/contrib/openstack/templates/section-service-user create mode 100644 tests/bundles/jammy-antelope-ec.yaml create mode 100644 tests/bundles/jammy-antelope.yaml rename tests/bundles/{jammy-yoga-ec.yaml => lunar-antelope-ec.yaml} (99%) rename tests/bundles/{jammy-yoga.yaml => lunar-antelope.yaml} (99%) diff --git a/.zuul.yaml b/.zuul.yaml index 23bf5f6..fd20909 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -1,4 +1,4 @@ - project: templates: - - openstack-python3-charm-zed-jobs + - openstack-python3-charm-jobs - openstack-cover-jobs diff --git a/charmcraft.yaml b/charmcraft.yaml index 595efe2..8d5c061 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -32,3 +32,6 @@ bases: - name: ubuntu channel: "22.10" architectures: [amd64, s390x, ppc64el, arm64] + - name: ubuntu + channel: "23.04" + architectures: [amd64, s390x, ppc64el, arm64] diff --git a/charmhelpers/contrib/hahelpers/cluster.py b/charmhelpers/contrib/hahelpers/cluster.py index 146beba..ffda5fe 100644 --- a/charmhelpers/contrib/hahelpers/cluster.py +++ b/charmhelpers/contrib/hahelpers/cluster.py @@ -324,7 +324,7 @@ def valid_hacluster_config(): ''' vip = config_get('vip') dns = config_get('dns-ha') - if not(bool(vip) ^ bool(dns)): + if not (bool(vip) ^ bool(dns)): msg = ('HA: Either vip or dns-ha must be set but not both in order to ' 'use high availability') status_set('blocked', msg) diff --git a/charmhelpers/contrib/network/ip.py b/charmhelpers/contrib/network/ip.py index f8edf37..cf9926b 100644 --- a/charmhelpers/contrib/network/ip.py +++ b/charmhelpers/contrib/network/ip.py @@ -539,7 +539,7 @@ def port_has_listener(address, port): """ cmd = ['nc', '-z', address, str(port)] result = subprocess.call(cmd) - return not(bool(result)) + return not (bool(result)) def assert_charm_supports_ipv6(): diff --git a/charmhelpers/contrib/openstack/context.py 
b/charmhelpers/contrib/openstack/context.py index 970a657..d894b6a 100644 --- a/charmhelpers/contrib/openstack/context.py +++ b/charmhelpers/contrib/openstack/context.py @@ -25,6 +25,7 @@ import socket import time from base64 import b64decode +from distutils.version import LooseVersion from subprocess import ( check_call, check_output, @@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( from charmhelpers.fetch import ( apt_install, filter_installed_packages, + get_installed_version, ) from charmhelpers.core.hookenv import ( NoNetworkBinding, @@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import ( network_get_primary_address, WARNING, service_name, + remote_service_name, ) from charmhelpers.core.sysctl import create as sysctl_create @@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' ADDRESS_TYPES = ['admin', 'internal', 'public'] HAPROXY_RUN_DIR = '/var/run/haproxy/' DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" +DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404 def ensure_packages(packages): @@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir): class IdentityServiceContext(OSContextGenerator): + _forward_compat_remaps = { + 'admin_user': 'admin-user-name', + 'service_username': 'service-user-name', + 'service_tenant': 'service-project-name', + 'service_tenant_id': 'service-project-id', + 'service_domain': 'service-domain-name', + } + def __init__(self, service=None, service_user=None, @@ -397,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator): # 'www_authenticate_uri' replaced 'auth_uri' since Stein, # see keystonemiddleware upstream sources for more info if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': - c.update(( - ('www_authenticate_uri', "{}://{}:{}/v3".format( - ctxt.get('service_protocol', ''), - ctxt.get('service_host', ''), - ctxt.get('service_port', ''))),)) + if 'public_auth_url' in ctxt: + c.update(( + ('www_authenticate_uri', '{}/v3'.format( + ctxt.get('public_auth_url'))),)) + else: + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) else: c.update(( ('auth_uri', "{}://{}:{}/v3".format( @@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator): ctxt.get('service_host', ''), ctxt.get('service_port', ''))),)) + if 'internal_auth_url' in ctxt: + c.update(( + ('auth_url', ctxt.get('internal_auth_url')),)) + else: + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))),)) + c.update(( - ('auth_url', "{}://{}:{}/v3".format( - ctxt.get('auth_protocol', ''), - ctxt.get('auth_host', ''), - ctxt.get('auth_port', ''))), ('project_domain_name', ctxt.get('admin_domain_name', '')), ('user_domain_name', ctxt.get('admin_domain_name', '')), ('project_name', ctxt.get('admin_tenant_name', '')), @@ -441,39 +464,86 @@ class IdentityServiceContext(OSContextGenerator): for rid in relation_ids(self.rel_name): self.related = True for unit in related_units(rid): + rdata = {} + # NOTE(jamespage): + # forwards compat with application data + # bag driven approach to relation. 
+ _adata = relation_get(rid=rid, app=remote_service_name(rid)) + adata = {} + # if no app data bag presented - fallback + # to legacy unit based relation data rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') + if _adata: + # New app data bag uses - instead of _ + # in key names - remap for compat with + # existing relation data keys + for key, value in _adata.items(): + if key == 'api-version': + adata[key.replace('-', '_')] = value.strip('v') + else: + adata[key.replace('-', '_')] = value + # Re-map some keys for backwards compatibility + for target, source in self._forward_compat_remaps.items(): + adata[target] = _adata.get(source) + # Now preferentially get data from the app data bag, but if + # it's not available, get it from the legacy based relation + # data. + + def _resolve(key): + return adata.get(key) or rdata.get(key) + + serv_host = _resolve('service_host') serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') + auth_host = _resolve('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - int_host = rdata.get('internal_host') + int_host = _resolve('internal_host') int_host = format_ipv6_addr(int_host) or int_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - int_protocol = rdata.get('internal_protocol') or 'http' - api_version = rdata.get('api_version') or '2.0' - ctxt.update({'service_port': rdata.get('service_port'), + svc_protocol = _resolve('service_protocol') or 'http' + auth_protocol = _resolve('auth_protocol') or 'http' + admin_role = _resolve('admin_role') or 'Admin' + int_protocol = _resolve('internal_protocol') or 'http' + api_version = _resolve('api_version') or '2.0' + ctxt.update({'service_port': _resolve('service_port'), 'service_host': serv_host, 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), + 'auth_port': _resolve('auth_port'), 'internal_host': int_host, - 'internal_port': rdata.get('internal_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), + 'internal_port': _resolve('internal_port'), + 'admin_tenant_name': _resolve('service_tenant'), + 'admin_user': _resolve('service_username'), + 'admin_password': _resolve('service_password'), + 'admin_role': admin_role, 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol, 'internal_protocol': int_protocol, 'api_version': api_version}) - if rdata.get('service_type'): - ctxt['service_type'] = rdata.get('service_type') + service_type = _resolve('service_type') + if service_type: + ctxt['service_type'] = service_type if float(api_version) > 2: ctxt.update({ - 'admin_domain_name': rdata.get('service_domain'), - 'service_project_id': rdata.get('service_tenant_id'), - 'service_domain_id': rdata.get('service_domain_id')}) + 'admin_domain_name': _resolve('service_domain'), + 'service_project_id': _resolve('service_tenant_id'), + 'service_domain_id': _resolve('service_domain_id')}) + + # NOTE: + # keystone-k8s operator presents full URLs + # for all three endpoints - public and internal are + # externally addressable for machine based charm + public_auth_url = _resolve('public_auth_url') + if public_auth_url: + ctxt.update({ + 'public_auth_url': public_auth_url, + }) + internal_auth_url = _resolve('internal_auth_url') + if internal_auth_url: + ctxt.update({ + 'internal_auth_url':
internal_auth_url, + }) # we keep all variables in ctxt for compatibility and # add nested dictionary for keystone_authtoken generic @@ -487,8 +557,8 @@ class IdentityServiceContext(OSContextGenerator): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - ctxt['admin_domain_id'] = rdata.get('service_domain_id') + ctxt['admin_tenant_id'] = _resolve('service_tenant_id') + ctxt['admin_domain_id'] = _resolve('service_domain_id') return ctxt return {} @@ -860,9 +930,14 @@ class HAProxyContext(OSContextGenerator): interfaces = ['cluster'] def __init__(self, singlenode_mode=False, - address_types=ADDRESS_TYPES): + address_types=None, + exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT): + if address_types is None: + address_types = ADDRESS_TYPES[:] + self.address_types = address_types self.singlenode_mode = singlenode_mode + self.exporter_stats_port = exporter_stats_port def __call__(self): if not os.path.isdir(HAPROXY_RUN_DIR): @@ -957,10 +1032,20 @@ class HAProxyContext(OSContextGenerator): db = kv() ctxt['stat_password'] = db.get('stat-password') if not ctxt['stat_password']: - ctxt['stat_password'] = db.set('stat-password', - pwgen(32)) + ctxt['stat_password'] = db.set('stat-password', pwgen(32)) db.flush() + # NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0 + # New bind will be created and a prometheus-exporter + # will be used for path /metrics. At the same time, + # prometheus-exporter avoids using auth. + haproxy_version = get_installed_version("haproxy") + if (haproxy_version and + haproxy_version.ver_str >= LooseVersion("2.0.0") and + is_relation_made("haproxy-exporter")): + ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter") + ctxt["stats_exporter_port"] = self.exporter_stats_port + for frontend in cluster_hosts: if (len(cluster_hosts[frontend]['backends']) > 1 or self.singlenode_mode): diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py index a5cbdf5..b4912c4 100644 --- a/charmhelpers/contrib/openstack/ha/utils.py +++ b/charmhelpers/contrib/openstack/ha/utils.py @@ -25,6 +25,7 @@ Helpers for high availability. import hashlib import json +import os import re @@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import ( config, status_set, DEBUG, + application_name, ) from charmhelpers.core.host import ( @@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict( VIP_GROUP_NAME = 'grp_{service}_vips' DNSHA_GROUP_NAME = 'grp_{service}_hostnames' +HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard" class DNSHAException(Exception): @@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data): relation_data['groups'] = { key: ' '.join(vip_group) } + + +def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard): + """Load grafana dashboard json model and insert prometheus datasource. + + :param prometheus_app_name: name of the 'prometheus' application that will + be used as datasource in grafana dashboard + :type prometheus_app_name: str + :param haproxy_dashboard: path to haproxy dashboard + :type haproxy_dashboard: str + :return: Grafana dashboard json model as a str.
+ :rtype: str + """ + from charmhelpers.contrib.templating import jinja + + dashboard_template = os.path.basename(haproxy_dashboard) + dashboard_template_dir = os.path.dirname(haproxy_dashboard) + app_name = application_name() + datasource = "{} - Juju generated source".format(prometheus_app_name) + return jinja.render(dashboard_template, + {"datasource": datasource, + "app_name": app_name, + "prometheus_app_name": prometheus_app_name}, + template_dir=dashboard_template_dir, + jinja_env_args={"variable_start_string": "<< ", + "variable_end_string": " >>"}) diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py index b8c94c5..2afad36 100644 --- a/charmhelpers/contrib/openstack/ip.py +++ b/charmhelpers/contrib/openstack/ip.py @@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import ( is_ipv6, get_ipv6_addr, resolve_network_cidr, + get_iface_for_address ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'): return unit_get(unit_get_fallback) +def get_invalid_vips(): + """Check if any of the provided vips are invalid. + A vip is invalid if it doesn't belong to the subnet in any interface. + If all vips are valid, this returns an empty list. + + :returns: A list of strings, where each string is an invalid vip address. + :rtype: list + """ + + clustered = is_clustered() + vips = config('vip') + if vips: + vips = vips.split() + invalid_vips = [] + + if clustered and vips: + for vip in vips: + iface_for_vip = get_iface_for_address(vip) + if iface_for_vip is None: + invalid_vips.append(vip) + + return invalid_vips + + def resolve_address(endpoint_type=PUBLIC, override=True): """Return unit address depending on net config. diff --git a/charmhelpers/contrib/openstack/ssh_migrations.py b/charmhelpers/contrib/openstack/ssh_migrations.py index 96b9f71..0512e3a 100644 --- a/charmhelpers/contrib/openstack/ssh_migrations.py +++ b/charmhelpers/contrib/openstack/ssh_migrations.py @@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None): for hosts_line in hosts: if hosts_line.rstrip(): known_hosts_list.append(hosts_line.rstrip()) - return(known_hosts_list) + return known_hosts_list def ssh_authorized_keys_lines(application_name, user=None): @@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None): for authkey_line in keys: if authkey_line.rstrip(): authorized_keys_list.append(authkey_line.rstrip()) - return(authorized_keys_list) + return authorized_keys_list def ssh_compute_remove(public_key, application_name, user=None): diff --git a/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charmhelpers/contrib/openstack/templates/haproxy.cfg index 626ecba..da2522f 100644 --- a/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -49,6 +49,11 @@ defaults listen stats bind {{ local_host }}:{{ stat_port }} +{%- if stats_exporter_host and stats_exporter_port %} + bind {{ stats_exporter_host }}:{{ stats_exporter_port }} + option http-use-htx + http-request use-service prometheus-exporter if { path /metrics } +{%- endif %} mode http stats enable stats hide-version diff --git a/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken index c9b0152..dbad506 100644 --- a/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ b/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ 
-12,4 +12,6 @@ signing_dir = {{ signing_dir }} {% if service_type -%} service_type = {{ service_type }} {% endif -%} +service_token_roles = {{ admin_role }} +service_token_roles_required = True {% endif -%} diff --git a/charmhelpers/contrib/openstack/templates/section-service-user b/charmhelpers/contrib/openstack/templates/section-service-user new file mode 100644 index 0000000..c740cc2 --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-service-user @@ -0,0 +1,11 @@ +{% if auth_host -%} +[service_user] +send_service_user_token = true +auth_type = password +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +project_domain_id = default +user_domain_id = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 1fa2814..3d52eb1 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -159,6 +159,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2021.2', 'xena'), ('2022.1', 'yoga'), ('2022.2', 'zed'), + ('2023.1', 'antelope'), ]) # The ugly duckling - must list releases oldest to newest @@ -1327,7 +1328,7 @@ def _check_listening_on_services_ports(services, test=False): @param test: default=False, if False, test for closed, otherwise open. @returns OrderedDict(service: [port-not-open, ...]...), [boolean] """ - test = not(not(test)) # ensure test is True or False + test = not (not (test)) # ensure test is True or False all_ports = list(itertools.chain(*services.values())) ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports] map_ports = OrderedDict() @@ -1583,7 +1584,7 @@ def is_unit_paused_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. - return not(not(kv.get('unit-paused'))) + return not (not (kv.get('unit-paused'))) except Exception: return False @@ -2181,7 +2182,7 @@ def is_unit_upgrading_set(): with unitdata.HookData()() as t: kv = t[0] # transform something truth-y into a Boolean. 
- return not(not(kv.get('unit-upgrading'))) + return not (not (kv.get('unit-upgrading'))) except Exception: return False diff --git a/charmhelpers/contrib/openstack/vaultlocker.py b/charmhelpers/contrib/openstack/vaultlocker.py index e5418c3..002bc57 100644 --- a/charmhelpers/contrib/openstack/vaultlocker.py +++ b/charmhelpers/contrib/openstack/vaultlocker.py @@ -173,7 +173,12 @@ def retrieve_secret_id(url, token): # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate if not isinstance(client.adapter, hvac.adapters.Request): client.adapter = hvac.adapters.Request(base_uri=url, token=token) - response = client._post('/v1/sys/wrapping/unwrap') + try: + # hvac == 1.0.0 has an API to unwrap with the user token + response = client.sys.unwrap() + except AttributeError: + # fallback to hvac < 1.0.0 + response = client._post('/v1/sys/wrapping/unwrap') if response.status_code == 200: data = response.json() return data['data']['secret_id'] diff --git a/charmhelpers/contrib/storage/linux/utils.py b/charmhelpers/contrib/storage/linux/utils.py index a356176..4d05b12 100644 --- a/charmhelpers/contrib/storage/linux/utils.py +++ b/charmhelpers/contrib/storage/linux/utils.py @@ -23,6 +23,12 @@ from subprocess import ( call ) +from charmhelpers.core.hookenv import ( + log, + WARNING, + INFO +) + def _luks_uuid(dev): """ @@ -110,7 +116,7 @@ def is_device_mounted(device): return bool(re.search(r'MOUNTPOINT=".+"', out)) -def mkfs_xfs(device, force=False, inode_size=1024): +def mkfs_xfs(device, force=False, inode_size=None): """Format device with XFS filesystem. By default this should fail if the device already has a filesystem on it. @@ -118,11 +124,20 @@ :ptype device: str :param force: Force operation :ptype force: boolean - :param inode_size: XFS inode size in bytes + :param inode_size: XFS inode size in bytes; if set to 0 or None, + the value used will be the XFS system default :ptype inode_size: int""" cmd = ['mkfs.xfs'] if force: cmd.append("-f") - cmd += ['-i', "size={}".format(inode_size), device] + if inode_size: + if inode_size >= 256 and inode_size <= 2048: + cmd += ['-i', "size={}".format(inode_size)] + else: + log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING) + else: + log("Using XFS filesystem with system default inode size.", level=INFO) + + cmd += [device] check_call(cmd) diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py index ef6c8ec..70dde6a 100644 --- a/charmhelpers/core/host.py +++ b/charmhelpers/core/host.py @@ -954,7 +954,7 @@ def pwgen(length=None): random_generator = random.SystemRandom() random_chars = [ random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) + return ''.join(random_chars) def is_phy_iface(interface): diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py index cc2d89f..a279d5b 100644 --- a/charmhelpers/core/host_factory/ubuntu.py +++ b/charmhelpers/core/host_factory/ubuntu.py @@ -31,6 +31,7 @@ UBUNTU_RELEASES = ( 'impish', 'jammy', 'kinetic', + 'lunar', ) diff --git a/charmhelpers/core/unitdata.py b/charmhelpers/core/unitdata.py index d9b8d0b..8f4bbc6 100644 --- a/charmhelpers/core/unitdata.py +++ b/charmhelpers/core/unitdata.py @@ -171,8 +171,9 @@ class Storage(object): path parameter which causes sqlite3 to only build the db in memory. This should only be used for testing purposes.
""" - def __init__(self, path=None): + def __init__(self, path=None, keep_revisions=False): self.db_path = path + self.keep_revisions = keep_revisions if path is None: if 'UNIT_STATE_DB' in os.environ: self.db_path = os.environ['UNIT_STATE_DB'] @@ -242,7 +243,7 @@ class Storage(object): Remove a key from the database entirely. """ self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) @@ -259,14 +260,14 @@ class Storage(object): if keys is not None: keys = ['%s%s' % (prefix, key) for key in keys] self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) else: self.cursor.execute('delete from kv where key like ?', ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: + if self.keep_revisions and self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) @@ -299,7 +300,7 @@ class Storage(object): where key = ?''', [serialized, key]) # Save - if not self.revision: + if (not self.keep_revisions) or (not self.revision): return value self.cursor.execute( diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index 93b9276..effc884 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -230,6 +230,18 @@ CLOUD_ARCHIVE_POCKETS = { 'zed/proposed': 'jammy-proposed/zed', 'jammy-zed/proposed': 'jammy-proposed/zed', 'jammy-proposed/zed': 'jammy-proposed/zed', + # antelope + 'antelope': 'jammy-updates/antelope', + 'jammy-antelope': 'jammy-updates/antelope', + 'jammy-antelope/updates': 'jammy-updates/antelope', + 'jammy-updates/antelope': 'jammy-updates/antelope', + 'antelope/proposed': 'jammy-proposed/antelope', + 'jammy-antelope/proposed': 'jammy-proposed/antelope', + 'jammy-proposed/antelope': 'jammy-proposed/antelope', + + # OVN + 'focal-ovn-22.03': 'focal-updates/ovn-22.03', + 'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03', } @@ -257,6 +269,7 @@ OPENSTACK_RELEASES = ( 'xena', 'yoga', 'zed', + 'antelope', ) @@ -284,6 +297,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('impish', 'xena'), ('jammy', 'yoga'), ('kinetic', 'zed'), + ('lunar', 'antelope'), ]) @@ -363,6 +377,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False): :type quiet: bool :raises: subprocess.CalledProcessError """ + if not packages: + log("Nothing to install", level=DEBUG) + return if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -687,6 +704,7 @@ def add_source(source, key=None, fail_invalid=False): (r"^cloud-archive:(.*)$", _add_apt_repository), (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), + (r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check), (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), (r"^cloud:(.*)$", _add_cloud_pocket), (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), @@ -750,6 +768,11 @@ def _add_apt_repository(spec): ) +def __write_sources_list_d_actual_pocket(file, actual_pocket): + with 
open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + def _add_cloud_pocket(pocket): """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list @@ -769,8 +792,9 @@ def _add_cloud_pocket(pocket): 'Unsupported cloud: source option %s' % pocket) actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + __write_sources_list_d_actual_pocket( + 'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'), + actual_pocket) def _add_cloud_staging(cloud_archive_release, openstack_release): @@ -931,10 +955,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), try: result = subprocess.check_call(cmd, env=env, **kwargs) except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > max_retries: - raise result = e.returncode + if result not in retry_results: + # a non-retriable exitcode was produced + raise + retry_count += 1 + if retry_count > max_retries: + # a retriable exitcode was produced more than {max_retries} times + raise log(retry_message) time.sleep(CMD_RETRY_DELAY) diff --git a/metadata.yaml b/metadata.yaml index 3fcb427..4ee068d 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -13,6 +13,7 @@ tags: series: - jammy - kinetic +- lunar subordinate: true provides: storage-backend: diff --git a/osci.yaml b/osci.yaml index 39ea09f..3d63345 100644 --- a/osci.yaml +++ b/osci.yaml @@ -1,14 +1,17 @@ - project: templates: - charm-unit-jobs-py310 - - charm-zed-functional-jobs + - charm-functional-jobs check: jobs: - - jammy-yoga-ec_cinder-ceph - jammy-zed-ec_cinder-ceph: voting: false - kinetic-zed-ec_cinder-ceph: voting: false + - jammy-antelope-ec_cinder-ceph: + voting: false + - lunar-antelope-ec_cinder-ceph: + voting: false vars: needs_charm_build: true charm_build_name: cinder-ceph @@ -16,26 +19,33 @@ charmcraft_channel: 2.1/stable - job: - name: jammy-yoga-ec_cinder-ceph + name: jammy-zed-ec_cinder-ceph parent: func-target dependencies: - charm-build - osci-lint - name: tox-py310 soft: true - vars: - tox_extra_args: '-- jammy-yoga-ec' -- job: - name: jammy-zed-ec_cinder-ceph - parent: func-target - dependencies: - - jammy-yoga-ec_cinder-ceph vars: tox_extra_args: '-- jammy-zed-ec' - job: name: kinetic-zed-ec_cinder-ceph parent: func-target dependencies: - - jammy-yoga-ec_cinder-ceph + - jammy-zed-ec_cinder-ceph vars: tox_extra_args: '-- kinetic-zed-ec' +- job: + name: jammy-antelope-ec_cinder-ceph + parent: func-target + dependencies: + - jammy-zed-ec_cinder-ceph + vars: + tox_extra_args: '-- jammy-antelope-ec' +- job: + name: lunar-antelope-ec_cinder-ceph + parent: func-target + dependencies: + - jammy-zed-ec_cinder-ceph + vars: + tox_extra_args: '-- lunar-antelope-ec' diff --git a/test-requirements.txt b/test-requirements.txt index 40d87f3..e972406 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -26,3 +26,4 @@ git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.open git+https://opendev.org/openstack/tempest.git#egg=tempest croniter # needed for charm-rabbitmq-server unit tests +psutil diff --git a/tests/bundles/jammy-antelope-ec.yaml b/tests/bundles/jammy-antelope-ec.yaml new file mode 100644 index 0000000..d56a8be --- /dev/null +++ b/tests/bundles/jammy-antelope-ec.yaml @@ -0,0 +1,303 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope + +series: &series jammy + +machines: + 0: + 1: + 2: + 
3: + 4: + 5: + 6: + 7: + 8: + 9: + 10: + 11: + 12: + 13: + 14: + 15: + 16: + # for the nova-compute unit + 17: + constraints: mem=4G cores=4 root-disk=20G + 18: + 19: + 20: + +applications: + + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + neutron-api-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 6 + storage: + osd-devices: '10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + - '18' + - '19' + - '20' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: latest/edge + + nova-cloud-controller: + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + network-manager: Neutron + debug: true + to: + - '10' + channel: latest/edge + + placement: + charm: ch:placement + num_units: 1 + constraints: mem=1G + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: latest/edge + + neutron-api: + charm: ch:neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + openstack-origin: *openstack-origin + flat-network-providers: physnet1 + neutron-security-groups: true + to: + - '12' + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '13' + channel: latest/edge + + neutron-gateway: + charm: ch:neutron-gateway + num_units: 1 + options: + openstack-origin: *openstack-origin + bridge-mappings: physnet1:br-ex + to: + - '14' + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: latest/edge + + neutron-openvswitch: + charm: ch:neutron-openvswitch + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: None + glance-api-version: 2 + to: + - '16' + channel: latest/edge + + cinder-ceph: + charm: ../../cinder-ceph.charm + options: + pool-type: erasure-coded + ec-profile-k: 4 + ec-profile-m: 2 + + nova-compute: + charm: ch:nova-compute + num_units: 1 + storage: + ephemeral-device: '40G' + options: + openstack-origin: *openstack-origin + config-flags: auto_assign_floating_ip=False + enable-live-migration: false + aa-profile-mode: enforce + debug: true + to: + - '17' + channel: latest/edge + +relations: + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + 
- - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement:identity-service' + - 'keystone:identity-service' + + - - 'placement:placement' + - 'nova-cloud-controller:placement' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 'nova-cloud-controller:neutron-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'rabbitmq-server:amqp' + - 'neutron-openvswitch:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'ceph-mon:client' + - 'cinder-ceph:ceph' + + - - 'ceph-mon:osd' + - 'ceph-osd:mon' diff --git a/tests/bundles/jammy-antelope.yaml b/tests/bundles/jammy-antelope.yaml new file mode 100644 index 0000000..fde690c --- /dev/null +++ b/tests/bundles/jammy-antelope.yaml @@ -0,0 +1,293 @@ +variables: + openstack-origin: &openstack-origin cloud:jammy-antelope + +series: &series jammy + +machines: + 0: + 1: + 2: + 3: + 4: + 5: + 6: + 7: + 8: + 9: + 10: + 11: + 12: + 13: + 14: + 15: + 16: + # for the nova-compute unit + 17: + constraints: mem=4G cores=4 root-disk=20G + +applications: + + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + neutron-api-mysql-router: + charm: ch:mysql-router + channel: latest/edge + cinder-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: '10G' + options: + osd-devices: '/dev/test-non-existent' + source: *openstack-origin + to: + - '3' + - '4' + - '5' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + monitor-count: '3' + source: *openstack-origin + to: + - '6' + - '7' + - '8' + channel: latest/edge + 
+ rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + to: + - '9' + channel: latest/edge + + nova-cloud-controller: + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + network-manager: Neutron + debug: true + to: + - '10' + channel: latest/edge + + placement: + charm: ch:placement + num_units: 1 + constraints: mem=1G + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: latest/edge + + neutron-api: + charm: ch:neutron-api + num_units: 1 + options: + manage-neutron-plugin-legacy-mode: true + neutron-plugin: ovs + openstack-origin: *openstack-origin + flat-network-providers: physnet1 + neutron-security-groups: true + to: + - '12' + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '13' + channel: latest/edge + + neutron-gateway: + charm: ch:neutron-gateway + num_units: 1 + options: + openstack-origin: *openstack-origin + bridge-mappings: physnet1:br-ex + to: + - '14' + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: latest/edge + + neutron-openvswitch: + charm: ch:neutron-openvswitch + channel: latest/edge + + cinder: + charm: ch:cinder + num_units: 1 + options: + openstack-origin: *openstack-origin + block-device: None + glance-api-version: 2 + to: + - '16' + channel: latest/edge + + cinder-ceph: + charm: ../../cinder-ceph.charm + + nova-compute: + charm: ch:nova-compute + num_units: 1 + storage: + ephemeral-device: '40G' + options: + openstack-origin: *openstack-origin + config-flags: auto_assign_floating_ip=False + enable-live-migration: false + aa-profile-mode: enforce + debug: true + to: + - '17' + channel: latest/edge + +relations: + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement:identity-service' + - 'keystone:identity-service' + + - - 'placement:placement' + - 'nova-cloud-controller:placement' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:ceph' + - 'ceph-mon:client' + + - - 'neutron-gateway:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:quantum-network-service' + - 'neutron-gateway:quantum-network-service' + + - - 'neutron-api:shared-db' + - 'neutron-api-mysql-router:shared-db' + - - 'neutron-api-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'neutron-api:amqp' + - 'rabbitmq-server:amqp' + + - - 'neutron-api:neutron-api' + - 
'nova-cloud-controller:neutron-api' + + - - 'neutron-api:identity-service' + - 'keystone:identity-service' + + - - 'nova-compute:neutron-plugin' + - 'neutron-openvswitch:neutron-plugin' + + - - 'rabbitmq-server:amqp' + - 'neutron-openvswitch:amqp' + + - - 'cinder:shared-db' + - 'cinder-mysql-router:shared-db' + - - 'cinder-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'cinder:identity-service' + - 'keystone:identity-service' + + - - 'cinder:amqp' + - 'rabbitmq-server:amqp' + + - - 'cinder:image-service' + - 'glance:image-service' + + - - 'cinder-ceph:storage-backend' + - 'cinder:storage-backend' + + - - 'nova-compute:ceph-access' + - 'cinder-ceph:ceph-access' + + - - 'ceph-mon:client' + - 'cinder-ceph:ceph' + + - - 'ceph-mon:osd' + - 'ceph-osd:mon' diff --git a/tests/bundles/jammy-yoga-ec.yaml b/tests/bundles/lunar-antelope-ec.yaml similarity index 99% rename from tests/bundles/jammy-yoga-ec.yaml rename to tests/bundles/lunar-antelope-ec.yaml index 6fff176..06075ac 100644 --- a/tests/bundles/jammy-yoga-ec.yaml +++ b/tests/bundles/lunar-antelope-ec.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: &series jammy +series: &series lunar machines: 0: diff --git a/tests/bundles/jammy-yoga.yaml b/tests/bundles/lunar-antelope.yaml similarity index 99% rename from tests/bundles/jammy-yoga.yaml rename to tests/bundles/lunar-antelope.yaml index f1b18c9..2794ddb 100644 --- a/tests/bundles/jammy-yoga.yaml +++ b/tests/bundles/lunar-antelope.yaml @@ -1,7 +1,7 @@ variables: openstack-origin: &openstack-origin distro -series: &series jammy +series: &series lunar machines: 0: diff --git a/tests/tests.yaml b/tests/tests.yaml index edbbeac..ad197ca 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -5,19 +5,19 @@ comment: | cinder and ceph-mon zaza charm tests. smoke_bundles: - - jammy-yoga + - jammy-zed gate_bundles: - - jammy-yoga-ec - - jammy-yoga - -dev_bundles: - - jammy-yoga-ec - - jammy-yoga - jammy-zed-ec - jammy-zed + +dev_bundles: + - jammy-antelope-ec + - jammy-antelope - kinetic-zed-ec - kinetic-zed + - lunar-antelope-ec + - lunar-antelope configure: - zaza.openstack.charm_tests.glance.setup.add_cirros_image @@ -44,3 +44,5 @@ tests_options: force_deploy: - kinetic-zed - kinetic-zed-ec + - lunar-antelope + - lunar-antelope-ec diff --git a/tox.ini b/tox.ini index ae4d124..2cb6ca1 100644 --- a/tox.ini +++ b/tox.ini @@ -25,7 +25,7 @@ setenv = VIRTUAL_ENV={envdir} commands = stestr run --slowest {posargs} allowlist_externals = charmcraft - rename.sh + {toxinidir}/rename.sh passenv = HOME TERM
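
A minimal, standalone sketch (not part of the patch) of the app-data-bag fallback the IdentityServiceContext change introduces: keys arriving via the Keystone application data bag use '-' and are preferred, while legacy per-unit relation data uses '_' and is the fallback. Plain dicts stand in for Juju relation data here, and the values are illustrative only:

    def resolve_identity_data(app_data, unit_data):
        """Prefer app-data-bag values; fall back to legacy unit data."""
        adata = {}
        for key, value in app_data.items():
            # The app data bag uses '-' in key names; remap to '_' for
            # compatibility with the existing relation data keys.
            if key == 'api-version':
                adata[key.replace('-', '_')] = value.strip('v')
            else:
                adata[key.replace('-', '_')] = value

        def _resolve(key):
            return adata.get(key) or unit_data.get(key)

        return {
            'service_host': _resolve('service_host'),
            'api_version': _resolve('api_version') or '2.0',
        }

    # App data bag present: its values win over stale unit data.
    app = {'service-host': '10.0.0.10', 'api-version': 'v3'}
    unit = {'service_host': '10.0.0.99', 'api_version': '2.0'}
    assert resolve_identity_data(app, unit) == {
        'service_host': '10.0.0.10', 'api_version': '3'}

    # No app data bag presented: fall back to unit relation data.
    assert resolve_identity_data({}, unit) == {
        'service_host': '10.0.0.99', 'api_version': '2.0'}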
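The mkfs_xfs change only passes '-i size=N' when the configured inode size is set and falls inside 256-2048 bytes; anything else defers to the mkfs.xfs system default. A hedged sketch of just that command construction (build_mkfs_xfs_cmd is a hypothetical helper, and the charm-helpers log() call is replaced with print() so the snippet runs standalone):

    def build_mkfs_xfs_cmd(device, force=False, inode_size=None):
        # Mirrors the patched flow in mkfs_xfs() without running anything.
        cmd = ['mkfs.xfs']
        if force:
            cmd.append('-f')
        if inode_size:
            if 256 <= inode_size <= 2048:
                cmd += ['-i', 'size={}'.format(inode_size)]
            else:
                print('xfs-inode-size={} is invalid; '
                      'using system default'.format(inode_size))
        cmd += [device]
        return cmd

    assert build_mkfs_xfs_cmd('/dev/vdb', force=True, inode_size=1024) == ['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/vdb']
    assert build_mkfs_xfs_cmd('/dev/vdb') == ['mkfs.xfs', '/dev/vdb']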
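The reworked retry loop in _run_with_retries raises immediately on a non-retriable exit code and only sleeps and retries, up to max_retries times, for exit codes in the retriable set. A simplified standalone sketch of that control flow (the retry constants, logging, and env plumbing from the patch are omitted):

    import subprocess
    import time

    def run_with_retries(cmd, max_retries=3, retry_exitcodes=(1,), delay=10):
        """Retry cmd only for exit codes listed in retry_exitcodes."""
        retry_count = 0
        while True:
            try:
                return subprocess.check_call(cmd)
            except subprocess.CalledProcessError as e:
                if e.returncode not in retry_exitcodes:
                    raise  # non-retriable exit code: fail immediately
                retry_count += 1
                if retry_count > max_retries:
                    raise  # retriable code seen more than max_retries times
                time.sleep(delay)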