ch-sync: IdentityServiceContext app data fixes
The IdentityServiceContext was recently updated to add the application data bag. Unfortunately, the keystone charm primarily uses the regular relation data, but the updates for endpoints are on both the application data bag and the regular relation data. The IdentityServiceContext used the app data bag exclusively if there were any keys available, which leads to it ignoring the existing relation data entirely; this results in 'identity relation not ready' with missing data items. charm-helpers is patched at [1] to solve the issue and this is a charm-helpers sync that includes that patch. This changes the functionality of the IdentityServiceContext so that it preferentially selects keys from the application data bag unless they are None, in which case it tries for those keys on the existing relation data. i.e. it will stitch the two relations together with the app data bag taking priority. This allows the nova-cloud-controller charm to correctly access the identity relation data and form a complete context. [1] https://github.com/juju/charm-helpers/pull/746 Change-Id: Ic868213c6bb42bc2a28ad25a2f7344a28ab9f04d
This commit is contained in:
parent
007f9e33b0
commit
1d06b80c8d
|
@ -19,6 +19,7 @@
|
||||||
|
|
||||||
import glob
|
import glob
|
||||||
import grp
|
import grp
|
||||||
|
import json
|
||||||
import os
|
import os
|
||||||
import pwd
|
import pwd
|
||||||
import re
|
import re
|
||||||
|
@ -30,6 +31,7 @@ import yaml
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
application_name,
|
application_name,
|
||||||
config,
|
config,
|
||||||
|
ERROR,
|
||||||
hook_name,
|
hook_name,
|
||||||
local_unit,
|
local_unit,
|
||||||
log,
|
log,
|
||||||
|
@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
|
||||||
:param str unit_name: Unit name to use in check description
|
:param str unit_name: Unit name to use in check description
|
||||||
:param bool immediate_check: For sysv init, run the service check immediately
|
:param bool immediate_check: For sysv init, run the service check immediately
|
||||||
"""
|
"""
|
||||||
|
# check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
|
||||||
|
# just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
|
||||||
|
for rid in relation_ids("ha"):
|
||||||
|
ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
|
||||||
|
if ha_resources:
|
||||||
|
try:
|
||||||
|
ha_resources_parsed = json.loads(ha_resources)
|
||||||
|
except ValueError as e:
|
||||||
|
log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
|
||||||
|
raise
|
||||||
|
if "lsb:haproxy" in ha_resources_parsed.values():
|
||||||
|
if "haproxy" in services:
|
||||||
|
log("removed check_haproxy. This service will be monitored by check_crm")
|
||||||
|
services.remove("haproxy")
|
||||||
for svc in services:
|
for svc in services:
|
||||||
# Don't add a check for these services from neutron-gateway
|
# Don't add a check for these services from neutron-gateway
|
||||||
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
|
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
|
||||||
|
|
|
@ -324,7 +324,7 @@ def valid_hacluster_config():
|
||||||
'''
|
'''
|
||||||
vip = config_get('vip')
|
vip = config_get('vip')
|
||||||
dns = config_get('dns-ha')
|
dns = config_get('dns-ha')
|
||||||
if not(bool(vip) ^ bool(dns)):
|
if not (bool(vip) ^ bool(dns)):
|
||||||
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
|
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
|
||||||
'use high availability')
|
'use high availability')
|
||||||
status_set('blocked', msg)
|
status_set('blocked', msg)
|
||||||
|
|
|
@ -539,7 +539,7 @@ def port_has_listener(address, port):
|
||||||
"""
|
"""
|
||||||
cmd = ['nc', '-z', address, str(port)]
|
cmd = ['nc', '-z', address, str(port)]
|
||||||
result = subprocess.call(cmd)
|
result = subprocess.call(cmd)
|
||||||
return not(bool(result))
|
return not (bool(result))
|
||||||
|
|
||||||
|
|
||||||
def assert_charm_supports_ipv6():
|
def assert_charm_supports_ipv6():
|
||||||
|
|
|
@ -25,6 +25,7 @@ import socket
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from base64 import b64decode
|
from base64 import b64decode
|
||||||
|
from distutils.version import LooseVersion
|
||||||
from subprocess import (
|
from subprocess import (
|
||||||
check_call,
|
check_call,
|
||||||
check_output,
|
check_output,
|
||||||
|
@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
|
||||||
from charmhelpers.fetch import (
|
from charmhelpers.fetch import (
|
||||||
apt_install,
|
apt_install,
|
||||||
filter_installed_packages,
|
filter_installed_packages,
|
||||||
|
get_installed_version,
|
||||||
)
|
)
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
NoNetworkBinding,
|
NoNetworkBinding,
|
||||||
|
@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
|
||||||
network_get_primary_address,
|
network_get_primary_address,
|
||||||
WARNING,
|
WARNING,
|
||||||
service_name,
|
service_name,
|
||||||
|
remote_service_name,
|
||||||
)
|
)
|
||||||
|
|
||||||
from charmhelpers.core.sysctl import create as sysctl_create
|
from charmhelpers.core.sysctl import create as sysctl_create
|
||||||
|
@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||||
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
||||||
HAPROXY_RUN_DIR = '/var/run/haproxy/'
|
HAPROXY_RUN_DIR = '/var/run/haproxy/'
|
||||||
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
|
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
|
||||||
|
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404
|
||||||
|
|
||||||
|
|
||||||
def ensure_packages(packages):
|
def ensure_packages(packages):
|
||||||
|
@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):
|
||||||
|
|
||||||
class IdentityServiceContext(OSContextGenerator):
|
class IdentityServiceContext(OSContextGenerator):
|
||||||
|
|
||||||
|
_forward_compat_remaps = {
|
||||||
|
'admin_user': 'admin-user-name',
|
||||||
|
'service_username': 'service-user-name',
|
||||||
|
'service_tenant': 'service-project-name',
|
||||||
|
'service_tenant_id': 'service-project-id',
|
||||||
|
'service_domain': 'service-domain-name',
|
||||||
|
}
|
||||||
|
|
||||||
def __init__(self,
|
def __init__(self,
|
||||||
service=None,
|
service=None,
|
||||||
service_user=None,
|
service_user=None,
|
||||||
|
@ -397,11 +409,16 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
|
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
|
||||||
# see keystonemiddleware upstream sources for more info
|
# see keystonemiddleware upstream sources for more info
|
||||||
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
|
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
|
||||||
c.update((
|
if 'public_auth_url' in ctxt:
|
||||||
('www_authenticate_uri', "{}://{}:{}/v3".format(
|
c.update((
|
||||||
ctxt.get('service_protocol', ''),
|
('www_authenticate_uri', '{}/v3'.format(
|
||||||
ctxt.get('service_host', ''),
|
ctxt.get('public_auth_url'))),))
|
||||||
ctxt.get('service_port', ''))),))
|
else:
|
||||||
|
c.update((
|
||||||
|
('www_authenticate_uri', "{}://{}:{}/v3".format(
|
||||||
|
ctxt.get('service_protocol', ''),
|
||||||
|
ctxt.get('service_host', ''),
|
||||||
|
ctxt.get('service_port', ''))),))
|
||||||
else:
|
else:
|
||||||
c.update((
|
c.update((
|
||||||
('auth_uri', "{}://{}:{}/v3".format(
|
('auth_uri', "{}://{}:{}/v3".format(
|
||||||
|
@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
ctxt.get('service_host', ''),
|
ctxt.get('service_host', ''),
|
||||||
ctxt.get('service_port', ''))),))
|
ctxt.get('service_port', ''))),))
|
||||||
|
|
||||||
|
if 'internal_auth_url' in ctxt:
|
||||||
|
c.update((
|
||||||
|
('auth_url', ctxt.get('internal_auth_url')),))
|
||||||
|
else:
|
||||||
|
c.update((
|
||||||
|
('auth_url', "{}://{}:{}/v3".format(
|
||||||
|
ctxt.get('auth_protocol', ''),
|
||||||
|
ctxt.get('auth_host', ''),
|
||||||
|
ctxt.get('auth_port', ''))),))
|
||||||
|
|
||||||
c.update((
|
c.update((
|
||||||
('auth_url', "{}://{}:{}/v3".format(
|
|
||||||
ctxt.get('auth_protocol', ''),
|
|
||||||
ctxt.get('auth_host', ''),
|
|
||||||
ctxt.get('auth_port', ''))),
|
|
||||||
('project_domain_name', ctxt.get('admin_domain_name', '')),
|
('project_domain_name', ctxt.get('admin_domain_name', '')),
|
||||||
('user_domain_name', ctxt.get('admin_domain_name', '')),
|
('user_domain_name', ctxt.get('admin_domain_name', '')),
|
||||||
('project_name', ctxt.get('admin_tenant_name', '')),
|
('project_name', ctxt.get('admin_tenant_name', '')),
|
||||||
|
@ -441,39 +464,84 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
for rid in relation_ids(self.rel_name):
|
for rid in relation_ids(self.rel_name):
|
||||||
self.related = True
|
self.related = True
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
|
rdata = {}
|
||||||
|
# NOTE(jamespage):
|
||||||
|
# forwards compat with application data
|
||||||
|
# bag driven approach to relation.
|
||||||
|
_adata = relation_get(rid=rid, app=remote_service_name(rid))
|
||||||
|
adata = {}
|
||||||
|
# if no app data bag presented - fallback
|
||||||
|
# to legacy unit based relation data
|
||||||
rdata = relation_get(rid=rid, unit=unit)
|
rdata = relation_get(rid=rid, unit=unit)
|
||||||
serv_host = rdata.get('service_host')
|
if _adata:
|
||||||
|
# New app data bag uses - instead of _
|
||||||
|
# in key names - remap for compat with
|
||||||
|
# existing relation data keys
|
||||||
|
for key, value in _adata.items():
|
||||||
|
if key == 'api-version':
|
||||||
|
adata[key.replace('-', '_')] = value.strip('v')
|
||||||
|
else:
|
||||||
|
adata[key.replace('-', '_')] = value
|
||||||
|
# Re-map some keys for backwards compatibility
|
||||||
|
for target, source in self._forward_compat_remaps.items():
|
||||||
|
adata[target] = _adata.get(source)
|
||||||
|
# Now preferentially get data from the app data bag, but if
|
||||||
|
# it's not available, get it from the legacy based relation
|
||||||
|
# data.
|
||||||
|
|
||||||
|
def _resolve(key):
|
||||||
|
return adata.get(key) or rdata.get(key)
|
||||||
|
|
||||||
|
serv_host = _resolve('service_host')
|
||||||
serv_host = format_ipv6_addr(serv_host) or serv_host
|
serv_host = format_ipv6_addr(serv_host) or serv_host
|
||||||
auth_host = rdata.get('auth_host')
|
auth_host = _resolve('auth_host')
|
||||||
auth_host = format_ipv6_addr(auth_host) or auth_host
|
auth_host = format_ipv6_addr(auth_host) or auth_host
|
||||||
int_host = rdata.get('internal_host')
|
int_host = _resolve('internal_host',)
|
||||||
int_host = format_ipv6_addr(int_host) or int_host
|
int_host = format_ipv6_addr(int_host) or int_host
|
||||||
svc_protocol = rdata.get('service_protocol') or 'http'
|
svc_protocol = _resolve('service_protocol') or 'http'
|
||||||
auth_protocol = rdata.get('auth_protocol') or 'http'
|
auth_protocol = _resolve('auth_protocol') or 'http'
|
||||||
int_protocol = rdata.get('internal_protocol') or 'http'
|
int_protocol = _resolve('internal_protocol') or 'http'
|
||||||
api_version = rdata.get('api_version') or '2.0'
|
api_version = _resolve('api_version') or '2.0'
|
||||||
ctxt.update({'service_port': rdata.get('service_port'),
|
ctxt.update({'service_port': _resolve('service_port'),
|
||||||
'service_host': serv_host,
|
'service_host': serv_host,
|
||||||
'auth_host': auth_host,
|
'auth_host': auth_host,
|
||||||
'auth_port': rdata.get('auth_port'),
|
'auth_port': _resolve('auth_port'),
|
||||||
'internal_host': int_host,
|
'internal_host': int_host,
|
||||||
'internal_port': rdata.get('internal_port'),
|
'internal_port': _resolve('internal_port'),
|
||||||
'admin_tenant_name': rdata.get('service_tenant'),
|
'admin_tenant_name': _resolve('service_tenant'),
|
||||||
'admin_user': rdata.get('service_username'),
|
'admin_user': _resolve('service_username'),
|
||||||
'admin_password': rdata.get('service_password'),
|
'admin_password': _resolve('service_password'),
|
||||||
'service_protocol': svc_protocol,
|
'service_protocol': svc_protocol,
|
||||||
'auth_protocol': auth_protocol,
|
'auth_protocol': auth_protocol,
|
||||||
'internal_protocol': int_protocol,
|
'internal_protocol': int_protocol,
|
||||||
'api_version': api_version})
|
'api_version': api_version})
|
||||||
|
|
||||||
if rdata.get('service_type'):
|
service_type = _resolve('service_type')
|
||||||
ctxt['service_type'] = rdata.get('service_type')
|
if service_type:
|
||||||
|
ctxt['service_type'] = service_type
|
||||||
|
|
||||||
if float(api_version) > 2:
|
if float(api_version) > 2:
|
||||||
ctxt.update({
|
ctxt.update({
|
||||||
'admin_domain_name': rdata.get('service_domain'),
|
'admin_domain_name': _resolve('service_domain'),
|
||||||
'service_project_id': rdata.get('service_tenant_id'),
|
'service_project_id': _resolve('service_tenant_id'),
|
||||||
'service_domain_id': rdata.get('service_domain_id')})
|
'service_domain_id': _resolve('service_domain_id')})
|
||||||
|
|
||||||
|
# NOTE:
|
||||||
|
# keystone-k8s operator presents full URLS
|
||||||
|
# for all three endpoints - public and internal are
|
||||||
|
# externally addressable for machine based charm
|
||||||
|
public_auth_url = _resolve('public_auth_url')
|
||||||
|
# if 'public_auth_url' in rdata:
|
||||||
|
if public_auth_url:
|
||||||
|
ctxt.update({
|
||||||
|
'public_auth_url': public_auth_url,
|
||||||
|
})
|
||||||
|
internal_auth_url = _resolve('internal_auth_url')
|
||||||
|
# if 'internal_auth_url' in rdata:
|
||||||
|
if internal_auth_url:
|
||||||
|
ctxt.update({
|
||||||
|
'internal_auth_url': internal_auth_url,
|
||||||
|
})
|
||||||
|
|
||||||
# we keep all veriables in ctxt for compatibility and
|
# we keep all veriables in ctxt for compatibility and
|
||||||
# add nested dictionary for keystone_authtoken generic
|
# add nested dictionary for keystone_authtoken generic
|
||||||
|
@ -487,8 +555,8 @@ class IdentityServiceContext(OSContextGenerator):
|
||||||
# NOTE(jamespage) this is required for >= icehouse
|
# NOTE(jamespage) this is required for >= icehouse
|
||||||
# so a missing value just indicates keystone needs
|
# so a missing value just indicates keystone needs
|
||||||
# upgrading
|
# upgrading
|
||||||
ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
|
ctxt['admin_tenant_id'] = _resolve('service_tenant_id')
|
||||||
ctxt['admin_domain_id'] = rdata.get('service_domain_id')
|
ctxt['admin_domain_id'] = _resolve('service_domain_id')
|
||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
return {}
|
return {}
|
||||||
|
@ -860,9 +928,14 @@ class HAProxyContext(OSContextGenerator):
|
||||||
interfaces = ['cluster']
|
interfaces = ['cluster']
|
||||||
|
|
||||||
def __init__(self, singlenode_mode=False,
|
def __init__(self, singlenode_mode=False,
|
||||||
address_types=ADDRESS_TYPES):
|
address_types=None,
|
||||||
|
exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
|
||||||
|
if address_types is None:
|
||||||
|
address_types = ADDRESS_TYPES[:]
|
||||||
|
|
||||||
self.address_types = address_types
|
self.address_types = address_types
|
||||||
self.singlenode_mode = singlenode_mode
|
self.singlenode_mode = singlenode_mode
|
||||||
|
self.exporter_stats_port = exporter_stats_port
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
if not os.path.isdir(HAPROXY_RUN_DIR):
|
if not os.path.isdir(HAPROXY_RUN_DIR):
|
||||||
|
@ -957,10 +1030,20 @@ class HAProxyContext(OSContextGenerator):
|
||||||
db = kv()
|
db = kv()
|
||||||
ctxt['stat_password'] = db.get('stat-password')
|
ctxt['stat_password'] = db.get('stat-password')
|
||||||
if not ctxt['stat_password']:
|
if not ctxt['stat_password']:
|
||||||
ctxt['stat_password'] = db.set('stat-password',
|
ctxt['stat_password'] = db.set('stat-password', pwgen(32))
|
||||||
pwgen(32))
|
|
||||||
db.flush()
|
db.flush()
|
||||||
|
|
||||||
|
# NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
|
||||||
|
# New bind will be created and a prometheus-exporter
|
||||||
|
# will be used for path /metrics. At the same time,
|
||||||
|
# prometheus-exporter avoids using auth.
|
||||||
|
haproxy_version = get_installed_version("haproxy")
|
||||||
|
if (haproxy_version and
|
||||||
|
haproxy_version.ver_str >= LooseVersion("2.0.0") and
|
||||||
|
is_relation_made("haproxy-exporter")):
|
||||||
|
ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
|
||||||
|
ctxt["stats_exporter_port"] = self.exporter_stats_port
|
||||||
|
|
||||||
for frontend in cluster_hosts:
|
for frontend in cluster_hosts:
|
||||||
if (len(cluster_hosts[frontend]['backends']) > 1 or
|
if (len(cluster_hosts[frontend]['backends']) > 1 or
|
||||||
self.singlenode_mode):
|
self.singlenode_mode):
|
||||||
|
|
|
@ -25,6 +25,7 @@ Helpers for high availability.
|
||||||
|
|
||||||
import hashlib
|
import hashlib
|
||||||
import json
|
import json
|
||||||
|
import os
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
|
||||||
config,
|
config,
|
||||||
status_set,
|
status_set,
|
||||||
DEBUG,
|
DEBUG,
|
||||||
|
application_name,
|
||||||
)
|
)
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
|
@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
|
||||||
|
|
||||||
VIP_GROUP_NAME = 'grp_{service}_vips'
|
VIP_GROUP_NAME = 'grp_{service}_vips'
|
||||||
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
|
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
|
||||||
|
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
|
||||||
|
|
||||||
|
|
||||||
class DNSHAException(Exception):
|
class DNSHAException(Exception):
|
||||||
|
@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
|
||||||
relation_data['groups'] = {
|
relation_data['groups'] = {
|
||||||
key: ' '.join(vip_group)
|
key: ' '.join(vip_group)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
|
||||||
|
"""Load grafana dashboard json model and insert prometheus datasource.
|
||||||
|
|
||||||
|
:param prometheus_app_name: name of the 'prometheus' application that will
|
||||||
|
be used as datasource in grafana dashboard
|
||||||
|
:type prometheus_app_name: str
|
||||||
|
:param haproxy_dashboard: path to haproxy dashboard
|
||||||
|
:type haproxy_dashboard: str
|
||||||
|
:return: Grafana dashboard json model as a str.
|
||||||
|
:rtype: str
|
||||||
|
"""
|
||||||
|
from charmhelpers.contrib.templating import jinja
|
||||||
|
|
||||||
|
dashboard_template = os.path.basename(haproxy_dashboard)
|
||||||
|
dashboard_template_dir = os.path.dirname(haproxy_dashboard)
|
||||||
|
app_name = application_name()
|
||||||
|
datasource = "{} - Juju generated source".format(prometheus_app_name)
|
||||||
|
return jinja.render(dashboard_template,
|
||||||
|
{"datasource": datasource,
|
||||||
|
"app_name": app_name,
|
||||||
|
"prometheus_app_name": prometheus_app_name},
|
||||||
|
template_dir=dashboard_template_dir,
|
||||||
|
jinja_env_args={"variable_start_string": "<< ",
|
||||||
|
"variable_end_string": " >>"})
|
||||||
|
|
|
@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
|
||||||
is_ipv6,
|
is_ipv6,
|
||||||
get_ipv6_addr,
|
get_ipv6_addr,
|
||||||
resolve_network_cidr,
|
resolve_network_cidr,
|
||||||
|
get_iface_for_address
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
||||||
|
|
||||||
|
@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
|
||||||
return unit_get(unit_get_fallback)
|
return unit_get(unit_get_fallback)
|
||||||
|
|
||||||
|
|
||||||
|
def get_invalid_vips():
|
||||||
|
"""Check if any of the provided vips are invalid.
|
||||||
|
A vip is invalid if it doesn't belong to the subnet in any interface.
|
||||||
|
If all vips are valid, this returns an empty list.
|
||||||
|
|
||||||
|
:returns: A list of strings, where each string is an invalid vip address.
|
||||||
|
:rtype: list
|
||||||
|
"""
|
||||||
|
|
||||||
|
clustered = is_clustered()
|
||||||
|
vips = config('vip')
|
||||||
|
if vips:
|
||||||
|
vips = vips.split()
|
||||||
|
invalid_vips = []
|
||||||
|
|
||||||
|
if clustered and vips:
|
||||||
|
for vip in vips:
|
||||||
|
iface_for_vip = get_iface_for_address(vip)
|
||||||
|
if iface_for_vip is None:
|
||||||
|
invalid_vips.append(vip)
|
||||||
|
|
||||||
|
return invalid_vips
|
||||||
|
|
||||||
|
|
||||||
def resolve_address(endpoint_type=PUBLIC, override=True):
|
def resolve_address(endpoint_type=PUBLIC, override=True):
|
||||||
"""Return unit address depending on net config.
|
"""Return unit address depending on net config.
|
||||||
|
|
||||||
|
|
|
@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
|
||||||
for hosts_line in hosts:
|
for hosts_line in hosts:
|
||||||
if hosts_line.rstrip():
|
if hosts_line.rstrip():
|
||||||
known_hosts_list.append(hosts_line.rstrip())
|
known_hosts_list.append(hosts_line.rstrip())
|
||||||
return(known_hosts_list)
|
return known_hosts_list
|
||||||
|
|
||||||
|
|
||||||
def ssh_authorized_keys_lines(application_name, user=None):
|
def ssh_authorized_keys_lines(application_name, user=None):
|
||||||
|
@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
|
||||||
for authkey_line in keys:
|
for authkey_line in keys:
|
||||||
if authkey_line.rstrip():
|
if authkey_line.rstrip():
|
||||||
authorized_keys_list.append(authkey_line.rstrip())
|
authorized_keys_list.append(authkey_line.rstrip())
|
||||||
return(authorized_keys_list)
|
return authorized_keys_list
|
||||||
|
|
||||||
|
|
||||||
def ssh_compute_remove(public_key, application_name, user=None):
|
def ssh_compute_remove(public_key, application_name, user=None):
|
||||||
|
|
|
@ -49,6 +49,11 @@ defaults
|
||||||
|
|
||||||
listen stats
|
listen stats
|
||||||
bind {{ local_host }}:{{ stat_port }}
|
bind {{ local_host }}:{{ stat_port }}
|
||||||
|
{%- if stats_exporter_host and stats_exporter_port %}
|
||||||
|
bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
|
||||||
|
option http-use-htx
|
||||||
|
http-request use-service prometheus-exporter if { path /metrics }
|
||||||
|
{%- endif %}
|
||||||
mode http
|
mode http
|
||||||
stats enable
|
stats enable
|
||||||
stats hide-version
|
stats hide-version
|
||||||
|
|
|
@ -1327,7 +1327,7 @@ def _check_listening_on_services_ports(services, test=False):
|
||||||
@param test: default=False, if False, test for closed, otherwise open.
|
@param test: default=False, if False, test for closed, otherwise open.
|
||||||
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
|
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
|
||||||
"""
|
"""
|
||||||
test = not(not(test)) # ensure test is True or False
|
test = not (not (test)) # ensure test is True or False
|
||||||
all_ports = list(itertools.chain(*services.values()))
|
all_ports = list(itertools.chain(*services.values()))
|
||||||
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
|
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
|
||||||
map_ports = OrderedDict()
|
map_ports = OrderedDict()
|
||||||
|
@ -1583,7 +1583,7 @@ def is_unit_paused_set():
|
||||||
with unitdata.HookData()() as t:
|
with unitdata.HookData()() as t:
|
||||||
kv = t[0]
|
kv = t[0]
|
||||||
# transform something truth-y into a Boolean.
|
# transform something truth-y into a Boolean.
|
||||||
return not(not(kv.get('unit-paused')))
|
return not (not (kv.get('unit-paused')))
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
@ -2181,7 +2181,7 @@ def is_unit_upgrading_set():
|
||||||
with unitdata.HookData()() as t:
|
with unitdata.HookData()() as t:
|
||||||
kv = t[0]
|
kv = t[0]
|
||||||
# transform something truth-y into a Boolean.
|
# transform something truth-y into a Boolean.
|
||||||
return not(not(kv.get('unit-upgrading')))
|
return not (not (kv.get('unit-upgrading')))
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
|
@ -173,7 +173,12 @@ def retrieve_secret_id(url, token):
|
||||||
# hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
|
# hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
|
||||||
if not isinstance(client.adapter, hvac.adapters.Request):
|
if not isinstance(client.adapter, hvac.adapters.Request):
|
||||||
client.adapter = hvac.adapters.Request(base_uri=url, token=token)
|
client.adapter = hvac.adapters.Request(base_uri=url, token=token)
|
||||||
response = client._post('/v1/sys/wrapping/unwrap')
|
try:
|
||||||
|
# hvac == 1.0.0 has an API to unwrap with the user token
|
||||||
|
response = client.sys.unwrap()
|
||||||
|
except AttributeError:
|
||||||
|
# fallback to hvac < 1.0.0
|
||||||
|
response = client._post('/v1/sys/wrapping/unwrap')
|
||||||
if response.status_code == 200:
|
if response.status_code == 200:
|
||||||
data = response.json()
|
data = response.json()
|
||||||
return data['data']['secret_id']
|
return data['data']['secret_id']
|
||||||
|
|
|
@ -23,6 +23,12 @@ from subprocess import (
|
||||||
call
|
call
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
WARNING,
|
||||||
|
INFO
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def _luks_uuid(dev):
|
def _luks_uuid(dev):
|
||||||
"""
|
"""
|
||||||
|
@ -110,7 +116,7 @@ def is_device_mounted(device):
|
||||||
return bool(re.search(r'MOUNTPOINT=".+"', out))
|
return bool(re.search(r'MOUNTPOINT=".+"', out))
|
||||||
|
|
||||||
|
|
||||||
def mkfs_xfs(device, force=False, inode_size=1024):
|
def mkfs_xfs(device, force=False, inode_size=None):
|
||||||
"""Format device with XFS filesystem.
|
"""Format device with XFS filesystem.
|
||||||
|
|
||||||
By default this should fail if the device already has a filesystem on it.
|
By default this should fail if the device already has a filesystem on it.
|
||||||
|
@ -118,11 +124,20 @@ def mkfs_xfs(device, force=False, inode_size=1024):
|
||||||
:ptype device: tr
|
:ptype device: tr
|
||||||
:param force: Force operation
|
:param force: Force operation
|
||||||
:ptype: force: boolean
|
:ptype: force: boolean
|
||||||
:param inode_size: XFS inode size in bytes
|
:param inode_size: XFS inode size in bytes; if set to 0 or None,
|
||||||
|
the value used will be the XFS system default
|
||||||
:ptype inode_size: int"""
|
:ptype inode_size: int"""
|
||||||
cmd = ['mkfs.xfs']
|
cmd = ['mkfs.xfs']
|
||||||
if force:
|
if force:
|
||||||
cmd.append("-f")
|
cmd.append("-f")
|
||||||
|
|
||||||
cmd += ['-i', "size={}".format(inode_size), device]
|
if inode_size:
|
||||||
|
if inode_size >= 256 and inode_size <= 2048:
|
||||||
|
cmd += ['-i', "size={}".format(inode_size)]
|
||||||
|
else:
|
||||||
|
log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
|
||||||
|
else:
|
||||||
|
log("Using XFS filesystem with system default inode size.", level=INFO)
|
||||||
|
|
||||||
|
cmd += [device]
|
||||||
check_call(cmd)
|
check_call(cmd)
|
||||||
|
|
|
@ -954,7 +954,7 @@ def pwgen(length=None):
|
||||||
random_generator = random.SystemRandom()
|
random_generator = random.SystemRandom()
|
||||||
random_chars = [
|
random_chars = [
|
||||||
random_generator.choice(alphanumeric_chars) for _ in range(length)]
|
random_generator.choice(alphanumeric_chars) for _ in range(length)]
|
||||||
return(''.join(random_chars))
|
return ''.join(random_chars)
|
||||||
|
|
||||||
|
|
||||||
def is_phy_iface(interface):
|
def is_phy_iface(interface):
|
||||||
|
|
|
@ -171,8 +171,9 @@ class Storage(object):
|
||||||
path parameter which causes sqlite3 to only build the db in memory.
|
path parameter which causes sqlite3 to only build the db in memory.
|
||||||
This should only be used for testing purposes.
|
This should only be used for testing purposes.
|
||||||
"""
|
"""
|
||||||
def __init__(self, path=None):
|
def __init__(self, path=None, keep_revisions=False):
|
||||||
self.db_path = path
|
self.db_path = path
|
||||||
|
self.keep_revisions = keep_revisions
|
||||||
if path is None:
|
if path is None:
|
||||||
if 'UNIT_STATE_DB' in os.environ:
|
if 'UNIT_STATE_DB' in os.environ:
|
||||||
self.db_path = os.environ['UNIT_STATE_DB']
|
self.db_path = os.environ['UNIT_STATE_DB']
|
||||||
|
@ -242,7 +243,7 @@ class Storage(object):
|
||||||
Remove a key from the database entirely.
|
Remove a key from the database entirely.
|
||||||
"""
|
"""
|
||||||
self.cursor.execute('delete from kv where key=?', [key])
|
self.cursor.execute('delete from kv where key=?', [key])
|
||||||
if self.revision and self.cursor.rowcount:
|
if self.keep_revisions and self.revision and self.cursor.rowcount:
|
||||||
self.cursor.execute(
|
self.cursor.execute(
|
||||||
'insert into kv_revisions values (?, ?, ?)',
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
[key, self.revision, json.dumps('DELETED')])
|
[key, self.revision, json.dumps('DELETED')])
|
||||||
|
@ -259,14 +260,14 @@ class Storage(object):
|
||||||
if keys is not None:
|
if keys is not None:
|
||||||
keys = ['%s%s' % (prefix, key) for key in keys]
|
keys = ['%s%s' % (prefix, key) for key in keys]
|
||||||
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
|
||||||
if self.revision and self.cursor.rowcount:
|
if self.keep_revisions and self.revision and self.cursor.rowcount:
|
||||||
self.cursor.execute(
|
self.cursor.execute(
|
||||||
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
|
'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
|
||||||
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
|
list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
|
||||||
else:
|
else:
|
||||||
self.cursor.execute('delete from kv where key like ?',
|
self.cursor.execute('delete from kv where key like ?',
|
||||||
['%s%%' % prefix])
|
['%s%%' % prefix])
|
||||||
if self.revision and self.cursor.rowcount:
|
if self.keep_revisions and self.revision and self.cursor.rowcount:
|
||||||
self.cursor.execute(
|
self.cursor.execute(
|
||||||
'insert into kv_revisions values (?, ?, ?)',
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
|
['%s%%' % prefix, self.revision, json.dumps('DELETED')])
|
||||||
|
@ -299,7 +300,7 @@ class Storage(object):
|
||||||
where key = ?''', [serialized, key])
|
where key = ?''', [serialized, key])
|
||||||
|
|
||||||
# Save
|
# Save
|
||||||
if not self.revision:
|
if (not self.keep_revisions) or (not self.revision):
|
||||||
return value
|
return value
|
||||||
|
|
||||||
self.cursor.execute(
|
self.cursor.execute(
|
||||||
|
|
|
@ -230,6 +230,10 @@ CLOUD_ARCHIVE_POCKETS = {
|
||||||
'zed/proposed': 'jammy-proposed/zed',
|
'zed/proposed': 'jammy-proposed/zed',
|
||||||
'jammy-zed/proposed': 'jammy-proposed/zed',
|
'jammy-zed/proposed': 'jammy-proposed/zed',
|
||||||
'jammy-proposed/zed': 'jammy-proposed/zed',
|
'jammy-proposed/zed': 'jammy-proposed/zed',
|
||||||
|
|
||||||
|
# OVN
|
||||||
|
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
|
||||||
|
'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -363,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
|
||||||
:type quiet: bool
|
:type quiet: bool
|
||||||
:raises: subprocess.CalledProcessError
|
:raises: subprocess.CalledProcessError
|
||||||
"""
|
"""
|
||||||
|
if not packages:
|
||||||
|
log("Nothing to install", level=DEBUG)
|
||||||
|
return
|
||||||
if options is None:
|
if options is None:
|
||||||
options = ['--option=Dpkg::Options::=--force-confold']
|
options = ['--option=Dpkg::Options::=--force-confold']
|
||||||
|
|
||||||
|
@ -687,6 +694,7 @@ def add_source(source, key=None, fail_invalid=False):
|
||||||
(r"^cloud-archive:(.*)$", _add_apt_repository),
|
(r"^cloud-archive:(.*)$", _add_apt_repository),
|
||||||
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
|
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
|
||||||
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
|
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
|
||||||
|
(r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
|
||||||
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
|
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
|
||||||
(r"^cloud:(.*)$", _add_cloud_pocket),
|
(r"^cloud:(.*)$", _add_cloud_pocket),
|
||||||
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
|
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
|
||||||
|
@ -750,6 +758,11 @@ def _add_apt_repository(spec):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def __write_sources_list_d_actual_pocket(file, actual_pocket):
|
||||||
|
with open('/etc/apt/sources.list.d/{}'.format(file), 'w') as apt:
|
||||||
|
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
|
||||||
|
|
||||||
|
|
||||||
def _add_cloud_pocket(pocket):
|
def _add_cloud_pocket(pocket):
|
||||||
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
|
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
|
||||||
|
|
||||||
|
@ -769,8 +782,9 @@ def _add_cloud_pocket(pocket):
|
||||||
'Unsupported cloud: source option %s' %
|
'Unsupported cloud: source option %s' %
|
||||||
pocket)
|
pocket)
|
||||||
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
|
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
|
||||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
|
__write_sources_list_d_actual_pocket(
|
||||||
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
|
'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
|
||||||
|
actual_pocket)
|
||||||
|
|
||||||
|
|
||||||
def _add_cloud_staging(cloud_archive_release, openstack_release):
|
def _add_cloud_staging(cloud_archive_release, openstack_release):
|
||||||
|
@ -931,10 +945,14 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
|
||||||
try:
|
try:
|
||||||
result = subprocess.check_call(cmd, env=env, **kwargs)
|
result = subprocess.check_call(cmd, env=env, **kwargs)
|
||||||
except subprocess.CalledProcessError as e:
|
except subprocess.CalledProcessError as e:
|
||||||
retry_count = retry_count + 1
|
|
||||||
if retry_count > max_retries:
|
|
||||||
raise
|
|
||||||
result = e.returncode
|
result = e.returncode
|
||||||
|
if result not in retry_results:
|
||||||
|
# a non-retriable exitcode was produced
|
||||||
|
raise
|
||||||
|
retry_count += 1
|
||||||
|
if retry_count > max_retries:
|
||||||
|
# a retriable exitcode was produced more than {max_retries} times
|
||||||
|
raise
|
||||||
log(retry_message)
|
log(retry_message)
|
||||||
time.sleep(CMD_RETRY_DELAY)
|
time.sleep(CMD_RETRY_DELAY)
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue