Add focal-ussuri and bionic-ussuri bundle

This patch adds focal-ussuri and bionic-ussuri bundles to the tests
for the charm.  The linked bug is concerned with installing
nova-network, which is not available on Ussuri.

Closes-Bug: #1872770

Change-Id: Iea5a682aaebeb6f6941cf9d8f5780473f457e455
This commit is contained in:
Alex Kavanagh 2020-04-20 10:37:07 +01:00
parent 8490919e2f
commit 810ee1b37d
18 changed files with 1339 additions and 34 deletions

View File

@ -20,7 +20,8 @@ Database
Nova compute only requires database access if using nova-network. If using
Neutron, no direct database access is required and the shared-db relation need
not be added. The nova-network feature is not available in Ussuri and later,
and so this interface produces a warning if added.
Networking
==========

View File

@ -186,7 +186,9 @@ options:
multi-host:
type: string
default: 'yes'
description: |
  Whether to run nova-api and nova-network on the compute nodes. Note that
  nova-network is not available on Ussuri and later.
reserved-huge-pages:
type: string
default:

View File

@ -28,6 +28,7 @@ from charmhelpers.core.host import (
service
)
BRIDGE_TEMPLATE = """\
# This veth pair is required when neutron data-port is mapped to an existing linux bridge. lp:1635067
@ -83,28 +84,158 @@ def get_bridges_and_ports_map():
return {b: get_bridge_ports(b) for b in get_bridges()}
def add_bridge(name, datapath_type=None):
''' Add the named bridge to openvswitch '''
def _dict_to_vsctl_set(data, table, entity):
"""Helper that takes dictionary and provides ``ovs-vsctl set`` commands
:param data: Additional data to attach to interface
The keys in the data dictionary map directly to column names in the
OpenvSwitch table specified as defined in DB-SCHEMA [0] referenced in
RFC 7047 [1]
There are some established conventions for keys in the external-ids
column of various tables, consult the OVS Integration Guide [2] for
more details.
NOTE(fnordahl): Technically the ``external-ids`` column is called
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
behaviour of transforming dashes to underscores for us [3] so we can
have a more pleasant data structure.
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
1: https://tools.ietf.org/html/rfc7047
2: http://docs.openvswitch.org/en/latest/topics/integration/
3: https://github.com/openvswitch/ovs/blob/
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
lib/db-ctl-base.c#L189-L215
:type data: Optional[Dict[str,Union[str,Dict[str,str]]]]
:param table: Name of table to operate on
:type table: str
:param entity: Name of entity to operate on
:type entity: str
:returns: '--' separated ``ovs-vsctl set`` commands
:rtype: Iterator[Tuple[str, str, str, str, str]]
"""
for (k, v) in data.items():
if isinstance(v, dict):
entries = {
'{}:{}'.format(k, dk): dv for (dk, dv) in v.items()}
else:
entries = {k: v}
for (colk, colv) in entries.items():
yield ('--', 'set', table, entity, '{}={}'.format(colk, colv))
def add_bridge(name, datapath_type=None, brdata=None, exclusive=False):
    """Add the named bridge to openvswitch and set/update bridge data for it

    :param name: Name of bridge to create
    :type name: str
    :param datapath_type: Add datapath_type to bridge (DEPRECATED, use brdata)
    :type datapath_type: Optional[str]
    :param brdata: Additional data to attach to bridge
        The keys in the brdata dictionary map directly to column names in the
        OpenvSwitch bridge table as defined in DB-SCHEMA [0] referenced in
        RFC 7047 [1]

        There are some established conventions for keys in the external-ids
        column of various tables, consult the OVS Integration Guide [2] for
        more details.

        NOTE(fnordahl): Technically the ``external-ids`` column is called
        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
        behaviour of transforming dashes to underscores for us [3] so we can
        have a more pleasant data structure.

        0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
        1: https://tools.ietf.org/html/rfc7047
        2: http://docs.openvswitch.org/en/latest/topics/integration/
        3: https://github.com/openvswitch/ovs/blob/
               20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
               lib/db-ctl-base.c#L189-L215
    :type brdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
    :param exclusive: If True, raise exception if bridge exists
    :type exclusive: bool
    :raises: subprocess.CalledProcessError
    """
    log('Creating bridge {}'.format(name))
    # NOTE: removed a stale duplicate assignment of ``cmd`` (leftover from a
    # previous version of this function) that was immediately overwritten.
    cmd = ['ovs-vsctl', '--']
    if not exclusive:
        # Without --may-exist, add-br fails if the bridge already exists.
        cmd.append('--may-exist')
    cmd.extend(('add-br', name))
    if brdata:
        for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name):
            cmd.extend(setcmd)
    if datapath_type is not None:
        log('DEPRECATION WARNING: add_bridge called with datapath_type, '
            'please use the brdata keyword argument instead.')
        cmd += ['--', 'set', 'bridge', name,
                'datapath_type={}'.format(datapath_type)]
    subprocess.check_call(cmd)
def del_bridge(name):
    """Delete the named bridge from openvswitch

    :param name: Name of bridge to remove
    :type name: str
    :raises: subprocess.CalledProcessError
    """
    # NOTE: removed a stale single-quoted docstring line (leftover from the
    # previous version of this function) that shadowed nothing but cluttered
    # the body as a no-op string statement.
    log('Deleting bridge {}'.format(name))
    # --if-exists makes removal idempotent: no error if the bridge is gone.
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
def add_bridge_port(name, port, promisc=False):
''' Add a port to the named openvswitch bridge '''
def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False,
linkup=True):
"""Add port to bridge and optionally set/update interface data for it
:param name: Name of bridge to attach port to
:type name: str
:param port: Name of port as represented in netdev
:type port: str
:param promisc: Whether to set promiscuous mode on interface
:type promisc: bool
:param ifdata: Additional data to attach to interface
The keys in the ifdata dictionary map directly to column names in the
OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
RFC 7047 [1]
There are some established conventions for keys in the external-ids
column of various tables, consult the OVS Integration Guide [2] for
more details.
NOTE(fnordahl): Technically the ``external-ids`` column is called
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
behaviour of transforming dashes to underscores for us [3] so we can
have a more pleasant data structure.
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
1: https://tools.ietf.org/html/rfc7047
2: http://docs.openvswitch.org/en/latest/topics/integration/
3: https://github.com/openvswitch/ovs/blob/
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
lib/db-ctl-base.c#L189-L215
:type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
:param exclusive: If True, raise exception if port exists
:type exclusive: bool
:param linkup: Bring link up
:type linkup: bool
:raises: subprocess.CalledProcessError
"""
cmd = ['ovs-vsctl', '--']
if not exclusive:
cmd.append('--may-exist')
cmd.extend(('add-port', name, port))
if ifdata:
for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', port):
cmd.extend(setcmd)
log('Adding port {} to bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
name, port])
subprocess.check_call(["ip", "link", "set", port, "up"])
subprocess.check_call(cmd)
if linkup:
# This is mostly a workaround for CI environments, in the real world
# the bare metal provider would most likely have configured and brought
# up the link for us.
subprocess.check_call(["ip", "link", "set", port, "up"])
if promisc:
subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
else:
@ -112,7 +243,14 @@ def add_bridge_port(name, port, promisc=False):
def del_bridge_port(name, port):
''' Delete a port from the named openvswitch bridge '''
"""Delete a port from the named openvswitch bridge
:param name: Name of bridge to remove port from
:type name: str
:param port: Name of port to remove
:type port: str
:raises: subprocess.CalledProcessError
"""
log('Deleting port {} from bridge {}'.format(port, name))
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
name, port])
@ -120,12 +258,35 @@ def del_bridge_port(name, port):
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
def add_ovsbridge_linuxbridge(name, bridge):
''' Add linux bridge to the named openvswitch bridge
def add_ovsbridge_linuxbridge(name, bridge, ifdata=None):
"""Add linux bridge to the named openvswitch bridge
:param name: Name of ovs bridge to be added to Linux bridge
:type name: str
:param bridge: Name of Linux bridge to be added to ovs bridge
:returns: True if veth is added between ovs bridge and linux bridge,
False otherwise'''
:type name: str
:param ifdata: Additional data to attach to interface
The keys in the ifdata dictionary map directly to column names in the
OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
RFC 7047 [1]
There are some established conventions for keys in the external-ids
column of various tables, consult the OVS Integration Guide [2] for
more details.
NOTE(fnordahl): Technically the ``external-ids`` column is called
``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
behaviour of transforming dashes to underscores for us [3] so we can
have a more pleasant data structure.
0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
1: https://tools.ietf.org/html/rfc7047
2: http://docs.openvswitch.org/en/latest/topics/integration/
3: https://github.com/openvswitch/ovs/blob/
20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
lib/db-ctl-base.c#L189-L215
:type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
"""
try:
import netifaces
except ImportError:
@ -177,7 +338,7 @@ def add_ovsbridge_linuxbridge(name, bridge):
bridge=bridge))
subprocess.check_call(["ifup", linuxbridge_port])
add_bridge_port(name, linuxbridge_port)
add_bridge_port(name, linuxbridge_port, ifdata=ifdata)
def is_linuxbridge_interface(port):
@ -303,3 +464,21 @@ def port_to_br(port):
).decode('UTF-8').strip()
except subprocess.CalledProcessError:
return None
def ovs_appctl(target, args):
    """Run `ovs-appctl` for target with args and return output.

    :param target: Name of daemon to contact. Unless target begins with '/',
                   `ovs-appctl` looks for a pidfile and will build the path to
                   a /var/run/openvswitch/target.pid.ctl for you.
    :type target: str
    :param args: Command and arguments to pass to `ovs-appctl`
    :type args: Tuple[str, ...]
    :returns: Output from command
    :rtype: str
    :raises: subprocess.CalledProcessError
    """
    # Assemble the full command line in one expression.
    full_cmd = ['ovs-appctl', '-t', target] + list(args)
    return subprocess.check_output(full_cmd, universal_newlines=True)

View File

@ -0,0 +1,230 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
from . import utils
OVN_RUNDIR = '/var/run/ovn'
OVN_SYSCONFDIR = '/etc/ovn'
def ovn_appctl(target, args, rundir=None, use_ovs_appctl=False):
    """Run ovn/ovs-appctl for target with args and return output.

    :param target: Name of daemon to contact. Unless target begins with '/',
                   `ovn-appctl` looks for a pidfile and will build the path to
                   a /var/run/ovn/target.pid.ctl for you.
    :type target: str
    :param args: Command and arguments to pass to `ovn-appctl`
    :type args: Tuple[str, ...]
    :param rundir: Override path to sockets
    :type rundir: Optional[str]
    :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
                           set this to True to use ``ovs-appctl`` instead.
    :type use_ovs_appctl: bool
    :returns: Output from command
    :rtype: str
    :raises: subprocess.CalledProcessError
    """
    # NOTE(fnordahl): The ovsdb-server processes for the OVN databases use a
    # non-standard naming scheme for their daemon control socket and we need
    # to pass the full path to the socket.
    if target in ('ovnnb_db', 'ovnsb_db',):
        target = os.path.join(rundir or OVN_RUNDIR, target + '.ctl')

    tool = 'ovs-appctl' if use_ovs_appctl else 'ovn-appctl'
    return utils._run(tool, '-t', target, *args)
class OVNClusterStatus(object):
    """Value object holding the parsed output of ``cluster/status``."""

    # Attributes that make up the status record; used both for population
    # in the initializer and for field-wise equality comparison.
    _ATTRS = ('name', 'cluster_id', 'server_id', 'address', 'status',
              'role', 'term', 'leader', 'vote', 'election_timer', 'log',
              'entries_not_yet_committed', 'entries_not_yet_applied',
              'connections', 'servers')

    def __init__(self, name, cluster_id, server_id, address, status, role,
                 term, leader, vote, election_timer, log,
                 entries_not_yet_committed, entries_not_yet_applied,
                 connections, servers):
        """Initialize and populate OVNClusterStatus object.

        Use class initializer so we can define types in a compatible manner.

        :param name: Name of schema used for database
        :type name: str
        :param cluster_id: UUID of cluster
        :type cluster_id: uuid.UUID
        :param server_id: UUID of server
        :type server_id: uuid.UUID
        :param address: OVSDB connection method
        :type address: str
        :param status: Status text
        :type status: str
        :param role: Role of server
        :type role: str
        :param term: Election term
        :type term: int
        :param leader: Short form UUID of leader
        :type leader: str
        :param vote: Vote
        :type vote: str
        :param election_timer: Current value of election timer
        :type election_timer: int
        :param log: Log
        :type log: str
        :param entries_not_yet_committed: Entries not yet committed
        :type entries_not_yet_committed: int
        :param entries_not_yet_applied: Entries not yet applied
        :type entries_not_yet_applied: int
        :param connections: Connections
        :type connections: str
        :param servers: Servers in the cluster
                        [('0ea6', 'ssl:192.0.2.42:6643')]
        :type servers: List[Tuple[str,str]]
        """
        # Capture the arguments once; then assign them by name so the
        # attribute list and the signature cannot drift apart silently.
        provided = locals()
        for attr in self._ATTRS:
            setattr(self, attr, provided[attr])

    def __eq__(self, other):
        # Field-wise comparison over every attribute in _ATTRS;
        # all() short-circuits on the first mismatch.
        return all(getattr(self, attr) == getattr(other, attr)
                   for attr in self._ATTRS)

    @property
    def is_cluster_leader(self):
        """Retrieve status information from clustered OVSDB.

        :returns: Whether target is cluster leader
        :rtype: bool
        """
        # ovsdb-server reports the leader as the literal string 'self'
        # when the queried server is itself the leader.
        return self.leader == 'self'
def cluster_status(target, schema=None, use_ovs_appctl=False):
    """Retrieve status information from clustered OVSDB.

    :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can
                   also be full path to control socket.
    :type target: str
    :param schema: Database schema name, deduced from target if not provided
    :type schema: Optional[str]
    :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
                           set this to True to use ``ovs-appctl`` instead.
    :type use_ovs_appctl: bool
    :returns: cluster status data object
    :rtype: OVNClusterStatus
    :raises: subprocess.CalledProcessError, KeyError, RuntimeError
    """
    schema_map = {
        'ovnnb_db': 'OVN_Northbound',
        'ovnsb_db': 'OVN_Southbound',
    }
    if schema and schema not in schema_map.keys():
        raise RuntimeError('Unknown schema provided: "{}"'.format(schema))

    # Parse the line-oriented `cluster/status` output into a dict; `k`
    # tracks the most recently seen key so indented continuation lines can
    # be appended to the right entry.
    status = {}
    k = ''
    for line in ovn_appctl(target, 'cluster/status',
                           schema or schema_map[target],
                           use_ovs_appctl=use_ovs_appctl).splitlines():
        if k and line.startswith(' '):
            # there is no key which means this is an instance of a multi-line/
            # multi-value item, populate the List which is already stored under
            # the key.
            if k == 'servers':
                # Keep only the short server id and its address
                # (fields 0 and 3 of the whitespace-split line).
                status[k].append(
                    tuple(line.replace(')', '').lstrip().split()[0:4:3]))
            else:
                status[k].append(line.lstrip())
        elif ':' in line:
            # this is a line with a key; normalize it to snake_case.
            k, v = line.split(':', 1)
            k = k.lower()
            k = k.replace(' ', '_')
            if v:
                # this is a line with both key and value
                if k in ('cluster_id', 'server_id',):
                    # Value looks like 'shortid (longid)'; store both parts.
                    v = v.replace('(', '')
                    v = v.replace(')', '')
                    status[k] = tuple(v.split())
                else:
                    status[k] = v.lstrip()
            else:
                # this is a line with only key which means a multi-line/
                # multi-value item. Store key as List which will be
                # populated on subsequent iterations.
                status[k] = []
    # Missing keys raise KeyError, documented above.
    return OVNClusterStatus(
        status['name'],
        uuid.UUID(status['cluster_id'][1]),
        uuid.UUID(status['server_id'][1]),
        status['address'],
        status['status'],
        status['role'],
        int(status['term']),
        status['leader'],
        status['vote'],
        int(status['election_timer']),
        status['log'],
        int(status['entries_not_yet_committed']),
        int(status['entries_not_yet_applied']),
        status['connections'],
        status['servers'])
def is_northd_active():
    """Query `ovn-northd` for active status.

    Note that the active status information for ovn-northd is available for
    OVN 20.03 and onward.

    :returns: True if local `ovn-northd` instance is active, False otherwise
    :rtype: bool
    """
    try:
        output_lines = ovn_appctl('ovn-northd', 'status').splitlines()
    except subprocess.CalledProcessError:
        # Command failed (daemon absent or too old); report not active.
        return False
    return any(line.startswith('Status:') and 'active' in line
               for line in output_lines)

View File

@ -0,0 +1,101 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from . import utils
class SimpleOVSDB(object):
    """Simple interface to OVSDB through the use of command line tools.

    OVS and OVN is managed through a set of databases.  These databases have
    similar command line tools to manage them.  We make use of the similarity
    to provide a generic class that can be used to manage them.

    The OpenvSwitch project does provide a Python API, but on the surface it
    appears to be a bit too involved for our simple use case.

    Examples:
    chassis = SimpleOVSDB('ovn-sbctl', 'chassis')
    for chs in chassis:
        print(chs)

    bridges = SimpleOVSDB('ovs-vsctl', 'bridge')
    for br in bridges:
        if br['name'] == 'br-test':
            bridges.set(br['uuid'], 'external_ids:charm', 'managed')
    """

    # Command line tools this wrapper knows how to drive.
    _VALID_TOOLS = ('ovs-vsctl', 'ovn-nbctl', 'ovn-sbctl')

    def __init__(self, tool, table):
        """SimpleOVSDB constructor

        :param tool: Which tool with database commands to operate on.
                     Usually one of `ovs-vsctl`, `ovn-nbctl`, `ovn-sbctl`
        :type tool: str
        :param table: Which table to operate on
        :type table: str
        :raises: RuntimeError when tool is not recognized
        """
        if tool not in self._VALID_TOOLS:
            raise RuntimeError(
                "tool must be one of 'ovs-vsctl', 'ovn-nbctl', 'ovn-sbctl'")
        self.tool = tool
        self.tbl = table

    def _find_tbl(self, condition=None):
        """Run and parse output of OVSDB `find` command.

        :param condition: An optional RFC 7047 5.1 match condition
        :type condition: Optional[str]
        :returns: Dictionary with data
        :rtype: Iterator[Dict[str, ANY]]
        """
        # When using json formatted output to OVS commands Internal OVSDB
        # notation may occur that require further deserializing.
        # Reference: https://tools.ietf.org/html/rfc7047#section-5.1
        deserializers = {
            'uuid': uuid.UUID,
            # FIXME sets also appear to sometimes contain type/value tuples
            'set': list,
            'map': dict,
        }
        cmd = [self.tool, '-f', 'json', 'find', self.tbl]
        if condition:
            cmd.append(condition)
        decoded = json.loads(utils._run(*cmd))
        for row in decoded['data']:
            # Typed cells arrive as [type, value] pairs; plain cells as-is.
            parsed_row = [
                deserializers.get(cell[0], str)(cell[1])
                if isinstance(cell, list) else cell
                for cell in row]
            yield dict(zip(decoded['headings'], parsed_row))

    def __iter__(self):
        return self._find_tbl()

    def clear(self, rec, col):
        """Clear a column in a record."""
        utils._run(self.tool, 'clear', self.tbl, rec, col)

    def find(self, condition):
        """Iterate over records matching an RFC 7047 5.1 condition."""
        return self._find_tbl(condition=condition)

    def remove(self, rec, col, value):
        """Remove a value from a column in a record."""
        utils._run(self.tool, 'remove', self.tbl, rec, col, value)

    def set(self, rec, col, value):
        """Set a column value on a record."""
        utils._run(self.tool, 'set', self.tbl, rec, '{}={}'.format(col, value))

View File

@ -0,0 +1,26 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
def _run(*args):
    """Run a process, check result, capture decoded output from STDOUT.

    :param args: Command and arguments to run
    :type args: Tuple[str, ...]
    :returns: Information about the completed process
    :rtype: str
    :raises: subprocess.CalledProcessError
    """
    # universal_newlines=True makes check_output return decoded text rather
    # than bytes, on both Python 2 and 3.
    return subprocess.check_output(args, universal_newlines=True)

View File

@ -13,7 +13,7 @@
# limitations under the License.
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from collections import OrderedDict, namedtuple
from functools import wraps
import subprocess
@ -39,12 +39,16 @@ from charmhelpers.core.hookenv import (
action_fail,
action_set,
config,
expected_peer_units,
expected_related_units,
log as juju_log,
charm_dir,
INFO,
ERROR,
metadata,
related_units,
relation_get,
relation_id,
relation_ids,
relation_set,
status_set,
@ -53,6 +57,7 @@ from charmhelpers.core.hookenv import (
cached,
leader_set,
leader_get,
local_unit,
)
from charmhelpers.core.strutils import (
@ -108,6 +113,10 @@ from charmhelpers.contrib.openstack.policyd import (
POLICYD_CONFIG_NAME,
)
from charmhelpers.contrib.openstack.ha.utils import (
expect_ha,
)
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@ -2046,3 +2055,264 @@ def is_db_maintenance_mode(relid=None):
'WARN')
pass
return True in notifications
@cached
def container_scoped_relations():
    """Get all the container scoped relations

    :returns: List of relation names
    :rtype: List
    """
    md = metadata()
    # Scan every relation declared in metadata.yaml and keep the ones
    # whose scope is 'container'.
    return [relation
            for relation_type in ('provides', 'requires', 'peers')
            for relation, relation_data in md.get(relation_type, {}).items()
            if relation_data.get('scope') == 'container']
def is_db_ready(use_current_context=False, rel_name=None):
    """Check remote database is ready to be used.

    Database relations are expected to provide a list of 'allowed' units to
    confirm that the database is ready for use by those units.

    If db relation has provided this information and local unit is a member,
    returns True otherwise False.

    :param use_current_context: Whether to limit checks to current hook
                                context.
    :type use_current_context: bool
    :param rel_name: Name of relation to check
    :type rel_name: string
    :returns: Whether remote db is ready.
    :rtype: bool
    :raises: Exception
    """
    rel_name = rel_name or 'shared-db'
    this_unit = local_unit()

    if use_current_context:
        # Guard: only meaningful when executing inside a hook for rel_name.
        if relation_id() not in relation_ids(rel_name):
            raise Exception("use_current_context=True but not in {} "
                            "rel hook contexts (currently in {})."
                            .format(rel_name, relation_id()))
        rids_units = [(None, None)]
    else:
        rids_units = [(r_id, u)
                      for r_id in relation_ids(rel_name)
                      for u in related_units(r_id)]

    for rid, unit in rids_units:
        allowed_units = relation_get(rid=rid, unit=unit,
                                     attribute='allowed_units')
        if allowed_units and this_unit in allowed_units.split():
            juju_log("This unit ({}) is in allowed unit list from {}".format(
                this_unit,
                unit), 'DEBUG')
            return True
    juju_log("This unit was not found in any allowed unit list")
    return False
def is_expected_scale(peer_relation_name='cluster'):
    """Query juju goal-state to determine whether our peer- and dependency-
    relations are at the expected scale.

    Useful for deferring per unit per relation housekeeping work until we are
    ready to complete it successfully and without unnecessary repetiton.

    Always returns True if version of juju used does not support goal-state.

    :param peer_relation_name: Name of peer relation
    :type rel_name: string
    :returns: True or False
    :rtype: bool
    """
    def _get_relation_id(rel_type):
        # First relation id of the given type, or None if no such relation.
        return next((rid for rid in relation_ids(reltype=rel_type)), None)

    Relation = namedtuple('Relation', 'rel_type rel_id')
    peer_rid = _get_relation_id(peer_relation_name)
    # Units with no peers should still have a peer relation.
    if not peer_rid:
        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
        return False
    expected_relations = [
        Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
    if expect_ha():
        expected_relations.append(
            Relation(
                rel_type='ha',
                rel_id=_get_relation_id('ha')))
    juju_log(
        'Checking scale of {} relations'.format(
            ','.join([r.rel_type for r in expected_relations])),
        'DEBUG')
    try:
        # Peer check: every expected peer unit must already be related.
        if (len(related_units(relid=peer_rid)) <
                len(list(expected_peer_units()))):
            return False
        for rel in expected_relations:
            if not rel.rel_id:
                juju_log(
                    'Expected to find {} relation, but it is missing'.format(
                        rel.rel_type),
                    'DEBUG')
                return False
            # Goal state returns every unit even for container scoped
            # relations but the charm only ever has a relation with
            # the local unit.
            if rel.rel_type in container_scoped_relations():
                expected_count = 1
            else:
                expected_count = len(
                    list(expected_related_units(reltype=rel.rel_type)))
            if len(related_units(relid=rel.rel_id)) < expected_count:
                juju_log(
                    ('Not at expected scale, not enough units on {} '
                     'relation'.format(rel.rel_type)),
                    'DEBUG')
                return False
    except NotImplementedError:
        # Juju is too old to support goal-state; assume expected scale so
        # that dependent housekeeping is never deferred indefinitely.
        return True
    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
    return True
def get_peer_key(unit_name):
    """Get the peer key for this unit.

    The peer key is the key a unit uses to publish its status down the peer
    relation

    :param unit_name: Name of unit
    :type unit_name: string
    :returns: Peer key for given unit
    :rtype: string
    """
    # Juju unit names contain a '/' separator; swap it for '-' so the
    # result is usable as a relation data key.
    sanitised_name = unit_name.replace('/', '-')
    return 'unit-state-{}'.format(sanitised_name)
# States a unit may publish to its peers on the peer relation.
UNIT_READY = 'READY'
UNIT_NOTREADY = 'NOTREADY'
UNIT_UNKNOWN = 'UNKNOWN'
# The complete set of valid unit states.
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
def inform_peers_unit_state(state, relation_name='cluster'):
    """Inform peers of the state of this unit.

    :param state: State of unit to publish
    :type state: string
    :param relation_name: Name of relation to publish state on
    :type relation_name: string
    :raises: ValueError when state is not one of UNIT_STATES
    """
    if state not in UNIT_STATES:
        raise ValueError(
            "Setting invalid state {} for unit".format(state))
    # Publish the same key/value on every instance of the relation.
    settings = {get_peer_key(local_unit()): state}
    for r_id in relation_ids(relation_name):
        relation_set(relation_id=r_id, relation_settings=settings)
def get_peers_unit_state(relation_name='cluster'):
    """Get the state of all peers.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Unit states keyed on unit name.
    :rtype: dict
    :raises: ValueError
    """
    unit_states = {}
    for r_id in relation_ids(relation_name):
        for unit in related_units(r_id):
            settings = relation_get(unit=unit, rid=r_id)
            # A unit that has not yet published a state reports UNKNOWN.
            state = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
            if state not in UNIT_STATES:
                raise ValueError(
                    "Unit in unknown state {}".format(state))
            unit_states[unit] = state
    return unit_states
def are_peers_ready(relation_name='cluster'):
    """Check if all peers are ready.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Whether all units are ready.
    :rtype: bool
    """
    peer_states = get_peers_unit_state(relation_name).values()
    return all(state == UNIT_READY for state in peer_states)
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
    """Inform peers if this unit is ready.

    The check function should return a tuple (state, message). A state
    of 'READY' indicates the unit is READY.

    :param check_unit_ready_func: Function to run to check readiness
    :type check_unit_ready_func: function
    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    """
    # The message part of the check result is not needed here.
    unit_ready, _ = check_unit_ready_func()
    state = UNIT_READY if unit_ready else UNIT_NOTREADY
    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
    inform_peers_unit_state(state, relation_name)
def check_api_unit_ready(check_db_ready=True):
    """Check if this unit is ready.

    :param check_db_ready: Include checks of database readiness.
    :type check_db_ready: bool
    :returns: Whether unit state is ready and status message
    :rtype: (bool, str)
    """
    # Ordered list of (predicate, failure message); the first predicate
    # that fires determines the reported failure.  Lambdas keep later,
    # more expensive checks lazy.
    failure_checks = (
        (is_db_maintenance_mode, 'Database in maintenance mode.'),
        (is_unit_paused_set, 'Unit paused.'),
        (lambda: check_db_ready and not is_db_ready(),
         'Allowed_units list provided but this unit not present'),
        (lambda: not is_db_initialised(), 'Database not initialised'),
        (lambda: not is_expected_scale(),
         'Charm and its dependencies not yet at expected scale'),
    )
    unit_ready = True
    msg = ''
    for check, failure_msg in failure_checks:
        if check():
            unit_ready = False
            msg = failure_msg
            break
    if unit_ready:
        msg = 'Unit has passed checks and is ready'
    juju_log(msg, 'DEBUG')
    return unit_ready, msg
def check_api_application_ready():
    """Check if this application is ready.

    :returns: Whether unit state is ready and status message
    :rtype: (bool, str)
    """
    # The local unit must be ready before peer state is even considered.
    unit_ready, msg = check_api_unit_ready(check_db_ready=True)
    if not unit_ready:
        return unit_ready, msg
    if not are_peers_ready():
        return False, 'This unit is ready but peers are not'
    return True, 'All units have passed checks and are ready'

View File

@ -140,9 +140,16 @@ def vault_relation_complete(backend=None):
:ptype backend: string
:returns: whether the relation to vault is complete
:rtype: bool"""
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
try:
import hvac
except ImportError:
return False
try:
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
except hvac.exceptions.InvalidRequest:
return False
# TODO: contrib a high level unwrap method to hvac that works

View File

@ -22,6 +22,7 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import collections
import errno
import hashlib
import math
@ -93,6 +94,88 @@ LEGACY_PG_COUNT = 200
DEFAULT_MINIMUM_PGS = 2
class OsdPostUpgradeError(Exception):
    """Error class for OSD post-upgrade operations."""
    pass


class OSDSettingConflict(Exception):
    """Error class for conflicting osd setting requests."""
    pass


class OSDSettingNotAllowed(Exception):
    """Error class for a disallowed setting."""
    pass


# Convenience tuple for catching any osd-setting validation failure.
OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)

# Only settings named here may be requested by client units.
OSD_SETTING_WHITELIST = [
    'osd heartbeat grace',
    'osd heartbeat interval',
]
def _order_dict_by_key(rdict):
"""Convert a dictionary into an OrderedDict sorted by key.
:param rdict: Dictionary to be ordered.
:type rdict: dict
:returns: Ordered Dictionary.
:rtype: collections.OrderedDict
"""
return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
def get_osd_settings(relation_name):
    """Consolidate requested osd settings from all clients.

    Consolidate requested osd settings from all clients. Check that the
    requested setting is on the whitelist and it does not conflict with
    any other requested settings.

    :returns: Dictionary of settings
    :rtype: dict
    :raises: OSDSettingNotAllowed
    :raises: OSDSettingConflict
    """
    osd_settings = {}
    for relid in relation_ids(relation_name):
        for unit in related_units(relid):
            # Units that have not published settings yield an empty dict.
            raw_settings = relation_get('osd-settings', unit, relid) or '{}'
            for key, value in json.loads(raw_settings).items():
                if key not in OSD_SETTING_WHITELIST:
                    raise OSDSettingNotAllowed(
                        'Illegal settings "{}"'.format(key))
                if key in osd_settings and osd_settings[key] != value:
                    raise OSDSettingConflict(
                        'Conflicting settings for "{}"'.format(key))
                osd_settings[key] = value
    return _order_dict_by_key(osd_settings)
def send_osd_settings():
    """Pass on requested OSD settings to osd units."""
    try:
        settings = get_osd_settings('client')
    except OSD_SETTING_EXCEPTIONS as e:
        # The requested settings are invalid; do not forward them.
        # update-status will surface the problem to the operator.
        log(e, level=ERROR)
        return
    payload = {
        'osd-settings': json.dumps(settings, sort_keys=True)}
    for osd_relid in relation_ids('osd'):
        relation_set(relation_id=osd_relid,
                     relation_settings=payload)
def validator(value, valid_type, valid_range=None):
"""
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
@ -1635,5 +1718,67 @@ class CephConfContext(object):
continue
ceph_conf[key] = conf[key]
return ceph_conf
class CephOSDConfContext(CephConfContext):
    """Ceph config (ceph.conf) context.

    Consolidates settings from config-flags via CephConfContext with
    settings provided by the mons. The config-flag values are preserved in
    conf['osd'], settings from the mons which do not clash with config-flag
    settings are in conf['osd_from_client'] and finally settings which do
    clash are in conf['osd_from_client_conflict']. Rather than silently drop
    the conflicting settings they are provided in the context so they can be
    rendered commented out to give some visibility to the admin.
    """

    def __init__(self, permitted_sections=None):
        """Gather mon-provided osd settings in addition to config-flags.

        :param permitted_sections: Optional list of config sections allowed
            through by the parent CephConfContext.
        """
        super(CephOSDConfContext, self).__init__(
            permitted_sections=permitted_sections)
        try:
            self.settings_from_mons = get_osd_settings('mon')
        except OSDSettingConflict:
            # Conflicting requests from the mons cannot be reconciled here;
            # fall back to config-flags only and let status reporting flag it.
            log(
                "OSD settings from mons are inconsistent, ignoring them",
                level=WARNING)
            self.settings_from_mons = {}

    def filter_osd_from_mon_settings(self):
        """Filter settings from client relation against config-flags.

        :returns: A tuple of (config-flag values,
            client settings which do not conflict with config-flag values,
            client settings which conflict with config-flag values)
        :rtype: (OrderedDict, OrderedDict, OrderedDict)
        """
        ceph_conf = super(CephOSDConfContext, self).__call__()
        conflicting_entries = {}
        clear_entries = {}
        for key, value in self.settings_from_mons.items():
            if key in ceph_conf.get('osd', {}):
                # config-flags take precedence; only record a conflict when
                # the mon-provided value actually differs.
                if ceph_conf['osd'][key] != value:
                    conflicting_entries[key] = value
            else:
                clear_entries[key] = value
        clear_entries = _order_dict_by_key(clear_entries)
        conflicting_entries = _order_dict_by_key(conflicting_entries)
        return ceph_conf, clear_entries, conflicting_entries

    def __call__(self):
        """Construct OSD config context.

        Standard context with two additional special keys.
            osd_from_client_conflict: client settings which conflict with
                config-flag values
            osd_from_client: settings which do not conflict with config-flag
                values

        :returns: OSD config context dict.
        :rtype: dict
        """
        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
        conf['osd_from_client_conflict'] = osd_conflict
        conf['osd_from_client'] = osd_clear
        return conf

View File

@ -32,6 +32,10 @@ def loopback_devices():
/dev/loop0: [0807]:961814 (/tmp/my.img)
or:
/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
@ -39,9 +43,9 @@ def loopback_devices():
output = check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
return loopbacks

View File

@ -1093,7 +1093,7 @@ def status_set(workload_state, message):
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message unstead.
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message
@ -1526,13 +1526,13 @@ def env_proxy_settings(selected_settings=None):
"""Get proxy settings from process environment variables.
Get charm proxy settings from environment variables that correspond to
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
see lp:1782236) in a format suitable for passing to an application that
reacts to proxy settings passed as environment variables. Some applications
support lowercase or uppercase notation (e.g. curl), some support only
lowercase (e.g. wget), there are also subjectively rare cases of only
uppercase notation support. no_proxy CIDR and wildcard support also varies
between runtimes and applications as there is no enforced standard.
juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see
lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
application that reacts to proxy settings passed as environment variables.
Some applications support lowercase or uppercase notation (e.g. curl), some
support only lowercase (e.g. wget), there are also subjectively rare cases
of only uppercase notation support. no_proxy CIDR and wildcard support also
varies between runtimes and applications as there is no enforced standard.
Some applications may connect to multiple destinations and expose config
options that would affect only proxy settings for a specific destination

View File

@ -532,7 +532,13 @@ class CloudComputeContext(context.OSContextGenerator):
return {}
if config('multi-host').lower() == 'yes':
self._ensure_packages(['nova-api', 'nova-network'])
cmp_os_release = CompareOpenStackReleases(
os_release('nova-common'))
if cmp_os_release <= 'train':
# nova-network only available until ussuri
self._ensure_packages(['nova-api', 'nova-network'])
else:
self._ensure_packages(['nova-api'])
return {
'flat_interface': config('flat-interface'),

View File

@ -753,6 +753,15 @@ def post_series_upgrade():
resume_unit_helper, CONFIGS)
@hooks.hook('shared-db-relation-joined')
def shared_db_relation_joined():
    """Warn when shared-db is related on Ussuri or later.

    The shared-db relation exists solely for nova-network, which is no
    longer available from Ussuri onwards, so joining it on those releases
    only logs a warning asking the operator to remove the relation.
    """
    release = os_release('nova-common')
    if CompareOpenStackReleases(release) >= 'ussuri':
        # NOTE(review): "WARNING" is passed positionally; presumably it maps
        # to the level parameter of charmhelpers' log() -- confirm.
        log("shared-db is only required for nova-network which is NOT "
            "available in Ussuri and later. Please remove the relation.",
            "WARNING")
def main():
try:
hooks.execute(sys.argv)

View File

@ -14,6 +14,7 @@ series:
- xenial
- bionic
- eoan
- focal
- trusty
provides:
cloud-compute:

View File

@ -0,0 +1,106 @@
series: bionic
relations:
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp
- - placement:shared-db
- percona-cluster:shared-db
- - placement:identity-service
- keystone:identity-service
- - placement:placement
- nova-cloud-controller:placement
applications:
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
max-connections: 1000
innodb-buffer-pool-size: 256M
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: cloud:bionic-ussuri
network-manager: Neutron
debug: true
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
manage-neutron-plugin-legacy-mode: true
openstack-origin: cloud:bionic-ussuri
flat-network-providers: physnet1
neutron-security-groups: true
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: cloud:bionic-ussuri
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: cloud:bionic-ussuri
bridge-mappings: physnet1:br-ex
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: cloud:bionic-ussuri
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: cloud:bionic-ussuri
nova-compute:
charm: ../../../nova-compute
num_units: 1
constraints: mem=4G cores=4
options:
openstack-origin: cloud:bionic-ussuri
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
ephemeral-device: /dev/vdb
ephemeral-unmount: /mnt
debug: true

View File

@ -0,0 +1,202 @@
variables:
openstack-origin: &openstack-origin distro
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
constraints: mem=4096M cores=4
applications:
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
neutron-api-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '3'
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: *openstack-origin
network-manager: Neutron
debug: true
to:
- '4'
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
manage-neutron-plugin-legacy-mode: true
openstack-origin: *openstack-origin
flat-network-providers: physnet1
neutron-security-groups: true
to:
- '5'
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '6'
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: *openstack-origin
bridge-mappings: physnet1:br-ex
to:
- '7'
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '8'
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '9'
nova-compute:
charm: ../../../nova-compute
num_units: 1
storage:
ephemeral-device: '40G'
options:
openstack-origin: *openstack-origin
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
debug: true
to:
- '10'
relations:
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-gateway:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:quantum-network-service'
- 'neutron-gateway:quantum-network-service'
- - 'neutron-api:shared-db'
- 'neutron-api-mysql-router:shared-db'
- - 'neutron-api-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-api:neutron-api'
- 'nova-cloud-controller:neutron-api'
- - 'neutron-api:identity-service'
- 'keystone:identity-service'
- - 'nova-compute:neutron-plugin'
- 'neutron-openvswitch:neutron-plugin'
- - 'rabbitmq-server:amqp'
- 'neutron-openvswitch:amqp'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement:identity-service'
- 'keystone:identity-service'
- - 'placement:placement'
- 'nova-cloud-controller:placement'

View File

@ -1,7 +1,11 @@
charm_name: nova-compute
smoke_bundles:
- bionic-train
gate_bundles:
- focal-ussuri
- bionic-ussuri
- bionic-train
- bionic-stein
- bionic-rocky
@ -11,7 +15,9 @@ gate_bundles:
- xenial-ocata
- xenial-mitaka
- trusty-mitaka
dev_bundles:
configure:
- zaza.openstack.charm_tests.glance.setup.add_cirros_image
- zaza.openstack.charm_tests.glance.setup.add_lts_image
@ -19,8 +25,13 @@ configure:
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
tests:
- zaza.openstack.charm_tests.nova.tests.CirrosGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.LTSGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.NovaCompute
- zaza.openstack.charm_tests.nova.tests.SecurityTests
tests_options:
force_deploy:
- focal-ussuri

View File

@ -904,11 +904,16 @@ class InstanceConsoleContextTest(CharmTestCase):
super(InstanceConsoleContextTest, self).setUp(context, TO_PATCH)
self.os_release.return_value = 'mitaka'
@patch.object(context, 'resolve_address')
@patch.object(context, 'relation_ids')
@patch.object(context, 'related_units')
def test_spice(self, mock_related_units, mock_relation_ids):
def test_spice(self,
mock_related_units,
mock_relation_ids,
mock_resolve_address):
mock_relation_ids.return_value = ['cloud-compute:15']
mock_related_units.return_value = ['nova-compute/0']
mock_resolve_address.return_value = "internal-address"
rel_settings = {
'console_access_protocol': 'spice',