Updates for jammy enablement

- charmcraft: build-on 20.04 -> run-on 20.04/22.04 [*archs]
- Refresh tox targets
- Drop impish bundles and OSCI testing
- Add jammy metadata
- Default source is yoga
- Charmhelpers and charms.ceph sync

Change-Id: I39f091db8ef8f18c0a40d4e46d54dfc964c03d70
Chris MacNaughton 2022-04-07 09:07:37 +02:00 committed by James Page
parent 7907fa96e9
commit 1f4dbd3a5d
18 changed files with 531 additions and 397 deletions

View File

@ -1,3 +1,3 @@
- project:
templates:
- openstack-python3-ussuri-jobs
- openstack-python3-charm-yoga-jobs

View File

@ -15,4 +15,5 @@ include:
- contrib.openstack|inc=*
- contrib.charmsupport
- contrib.hardening|inc=*
- contrib.hardware
- contrib.openstack.policyd

View File

@ -21,7 +21,15 @@ parts:
- README.md
bases:
- name: ubuntu
channel: "20.04"
architectures:
- amd64
- build-on:
- name: ubuntu
channel: "20.04"
architectures:
- amd64
run-on:
- name: ubuntu
channel: "20.04"
architectures: [amd64, s390x, ppc64el, arm64]
- name: ubuntu
channel: "22.04"
architectures: [amd64, s390x, ppc64el, arm64]

View File

@ -5,7 +5,7 @@ options:
description: RadosGW debug level. Max is 20.
source:
type: string
default:
default: yoga
description: |
Optional repository from which to install. May be one of the following:
distro (default), ppa:somecustom/ppa, a deb url sources entry,

View File

@ -0,0 +1,13 @@
# Copyright 2022 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,288 @@
#!/usr/bin/env python3
#
# Copyright 2016-2022 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import itertools
import logging
import os
import re
import shlex
import subprocess
import typing
def format_pci_addr(pci_addr: str) -> str:
"""Format a PCI address with 0 fill for parts
:param: pci_addr: unformatted PCI address
:type: str
:returns: formatted PCI address
:rtype: str
"""
domain, bus, slot_func = pci_addr.split(":")
slot, func = slot_func.split(".")
return "{}:{}:{}.{}".format(
domain.zfill(4), bus.zfill(2), slot.zfill(2), func
)
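# Worked example (illustrative): format_pci_addr("0:3:0.0") -> "0000:03:00.0"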
def get_sysnet_interfaces_and_macs() -> list:
"""Catalog interface information from local system
each device dict contains:
interface: logical name
mac_address: MAC address
pci_address: PCI address
state: Current interface state (up/down)
sriov: Boolean indicating whether interface is an SR-IOV
capable device.
sriov_totalvfs: Total VF capacity of device
sriov_numvfs: Configured VF capacity of device
:returns: array of dict objects containing details of each interface
:rtype: list
"""
net_devs = []
for sdir in itertools.chain(
glob.glob("/sys/bus/pci/devices/*/net/../"),
glob.glob("/sys/bus/pci/devices/*/virtio*/net/../")):
fq_path = os.path.realpath(sdir)
path = fq_path.split("/")
if "virtio" in path[-1]:
pci_address = path[-2]
else:
pci_address = path[-1]
ifname = get_sysnet_interface(sdir)
if not ifname:
logging.warn("Unable to determine interface name for PCI "
"device {}".format(pci_address))
continue
device = {
"interface": ifname,
"mac_address": get_sysnet_mac(sdir, ifname),
"pci_address": pci_address,
"state": get_sysnet_device_state(sdir, ifname),
"sriov": is_sriov(sdir),
}
if device["sriov"]:
device["sriov_totalvfs"] = get_sriov_totalvfs(sdir)
device["sriov_numvfs"] = get_sriov_numvfs(sdir)
net_devs.append(device)
return net_devs
def get_sysnet_mac(sysdir: str, ifname: str) -> str:
"""Determine MAC address for a device
:param: sysdir: path to device /sys directory
:type: str
:returns: MAC address of device
:rtype: str
"""
mac_addr_file = os.path.join(sysdir, "net", ifname, "address")
with open(mac_addr_file, "r") as f:
read_data = f.read()
return read_data.strip()
def get_sysnet_device_state(sysdir: str, ifname: str) -> str:
"""Read operational state of a device
:param: sysdir: path to device /sys directory
:type: str
:returns: current device state
:rtype: str
"""
state_file = os.path.join(sysdir, "net", ifname, "operstate")
with open(state_file, "r") as f:
read_data = f.read()
return read_data.strip()
def is_sriov(sysdir: str) -> bool:
"""Determine whether a device is SR-IOV capable
:param: sysdir: path to device /sys directory
:type: str
:returns: whether device is SR-IOV capable or not
:rtype: bool
"""
return os.path.exists(os.path.join(sysdir, "sriov_totalvfs"))
def get_sriov_totalvfs(sysdir: str) -> int:
"""Read total VF capacity for a device
:param: sysdir: path to device /sys directory
:type: str
:returns: number of VFs the device supports
:rtype: int
"""
sriov_totalvfs_file = os.path.join(sysdir, "sriov_totalvfs")
with open(sriov_totalvfs_file, "r") as f:
read_data = f.read()
return int(read_data.strip())
def get_sriov_numvfs(sysdir: str) -> int:
"""Read configured VF capacity for a device
:param: sysdir: path to device /sys directory
:type: str
:returns: number of VFs the device is configured with
:rtype: int
"""
sriov_numvfs_file = os.path.join(sysdir, "sriov_numvfs")
with open(sriov_numvfs_file, "r") as f:
read_data = f.read()
return int(read_data.strip())
# https://github.com/libvirt/libvirt/commit/5b1c525b1f3608156884aed0dc5e925306c1e260
PF_PHYS_PORT_NAME_REGEX = re.compile(r"(p[0-9]+$)|(p[0-9]+s[0-9]+$)",
re.IGNORECASE)
def _phys_port_name_is_pf(sysnetdir: str) -> typing.Optional[bool]:
try:
with open(os.path.join(sysnetdir, "phys_port_name"), "r") as fin:
return (PF_PHYS_PORT_NAME_REGEX.match(fin.read().strip())
is not None)
except OSError:
return
def get_sysnet_interface(sysdir: str) -> typing.Optional[str]:
sysnetdir = os.path.join(sysdir, "net")
netdevs = os.listdir(sysnetdir)
# Return early in case the PCI device only has one netdev
if len(netdevs) == 1:
return netdevs[0]
# When a PCI device has multiple netdevs we need to figure out which one
# represents the PF
for netdev in netdevs:
if _phys_port_name_is_pf(os.path.join(sysnetdir, netdev)):
return netdev
def get_pci_ethernet_addresses() -> list:
"""Generate list of PCI addresses for all network adapters
:returns: list of PCI addresses
:rtype: list
"""
cmd = ["lspci", "-m", "-D"]
lspci_output = subprocess.check_output(cmd).decode("UTF-8")
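# 'lspci -m -D' emits one quoted record per device, roughly (hypothetical card):
#   0000:3b:00.0 "Ethernet controller" "Intel Corporation" "I350 ..." ...
# so columns[0] is the PCI address and columns[1] the device class.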
pci_addresses = []
for line in lspci_output.split("\n"):
columns = shlex.split(line)
if len(columns) > 1 and columns[1] == "Ethernet controller":
pci_address = columns[0]
pci_addresses.append(format_pci_addr(pci_address))
return pci_addresses
class PCINetDevice(object):
def __init__(self, pci_address):
self.pci_address = pci_address
self.interface_name = None
self.mac_address = None
self.state = None
self.sriov = False
self.sriov_totalvfs = None
self.sriov_numvfs = None
self.update_attributes()
def update_attributes(self):
self.update_interface_info()
def update_interface_info(self):
net_devices = get_sysnet_interfaces_and_macs()
for interface in net_devices:
if self.pci_address == interface["pci_address"]:
self.interface_name = interface["interface"]
self.mac_address = interface["mac_address"]
self.state = interface["state"]
self.sriov = interface["sriov"]
if self.sriov:
self.sriov_totalvfs = interface["sriov_totalvfs"]
self.sriov_numvfs = interface["sriov_numvfs"]
def _set_sriov_numvfs(self, numvfs: int):
sdevice = os.path.join(
"/sys/bus/pci/devices", self.pci_address, "sriov_numvfs"
)
with open(sdevice, "w") as sh:
sh.write(str(numvfs))
self.update_attributes()
def set_sriov_numvfs(self, numvfs: int) -> bool:
"""Set the number of VF devices for a SR-IOV PF
Assuming the device is an SR-IOV device, this function will attempt
to change the number of VFs created by the PF.
@param numvfs: integer to set the current number of VFs to
@returns boolean indicating whether any changes were made
"""
if self.sriov and numvfs != self.sriov_numvfs:
# NOTE(fnordahl): run-time change of numvfs is disallowed
# without resetting to 0 first.
self._set_sriov_numvfs(0)
self._set_sriov_numvfs(numvfs)
return True
return False
class PCINetDevices(object):
def __init__(self):
self.pci_devices = [
PCINetDevice(dev) for dev in get_pci_ethernet_addresses()
]
def update_devices(self):
for pcidev in self.pci_devices:
pcidev.update_attributes()
def get_macs(self) -> list:
macs = []
for pcidev in self.pci_devices:
if pcidev.mac_address:
macs.append(pcidev.mac_address)
return macs
def get_device_from_mac(self, mac: str) -> PCINetDevice:
for pcidev in self.pci_devices:
if pcidev.mac_address == mac:
return pcidev
return None
def get_device_from_pci_address(self, pci_addr: str) -> PCINetDevice:
for pcidev in self.pci_devices:
if pcidev.pci_address == pci_addr:
return pcidev
return None
def get_device_from_interface_name(
self, interface_name: str
) -> PCINetDevice:
for pcidev in self.pci_devices:
if pcidev.interface_name == interface_name:
return pcidev
return None
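
For context, a minimal usage sketch of the synced pci helper (the interface name and VF count below are hypothetical):

from charmhelpers.contrib.hardware import pci

# Enumerate all PCI Ethernet devices present on the unit.
devices = pci.PCINetDevices()

# Look up a device by a (hypothetical) interface name and, if it is
# SR-IOV capable, request eight virtual functions on the PF. Note that
# set_sriov_numvfs() first resets sriov_numvfs to 0 when changing an
# already-configured value.
dev = devices.get_device_from_interface_name('ens3f0')
if dev and dev.sriov:
    dev.set_sriov_numvfs(8)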

View File

@ -118,12 +118,7 @@ from charmhelpers.contrib.openstack.utils import (
)
from charmhelpers.core.unitdata import kv
try:
from sriov_netplan_shim import pci
except ImportError:
# The use of the function and contexts that require the pci module is
# optional.
pass
from charmhelpers.contrib.hardware import pci
try:
import psutil
@ -426,6 +421,9 @@ class IdentityServiceContext(OSContextGenerator):
('password', ctxt.get('admin_password', '')),
('signing_dir', ctxt.get('signing_dir', '')),))
if ctxt.get('service_type'):
c.update((('service_type', ctxt.get('service_type')),))
return c
def __call__(self):
@ -468,6 +466,9 @@ class IdentityServiceContext(OSContextGenerator):
'internal_protocol': int_protocol,
'api_version': api_version})
if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
if float(api_version) > 2:
ctxt.update({
'admin_domain_name': rdata.get('service_domain'),
@ -539,6 +540,9 @@ class IdentityCredentialsContext(IdentityServiceContext):
'api_version': api_version
})
if rdata.get('service_type'):
ctxt['service_type'] = rdata.get('service_type')
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
@ -3120,7 +3124,7 @@ class SRIOVContext(OSContextGenerator):
"""Determine number of Virtual Functions (VFs) configured for device.
:param device: Object describing a PCI Network interface card (NIC)/
:type device: sriov_netplan_shim.pci.PCINetDevice
:type device: contrib.hardware.pci.PCINetDevice
:param sriov_numvfs: Number of VFs requested for blanket configuration.
:type sriov_numvfs: int
:returns: Number of VFs to configure for device

View File

@ -9,4 +9,7 @@ project_name = {{ admin_tenant_name }}
username = {{ admin_user }}
password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% if service_type -%}
service_type = {{ service_type }}
{% endif -%}
{% endif -%}

View File

@ -6,6 +6,9 @@ auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3
project_domain_name = {{ admin_domain_name }}
user_domain_name = {{ admin_domain_name }}
{% if service_type -%}
service_type = {{ service_type }}
{% endif -%}
{% else -%}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}

View File

@ -614,7 +614,8 @@ class Pool(BasePool):
class ReplicatedPool(BasePool):
def __init__(self, service, name=None, pg_num=None, replicas=None,
percent_data=None, app_name=None, op=None):
percent_data=None, app_name=None, op=None,
profile_name='replicated_rule'):
"""Initialize ReplicatedPool object.
Pool information is either initialized from individual keyword
@ -631,6 +632,8 @@ class ReplicatedPool(BasePool):
to this replicated pool.
:type replicas: int
:raises: KeyError
:param profile_name: Crush Profile to use
:type profile_name: Optional[str]
"""
# NOTE: Do not perform initialization steps that require live data from
# a running cluster here. The *Pool classes may be used for validation.
@ -645,11 +648,20 @@ class ReplicatedPool(BasePool):
# we will fail with KeyError if it is not provided.
self.replicas = op['replicas']
self.pg_num = op.get('pg_num')
self.profile_name = op.get('crush-profile') or profile_name
else:
self.replicas = replicas or 2
self.pg_num = pg_num
self.profile_name = profile_name or 'replicated_rule'
def _create(self):
# Validate if crush profile exists
if self.profile_name is None:
msg = ("Failed to discover crush profile named "
"{}".format(self.profile_name))
log(msg, level=ERROR)
raise PoolCreationError(msg)
# Do extra validation on pg_num with data from live cluster
if self.pg_num:
# Since the number of placement groups were specified, ensure
@ -667,12 +679,12 @@ class ReplicatedPool(BasePool):
'--pg-num-min={}'.format(
min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
),
self.name, str(self.pg_num)
self.name, str(self.pg_num), self.profile_name
]
else:
cmd = [
'ceph', '--id', self.service, 'osd', 'pool', 'create',
self.name, str(self.pg_num)
self.name, str(self.pg_num), self.profile_name
]
check_call(cmd)
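# With hypothetical values (service 'admin', pool 'objects', pg_num 64,
# profile 'fast-ssd') the command executed above is roughly:
#   ceph --id admin osd pool create --pg-num-min=<N> objects 64 fast-ssd
# where <N> is min(AUTOSCALER_DEFAULT_PGS, pg_num).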
@ -691,7 +703,7 @@ class ErasurePool(BasePool):
def __init__(self, service, name=None, erasure_code_profile=None,
percent_data=None, app_name=None, op=None,
allow_ec_overwrites=False):
"""Initialize ReplicatedPool object.
"""Initialize ErasurePool object.
Pool information is either initialized from individual keyword
arguments or from an individual CephBrokerRq operation Dict.
@ -777,6 +789,9 @@ def enabled_manager_modules():
:rtype: List[str]
"""
cmd = ['ceph', 'mgr', 'module', 'ls']
quincy_or_later = cmp_pkgrevno('ceph-common', '17.1.0') >= 0
if quincy_or_later:
cmd.append('--format=json')
try:
modules = check_output(cmd).decode('utf-8')
except CalledProcessError as e:
@ -1842,7 +1857,7 @@ class CephBrokerRq(object):
}
def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
**kwargs):
crush_profile=None, **kwargs):
"""Adds an operation to create a replicated pool.
Refer to docstring for ``_partial_build_common_op_create`` for
@ -1856,6 +1871,10 @@ class CephBrokerRq(object):
for pool.
:type pg_num: int
:raises: AssertionError if provided data is of invalid type/range
:param crush_profile: Name of crush profile to use. If not set the
ceph-mon unit handling the broker request will
set its default value.
:type crush_profile: Optional[str]
"""
if pg_num and kwargs.get('weight'):
raise ValueError('pg_num and weight are mutually exclusive')
@ -1865,6 +1884,7 @@ class CephBrokerRq(object):
'name': name,
'replicas': replica_count,
'pg_num': pg_num,
'crush-profile': crush_profile
}
op.update(self._partial_build_common_op_create(**kwargs))
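
A minimal sketch of how a consuming charm could pass the new crush profile through a broker request (pool and profile names are hypothetical):

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
# Request a 3-replica pool built with a pre-existing crush profile; when
# crush_profile is omitted the ceph-mon unit handling the request keeps
# using its own default rule.
rq.add_op_create_replicated_pool(name='rgw-data',
                                 replica_count=3,
                                 crush_profile='fast-ssd')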

View File

@ -114,6 +114,33 @@ def service_stop(service_name, **kwargs):
return service('stop', service_name, **kwargs)
def service_enable(service_name, **kwargs):
"""Enable a system service.
The specified service name is managed via the system level init system.
Some init systems (e.g. upstart) require that additional arguments be
provided in order to directly control service instances whereas other init
systems allow for addressing instances of a service directly by name (e.g.
systemd).
The kwargs allow for the additional parameters to be passed to underlying
init systems for those systems which require/allow for them. For example,
the ceph-osd upstart script requires the id parameter to be passed along
in order to identify which running daemon should be restarted. The follow-
ing example restarts the ceph-osd service for instance id=4:
service_enable('ceph-osd', id=4)
:param service_name: the name of the service to enable
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""
return service('enable', service_name, **kwargs)
def service_restart(service_name, **kwargs):
"""Restart a system service.
@ -134,7 +161,7 @@ def service_restart(service_name, **kwargs):
:param service_name: the name of the service to restart
:param **kwargs: additional parameters to pass to the init system when
managing services. These will be passed as key=value
parameters to the init system's commandline. kwargs
parameters to the init system's commandline. kwargs
are ignored for init systems not allowing additional
parameters via the commandline (systemd).
"""

View File

@ -1,4 +1,4 @@
# Copyright 2017 Canonical Ltd
# Copyright 2017-2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -293,7 +293,7 @@ def get_link_speed(network_interface):
def persist_settings(settings_dict):
# Write all settings to /etc/hdparm.conf
""" This will persist the hard drive settings to the /etc/hdparm.conf file
"""This will persist the hard drive settings to the /etc/hdparm.conf file
The settings_dict should be in the form of {"uuid": {"key":"value"}}
@ -552,7 +552,7 @@ def get_osd_weight(osd_id):
:returns: Float
:raises: ValueError if the monmap fails to parse.
:raises: CalledProcessError if our ceph command fails.
:raises: CalledProcessError if our Ceph command fails.
"""
try:
tree = str(subprocess
@ -560,7 +560,7 @@ def get_osd_weight(osd_id):
.decode('UTF-8'))
try:
json_tree = json.loads(tree)
# Make sure children are present in the json
# Make sure children are present in the JSON
if not json_tree['nodes']:
return None
for device in json_tree['nodes']:
@ -619,12 +619,12 @@ def _flatten_roots(nodes, lookup_type='host'):
def get_osd_tree(service):
"""Returns the current osd map in JSON.
"""Returns the current OSD map in JSON.
:returns: List.
:rtype: List[CrushLocation]
:raises: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
Also raises CalledProcessError if our Ceph command fails
"""
try:
tree = str(subprocess
@ -666,12 +666,12 @@ def _get_child_dirs(path):
def _get_osd_num_from_dirname(dirname):
"""Parses the dirname and returns the OSD id.
Parses a string in the form of 'ceph-{osd#}' and returns the osd number
Parses a string in the form of 'ceph-{osd#}' and returns the OSD number
from the directory name.
:param dirname: the directory name to return the OSD number from
:return int: the osd number the directory name corresponds to
:raises ValueError: if the osd number cannot be parsed from the provided
:return int: the OSD number the directory name corresponds to
:raises ValueError: if the OSD number cannot be parsed from the provided
directory name.
"""
match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
@ -686,7 +686,7 @@ def get_local_osd_ids():
to split the ID off of the directory name and return it in
a list.
:returns: list. A list of osd identifiers
:returns: list. A list of OSD identifiers
:raises: OSError if something goes wrong with listing the directory.
"""
osd_ids = []
@ -875,12 +875,12 @@ DISK_FORMATS = [
]
CEPH_PARTITIONS = [
'89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # ceph encrypted disk in creation
'45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # ceph encrypted journal
'4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # ceph encrypted osd data
'4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # ceph osd data
'45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # ceph osd journal
'89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # ceph disk in creation
'89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE', # Ceph encrypted disk in creation
'45B0969E-9B03-4F30-B4C6-5EC00CEFF106', # Ceph encrypted journal
'4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D', # Ceph encrypted OSD data
'4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D', # Ceph OSD data
'45B0969E-9B03-4F30-B4C6-B4B80CEFF106', # Ceph OSD journal
'89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE', # Ceph disk in creation
]
@ -984,7 +984,7 @@ def is_osd_disk(dev):
def start_osds(devices):
# Scan for ceph block devices
# Scan for Ceph block devices
rescan_osd_devices()
if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and
cmp_pkgrevno('ceph', '14.2.0') < 0):
@ -1229,12 +1229,6 @@ def get_named_key(name, caps=None, pool_list=None):
'get',
key_name,
]).decode('UTF-8')).strip()
# NOTE(jamespage);
# Apply any changes to key capabilities, dealing with
# upgrades which requires new caps for operation.
upgrade_key_caps(key_name,
caps or _default_caps,
pool_list)
return parse_key(output)
except subprocess.CalledProcessError:
# Couldn't get the key, time to create it!
@ -1270,7 +1264,7 @@ def get_named_key(name, caps=None, pool_list=None):
def upgrade_key_caps(key, caps, pool_list=None):
""" Upgrade key to have capabilities caps """
"""Upgrade key to have capabilities caps"""
if not is_leader():
# Not the MON leader OR not clustered
return
@ -1304,11 +1298,11 @@ def use_bluestore():
def bootstrap_monitor_cluster(secret):
"""Bootstrap local ceph mon into the ceph cluster
"""Bootstrap local Ceph mon into the Ceph cluster
:param secret: cephx secret to use for monitor authentication
:type secret: str
:raises: Exception if ceph mon cannot be bootstrapped
:raises: Exception if Ceph mon cannot be bootstrapped
"""
hostname = socket.gethostname()
path = '/var/lib/ceph/mon/ceph-{}'.format(hostname)
@ -1351,11 +1345,11 @@ def _create_monitor(keyring, secret, hostname, path, done, init_marker):
:type: secret: str
:param hostname: hostname of the local unit
:type hostname: str
:param path: full path to ceph mon directory
:param path: full path to Ceph mon directory
:type path: str
:param done: full path to 'done' marker for ceph mon
:param done: full path to 'done' marker for Ceph mon
:type done: str
:param init_marker: full path to 'init' marker for ceph mon
:param init_marker: full path to 'init' marker for Ceph mon
:type init_marker: str
"""
subprocess.check_call(['ceph-authtool', keyring,
@ -1415,13 +1409,13 @@ def create_keyrings():
owner=ceph_user(), group=ceph_user(),
perms=0o400)
else:
# NOTE(jamespage): Later ceph releases require explicit
# NOTE(jamespage): Later Ceph releases require explicit
# call to ceph-create-keys to setup the
# admin keys for the cluster; this command
# will wait for quorum in the cluster before
# returning.
# NOTE(fnordahl): Explicitly run `ceph-create-keys` for older
# ceph releases too. This improves bootstrap
# Ceph releases too. This improves bootstrap
# resilience as the charm will wait for
# presence of peer units before attempting
# to bootstrap. Note that charms deploying
@ -1503,9 +1497,9 @@ def find_least_used_utility_device(utility_devices, lvs=False):
def get_devices(name):
""" Merge config and juju storage based devices
"""Merge config and Juju storage based devices
:name: THe name of the device type, eg: wal, osd, journal
:name: The name of the device type, e.g.: wal, osd, journal
:returns: Set(device names), which are strings
"""
if config(name):
@ -1520,11 +1514,11 @@ def get_devices(name):
def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
bluestore=False, key_manager=CEPH_KEY_MANAGER):
bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None):
if dev.startswith('/dev'):
osdize_dev(dev, osd_format, osd_journal,
ignore_errors, encrypt,
bluestore, key_manager)
bluestore, key_manager, osd_id)
else:
if cmp_pkgrevno('ceph', '14.0.0') >= 0:
log("Directory backed OSDs can not be created on Nautilus",
@ -1534,7 +1528,8 @@ def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER):
encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER,
osd_id=None):
"""
Prepare a block device for use as a Ceph OSD
@ -1547,7 +1542,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
:param: ignore_errors: Don't fail in the event of any errors during
processing
:param: encrypt: Encrypt block devices using 'key_manager'
:param: bluestore: Use bluestore native ceph block device format
:param: bluestore: Use bluestore native Ceph block device format
:param: key_manager: Key management approach for encryption keys
:raises subprocess.CalledProcessError: in the event that any supporting
subprocess operation failed
@ -1599,7 +1594,8 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
osd_journal,
encrypt,
bluestore,
key_manager)
key_manager,
osd_id)
else:
cmd = _ceph_disk(dev,
osd_format,
@ -1683,7 +1679,7 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
key_manager=CEPH_KEY_MANAGER):
key_manager=CEPH_KEY_MANAGER, osd_id=None):
"""
Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
@ -1695,6 +1691,7 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
:param: encrypt: Use block device encryption
:param: bluestore: Use bluestore storage for OSD
:param: key_manager: dm-crypt Key Manager to use
:param: osd_id: The OSD-id to recycle, or None to create a new one
:raises subprocess.CalledProcessError: in the event that any supporting
LVM operation failed.
:returns: list. 'ceph-volume' command and required parameters for
@ -1716,6 +1713,9 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
if encrypt and key_manager == CEPH_KEY_MANAGER:
cmd.append('--dmcrypt')
if osd_id is not None:
cmd.extend(['--osd-id', str(osd_id)])
# On-disk journal volume creation
if not osd_journal and not bluestore:
journal_lv_type = 'journal'
@ -1840,7 +1840,7 @@ def get_conf(variable):
Get the value of the given configuration variable from the
cluster.
:param variable: ceph configuration variable
:param variable: Ceph configuration variable
:returns: str. configured value for provided variable
"""
@ -1860,7 +1860,7 @@ def calculate_volume_size(lv_type):
:raises KeyError: if invalid lv_type is supplied
:returns: int. Configured size in megabytes for volume type
"""
# lv_type -> ceph configuration option
# lv_type -> Ceph configuration option
_config_map = {
'db': 'bluestore_block_db_size',
'wal': 'bluestore_block_wal_size',
@ -1874,7 +1874,7 @@ def calculate_volume_size(lv_type):
'journal': 1024,
}
# conversion of ceph config units to MB
# conversion of Ceph config units to MB
_units = {
'db': 1048576, # Bytes -> MB
'wal': 1048576, # Bytes -> MB
@ -1907,7 +1907,7 @@ def _luks_uuid(dev):
def _initialize_disk(dev, dev_uuid, encrypt=False,
key_manager=CEPH_KEY_MANAGER):
"""
Initialize a raw block device consuming 100% of the avaliable
Initialize a raw block device consuming 100% of the available
disk space.
Function assumes that block device has already been wiped.
@ -2004,7 +2004,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
def osdize_dir(path, encrypt=False, bluestore=False):
"""Ask ceph-disk to prepare a directory to become an osd.
"""Ask ceph-disk to prepare a directory to become an OSD.
:param path: str. The directory to osdize
:param encrypt: bool. Should the OSD directory be encrypted at rest
@ -2074,11 +2074,11 @@ def get_running_osds():
def get_cephfs(service):
"""List the Ceph Filesystems that exist.
:param service: The service name to run the ceph command under
:returns: list. Returns a list of the ceph filesystems
:param service: The service name to run the Ceph command under
:returns: list. Returns a list of the Ceph filesystems
"""
if get_version() < 0.86:
# This command wasn't introduced until 0.86 ceph
# This command wasn't introduced until 0.86 Ceph
return []
try:
output = str(subprocess
@ -2157,7 +2157,7 @@ def roll_monitor_cluster(new_version, upgrade_key):
sys.exit(1)
log('monitor_list: {}'.format(monitor_list))
# A sorted list of osd unit names
# A sorted list of OSD unit names
mon_sorted_list = sorted(monitor_list)
# Install packages immediately but defer restarts to when it's our time.
@ -2192,6 +2192,20 @@ def roll_monitor_cluster(new_version, upgrade_key):
wait_for_all_monitors_to_upgrade(new_version=new_version,
upgrade_key=upgrade_key)
bootstrap_manager()
# NOTE(jmcvaughn):
# Nautilus and later binaries use msgr2 by default, but existing
# clusters that have been upgraded from pre-Nautilus will not
# automatically have msgr2 enabled. Without this, Ceph will show
# a warning only (with no impact to operations), but newly added units
# will not be able to join the cluster. Therefore, we ensure it is
# enabled on upgrade for all versions including and after Nautilus
# (to cater for previous charm versions that will not have done this).
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0
if nautilus_or_later:
wait_for_all_monitors_to_upgrade(new_version=new_version,
upgrade_key=upgrade_key)
enable_msgr2()
except ValueError:
log("Failed to find {} in list {}.".format(
my_name, mon_sorted_list))
@ -2204,7 +2218,7 @@ def noop():
def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
"""Upgrade the current ceph monitor to the new version
"""Upgrade the current Ceph monitor to the new version
:param new_version: String version to upgrade to.
"""
@ -2212,18 +2226,19 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
kick_function = noop
current_version = get_version()
status_set("maintenance", "Upgrading monitor")
log("Current ceph version is {}".format(current_version))
log("Current Ceph version is {}".format(current_version))
log("Upgrading to: {}".format(new_version))
# Needed to determine whether to stop/start ceph-mgr
luminous_or_later = cmp_pkgrevno('ceph-common', '12.2.0') >= 0
# Needed to differentiate between systemd unit names
nautilus_or_later = cmp_pkgrevno('ceph-common', '14.0.0') >= 0
kick_function()
try:
add_source(config('source'), config('key'))
apt_update(fatal=True)
except subprocess.CalledProcessError as err:
log("Adding the ceph source failed with message: {}".format(
log("Adding the Ceph source failed with message: {}".format(
err))
status_set("blocked", "Upgrade to {} failed".format(new_version))
sys.exit(1)
@ -2246,7 +2261,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
try:
if systemd():
service_stop('ceph-mon')
if nautilus_or_later:
systemd_unit = 'ceph-mon@{}'.format(socket.gethostname())
else:
systemd_unit = 'ceph-mon'
service_stop(systemd_unit)
log("restarting ceph-mgr.target maybe: {}"
.format(luminous_or_later))
if luminous_or_later:
@ -2277,7 +2296,11 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
perms=0o755)
if systemd():
service_restart('ceph-mon')
if nautilus_or_later:
systemd_unit = 'ceph-mon@{}'.format(socket.gethostname())
else:
systemd_unit = 'ceph-mon'
service_restart(systemd_unit)
log("starting ceph-mgr.target maybe: {}".format(luminous_or_later))
if luminous_or_later:
# due to BUG: #1849874 we have to force a restart to get it to
@ -2294,7 +2317,7 @@ def upgrade_monitor(new_version, kick_function=None, restart_daemons=True):
def lock_and_roll(upgrade_key, service, my_name, version):
"""Create a lock on the ceph monitor cluster and upgrade.
"""Create a lock on the Ceph monitor cluster and upgrade.
:param upgrade_key: str. The cephx key to use
:param service: str. The cephx id to use
@ -2443,7 +2466,7 @@ class WatchDog(object):
allow for other delays.
There is a compatibility mode where if the other side never kicks, then it
simply waits for the compatability timer.
simply waits for the compatibility timer.
"""
class WatchDogDeadException(Exception):
@ -2578,11 +2601,11 @@ class WatchDog(object):
def get_upgrade_position(osd_sorted_list, match_name):
"""Return the upgrade position for the given osd.
"""Return the upgrade position for the given OSD.
:param osd_sorted_list: Osds sorted
:param osd_sorted_list: OSDs sorted
:type osd_sorted_list: [str]
:param match_name: The osd name to match
:param match_name: The OSD name to match
:type match_name: str
:returns: The position of the name
:rtype: int
@ -2591,20 +2614,20 @@ def get_upgrade_position(osd_sorted_list, match_name):
for index, item in enumerate(osd_sorted_list):
if item.name == match_name:
return index
raise ValueError("osd name '{}' not found in get_upgrade_position list"
raise ValueError("OSD name '{}' not found in get_upgrade_position list"
.format(match_name))
# Edge cases:
# 1. Previous node dies on upgrade, can we retry?
# 2. This assumes that the osd failure domain is not set to osd.
# 2. This assumes that the OSD failure domain is not set to OSD.
# It rolls an entire server at a time.
def roll_osd_cluster(new_version, upgrade_key):
"""This is tricky to get right so here's what we're going to do.
There's 2 possible cases: Either I'm first in line or not.
If I'm not first in line I'll wait a random time between 5-30 seconds
and test to see if the previous osd is upgraded yet.
and test to see if the previous OSD is upgraded yet.
TODO: If you're not in the same failure domain it's safe to upgrade
1. Examine all pools and adopt the most strict failure domain policy
@ -2620,7 +2643,7 @@ def roll_osd_cluster(new_version, upgrade_key):
log('roll_osd_cluster called with {}'.format(new_version))
my_name = socket.gethostname()
osd_tree = get_osd_tree(service=upgrade_key)
# A sorted list of osd unit names
# A sorted list of OSD unit names
osd_sorted_list = sorted(osd_tree)
log("osd_sorted_list: {}".format(osd_sorted_list))
@ -2655,7 +2678,7 @@ def roll_osd_cluster(new_version, upgrade_key):
def upgrade_osd(new_version, kick_function=None):
"""Upgrades the current osd
"""Upgrades the current OSD
:param new_version: str. The new version to upgrade to
"""
@ -2663,15 +2686,15 @@ def upgrade_osd(new_version, kick_function=None):
kick_function = noop
current_version = get_version()
status_set("maintenance", "Upgrading osd")
log("Current ceph version is {}".format(current_version))
status_set("maintenance", "Upgrading OSD")
log("Current Ceph version is {}".format(current_version))
log("Upgrading to: {}".format(new_version))
try:
add_source(config('source'), config('key'))
apt_update(fatal=True)
except subprocess.CalledProcessError as err:
log("Adding the ceph sources failed with message: {}".format(
log("Adding the Ceph sources failed with message: {}".format(
err))
status_set("blocked", "Upgrade to {} failed".format(new_version))
sys.exit(1)
@ -2685,7 +2708,7 @@ def upgrade_osd(new_version, kick_function=None):
kick_function()
# If the upgrade does not need an ownership update of any of the
# directories in the osd service directory, then simply restart
# directories in the OSD service directory, then simply restart
# all of the OSDs at the same time as this will be the fastest
# way to update the code on the node.
if not dirs_need_ownership_update('osd'):
@ -2700,7 +2723,7 @@ def upgrade_osd(new_version, kick_function=None):
# Need to change the ownership of all directories which are not OSD
# directories as well.
# TODO - this should probably be moved to the general upgrade function
# and done before mon/osd.
# and done before mon/OSD.
update_owner(CEPH_BASE_DIR, recurse_dirs=False)
non_osd_dirs = filter(lambda x: not x == 'osd',
os.listdir(CEPH_BASE_DIR))
@ -2721,12 +2744,12 @@ def upgrade_osd(new_version, kick_function=None):
_upgrade_single_osd(osd_num, osd_dir)
except ValueError as ex:
# Directory could not be parsed - junk directory?
log('Could not parse osd directory %s: %s' % (osd_dir, ex),
log('Could not parse OSD directory %s: %s' % (osd_dir, ex),
WARNING)
continue
except (subprocess.CalledProcessError, IOError) as err:
log("Stopping ceph and upgrading packages failed "
log("Stopping Ceph and upgrading packages failed "
"with message: {}".format(err))
status_set("blocked", "Upgrade to {} failed".format(new_version))
sys.exit(1)
@ -2753,7 +2776,7 @@ def _upgrade_single_osd(osd_num, osd_dir):
def stop_osd(osd_num):
"""Stops the specified OSD number.
:param osd_num: the osd number to stop
:param osd_num: the OSD number to stop
"""
if systemd():
service_stop('ceph-osd@{}'.format(osd_num))
@ -2764,7 +2787,7 @@ def stop_osd(osd_num):
def start_osd(osd_num):
"""Starts the specified OSD number.
:param osd_num: the osd number to start.
:param osd_num: the OSD number to start.
"""
if systemd():
service_start('ceph-osd@{}'.format(osd_num))
@ -2775,12 +2798,12 @@ def start_osd(osd_num):
def disable_osd(osd_num):
"""Disables the specified OSD number.
Ensures that the specified osd will not be automatically started at the
Ensures that the specified OSD will not be automatically started at the
next reboot of the system. Due to differences between init systems,
this method cannot make any guarantees that the specified osd cannot be
this method cannot make any guarantees that the specified OSD cannot be
started manually.
:param osd_num: the osd id which should be disabled.
:param osd_num: the OSD id which should be disabled.
:raises CalledProcessError: if an error occurs invoking the systemd cmd
to disable the OSD
:raises IOError, OSError: if the attempt to read/remove the ready file in
@ -2820,7 +2843,7 @@ def enable_osd(osd_num):
:param osd_num: the osd id which should be enabled.
:raises CalledProcessError: if the call to the systemd command issued
fails when enabling the service
:raises IOError: if the attempt to write the ready file in an usptart
:raises IOError: if the attempt to write the ready file in an upstart
enabled system fails
"""
if systemd():
@ -2828,7 +2851,7 @@ def enable_osd(osd_num):
subprocess.check_call(cmd)
else:
# When running on upstart, the OSDs are started via the ceph-osd-all
# upstart script which will only start the osd if it has a 'ready'
# upstart script which will only start the OSD if it has a 'ready'
# file. Make sure that file exists.
ready_file = os.path.join(OSD_BASE_DIR, 'ceph-{}'.format(osd_num),
'ready')
@ -2881,7 +2904,7 @@ def get_osd_state(osd_num, osd_goal_state=None):
If osd_goal_state is not None, loop until the current OSD state matches
the OSD goal state.
:param osd_num: the osd id to get state for
:param osd_num: the OSD id to get state for
:param osd_goal_state: (Optional) string indicating state to wait for
Defaults to None
:returns: Returns a str, the OSD state.
@ -2942,7 +2965,7 @@ def maintain_osd_state(osd_num):
Ensures the state of an OSD is the same at the end of a block nested
in a with statement as it was at the beginning of the block.
:param osd_num: the osd id to maintain state for
:param osd_num: the OSD id to maintain state for
"""
osd_state = get_osd_state(osd_num)
try:
@ -2969,9 +2992,9 @@ def maintain_all_osd_states():
def list_pools(client='admin'):
"""This will list the current pools that Ceph has
:param client: (Optional) client id for ceph key to use
:param client: (Optional) client id for Ceph key to use
Defaults to ``admin``
:type cilent: str
:type client: str
:returns: Returns a list of available pools.
:rtype: list
:raises: subprocess.CalledProcessError if the subprocess fails to run.
@ -2996,9 +3019,9 @@ def get_pool_param(pool, param, client='admin'):
:type pool: str
:param param: Name of variable to get
:type param: str
:param client: (Optional) client id for ceph key to use
:param client: (Optional) client id for Ceph key to use
Defaults to ``admin``
:type cilent: str
:type client: str
:returns: Value of variable on pool or None
:rtype: str or None
:raises: subprocess.CalledProcessError
@ -3020,9 +3043,9 @@ def get_pool_erasure_profile(pool, client='admin'):
:param pool: Name of pool to get variable from
:type pool: str
:param client: (Optional) client id for ceph key to use
:param client: (Optional) client id for Ceph key to use
Defaults to ``admin``
:type cilent: str
:type client: str
:returns: Erasure code profile of pool or None
:rtype: str or None
:raises: subprocess.CalledProcessError
@ -3041,9 +3064,9 @@ def get_pool_quota(pool, client='admin'):
:param pool: Name of pool to get variable from
:type pool: str
:param client: (Optional) client id for ceph key to use
:param client: (Optional) client id for Ceph key to use
Defaults to ``admin``
:type cilent: str
:type client: str
:returns: Dictionary with quota variables
:rtype: dict
:raises: subprocess.CalledProcessError
@ -3066,9 +3089,9 @@ def get_pool_applications(pool='', client='admin'):
:param pool: (Optional) Name of pool to get applications for
Defaults to get for all pools
:type pool: str
:param client: (Optional) client id for ceph key to use
:param client: (Optional) client id for Ceph key to use
Defaults to ``admin``
:type cilent: str
:type client: str
:returns: Dictionary with pool name as key
:rtype: dict
:raises: subprocess.CalledProcessError
@ -3131,7 +3154,7 @@ def dirs_need_ownership_update(service):
necessary due to the upgrade from Hammer to Jewel where the daemon user
changes from root: to ceph:.
:param service: the name of the service folder to check (e.g. osd, mon)
:param service: the name of the service folder to check (e.g. OSD, mon)
:returns: boolean. True if the directories need a change of ownership,
False otherwise.
:raises IOError: if an error occurs reading the file stats from one of
@ -3161,7 +3184,7 @@ def dirs_need_ownership_update(service):
return False
# A dict of valid ceph upgrade paths. Mapping is old -> new
# A dict of valid Ceph upgrade paths. Mapping is old -> new
UPGRADE_PATHS = collections.OrderedDict([
('firefly', 'hammer'),
('hammer', 'jewel'),
@ -3173,7 +3196,7 @@ UPGRADE_PATHS = collections.OrderedDict([
('pacific', 'quincy'),
])
# Map UCA codenames to ceph codenames
# Map UCA codenames to Ceph codenames
UCA_CODENAME_MAP = {
'icehouse': 'firefly',
'juno': 'firefly',
@ -3196,24 +3219,24 @@ UCA_CODENAME_MAP = {
def pretty_print_upgrade_paths():
"""Pretty print supported upgrade paths for ceph"""
"""Pretty print supported upgrade paths for Ceph"""
return ["{} -> {}".format(key, value)
for key, value in UPGRADE_PATHS.items()]
def resolve_ceph_version(source):
"""Resolves a version of ceph based on source configuration
"""Resolves a version of Ceph based on source configuration
based on Ubuntu Cloud Archive pockets.
@param: source: source configuration option of charm
:returns: ceph release codename or None if not resolvable
:returns: Ceph release codename or None if not resolvable
"""
os_release = get_os_codename_install_source(source)
return UCA_CODENAME_MAP.get(os_release)
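# e.g. resolve_ceph_version('cloud:focal-yoga') resolves the UCA pocket to
# 'yoga' and returns the Ceph codename mapped for it in UCA_CODENAME_MAP,
# or None when the source cannot be resolved.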
def get_ceph_pg_stat():
"""Returns the result of ceph pg stat.
"""Returns the result of 'ceph pg stat'.
:returns: dict
"""
@ -3248,7 +3271,7 @@ def get_ceph_health():
.decode('UTF-8'))
try:
json_tree = json.loads(tree)
# Make sure children are present in the json
# Make sure children are present in the JSON
if not json_tree['overall_status']:
return None
@ -3265,7 +3288,7 @@ def get_ceph_health():
def reweight_osd(osd_num, new_weight):
"""Changes the crush weight of an OSD to the value specified.
:param osd_num: the osd id which should be changed
:param osd_num: the OSD id which should be changed
:param new_weight: the new weight for the OSD
:returns: bool. True if output looks right, else false.
:raises CalledProcessError: if an error occurs invoking the systemd cmd
@ -3292,7 +3315,7 @@ def reweight_osd(osd_num, new_weight):
def determine_packages():
"""Determines packages for installation.
:returns: list of ceph packages
:returns: list of Ceph packages
"""
packages = PACKAGES.copy()
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'eoan':
@ -3338,6 +3361,16 @@ def bootstrap_manager():
service_restart(unit)
def enable_msgr2():
"""
Enables msgr2
:raises: subprocess.CalledProcessError if the command fails
"""
cmd = ['ceph', 'mon', 'enable-msgr2']
subprocess.check_call(cmd)
def osd_noout(enable):
"""Sets or unsets 'noout'
@ -3361,12 +3394,12 @@ def osd_noout(enable):
class OSDConfigSetError(Exception):
"""Error occured applying OSD settings."""
"""Error occurred applying OSD settings."""
pass
def apply_osd_settings(settings):
"""Applies the provided osd settings
"""Applies the provided OSD settings
Apply the provided settings to all local OSDs unless settings are already
present. Settings stop being applied on encountering an error.
@ -3391,7 +3424,7 @@ def apply_osd_settings(settings):
out = json.loads(
subprocess.check_output(cmd.split()).decode('UTF-8'))
if 'error' in out:
log("Error retrieving osd setting: {}".format(out['error']),
log("Error retrieving OSD setting: {}".format(out['error']),
level=ERROR)
return False
current_settings[key] = out[cli_key]
@ -3408,7 +3441,7 @@ def apply_osd_settings(settings):
out = json.loads(
subprocess.check_output(cmd.split()).decode('UTF-8'))
if 'error' in out:
log("Error applying osd setting: {}".format(out['error']),
log("Error applying OSD setting: {}".format(out['error']),
level=ERROR)
raise OSDConfigSetError
return True
@ -3478,7 +3511,7 @@ mgr_disable_dashboard = functools.partial(mgr_disable_module, 'dashboard')
def ceph_config_set(name, value, who):
"""Set a ceph config option
"""Set a Ceph config option
:param name: key to set
:type name: str
@ -3496,7 +3529,7 @@ mgr_config_set = functools.partial(ceph_config_set, who='mgr')
def ceph_config_get(name, who):
"""Retrieve the value of a ceph config option
"""Retrieve the value of a Ceph config option
:param name: key to lookup
:type name: str

View File

@ -14,7 +14,7 @@ tags:
- misc
series:
- focal
- impish
- jammy
extra-bindings:
public:
admin:

View File

@ -10,10 +10,6 @@
voting: false
- vault-focal-yoga-namespaced:
voting: false
- vault-impish-xena_rgw:
voting: false
- vault-impish-xena-namespaced:
voting: false
- vault-jammy-yoga_rgw:
voting: false
- vault-jammy-yoga-namespaced:
@ -58,22 +54,6 @@
- vault-focal-xena-namespaced
vars:
tox_extra_args: vault:jammy-yoga-namespaced
- job:
name: vault-impish-xena_rgw
parent: func-target
dependencies:
- vault-focal-xena_rgw
- vault-focal-xena-namespaced
vars:
tox_extra_args: vault:impish-xena
- job:
name: vault-impish-xena-namespaced
parent: func-target
dependencies:
- vault-focal-xena_rgw
- vault-focal-xena-namespaced
vars:
tox_extra_args: vault:impish-xena-namespaced
- job:
name: vault-focal-yoga_rgw
parent: func-target

View File

@ -1,124 +0,0 @@
options:
source: &source distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
channel: latest/edge
ceph-radosgw:
charm: ../../ceph-radosgw.charm
num_units: 1
options:
source: *source
namespace-tenants: True
to:
- '3'
ceph-osd:
charm: ch:ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
channel: latest/edge
ceph-mon:
charm: ch:ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
channel: latest/edge
keystone:
expose: True
charm: ch:keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
channel: latest/edge
vault-mysql-router:
charm: ch:mysql-router
channel: latest/edge
vault:
charm: ch:vault
num_units: 1
to:
- '11'
channel: latest/edge
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -1,123 +0,0 @@
options:
source: &source distro
series: impish
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
'11':
applications:
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
options:
source: *source
to:
- '0'
- '1'
- '2'
channel: latest/edge
ceph-radosgw:
charm: ../../ceph-radosgw.charm
num_units: 1
options:
source: *source
to:
- '3'
ceph-osd:
charm: ch:ceph-osd
num_units: 3
constraints: "mem=2048"
storage:
osd-devices: 'cinder,10G'
options:
source: *source
osd-devices: '/srv/ceph /dev/test-non-existent'
to:
- '4'
- '5'
- '6'
channel: latest/edge
ceph-mon:
charm: ch:ceph-mon
num_units: 3
options:
source: *source
to:
- '7'
- '8'
- '9'
channel: latest/edge
keystone:
expose: True
charm: ch:keystone
num_units: 1
options:
openstack-origin: *source
to:
- '10'
channel: latest/edge
vault-mysql-router:
charm: ch:mysql-router
channel: latest/edge
vault:
charm: ch:vault
num_units: 1
to:
- '11'
channel: latest/edge
relations:
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'ceph-radosgw:mon'
- 'ceph-mon:radosgw'
- - 'ceph-radosgw:identity-service'
- 'keystone:identity-service'
- - 'vault-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'vault:shared-db'
- 'vault-mysql-router:shared-db'
- - 'keystone:certificates'
- 'vault:certificates'
- - 'ceph-radosgw:certificates'
- 'vault:certificates'

View File

@ -3,8 +3,6 @@ charm_name: ceph-radosgw
gate_bundles:
- vault: focal-xena
- vault: focal-xena-namespaced
- vault: impish-xena
- vault: impish-xena-namespaced
smoke_bundles:
- vault: focal-xena
@ -34,7 +32,5 @@ tests:
tests_options:
force_deploy:
- impish-xena
- impish-xena-namespaced
- jammy-yoga
- jammy-yoga-namespaced

View File

@ -76,6 +76,11 @@ basepython = python3.9
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py310]
basepython = python3.10
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt