Charm refresh

At present the ``barbican`` charm is released as a Development /
Preview charm.

When it was first developed the upstream project had a separate
workflow for configuring Keystone authentication and authorization
which involved customization of the API paste configuration.

Since then the upstream project has gained support for standard
authentication and authorization configuration through the
``keystone_authtoken`` section in the configuration file.

We are bringing the charm into full support and do not want to
carry forward the legacy API paste configuration support. We also
want to integrate the charm with pluggable secrets support, whose
dependencies are first available in OpenStack ``rocky``.

Remove support for OpenStack versions prior to ``rocky``.

Remove surplus configuration options either provided through
charm layers or deemed unnecessary.

Change-Id: I4f0bbca5bbafa40d8bd6036a0743da642cab3d99
This commit is contained in:
Frode Nordahl 2018-10-18 12:35:35 +02:00
parent f12029cccd
commit 3082897e9e
No known key found for this signature in database
GPG Key ID: 6A5D59A3BA48373F
18 changed files with 158 additions and 801 deletions

View File

@ -1,51 +1,4 @@
options:
openstack-origin:
default: distro
type: string
description: |
Repository from which to install. May be one of the following:
distro (default), ppa:somecustom/ppa, a deb url sources entry,
or a supported Cloud Archive release pocket.
Supported Cloud Archive sources include: cloud:precise-folsom,
cloud:precise-folsom/updates, cloud:precise-folsom/staging,
cloud:precise-folsom/proposed.
Note that updating this setting to a source that is known to
provide a later version of OpenStack will trigger a software
upgrade.
rabbit-user:
default: barbican
type: string
description: Username used to access rabbitmq queue
rabbit-vhost:
default: openstack
type: string
description: Rabbitmq vhost
database-user:
default: barbican
type: string
description: Username for Neutron database access (if enabled)
database:
default: barbican
type: string
description: Database name for Neutron (if enabled)
debug:
default: False
type: boolean
description: Enable debug logging
verbose:
default: False
type: boolean
description: Enable verbose logging
region:
default: RegionOne
type: string
description: OpenStack Region
keystone-api-version:
default: "2"
type: string
description: none, 2 or 3
require-hsm-plugin:
default: False
type: boolean

View File

@ -9,3 +9,6 @@ options:
basic:
use_venv: true
include_system_packages: true
config:
deletes:
- verbose

View File

@ -26,8 +26,11 @@ import charms_openstack.charm
import charms_openstack.adapters
import charms_openstack.ip as os_ip
PACKAGES = ['barbican-common', 'barbican-api', 'barbican-worker',
'python-mysqldb']
PACKAGES = [
'barbican-common', 'barbican-api', 'barbican-worker',
'python3-barbican', 'libapache2-mod-wsgi-py3',
'python-apt', # NOTE: workaround for hacluster subordinate
]
BARBICAN_DIR = '/etc/barbican/'
BARBICAN_CONF = BARBICAN_DIR + "barbican.conf"
BARBICAN_API_PASTE_CONF = BARBICAN_DIR + "barbican-api-paste.ini"
@ -43,43 +46,6 @@ charms_openstack.charm.use_defaults('charm.default-select-release')
###
# Implementation of the Barbican Charm classes
# Add some properties to the configuration for templates/code to use with the
# charm instance. The config_validator is called when the configuration is
# loaded, and the properties are to add those names to the config object.
@charms_openstack.adapters.config_property
def validate_keystone_api_version(config):
if config.keystone_api_version not in ['2', '3', 'none']:
raise ValueError(
"Unsupported keystone-api-version ({}). It should be 2 or 3"
.format(config.keystone_api_version))
@charms_openstack.adapters.config_property
def barbican_api_keystone_pipeline(config):
if config.keystone_api_version == "2":
return 'cors http_proxy_to_wsgi keystone_authtoken context apiapp'
else:
return 'cors http_proxy_to_wsgi keystone_v3_authtoken context apiapp'
@charms_openstack.adapters.config_property
def barbican_api_pipeline(config):
return {
"2": "cors http_proxy_to_wsgi keystone_authtoken context apiapp",
"3": "cors http_proxy_to_wsgi keystone_v3_authtoken context apiapp",
"none": "cors http_proxy_to_wsgi unauthenticated-context apiapp"
}[config.keystone_api_version]
@charms_openstack.adapters.config_property
def barbican_api_keystone_audit_pipeline(config):
if config.keystone_api_version == "2":
return 'http_proxy_to_wsgi keystone_authtoken context audit apiapp'
else:
return 'http_proxy_to_wsgi keystone_v3_authtoken context audit apiapp'
# Adapt the barbican-hsm-plugin relation for use in rendering the config
# for Barbican. Note that the HSM relation is optional, so we have a class
# variable 'exists' that we can test in the template to see if we should
@ -117,9 +83,14 @@ class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
functionality to manage a barbican unit.
"""
release = 'mitaka'
release = 'rocky'
name = 'barbican'
packages = PACKAGES
purge_packages = [
'python-barbican',
'python-mysqldb'
]
python_version = 3
api_ports = {
'barbican-worker': {
os_ip.PUBLIC: 9311,
@ -148,11 +119,6 @@ class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
# Package codename map for barbican-common
package_codenames = {
'barbican-common': collections.OrderedDict([
('2', 'mitaka'),
('3', 'newton'),
('4', 'ocata'),
('5', 'pike'),
('6', 'queens'),
('7', 'rocky'),
]),
}
@ -164,7 +130,7 @@ class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
:returns (username, host): two strings to send to the amqp provider.
"""
return (self.config['rabbit-user'], self.config['rabbit-vhost'])
return ('barbican', 'openstack')
def get_database_setup(self):
"""Provide the default database credentials as a list of 3-tuples
@ -181,8 +147,8 @@ class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
"""
return [
dict(
database=self.config['database'],
username=self.config['database-user'], )
database='barbican',
username='barbican', )
]
def action_generate_mkek(self, hsm):
@ -256,21 +222,3 @@ class BarbicanCharm(charms_openstack.charm.HAOpenStackCharm):
required_relations.append('hsm')
return super(BarbicanCharm, self).states_to_check(
required_relations=required_relations)
class BarbicanCharmRocky(BarbicanCharm):
release = 'rocky'
packages = [
'barbican-common', 'barbican-api', 'barbican-worker',
'python3-barbican', 'libapache2-mod-wsgi-py3',
'python-apt', # NOTE: workaround for hacluster subordinate
]
purge_packages = [
'python-barbican',
'python-mysqldb'
]
python_version = 3

View File

@ -1,390 +0,0 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = {{ options.verbose }}
# Show debugging output in logs (sets DEBUG log level output)
debug = {{ options.debug }}
# Address to bind the API server
# {{ options.service_listen_info }}
bind_host = {{ options.service_listen_info.barbican_worker.ip }}
# Port to bind the API server to
bind_port = {{ options.service_listen_info.barbican_worker.port }}
# Host name, for use in HATEOAS-style references
# Note: Typically this would be the load balanced endpoint that clients would use
# to communicate back with this service.
host_href = {{ options.external_endpoints.barbican_worker.url }}
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
#log_file = /var/log/barbican/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# Maximum allowed http request size against the barbican-api
max_allowed_secret_in_bytes = 10000
max_allowed_request_size_in_bytes = 1000000
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
# Uncomment this for local dev, putting db in project directory:
#sql_connection = sqlite:///barbican.sqlite
# Note: For absolute addresses, use '////' slashes after 'sqlite:'
# Uncomment for a more global development environment
#sql_connection = sqlite:////var/lib/barbican/barbican.sqlite
# This is shared_db.uri from the shared_db relationship.
{% include "parts/database" %}
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
# Accepts a class imported from the sqlalchemy.pool module, and handles the
# details of building the pool for you. If commented out, SQLAlchemy
# will select based on the database dialect. Other options are QueuePool
# (for SQLAlchemy-managed connections) and NullPool (to disabled SQLAlchemy
# management of connections).
# See http://docs.sqlalchemy.org/en/latest/core/pooling.html for more details.
#sql_pool_class = QueuePool
# Show SQLAlchemy pool-related debugging output in logs (sets DEBUG log level
# output) if specified.
#sql_pool_logging = True
# Size of pool used by SQLAlchemy. This is the largest number of connections
# that will be kept persistently in the pool. Can be set to 0 to indicate no
# size limit. To disable pooling, use a NullPool with sql_pool_class instead.
# Comment out to allow SQLAlchemy to select the default.
#sql_pool_size = 5
# The maximum overflow size of the pool used by SQLAlchemy. When the number of
# checked-out connections reaches the size set in sql_pool_size, additional
# connections will be returned up to this limit. It follows then that the
# total number of simultaneous connections the pool will allow is
# sql_pool_size + sql_pool_max_overflow. Can be set to -1 to indicate no
# overflow limit, so no limit will be placed on the total number of concurrent
# connections. Comment out to allow SQLAlchemy to select the default.
#sql_pool_max_overflow = 10
# Default page size for the 'limit' paging URL parameter.
default_limit_paging = 10
# Maximum page size for the 'limit' paging URL parameter.
max_limit_paging = 100
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of barbican api
#enable_v1_api = True
# Allow access to version 2 of barbican api
#enable_v2_api = True
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ================= Queue Options - oslo.messaging ==========================
# rabbitmq-oslo section from the rabbit-mq and related relations.
{% include "parts/section-rabbitmq-oslo" %}
# For HA, specify queue nodes in cluster as 'user@host:5672', comma delimited, ending with '/offset':
# For example: transport_url = rabbit://guest@192.168.50.8:5672,guest@192.168.50.9:5672/
# DO NOT USE THIS, due to '# FIXME(markmc): support multiple hosts' in oslo/messaging/_drivers/amqpdriver.py
# transport_url = rabbit://guest@localhost:5672/
# oslo notification driver for sending audit events via audit middleware.
# Meaningful only when middleware is enabled in barbican paste ini file.
# This is oslo config MultiStrOpt so can be defined multiple times in case
# there is need to route audit event to messaging as well as log.
# notification_driver = messagingv2
# notification_driver = log
# ======== OpenStack policy - oslo_policy ===============
[oslo_policy]
# ======== OpenStack policy integration
# JSON file representing policy (string value)
policy_file=/etc/barbican/policy.json
# Rule checked when requested rule is not found (string value)
policy_default_rule=default
# ================= Queue Options - Application ==========================
[queue]
# Enable queuing asynchronous messaging.
# Set false to invoke worker tasks synchronously (i.e. no-queue standalone mode)
enable = False
# Namespace for the queue
namespace = 'barbican'
# Topic for the queue
topic = 'barbican.workers'
# Version for the task API
version = '1.1'
# Server name for RPC service
server_name = 'barbican.queue'
# Number of asynchronous worker processes.
# When greater than 1, then that many additional worker processes are
# created for asynchronous worker functionality.
asynchronous_workers = 1
# ================= Retry/Scheduler Options ==========================
[retry_scheduler]
# Seconds (float) to wait between starting retry scheduler
initial_delay_seconds = 10.0
# Seconds (float) to wait between starting retry scheduler
periodic_interval_max_seconds = 10.0
# ====================== Quota Options ===============================
[quotas]
# For each resource, the default maximum number that can be used for
# a project is set below. This value can be overridden for each
# project through the API. A negative value means no limit. A zero
# value effectively disables the resource.
# default number of secrets allowed per project
quota_secrets = -1
# default number of orders allowed per project
quota_orders = -1
# default number of containers allowed per project
quota_containers = -1
# default number of consumers allowed per project
quota_consumers = -1
# default number of CAs allowed per project
quota_cas = -1
# ================= Keystone Notification Options - Application ===============
[keystone_notifications]
# Keystone notification functionality uses transport related configuration
# from barbican common configuration as defined under
# 'Queue Options - oslo.messaging' comments.
# The HA related configuration is also shared with notification server.
# True enables keystone notification listener functionality.
enable = False
# The default exchange under which topics are scoped.
# May be overridden by an exchange name specified in the transport_url option.
control_exchange = 'openstack'
# Keystone notification queue topic name.
# This name needs to match one of values mentioned in Keystone deployment's
# 'notification_topics' configuration e.g.
# notification_topics=notifications, barbican_notifications
# Multiple servers may listen on a topic and messages will be dispatched to one
# of the servers in a round-robin fashion. That's why Barbican service should
# have its own dedicated notification queue so that it receives all of Keystone
# notifications.
topic = 'notifications'
# True enables requeue feature in case of notification processing error.
# Enable this only when underlying transport supports this feature.
allow_requeue = False
# Version of tasks invoked via notifications
version = '1.0'
# Define the number of max threads to be used for notification server
# processing functionality.
thread_pool_size = 10
{% include "parts/section-oslo-middleware" %}
# ================= Secret Store Plugin ===================
[secretstore]
namespace = barbican.secretstore.plugin
enabled_secretstore_plugins = store_crypto
# ================= Crypto plugin ===================
[crypto]
namespace = barbican.crypto.plugin
{% if hsm -%}
enabled_crypto_plugins = p11_crypto
{% else -%}
enabled_crypto_plugins = simple_crypto
{%- endif %}
[simple_crypto_plugin]
# the kek should be a 32-byte value which is base64 encoded
kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='
[dogtag_plugin]
pem_path = '/etc/barbican/kra_admin_cert.pem'
dogtag_host = localhost
dogtag_port = 8443
nss_db_path = '/etc/barbican/alias'
nss_db_path_ca = '/etc/barbican/alias-ca'
nss_password = 'password123'
simple_cmc_profile = 'caOtherCert'
ca_expiration_time = 1
plugin_working_dir = '/etc/barbican/dogtag'
{% if hsm -%}
[p11_crypto_plugin]
# Path to vendor PKCS11 library
library_path = '{{ hsm.library_path }}'
# Password to login to PKCS11 session
login = '{{ hsm.login }}'
# Label to identify master KEK in the HSM (must not be the same as HMAC label)
mkek_label = '{{ options.label_mkek }}'
# Length in bytes of master KEK
mkek_length = {{ options.mkek_key_length }}
# Label to identify HMAC key in the HSM (must not be the same as MKEK label)
hmac_label = '{{ options.label_hmac }}'
# HSM Slot id (Should correspond to a configured PKCS11 slot). Default: 1
slot_id = {{ hsm.slot_id }}
# Enable Read/Write session with the HSM?
# rw_session = True
# Length of Project KEKs to create
# pkek_length = 32
# How long to cache unwrapped Project KEKs
# pkek_cache_ttl = 900
# Max number of items in pkek cache
# pkek_cache_limit = 100
# Seedfile to generate random data from.
seed_file = '/dev/urandom'
# Seed length to read the random data for seeding the RNG
seed_length = 32
{%- endif %}
# ================== KMIP plugin =====================
[kmip_plugin]
username = 'admin'
password = 'password'
host = localhost
port = 5696
keyfile = '/path/to/certs/cert.key'
certfile = '/path/to/certs/cert.crt'
ca_certs = '/path/to/certs/LocalCA.crt'
# ================= Certificate plugin ===================
[certificate]
namespace = barbican.certificate.plugin
enabled_certificate_plugins = simple_certificate
enabled_certificate_plugins = snakeoil_ca
[certificate_event]
namespace = barbican.certificate.event.plugin
enabled_certificate_event_plugins = simple_certificate_event
[snakeoil_ca_plugin]
ca_cert_path = /etc/barbican/snakeoil-ca.crt
ca_cert_key_path = /etc/barbican/snakeoil-ca.key
ca_cert_chain_path = /etc/barbican/snakeoil-ca.chain
ca_cert_pkcs7_path = /etc/barbican/snakeoil-ca.p7b
subca_cert_key_directory=/etc/barbican/snakeoil-cas
[cors]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain
# received in the requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials
# (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to
# HTTP Simple Headers. (list value)
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list
# value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual
# request. (list value)
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
[cors.subdomain]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain
# received in the requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials
# (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to
# HTTP Simple Headers. (list value)
#expose_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list
# value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual
# request. (list value)
#allow_headers = X-Auth-Token, X-Openstack-Request-Id, X-Project-Id, X-Identity-Status, X-User-Id, X-Storage-Token, X-Domain-Id, X-User-Domain-Id, X-Project-Domain-Id, X-Roles

View File

@ -1,7 +1,7 @@
[composite:main]
use = egg:Paste#urlmap
/: barbican_version
/v1: barbican_api
/v1: barbican-api-keystone
# Use this pipeline for Barbican API - versions no authentication
[pipeline:barbican_version]
@ -9,8 +9,7 @@ pipeline = cors http_proxy_to_wsgi versionapp
# Use this pipeline for Barbican API - DEFAULT no authentication
[pipeline:barbican_api]
# pipeline = cors unauthenticated-context apiapp
pipeline = {{ options.barbican_api_pipeline }}
pipeline = barbican-api-keystone
#Use this pipeline to activate a repoze.profile middleware and HTTP port,
# to provide profiling information for the REST API processing.
@ -19,13 +18,11 @@ pipeline = cors http_proxy_to_wsgi unauthenticated-context egg:Paste#cgitb egg:P
#Use this pipeline for keystone auth
[pipeline:barbican-api-keystone]
# pipeline = cors keystone_authtoken context apiapp
pipeline = {{ options.barbican_api_keystone_pipeline }}
pipeline = cors http_proxy_to_wsgi authtoken context apiapp
#Use this pipeline for keystone auth with audit feature
[pipeline:barbican-api-keystone-audit]
# pipeline = keystone_authtoken context audit apiapp
pipeline = {{ options.barbican_api_keystone_audit_pipeline }}
pipeline = http_proxy_to_wsgi authtoken context audit apiapp
[app:apiapp]
paste.app_factory = barbican.api.app:create_main_app
@ -46,34 +43,8 @@ paste.filter_factory = barbican.api.middleware.context:ContextMiddleware.factory
paste.filter_factory = keystonemiddleware.audit:filter_factory
audit_map_file = /etc/barbican/api_audit_map.conf
[filter:keystone_authtoken]
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
signing_dir = /var/lib/barbican/keystone-signing
auth_host = {{ identity_service.auth_host }}
#need ability to re-auth a token, thus admin url
auth_port = {{ identity_service.auth_port }}
auth_protocol = {{ identity_service.auth_protocol }}
admin_tenant_name = {{ identity_service.service_tenant }}
admin_user = {{ identity_service.service_username }}
admin_password = {{ identity_service.service_password }}
auth_version = v2.0
#delay failing perhaps to log the unauthorized request in barbican ..
#delay_auth_decision = true
[filter:keystone_v3_authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
signing_dir = /var/lib/barbican/keystone-signing
username = {{ identity_service.service_username }}
password= {{ identity_service.service_password }}
# These are hardcoded as they are not passed in the relation but are needed for v3 auth
user_domain_name = default
project_domain_name = default
auth_plugin = password
auth_url = {{ identity_service.auth_protocol }}://{{ identity_service.auth_host }}:{{ identity_service.auth_port }}/v3
auth_uri = {{ identity_service.service_protocol}}://{{ identity_service.service_host }}:{{ identity_service.service_port }}/
project_name = {{ identity_service.service_tenant }}
#delay failing perhaps to log the unauthorized request in barbican ..
#delay_auth_decision = true
[filter:profile]
use = egg:repoze.profile

View File

@ -0,0 +1,58 @@
[DEFAULT]
debug = {{ options.debug }}
bind_host = {{ options.service_listen_info.barbican_worker.ip }}
bind_port = {{ options.service_listen_info.barbican_worker.port }}
host_href = {{ options.external_endpoints.barbican_worker.url }}
[database]
{% include "parts/database" %}
{% include "parts/section-keystone-authtoken" %}
{% include "parts/section-rabbitmq-oslo" %}
{% include "parts/section-oslo-middleware" %}
[secretstore]
namespace = barbican.secretstore.plugin
enabled_secretstore_plugins = store_crypto
[crypto]
namespace = barbican.crypto.plugin
{% if hsm -%}
enabled_crypto_plugins = p11_crypto
{% else -%}
enabled_crypto_plugins = simple_crypto
{%- endif %}
[simple_crypto_plugin]
# the kek should be a 32-byte value which is base64 encoded
kek = 'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY='
{% if hsm -%}
[p11_crypto_plugin]
# Path to vendor PKCS11 library
library_path = '{{ hsm.library_path }}'
# Password to login to PKCS11 session
login = '{{ hsm.login }}'
# Label to identify master KEK in the HSM (must not be the same as HMAC label)
mkek_label = '{{ options.label_mkek }}'
# Length in bytes of master KEK
mkek_length = {{ options.mkek_key_length }}
# Label to identify HMAC key in the HSM (must not be the same as MKEK label)
hmac_label = '{{ options.label_hmac }}'
# HSM Slot id (Should correspond to a configured PKCS11 slot). Default: 1
slot_id = {{ hsm.slot_id }}
# Enable Read/Write session with the HSM?
# rw_session = True
# Length of Project KEKs to create
# pkek_length = 32
# How long to cache unwrapped Project KEKs
# pkek_cache_ttl = 900
# Max number of items in pkek cache
# pkek_cache_limit = 100
# Seedfile to generate random data from.
seed_file = '/dev/urandom'
# Seed length to read the random data for seeding the RNG
seed_length = 32
{%- endif %}

View File

@ -19,19 +19,14 @@ u = OpenStackAmuletUtils(DEBUG)
class BarbicanBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic Barbican deployment."""
def __init__(self, series, openstack=None, source=None, stable=False,
keystone_version='2'):
def __init__(self, series, openstack=None, source=None, stable=False):
"""Deploy the entire test environment.
The keystone_version controls whether keystone (and barbican) are set
up to use keystone v2.0 or v3. The options are <string> 2 or 3.
"""
super(BarbicanBasicDeployment, self).__init__(
series, openstack, source, stable)
self._keystone_version = str(keystone_version)
self._add_services()
self._add_relations()
self._configure_services(keystone_version)
self._configure_services()
self._deploy()
u.log.info('Waiting on extended status checks...')
@ -67,18 +62,15 @@ class BarbicanBasicDeployment(OpenStackAmuletDeployment):
}
super(BarbicanBasicDeployment, self)._add_relations(relations)
def _configure_services(self, keystone_version='2'):
def _configure_services(self):
"""Configure all of the services."""
keystone_config = {
'admin-password': 'openstack',
'admin-token': 'ubuntutesting',
'preferred-api-version': str(keystone_version),
}
# say we don't need an HSM for these tests
barbican_config = {
'require-hsm-plugin': False,
'verbose': True,
'keystone-api-version': str(keystone_version),
}
pxc_config = {
'dataset-size': '25%',
@ -109,7 +101,7 @@ class BarbicanBasicDeployment(OpenStackAmuletDeployment):
self.keystone_session, self.keystone = u.get_default_keystone_session(
self.keystone_sentry,
openstack_release=self._get_openstack_release(),
api_version=int(self._keystone_version))
api_version=3)
def test_100_services(self):
"""Verify the expected services are running on the corresponding
@ -136,27 +128,17 @@ class BarbicanBasicDeployment(OpenStackAmuletDeployment):
actual = self.keystone.service_catalog.get_endpoints()
if self._keystone_version == '2':
endpoint_check = [{
'adminURL': u.valid_url,
# v3 endpoint check
endpoint_check = [
{
'id': u.not_null,
'interface': interface,
'region': 'RegionOne',
'publicURL': u.valid_url,
'internalURL': u.valid_url,
}]
validate_catalog = u.validate_svc_catalog_endpoint_data
else:
# v3 endpoint check
endpoint_check = [
{
'id': u.not_null,
'interface': interface,
'region': 'RegionOne',
'region_id': 'RegionOne',
'url': u.valid_url,
}
for interface in ('admin', 'public', 'internal')]
validate_catalog = u.validate_v3_svc_catalog_endpoint_data
'region_id': 'RegionOne',
'url': u.valid_url,
}
for interface in ('admin', 'public', 'internal')]
validate_catalog = u.validate_v3_svc_catalog_endpoint_data
expected = {
'key-manager': endpoint_check,
@ -176,30 +158,16 @@ class BarbicanBasicDeployment(OpenStackAmuletDeployment):
u.log.debug(endpoints)
admin_port = '9312'
internal_port = public_port = '9311'
if self._keystone_version == '2':
expected = {'id': u.not_null,
'region': 'RegionOne',
'adminurl': u.valid_url,
'internalurl': u.valid_url,
'publicurl': u.valid_url,
'service_id': u.not_null}
# For keystone v3 it's slightly different.
expected = {'id': u.not_null,
'region': 'RegionOne',
'region_id': 'RegionOne',
'url': u.valid_url,
'interface': u.not_null, # we match this in the test
'service_id': u.not_null}
ret = u.validate_endpoint_data(
endpoints, admin_port, internal_port, public_port, expected)
elif self._keystone_version == '3':
# For keystone v3 it's slightly different.
expected = {'id': u.not_null,
'region': 'RegionOne',
'region_id': 'RegionOne',
'url': u.valid_url,
'interface': u.not_null, # we match this in the test
'service_id': u.not_null}
ret = u.validate_v3_endpoint_data(
endpoints, admin_port, internal_port, public_port, expected)
else:
raise RuntimeError("Unexpected self._keystone_version: {}"
.format(self._keystone_version))
ret = u.validate_v3_endpoint_data(
endpoints, admin_port, internal_port, public_port, expected)
if ret:
message = 'barbican endpoint: {}'.format(ret)
@ -332,97 +300,59 @@ class BarbicanBasicDeployment(OpenStackAmuletDeployment):
# a demo user, demo project, and then get a demo barbican client and do
# the secret. ensure that the default domain is created.
if self._keystone_version == '2':
# find or create the 'demo' tenant (project)
tenant = self._find_or_create(
items=self.keystone.tenants.list(),
key=lambda t: t.name == 'demo',
create=lambda: self.keystone.tenants.create(
tenant_name="demo",
description="Demo for testing barbican",
enabled=True))
# find or create the demo user
demo_user = self._find_or_create(
items=self.keystone.users.list(),
key=lambda u: u.name == 'demo',
create=lambda: self.keystone.users.create(
name='demo',
password='pass',
tenant_id=tenant.id))
# find the admin role
# already be created - if not, then this will fail later.
admin_role = self._find_or_create(
items=self.keystone.roles.list(),
key=lambda r: r.name.lower() == 'admin',
create=lambda: None)
# grant the role if it isn't already created.
# now grant the creator role to the demo user.
self._find_or_create(
items=self.keystone.roles.roles_for_user(
demo_user, tenant=tenant),
key=lambda r: r.name.lower() == admin_role.name.lower(),
create=lambda: self.keystone.roles.add_user_role(
demo_user, admin_role, tenant=tenant))
self.keystone_demo = u.authenticate_keystone_user(
self.keystone, user='demo',
password='pass', tenant='demo')
else:
# find or create the 'default' domain
domain = self._find_or_create(
items=self.keystone.domains.list(),
key=lambda u: u.name == 'default',
create=lambda: self.keystone.domains.create(
"default",
description="domain for barbican testing",
enabled=True))
# find or create the 'demo' user
demo_user = self._find_or_create(
items=self.keystone.users.list(domain=domain.id),
key=lambda u: u.name == 'demo',
create=lambda: self.keystone.users.create(
'demo',
domain=domain.id,
description="Demo user for barbican tests",
enabled=True,
email="demo@example.com",
password="pass"))
# find or create the 'demo' project
demo_project = self._find_or_create(
items=self.keystone.projects.list(domain=domain.id),
key=lambda x: x.name == 'demo',
create=lambda: self.keystone.projects.create(
'demo',
domain=domain.id,
description='barbican testing project',
enabled=True))
# create the role for the user - needs to be admin so that the
# secret can be deleted - note there is only one admin role, and it
# should already be created - if not, then this will fail later.
admin_role = self._find_or_create(
items=self.keystone.roles.list(),
key=lambda r: r.name.lower() == 'admin',
create=lambda: None)
# now grant the creator role to the demo user.
try:
self.keystone.roles.check(
role=admin_role,
user=demo_user,
project=demo_project)
except keystoneclient.exceptions.NotFound:
# create it if it isn't found
self.keystone.roles.grant(
role=admin_role,
user=demo_user,
project=demo_project)
keystone_ip = self.keystone_sentry.info['public-address']
self.keystone_demo = u.authenticate_keystone(
keystone_ip, demo_user.name, 'pass',
api_version=int(self._keystone_version),
user_domain_name=domain.name,
project_domain_name=domain.name,
project_name=demo_project.name)
# find or create the 'default' domain
domain = self._find_or_create(
items=self.keystone.domains.list(),
key=lambda u: u.name == 'default',
create=lambda: self.keystone.domains.create(
"default",
description="domain for barbican testing",
enabled=True))
# find or create the 'demo' user
demo_user = self._find_or_create(
items=self.keystone.users.list(domain=domain.id),
key=lambda u: u.name == 'demo',
create=lambda: self.keystone.users.create(
'demo',
domain=domain.id,
description="Demo user for barbican tests",
enabled=True,
email="demo@example.com",
password="pass"))
# find or create the 'demo' project
demo_project = self._find_or_create(
items=self.keystone.projects.list(domain=domain.id),
key=lambda x: x.name == 'demo',
create=lambda: self.keystone.projects.create(
'demo',
domain=domain.id,
description='barbican testing project',
enabled=True))
# create the role for the user - needs to be admin so that the
# secret can be deleted - note there is only one admin role, and it
# should already be created - if not, then this will fail later.
admin_role = self._find_or_create(
items=self.keystone.roles.list(),
key=lambda r: r.name.lower() == 'admin',
create=lambda: None)
# now grant the creator role to the demo user.
try:
self.keystone.roles.check(
role=admin_role,
user=demo_user,
project=demo_project)
except keystoneclient.exceptions.NotFound:
# create it if it isn't found
self.keystone.roles.grant(
role=admin_role,
user=demo_user,
project=demo_project)
keystone_ip = self.keystone_sentry.info['public-address']
self.keystone_demo = u.authenticate_keystone(
keystone_ip, demo_user.name, 'pass', api_version=3,
user_domain_name=domain.name,
project_domain_name=domain.name,
project_name=demo_project.name)
# Authenticate admin with barbican endpoint
barbican_ep = self.keystone.service_catalog.url_for(

View File

@ -6,5 +6,5 @@
from basic_deployment import BarbicanBasicDeployment

if __name__ == '__main__':
    # NOTE: keystone_version was dropped from the constructor call; the
    # charm now targets Keystone v3 only (see commit message: pre-rocky
    # and legacy keystone configuration support removed).
    deployment = BarbicanBasicDeployment(series='cosmic')
    deployment.run_tests()

View File

@ -1,10 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deployment on bionic-queens.

NOTE(review): the original summary said "keystone v2", but the code below
passes ``keystone_version=3`` -- documented as v3 here to match the code;
confirm which was intended.
"""
from basic_deployment import BarbicanBasicDeployment
if __name__ == '__main__':
    deployment = BarbicanBasicDeployment(series='bionic', keystone_version=3)
    deployment.run_tests()

View File

@ -6,7 +6,7 @@
from basic_deployment import BarbicanBasicDeployment

if __name__ == '__main__':
    # NOTE: keystone_version was dropped from the constructor call; the
    # charm now targets Keystone v3 only (pre-rocky support removed).
    deployment = BarbicanBasicDeployment(series='bionic',
                                         openstack='cloud:bionic-rocky',
                                         source='cloud:bionic-updates/rocky')
    deployment.run_tests()

View File

@ -1,10 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deployment on xenial-mitaka for keystone v2.
"""
from basic_deployment import BarbicanBasicDeployment


def main():
    # Deploy barbican on a xenial base with Keystone v2 authentication
    # and run the full amulet test suite.
    BarbicanBasicDeployment(series='xenial', keystone_version=2).run_tests()


if __name__ == '__main__':
    main()

View File

@ -1,10 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deployment on xenial-mitaka for keystone v3.
"""
from basic_deployment import BarbicanBasicDeployment


def main():
    # Deploy barbican on a xenial base with Keystone v3 authentication
    # and run the full amulet test suite.
    BarbicanBasicDeployment(series='xenial', keystone_version=3).run_tests()


if __name__ == '__main__':
    main()

View File

@ -1,12 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deploy on xenial-ocata for keystone v3.
"""
from basic_deployment import BarbicanBasicDeployment


def main():
    # xenial base plus the ocata cloud archive pockets; Keystone v3 auth.
    deploy = BarbicanBasicDeployment(
        series='xenial',
        keystone_version=3,
        openstack='cloud:xenial-ocata',
        source='cloud:xenial-updates/ocata')
    deploy.run_tests()


if __name__ == '__main__':
    main()

View File

@ -1,12 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deploy on xenial-pike for keystone v3.
"""
from basic_deployment import BarbicanBasicDeployment


def main():
    # xenial base plus the pike cloud archive pockets; Keystone v3 auth.
    deploy = BarbicanBasicDeployment(
        series='xenial',
        keystone_version=3,
        openstack='cloud:xenial-pike',
        source='cloud:xenial-updates/pike')
    deploy.run_tests()


if __name__ == '__main__':
    main()

View File

@ -1,12 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic barbican deploy on xenial-queens for keystone v3.
"""
from basic_deployment import BarbicanBasicDeployment


def main():
    # xenial base plus the queens cloud archive pockets; Keystone v3 auth.
    deploy = BarbicanBasicDeployment(
        series='xenial',
        keystone_version=3,
        openstack='cloud:xenial-queens',
        source='cloud:xenial-updates/queens')
    deploy.run_tests()


if __name__ == '__main__':
    main()

View File

@ -3,7 +3,7 @@
# within individual charm repos.
[tox]
skipsdist = True
envlist = pep8,py35,py36
skip_missing_interpreters = True
[testenv]

View File

@ -28,56 +28,6 @@ class Helper(test_utils.PatchHelper):
self.patch_release(barbican.BarbicanCharm.release)
class TestCustomProperties(Helper):
    """Tests for the keystone-version validation and paste-pipeline
    helpers exposed by the barbican module."""

    def test_validate_keystone_api_version(self):
        cfg = mock.MagicMock()
        # Every supported version string validates without raising.
        for accepted in ('2', '3', 'none'):
            cfg.keystone_api_version = accepted
            barbican.validate_keystone_api_version(cfg)
        # Any unrecognised value is rejected with ValueError.
        cfg.keystone_api_version = 'fail-me'
        with self.assertRaises(ValueError):
            barbican.validate_keystone_api_version(cfg)

    def test_barbican_api_keystone_pipeline(self):
        cfg = mock.MagicMock()
        # (keystone_api_version, expected paste pipeline)
        cases = (
            ('2',
             'cors http_proxy_to_wsgi keystone_authtoken context apiapp'),
            ('',
             'cors http_proxy_to_wsgi keystone_v3_authtoken context apiapp'),
        )
        for version, expected in cases:
            cfg.keystone_api_version = version
            self.assertEqual(
                barbican.barbican_api_keystone_pipeline(cfg), expected)

    def test_barbican_api_pipeline(self):
        cfg = mock.MagicMock()
        # (keystone_api_version, expected paste pipeline)
        cases = (
            ('2',
             'cors http_proxy_to_wsgi keystone_authtoken context apiapp'),
            ('3',
             'cors http_proxy_to_wsgi keystone_v3_authtoken context apiapp'),
            ('none',
             'cors http_proxy_to_wsgi unauthenticated-context apiapp'),
        )
        for version, expected in cases:
            cfg.keystone_api_version = version
            self.assertEqual(barbican.barbican_api_pipeline(cfg), expected)

    def test_barbican_api_keystone_audit_pipeline(self):
        cfg = mock.MagicMock()
        # (keystone_api_version, expected paste pipeline with audit filter)
        cases = (
            ('2',
             'http_proxy_to_wsgi keystone_authtoken context audit apiapp'),
            ('',
             'http_proxy_to_wsgi keystone_v3_authtoken context audit apiapp'),
        )
        for version, expected in cases:
            cfg.keystone_api_version = version
            self.assertEqual(
                barbican.barbican_api_keystone_audit_pipeline(cfg), expected)
class TestHSMProperties(Helper):
def setUp(self):