Update functional test definitions

Update testing setup and config.
Fixes to policy.json

Closes-Bug: #1789961
Change-Id: Iac5da7cd02a5c87f3002dbabf6c21a5c2f936536
Ryan Beisner 2018-08-27 18:17:28 -05:00 committed by David Ames
parent 6ba63148d0
commit f9c822e901
15 changed files with 1004 additions and 58 deletions

View File

@@ -24,7 +24,8 @@ import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import glanceclient.v1 as glance_client
import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (
@@ -623,7 +624,7 @@ class OpenStackAmuletUtils(AmuletUtils):
ep = keystone.service_catalog.url_for(service_type='image',
interface='adminURL')
if keystone.session:
return glance_client.Client(ep, session=keystone.session)
return glance_clientv2.Client("2", session=keystone.session)
else:
return glance_client.Client(ep, token=keystone.auth_token)
@@ -711,10 +712,19 @@ class OpenStackAmuletUtils(AmuletUtils):
f.close()
# Create glance image
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
if float(glance.version) < 2.0:
with open(local_path) as fimage:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare',
data=fimage)
else:
image = glance.images.create(
name=image_name,
disk_format="qcow2",
visibility="public",
container_format="bare")
glance.images.upload(image.id, open(local_path, 'rb'))
# Wait for image to reach active status
img_id = image.id
@@ -729,9 +739,14 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
if float(glance.version) < 2.0:
val_img_pub = glance.images.get(img_id).is_public
else:
val_img_pub = glance.images.get(img_id).visibility == "public"
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
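
For context, a minimal standalone sketch (not part of this commit) of the glance v2 two-step pattern the change adopts: create the image record first, then upload the bits. The keystone_session name is an assumption standing in for the keystoneauth1 session used by the authentication helpers above.

import glanceclient.v2 as glance_clientv2

# keystone_session: a keystoneauth1 session (assumed here).
glance = glance_clientv2.Client("2", session=keystone_session)
image = glance.images.create(name="cirros", disk_format="qcow2",
                             visibility="public", container_format="bare")
with open("cirros.img", "rb") as img:  # context manager also closes the handle
    glance.images.upload(image.id, img)
assert glance.images.get(image.id).visibility == "public"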

View File

@@ -1389,11 +1389,12 @@ class WorkerConfigContext(OSContextGenerator):
class WSGIWorkerConfigContext(WorkerConfigContext):
def __init__(self, name=None, script=None, admin_script=None,
public_script=None, process_weight=1.00,
public_script=None, user=None, group=None,
process_weight=1.00,
admin_process_weight=0.25, public_process_weight=0.75):
self.service_name = name
self.user = name
self.group = name
self.user = user or name
self.group = group or name
self.script = script
self.admin_script = admin_script
self.public_script = public_script
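
A quick sketch of the new fallback behaviour, assuming the context class lives at charmhelpers.contrib.openstack.context as in this file: user and group default to the service name unless explicitly overridden.

from charmhelpers.contrib.openstack.context import WSGIWorkerConfigContext

ctxt = WSGIWorkerConfigContext(name='keystone',
                               script='/usr/bin/keystone-wsgi-public',
                               user='www-data', group='www-data')
assert ctxt.user == 'www-data'      # explicit override wins
default = WSGIWorkerConfigContext(name='keystone',
                                  script='/usr/bin/keystone-wsgi-public')
assert default.user == 'keystone'   # falls back to the service name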

View File

@@ -0,0 +1,412 @@
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from charmhelpers.core.hookenv import (
ERROR,
log,
relation_get,
)
from charmhelpers.contrib.network.ip import (
is_ipv6,
ns_query,
)
from charmhelpers.contrib.openstack.utils import (
get_hostname,
get_host_ip,
is_ip,
)
NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
def ssh_directory_for_unit(application_name, user=None):
"""Return the directory used to store ssh assets for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified directory path.
:rtype: str
"""
if user:
application_name = "{}_{}".format(application_name, user)
_dir = os.path.join(NOVA_SSH_DIR, application_name)
for d in [NOVA_SSH_DIR, _dir]:
if not os.path.isdir(d):
os.mkdir(d)
for f in ['authorized_keys', 'known_hosts']:
f = os.path.join(_dir, f)
if not os.path.isfile(f):
open(f, 'w').close()
return _dir
def known_hosts(application_name, user=None):
"""Return the known hosts file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'known_hosts')
def authorized_keys(application_name, user=None):
"""Return the authorized keys file for the application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
:returns: Fully qualified path to file.
:rtype: str
"""
return os.path.join(
ssh_directory_for_unit(application_name, user),
'authorized_keys')
def ssh_known_host_key(host, application_name, user=None):
"""Return the first entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
:returns: Host key
:rtype: str or None
"""
cmd = [
'ssh-keygen',
'-f', known_hosts(application_name, user),
'-H',
'-F',
host]
try:
# The first line of output is like '# Host xx found: line 1 type RSA',
# which should be excluded.
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# RC of 1 seems to be legitimate for most ssh-keygen -F calls.
if e.returncode == 1:
output = e.output
else:
raise
output = output.strip()
if output:
# Bug #1500589 cmd has 0 rc on precise if entry not present
lines = output.split('\n')
if len(lines) >= 1:
return lines[0]
return None
def remove_known_host(host, application_name, user=None):
"""Remove the entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
subprocess.check_call(cmd)
def is_same_key(key_1, key_2):
"""Extract the key from two host entries and compare them.
:param key_1: Host key
:type key_1: str
:param key_2: Host key
    :type key_2: str
    :returns: True if the underlying keys match.
    :rtype: bool
    """
    # The key we get will look like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB'; we only need to compare
    # the part starting with 'ssh-rsa', after the '= ', because the hashed
    # hostname at the beginning changes each time.
k_1 = key_1.split('= ')[1]
k_2 = key_2.split('= ')[1]
return k_1 == k_2
def add_known_host(host, application_name, user=None):
"""Add the given host key to the known hosts file.
:param host: host name
:type host: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
try:
remote_key = subprocess.check_output(cmd).strip()
except Exception as e:
log('Could not obtain SSH host key from %s' % host, level=ERROR)
raise e
current_key = ssh_known_host_key(host, application_name, user)
if current_key and remote_key:
if is_same_key(remote_key, current_key):
log('Known host key for compute host %s up to date.' % host)
return
else:
remove_known_host(host, application_name, user)
log('Adding SSH host key to known hosts for compute node at %s.' % host)
with open(known_hosts(application_name, user), 'a') as out:
out.write("{}\n".format(remote_key))
def ssh_authorized_key_exists(public_key, application_name, user=None):
"""Check if given key is in the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
    :returns: Whether given key is in the authorized_keys file.
:rtype: boolean
"""
with open(authorized_keys(application_name, user)) as keys:
return ('%s' % public_key) in keys.read()
def add_authorized_key(public_key, application_name, user=None):
"""Add given key to the authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
with open(authorized_keys(application_name, user), 'a') as keys:
keys.write("{}\n".format(public_key))
def ssh_compute_add_host_and_key(public_key, hostname, private_address,
application_name, user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param hostname: Hostname to collect host keys from.
:type hostname: str
    :param private_address: Corresponding private address for hostname
:type private_address: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
# If remote compute node hands us a hostname, ensure we have a
# known hosts entry for its IP, hostname and FQDN.
hosts = [private_address]
if not is_ipv6(private_address):
if hostname:
hosts.append(hostname)
if is_ip(private_address):
hn = get_hostname(private_address)
if hn:
hosts.append(hn)
short = hn.split('.')[0]
if ns_query(short):
hosts.append(short)
else:
hosts.append(get_host_ip(private_address))
short = private_address.split('.')[0]
if ns_query(short):
hosts.append(short)
for host in list(set(hosts)):
add_known_host(host, application_name, user)
if not ssh_authorized_key_exists(public_key, application_name, user):
log('Saving SSH authorized key for compute host at %s.' %
private_address)
add_authorized_key(public_key, application_name, user)
def ssh_compute_add(public_key, application_name, rid=None, unit=None,
user=None):
"""Add a compute nodes ssh details to local cache.
Collect various hostname variations and add the corresponding host keys to
the local known hosts file. Finally, add the supplied public key to the
authorized_key file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param rid: Relation id of the relation between this charm and the app.
                If none is supplied it is assumed to be the relation relating
                to the current hook context.
:type rid: str
    :param unit: Unit to add ssh assets for. If none is supplied it is
                 assumed to be the unit relating to the current hook context.
:type unit: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
relation_data = relation_get(rid=rid, unit=unit)
ssh_compute_add_host_and_key(
public_key,
relation_data.get('hostname'),
relation_data.get('private-address'),
application_name,
user=user)
def ssh_known_hosts_lines(application_name, user=None):
"""Return contents of known_hosts file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
known_hosts_list = []
with open(known_hosts(application_name, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
def ssh_authorized_keys_lines(application_name, user=None):
"""Return contents of authorized_keys file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
authorized_keys_list = []
with open(authorized_keys(application_name, user)) as keys:
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
def ssh_compute_remove(public_key, application_name, user=None):
"""Remove given public key from authorized_keys file.
:param public_key: Public key.
:type public_key: str
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
"""
if not (os.path.isfile(authorized_keys(application_name, user)) or
os.path.isfile(known_hosts(application_name, user))):
return
    keys = ssh_authorized_keys_lines(application_name, user=user)
keys = [k.strip() for k in keys]
if public_key not in keys:
return
    keys = [k for k in keys if k != public_key]
with open(authorized_keys(application_name, user), 'w') as _keys:
keys = '\n'.join(keys)
if not keys.endswith('\n'):
keys += '\n'
_keys.write(keys)
def get_ssh_settings(application_name, user=None):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for application for all
units of the given application related to this application for the
app + user combination.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
    :param user: The user that the ssh assets are for.
:type user: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = {}
keys = {}
prefix = ''
if user:
prefix = '{}_'.format(user)
for i, line in enumerate(ssh_known_hosts_lines(
application_name=application_name, user=user)):
settings['{}known_hosts_{}'.format(prefix, i)] = line
if settings:
settings['{}known_hosts_max_index'.format(prefix)] = len(
settings.keys())
for i, line in enumerate(ssh_authorized_keys_lines(
application_name=application_name, user=user)):
keys['{}authorized_keys_{}'.format(prefix, i)] = line
if keys:
keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
settings.update(keys)
return settings
def get_all_user_ssh_settings(application_name):
"""Retrieve the known host entries and public keys for application
Retrieve the known host entries and public keys for application for all
units of the given application related to this application for root user
and nova user.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:returns: Public keys + host keys for all units for app + user combination.
:rtype: dict
"""
settings = get_ssh_settings(application_name)
settings.update(get_ssh_settings(application_name, user='nova'))
return settings
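
A hypothetical hook-side sketch of how these helpers compose; the application name, key material, and hook wiring are illustrative only and not part of this commit.

from charmhelpers.contrib.openstack import ssh_migrations

def compute_changed():
    # Cache the remote unit's host keys and authorized key for both the
    # root (user=None) and nova users, then return everything as flat
    # relation settings.
    for user in (None, 'nova'):
        ssh_migrations.ssh_compute_add(
            public_key='ssh-rsa AAAAB3Nza... nova@compute-1',
            application_name='nova-compute',
            user=user)
    return ssh_migrations.get_all_user_ssh_settings('nova-compute')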

View File

@@ -14,7 +14,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@@ -40,7 +40,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@@ -66,7 +66,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}

View File

@@ -0,0 +1,91 @@
# Configuration file maintained by Juju. Local changes may be overwritten.
{% if port -%}
Listen {{ port }}
{% endif -%}
{% if admin_port -%}
Listen {{ admin_port }}
{% endif -%}
{% if public_port -%}
Listen {{ public_port }}
{% endif -%}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
<IfVersion >= 2.4>
ErrorLogFormat "%{cu}t %M"
</IfVersion>
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
</VirtualHost>
{% endif -%}
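
A hedged sketch of wiring this template to the WSGIWorkerConfigContext change above via OSConfigRenderer; the target path and release are illustrative, and it assumes the renderer resolves templates by the basename of the registered config file.

from charmhelpers.contrib.openstack import context, templating

renderer = templating.OSConfigRenderer(templates_dir='templates',
                                       openstack_release='queens')
target = '/etc/apache2/sites-enabled/wsgi-openstack-api.conf'
renderer.register(target,
                  [context.WSGIWorkerConfigContext(
                      name='cinder', script='/usr/bin/cinder-wsgi',
                      user='cinder', group='cinder')])
renderer.write(target)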

View File

@@ -831,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
if the unit isn't supposed to be paused, just return None, None
If the unit isn't supposed to be paused, just return None, None
If the unit is performing a series upgrade, return a message indicating
this.
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
if is_unit_upgrading_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
            # The unit is paused as expected; report 'blocked' with
            # series-upgrade guidance and return.
state = "blocked"
message = ("Ready for do-release-upgrade and reboot. "
"Set complete when finished.")
return state, message
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
@@ -1339,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
message = assess_status_func()
if message:
messages.append(message)
if messages:
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
@@ -1689,3 +1702,34 @@ def install_os_snaps(snaps, refresh=False):
snap_install(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
def set_unit_upgrading():
"""Set the unit to a upgrading state in the local kv() store.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', True)
def clear_unit_upgrading():
"""Clear the unit from a upgrading state in the local kv() store
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-upgrading', False)
def is_unit_upgrading_set():
"""Return the state of the kv().get('unit-upgrading').
To help with units that don't have HookData() (testing)
if it excepts, return False
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
            # Coerce the stored value to a Boolean.
            return bool(kv.get('unit-upgrading'))
except Exception:
return False
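
A hedged sketch of the series-upgrade flow these flags enable; the action names, service list, and assess_status_func are placeholders, not APIs defined by this commit.

from charmhelpers.contrib.openstack.utils import (
    clear_unit_upgrading, pause_unit, resume_unit, set_unit_upgrading)

def assess_status_func():
    return None  # placeholder: a real charm returns a status message or None

def pre_series_upgrade():
    set_unit_upgrading()
    # With unit-upgrading set, pause_unit() no longer raises on assess
    # messages, and _ows_check_if_paused reports the blocked
    # "Ready for do-release-upgrade" status instead.
    pause_unit(assess_status_func, services=['apache2'])

def post_series_upgrade():
    clear_unit_upgrading()
    resume_unit(assess_status_func, services=['apache2'])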

View File

@@ -201,11 +201,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
"""The name service group this unit belongs to"""
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
return local_unit().split('/')[0]
def service_name():
"""
.. deprecated:: 0.19.1
Alias for :func:`application_name`.
"""
return application_name()
def model_name():
"""
Name of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_NAME']
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_UUID']
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -1297,3 +1321,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
def unit_doomed(unit=None):
"""Determines if the unit is being removed from the model
Requires Juju 2.4.1.
:param unit: string unit name, defaults to local_unit
:side effect: calls goal_state
:side effect: calls local_unit
:side effect: calls has_juju_version
:return: True if the unit is being removed, already gone, or never existed
"""
if not has_juju_version("2.4.1"):
# We cannot risk blindly returning False for 'we don't know',
# because that could cause data loss; if call sites don't
# need an accurate answer, they likely don't need this helper
# at all.
# goal-state existed in 2.4.0, but did not handle removals
# correctly until 2.4.1.
raise NotImplementedError("is_doomed")
if unit is None:
unit = local_unit()
gs = goal_state()
units = gs.get('units', {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
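
A short sketch of the call pattern unit_doomed() is designed for; purge_local_state is a hypothetical charm-specific helper, not part of hookenv.

from charmhelpers.core.hookenv import has_juju_version, unit_doomed

def purge_local_state():
    pass  # hypothetical cleanup of unit-local persistent data

def cleanup_on_stop():
    # Only drop persistent data when the model really is removing this
    # unit; on Juju < 2.4.1 we cannot tell, so keep the data.
    if has_juju_version("2.4.1") and unit_doomed():
        purge_local_state()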

View File

@@ -244,7 +244,8 @@ def determine_packages():
# Really should be handled as a dep in the openstack-dashboard package
if CompareOpenStackReleases(release) >= 'mitaka':
packages.append('python-pymysql')
if CompareOpenStackReleases(release) >= 'ocata':
if (CompareOpenStackReleases(release) >= 'ocata' and
CompareOpenStackReleases(release) < 'rocky'):
packages.append('python-neutron-lbaas-dashboard')
if CompareOpenStackReleases(release) >= 'queens':
packages.append('python-designate-dashboard')
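
For reference, CompareOpenStackReleases orders release codenames so gates like the one above read naturally; a minimal check of the new bounds:

from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

assert CompareOpenStackReleases('queens') >= 'ocata'    # still in range
assert CompareOpenStackReleases('queens') < 'rocky'     # lbaas dashboard wanted
assert not CompareOpenStackReleases('rocky') < 'rocky'  # dropped from rocky on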

View File

@@ -51,7 +51,7 @@
"identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
"admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s",
"admin_and_matching_group_domain_id": "rule:admin_required and (domain_id:%(group.domain_id)s or domain_id:%(user.domain_id)s",
"admin_and_matching_group_domain_id": "rule:admin_required and (domain_id:%(group.domain_id)s or domain_id:%(user.domain_id)s)",
"identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_target_user_domain_id",

View File

@@ -0,0 +1,215 @@
{
"admin_required": "role:Admin",
"cloud_admin": "rule:admin_required and rule:domain_id:{{ admin_domain_id }}",
"service_role": "role:service",
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "rule:user_id:%(user_id)s or rule:user_id:%(target.token.user_id)s",
"admin_or_owner": "(rule:admin_required and rule:domain_id:%(target.token.user.domain.id)s) or rule:owner",
"admin_and_matching_domain_id": "rule:admin_required and rule:domain_id:%(domain_id)s",
"service_admin_or_owner": "rule:service_or_admin or rule:owner",
"default": "rule:admin_required",
"identity:get_region": "",
"identity:list_regions": "",
"identity:create_region": "rule:cloud_admin",
"identity:update_region": "rule:cloud_admin",
"identity:delete_region": "rule:cloud_admin",
"identity:get_service": "rule:admin_required",
"identity:list_services": "rule:admin_required",
"identity:create_service": "rule:cloud_admin",
"identity:update_service": "rule:cloud_admin",
"identity:delete_service": "rule:cloud_admin",
"identity:get_endpoint": "rule:admin_required",
"identity:list_endpoints": "rule:admin_required",
"identity:create_endpoint": "rule:cloud_admin",
"identity:update_endpoint": "rule:cloud_admin",
"identity:delete_endpoint": "rule:cloud_admin",
"identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_domains": "rule:cloud_admin",
"identity:create_domain": "rule:cloud_admin",
"identity:update_domain": "rule:cloud_admin",
"identity:delete_domain": "rule:cloud_admin",
"admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s",
"identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id or project_id:%(target.project.id)s",
"identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id",
"identity:create_project": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id",
"identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s",
"admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s",
"identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
"identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id",
"identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
"identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id",
"admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s",
"admin_and_matching_group_domain_id": "rule:admin_required and (domain_id:%(group.domain_id)s or domain_id:%(user.domain_id)s)",
"identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_target_user_domain_id",
"identity:create_group": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_domain_id",
"identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id",
"identity:get_credential": "rule:admin_required",
"identity:list_credentials": "rule:admin_required or user_id:%(user_id)s",
"identity:create_credential": "rule:admin_required",
"identity:update_credential": "rule:admin_required",
"identity:delete_credential": "rule:admin_required",
"identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
"identity:ec2_list_credentials": "rule:admin_required or rule:owner",
"identity:ec2_create_credential": "rule:admin_required or rule:owner",
"identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
"identity:get_role": "rule:admin_required",
"identity:list_roles": "rule:admin_required",
"identity:create_role": "rule:cloud_admin",
"identity:update_role": "rule:cloud_admin",
"identity:delete_role": "rule:cloud_admin",
"identity:get_domain_role": "rule:cloud_admin or rule:get_domain_roles",
"identity:list_domain_roles": "rule:cloud_admin or rule:list_domain_roles",
"identity:create_domain_role": "rule:cloud_admin or rule:domain_admin_matches_domain_role",
"identity:update_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
"identity:delete_domain_role": "rule:cloud_admin or rule:domain_admin_matches_target_domain_role",
"domain_admin_matches_domain_role": "rule:admin_required and domain_id:%(role.domain_id)s",
"get_domain_roles": "rule:domain_admin_matches_target_domain_role or rule:project_admin_matches_target_domain_role",
"domain_admin_matches_target_domain_role": "rule:admin_required and domain_id:%(target.role.domain_id)s",
"project_admin_matches_target_domain_role": "rule:admin_required and project_domain_id:%(target.role.domain_id)s",
"list_domain_roles": "rule:domain_admin_matches_filter_on_list_domain_roles or rule:project_admin_matches_filter_on_list_domain_roles",
"domain_admin_matches_filter_on_list_domain_roles": "rule:admin_required and domain_id:%(domain_id)s",
"project_admin_matches_filter_on_list_domain_roles": "rule:admin_required and project_domain_id:%(domain_id)s",
"identity:get_implied_role": "rule:cloud_admin",
"identity:list_implied_roles": "rule:cloud_admin",
"identity:create_implied_role": "rule:cloud_admin",
"identity:delete_implied_role": "rule:cloud_admin",
"identity:list_role_inference_rules": "rule:cloud_admin",
"identity:check_implied_role": "rule:cloud_admin",
"domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)",
"project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s",
"identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants",
"admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s",
"admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s",
"admin_on_domain_of_project_filter" : "rule:admin_required and domain_id:%(target.project.domain_id)s",
"identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter",
"identity:list_role_assignments_for_tree": "rule:cloud_admin or rule:admin_on_domain_of_project_filter",
"identity:get_policy": "rule:cloud_admin",
"identity:list_policies": "rule:cloud_admin",
"identity:create_policy": "rule:cloud_admin",
"identity:update_policy": "rule:cloud_admin",
"identity:delete_policy": "rule:cloud_admin",
"identity:change_password": "rule:owner",
"identity:check_token": "rule:admin_or_owner",
"identity:validate_token": "rule:service_admin_or_owner",
"identity:validate_token_head": "rule:service_or_admin",
"identity:revocation_list": "rule:service_or_admin",
"identity:revoke_token": "rule:admin_or_owner",
"identity:create_trust": "rule:user_id:%(trust.trustor_user_id)s",
"identity:list_trusts": "",
"identity:list_roles_for_trust": "",
"identity:get_role_for_trust": "",
"identity:delete_trust": "",
"identity:create_consumer": "rule:admin_required",
"identity:get_consumer": "rule:admin_required",
"identity:list_consumers": "rule:admin_required",
"identity:delete_consumer": "rule:admin_required",
"identity:update_consumer": "rule:admin_required",
"identity:authorize_request_token": "rule:admin_required",
"identity:list_access_token_roles": "rule:admin_required",
"identity:get_access_token_role": "rule:admin_required",
"identity:list_access_tokens": "rule:admin_required",
"identity:get_access_token": "rule:admin_required",
"identity:delete_access_token": "rule:admin_required",
"identity:list_projects_for_endpoint": "rule:admin_required",
"identity:add_endpoint_to_project": "rule:admin_required",
"identity:check_endpoint_in_project": "rule:admin_required",
"identity:list_endpoints_for_project": "rule:admin_required",
"identity:remove_endpoint_from_project": "rule:admin_required",
"identity:create_endpoint_group": "rule:admin_required",
"identity:list_endpoint_groups": "rule:admin_required",
"identity:get_endpoint_group": "rule:admin_required",
"identity:update_endpoint_group": "rule:admin_required",
"identity:delete_endpoint_group": "rule:admin_required",
"identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
"identity:get_endpoint_group_in_project": "rule:admin_required",
"identity:list_endpoint_groups_for_project": "rule:admin_required",
"identity:add_endpoint_group_to_project": "rule:admin_required",
"identity:remove_endpoint_group_from_project": "rule:admin_required",
"identity:create_identity_provider": "rule:cloud_admin",
"identity:list_identity_providers": "rule:cloud_admin",
"identity:get_identity_providers": "rule:cloud_admin",
"identity:update_identity_provider": "rule:cloud_admin",
"identity:delete_identity_provider": "rule:cloud_admin",
"identity:create_protocol": "rule:cloud_admin",
"identity:update_protocol": "rule:cloud_admin",
"identity:get_protocol": "rule:cloud_admin",
"identity:list_protocols": "rule:cloud_admin",
"identity:delete_protocol": "rule:cloud_admin",
"identity:create_mapping": "rule:cloud_admin",
"identity:get_mapping": "rule:cloud_admin",
"identity:list_mappings": "rule:cloud_admin",
"identity:delete_mapping": "rule:cloud_admin",
"identity:update_mapping": "rule:cloud_admin",
"identity:create_service_provider": "rule:cloud_admin",
"identity:list_service_providers": "rule:cloud_admin",
"identity:get_service_provider": "rule:cloud_admin",
"identity:update_service_provider": "rule:cloud_admin",
"identity:delete_service_provider": "rule:cloud_admin",
"identity:get_auth_catalog": "",
"identity:get_auth_projects": "",
"identity:get_auth_domains": "",
"identity:list_projects_for_groups": "",
"identity:list_domains_for_groups": "",
"identity:list_revoke_events": "",
"identity:create_policy_association_for_endpoint": "rule:cloud_admin",
"identity:check_policy_association_for_endpoint": "rule:cloud_admin",
"identity:delete_policy_association_for_endpoint": "rule:cloud_admin",
"identity:create_policy_association_for_service": "rule:cloud_admin",
"identity:check_policy_association_for_service": "rule:cloud_admin",
"identity:delete_policy_association_for_service": "rule:cloud_admin",
"identity:create_policy_association_for_region_and_service": "rule:cloud_admin",
"identity:check_policy_association_for_region_and_service": "rule:cloud_admin",
"identity:delete_policy_association_for_region_and_service": "rule:cloud_admin",
"identity:get_policy_for_endpoint": "rule:cloud_admin",
"identity:list_endpoints_for_policy": "rule:cloud_admin",
"identity:create_domain_config": "rule:cloud_admin",
"identity:get_domain_config": "rule:cloud_admin",
"identity:update_domain_config": "rule:cloud_admin",
"identity:delete_domain_config": "rule:cloud_admin",
"identity:get_domain_config_default": "rule:cloud_admin"
}

View File

@@ -0,0 +1,16 @@
WSGIScriptAlias {{ webroot }} /usr/share/openstack-dashboard/openstack_dashboard/wsgi.py process-group=horizon
WSGIDaemonProcess horizon user=horizon group=horizon processes={{ processes }} threads=10 display-name=%{GROUP}
WSGIProcessGroup horizon
WSGIApplicationGroup %{GLOBAL}
{% if custom_theme %}
Alias /static/themes/custom /usr/share/openstack-dashboard/openstack_dashboard/themes/custom/static/
{% endif %}
Alias /static /var/lib/openstack-dashboard/static/
Alias /horizon/static /var/lib/openstack-dashboard/static/
<Directory /usr/share/openstack-dashboard/openstack_dashboard>
Require all granted
</Directory>
<Directory /var/lib/openstack-dashboard/static>
Require all granted
</Directory>

View File

@@ -66,7 +66,8 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
           local, and the rest of the services are from lp branches that are
           compatible with the local charm (e.g. stable or next).
"""
this_service = {'name': 'openstack-dashboard'}
this_service = {'name': 'openstack-dashboard',
'constraints': {'mem': '3072M'}}
other_services = [
{'name': 'keystone'},
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
@@ -90,16 +91,19 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
"""Configure all of the services."""
horizon_config = {
'debug': 'yes',
'haproxy-server-timeout': 90000,
'haproxy-client-timeout': 90000,
'haproxy-queue-timeout': 9000,
'haproxy-connect-timeout': 9000,
}
keystone_config = {
'admin-password': 'openstack',
'admin-token': 'ubuntutesting',
}
pxc_config = {
'dataset-size': '25%',
'max-connections': 1000,
'root-password': 'ChangeMe123',
'sst-password': 'ChangeMe123',
'tuning-level': 'unsafe',
}
configs = {
'openstack-dashboard': horizon_config,
@@ -230,7 +234,7 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
# add retry logic to unwedge the gate. This issue
# should be revisited and root caused properly when time
# allows.
@retry_on_exception(2, base_delay=2)
@retry_on_exception(3, base_delay=30)
def do_request():
response = urllib2.urlopen('http://%s/horizon' % (dashboard_ip))
return response.read()
@@ -239,6 +243,9 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
msg = "Dashboard frontpage check failed"
amulet.raise_status(amulet.FAIL, msg=msg)
class FailedAuth(Exception):
pass
def test_401_authenticate(self):
"""Validate that authentication succeeds when client logs in through
the OpenStack Dashboard"""
@@ -257,41 +264,62 @@ class OpenstackDashboardBasicDeployment(OpenStackAmuletDeployment):
region = u.get_keystone_endpoint(
self.keystone_sentry.info['public-address'], api_version)
# start session, get csrftoken
client = requests.session()
client.get(url)
response = client.get(url)
if 'csrftoken' in client.cookies:
csrftoken = client.cookies['csrftoken']
# build and send post request
auth = {
'domain': 'admin_domain',
'username': 'admin',
'password': 'openstack',
'csrfmiddlewaretoken': csrftoken,
'next': '/horizon/',
'region': region,
}
if api_version == 2:
del auth['domain']
u.log.debug('POST data: "{}"'.format(auth))
response = client.post(url, data=auth, headers={'Referer': url})
if self._get_openstack_release() == self.trusty_icehouse:
# icehouse horizon does not operate properly without the compute
# service present in the keystone catalog. However, checking for
# presence of the following text is sufficient to determine whether
# authentication succeeded or not
expect = 'ServiceCatalogException at /admin/'
elif self._get_openstack_release() >= self.xenial_queens:
expect = 'API Access - OpenStack Dashboard'
else:
expect = 'Projects - OpenStack Dashboard'
if expect not in response.text:
msg = 'FAILURE code={} text="{}"'.format(response, response.text)
amulet.raise_status(amulet.FAIL, msg=msg)
        # NOTE(thedac) Similar to the connection test above, we see
        # occasional intermittent authentication failures. Wrap the check
        # in a retry loop.
@retry_on_exception(3, base_delay=30)
def _do_auth_check(expect):
# start session, get csrftoken
client = requests.session()
client.get(url)
if 'csrftoken' in client.cookies:
csrftoken = client.cookies['csrftoken']
else:
raise Exception("Missing csrftoken")
# build and send post request
auth = {
'domain': 'admin_domain',
'username': 'admin',
'password': 'openstack',
'csrfmiddlewaretoken': csrftoken,
'next': '/horizon/',
'region': region,
}
            # After login the default redirect, /horizon/project/, is
            # unauthorized on Queens and later; redirect to
            # /horizon/project/api_access/ instead.
if self._get_openstack_release() >= self.xenial_queens:
auth['next'] = '/horizon/project/api_access'
if api_version == 2:
del auth['domain']
u.log.debug('POST data: "{}"'.format(auth))
response = client.post(url, data=auth, headers={'Referer': url})
if expect not in response.text:
msg = 'FAILURE code={} text="{}"'.format(response,
response.text)
# NOTE(thedac) amulet.raise_status exits on exception.
# Raise a custom exception.
raise self.FailedAuth(msg)
try:
_do_auth_check(expect)
except self.FailedAuth as e:
amulet.raise_status(amulet.FAIL, e.message)
u.log.debug('OK')
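
For reference, a hedged sketch of the retry decorator the test now leans on, from charmhelpers.core.decorators: any exception of the (default) Exception type triggers another attempt after a delay, and the last failure propagates to the caller.

from charmhelpers.core.decorators import retry_on_exception

@retry_on_exception(3, base_delay=30)
def flaky_check():
    raise RuntimeError("still failing")  # would be retried, then re-raised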

View File

@@ -24,7 +24,8 @@ import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import glanceclient.v1 as glance_client
import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (
@@ -623,7 +624,7 @@ class OpenStackAmuletUtils(AmuletUtils):
ep = keystone.service_catalog.url_for(service_type='image',
interface='adminURL')
if keystone.session:
return glance_client.Client(ep, session=keystone.session)
return glance_clientv2.Client("2", session=keystone.session)
else:
return glance_client.Client(ep, token=keystone.auth_token)
@@ -711,10 +712,19 @@ class OpenStackAmuletUtils(AmuletUtils):
f.close()
# Create glance image
with open(local_path) as f:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare', data=f)
if float(glance.version) < 2.0:
with open(local_path) as fimage:
image = glance.images.create(name=image_name, is_public=True,
disk_format='qcow2',
container_format='bare',
data=fimage)
else:
image = glance.images.create(
name=image_name,
disk_format="qcow2",
visibility="public",
container_format="bare")
glance.images.upload(image.id, open(local_path, 'rb'))
# Wait for image to reach active status
img_id = image.id
@@ -729,9 +739,14 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
if float(glance.version) < 2.0:
val_img_pub = glance.images.get(img_id).is_public
else:
val_img_pub = glance.images.get(img_id).visibility == "public"
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,

View File

@@ -201,11 +201,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
def service_name():
"""The name service group this unit belongs to"""
def application_name():
"""
The name of the deployed application this unit belongs to.
"""
return local_unit().split('/')[0]
def service_name():
"""
.. deprecated:: 0.19.1
Alias for :func:`application_name`.
"""
return application_name()
def model_name():
"""
Name of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_NAME']
def model_uuid():
"""
UUID of the model that this unit is deployed in.
"""
return os.environ['JUJU_MODEL_UUID']
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -1297,3 +1321,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
def unit_doomed(unit=None):
"""Determines if the unit is being removed from the model
Requires Juju 2.4.1.
:param unit: string unit name, defaults to local_unit
:side effect: calls goal_state
:side effect: calls local_unit
:side effect: calls has_juju_version
:return: True if the unit is being removed, already gone, or never existed
"""
if not has_juju_version("2.4.1"):
# We cannot risk blindly returning False for 'we don't know',
# because that could cause data loss; if call sites don't
# need an accurate answer, they likely don't need this helper
# at all.
# goal-state existed in 2.4.0, but did not handle removals
# correctly until 2.4.1.
raise NotImplementedError("is_doomed")
if unit is None:
unit = local_unit()
gs = goal_state()
units = gs.get('units', {})
if unit not in units:
return True
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')