Remove deploy-from-source support

Drop support for deployment from Git repositories, as deprecated
in the 17.02 charm release.  This feature is unmaintained and has
no known users.

Change-Id: I0abe07721bedfd8b80c7c590bc646abfc822bbfa
James Page 2018-01-10 14:08:56 +00:00
parent 72f88dcab0
commit 4da9fdd67c
25 changed files with 361 additions and 2399 deletions

View File

@ -1,5 +1,3 @@
git-reinstall:
description: Reinstall quantum-gateway from the openstack-origin-git repositories.
openstack-upgrade:
description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
pause:

View File

@ -1 +0,0 @@
git_reinstall.py

View File

@ -1,45 +0,0 @@
#!/usr/bin/env python3
import sys
import traceback
sys.path.append('hooks/')
from charmhelpers.contrib.openstack.utils import (
git_install_requested,
)
from charmhelpers.core.hookenv import (
action_set,
action_fail,
config,
)
from neutron_utils import (
git_install,
)
from neutron_hooks import (
config_changed,
)
def git_reinstall():
"""Reinstall from source and restart services.
If the openstack-origin-git config option was used to install openstack
from source git repositories, then this action can be used to reinstall
from updated git repositories, followed by a restart of services."""
if not git_install_requested():
action_fail('openstack-origin-git is not configured')
return
try:
git_install(config('openstack-origin-git'))
config_changed()
except:
action_set({'traceback': traceback.format_exc()})
action_fail('git-reinstall resulted in an unexpected error')
if __name__ == '__main__':
git_reinstall()

View File

@ -31,40 +31,6 @@ options:
NOTE: updating this setting to a source that is known to provide
a later version of OpenStack will trigger a software upgrade unless
action-managed-upgrade is set to True.
openstack-origin-git:
type: string
default:
description: |
Specifies a default OpenStack release name, or a YAML dictionary
listing the git repositories to install from.
.
The default Openstack release name may be one of the following, where
the corresponding OpenStack github branch will be used:
* liberty
* mitaka
* newton
* master
.
The YAML must minimally include requirements, neutron-fwaas,
neutron-lbaas, neutron-vpnaas, and neutron repositories, and may
also include repositories for other dependencies:
repositories:
- {name: requirements,
repository: 'git://github.com/openstack/requirements',
branch: master}
- {name: neutron-fwaas,
repository: 'git://github.com/openstack/neutron-fwaas',
branch: master}
- {name: neutron-lbaas,
repository: 'git://github.com/openstack/neutron-lbaas',
branch: master}
- {name: neutron-vpnaas,
repository: 'git://github.com/openstack/neutron-vpnaas',
branch: master}
- {name: neutron,
repository: 'git://github.com/openstack/neutron',
branch: master}
release: master
action-managed-upgrade:
type: boolean
default: False

View File

@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))

View File

@ -97,10 +97,10 @@ from charmhelpers.contrib.network.ip import (
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
get_host_ip,
git_determine_usr_bin,
git_determine_python_path,
enable_memcache,
snap_install_requested,
CompareOpenStackReleases,
os_release,
)
from charmhelpers.core.unitdata import kv
@ -332,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
self.rel_name = rel_name
self.interfaces = [self.rel_name]
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
def _setup_pki_cache(self):
if self.service and self.service_user:
# This is required for pki token signing if we don't want /tmp to
# be used.
@ -345,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
mkdir(path=cachedir, owner=self.service_user,
group=self.service_user, perms=0o700)
return cachedir
return None
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
@ -383,6 +389,62 @@ class IdentityServiceContext(OSContextGenerator):
return {}
class IdentityCredentialsContext(IdentityServiceContext):
'''Context for identity-credentials interface type'''
def __init__(self,
service=None,
service_user=None,
rel_name='identity-credentials'):
super(IdentityCredentialsContext, self).__init__(service,
service_user,
rel_name)
def __call__(self):
log('Generating template context for ' + self.rel_name, level=DEBUG)
ctxt = {}
cachedir = self._setup_pki_cache()
if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
credentials_host = rdata.get('credentials_host')
credentials_host = (
format_ipv6_addr(credentials_host) or credentials_host
)
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
svc_protocol = rdata.get('credentials_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({
'service_port': rdata.get('credentials_port'),
'service_host': credentials_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'admin_tenant_name': rdata.get('credentials_project'),
'admin_tenant_id': rdata.get('credentials_project_id'),
'admin_user': rdata.get('credentials_username'),
'admin_password': rdata.get('credentials_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'api_version': api_version
})
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('domain')})
if self.context_complete(ctxt):
return ctxt
return {}
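A hedged sketch of how a charm might consume the new IdentityCredentialsContext via the usual OSConfigRenderer pattern; the config file path, template directory and release are illustrative assumptions, and the calls would normally run inside a hook:

from charmhelpers.contrib.openstack import context, templating

# Hypothetical wiring: render a config file with credentials taken from
# the identity-credentials relation instead of identity-service.
CONFIGS = templating.OSConfigRenderer(templates_dir='templates/',
                                      openstack_release='pike')
CONFIGS.register('/etc/example/example.conf',
                 [context.IdentityCredentialsContext(
                     service='example', service_user='example')])
CONFIGS.write_all()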
class AMQPContext(OSContextGenerator):
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
@ -1321,8 +1383,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
"usr_bin": git_determine_usr_bin(),
"python_path": git_determine_python_path(),
}
return ctxt
@ -1566,8 +1626,18 @@ class InternalEndpointContext(OSContextGenerator):
endpoints by default so this allows admins to optionally use internal
endpoints.
"""
def __init__(self, ost_rel_check_pkg_name):
self.ost_rel_check_pkg_name = ost_rel_check_pkg_name
def __call__(self):
return {'use_internal_endpoints': config('use-internal-endpoints')}
ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
rel = os_release(self.ost_rel_check_pkg_name, base='icehouse')
if CompareOpenStackReleases(rel) >= 'pike':
ctxt['volume_api_version'] = '3'
else:
ctxt['volume_api_version'] = '2'
return ctxt
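InternalEndpointContext now requires the package used for release detection and adds a volume API version to the generated context. A minimal usage sketch, assuming 'neutron-common' as the package to check; the call only works inside a hook environment:

from charmhelpers.contrib.openstack.context import InternalEndpointContext

# The package name is only used to work out the installed OpenStack
# release; on Pike or later the context selects the v3 volume API.
ctxt = InternalEndpointContext(ost_rel_check_pkg_name='neutron-common')()
# e.g. {'use_internal_endpoints': False, 'volume_api_version': '3'}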
class AppArmorContext(OSContextGenerator):

View File

@ -17,22 +17,22 @@ defaults
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
timeout queue 5000
timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
timeout connect 5000
timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{%- else %}
timeout client 30000
timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{%- else %}
timeout server 30000
timeout server 90000
{%- endif %}
listen stats

View File

@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
import shutil
import six
import traceback
@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
service_name,
status_set,
hook_name,
application_version_set,
@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import (
lsb_release,
mounts,
@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'newton': 'stable/newton',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -392,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@ -528,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@ -769,417 +741,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
def git_install_requested():
"""
Returns true if openstack-origin-git is specified.
"""
return config('openstack-origin-git') is not None
def git_os_codename_install_source(projects_yaml):
"""
Returns OpenStack codename of release being installed from source.
"""
if git_install_requested():
projects = _git_yaml_load(projects_yaml)
if projects in GIT_DEFAULT_BRANCHES.keys():
if projects == 'master':
return 'ocata'
return projects
if 'release' in projects:
if projects['release'] == 'master':
return 'ocata'
return projects['release']
return None
def git_default_repos(projects_yaml):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
core_project = service
for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
if projects_yaml == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron-* and nova-* charms require some additional repos
if service in ['neutron-api', 'neutron-gateway',
'neutron-openvswitch']:
core_project = 'neutron'
if service == 'neutron-api':
repo = {
'name': 'networking-hyperv',
'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
'branch': branch,
}
repos.append(repo)
for project in ['neutron-fwaas', 'neutron-lbaas',
'neutron-vpnaas', 'nova']:
repo = {
'name': project,
'repository': GIT_DEFAULT_REPOS[project],
'branch': branch,
}
repos.append(repo)
elif service in ['nova-cloud-controller', 'nova-compute']:
core_project = 'nova'
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
elif service == 'openstack-dashboard':
core_project = 'horizon'
# finally add the current service's core project repo
repo = {
'name': core_project,
'repository': GIT_DEFAULT_REPOS[core_project],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos, release=default))
return projects_yaml
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
The expected format of projects_yaml is:
repositories:
- {name: keystone,
repository: 'git://git.openstack.org/openstack/keystone.git',
branch: 'stable/icehouse'}
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
# Upgrade setuptools and pip from default virtualenv versions. The default
# versions in trusty break master OpenStack branch deployments.
for p in ['pip', 'setuptools']:
pip_install(p, upgrade=True, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
constraints = None
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
depth = '1'
if 'depth' in p.keys():
depth = p['depth']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
constraints = os.path.join(repo_dir, "upper-constraints.txt")
# upper-constraints didn't exist until after icehouse
if not os.path.isfile(constraints):
constraints = None
# use constraints unless project yaml sets use_constraints to false
if 'use_constraints' in projects.keys():
if not projects['use_constraints']:
constraints = None
else:
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True,
constraints=constraints)
os.environ = old_environ
def _git_validate_projects_yaml(projects, core_project):
"""
Validate the projects yaml.
"""
_git_ensure_key_exists('repositories', projects)
for project in projects['repositories']:
_git_ensure_key_exists('name', project.keys())
_git_ensure_key_exists('repository', project.keys())
_git_ensure_key_exists('branch', project.keys())
if projects['repositories'][0]['name'] != 'requirements':
error_out('{} git repo must be specified first'.format('requirements'))
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
_git_ensure_key_exists('release', projects)
def _git_ensure_key_exists(key, keys):
"""
Ensure that key exists in keys.
"""
if key not in keys:
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements, constraints=None):
"""
Clone and install a single git repository.
"""
if not os.path.exists(parent_dir):
juju_log('Directory already exists at {}. '
'No need to create directory.'.format(parent_dir))
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(venv, repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
if http_proxy:
pip_install(repo_dir, proxy=http_proxy, venv=venv,
constraints=constraints)
else:
pip_install(repo_dir, venv=venv, constraints=constraints)
return repo_dir
def _git_update_requirements(venv, package_dir, reqs_dir):
"""
Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt.
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
python = os.path.join(venv, 'bin/python')
cmd = [python, 'update.py', package_dir]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from "
"global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
for p in projects['repositories']:
if p['name'] == project:
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
# Create the init script and systemd unit file from the template
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = ('/usr/share/openstack-pkg-tools/'
'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(init_source, init_dest)
shutil.copyfile(service_source, service_dest)
os.chmod(init_dest, 0o755)
for f in os.listdir(templates_dir):
# If there's a service.in file, use it instead of the generated one
if f.endswith(".service.in"):
service_in_file = f
service_file = f[:-3]
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(service_in_source, service_source)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
for f in os.listdir(templates_dir):
# Generate the systemd unit if there's no existing .service.in
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_in_file = "{}.service.in".format(init_file)
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
if not os.path.exists(service_in_source):
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
def git_determine_usr_bin():
"""Return the /usr/bin path for Apache2 config.
The /usr/bin path will be located in the virtualenv if the charm
is configured to deploy from source.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
else:
return '/usr/bin'
def git_determine_python_path():
"""Return the python-path for Apache2 config.
Returns 'None' unless the charm is configured to deploy from source,
in which case the path of the virtualenv's site-packages is returned.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml),
'lib/python2.7/site-packages')
else:
return None
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@ -1613,27 +1174,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
if git_install_requested():
action_set({'outcome': 'installed from source, skipped upgrade.'})
else:
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'no upgrade available.'})
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
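With the source-install branch removed, the helper only follows the package upgrade path. A sketch of how an openstack-upgrade action typically invokes it, assuming neutron-gateway's upgrade callback and register_configs helper:

from charmhelpers.contrib.openstack.utils import do_action_openstack_upgrade
from neutron_utils import do_openstack_upgrade, register_configs

# Assumed wiring for an 'openstack-upgrade' action: the upgrade only
# proceeds when a newer release is available and the
# action-managed-upgrade config option is True.
CONFIGS = register_configs()
do_action_openstack_upgrade('neutron-common', do_openstack_upgrade, CONFIGS)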
@ -2043,14 +1601,25 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
:param filename: json filename (i.e.: /etc/glance/policy.json)
:param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
if not items:
return
with open(filename) as fd:
policy = json.load(fd)
# Compare before and after and if nothing has changed don't write the file
# since that could cause unnecessary service restarts.
before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
after = json.dumps(policy, indent=4, sort_keys=True)
if before == after:
return
with open(filename, "w") as fd:
fd.write(json.dumps(policy, indent=4))
fd.write(after)
@cached

View File

@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# failure_domain changed in luminous
if version and version >= '12.0.0':
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# Add plugin specific information
if locality is not None:
# For local erasure codes
@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None):
permission=None, key_name=None,
object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']}
"""
self.ops.append({'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace, 'name': key_name or service_name(),
'group-permission': permission})
self.ops.append({
'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace,
'name': key_name or service_name(),
'group-permission': permission,
'object-prefix-permissions': object_prefix_permissions})
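A short sketch of requesting prefix-scoped access with the extended broker op; the group name and prefixes are illustrative:

from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

# Illustrative request: read/write/execute on two object prefixes and
# class-read on a third, scoped to the 'images' group.
rq = CephBrokerRq()
rq.add_op_request_access_to_group(
    name='images',
    object_prefix_permissions={'rwx': ['prefix1', 'prefix2'],
                               'class-read': ['prefix3']})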
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
for key in [
'replicas', 'name', 'op', 'pg_num', 'weight',
'group', 'group-namespace', 'group-permission',
'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from subprocess import (
CalledProcessError,
check_call,
@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
def list_logical_volumes(select_criteria=None, path_mode=False):
'''
List logical volumes
:param select_criteria: str: Limit list to those volumes matching this
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume name in 'vg/lv' format, this
format is required for some commands like lvextend
:returns: [str]: List of logical volumes
'''
lv_diplay_attr = 'lv_name'
if path_mode:
# Parsing output logic relies on the column order
lv_diplay_attr = 'vg_name,' + lv_diplay_attr
cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
if select_criteria:
cmd.extend(['--select', select_criteria])
lvs = []
for lv in check_output(cmd).decode('UTF-8').splitlines():
if not lv:
continue
if path_mode:
lvs.append('/'.join(lv.strip().split()))
else:
lvs.append(lv.strip())
return lvs
list_thin_logical_volume_pools = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^t')
list_thin_logical_volumes = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^V')
def extend_logical_volume_by_device(lv_name, block_device):
'''
Extends the size of logical volume lv_name by the amount of free space on
physical volume block_device.
:param lv_name: str: name of logical volume to be extended (vg/lv format)
:param block_device: str: name of block_device to be allocated to lv_name
'''
cmd = ['lvextend', lv_name, block_device]
check_call(cmd)
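A brief sketch combining the new LVM helpers, assuming a thin-pool layout; the volume group and device names are made up:

from charmhelpers.contrib.storage.linux.lvm import (
    list_thin_logical_volume_pools,
    extend_logical_volume_by_device,
)

# Hypothetical example: grow every thin pool in 'vg0' by a newly added PV.
# path_mode=True returns 'vg/lv' names, which is what lvextend expects.
for pool in list_thin_logical_volume_pools(path_mode=True):
    if pool.startswith('vg0/'):
        extend_logical_volume_by_device(pool, '/dev/vdb')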

View File

@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
'bionic',
)

View File

@ -30,7 +30,6 @@ from charmhelpers.contrib.hahelpers.apache import(
install_ca_cert
)
from charmhelpers.contrib.openstack.utils import (
config_value_changed,
configure_installation_source,
openstack_upgrade_available,
pausable_restart_on_change as restart_on_change,
@ -51,8 +50,6 @@ from neutron_utils import (
do_openstack_upgrade,
get_packages,
get_early_packages,
git_install,
git_install_requested,
valid_plugin,
configure_ovs,
stop_services,
@ -91,8 +88,6 @@ def install():
fatal=True)
apt_install(filter_installed_packages(get_packages()),
fatal=True)
status_set('maintenance', 'Git install')
git_install(config('openstack-origin-git'))
else:
message = 'Please provide a valid plugin config'
log(message, level=ERROR)
@ -112,13 +107,7 @@ def install():
@harden()
def config_changed():
global CONFIGS
if git_install_requested():
if config_value_changed('openstack-origin-git'):
status_set('maintenance', 'Running Git install')
git_install(config('openstack-origin-git'))
CONFIGS.write_all()
elif not config('action-managed-upgrade'):
if not config('action-managed-upgrade'):
if openstack_upgrade_available(NEUTRON_COMMON):
status_set('maintenance', 'Running openstack upgrade')
do_openstack_upgrade(CONFIGS)
@ -144,12 +133,11 @@ def config_changed():
status_set('blocked', message)
sys.exit(1)
if config('plugin') == 'n1kv':
if not git_install_requested():
if config('enable-l3-agent'):
status_set('maintenance', 'Installing apt packages')
apt_install(filter_installed_packages('neutron-l3-agent'))
else:
apt_purge('neutron-l3-agent')
if config('enable-l3-agent'):
status_set('maintenance', 'Installing apt packages')
apt_install(filter_installed_packages('neutron-l3-agent'))
else:
apt_purge('neutron-l3-agent')
# Setup legacy ha configurations
update_legacy_ha_files()

View File

@ -3,21 +3,15 @@ import shutil
import subprocess
from shutil import copy2
from charmhelpers.core.host import (
adduser,
add_group,
add_user_to_group,
lsb_release,
mkdir,
service,
service_running,
service_stop,
service_restart,
write_file,
init_is_systemd,
CompareHostReleases,
)
from charmhelpers.core.hookenv import (
charm_dir,
log,
DEBUG,
INFO,
@ -26,7 +20,6 @@ from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
)
from charmhelpers.core.templating import render
from charmhelpers.fetch import (
apt_upgrade,
apt_update,
@ -43,12 +36,6 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
get_os_codename_install_source,
git_clone_and_install,
git_default_repos,
git_generate_systemd_init_files,
git_install_requested,
git_pip_venv_dir,
git_src_dir,
make_assess_status_func,
os_release,
pause_unit,
@ -206,45 +193,6 @@ LEGACY_FILES_MAP = {
LEGACY_RES_MAP = ['res_monitor']
L3HA_PACKAGES = ['keepalived', 'conntrack']
BASE_GIT_PACKAGES = [
'arping',
'dnsmasq',
'libffi-dev',
'libssl-dev',
'libxml2-dev',
'libxslt1-dev',
'libyaml-dev',
'openstack-pkg-tools',
'python-dev',
'python-pip',
'python-setuptools',
'zlib1g-dev',
]
# ubuntu packages that should not be installed when deploying from git
GIT_PACKAGE_BLACKLIST = [
'nova-api-metadata',
'neutron-common',
'neutron-dhcp-agent',
'neutron-l3-agent',
'neutron-lbaas-agent',
'neutron-metadata-agent',
'neutron-metering-agent',
'neutron-plugin-cisco',
'neutron-plugin-metering-agent',
'neutron-plugin-openvswitch-agent',
'neutron-openvswitch-agent',
'neutron-vpn-agent',
'python-neutron-fwaas',
'python-oslo.config',
'python-pymysql',
'quantum-common',
'quantum-dhcp-agent',
'quantum-l3-agent',
'quantum-metadata-agent',
'quantum-plugin-openvswitch-agent',
]
# The interface is said to be satisfied if anyone of the interfaces in the
# list has a complete context.
REQUIRED_INTERFACES = {
@ -297,14 +245,6 @@ def get_packages():
packages.append('neutron-lbaasv2-agent')
packages.extend(determine_l3ha_packages())
if git_install_requested():
packages = list(set(packages))
packages.extend(BASE_GIT_PACKAGES)
# don't include packages that will be installed from git
for p in GIT_PACKAGE_BLACKLIST:
if p in packages:
packages.remove(p)
return packages
@ -886,453 +826,6 @@ def cleanup_ovs_netns():
log('Faild to cleanup ovs and netns, %s' % e, level=ERROR)
def git_install(projects_yaml):
"""Perform setup, and install git repos specified in yaml parameter."""
if git_install_requested():
git_pre_install()
projects_yaml = git_default_repos(projects_yaml)
git_clone_and_install(projects_yaml, core_project='neutron')
git_post_install(projects_yaml)
def git_pre_install():
"""Perform pre-install setup."""
dirs = [
'/etc/neutron',
'/etc/neutron/rootwrap.d',
'/etc/neutron/plugins',
'/etc/nova',
'/var/lib/neutron',
'/var/lib/neutron/lock',
'/var/log/neutron',
'/var/lib/nova',
'/var/log/nova',
]
logs = [
'/var/log/neutron/bigswitch-agent.log',
'/var/log/neutron/dhcp-agent.log',
'/var/log/neutron/l3-agent.log',
'/var/log/neutron/lbaas-agent.log',
'/var/log/neutron/ibm-agent.log',
'/var/log/neutron/linuxbridge-agent.log',
'/var/log/neutron/metadata-agent.log',
'/var/log/neutron/metering_agent.log',
'/var/log/neutron/mlnx-agent.log',
'/var/log/neutron/nec-agent.log',
'/var/log/neutron/nvsd-agent.log',
'/var/log/neutron/openflow-agent.log',
'/var/log/neutron/openvswitch-agent.log',
'/var/log/neutron/ovs-cleanup.log',
'/var/log/neutron/ryu-agent.log',
'/var/log/neutron/server.log',
'/var/log/neutron/sriov-agent.log',
'/var/log/neutron/vpn_agent.log',
]
adduser('neutron', shell='/bin/bash', system_user=True)
add_group('neutron', system_group=True)
add_user_to_group('neutron', 'neutron')
adduser('nova', shell='/bin/bash', system_user=True)
subprocess.check_call(['usermod', '--home', '/var/lib/nova', 'nova'])
add_group('nova', system_group=True)
add_user_to_group('nova', 'nova')
for d in dirs:
mkdir(d, owner='neutron', group='neutron', perms=0o755, force=False)
for l in logs:
write_file(l, '', owner='neutron', group='neutron', perms=0o644)
def git_post_install(projects_yaml):
"""Perform post-install setup."""
etc_neutron = os.path.join(git_src_dir(projects_yaml, 'neutron'), 'etc')
etc_nova = os.path.join(git_src_dir(projects_yaml, 'nova'), 'etc/nova')
configs = [
{'src': etc_neutron,
'dest': '/etc/neutron'},
{'src': os.path.join(etc_neutron, 'neutron/plugins'),
'dest': '/etc/neutron/plugins'},
{'src': os.path.join(etc_neutron, 'neutron/rootwrap.d'),
'dest': '/etc/neutron/rootwrap.d'},
{'src': etc_nova,
'dest': '/etc/nova'},
{'src': os.path.join(etc_nova, 'rootwrap.d'),
'dest': '/etc/nova/rootwrap.d'},
]
for c in configs:
if os.path.exists(c['dest']):
shutil.rmtree(c['dest'])
shutil.copytree(c['src'], c['dest'])
# NOTE(coreycb): Need to find better solution than bin symlinks.
symlinks = [
{'src': os.path.join(git_pip_venv_dir(projects_yaml),
'bin/neutron-ns-metadata-proxy'),
'link': '/usr/local/bin/neutron-ns-metadata-proxy'},
{'src': os.path.join(git_pip_venv_dir(projects_yaml),
'bin/neutron-rootwrap'),
'link': '/usr/local/bin/neutron-rootwrap'},
{'src': '/usr/local/bin/neutron-rootwrap',
'link': '/usr/bin/neutron-rootwrap'},
{'src': os.path.join(git_pip_venv_dir(projects_yaml),
'bin/nova-rootwrap'),
'link': '/usr/local/bin/nova-rootwrap'},
{'src': os.path.join(git_pip_venv_dir(projects_yaml),
'bin/nova-rootwrap-daemon'),
'link': '/usr/local/bin/nova-rootwrap-daemon'},
]
for s in symlinks:
if os.path.lexists(s['link']):
os.remove(s['link'])
os.symlink(s['src'], s['link'])
render('git/neutron_sudoers',
'/etc/sudoers.d/neutron_sudoers', {}, perms=0o440)
render('git/nova_sudoers',
'/etc/sudoers.d/nova_sudoers', {}, perms=0o440)
render('git/cron.d/neutron-dhcp-agent-netns-cleanup',
'/etc/cron.d/neutron-dhcp-agent-netns-cleanup', {}, perms=0o755)
render('git/cron.d/neutron-l3-agent-netns-cleanup',
'/etc/cron.d/neutron-l3-agent-netns-cleanup', {}, perms=0o755)
render('git/cron.d/neutron-lbaas-agent-netns-cleanup',
'/etc/cron.d/neutron-lbaas-agent-netns-cleanup', {}, perms=0o755)
bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
# Use systemd init units/scripts from ubuntu wily onward
if cmp_host_release >= 'wily':
templates_dir = os.path.join(charm_dir(), 'templates/git')
daemons = ['neutron-dhcp-agent', 'neutron-l3-agent',
'neutron-lbaasv2-agent',
'neutron-linuxbridge-agent', 'neutron-linuxbridge-cleanup',
'neutron-macvtap-agent', 'neutron-metadata-agent',
'neutron-metering-agent', 'neutron-openvswitch-agent',
'neutron-ovs-cleanup', 'neutron-server',
'neutron-sriov-nic-agent', 'neutron-vpn-agent',
'nova-api-metadata']
if cmp_os_release <= 'mitaka':
daemons.append('neutron-lbaas-agent')
for daemon in daemons:
neutron_context = {
'daemon_path': os.path.join(bin_dir, daemon),
}
filename = daemon
if daemon == 'neutron-sriov-nic-agent':
filename = 'neutron-sriov-agent'
elif daemon == 'neutron-openvswitch-agent':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
template_file = 'git/{}.init.in.template'.format(filename)
init_in_file = '{}.init.in'.format(filename)
render(template_file, os.path.join(templates_dir, init_in_file),
neutron_context, perms=0o644)
git_generate_systemd_init_files(templates_dir)
for daemon in daemons:
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
service('enable', filename)
else:
service_name = 'quantum-gateway'
user_name = 'neutron'
neutron_api_context = {
'service_description': 'Neutron API server',
'service_name': service_name,
'process_name': 'neutron-server',
'executable_name': os.path.join(bin_dir, 'neutron-server'),
}
neutron_dhcp_agent_context = {
'service_description': 'Neutron DHCP Agent',
'service_name': service_name,
'process_name': 'neutron-dhcp-agent',
'executable_name': os.path.join(bin_dir, 'neutron-dhcp-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/dhcp_agent.ini'],
'log_file': '/var/log/neutron/dhcp-agent.log',
}
neutron_l3_agent_context = {
'service_description': 'Neutron L3 Agent',
'service_name': service_name,
'process_name': 'neutron-l3-agent',
'executable_name': os.path.join(bin_dir, 'neutron-l3-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/l3_agent.ini',
'/etc/neutron/fwaas_driver.ini'],
'log_file': '/var/log/neutron/l3-agent.log',
}
neutron_lbaas_agent_context = {
'service_description': 'Neutron LBaaS Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-lbaas-agent',
'executable_name': os.path.join(bin_dir, 'neutron-lbaas-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/lbaas_agent.ini'],
'log_file': '/var/log/neutron/lbaas-agent.log',
}
neutron_metadata_agent_context = {
'service_description': 'Neutron Metadata Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-metadata-agent',
'executable_name': os.path.join(bin_dir, 'neutron-metadata-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/metadata_agent.ini'],
'log_file': '/var/log/neutron/metadata-agent.log',
}
neutron_metering_agent_context = {
'service_description': 'Neutron Metering Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-metering-agent',
'executable_name': os.path.join(bin_dir, 'neutron-metering-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/metering_agent.ini'],
'log_file': '/var/log/neutron/metering-agent.log',
}
neutron_ovs_cleanup_context = {
'service_description': 'Neutron OVS cleanup',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ovs-cleanup',
'executable_name': os.path.join(bin_dir, 'neutron-ovs-cleanup'),
'config_file': '/etc/neutron/neutron.conf',
'log_file': '/var/log/neutron/ovs-cleanup.log',
}
neutron_plugin_bigswitch_context = {
'service_description': 'Neutron BigSwitch Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-restproxy-agent',
'executable_name': os.path.join(bin_dir,
'neutron-restproxy-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/bigswitch/restproxy.ini'],
'log_file': '/var/log/neutron/bigswitch-agent.log',
}
neutron_plugin_ibm_context = {
'service_description': 'Neutron IBM SDN Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ibm-agent',
'executable_name': os.path.join(bin_dir, 'neutron-ibm-agent'),
'config_files':
['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini'],
'log_file': '/var/log/neutron/ibm-agent.log',
}
neutron_plugin_linuxbridge_context = {
'service_description': 'Neutron Linux Bridge Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-linuxbridge-agent',
'executable_name': os.path.join(bin_dir,
'neutron-linuxbridge-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf.ini'],
'log_file': '/var/log/neutron/linuxbridge-agent.log',
}
neutron_plugin_mlnx_context = {
'service_description': 'Neutron MLNX Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-mlnx-agent',
'executable_name': os.path.join(bin_dir, 'neutron-mlnx-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/mlnx/mlnx_conf.ini'],
'log_file': '/var/log/neutron/mlnx-agent.log',
}
neutron_plugin_nec_context = {
'service_description': 'Neutron NEC Plugin Agent',
'service_name': service_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-nec-agent',
'executable_name': os.path.join(bin_dir, 'neutron-nec-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/nec/nec.ini'],
'log_file': '/var/log/neutron/nec-agent.log',
}
neutron_plugin_oneconvergence_context = {
'service_description': 'Neutron One Convergence Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-nvsd-agent',
'executable_name': os.path.join(bin_dir, 'neutron-nvsd-agent'),
'config_files':
['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/oneconvergence/nvsdplugin.ini'],
'log_file': '/var/log/neutron/nvsd-agent.log',
}
neutron_plugin_openflow_context = {
'service_description': 'Neutron OpenFlow Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ofagent-agent',
'executable_name': os.path.join(bin_dir, 'neutron-ofagent-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_ofa.ini'],
'log_file': '/var/log/neutron/openflow-agent.log',
}
neutron_plugin_openvswitch_context = {
'service_description': 'Neutron OpenvSwitch Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-openvswitch-agent',
'executable_name': os.path.join(bin_dir,
'neutron-openvswitch-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf.ini'],
'log_file': '/var/log/neutron/openvswitch-agent.log',
}
neutron_plugin_ryu_context = {
'service_description': 'Neutron RYU Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ryu-agent',
'executable_name': os.path.join(bin_dir, 'neutron-ryu-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ryu/ryu.ini'],
'log_file': '/var/log/neutron/ryu-agent.log',
}
neutron_plugin_sriov_context = {
'service_description': 'Neutron SRIOV SDN Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-sriov-nic-agent',
'executable_name': os.path.join(bin_dir,
'neutron-sriov-nic-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_sriov'],
'log_file': '/var/log/neutron/sriov-agent.log',
}
neutron_vpn_agent_context = {
'service_description': 'Neutron VPN Agent',
'service_name': service_name,
'process_name': 'neutron-vpn-agent',
'executable_name': os.path.join(bin_dir, 'neutron-vpn-agent'),
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/vpn_agent.ini',
'/etc/neutron/l3_agent.ini',
'/etc/neutron/fwaas_driver.ini'],
'log_file': '/var/log/neutron/vpn_agent.log',
}
service_name = 'nova-compute'
nova_user = 'nova'
start_dir = '/var/lib/nova'
nova_conf = '/etc/nova/nova.conf'
nova_api_metadata_context = {
'service_description': 'Nova Metadata API server',
'service_name': service_name,
'user_name': nova_user,
'start_dir': start_dir,
'process_name': 'nova-api-metadata',
'executable_name': os.path.join(bin_dir, 'nova-api-metadata'),
'config_files': [nova_conf],
}
templates_dir = 'hooks/charmhelpers/contrib/openstack/templates'
templates_dir = os.path.join(charm_dir(), templates_dir)
render('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-dhcp-agent.conf',
neutron_dhcp_agent_context, perms=0o644)
render('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-l3-agent.conf',
neutron_l3_agent_context, perms=0o644)
render('git.upstart',
'/etc/init/neutron-lbaas-agent.conf',
neutron_lbaas_agent_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-metadata-agent.conf',
neutron_metadata_agent_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-metering-agent.conf',
neutron_metering_agent_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-bigswitch-agent.conf',
neutron_plugin_bigswitch_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-ibm-agent.conf',
neutron_plugin_ibm_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-linuxbridge-agent.conf',
neutron_plugin_linuxbridge_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-mlnx-agent.conf',
neutron_plugin_mlnx_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-nec-agent.conf',
neutron_plugin_nec_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-oneconvergence-agent.conf',
neutron_plugin_oneconvergence_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-openflow-agent.conf',
neutron_plugin_openflow_context, perms=0o644,
templates_dir=templates_dir)
if cmp_os_release < 'mitaka':
render('git.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_plugin_openvswitch_context, perms=0o644,
templates_dir=templates_dir)
else:
render('git.upstart',
'/etc/init/neutron-openvswitch-agent.conf',
neutron_plugin_openvswitch_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-ryu-agent.conf',
neutron_plugin_ryu_context, perms=0o644,
templates_dir=templates_dir)
render('git.upstart',
'/etc/init/neutron-plugin-sriov-agent.conf',
neutron_plugin_sriov_context, perms=0o644,
templates_dir=templates_dir)
render('git/upstart/neutron-server.upstart',
'/etc/init/neutron-server.conf',
neutron_api_context, perms=0o644)
render('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-vpn-agent.conf',
neutron_vpn_agent_context, perms=0o644)
render('git.upstart',
'/etc/init/nova-api-metadata.conf',
nova_api_metadata_context, perms=0o644,
templates_dir=templates_dir)
def get_optional_interfaces():
"""Return the optional interfaces that should be checked if the relavent
relations have appeared.

View File

@ -1,6 +1,4 @@
import amulet
import os
import yaml
import time
import subprocess
import json
@ -24,12 +22,11 @@ u = OpenStackAmuletUtils(DEBUG)
class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
"""Amulet tests on a basic neutron-gateway deployment."""
def __init__(self, series, openstack=None, source=None, git=False,
def __init__(self, series, openstack=None, source=None,
stable=False):
"""Deploy the entire test environment."""
super(NeutronGatewayBasicDeployment, self).__init__(series, openstack,
source, stable)
self.git = git
self._add_services()
self._add_relations()
self._configure_services()
@ -98,58 +95,6 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
def _configure_services(self):
"""Configure all of the services."""
neutron_gateway_config = {'aa-profile-mode': 'enforce'}
if self.git:
amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY')
branch = 'stable/' + self._get_openstack_release_string()
if self._get_openstack_release() >= self.trusty_kilo:
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://github.com/openstack/requirements', # noqa
'branch': branch},
{'name': 'neutron-fwaas',
'repository': 'git://github.com/openstack/neutron-fwaas', # noqa
'branch': branch},
{'name': 'neutron-lbaas',
'repository': 'git://github.com/openstack/neutron-lbaas', # noqa
'branch': branch},
{'name': 'neutron-vpnaas',
'repository': 'git://github.com/openstack/neutron-vpnaas', # noqa
'branch': branch},
{'name': 'neutron',
'repository': 'git://github.com/openstack/neutron',
'branch': branch},
],
'directory': '/mnt/openstack-git',
'http_proxy': amulet_http_proxy,
'https_proxy': amulet_http_proxy,
}
else:
reqs_repo = 'git://github.com/openstack/requirements'
neutron_repo = 'git://github.com/openstack/neutron'
if self._get_openstack_release() == self.trusty_icehouse:
reqs_repo = 'git://github.com/coreycb/requirements'
neutron_repo = 'git://github.com/coreycb/neutron'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': reqs_repo,
'branch': branch},
{'name': 'neutron',
'repository': neutron_repo,
'branch': branch},
],
'directory': '/mnt/openstack-git',
'http_proxy': amulet_http_proxy,
'https_proxy': amulet_http_proxy,
}
neutron_gateway_config['openstack-origin-git'] = \
yaml.dump(openstack_origin_git)
keystone_config = {
'admin-password': 'openstack',
'admin-token': 'ubuntutesting',
@ -373,7 +318,7 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
if self._get_openstack_release() >= self.xenial_ocata:
# Ocata or later
expected['service_username'] = 'placement_nova'
expected['service_username'] = 'nova_placement'
elif self._get_openstack_release() >= self.trusty_kilo:
# Kilo or later
expected['service_username'] = 'nova'

View File

@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
pool_name = df['pools'][pool_id]['name']
obj_count = df['pools'][pool_id]['stats']['objects']
kb_used = df['pools'][pool_id]['stats']['kb_used']
for pool in df['pools']:
if pool['id'] == pool_id:
pool_name = pool['name']
obj_count = pool['stats']['objects']
kb_used = pool['stats']['kb_used']
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))

View File

@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
import shutil
import six
import traceback
@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
service_name,
status_set,
hook_name,
application_version_set,
@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
from charmhelpers.contrib.python.packages import (
pip_create_virtualenv,
pip_install,
)
from charmhelpers.core.host import (
lsb_release,
mounts,
@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
GIT_DEFAULT_REPOS = {
'requirements': 'git://github.com/openstack/requirements',
'cinder': 'git://github.com/openstack/cinder',
'glance': 'git://github.com/openstack/glance',
'horizon': 'git://github.com/openstack/horizon',
'keystone': 'git://github.com/openstack/keystone',
'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
'neutron': 'git://github.com/openstack/neutron',
'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
'nova': 'git://github.com/openstack/nova',
}
GIT_DEFAULT_BRANCHES = {
'liberty': 'stable/liberty',
'mitaka': 'stable/mitaka',
'newton': 'stable/newton',
'master': 'master',
}
DEFAULT_LOOPBACK_SIZE = '5G'
@ -392,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
if six.PY3:
ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@ -528,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@ -769,417 +741,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
def git_install_requested():
"""
Returns true if openstack-origin-git is specified.
"""
return config('openstack-origin-git') is not None
def git_os_codename_install_source(projects_yaml):
"""
Returns OpenStack codename of release being installed from source.
"""
if git_install_requested():
projects = _git_yaml_load(projects_yaml)
if projects in GIT_DEFAULT_BRANCHES.keys():
if projects == 'master':
return 'ocata'
return projects
if 'release' in projects:
if projects['release'] == 'master':
return 'ocata'
return projects['release']
return None
def git_default_repos(projects_yaml):
"""
Returns default repos if a default openstack-origin-git value is specified.
"""
service = service_name()
core_project = service
for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
if projects_yaml == default:
# add the requirements repo first
repo = {
'name': 'requirements',
'repository': GIT_DEFAULT_REPOS['requirements'],
'branch': branch,
}
repos = [repo]
# neutron-* and nova-* charms require some additional repos
if service in ['neutron-api', 'neutron-gateway',
'neutron-openvswitch']:
core_project = 'neutron'
if service == 'neutron-api':
repo = {
'name': 'networking-hyperv',
'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
'branch': branch,
}
repos.append(repo)
for project in ['neutron-fwaas', 'neutron-lbaas',
'neutron-vpnaas', 'nova']:
repo = {
'name': project,
'repository': GIT_DEFAULT_REPOS[project],
'branch': branch,
}
repos.append(repo)
elif service in ['nova-cloud-controller', 'nova-compute']:
core_project = 'nova'
repo = {
'name': 'neutron',
'repository': GIT_DEFAULT_REPOS['neutron'],
'branch': branch,
}
repos.append(repo)
elif service == 'openstack-dashboard':
core_project = 'horizon'
# finally add the current service's core project repo
repo = {
'name': core_project,
'repository': GIT_DEFAULT_REPOS[core_project],
'branch': branch,
}
repos.append(repo)
return yaml.dump(dict(repositories=repos, release=default))
return projects_yaml
def _git_yaml_load(projects_yaml):
"""
Load the specified yaml into a dictionary.
"""
if not projects_yaml:
return None
return yaml.load(projects_yaml)
requirements_dir = None
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
The expected format of projects_yaml is:
repositories:
- {name: keystone,
repository: 'git://git.openstack.org/openstack/keystone.git',
branch: 'stable/icehouse'}
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
http_proxy: squid-proxy-url
https_proxy: squid-proxy-url
The directory, http_proxy, and https_proxy keys are optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
http_proxy = None
projects = _git_yaml_load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
old_environ = dict(os.environ)
if 'http_proxy' in projects.keys():
http_proxy = projects['http_proxy']
os.environ['http_proxy'] = projects['http_proxy']
if 'https_proxy' in projects.keys():
os.environ['https_proxy'] = projects['https_proxy']
if 'directory' in projects.keys():
parent_dir = projects['directory']
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
# Upgrade setuptools and pip from default virtualenv versions. The default
# versions in trusty break master OpenStack branch deployments.
for p in ['pip', 'setuptools']:
pip_install(p, upgrade=True, proxy=http_proxy,
venv=os.path.join(parent_dir, 'venv'))
constraints = None
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
depth = '1'
if 'depth' in p.keys():
depth = p['depth']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=False)
requirements_dir = repo_dir
constraints = os.path.join(repo_dir, "upper-constraints.txt")
# upper-constraints didn't exist until after icehouse
if not os.path.isfile(constraints):
constraints = None
# use constraints unless project yaml sets use_constraints to false
if 'use_constraints' in projects.keys():
if not projects['use_constraints']:
constraints = None
else:
repo_dir = _git_clone_and_install_single(repo, branch, depth,
parent_dir, http_proxy,
update_requirements=True,
constraints=constraints)
os.environ = old_environ
def _git_validate_projects_yaml(projects, core_project):
"""
Validate the projects yaml.
"""
_git_ensure_key_exists('repositories', projects)
for project in projects['repositories']:
_git_ensure_key_exists('name', project.keys())
_git_ensure_key_exists('repository', project.keys())
_git_ensure_key_exists('branch', project.keys())
if projects['repositories'][0]['name'] != 'requirements':
error_out('{} git repo must be specified first'.format('requirements'))
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
_git_ensure_key_exists('release', projects)
def _git_ensure_key_exists(key, keys):
"""
Ensure that key exists in keys.
"""
if key not in keys:
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
update_requirements, constraints=None):
"""
Clone and install a single git repository.
"""
if not os.path.exists(parent_dir):
juju_log('Directory does not exist at {}. '
'Creating it before cloning.'.format(parent_dir))
os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv')
if update_requirements:
if not requirements_dir:
error_out('requirements repo must be cloned before '
'updating from global requirements.')
_git_update_requirements(venv, repo_dir, requirements_dir)
juju_log('Installing git repo from dir: {}'.format(repo_dir))
if http_proxy:
pip_install(repo_dir, proxy=http_proxy, venv=venv,
constraints=constraints)
else:
pip_install(repo_dir, venv=venv, constraints=constraints)
return repo_dir
def _git_update_requirements(venv, package_dir, reqs_dir):
"""
Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt.
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
python = os.path.join(venv, 'bin/python')
cmd = [python, 'update.py', package_dir]
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from "
"global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_pip_venv_dir(projects_yaml):
"""
Return the pip virtualenv path.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
return os.path.join(parent_dir, 'venv')
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
projects = _git_yaml_load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
for p in projects['repositories']:
if p['name'] == project:
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None
def git_yaml_value(projects_yaml, key):
"""
Return the value in projects_yaml for the specified key.
"""
projects = _git_yaml_load(projects_yaml)
if key in projects.keys():
return projects[key]
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
# Create the init script and systemd unit file from the template
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = ('/usr/share/openstack-pkg-tools/'
'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(init_source, init_dest)
shutil.copyfile(service_source, service_dest)
os.chmod(init_dest, 0o755)
for f in os.listdir(templates_dir):
# If there's a service.in file, use it instead of the generated one
if f.endswith(".service.in"):
service_in_file = f
service_file = f[:-3]
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(service_in_source, service_source)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
for f in os.listdir(templates_dir):
# Generate the systemd unit if there's no existing .service.in
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_in_file = "{}.service.in".format(init_file)
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
service_in_source = os.path.join(templates_dir, service_in_file)
service_source = os.path.join(templates_dir, service_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
if not os.path.exists(service_in_source):
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.copyfile(service_source, service_dest)
def git_determine_usr_bin():
"""Return the /usr/bin path for Apache2 config.
The /usr/bin path will be located in the virtualenv if the charm
is configured to deploy from source.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
else:
return '/usr/bin'
def git_determine_python_path():
"""Return the python-path for Apache2 config.
Returns 'None' unless the charm is configured to deploy from source,
in which case the path of the virtualenv's site-packages is returned.
"""
if git_install_requested():
projects_yaml = config('openstack-origin-git')
projects_yaml = git_default_repos(projects_yaml)
return os.path.join(git_pip_venv_dir(projects_yaml),
'lib/python2.7/site-packages')
else:
return None
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@ -1613,27 +1174,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
if git_install_requested():
action_set({'outcome': 'installed from source, skipped upgrade.'})
else:
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
if openstack_upgrade_available(package):
if config('action-managed-upgrade'):
juju_log('Upgrading OpenStack release')
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
try:
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
'unexpected error')
else:
action_set({'outcome': 'no upgrade available.'})
action_set({'outcome': 'action-managed-upgrade config is '
'False, skipped upgrade.'})
else:
action_set({'outcome': 'no upgrade available.'})
return ret
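With the source-install branch gone, the helper now gates only on openstack_upgrade_available and the action-managed-upgrade option. A minimal sketch of how a charm's openstack-upgrade action might call it, assuming this charm's do_openstack_upgrade and register_configs helpers and an illustrative package name (none of these are part of this hunk):
# Illustrative sketch only; do_openstack_upgrade, register_configs and the
# package name are assumptions for the example, not part of this change.
from charmhelpers.contrib.openstack.utils import do_action_openstack_upgrade
from neutron_utils import do_openstack_upgrade, register_configs
# Performs the upgrade only when a newer origin is available and
# action-managed-upgrade is True; returns True if an upgrade actually ran.
do_action_openstack_upgrade('neutron-common',
                            do_openstack_upgrade,
                            register_configs())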
@ -2043,14 +1601,25 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
:param filename: json filename (i.e.: /etc/glance/policy.json)
:param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
if not items:
return
with open(filename) as fd:
policy = json.load(fd)
# Compare before and after and if nothing has changed don't write the file
# since that could cause unnecessary service restarts.
before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
after = json.dumps(policy, indent=4, sort_keys=True)
if before == after:
return
with open(filename, "w") as fd:
fd.write(json.dumps(policy, indent=4))
fd.write(after)
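The helper now serialises the policy before and after the merge and skips the write when nothing changed, so unchanged files no longer trigger needless restarts. A minimal usage sketch; the policy entry is illustrative:
# Illustrative usage sketch only.
update_json_file('/etc/glance/policy.json', {'add_image': 'role:admin'})
# A second call with the same items is now a no-op: the merged content is
# identical, so the file is not rewritten and no service restart follows.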
@cached

View File

@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
if valid_type is six.string_types:
if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
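Dispatching on the value's actual type means string values are now checked against the whitelist as intended. A minimal sketch, assuming the usual type assertion earlier in the function; the ranges are illustrative:
# Illustrative sketch only.
validator('host', six.string_types,
          valid_range=['chassis', 'host', 'osd', 'rack'])  # passes
validator('cabinet', six.string_types,
          valid_range=['host', 'rack'])  # raises AssertionError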
@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
version = ceph_version()
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
'ruleset_failure_domain=' + failure_domain]
'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
# failure_domain changed in luminous
if version and version >= '12.0.0':
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
# Add plugin specific information
if locality is not None:
# For local erasure codes
@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
permission=None, key_name=None):
permission=None, key_name=None,
object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
allowing the key to access only the specified pools
allowing the key to access only the specified pools or
object prefixes. object_prefix_permissions should be a dictionary
keyed on the permission with the corresponding value being a list
of prefixes to apply that permission to.
{
'rwx': ['prefix1', 'prefix2'],
'class-read': ['prefix3']}
"""
self.ops.append({'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace, 'name': key_name or service_name(),
'group-permission': permission})
self.ops.append({
'op': 'add-permissions-to-key', 'group': name,
'namespace': namespace,
'name': key_name or service_name(),
'group-permission': permission,
'object-prefix-permissions': object_prefix_permissions})
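For illustration, a broker request that restricts the unit's key to specific object prefixes might be built as follows; the group name is made up for the example and the prefixes mirror the docstring above:
# Illustrative sketch only.
rq = CephBrokerRq()
rq.add_op_request_access_to_group(
    name='images',
    permission='rwx',
    object_prefix_permissions={'rwx': ['prefix1', 'prefix2'],
                               'class-read': ['prefix3']})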
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
for key in [
'replicas', 'name', 'op', 'pg_num', 'weight',
'group', 'group-namespace', 'group-permission',
'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:

View File

@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from subprocess import (
CalledProcessError,
check_call,
@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
def list_logical_volumes(select_criteria=None, path_mode=False):
'''
List logical volumes
:param select_criteria: str: Limit the list to volumes matching these
criteria (see 'lvs -S help' for more details)
:param path_mode: bool: return logical volume names in 'vg/lv' format; this
format is required by some commands, e.g. lvextend
:returns: [str]: List of logical volumes
'''
lv_display_attr = 'lv_name'
if path_mode:
# Parsing output logic relies on the column order
lv_display_attr = 'vg_name,' + lv_display_attr
cmd = ['lvs', '--options', lv_display_attr, '--noheadings']
if select_criteria:
cmd.extend(['--select', select_criteria])
lvs = []
for lv in check_output(cmd).decode('UTF-8').splitlines():
if not lv:
continue
if path_mode:
lvs.append('/'.join(lv.strip().split()))
else:
lvs.append(lv.strip())
return lvs
list_thin_logical_volume_pools = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^t')
list_thin_logical_volumes = functools.partial(
list_logical_volumes,
select_criteria='lv_attr =~ ^V')
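A brief usage sketch of the new listing helpers; the results depend on the host, and path_mode matters when the output will be fed to lvextend:
# Illustrative sketch only.
list_logical_volumes()                          # all LVs, by name
list_thin_logical_volume_pools(path_mode=True)  # thin pools as 'vg/lv'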
def extend_logical_volume_by_device(lv_name, block_device):
'''
Extends the size of logical volume lv_name by the amount of free space on
physical volume block_device.
:param lv_name: str: name of logical volume to be extended (vg/lv format)
:param block_device: str: name of block_device to be allocated to lv_name
'''
cmd = ['lvextend', lv_name, block_device]
check_call(cmd)
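The extend helper expects the 'vg/lv' form produced by path_mode=True; the volume group and device names below are illustrative:
# Illustrative sketch only: grow an assumed cinder-volumes/pool LV with the
# free space of an assumed /dev/vdb physical volume.
extend_logical_volume_by_device('cinder-volumes/pool', '/dev/vdb')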

View File

@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
'bionic',
)

View File

@ -1,9 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic quantum-gateway git deployment on trusty-icehouse."""
from basic_deployment import NeutronGatewayBasicDeployment
if __name__ == '__main__':
deployment = NeutronGatewayBasicDeployment(series='trusty', git=True)
deployment.run_tests()

View File

@ -1,12 +0,0 @@
#!/usr/bin/env python
"""Amulet tests on a basic neutron-gateway git deployment on trusty-kilo."""
from basic_deployment import NeutronGatewayBasicDeployment
if __name__ == '__main__':
deployment = NeutronGatewayBasicDeployment(series='trusty',
openstack='cloud:trusty-kilo',
source='cloud:trusty-updates/kilo',
git=True)
deployment.run_tests()

View File

@ -1,103 +0,0 @@
import sys
from mock import patch, MagicMock
from test_utils import CharmTestCase
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
sys.modules['apt'] = MagicMock()
sys.modules['apt_pkg'] = MagicMock()
with patch('charmhelpers.core.hookenv.config'):
with patch('neutron_utils.restart_map'):
with patch('neutron_utils.register_configs'):
with patch('charmhelpers.contrib.hardening.harden.harden') as \
mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs:
f(*args, **kwargs))
with patch('charmhelpers.core.hookenv.status_set'):
import git_reinstall
TO_PATCH = [
'config',
]
openstack_origin_git = \
"""repositories:
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements',
branch: stable/juno}
- {name: neutron,
repository: 'git://git.openstack.org/openstack/neutron',
branch: stable/juno}"""
class TestNeutronAPIActions(CharmTestCase):
def setUp(self):
super(TestNeutronAPIActions, self).setUp(git_reinstall, TO_PATCH)
self.config.side_effect = self.test_config.get
@patch.object(git_reinstall, 'action_set')
@patch.object(git_reinstall, 'action_fail')
@patch.object(git_reinstall, 'git_install')
@patch.object(git_reinstall, 'config_changed')
def test_git_reinstall(self, config_changed, git_install, action_fail,
action_set):
self.test_config.set('openstack-origin-git', openstack_origin_git)
git_reinstall.git_reinstall()
git_install.assert_called_with(openstack_origin_git)
self.assertTrue(git_install.called)
self.assertTrue(config_changed.called)
self.assertFalse(action_set.called)
self.assertFalse(action_fail.called)
@patch.object(git_reinstall, 'action_set')
@patch.object(git_reinstall, 'action_fail')
@patch.object(git_reinstall, 'git_install')
@patch.object(git_reinstall, 'config_changed')
@patch('charmhelpers.contrib.openstack.utils.config')
def test_git_reinstall_not_configured(self, _config, config_changed,
git_install, action_fail,
action_set):
_config.return_value = None
git_reinstall.git_reinstall()
msg = 'openstack-origin-git is not configured'
action_fail.assert_called_with(msg)
self.assertFalse(git_install.called)
self.assertFalse(action_set.called)
@patch.object(git_reinstall, 'action_set')
@patch.object(git_reinstall, 'action_fail')
@patch.object(git_reinstall, 'git_install')
@patch.object(git_reinstall, 'config_changed')
@patch('traceback.format_exc')
@patch('charmhelpers.contrib.openstack.utils.config')
def test_git_reinstall_exception(self, _config, format_exc,
config_changed, git_install, action_fail,
action_set):
_config.return_value = openstack_origin_git
e = OSError('something bad happened')
git_install.side_effect = e
traceback = (
"Traceback (most recent call last):\n"
" File \"actions/git_reinstall.py\", line 37, in git_reinstall\n"
" git_install(config(\'openstack-origin-git\'))\n"
" File \"/usr/lib/python2.7/dist-packages/mock.py\", line 964, in __call__\n" # noqa
" return _mock_self._mock_call(*args, **kwargs)\n"
" File \"/usr/lib/python2.7/dist-packages/mock.py\", line 1019, in _mock_call\n" # noqa
" raise effect\n"
"OSError: something bad happened\n")
format_exc.return_value = traceback
git_reinstall.git_reinstall()
msg = 'git-reinstall resulted in an unexpected error'
action_fail.assert_called_with(msg)
action_set.assert_called_with({'traceback': traceback})

View File

@ -7,9 +7,10 @@ from test_utils import (
os.environ['JUJU_UNIT_NAME'] = 'neutron-gateway'
with patch('charmhelpers.core.hookenv.status_set'):
with patch('neutron_utils.register_configs') as register_configs:
import openstack_upgrade
with patch('charmhelpers.core.hookenv.config'):
with patch('neutron_utils.restart_map'):
with patch('neutron_utils.register_configs'):
import openstack_upgrade
TO_PATCH = [
'do_openstack_upgrade',
@ -25,11 +26,9 @@ class TestNeutronGWUpgradeActions(CharmTestCase):
@patch('charmhelpers.contrib.openstack.utils.config')
@patch('charmhelpers.contrib.openstack.utils.action_set')
@patch('charmhelpers.contrib.openstack.utils.git_install_requested')
@patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
def test_openstack_upgrade_true(self, upgrade_avail,
action_set, config):
git_requested.return_value = False
upgrade_avail.return_value = True
config.return_value = True
@ -40,11 +39,9 @@ class TestNeutronGWUpgradeActions(CharmTestCase):
@patch('charmhelpers.contrib.openstack.utils.config')
@patch('charmhelpers.contrib.openstack.utils.action_set')
@patch('charmhelpers.contrib.openstack.utils.git_install_requested')
@patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
def test_openstack_upgrade_false(self, upgrade_avail,
action_set, config):
git_requested.return_value = False
upgrade_avail.return_value = True
config.return_value = False

View File

@ -1,7 +1,5 @@
import sys
import yaml
from mock import MagicMock, patch, call
# python-apt is not installed as part of test-requirements but is imported by
@ -10,12 +8,15 @@ sys.modules['apt'] = MagicMock()
sys.modules['apt_pkg'] = MagicMock()
import charmhelpers.core.hookenv as hookenv
with patch('charmhelpers.contrib.hardening.harden.harden') as \
mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs:
f(*args, **kwargs))
import neutron_hooks as hooks
with patch('charmhelpers.core.hookenv.config'):
with patch('neutron_utils.restart_map'):
with patch('neutron_utils.register_configs'):
with patch('charmhelpers.contrib.'
'hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
lambda *args, **kwargs:
f(*args, **kwargs))
import neutron_hooks as hooks
from test_utils import CharmTestCase
@ -30,7 +31,6 @@ TO_PATCH = [
'filter_installed_packages',
'get_early_packages',
'get_packages',
'git_install',
'log',
'do_openstack_upgrade',
'openstack_upgrade_available',
@ -111,46 +111,9 @@ class TestQuantumHooks(CharmTestCase):
self.assertTrue(self.log.called)
_exit.assert_called_with(1)
@patch('neutron_utils.git_install_requested')
def test_install_hook_git(self, git_requested):
git_requested.return_value = True
self.valid_plugin.return_value = True
_pkgs = ['foo', 'bar']
self.filter_installed_packages.return_value = _pkgs
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository': 'git://git.openstack.org/openstack/requirements', # noqa
'branch': 'stable/juno'},
{'name': 'neutron',
'repository': 'git://git.openstack.org/openstack/neutron',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
self._call_hook('install')
self.configure_installation_source.assert_called_with(
'cloud:trusty-juno'
)
self.apt_update.assert_called_with(fatal=True)
self.apt_install.assert_has_calls([
call(_pkgs, fatal=True),
call(_pkgs, fatal=True),
])
self.assertTrue(self.get_early_packages.called)
self.assertTrue(self.get_packages.called)
self.git_install.assert_called_with(projects_yaml)
self.assertTrue(self.execd_preinstall.called)
@patch.object(hooks, 'git_install_requested')
def test_config_changed(self, git_requested):
def test_config_changed(self):
def mock_relids(rel):
return ['relid']
git_requested.return_value = False
self.test_config.set('sysctl', '{ kernel.max_pid: "1337"}')
self.openstack_upgrade_available.return_value = True
self.valid_plugin.return_value = True
@ -165,18 +128,14 @@ class TestQuantumHooks(CharmTestCase):
self.assertTrue(self.create_sysctl.called)
self.configure_apparmor.assert_called_with()
@patch.object(hooks, 'git_install_requested')
def test_config_changed_upgrade(self, git_requested):
git_requested.return_value = False
def test_config_changed_upgrade(self):
self.openstack_upgrade_available.return_value = True
self.valid_plugin.return_value = True
self._call_hook('config-changed')
self.assertTrue(self.do_openstack_upgrade.called)
self.assertTrue(self.configure_ovs.called)
@patch.object(hooks, 'git_install_requested')
def test_config_changed_n1kv(self, git_requested):
git_requested.return_value = False
def test_config_changed_n1kv(self):
self.openstack_upgrade_available.return_value = False
self.valid_plugin.return_value = True
self.filter_installed_packages.side_effect = lambda p: p
@ -188,50 +147,12 @@ class TestQuantumHooks(CharmTestCase):
self.apt_purge.assert_called_with('neutron-l3-agent')
@patch('sys.exit')
@patch.object(hooks, 'git_install_requested')
def test_config_changed_invalid_plugin(self, git_requested, _exit):
git_requested.return_value = False
def test_config_changed_invalid_plugin(self, _exit):
self.valid_plugin.return_value = False
self._call_hook('config-changed')
self.assertTrue(self.log.called)
_exit.assert_called_with(1)
@patch.object(hooks, 'git_install_requested')
@patch.object(hooks, 'config_value_changed')
def test_config_changed_git(self, config_val_changed, git_requested):
def mock_relids(rel):
return ['relid']
git_requested.return_value = True
self.test_config.set('sysctl', '{ kernel.max_pid: "1337"}')
self.openstack_upgrade_available.return_value = True
self.valid_plugin.return_value = True
self.relation_ids.side_effect = mock_relids
_amqp_joined = self.patch('amqp_joined')
_amqp_nova_joined = self.patch('amqp_nova_joined')
repo = 'cloud:trusty-juno'
openstack_origin_git = {
'repositories': [
{'name': 'requirements',
'repository':
'git://git.openstack.org/openstack/requirements',
'branch': 'stable/juno'},
{'name': 'neutron',
'repository': 'git://git.openstack.org/openstack/neutron',
'branch': 'stable/juno'}
],
'directory': '/mnt/openstack-git',
}
projects_yaml = yaml.dump(openstack_origin_git)
self.test_config.set('openstack-origin', repo)
self.test_config.set('openstack-origin-git', projects_yaml)
self._call_hook('config-changed')
self.git_install.assert_called_with(projects_yaml)
self.assertFalse(self.do_openstack_upgrade.called)
self.assertTrue(self.configure_ovs.called)
self.assertTrue(_amqp_joined.called)
self.assertTrue(_amqp_nova_joined.called)
self.assertTrue(self.create_sysctl.called)
def test_upgrade_charm(self):
_install = self.patch('install')
_config_changed = self.patch('config_changed')

View File

@ -18,18 +18,15 @@ TO_PATCH = [
'apt_upgrade',
'apt_install',
'configure_installation_source',
'git_src_dir',
'log',
'add_bridge',
'add_bridge_port',
'headers_package',
'full_restart',
'os_release',
'service',
'service_running',
'NetworkServiceContext',
'ExternalPortContext',
'render',
'service_stop',
'determine_dkms_package',
'service_restart',
@ -42,15 +39,6 @@ TO_PATCH = [
'NeutronAPIContext',
]
openstack_origin_git = \
"""repositories:
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements',
branch: stable/juno}
- {name: neutron,
repository: 'git://git.openstack.org/openstack/neutron',
branch: stable/juno}"""
class TestNeutronUtils(CharmTestCase):
@ -96,41 +84,31 @@ class TestNeutronUtils(CharmTestCase):
self.assertEqual(neutron_utils.get_early_packages(),
[])
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_icehouse(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_icehouse(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'icehouse'
self.assertTrue('neutron-vpn-agent' in neutron_utils.get_packages())
self.assertFalse('neutron-l3-agent' in neutron_utils.get_packages())
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_juno_utopic(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_juno_utopic(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'juno'
self._set_distrib_codename('utopic')
self.assertFalse('neutron-vpn-agent' in neutron_utils.get_packages())
self.assertTrue('neutron-l3-agent' in neutron_utils.get_packages())
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_juno_trusty(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_juno_trusty(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'juno'
self.assertTrue('neutron-vpn-agent' in neutron_utils.get_packages())
self.assertFalse('neutron-l3-agent' in neutron_utils.get_packages())
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_kilo(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_kilo(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'kilo'
self.assertTrue('python-neutron-fwaas' in neutron_utils.get_packages())
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_liberty(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_liberty(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'liberty'
packages = neutron_utils.get_packages()
@ -139,9 +117,7 @@ class TestNeutronUtils(CharmTestCase):
self.assertFalse('python-mysqldb' in packages)
self.assertTrue('python-pymysql' in packages)
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_mitaka(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_mitaka(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'mitaka'
packages = neutron_utils.get_packages()
@ -152,9 +128,7 @@ class TestNeutronUtils(CharmTestCase):
self.assertFalse('python-mysqldb' in packages)
self.assertTrue('python-pymysql' in packages)
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovs_newton(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovs_newton(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'newton'
packages = neutron_utils.get_packages()
@ -166,9 +140,7 @@ class TestNeutronUtils(CharmTestCase):
self.assertFalse('python-mysqldb' in packages)
self.assertTrue('python-pymysql' in packages)
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovsodl_icehouse(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovsodl_icehouse(self):
self.config.return_value = 'ovs-odl'
self.os_release.return_value = 'icehouse'
packages = neutron_utils.get_packages()
@ -178,9 +150,7 @@ class TestNeutronUtils(CharmTestCase):
self.assertFalse('neutron-openvswitch-agent' in packages)
self.assertTrue('neutron-lbaas-agent' in packages)
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_ovsodl_newton(self, git_requested):
git_requested.return_value = False
def test_get_packages_ovsodl_newton(self):
self.config.return_value = 'ovs-odl'
self.os_release.return_value = 'newton'
packages = neutron_utils.get_packages()
@ -191,9 +161,7 @@ class TestNeutronUtils(CharmTestCase):
self.assertFalse('neutron-lbaas-agent' in packages)
self.assertTrue('neutron-lbaasv2-agent' in packages)
@patch.object(neutron_utils, 'git_install_requested')
def test_get_packages_l3ha(self, git_requested):
git_requested.return_value = False
def test_get_packages_l3ha(self):
self.config.return_value = 'ovs'
self.get_os_codename_install_source.return_value = 'juno'
self.os_release.return_value = 'juno'
@ -277,12 +245,10 @@ class TestNeutronUtils(CharmTestCase):
@patch.object(neutron_utils, 'register_configs')
@patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
@patch.object(neutron_utils, 'git_install_requested')
def test_do_openstack_upgrade(self, git_requested, mock_renderer,
def test_do_openstack_upgrade(self, mock_renderer,
mock_register_configs):
mock_configs = MagicMock()
mock_register_configs.return_value = mock_configs
git_requested.return_value = False
self.config.side_effect = self.test_config.get
self.is_relation_made.return_value = False
self.test_config.set('openstack-origin', 'cloud:precise-havana')
@ -759,508 +725,6 @@ class TestNeutronAgentReallocation(CharmTestCase):
# Reset cached cache
hookenv.cache = {}
@patch.object(neutron_utils, 'git_install_requested')
@patch.object(neutron_utils, 'git_clone_and_install')
@patch.object(neutron_utils, 'git_post_install')
@patch.object(neutron_utils, 'git_pre_install')
def test_git_install(self, git_pre, git_post, git_clone_and_install,
git_requested):
projects_yaml = openstack_origin_git
git_requested.return_value = True
neutron_utils.git_install(projects_yaml)
self.assertTrue(git_pre.called)
git_clone_and_install.assert_called_with(openstack_origin_git,
core_project='neutron')
self.assertTrue(git_post.called)
@patch('subprocess.check_call')
@patch.object(neutron_utils, 'mkdir')
@patch.object(neutron_utils, 'write_file')
@patch.object(neutron_utils, 'add_user_to_group')
@patch.object(neutron_utils, 'add_group')
@patch.object(neutron_utils, 'adduser')
def test_git_pre_install(self, adduser, add_group, add_user_to_group,
write_file, mkdir, check_call):
neutron_utils.git_pre_install()
expected = [
call('neutron', shell='/bin/bash', system_user=True),
call('nova', shell='/bin/bash', system_user=True),
]
self.assertEqual(adduser.call_args_list, expected)
expected = [
call('neutron', system_group=True),
call('nova', system_group=True),
]
self.assertEqual(add_group.call_args_list, expected)
expected = [
call('neutron', 'neutron'),
call('nova', 'nova'),
]
self.assertEqual(add_user_to_group.call_args_list, expected)
expected = [
call('/etc/neutron', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/etc/neutron/rootwrap.d', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/etc/neutron/plugins', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/etc/nova', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/var/lib/neutron', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/var/lib/neutron/lock', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/var/log/neutron', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/var/lib/nova', owner='neutron',
group='neutron', perms=0o755, force=False),
call('/var/log/nova', owner='neutron',
group='neutron', perms=0o755, force=False),
]
self.assertEqual(mkdir.call_args_list, expected)
expected = [
call('/var/log/neutron/bigswitch-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/dhcp-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/l3-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/lbaas-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/ibm-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/linuxbridge-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/metadata-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/metering_agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/mlnx-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/nec-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/nvsd-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/openflow-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/openvswitch-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/ovs-cleanup.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/ryu-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/server.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/sriov-agent.log', '', owner='neutron',
group='neutron', perms=0o644),
call('/var/log/neutron/vpn_agent.log', '', owner='neutron',
group='neutron', perms=0o644),
]
self.assertEqual(write_file.call_args_list, expected)
@patch('os.remove')
@patch('os.path.join')
@patch('os.path.exists')
@patch('os.symlink')
@patch('shutil.rmtree')
@patch('shutil.copyfile')
@patch('shutil.copytree')
def test_git_post_install_upstart(self, copytree, copyfile, rmtree,
symlink, exists, join, remove):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04',
'DISTRIB_CODENAME': 'vivid'}
self.os_release.return_value = 'liberty'
neutron_utils.git_post_install(projects_yaml)
expected = [
call('joined-string', '/etc/neutron'),
call('joined-string', '/etc/neutron/plugins'),
call('joined-string', '/etc/neutron/rootwrap.d'),
]
copytree.assert_has_calls(expected)
expected = [
call('/usr/local/bin/neutron-rootwrap',
'/usr/bin/neutron-rootwrap'),
]
symlink.assert_has_calls(expected)
service_name = 'quantum-gateway'
user_name = 'neutron'
neutron_api_context = {
'service_description': 'Neutron API server',
'charm_name': 'neutron-api',
'process_name': 'neutron-server',
'executable_name': 'joined-string',
}
neutron_dhcp_agent_context = {
'service_description': 'Neutron DHCP Agent',
'service_name': service_name,
'process_name': 'neutron-dhcp-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/dhcp_agent.ini'],
'log_file': '/var/log/neutron/dhcp-agent.log',
}
neutron_l3_agent_context = {
'service_description': 'Neutron L3 Agent',
'service_name': service_name,
'process_name': 'neutron-l3-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/l3_agent.ini',
'/etc/neutron/fwaas_driver.ini'],
'log_file': '/var/log/neutron/l3-agent.log',
}
neutron_lbaas_agent_context = {
'service_description': 'Neutron LBaaS Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-lbaas-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/lbaas_agent.ini'],
'log_file': '/var/log/neutron/lbaas-agent.log',
}
neutron_metadata_agent_context = {
'service_description': 'Neutron Metadata Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-metadata-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/metadata_agent.ini'],
'log_file': '/var/log/neutron/metadata-agent.log',
}
neutron_metering_agent_context = {
'service_description': 'Neutron Metering Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-metering-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/metering_agent.ini'],
'log_file': '/var/log/neutron/metering-agent.log',
}
neutron_ovs_cleanup_context = {
'service_description': 'Neutron OVS cleanup',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ovs-cleanup',
'executable_name': 'joined-string',
'config_file': '/etc/neutron/neutron.conf',
'log_file': '/var/log/neutron/ovs-cleanup.log',
}
neutron_plugin_bigswitch_context = {
'service_description': 'Neutron BigSwitch Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-restproxy-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/bigswitch/restproxy.ini'],
'log_file': '/var/log/neutron/bigswitch-agent.log',
}
neutron_plugin_ibm_context = {
'service_description': 'Neutron IBM SDN Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ibm-agent',
'executable_name': 'joined-string',
'config_files':
['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini'],
'log_file': '/var/log/neutron/ibm-agent.log',
}
neutron_plugin_linuxbridge_context = {
'service_description': 'Neutron Linux Bridge Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-linuxbridge-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf.ini'],
'log_file': '/var/log/neutron/linuxbridge-agent.log',
}
neutron_plugin_mlnx_context = {
'service_description': 'Neutron MLNX Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-mlnx-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/mlnx/mlnx_conf.ini'],
'log_file': '/var/log/neutron/mlnx-agent.log',
}
neutron_plugin_nec_context = {
'service_description': 'Neutron NEC Plugin Agent',
'service_name': service_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-nec-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/nec/nec.ini'],
'log_file': '/var/log/neutron/nec-agent.log',
}
neutron_plugin_oneconvergence_context = {
'service_description': 'Neutron One Convergence Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-nvsd-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/oneconvergence/'
'nvsdplugin.ini'],
'log_file': '/var/log/neutron/nvsd-agent.log',
}
neutron_plugin_openflow_context = {
'service_description': 'Neutron OpenFlow Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ofagent-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_ofa.ini'],
'log_file': '/var/log/neutron/openflow-agent.log',
}
neutron_plugin_openvswitch_context = {
'service_description': 'Neutron OpenvSwitch Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-openvswitch-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf.ini'],
'log_file': '/var/log/neutron/openvswitch-agent.log',
}
neutron_plugin_ryu_context = {
'service_description': 'Neutron RYU Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-ryu-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ryu/ryu.ini'],
'log_file': '/var/log/neutron/ryu-agent.log',
}
neutron_plugin_sriov_context = {
'service_description': 'Neutron SRIOV SDN Plugin Agent',
'service_name': service_name,
'user_name': user_name,
'start_dir': '/var/lib/neutron',
'process_name': 'neutron-sriov-nic-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/plugins/ml2/ml2_conf_sriov'],
'log_file': '/var/log/neutron/sriov-agent.log',
}
neutron_api_context = {
'service_description': 'Neutron API server',
'service_name': service_name,
'process_name': 'neutron-server',
'executable_name': 'joined-string',
}
neutron_vpn_agent_context = {
'service_description': 'Neutron VPN Agent',
'service_name': service_name,
'process_name': 'neutron-vpn-agent',
'executable_name': 'joined-string',
'config_files': ['/etc/neutron/neutron.conf',
'/etc/neutron/vpn_agent.ini',
'/etc/neutron/l3_agent.ini',
'/etc/neutron/fwaas_driver.ini'],
'log_file': '/var/log/neutron/vpn_agent.log',
}
nova_api_metadata_context = {
'service_description': 'Nova Metadata API server',
'service_name': 'nova-compute',
'user_name': 'nova',
'start_dir': '/var/lib/nova',
'process_name': 'nova-api-metadata',
'executable_name': 'joined-string',
'config_files': ['/etc/nova/nova.conf'],
}
expected = [
call('git/neutron_sudoers',
'/etc/sudoers.d/neutron_sudoers',
{}, perms=0o440),
call('git/nova_sudoers',
'/etc/sudoers.d/nova_sudoers',
{}, perms=0o440),
call('git/cron.d/neutron-dhcp-agent-netns-cleanup',
'/etc/cron.d/neutron-dhcp-agent-netns-cleanup',
{}, perms=0o755),
call('git/cron.d/neutron-l3-agent-netns-cleanup',
'/etc/cron.d/neutron-l3-agent-netns-cleanup',
{}, perms=0o755),
call('git/cron.d/neutron-lbaas-agent-netns-cleanup',
'/etc/cron.d/neutron-lbaas-agent-netns-cleanup',
{}, perms=0o755),
call('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-dhcp-agent.conf',
neutron_dhcp_agent_context, perms=0o644),
call('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-l3-agent.conf',
neutron_l3_agent_context, perms=0o644),
call('git.upstart',
'/etc/init/neutron-lbaas-agent.conf',
neutron_lbaas_agent_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-metadata-agent.conf',
neutron_metadata_agent_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-metering-agent.conf',
neutron_metering_agent_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-ovs-cleanup.conf',
neutron_ovs_cleanup_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-bigswitch-agent.conf',
neutron_plugin_bigswitch_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-ibm-agent.conf',
neutron_plugin_ibm_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-linuxbridge-agent.conf',
neutron_plugin_linuxbridge_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-mlnx-agent.conf',
neutron_plugin_mlnx_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-nec-agent.conf',
neutron_plugin_nec_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-oneconvergence-agent.conf',
neutron_plugin_oneconvergence_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-openflow-agent.conf',
neutron_plugin_openflow_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_plugin_openvswitch_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-ryu-agent.conf',
neutron_plugin_ryu_context, perms=0o644,
templates_dir='joined-string'),
call('git.upstart',
'/etc/init/neutron-plugin-sriov-agent.conf',
neutron_plugin_sriov_context, perms=0o644,
templates_dir='joined-string'),
call('git/upstart/neutron-server.upstart',
'/etc/init/neutron-server.conf',
neutron_api_context, perms=0o644),
call('git/upstart/neutron-agent.upstart',
'/etc/init/neutron-vpn-agent.conf',
neutron_vpn_agent_context, perms=0o644),
call('git.upstart',
'/etc/init/nova-api-metadata.conf',
nova_api_metadata_context, perms=0o644,
templates_dir='joined-string'),
]
self.assertEqual(self.render.call_args_list, expected)
@patch('os.listdir')
@patch('os.remove')
@patch('os.path.join')
@patch('os.path.exists')
@patch('os.symlink')
@patch('shutil.rmtree')
@patch('shutil.copyfile')
@patch('shutil.copytree')
def test_git_post_install_systemd(self, copytree, copyfile, rmtree,
symlink, exists, join, remove, listdir):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.10',
'DISTRIB_CODENAME': 'wily'}
self.os_release.return_value = 'newton'
neutron_utils.git_post_install(projects_yaml)
expected = [
call('git/neutron_sudoers',
'/etc/sudoers.d/neutron_sudoers',
{}, perms=288),
call('git/nova_sudoers',
'/etc/sudoers.d/nova_sudoers',
{}, perms=288),
call('git/cron.d/neutron-dhcp-agent-netns-cleanup',
'/etc/cron.d/neutron-dhcp-agent-netns-cleanup',
{}, perms=493),
call('git/cron.d/neutron-l3-agent-netns-cleanup',
'/etc/cron.d/neutron-l3-agent-netns-cleanup',
{}, perms=493),
call('git/cron.d/neutron-lbaas-agent-netns-cleanup',
'/etc/cron.d/neutron-lbaas-agent-netns-cleanup',
{}, perms=493),
call('git/neutron-dhcp-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-l3-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-lbaasv2-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-linuxbridge-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-linuxbridge-cleanup.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-macvtap-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-metadata-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-metering-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-openvswitch-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-ovs-cleanup.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-server.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-sriov-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/neutron-vpn-agent.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
call('git/nova-api-metadata.init.in.template',
'joined-string', {'daemon_path': 'joined-string'},
perms=420),
]
self.assertEqual(self.render.call_args_list, expected)
def test_assess_status(self):
with patch.object(neutron_utils, 'assess_status_func') as asf:
callee = MagicMock()