Merge branch 'master' into stable/mitaka

Change-Id: Ib3daad58e529eeb312c19f8bdd93bc279f59899d
Ilya Kharin 2016-06-22 21:15:52 +03:00
commit 24f3c39661
96 changed files with 3111 additions and 1913 deletions

View File

@@ -1,6 +1,7 @@
include AUTHORS
include ChangeLog
include octane/patches/*
include octane/bin/*
exclude .gitignore
global-exclude *.pyc

View File

@@ -11,14 +11,12 @@ fi
nova service-list --host $1
nova service-list | grep -q 'nova-compute.*enabled' && {
nova service-disable $1 nova-compute
}
nova service-list | grep -q 'nova-compute.*enabled' || {
echo "All nova-compute are disabled"
exit 3
}
if [ $(nova service-list | grep -c 'nova-compute.*enabled') -gt 1 ]; then
nova service-disable $1 nova-compute
else
echo "You can't disable last compute node"
exit 3
fi
while :; do
VMS=$(nova list --host $1 | grep -i ' active ' | wc -l)

View File

@@ -73,3 +73,17 @@ class BackupCommand(BaseBackupCommand):
class BackupRepoCommand(BaseBackupCommand):
archivators = backup_restore.REPO_ARCHIVATORS
full_archivators = backup_restore.FULL_REPO_ARCHIVATORS
def get_parser(self, *args, **kwargs):
parser = super(BackupRepoCommand, self).get_parser(*args, **kwargs)
parser.add_argument(
"--full",
action='store_true',
help="Backup all repositories")
return parser
def take_action(self, parsed_args):
if parsed_args.full:
self.archivators = self.full_archivators
super(BackupRepoCommand, self).take_action(parsed_args)
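
The --full flag above swaps in the full archivator list before delegating to the base command. A minimal, self-contained sketch of that pattern (the archivator values here are illustrative stand-ins, not the real octane objects):

import argparse

REPO_ARCHIVATORS = ["repo"]
FULL_REPO_ARCHIVATORS = ["repo", "mirror"]

class BackupRepoCommand(object):
    archivators = REPO_ARCHIVATORS
    full_archivators = FULL_REPO_ARCHIVATORS

    def take_action(self, parsed_args):
        # Same switch as in the diff: --full selects the full set.
        if parsed_args.full:
            self.archivators = self.full_archivators
        return self.archivators

parser = argparse.ArgumentParser()
parser.add_argument("--full", action="store_true",
                    help="Backup all repositories")
print(BackupRepoCommand().take_action(parser.parse_args(["--full"])))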

View File

@@ -1,70 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from cliff import command as cmd
from fuelclient import objects
from octane import magic_consts
from octane.util import env as env_util
from octane.util import node as node_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def cleanup_environment(env_id):
env = objects.Environment(env_id)
nodes = env.get_all_nodes()
for node in nodes:
node_util.remove_compute_upgrade_levels(node)
controller = env_util.get_one_controller(env)
sftp = ssh.sftp(controller)
admin_pass = env_util.get_admin_password(env, controller)
script_filename = 'clean_env.py'
with ssh.tempdir(controller) as tempdir:
script_src_filename = os.path.join(
magic_consts.CWD, "helpers", script_filename)
script_dst_filename = os.path.join(tempdir, script_filename)
sftp.put(script_src_filename, script_dst_filename)
command = [
'sh', '-c', '. /root/openrc; export OS_PASSWORD={0}; python {1}'
.format(admin_pass, script_dst_filename),
]
with ssh.popen(command, node=controller, stdin=ssh.PIPE) as proc:
roles = ["controller", "compute"]
for node in env_util.get_nodes(env, roles):
data = "{0}\n{1}\n".format(node.data['fqdn'].split('.')[0],
node.data['fqdn'])
proc.stdin.write(data)
class CleanupCommand(cmd.Command):
"""Cleanup upgraded environment"""
def get_parser(self, prog_name):
parser = super(CleanupCommand, self).get_parser(prog_name)
parser.add_argument(
'env', type=int, metavar='ENV_ID',
help="ID of environment to cleanup")
return parser
def take_action(self, parsed_args):
cleanup_environment(parsed_args.env)

View File

@@ -0,0 +1,70 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cliff import command
from fuelclient.client import APIClient
from octane.handlers import backup_restore
from octane import magic_consts
from octane.util import fuel_client
LOG = logging.getLogger(__name__)
def enable_release(release_id, context):
release_url = "/releases/{0}".format(release_id)
with fuel_client.set_auth_context(context):
data = APIClient.get_request(release_url)
state = data.get('state')
if state == magic_consts.RELEASE_STATUS_MANAGED:
data['state'] = magic_consts.RELEASE_STATUS_ENABLED
APIClient.put_request(release_url, data)
else:
exc_msg = ("Cannot enable release {0}: has status {1}, not {2}"
.format(release_id,
state,
magic_consts.RELEASE_STATUS_MANAGED))
raise Exception(exc_msg)
class EnableReleaseCommand(command.Command):
def get_parser(self, *args, **kwargs):
parser = super(EnableReleaseCommand, self).get_parser(*args, **kwargs)
parser.add_argument(
"--id",
type=str,
action="store",
dest="release_id",
required=True,
help="ID of the release to enable.")
parser.add_argument(
"--admin-password",
type=str,
action="store",
dest="admin_password",
required=True,
help="Fuel admin password")
return parser
def get_context(self, parsed_args):
return backup_restore.NailgunCredentialsContext(
password=parsed_args.admin_password,
user="admin"
)
def take_action(self, parsed_args):
assert parsed_args.release_id
assert parsed_args.admin_password
enable_release(parsed_args.release_id,
self.get_context(parsed_args))
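
enable_release() above is a guarded state transition on the /releases/{id} resource: read the release, flip its state only if it is currently in the "managed" state, and fail otherwise. A rough sketch of the same flow with plain HTTP calls (the URL scheme, token handling, and the concrete state strings are assumptions; the diff only shows the magic_consts names):

import requests

def enable_release(base_url, release_id, token,
                   managed="manageonly", enabled="available"):
    # The state strings are assumed; the code above uses
    # magic_consts.RELEASE_STATUS_MANAGED / RELEASE_STATUS_ENABLED.
    url = "{0}/releases/{1}".format(base_url, release_id)
    headers = {"X-Auth-Token": token}
    data = requests.get(url, headers=headers).json()
    if data.get("state") != managed:
        raise Exception("Cannot enable release {0}: has status {1}, "
                        "not {2}".format(release_id, data.get("state"),
                                         managed))
    data["state"] = enabled
    requests.put(url, json=data, headers=headers)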

View File

@@ -13,11 +13,11 @@
import logging
from octane.handlers.upgrade import controller as controller_upgrade
from octane.helpers import network
from octane.helpers.node_attributes import copy_disks
from octane.helpers.node_attributes import copy_ifaces
from octane import magic_consts
from octane.util import env as env_util
from octane.util import network
from octane.util import node as node_util
from cliff import command as cmd

View File

@@ -0,0 +1,104 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from cliff import command as cmd
from fuelclient.objects import node as node_obj
from octane.handlers import backup_restore
from octane import magic_consts
from octane.util import fuel_client
from octane.util import helpers
from octane.util import ssh
LOG = logging.getLogger(__name__)
def _get_backup_path(path, node):
dir_name = os.path.dirname(path)
prefix_name = os.path.basename(path)
return ssh.call_output(
[
"tempfile",
"-d", dir_name,
"-p", ".{0}".format(prefix_name),
"-s", ".bak",
],
node=node)
def upgrade_osd(env_id, user, password):
with fuel_client.set_auth_context(
backup_restore.NailgunCredentialsContext(user, password)):
nodes = [
n for n in node_obj.Node.get_all()
if "ceph-osd" in n.data["roles"] and n.data["cluster"] == env_id]
if not nodes:
LOG.info("Nothing to upgrade")
return
backup_val = [
# (node, path, backup_path)
]
admin_ip = helpers.get_astute_dict()["ADMIN_NETWORK"]["ipaddress"]
try:
hostnames = []
for node in nodes:
sftp = ssh.sftp(node)
for path, content in magic_consts.OSD_REPOS_UPDATE:
back_path = _get_backup_path(path, node)
ssh.call(["cp", path, back_path], node=node)
backup_val.append((node, path, back_path))
with ssh.update_file(sftp, path) as (_, new):
new.write(content.format(admin_ip=admin_ip))
hostnames.append(node.data["hostname"])
ssh.call(["dpkg", "--configure", "-a"], node=node)
call_node = nodes[0]
ssh.call(["ceph", "osd", "set", "noout"], node=call_node)
ssh.call(['ceph-deploy', 'install', '--release', 'hammer'] + hostnames,
node=call_node, stdout=ssh.PIPE, stderr=ssh.PIPE)
for node in nodes:
ssh.call(["restart", "ceph-osd-all"], node=node)
ssh.call(["ceph", "osd", "unset", "noout"], node=call_node)
ssh.call(["ceph", "osd", "stat"], node=call_node)
finally:
nodes_to_revert = set()
for node, path, back_path in backup_val:
ssh.call(["mv", back_path, path], node=node)
nodes_to_revert.add(node)
for node in nodes_to_revert:
ssh.call(["dpkg", "--configure", "-a"], node=node)
class UpgradeOSDCommand(cmd.Command):
"""Upgrade osd servers"""
def get_parser(self, prog_name):
parser = super(UpgradeOSDCommand, self).get_parser(prog_name)
parser.add_argument(
'env_id',
type=int,
metavar='ENV_ID',
help="ID of target environment")
parser.add_argument(
"--admin-password",
type=str,
action="store",
dest="admin_password",
required=True,
help="Fuel admin password")
return parser
def take_action(self, parsed_args):
upgrade_osd(parsed_args.env_id, 'admin', parsed_args.admin_password)
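
upgrade_osd() temporarily rewrites the repo files on every ceph-osd node and restores the originals in the finally block whether or not the upgrade succeeds, so the admin-node repos are only in effect for the duration of the package upgrade. The same shape on a local file (a hedged sketch; the real code does this over SSH on each node):

import os
import shutil
import tempfile

def with_temporary_replacement(path, new_content, action):
    # Back up *path*, rewrite it, run *action*, then restore the
    # original unconditionally -- the same try/finally shape as
    # upgrade_osd() above, where repo files point at the admin node
    # only while the ceph packages are being upgraded.
    fd, backup = tempfile.mkstemp(suffix=".bak",
                                  dir=os.path.dirname(path) or ".")
    os.close(fd)
    shutil.copy2(path, backup)
    try:
        with open(path, "w") as f:
            f.write(new_content)
        return action()
    finally:
        shutil.move(backup, path)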

View File

@@ -1,95 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
from cliff import command as cmd
from octane import magic_consts
from octane.util import archivate
from octane.util import docker
from octane.util import subprocess
def patch_puppet(revert=False):
puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
for d in os.listdir(puppet_patch_dir):
d = os.path.join(puppet_patch_dir, d)
if not os.path.isdir(d):
continue
with open(os.path.join(d, "patch")) as patch:
try:
subprocess.call(["patch", "-R", "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
except subprocess.CalledProcessError:
if not revert:
pass
else:
raise
if not revert:
patch.seek(0)
subprocess.call(["patch", "-N", "-p3"], stdin=patch,
cwd=magic_consts.PUPPET_DIR)
def apply_patches(revert=False):
for container, prefix, patch in magic_consts.PATCHES:
docker.apply_patches(container, prefix,
os.path.join(magic_consts.CWD, patch),
revert=revert)
def revert_initramfs():
backup = magic_consts.BOOTSTRAP_INITRAMFS + '.bkup'
os.rename(backup, magic_consts.BOOTSTRAP_INITRAMFS)
def patch_initramfs():
with archivate.update_cpio(magic_consts.BOOTSTRAP_INITRAMFS) as chroot:
patch_fuel_agent(chroot)
docker.run_in_container("cobbler", ["cobbler", "sync"])
def patch_fuel_agent(chroot):
patch_dir = os.path.join(magic_consts.CWD, "patches", "fuel_agent")
with open(os.path.join(patch_dir, "patch")) as patch:
subprocess.call(["patch", "-N", "-p0"], stdin=patch, cwd=chroot)
def prepare():
if not os.path.isdir(magic_consts.FUEL_CACHE):
os.makedirs(magic_consts.FUEL_CACHE)
subprocess.call(["yum", "-y", "install"] + magic_consts.PACKAGES)
# From patch_all_containers
apply_patches()
docker.run_in_container("nailgun", ["pkill", "-f", "wsgi"])
patch_initramfs()
def revert_prepare():
apply_patches(revert=True)
docker.run_in_container("nailgun", ["pkill", "-f", "wsgi"])
revert_initramfs()
class PrepareCommand(cmd.Command):
"""Prepare the Fuel master node to upgrade an environment"""
def take_action(self, parsed_args):
prepare()
class RevertCommand(cmd.Command):
"""Revert all patches applied by 'prepare' command"""
def take_action(self, parsed_args):
revert_prepare()

View File

@@ -12,9 +12,9 @@
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers import network
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import network
def rollback_control_plane(seed_id, orig_id):

View File

@@ -1,74 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers.sync_glance_images import sync_glance_images
from octane.util import db
from octane.util import env as env_util
from octane.util import ssh
def prepare(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
controller = env_util.get_one_controller(seed_env)
with tempfile.NamedTemporaryFile() as temp:
db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
db.mysqldump_restore_to_env(seed_env, temp.name)
ssh.call(['keystone-manage', 'db_sync'],
node=controller, parse_levels=True)
for controller in env_util.get_controllers(seed_env):
ssh.call(['service', 'memcached', 'restart'], node=controller)
class SyncImagesCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'swift_ep', type=str,
help="Endpoint's name where swift-proxy service is listening on")
return parser
def take_action(self, parsed_args):
sync_glance_images(parsed_args.orig_id, parsed_args.seed_id,
parsed_args.swift_ep)
class SyncImagesPrepareCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesPrepareCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
prepare(parsed_args.orig_id, parsed_args.seed_id)

View File

@@ -1,92 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cliff import command as cmd
from fuelclient import objects
from requests import HTTPError
from octane.util import env as env_util
LOG = logging.getLogger(__name__)
KEEP_NETWORK_NAMES = ['fuelweb_admin', 'management', 'public']
def update_env_networks(env_id, networks):
fields_to_update = ['meta', 'ip_ranges']
env = objects.Environment(env_id)
release_id = env.get_fresh_data()['release_id']
network_data = env.get_network_data()
node_group_id = None
for ng in network_data['networks']:
if ng['name'] in KEEP_NETWORK_NAMES:
continue
if node_group_id is None:
# for now we'll have only one node group
# so just take it id from any network
node_group_id = ng['group_id']
objects.NetworkGroup(ng['id']).delete()
data_to_update = {}
for ng in networks:
if ng['name'] in KEEP_NETWORK_NAMES:
continue
try:
objects.NetworkGroup.create(
ng['name'],
release_id,
ng['vlan_start'],
ng['cidr'],
ng['gateway'],
node_group_id,
ng['meta']
)
except HTTPError:
LOG.error("Cannot sync network '{0}'".format(ng['name']))
continue
data = {}
for key in fields_to_update:
data[key] = ng[key]
data_to_update[ng['name']] = data
# now we need to update new networks with
# correct ip_ranges and meta
network_data = env.get_network_data()
network_data['networks'] = [ng for ng in network_data['networks']
if ng['name'] not in KEEP_NETWORK_NAMES]
for ng in network_data['networks']:
if ng['name'] in data_to_update:
for k in fields_to_update:
ng[k] = data_to_update[ng['name']][k]
env.set_network_data(network_data)
class SyncNetworksCommand(cmd.Command):
"""Synchronize network groups in original and seed environments"""
def get_parser(self, prog_name):
parser = super(SyncNetworksCommand, self).get_parser(prog_name)
parser.add_argument(
'original_env', type=int, metavar='ORIGINAL_ENV_ID',
help="ID of original environment")
parser.add_argument(
'seed_env', type=int, metavar='SEED_ENV_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
orig_env = objects.Environment(parsed_args.original_env)
networks = env_util.get_env_networks(orig_env)
update_env_networks(parsed_args.seed_env, networks)

View File

@@ -21,7 +21,7 @@ from octane.util import archivate
def update_centos_bootstrap():
with archivate.update_cpio(magic_consts.BOOTSTRAP_INITRAMFS) as tmp_dir:
shutil.copy(
shutil.copy2(
"/root/.ssh/authorized_keys",
os.path.join(tmp_dir, "root/.ssh/authorized_keys"))

View File

@@ -1,204 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import logging
import re
import requests
from cliff import command as cmd
from fuelclient.objects import environment
from fuelclient.objects import node as node_obj
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def get_template_hosts_by_name(client, plugin_name):
return client.template.get(filter={'name': plugin_name},
selectHosts=['name'])[0]['hosts']
def get_host_snmp_ip(client, host_id):
# second type is SNMP type
return client.hostinterface.get(hosids=host_id,
output=['ip'],
filter={'type': 2})[0]['ip']
def get_zabbix_url(astute):
return 'http://{0}/zabbix'.format(astute['public_vip'])
def get_zabbix_credentials(astute):
return astute['zabbix']['username'], astute['zabbix']['password']
def zabbix_monitoring_settings(astute, attrs):
attrs['username']['value'] = astute['zabbix']['username']
attrs['password']['value'] = astute['zabbix']['password']
attrs['db_password']['value'] = astute['zabbix']['db_password']
attrs['metadata']['enabled'] = astute['zabbix']['enabled']
def emc_vnx_settings(astute, attrs):
attrs['emc_sp_a_ip']['value'] = astute['storage']['emc_sp_a_ip']
attrs['emc_sp_b_ip']['value'] = astute['storage']['emc_sp_b_ip']
attrs['emc_password']['value'] = astute['storage']['emc_password']
attrs['emc_username']['value'] = astute['storage']['emc_username']
attrs['emc_pool_name']['value'] = astute['storage']['emc_pool_name']
attrs['metadata']['enabled'] = astute['storage']['volumes_emc']
def zabbix_snmptrapd_settings(astute, attrs):
node = node_obj.Node(astute['uid'])
with ssh.sftp(node).open('/etc/snmp/snmptrapd.conf') as f:
data = f.read()
template = re.compile(r"authCommunity\s[a-z-,]+\s([a-z-]+)")
match = template.search(data)
attrs['community']['value'] = match.group(1)
attrs['metadata']['enabled'] = True
def get_zabbix_client(astute):
url = get_zabbix_url(astute)
user, password = get_zabbix_credentials(astute)
session = requests.Session()
node_cidr = astute['network_scheme']['endpoints']['br-fw-admin']['IP'][0]
node_ip = node_cidr.split('/')[0]
session.proxies = {
'http': 'http://{0}:8888'.format(node_ip)
}
import pyzabbix
client = pyzabbix.ZabbixAPI(server=url, session=session)
client.login(user=user, password=password)
return client
def zabbix_monitoring_emc_settings(astute, attrs):
client = get_zabbix_client(astute)
hosts = get_template_hosts_by_name(client, 'Template EMC VNX')
for host in hosts:
host['ip'] = get_host_snmp_ip(client, host['hostid'])
settings = ','.join('{0}:{1}'.format(host['name'], host['ip'])
for host in hosts)
attrs['hosts']['value'] = settings
attrs['metadata']['enabled'] = True
def zabbix_monitoring_extreme_networks_settings(astute, attrs):
client = get_zabbix_client(astute)
hosts = get_template_hosts_by_name(client, 'Template Extreme Networks')
for host in hosts:
host['ip'] = get_host_snmp_ip(client, host['hostid'])
settings = ','.join('{0}:{1}'.format(host['name'], host['ip'])
for host in hosts)
attrs['hosts']['value'] = settings
attrs['metadata']['enabled'] = True
class UnknownPlugin(Exception):
message = "Unknown plugin '{0}'"
def __init__(self, plugin):
super(UnknownPlugin, self).__init__(self.message.format(plugin))
class PluginNotConfigured(Exception):
message = "No settings for plugin '{0}' in environment #{1}. " \
"Was it installed before environment #{1} has been created?"
def __init__(self, plugin, env_id):
super(PluginNotConfigured, self).__init__(self.message.format(
plugin, env_id))
def transfer_plugins_settings(orig_env_id, seed_env_id, plugins):
orig_env = environment.Environment(orig_env_id)
seed_env = environment.Environment(seed_env_id)
astute = env_util.get_astute_yaml(orig_env)
attrs = seed_env.get_settings_data()
editable_attrs = attrs['editable']
plugin_fns = {}
plugin_attrs = {}
for plugin in plugins:
try:
plugin_fns[plugin] = PLUGINS[plugin]
except KeyError:
raise UnknownPlugin(plugin)
try:
plugin_attrs[plugin] = editable_attrs[plugin]
except KeyError:
raise PluginNotConfigured(plugin, seed_env_id)
for plugin in plugins:
LOG.info("Fetching settings for plugin '%s'", plugin)
plugin_fn = plugin_fns[plugin]
plugin_attr = plugin_attrs[plugin]
plugin_fn(astute, plugin_attr)
seed_env.set_settings_data(attrs)
PLUGINS = {
'zabbix_monitoring': zabbix_monitoring_settings,
'emc_vnx': emc_vnx_settings,
'zabbix_snmptrapd': zabbix_snmptrapd_settings,
'zabbix_monitoring_emc': zabbix_monitoring_emc_settings,
'zabbix_monitoring_extreme_networks':
zabbix_monitoring_extreme_networks_settings,
}
def plugin_names(s):
plugins = s.split(',')
for plugin in plugins:
if plugin not in PLUGINS:
raise argparse.ArgumentTypeError("Unknown plugin '{0}'"
.format(plugin))
return plugins
class UpdatePluginSettingsCommand(cmd.Command):
"""Transfer settings for specified plugin from ORIG_ENV to SEED_ENV"""
def get_parser(self, prog_name):
parser = super(UpdatePluginSettingsCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_env',
type=int,
metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_env',
type=int,
metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'--plugins',
type=plugin_names,
required=True,
help="Comma separated values: {0}".format(', '.join(PLUGINS)))
return parser
def take_action(self, parsed_args):
transfer_plugins_settings(parsed_args.orig_env,
parsed_args.seed_env,
parsed_args.plugins)

View File

@@ -81,6 +81,17 @@ def get_ceph_conf_filename(node):
return '/etc/ceph/ceph.conf'
def add_rgw_frontends(conf):
rgw_frontends_line = ("rgw_frontends = fastcgi socket_port=9000 "
"socket_host=127.0.0.1")
if re.search(r"\nrgw_frontends", conf):
return conf
conf = re.sub(r'\n\[client.radosgw.gateway\]\n',
"\g<0>{0}\n".format(rgw_frontends_line),
conf)
return conf
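
add_rgw_frontends() is idempotent: it inserts the rgw_frontends line immediately after the [client.radosgw.gateway] section header and leaves a config that already sets rgw_frontends untouched. A self-contained check of that behaviour on a toy config:

import re

RGW_LINE = ("rgw_frontends = fastcgi socket_port=9000 "
            "socket_host=127.0.0.1")

def add_rgw_frontends(conf):
    # Same logic as above: skip if already configured, otherwise
    # append the line right after the section header.
    if re.search(r"\nrgw_frontends", conf):
        return conf
    return re.sub(r"\n\[client.radosgw.gateway\]\n",
                  "\\g<0>{0}\n".format(RGW_LINE), conf)

conf = "[global]\nfsid = abc\n\n[client.radosgw.gateway]\nhost = node-1\n"
patched = add_rgw_frontends(conf)
assert RGW_LINE in patched
# A second call is a no-op:
assert add_rgw_frontends(patched) == patched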
def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
nodes = list(env_util.get_controllers(seed_env))
hostnames = map(short_hostname, node_util.get_hostnames(nodes))
@@ -89,6 +100,7 @@ def ceph_set_new_mons(seed_env, filename, conf_filename, db_path):
with contextlib.closing(tarfile.open(filename)) as f:
conf = f.extractfile(conf_filename).read()
conf = replace_addresses(conf, hostnames, mgmt_ips)
conf = add_rgw_frontends(conf)
fsid = get_fsid(conf)
monmaptool_cmd = ['monmaptool', '--fsid', fsid, '--clobber', '--create']

View File

@@ -12,9 +12,9 @@
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers import network
from octane.util import env as env_util
from octane.util import maintenance
from octane.util import network
from octane.util import ssh

View File

@@ -43,13 +43,6 @@ def find_deployable_release(operating_system):
operating_system)
def set_cobbler_provision(env_id):
env = environment_obj.Environment(env_id)
settings = env.get_settings_data()
settings["editable"]["provision"]["method"]["value"] = "cobbler"
env.set_settings_data(settings)
def upgrade_env(env_id):
env = environment_obj.Environment(env_id)
target_release = find_deployable_release("Ubuntu")

View File

@@ -14,27 +14,33 @@ import logging
import os.path
from cliff import command as cmd
from distutils import version
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from octane.handlers import upgrade as upgrade_handlers
from octane.helpers import disk
from octane import magic_consts
from octane.util import docker
from octane.util import env as env_util
from octane.util import helpers
from octane.util import patch
LOG = logging.getLogger(__name__)
def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
# From check_deployment_status
env = environment_obj.Environment(env_id)
nodes = [node_obj.Node(node_id) for node_id in node_ids]
def load_network_template(network_template):
try:
data = helpers.load_yaml(network_template)
except Exception:
LOG.exception("Cannot open network template from %s",
network_template)
raise
return data
# Sanity check
def check_sanity(env_id, nodes):
one_orig_id = None
for node in nodes:
node_id = node.data['id']
orig_id = node.data['cluster']
if orig_id == env_id:
raise Exception(
@@ -48,29 +54,43 @@ def upgrade_node(env_id, node_ids, isolated=False, network_template=None):
orig_id, one_orig_id,
)
one_orig_id = orig_id
patch_partition_generator(one_orig_id)
call_handlers = upgrade_handlers.get_nodes_handlers(nodes, env, isolated)
call_handlers('preupgrade')
call_handlers('prepare')
env_util.move_nodes(env, nodes)
call_handlers('predeploy')
if network_template:
env_util.set_network_template(env, network_template)
if isolated or len(nodes) == 1:
env_util.deploy_nodes(env, nodes)
else:
env_util.deploy_changes(env, nodes)
call_handlers('postdeploy')
def patch_partition_generator(env_id):
"""Update partitions generator for releases earlier than 6.0"""
def upgrade_node(env_id, node_ids, isolated=False, network_template=None,
provision=True, roles=None, live_migration=True):
# From check_deployment_status
env = environment_obj.Environment(env_id)
env_version = version.StrictVersion(env.data["fuel_version"])
if env_version < version.StrictVersion("6.0"):
copy_patches_folder_to_nailgun()
disk.update_partition_generator()
nodes = [node_obj.Node(node_id) for node_id in node_ids]
if network_template:
network_template_data = load_network_template(network_template)
check_sanity(env_id, nodes)
# NOTE(ogelbukh): patches and scripts copied to nailgun container
# for later use
copy_patches_folder_to_nailgun()
call_handlers = upgrade_handlers.get_nodes_handlers(
nodes, env, isolated, live_migration)
with patch.applied_patch(
magic_consts.PUPPET_DIR, *magic_consts.UPGRADE_NODE_PATCHES):
call_handlers('preupgrade')
call_handlers('prepare')
env_util.move_nodes(env, nodes, provision, roles)
# NOTE(aroma): copying of VIPs must be done after node reassignment;
# according to [1], the operation will otherwise take no effect
# [1]: https://bugs.launchpad.net/fuel/+bug/1549254
env_util.copy_vips(env)
if network_template:
env.set_network_template_data(network_template_data)
call_handlers('predeploy')
if isolated or len(nodes) == 1:
env_util.deploy_nodes(env, nodes)
else:
env_util.deploy_changes(env, nodes)
call_handlers('postdeploy')
def copy_patches_folder_to_nailgun():
@@ -79,11 +99,24 @@ def copy_patches_folder_to_nailgun():
docker.put_files_to_docker('nailgun', dest_folder, folder)
def list_roles(s):
return s.split(',')
class UpgradeNodeCommand(cmd.Command):
"""Move nodes to environment and upgrade the node"""
def get_parser(self, prog_name):
parser = super(UpgradeNodeCommand, self).get_parser(prog_name)
parser.add_argument(
'--no-provision', dest='provision', action='store_false',
default=True,
help="Perform reprovisioning of nodes during the upgrade. "
"(default: True).")
parser.add_argument(
'--roles', type=list_roles, nargs='?',
help="Assign given roles to the specified nodes or do not specify "
"them at all to preserve the current roles.")
parser.add_argument(
'--isolated', action='store_true',
help="Isolate node's network from original cluster")
@@ -96,9 +129,20 @@ class UpgradeNodeCommand(cmd.Command):
parser.add_argument(
'node_ids', type=int, metavar='NODE_ID', nargs='+',
help="IDs of nodes to be moved")
parser.add_argument(
'--no-live-migration',
action='store_false',
dest="live_migration",
default=True,
help="Run migration on ceph-osd or compute nodes in one command. "
"It can prevent to cluster downtime on deploy period. "
"(default: True).")
return parser
def take_action(self, parsed_args):
upgrade_node(parsed_args.env_id, parsed_args.node_ids,
isolated=parsed_args.isolated,
network_template=parsed_args.template)
network_template=parsed_args.template,
provision=parsed_args.provision,
roles=parsed_args.roles,
live_migration=parsed_args.live_migration)
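
The pre-6.0 gate in upgrade_node() above relies on distutils' StrictVersion rather than raw string comparison, which matters once version numbers reach two digits:

from distutils import version

# "10.0" sorts before "6.0" as a string, but not as a version:
assert "10.0" < "6.0"
assert version.StrictVersion("10.0") > version.StrictVersion("6.0")
assert version.StrictVersion("5.1.1") < version.StrictVersion("6.0")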

View File

@@ -1,41 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient.commands import base
from fuelclient.commands import environment as env_commands
class CloneIPs(env_commands.EnvMixIn, base.BaseCommand):
"""Clone IPs from original environment controllers to seed environment"""
def get_parser(self, prog_name):
parser = super(CloneIPs, self).get_parser(prog_name)
parser.add_argument('id', type=int,
help='ID of environment to clone from')
parser.add_argument('--networks',
type=str,
nargs='+',
help='Names of networks which ips should'
' be copied.')
return parser
def take_action(self, parsed_args):
# TODO(asvechnikov): While the clone ip procedure is not a part of
# fuelclient.objects.Environment the connection
# will be called directly.
networks = []
if parsed_args.networks:
networks = parsed_args.networks
self.client._entity_wrapper.connection.post_request(
"clusters/{0}/upgrade/clone_ips".format(parsed_args.id),
{'networks': networks}
)

View File

@@ -0,0 +1,35 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient.commands import base
from fuelclient.commands import environment as env_commands
class CopyVIPs(env_commands.EnvMixIn, base.BaseCommand):
"""Copy VIPs to seed cluster"""
def get_parser(self, prog_name):
parser = super(CopyVIPs, self).get_parser(prog_name)
parser.add_argument('env_id',
type=str,
help='ID of the environment')
return parser
def take_action(self, parsed_args):
# NOTE(aroma): while the VIPs copying procedure is not a part of
# fuelclient.objects.Environment, the connection is called directly
self.client._entity_wrapper.connection.post_request(
"clusters/{0}/upgrade/vips".format(parsed_args.env_id))
msg = ('VIPs successfully copied from the original cluster to seed '
'cluster {0}'.format(parsed_args.env_id))
self.app.stdout.write(msg)

View File

@@ -19,6 +19,12 @@ class EnvMoveNode(env_commands.EnvMixIn, base.BaseCommand):
def get_parser(self, prog_name):
parser = super(EnvMoveNode, self).get_parser(prog_name)
parser.add_argument('--no-provision', dest='provision',
default=True, action='store_false',
help="Do not perform reprovisioning of the node.")
parser.add_argument('--roles', nargs='?',
help="Assign the given roles to the node (a comma "
"separated list of roles).")
parser.add_argument('node_id',
type=int,
help='ID of the node to upgrade.')
@@ -31,11 +37,15 @@ class EnvMoveNode(env_commands.EnvMixIn, base.BaseCommand):
# TODO(akscram): While the clone procedure is not a part of
# fuelclient.objects.Environment the connection
# will be called directly.
data = {
'node_id': parsed_args.node_id,
'reprovision': parsed_args.provision,
}
if parsed_args.roles:
data['roles'] = parsed_args.roles.split(',')
self.client._entity_wrapper.connection.post_request(
"clusters/{0}/upgrade/assign".format(parsed_args.env_id),
{
'node_id': parsed_args.node_id,
}
data,
)
msg = ('Node {node_id} successfully relocated to the environment'
' {env_id}.\n'.format(

View File

@@ -16,14 +16,20 @@ from octane.handlers.backup_restore import astute
from octane.handlers.backup_restore import cobbler
from octane.handlers.backup_restore import fuel_keys
from octane.handlers.backup_restore import fuel_uuid
from octane.handlers.backup_restore import logs
from octane.handlers.backup_restore import mirrors
from octane.handlers.backup_restore import nailgun_plugins
from octane.handlers.backup_restore import postgres
from octane.handlers.backup_restore import puppet
from octane.handlers.backup_restore import release
from octane.handlers.backup_restore import ssh
from octane.handlers.backup_restore import version
# NOTE(akscram): Unsupported archivators are disabled and will be
# rewritten one-by-one. Docker containers were removed in 9.0 and all
# services now run directly on the host. This major change requires
# modifying the current archivators that use containers.
ARCHIVATORS = [
astute.AstuteArchivator,
# SSH restore must go before Cobbler restore so it updates
@@ -33,12 +39,13 @@ ARCHIVATORS = [
fuel_keys.FuelKeysArchivator,
fuel_uuid.FuelUUIDArchivator,
puppet.PuppetArchivator,
postgres.KeystoneArchivator,
# Nailgun restore should be after puppet restore
postgres.NailgunArchivator,
# Restore of Nailgun DB should go after restore of Puppet modules.
postgres.DatabasesArchivator,
release.ReleaseArchivator,
logs.LogsArchivator,
version.VersionArchivator,
nailgun_plugins.NailgunPluginsArchivator,
puppet.PuppetApplyHost,
puppet.PuppetApplyTasks,
]
REPO_ARCHIVATORS = [
@@ -46,6 +53,11 @@ REPO_ARCHIVATORS = [
mirrors.RepoBackup,
]
FULL_REPO_ARCHIVATORS = [
mirrors.FullMirrorsBackup,
mirrors.FullRepoBackup,
]
class NailgunCredentialsContext(object):
@@ -55,6 +67,6 @@ class NailgunCredentialsContext(object):
def get_credentials_env(self):
env = os.environ.copy()
env["KEYSTONE_USER"] = self.user
env["KEYSTONE_PASS"] = self.password
env["OS_USERNAME"] = self.user
env["OS_PASSWORD"] = self.password
return env

View File

@@ -15,8 +15,7 @@ import shutil
import yaml
from octane.handlers.backup_restore import base
from octane import magic_consts
from octane.util import docker
from octane.util import puppet
LOG = logging.getLogger(__name__)
@@ -72,12 +71,6 @@ class AstuteArchivator(base.PathArchivator):
return yaml.load(current)
def pre_restore_check(self):
names = docker.get_docker_container_names(status="running")
containers = set(magic_consts.RUNNING_REQUIRED_CONTAINERS) - set(names)
if containers:
raise Exception(
"Required running containers: {0}".format(
", ".join(containers)))
backup_ip = self.get_backup_dict()["ADMIN_NETWORK"]["ipaddress"]
current_ip = self.get_current_dict()["ADMIN_NETWORK"]["ipaddress"]
if backup_ip != current_ip:
@@ -109,24 +102,12 @@ class AstuteArchivator(base.PathArchivator):
",".join(not_found_keys)))
old_path_name = "{0}.old".format(self.path)
new_path_name = "{0}.new".format(self.path)
shutil.copy(self.path, old_path_name)
shutil.copy2(self.path, old_path_name)
with open(new_path_name, "w") as new:
yaml.safe_dump(current_yaml, new, default_flow_style=False)
shutil.move(new_path_name, self.path)
self._post_restore_action()
def _post_restore_action(self):
# restart all running containers
for name in magic_consts.RUNNING_REQUIRED_CONTAINERS:
docker.stop_container(name)
# FIXME: when astute container restart corrent this may be removed
if "astute" == name:
try:
docker.start_container(name)
except Exception:
LOG.warn(
"Failed to start astute container for the first time")
docker.stop_container(name)
else:
continue
docker.start_container(name)
for task in ["hiera", "host"]:
puppet.apply_task(task)

View File

@@ -50,9 +50,11 @@ class ContainerArchivator(Base):
backup_directory = None
allowed_files = None
container = None
backup_name = None
def backup(self):
assert self.container
assert self.backup_name
assert self.backup_directory
stdout, _ = docker.run_in_container(
self.container,
@@ -71,13 +73,15 @@
self.archive,
self.container,
["cat", path],
"{0}/{1}".format(self.container, filename)
"{0}/{1}".format(self.backup_name, filename)
)
def restore(self):
assert self.container
assert self.backup_name
assert self.backup_directory
for member in archivate.filter_members(self.archive, self.container):
for member in archivate.filter_members(
self.archive, self.backup_name):
dump = self.archive.extractfile(member.name).read()
name = member.name.split("/", 1)[-1]
docker.write_data_in_docker_file(
@@ -87,19 +91,46 @@ )
)
class CmdArchivator(Base):
class PathFilterArchivator(Base):
container = None
backup_directory = None
backup_name = None
allowed_files = None
banned_files = []
def backup(self):
assert self.backup_name
assert self.backup_directory
for root, _, filenames in os.walk(self.backup_directory):
directory = root[len(self.backup_directory):].lstrip(os.path.sep)
for filename in filenames:
relative_path = os.path.join(directory, filename)
if relative_path in self.banned_files:
continue
if self.allowed_files is not None \
and relative_path not in self.allowed_files:
continue
path = os.path.join(root, filename)
path_in_archive = os.path.join(self.backup_name, relative_path)
self.archive.add(path, path_in_archive)
def restore(self):
assert self.backup_name
assert self.backup_directory
for member in archivate.filter_members(self.archive, self.backup_name):
member.name = member.name.partition(os.path.sep)[-1]
self.archive.extract(member, self.backup_directory)
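
PathFilterArchivator stores each file under "<backup_name>/<relative path>" and strips that first component again on restore. The two name manipulations in isolation (assuming POSIX path separators, as on the Fuel master):

import os

# backup(): relative path of a member inside the archive
backup_name = "cobbler"
relative_path = "node-1.json"
assert os.path.join(backup_name, relative_path) == "cobbler/node-1.json"

# restore(): drop the backup_name prefix from an archive member name
member_name = "cobbler/node-1.json"
assert member_name.partition(os.path.sep)[-1] == "node-1.json"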
class CmdArchivator(Base):
cmd = None
filename = None
def backup(self):
assert self.cmd
assert self.container
assert self.filename
archivate.archivate_container_cmd_output(
self.archive, self.container, self.cmd, self.filename)
archivate.archivate_cmd_output(self.archive, self.cmd, self.filename)
class DirsArchivator(Base):
@@ -142,3 +173,25 @@ class PathArchivator(Base):
member.name = member.name.split("/", 1)[-1]
path = self.path
self.archive.extract(member, path)
class CollectionArchivator(Base):
archivators_classes = []
def __init__(self, *args, **kwargs):
super(CollectionArchivator, self).__init__(*args, **kwargs)
self.archivators = [c(*args, **kwargs)
for c in self.archivators_classes]
def backup(self):
for archivator in self.archivators:
archivator.backup()
def restore(self):
for archivator in self.archivators:
archivator.restore()
def pre_restore_check(self):
for archivator in self.archivators:
archivator.pre_restore_check()

View File

@@ -11,15 +11,37 @@
# under the License.
from octane.handlers.backup_restore import base
from octane.util import docker
from octane.util import puppet
from octane.util import subprocess
class CobblerArchivator(base.ContainerArchivator):
class CobblerSystemArchivator(base.PathFilterArchivator):
backup_directory = "/var/lib/cobbler/config/systems.d/"
banned_files = ["default.json"]
container = "cobbler"
backup_name = "cobbler"
class CobblerProfileArchivator(base.PathFilterArchivator):
backup_directory = "/var/lib/cobbler/config/profiles.d/"
banned_files = ["bootstrap.json", "ubuntu_bootstrap.json"]
backup_name = "cobbler_profiles"
class CobblerDistroArchivator(base.PathFilterArchivator):
backup_directory = "/var/lib/cobbler/config/distros.d/"
banned_files = ["bootstrap.json", "ubuntu_bootstrap.json"]
backup_name = "cobbler_distros"
class CobblerArchivator(base.CollectionArchivator):
archivators_classes = [
CobblerSystemArchivator,
CobblerProfileArchivator,
CobblerDistroArchivator,
]
def restore(self):
super(CobblerArchivator, self).restore()
docker.stop_container("cobbler")
docker.start_container("cobbler")
subprocess.call(["systemctl", "stop", "cobblerd"])
puppet.apply_task("cobbler")

View File

@ -0,0 +1,48 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from fuelclient import objects
from octane.handlers.backup_restore import base
from octane.util import fuel_client
from octane.util import helpers
from octane.util import subprocess
class LogsArchivator(base.Base):
def backup(self):
pass
def restore(self):
domain = helpers.get_astute_dict()["DNS_DOMAIN"]
dirname = "/var/log/remote/"
with fuel_client.set_auth_context(self.context):
pairs = [(n.data["meta"]["system"]["fqdn"], n.data["ip"])
for n in objects.Node.get_all()]
subprocess.call(["systemctl", "stop", "rsyslog"])
try:
for fqdn, ip_addr in pairs:
if not fqdn.endswith(domain):
continue
ip_addr_path = os.path.join(dirname, ip_addr)
fqdn_path = os.path.join(dirname, fqdn)
if os.path.islink(ip_addr_path):
continue
if os.path.isdir(ip_addr_path):
os.rename(ip_addr_path, fqdn_path)
else:
os.mkdir(fqdn_path)
os.symlink(fqdn, ip_addr_path)
finally:
subprocess.call(["systemctl", "start", "rsyslog"])

View File

@@ -13,12 +13,12 @@
import json
import os
import urlparse
import yaml
from octane.handlers.backup_restore import base
from octane.util import docker
from octane.util import subprocess
from octane import magic_consts
from octane.util import helpers
from octane.util import sql
class NaigunWWWBackup(base.PathArchivator):
@@ -30,39 +30,23 @@ class NaigunWWWBackup(base.PathArchivator):
def _get_values_list(self, data):
raise NotImplementedError
def backup(self):
with open("/etc/fuel/astute.yaml", "r") as current:
current_yaml = yaml.load(current)
ipaddr = current_yaml["ADMIN_NETWORK"]["ipaddress"]
results, _ = docker.run_in_container(
"postgres",
[
"sudo",
"-u",
"postgres",
"psql",
self.db,
"--tuples-only",
"-c",
self.sql
],
stdout=subprocess.PIPE)
results = results.strip()
if not results:
return
rows = results.split("\n")
already_backuped = set()
def _get_mirrors(self):
ipaddr = helpers.get_astute_dict()["ADMIN_NETWORK"]["ipaddress"]
rows = sql.run_psql(self.sql, self.db)
dirs_to_backup = set()
for line in rows:
data = json.loads(line)
for value in self._get_values_list(data):
if ipaddr in value['uri']:
path = urlparse.urlsplit(value['uri']).path
dir_name = path.lstrip("/").split('/', 1)[0]
if dir_name in already_backuped:
continue
already_backuped.add(dir_name)
path = os.path.join(self.path, dir_name)
self.archive.add(path, os.path.join(self.name, dir_name))
dirs_to_backup.add(dir_name)
return list(dirs_to_backup)
def backup(self):
for dir_name in self._get_mirrors():
path = os.path.join(self.path, dir_name)
self.archive.add(path, os.path.join(self.name, dir_name))
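
_get_mirrors() reduces every repo URI that points at the admin node to its top-level directory under the nailgun docroot, deduplicating with a set. The path handling in isolation (the URI is made up):

import urlparse  # Python 2, as in the module above

uri = "http://10.20.0.2:8080/mitaka-9.0/ubuntu/x86_64/images/img.img"
path = urlparse.urlsplit(uri).path
assert path == "/mitaka-9.0/ubuntu/x86_64/images/img.img"
# Top-level directory to back up:
assert path.lstrip("/").split("/", 1)[0] == "mitaka-9.0"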
class MirrorsBackup(NaigunWWWBackup):
@@ -81,3 +65,24 @@ class RepoBackup(NaigunWWWBackup):
def _get_values_list(self, data):
return data['provision']['image_data'].values()
class FullMirrorsBackup(NaigunWWWBackup):
name = "mirrors"
sql = "select array_to_json(array_agg(distinct version)) from releases;"
def _get_mirrors(self):
results = sql.run_psql(self.sql, self.db)
releases = []
for dir_name in magic_consts.MIRRORS_EXTRA_DIRS:
if os.path.exists(os.path.join(self.path, dir_name)):
releases.append(dir_name)
for line in results:
releases.extend(json.loads(line))
return releases
class FullRepoBackup(base.PathArchivator):
name = 'repos/targetimages'
path = '/var/www/nailgun/targetimages'

View File

@@ -10,33 +10,21 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import requests
import shutil
import six
import urlparse
import yaml
from fuelclient.objects import node
from keystoneclient.v2_0 import Client as keystoneclient
from octane.handlers.backup_restore import base
from octane import magic_consts
from octane.util import docker
from octane.util import fuel_client
from octane.util import helpers
from octane.util import auth
from octane.util import keystone
from octane.util import patch
from octane.util import puppet
from octane.util import subprocess
LOG = logging.getLogger(__name__)
class PostgresArchivatorMeta(type):
def __init__(cls, name, bases, attr):
super(PostgresArchivatorMeta, cls).__init__(name, bases, attr)
cls.container = "postgres"
if cls.db is not None and cls.cmd is None:
cls.cmd = ["sudo", "-u", "postgres", "pg_dump", "-C", cls.db]
if cls.db is not None and cls.filename is None:
@@ -46,132 +34,61 @@
@six.add_metaclass(PostgresArchivatorMeta)
class PostgresArchivator(base.CmdArchivator):
db = None
services = []
def restore(self):
dump = self.archive.extractfile(self.filename)
subprocess.call([
"systemctl", "stop", "docker-{0}.service".format(self.db)
])
docker.stop_container(self.db)
docker.run_in_container(
"postgres",
["sudo", "-u", "postgres", "dropdb", "--if-exists", self.db],
)
with docker.in_container("postgres",
["sudo", "-u", "postgres", "psql"],
stdin=subprocess.PIPE) as process:
process.stdin.write(dump.read())
docker.start_container(self.db)
docker.wait_for_container(self.db)
subprocess.call([
"systemctl", "start", "docker-{0}.service".format(self.db)
])
subprocess.call(["systemctl", "stop"] + self.services)
subprocess.call(["sudo", "-u", "postgres", "dropdb", "--if-exists",
self.db])
with subprocess.popen(["sudo", "-u", "postgres", "psql"],
stdin=subprocess.PIPE) as process:
shutil.copyfileobj(dump, process.stdin)
with auth.set_astute_password(self.context):
puppet.apply_task(self.db)
class NailgunArchivator(PostgresArchivator):
db = "nailgun"
def __post_data_to_nailgun(self, url, data, user, password):
ksclient = keystoneclient(
auth_url=magic_consts.KEYSTONE_API_URL,
username=user,
password=password,
tenant_name=magic_consts.KEYSTONE_TENANT_NAME,
)
resp = requests.post(
urlparse.urljoin(magic_consts.NAILGUN_URL, url),
json.dumps(data),
headers={
"X-Auth-Token": ksclient.auth_token,
"Content-Type": "application/json",
})
LOG.debug(resp.content)
return resp
services = [
"nailgun",
"oswl_flavor_collectord",
"oswl_image_collectord",
"oswl_keystone_user_collectord",
"oswl_tenant_collectord",
"oswl_vm_collectord",
"oswl_volume_collectord",
"receiverd",
"statsenderd",
"assassind",
]
patches = magic_consts.NAILGUN_ARCHIVATOR_PATCHES
def restore(self):
for args in magic_consts.NAILGUN_ARCHIVATOR_PATCHES:
docker.apply_patches(*args)
try:
with patch.applied_patch(*self.patches):
super(NailgunArchivator, self).restore()
self._post_restore_action()
finally:
for args in magic_consts.NAILGUN_ARCHIVATOR_PATCHES:
docker.apply_patches(*args, revert=True)
def _create_links_on_remote_logs(self):
with open("/etc/fuel/astute.yaml") as astute:
domain = yaml.load(astute)["DNS_DOMAIN"]
dirname = "/var/log/docker-logs/remote/"
with fuel_client.set_auth_context(self.context):
pairs = [(n.data["meta"]["system"]["fqdn"], n.data["ip"])
for n in node.Node.get_all()]
docker.run_in_container("rsyslog", ["service", "rsyslog", "stop"])
try:
for fqdn, ip_addr in pairs:
if not fqdn.endswith(domain):
continue
ip_addr_path = os.path.join(dirname, ip_addr)
fqdn_path = os.path.join(dirname, fqdn)
if os.path.islink(ip_addr_path):
continue
if os.path.isdir(ip_addr_path):
os.rename(ip_addr_path, fqdn_path)
else:
os.mkdir(fqdn_path)
os.symlink(fqdn, ip_addr_path)
finally:
docker.run_in_container("rsyslog", ["service", "rsyslog", "start"])
def _run_sql_in_container(self, sql):
sql_run_prams = [
"sudo", "-u", "postgres", "psql", "nailgun", "--tuples-only", "-c"]
results, _ = docker.run_in_container(
"postgres",
sql_run_prams + [sql],
stdout=subprocess.PIPE)
return results.strip().split("\n")
def _post_restore_action(self):
data, _ = docker.run_in_container(
"nailgun",
["cat", magic_consts.OPENSTACK_FIXTURES],
stdout=subprocess.PIPE)
fixtures = yaml.load(data)
base_release_fields = fixtures[0]['fields']
for fixture in fixtures[1:]:
release = helpers.merge_dicts(
base_release_fields, fixture['fields'])
self.__post_data_to_nailgun(
"/api/v1/releases/",
release,
self.context.user,
self.context.password)
subprocess.call(
[
"fuel",
"release",
"--sync-deployment-tasks",
"--dir",
"/etc/puppet/",
],
env=self.context.get_credentials_env())
values = []
for line in self._run_sql_in_container(
"select id, generated from attributes;"):
c_id, c_data = line.split("|", 1)
data = json.loads(c_data)
data["deployed_before"] = {"value": True}
values.append("({0}, '{1}')".format(c_id, json.dumps(data)))
if values:
self._run_sql_in_container(
'update attributes as a set generated = b.generated '
'from (values {0}) as b(id, generated) '
'where a.id = b.id;'.format(','.join(values))
)
self._create_links_on_remote_logs()
class KeystoneArchivator(PostgresArchivator):
db = "keystone"
services = ["openstack-keystone"]
def restore(self):
keystone.unset_default_domain_id(magic_consts.KEYSTONE_CONF)
keystone.add_admin_token_auth(magic_consts.KEYSTONE_PASTE, [
"pipeline:public_api",
"pipeline:admin_api",
"pipeline:api_v3",
])
super(KeystoneArchivator, self).restore()
class DatabasesArchivator(base.CollectionArchivator):
archivators_classes = [
KeystoneArchivator,
NailgunArchivator,
]
def restore(self):
puppet.apply_task("postgresql")
super(DatabasesArchivator, self).restore()

View File

@@ -10,12 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import tempfile
import yaml
from octane.handlers.backup_restore import base
from octane.util import auth
from octane.util import puppet
from octane.util import subprocess
class PuppetArchivator(base.DirsArchivator):
@@ -23,22 +21,15 @@ class PuppetArchivator(base.DirsArchivator):
tag = "puppet"
class PuppetApplyHost(base.Base):
class PuppetApplyTasks(base.Base):
services = [
"ostf",
]
def backup(self):
pass
def restore(self):
_, tmp_file_name = tempfile.mkstemp(
dir="/etc/fuel",
prefix=".astute.yaml.octane")
shutil.copy("/etc/fuel/astute.yaml", tmp_file_name)
try:
with open("/etc/fuel/astute.yaml") as current:
data = yaml.load(current)
data["FUEL_ACCESS"]["password"] = self.context.password
with open("/etc/fuel/astute.yaml", "w") as current:
yaml.safe_dump(data, current, default_flow_style=False)
puppet.apply_host()
finally:
shutil.move(tmp_file_name, "/etc/fuel/astute.yaml")
subprocess.call(["systemctl", "stop"] + self.services)
with auth.set_astute_password(self.context):
puppet.apply_all_tasks()

View File

@ -0,0 +1,100 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
import urlparse
import yaml
from keystoneclient.v2_0 import Client as keystoneclient
from octane.handlers.backup_restore import base
from octane import magic_consts
from octane.util import helpers
from octane.util import subprocess
LOG = logging.getLogger(__name__)
class ReleaseArchivator(base.Base):
def backup(self):
pass
def restore(self):
def get_release_key(release):
return (release['version'], release['name'])
with open(magic_consts.OPENSTACK_FIXTURES) as f:
fixtures = yaml.load(f)
loaded_existing_releases = self.__get_request("/api/v1/releases/")
existing_releases = set(map(get_release_key, loaded_existing_releases))
releases = self.extend_fixtures(fixtures)
for release in releases:
key = get_release_key(release)
if key in existing_releases:
LOG.debug("Skipping to upload of the already existing "
"release: %s - %s",
release['name'], release['version'])
continue
self.__post_request("/api/v1/releases/", release)
subprocess.call(
[
"fuel",
"release",
"--sync-deployment-tasks",
"--dir",
"/etc/puppet/",
],
env=self.context.get_credentials_env())
@staticmethod
def extend_fixtures(fixtures):
def extend(obj):
if 'extend' in obj:
obj['extend'] = extend(obj['extend'])
return helpers.merge_dicts(obj['extend'], obj)
return obj
for fixture in fixtures:
if "pk" not in fixture or fixture["pk"] is None:
continue
yield extend(fixture)["fields"]
def __post_request(self, url, data):
self.__request("POST", url,
user=self.context.user,
password=self.context.password,
data=data)
def __get_request(self, url):
resp = self.__request("GET", url,
user=self.context.user,
password=self.context.password)
return resp.json()
def __request(self, method, url, user, password, data=None):
ksclient = keystoneclient(
auth_url=magic_consts.KEYSTONE_API_URL,
username=user,
password=password,
tenant_name=magic_consts.KEYSTONE_TENANT_NAME,
)
resp = requests.request(
method,
urlparse.urljoin(magic_consts.NAILGUN_URL, url),
json=data,
headers={
"X-Auth-Token": ksclient.auth_token,
"Content-Type": "application/json",
})
LOG.debug(resp.content)
return resp

View File

@@ -11,6 +11,7 @@
# under the License.
from octane.handlers.backup_restore import base
from octane.util import fuel_bootstrap
from octane.util import subprocess
@@ -23,3 +24,6 @@ class SshArchivator(base.PathArchivator):
subprocess.call(
["fuel-bootstrap", "build", "--activate"],
env=self.context.get_credentials_env())
# Remove old images because they were created with the old ssh key pair
fuel_bootstrap.delete_not_active_images()

View File

@@ -14,8 +14,8 @@ import logging
import os.path
from octane.handlers import install
from octane.helpers import disk
from octane import magic_consts
from octane.util import disk
from octane.util import node as node_util
from octane.util import plugin
from octane.util import ssh

View File

@@ -14,12 +14,13 @@ from octane import handlers
class UpgradeHandler(object):
def __init__(self, node, env, isolated):
def __init__(self, node, env, isolated, live_migration):
self.node = node
self.orig_env = self.node.env
self.orig_version = self.orig_env.data["fuel_version"]
self.env = env
self.isolated = isolated
self.live_migration = live_migration
def preupgrade(self):
raise NotImplementedError('preupgrade')

View File

@@ -10,24 +10,45 @@
# License for the specific language governing permissions and limitations
# under the License.
from octane.commands import prepare
import logging
from octane.handlers import upgrade
from octane.util import ceph
from octane.util import node as node_util
from octane.util import puppet
from octane.util import subprocess
LOG = logging.getLogger(__name__)
class CephOsdUpgrade(upgrade.UpgradeHandler):
env_with_set_noout = set()
patched_nodes = set()
def preupgrade(self):
ceph.check_cluster(self.node)
try:
ceph.check_cluster(self.node)
except subprocess.CalledProcessError as exc:
LOG.warning("Ceph cluster health is not OK, ignoring: %s", exc)
def prepare(self):
self.preserve_partition()
ceph.set_osd_noout(self.env)
prepare.patch_puppet()
# patch only on first prepare run
if not self.patched_nodes:
puppet.patch_modules()
self.patched_nodes.add(self.node.data['id'])
if self.env.data['id'] not in self.env_with_set_noout:
self.env_with_set_noout.add(self.env.data['id'])
ceph.set_osd_noout(self.env)
def postdeploy(self):
ceph.unset_osd_noout(self.env)
prepare.patch_puppet(revert=True)
# unset noout on the first postdeploy run per env; revert patches on the last
if self.env.data['id'] in self.env_with_set_noout:
ceph.unset_osd_noout(self.env)
self.env_with_set_noout.remove(self.env.data['id'])
self.patched_nodes.remove(self.node.data['id'])
if not self.patched_nodes:
puppet.patch_modules(revert=True)
def preserve_partition(self):
partition = 'ceph'
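
The class-level sets above give the handler run-once semantics across all nodes of an environment: the first prepare applies the puppet patches and sets noout, and the last postdeploy reverts them. A minimal sketch of the same pattern, with hypothetical names:

class OncePerGroupHandler(object):
    # Shared across instances, like CephOsdUpgrade.patched_nodes above.
    pending = set()

    def __init__(self, member_id):
        self.member_id = member_id

    def prepare(self):
        if not self.pending:
            print("first prepare: patch modules, set noout")
        self.pending.add(self.member_id)

    def postdeploy(self):
        self.pending.remove(self.member_id)
        if not self.pending:
            print("last postdeploy: revert patches")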

View File

@ -16,7 +16,6 @@ import stat
import subprocess
from octane.handlers import upgrade
from octane.helpers import disk
from octane import magic_consts
from octane.util import env as env_util
from octane.util import node as node_util
@ -28,11 +27,10 @@ LOG = logging.getLogger(__name__)
class ComputeUpgrade(upgrade.UpgradeHandler):
def prepare(self):
env = self.node.env
if env_util.get_env_provision_method(env) != 'image':
self.create_configdrive_partition()
disk.update_node_partition_info(self.node.id)
if node_util.is_live_migration_supported(self.node):
if not self.live_migration:
self.preserve_partition()
self.shutoff_vms()
elif node_util.is_live_migration_supported(self.node):
self.evacuate_host()
else:
self.backup_iscsi_initiator_info()
@ -90,23 +88,15 @@ class ComputeUpgrade(upgrade.UpgradeHandler):
def shutoff_vms(self):
password = env_util.get_admin_password(self.env)
controller = env_util.get_one_controller(self.env)
cmd = ['. /root/openrc;',
'nova list --os-password {0} --host {1}'
'nova --os-password {0} list --host {1}'
.format(password, self.node.data['hostname']),
'|',
'awk -F\| \'$4~/ACTIVE/{print($2)}',
"awk -F\| '$4~/ACTIVE/{print($2)}'",
'|',
'xargs -I% nova stop %']
out, err = ssh.call(cmd, stdout=ssh.PIPE, node=self.node)
def create_configdrive_partition(self):
disks = disk.get_node_disks(self.node)
if not disks:
raise Exception("No disks info was found "
"for node {0}".format(self.node["id"]))
# it was agreed that 10MB is enough for config drive partition
size = 10
disk.create_partition(disks[0]['name'], size, self.node)
ssh.call(["sh", "-c", ' '.join(cmd)], stdout=ssh.PIPE, node=controller)
def backup_iscsi_initiator_info(self):
if not plugin.is_enabled(self.env, 'emc_vnx'):

View File

@ -28,8 +28,9 @@ LOG = logging.getLogger(__name__)
class ControllerUpgrade(upgrade.UpgradeHandler):
def __init__(self, node, env, isolated):
super(ControllerUpgrade, self).__init__(node, env, isolated)
def __init__(self, node, env, isolated, live_migration):
super(ControllerUpgrade, self).__init__(
node, env, isolated, live_migration)
self.service_tenant_id = None
self.gateway = None
@ -73,7 +74,7 @@ class ControllerUpgrade(upgrade.UpgradeHandler):
transformations.reset_gw_admin(info, gw_admin)
# From run_ping_checker
info['run_ping_checker'] = False
transformations.remove_predefined_nets(info)
env_util.prepare_net_info(info)
deployment_info.append(info)
self.env.upload_facts('deployment', deployment_info)

View File

@ -11,15 +11,17 @@
# under the License.
import glanceclient.client
import keystoneclient.v2_0.client as ksclient
import keystoneclient.client as ksclient
import neutronclient.neutron.client
def _get_keystone(username, password, tenant_name, auth_url):
return ksclient.Client(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url)
klient = ksclient.Client(auth_url=auth_url)
klient.authenticate(
username=username,
password=password,
tenant_name=tenant_name)
return klient
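
A hedged usage sketch of the changed helper: the unversioned client discovers the API version from auth_url, and authenticate() then populates auth_token. The values below are placeholders:

# Illustrative only; endpoint and credentials are placeholders.
ks = _get_keystone(username="admin",
                   password="secret",
                   tenant_name="admin",
                   auth_url="http://127.0.0.1:5000/v2.0")
token = ks.auth_token  # set by klient.authenticate() above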
def _get_glance(version=2, endpoint=None, token=None):

View File

@ -1,120 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os.path
import re
import shlex
import subprocess
import tarfile
import tempfile
from docker import Client
class IllegalArgumentError(ValueError):
pass
class IllegalContainerName(ValueError):
pass
class InvalidPatchFormat(IndexError):
pass
class InvalidPatch(ValueError):
pass
r = re.compile('^\+{3} ')
l = re.compile('[ \t]')
def extractFilesFromPatch(filePath):
files = []
if (filePath is None or not os.path.isfile(filePath)):
raise InvalidPatch
try:
with open(filePath, "r") as f:
for line in f:
if (r.match(line)):
try:
fileName = l.split(line)
files.append(fileName[1])
except IndexError:
raise InvalidPatchFormat
except IOError:
raise InvalidPatch
if (len(files) == 0):
raise InvalidPatch
return files
class DockerPatch(object):
dockerClient = None
containerId = None
patches = {}
def __init__(self, containerName):
if (containerName is None):
raise IllegalArgumentError("containerName must be not None")
self.dockerClient = Client(
base_url='unix://var/run/docker.sock', version='auto')
for i in self.dockerClient.containers():
if (i['Names'][0] == containerName):
self.containerId = i['Id']
break
if (self.containerId is None):
raise IllegalContainerName(
"Container " + containerName + " not found")
def addPatch(self, patchFile, prefix='/'):
self.patches[patchFile] = {
'prefix': prefix,
'files': extractFilesFromPatch(patchFile),
'patch': patchFile
}
def copy_from_docker(self, src, dest):
reply = self.dockerClient.copy(self.containerId, src)
filelike = io.BytesIO(reply.read())
tar = tarfile.open(fileobj=filelike)
file = tar.extractfile(os.path.basename(src))
with open(dest, 'wb') as f:
f.write(file.read())
reply.release_conn()
def apply(self):
tempdir = tempfile.mkdtemp('patching')
for p in self.patches.values():
for f in p['files']:
filePath = tempdir + '/' + f
fileDir = os.path.dirname(filePath)
if not os.path.exists(fileDir):
os.makedirs(fileDir)
self.copy_from_docker(p['prefix'] + '/' + f, filePath)
self.patchIt(p['patch'], tempdir)
def patchIt(self, patchFile, tempdir):
with open(patchFile, "r") as f:
subprocess.Popen(shlex.split('patch -p0 -d ' + tempdir), stdin=f)

View File

@ -1,223 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuelclient.objects import environment as environment_obj
from octane.util import env as env_util
from octane.util import ssh
LOG = logging.getLogger(__name__)
def get_endpoint_ip(ep_name, yaml_data):
endpoint = yaml_data['network_scheme']['endpoints'].get(ep_name)
if not endpoint:
return None
net_data = endpoint["IP"][0]
if net_data:
return net_data.split('/')[0]
def get_glance_password(yaml_data):
return yaml_data['glance']['user_password']
def parse_swift_out(output, field):
for line in output.splitlines()[1:-1]:
parts = line.split(': ')
if parts[0].strip() == field:
return parts[1]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_swift_objects(node, tenant, user, password, token, container):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} list {4}".format(tenant,
user,
password,
token,
container)
objects_list = ssh.call_output(["sh", "-c", cmd], node=node)
return objects_list.split('\n')[:-1]
def get_object_property(node, tenant, user, password, token, container,
object_id, prop):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} stat {4} {5}"\
.format(tenant,
user,
password,
token,
container,
object_id)
object_data = ssh.call_output(["sh", "-c", cmd], node=node)
return parse_swift_out(object_data, prop)
def get_auth_token(node, tenant, user, password):
cmd = ". /root/openrc; keystone --os-tenant-name {0}"\
" --os-username {1} --os-password {2} token-get".format(tenant,
user,
password)
token_info = ssh.call_output(["sh", "-c", cmd], node=node)
return env_util.parse_tenant_get(token_info, 'id')
def download_image(node, tenant, user, password, token, container, object_id):
cmd = ". /root/openrc; swift --os-project-name {0} --os-username {1}"\
" --os-password {2} --os-auth-token {3} download {4} {5}"\
.format(tenant,
user,
password,
token,
container,
object_id)
ssh.call(["sh", "-c", cmd], node=node)
LOG.info("Swift %s image has been downloaded", object_id)
def delete_image(node, tenant, user, password, token, container, object_id):
cmd = ". /root/openrc; swift --os-project-name {0}"\
" --os-username {1} --os-password {2} --os-auth-token {3}"\
" delete {4} {5}".format(tenant, user, password, token,
container, object_id)
ssh.call(["sh", "-c", cmd], node=node)
LOG.info("Swift %s image has been deleted", object_id)
def transfer_image(node, tenant, user, password, token, container, object_id,
storage_ip, tenant_id):
storage_url = "http://{0}:8080/v1/AUTH_{1}".format(storage_ip, tenant_id)
cmd = ['swift', '--os-project-name', tenant, '--os-username', user,
'--os-password', password, '--os-auth-token', token,
'--os-storage-url', storage_url, 'upload', container,
object_id]
ssh.call(cmd, node=node)
LOG.info("Swift %s image has been transferred", object_id)
def create_container(node, tenant, user, password, token, container):
cmd = ". /root/openrc; swift --os-project-name {0}"\
" --os-username {1} --os-password {2} --os-auth-token {3}"\
" post {4}".format(tenant, user, password, token, container)
ssh.call(["sh", "-c", cmd], node=node)
def sync_glance_images(source_env_id, seed_env_id, seed_swift_ep):
"""Sync glance images from original ENV to seed ENV
Args:
source_env_id (int): ID of original ENV.
seed_env_id (int): ID of seed ENV.
seed_swift_ep (str): name of the endpoint the swift-proxy
service listens on.
Examples:
sync_glance_images(2, 3, 'br-mgmt')
"""
# set glance username
glance_user = "glance"
# set swift container value
container = "glance"
# choose tenant
tenant = "services"
# get clusters by id
source_env = environment_obj.Environment(source_env_id)
seed_env = environment_obj.Environment(seed_env_id)
# gather cics admin IPs
source_node = next(env_util.get_controllers(source_env))
seed_node = next(env_util.get_controllers(seed_env))
# get cics yaml files
source_yaml = env_util.get_astute_yaml(source_env, source_node)
seed_yaml = env_util.get_astute_yaml(seed_env, seed_node)
# get glance passwords
source_glance_pass = get_glance_password(source_yaml)
seed_glance_pass = get_glance_password(seed_yaml)
# get seed node swift ip
seed_swift_ip = get_endpoint_ip(seed_swift_ep, seed_yaml)
# get service tenant id & lists of objects for source env
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
source_swift_list = set(get_swift_objects(source_node,
tenant,
glance_user,
source_glance_pass,
source_token,
container))
# get service tenant id & lists of objects for seed env
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
# to be sure that glance container is present for seed env
create_container(seed_node, tenant, glance_user, seed_glance_pass,
seed_token, container)
seed_swift_list = set(get_swift_objects(seed_node,
tenant,
glance_user,
seed_glance_pass,
seed_token,
container))
# get service tenant for seed env
seed_tenant = env_util.get_service_tenant_id(seed_env)
# check consistency of matched images
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
for image in source_swift_list & seed_swift_list:
source_obj_etag = get_object_property(source_node,
tenant,
glance_user,
source_glance_pass,
source_token,
container,
image,
'ETag')
seed_obj_etag = get_object_property(seed_node, tenant,
glance_user, seed_glance_pass,
seed_token, container, image,
'ETag')
if source_obj_etag != seed_obj_etag:
# image should be resynced
delete_image(seed_node, tenant, glance_user, seed_glance_pass,
seed_token, container, image)
LOG.info("Swift %s image should be resynced", image)
seed_swift_list.remove(image)
# migrate new images
for image in source_swift_list - seed_swift_list:
# download image on source's node local drive
source_token = get_auth_token(source_node, tenant, glance_user,
source_glance_pass)
download_image(source_node, tenant, glance_user, source_glance_pass,
source_token, container, image)
# transfer image
source_token = get_auth_token(source_node, tenant,
glance_user, source_glance_pass)
seed_token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
transfer_image(source_node, tenant, glance_user, seed_glance_pass,
seed_token, container, image, seed_swift_ip,
seed_tenant)
# remove transferred image
ssh.sftp(source_node).remove(image)
# delete outdated images
for image in seed_swift_list - source_swift_list:
token = get_auth_token(seed_node, tenant, glance_user,
seed_glance_pass)
delete_image(seed_node, tenant, glance_user, seed_glance_pass,
token, container, image)

View File

@ -12,22 +12,21 @@
import os.path
PACKAGES = ["postgresql.x86_64", "pssh", "patch", "python-pip"]
PATCHES = []
# TODO: use pkg_resources for patches
CWD = os.path.dirname(__file__) # FIXME
FUEL_CACHE = "/tmp" # TODO: we shouldn't need this
PUPPET_DIR = "/etc/puppet/modules"
NAILGUN_ARCHIVATOR_PATCHES = [
(
"nailgun",
os.path.join(PUPPET_DIR, "nailgun/manifests/"),
os.path.join(CWD, "patches/timeout.patch")
),
]
NAILGUN_ARCHIVATOR_PATCHES = (
PUPPET_DIR,
os.path.join(CWD, "patches/timeout.patch"),
)
BOOTSTRAP_INITRAMFS = "/var/www/nailgun/bootstrap/initramfs.img"
PUPPET_TASKS_DIR = os.path.join(PUPPET_DIR, 'fuel/examples')
PUPPET_APPLY_TASKS_SCRIPT = os.path.join(PUPPET_TASKS_DIR, 'deploy.sh')
SSH_KEYS = ['/root/.ssh/id_rsa', '/root/.ssh/bootstrap.rsa']
OS_SERVICES = ["nova", "keystone", "heat", "neutron", "cinder", "glance"]
BRIDGES = ['br-ex', 'br-mgmt']
@ -48,20 +47,33 @@ NAILGUN_URL = "http://127.0.0.1:8000"
KEYSTONE_API_URL = "http://127.0.0.1:5000/v2.0"
KEYSTONE_TENANT_NAME = "admin"
SYNC_CONTAINERS = []
OPENSTACK_FIXTURES = "/usr/share/fuel-openstack-metadata/openstack.yaml"
RUNNING_REQUIRED_CONTAINERS = [
"postgres",
"rabbitmq",
"keystone",
"rsync",
"astute",
"rsyslog",
"nailgun",
"ostf",
"nginx",
"cobbler",
"mcollective",
OSD_REPOS_UPDATE = [
# ("path", "content")
(
"/etc/apt/sources.list.d/mos.list",
"deb http://{admin_ip}:8080/liberty-8.0/ubuntu/x86_64 "
"mos8.0 main restricted"
),
(
"/etc/apt/sources.list.d/mos-updates.list",
'deb http://{admin_ip}:8080/ubuntu/x86_64/ mos8.0 main restricted',
),
]
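
The {admin_ip} placeholder implies these entries are rendered with str.format before being written to the OSD nodes; a minimal sketch (the write itself is elided):

admin_ip = "10.20.0.2"  # illustrative master node address
for path, content in OSD_REPOS_UPDATE:
    rendered = content.format(admin_ip=admin_ip)
    print(path, "->", rendered)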
COBBLER_DROP_VERSION = "7.0"
MIRRORS_EXTRA_DIRS = ["ubuntu-full", "mos-ubuntu"]
RELEASE_STATUS_ENABLED = "available"
RELEASE_STATUS_MANAGED = "manageonly"
UPGRADE_NODE_PATCHES = [
os.path.join(CWD, "patches/puppet/fix_mysql.patch")
]
OPENSTACK_FIXTURES = "/usr/share/fuel-openstack-metadata/openstack.yaml"
BOOTSTRAP_UNSUPPORTED_IMAGES = ["centos"]
# NOTE(ogelbukh): it was agreed that 10MB is enough for config drive partition
CONFIGDRIVE_PART_SIZE = 10
KEYSTONE_CONF = "/etc/keystone/keystone.conf"
KEYSTONE_PASTE = "/etc/keystone/keystone-paste.ini"

View File

@ -1,26 +1,13 @@
diff --git a/fuel_agent/drivers/nailgun.py b/fuel_agent/drivers/nailgun.py
index 8f5f630..f5209b5 100644
--- usr/lib/python2.6/site-packages/fuel_agent/drivers/nailgun.py
+++ usr/lib/python2.6/site-packages/fuel_agent/drivers/nailgun.py
@@ -258,9 +258,9 @@ class Nailgun(BaseDataDriver):
disk['name'])
parted.add_partition(size=24, flags=['bios_grub'])
# uefi partition (for future use)
- LOG.debug('Adding UEFI partition on disk %s: size=200' %
- disk['name'])
- parted.add_partition(size=200)
+ #LOG.debug('Adding UEFI partition on disk %s: size=200' %
+ # disk['name'])
+ #parted.add_partition(size=200)
--- usr/lib/python2.7/dist-packages/fuel_agent/drivers/nailgun.py
+++ usr/lib/python2.7/dist-packages/fuel_agent/drivers/nailgun.py
@@ -321,10 +321,6 @@
LOG.debug('Adding bios_grub partition on disk %s: size=24' %
disk['name'])
parted.add_partition(size=24, flags=['bios_grub'])
- # uefi partition (for future use)
- LOG.debug('Adding UEFI partition on disk %s: size=200' %
- disk['name'])
- parted.add_partition(size=200)
LOG.debug('Looping over all volumes on disk %s' % disk['name'])
for volume in disk['volumes']:
@@ -480,7 +480,7 @@ class Nailgun(BaseDataDriver):
configdrive_scheme.set_common(
ssh_auth_keys=ssh_auth_keys,
- hostname=data['hostname'],
+ hostname=data['hostname'].split('.')[0],
fqdn=data['hostname'],
name_servers=data['name_servers'],
search_domain=data['name_servers_search'],

View File

@ -0,0 +1,72 @@
diff --git a/deployment/puppet/ceph/manifests/osds.pp b/deployment/puppet/ceph/manifests/osds.pp
index 3281415..4d2b31e 100644
--- /ceph/manifests/osds.pp
+++ /ceph/manifests/osds.pp
@@ -1,8 +1,28 @@
-# prepare and bring online the devices listed in $::ceph::osd_devices
+# == Class: ceph::osd
+#
+# Prepare and bring online the OSD devices
+#
+# ==== Parameters
+#
+# [*devices*]
+# (optional) Array. This is the list of OSD devices identified by the facter.
+#
class ceph::osds (
$devices = $::ceph::osd_devices,
){
+ exec { 'udevadm trigger':
+ command => 'udevadm trigger',
+ returns => 0,
+ logoutput => true,
+ } ->
+
+ exec {'ceph-disk activate-all':
+ command => 'ceph-disk activate-all',
+ returns => [0, 1],
+ logoutput => true,
+ } ->
+
firewall { '011 ceph-osd allow':
chain => 'INPUT',
dport => '6800-7100',
@@ -11,5 +31,4 @@ class ceph::osds (
} ->
ceph::osds::osd{ $devices: }
-
}
diff --git a/deployment/puppet/ceph/manifests/osds/osd.pp b/deployment/puppet/ceph/manifests/osds/osd.pp
index b8fd18e..153b84d 100644
--- /ceph/manifests/osds/osd.pp
+++ /ceph/manifests/osds/osd.pp
@@ -1,3 +1,7 @@
+# == Define: ceph::osds::osd
+#
+# Prepare and activate OSD nodes on the node
+#
define ceph::osds::osd () {
# ${name} format is DISK[:JOURNAL]
@@ -18,8 +22,8 @@ define ceph::osds::osd () {
tries => 2, # This is necessary because of race for mon creating keys
try_sleep => 1,
logoutput => true,
- unless => "grep -q ${data_device_name} /proc/mounts",
- } ->
+ unless => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, active' -e '${data_device_name} ceph data, prepared'",
+ } -> Exec["ceph-deploy osd activate ${deploy_device_name}"]
exec { "ceph-deploy osd activate ${deploy_device_name}":
command => "ceph-deploy osd activate ${deploy_device_name}",
@@ -27,7 +31,7 @@ define ceph::osds::osd () {
tries => 3,
logoutput => true,
timeout => 0,
- unless => "ceph osd dump | grep -q \"osd.$(sed -nEe 's|${data_device_name}\\ .*ceph-([0-9]+).*$|\\1|p' /proc/mounts)\\ up\\ .*\\ in\\ \"",
+ onlyif => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, prepared'",
}
}

View File

@ -0,0 +1,64 @@
diff --git a/galera/manifests/init.pp b/galera/manifests/init.pp
index 7f25ab3..8fe8633 100644
--- a/galera/manifests/init.pp
+++ b/galera/manifests/init.pp
@@ -76,6 +76,11 @@
# size to exceed the value of this variable, the server rotates the
# binary logs (closes the current file and opens the next one). The
# minimum value is 4096 bytes. The maximum and default value is 512MB.
+#
+# [*ignore_db_dirs*]
+# (optional) array of directories to ignore in datadir.
+# Defaults to []
+#
class galera (
$cluster_name = 'openstack',
@@ -95,6 +100,7 @@ class galera (
$binary_logs_enabled = false,
$binary_logs_period = 1,
$binary_logs_maxsize = '512M',
+ $ignore_db_dirs = [],
) {
include galera::params
diff --git a/galera/templates/my.cnf.erb b/galera/templates/my.cnf.erb
index 253a8a2..e832d2c 100644
--- a/galera/templates/my.cnf.erb
+++ b/galera/templates/my.cnf.erb
@@ -1,10 +1,10 @@
-# #[mysqld]
-# #datadir=/var/lib/mysql
-# #socket=/var/lib/mysql/mysql.sock
-# #user=mysql
-# # Disabling symbolic-links is recommended to prevent assorted security risks
-# #symbolic-links=0
+<% if @ignore_db_dirs and (@ignore_db_dirs.length > 0) -%>
+[mysqld]
+ <%- @ignore_db_dirs.each do |directory| -%>
+ignore-db-dir=<%= directory %>
+ <%- end -%>
+<% end -%>
[mysqld_safe]
<% if @use_syslog -%>
syslog
@@ -12,6 +12,4 @@ syslog
log-error=/var/log/mysqld.log
<% end -%>
-# pid-file=/var/run/mysqld.pid
-
!includedir /etc/mysql/conf.d/
diff --git a/mysql/manifests/server.pp b/mysql/manifests/server.pp
index 4cce092..a9f0ae9 100644
--- a/mysql/manifests/server.pp
+++ b/mysql/manifests/server.pp
@@ -227,6 +227,7 @@ class mysql::server (
skip_name_resolve => $mysql_skip_name_resolve,
use_syslog => $use_syslog,
wsrep_sst_password => $root_password,
+ ignore_db_dirs => $ignore_db_dirs,
}
}

View File

@ -1,12 +1,12 @@
diff --git a/deployment/puppet/nailgun/manifests/venv.pp b/deployment/puppet/nailgun/manifests/venv.pp
index 3313333..c383160 100644
--- venv.pp
+++ venv.pp
@@ -96,6 +96,7 @@ class nailgun::venv(
],
tries => 50,
try_sleep => 5,
+ timeout => 0,
}
exec {"nailgun_upload_fixtures":
command => "${venv}/bin/nailgun_fixtures",
diff --git a/fuel/manifests/nailgun/server.pp b/fuel/manifests/nailgun/server.pp
index a7624b0..1c9c831 100644
--- a/fuel/manifests/nailgun/server.pp
+++ b/fuel/manifests/nailgun/server.pp
@@ -116,6 +116,7 @@ class fuel::nailgun::server (
subscribe => File["/etc/nailgun/settings.yaml"],
tries => 50,
try_sleep => 5,
+ timeout => 0,
}
exec {"nailgun_upload_fixtures":

View File

@ -24,7 +24,11 @@ if not nv:
volumes = nv.volumes
os_vg = next(disk for disk in volumes if 'id' in disk and disk['id'] == 'os')
try:
os_vg = next(
disk for disk in volumes if 'id' in disk and disk['id'] == 'os')
except StopIteration:
sys.exit(0)
volumes = [disk for disk in volumes if 'id' not in disk or disk['id'] != 'os']
for disk in volumes:

View File

@ -25,7 +25,6 @@ from octane.handlers.backup_restore import postgres
from octane.handlers.backup_restore import puppet
from octane.handlers.backup_restore import ssh
from octane.handlers.backup_restore import version
from octane.util import subprocess
@pytest.mark.parametrize("cls,path,name", [
@ -45,54 +44,61 @@ def test_path_backup(mocker, cls, path, name):
@pytest.mark.parametrize(
"cls,banned_files,backup_directory,allowed_files, container", [
"cls,banned_files,backup_directory,allowed_files,backup_name", [
(
cobbler.CobblerArchivator,
cobbler.CobblerSystemArchivator,
["default.json"],
"/var/lib/cobbler/config/systems.d/",
None,
"cobbler"
"cobbler",
),
(
cobbler.CobblerProfileArchivator,
["bootstrap.json", "ubuntu_bootstrap.json"],
"/var/lib/cobbler/config/profiles.d/",
None,
"cobbler_profiles",
),
(
cobbler.CobblerDistroArchivator,
["bootstrap.json", "ubuntu_bootstrap.json"],
"/var/lib/cobbler/config/distros.d/",
None,
"cobbler_distros",
),
])
def test_container_backup(
mocker, cls, banned_files, backup_directory, allowed_files, container):
test_archive = mocker.Mock()
data_lst = (banned_files or []) + (allowed_files or []) + ["tmp1", "tmp2"]
stdout_data_lst = [os.path.join(backup_directory, f) for f in data_lst]
data = " ".join(stdout_data_lst)
docker_mock = mocker.patch(
"octane.util.docker.run_in_container",
return_value=(data, None))
def test_path_filter_backup(mocker, cls, banned_files, backup_directory,
allowed_files, backup_name):
def foo(path, path_in_archive):
assert path.startswith(backup_directory)
assert path_in_archive.startswith(backup_name)
filename = path[len(backup_directory):].lstrip(os.path.sep)
filename_in_archive = \
path_in_archive[len(backup_name):].lstrip(os.path.sep)
assert filename == filename_in_archive
backuped_files.add(filename)
def foo(archive, container_name, cmd, backup_dir):
assert archive is test_archive
assert container == container_name
_, path = cmd
assert _ == "cat"
assert path[:len(backup_directory)] == backup_directory
assert backup_dir[:len(container)] == container
filename = path[len(backup_directory):].strip("\/")
backuped_files.add(path[len(backup_directory):])
assert filename == backup_dir[len(container):].strip("\/")
mocker.patch("octane.util.archivate.archivate_container_cmd_output",
side_effect=foo)
files_to_archive = data_lst
files_to_archive = [d for d in files_to_archive
if d in (allowed_files or [])]
files_to_archive = [d for d in files_to_archive
if d not in (banned_files or [])]
filenames = banned_files + (allowed_files or []) + ["tmp1", "tmp2"]
files_to_archive = filenames
if allowed_files:
files_to_archive = [d for d in files_to_archive if d in allowed_files]
files_to_archive = [d for d in files_to_archive if d not in banned_files]
backuped_files = set()
test_archive = mocker.Mock()
test_archive.add.side_effect = foo
mock_os_walk = mocker.patch("os.walk")
mock_os_walk.return_value = [(backup_directory, (), filenames)]
cls(test_archive).backup()
docker_mock.assert_called_once_with(
container,
["find", backup_directory, "-type", "f"],
stdout=subprocess.PIPE
)
mock_os_walk.assert_called_once_with(backup_directory)
for filename in files_to_archive:
assert filename in backuped_files
for filename in set(filenames) - set(files_to_archive):
assert filename not in backuped_files
@pytest.mark.parametrize("cls,db", [
@ -102,11 +108,10 @@ def test_container_backup(
def test_posgres_archivator(mocker, cls, db):
test_archive = mocker.Mock()
archive_mock = mocker.patch(
"octane.util.archivate.archivate_container_cmd_output")
"octane.util.archivate.archivate_cmd_output")
cls(test_archive).backup()
archive_mock.assert_called_once_with(
test_archive,
"postgres",
["sudo", "-u", "postgres", "pg_dump", "-C", db],
"postgres/{0}.sql".format(db))
@ -144,11 +149,13 @@ def test_nailgun_plugins_backup(mocker, path_exists):
"mirrors",
"select editable from attributes;",
"127.0.0.1",
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}',
[
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}'
],
["test_fest", "test_fest_2"]
),
(
@ -156,16 +163,18 @@ def test_nailgun_plugins_backup(mocker, path_exists):
"mirrors",
"select editable from attributes;",
"127.0.0.1",
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}\n'
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_3"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}',
[
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}',
'{"repo_setup": {"repos": {"value": ['
'{"uri": "http://127.0.0.1:8080/test_fest"},'
'{"uri": "http://127.0.0.1:8080/test_fest_3"},'
'{"uri": "http://127.0.0.1:8080/test_fest_2"}'
']}}}',
],
["test_fest", "test_fest_2", "test_fest_3"]
),
(
@ -181,12 +190,14 @@ def test_nailgun_plugins_backup(mocker, path_exists):
"repos",
"select generated from attributes;",
"127.0.0.1",
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest_5"}'
'}}}',
[
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest_5"}'
'}}}'
],
['test_fest', 'test_fest_2', 'test_fest_3', "test_fest_5"]
),
(
@ -194,18 +205,20 @@ def test_nailgun_plugins_backup(mocker, path_exists):
"repos",
"select generated from attributes;",
"127.0.0.1",
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest"}'
'}}}\n'
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest_5"}'
'}}}',
[
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest"}'
'}}}',
'{"provision": {"image_data": {'
'"1": {"uri": "http://127.0.0.1:8080/test_fest"},'
'"2": {"uri": "http://127.0.0.1:8080/test_fest_2"},'
'"3": {"uri": "http://127.0.0.1:8080/test_fest_3"},'
'"4": {"uri": "http://127.0.0.1:8080/test_fest_5"}'
'}}}',
],
['test_fest', 'test_fest_2', 'test_fest_3', "test_fest_5"]
),
(
@ -224,25 +237,14 @@ def test_repos_backup(
yaml_mocker = mocker.patch(
"yaml.load",
return_value={"ADMIN_NETWORK": {"ipaddress": "127.0.0.1"}})
docker_mock = mocker.patch("octane.util.docker.run_in_container")
sql_mock = mocker.patch("octane.util.sql.run_psql")
test_archive = mocker.Mock()
path = "/var/www/nailgun/"
docker_mock.return_value = sql_output, None
sql_mock.return_value = sql_output
cls(test_archive).backup()
yaml_mocker.assert_called_once_with(mock_open.return_value)
docker_mock.assert_called_once_with(
"postgres", [
"sudo",
"-u",
"postgres",
"psql",
"nailgun",
"--tuples-only",
"-c",
sql
],
stdout=subprocess.PIPE
)
sql_mock.assert_called_once_with(sql, "nailgun")
test_archive.add.assert_has_calls(
[
mock.call(os.path.join(path, i), os.path.join(name, i))

View File

@ -9,10 +9,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import mock
import os
import pytest
import yaml
from keystoneclient.v2_0 import Client as keystoneclient
@ -21,9 +21,11 @@ from octane.handlers.backup_restore import astute
from octane.handlers.backup_restore import cobbler
from octane.handlers.backup_restore import fuel_keys
from octane.handlers.backup_restore import fuel_uuid
from octane.handlers.backup_restore import logs
from octane.handlers.backup_restore import mirrors
from octane.handlers.backup_restore import postgres
from octane.handlers.backup_restore import puppet
from octane.handlers.backup_restore import release
from octane.handlers.backup_restore import ssh
from octane.handlers.backup_restore import version
from octane import magic_consts
@ -39,6 +41,7 @@ class TestMember(object):
self.path = ''
self.is_extracted = False
self.dump = ""
self.read_idx = 0
def isfile(self):
return self.is_file
@ -48,8 +51,13 @@ class TestMember(object):
if self.is_extracted and path:
assert os.path.join(path, "/") == os.path.join(self.path, "/")
def read(self):
return self.dump
def read(self, chunk_size=None):
current_idx = self.read_idx
if chunk_size:
self.read_idx += chunk_size
else:
self.read_idx = len(self.dump)
return self.dump[current_idx: self.read_idx]
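
The chunked read above lets TestMember stand in for a file object during shutil.copyfileobj; for example, using the constructor arguments seen elsewhere in these tests (name, is_file, is_extracted):

member = TestMember("postgres/nailgun.sql", True, True)
member.dump = "abcdef"
assert member.read(4) == "abcd"  # a chunk advances read_idx
assert member.read() == "ef"     # no chunk size: read the rest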
class TestArchive(object):
@ -137,7 +145,13 @@ class TestArchive(object):
),
])
def test_path_restore(mocker, cls, path, members):
fake_uuids = ['00000000-1111-2222-3333-444444444444', 'centos']
subprocess_mock = mocker.patch("octane.util.subprocess.call")
get_images = mocker.patch(
"octane.util.fuel_bootstrap.get_not_active_images_uuids",
return_value=fake_uuids)
delete_image = mocker.patch("octane.util.fuel_bootstrap.delete_image")
members = [TestMember(n, f, e) for n, f, e in members]
archive = TestArchive(members, cls)
mocker.patch("os.environ", new_callable=mock.PropertyMock(return_value={}))
@ -149,128 +163,161 @@ def test_path_restore(mocker, cls, path, members):
if cls is ssh.SshArchivator:
subprocess_mock.assert_called_once_with(
["fuel-bootstrap", "build", "--activate"],
env={'KEYSTONE_PASS': 'password', 'KEYSTONE_USER': 'user'})
env={'OS_PASSWORD': 'password', 'OS_USERNAME': 'user'})
get_images.assert_called_once_with()
delete_image.assert_called_once_with(fake_uuids[0])
else:
assert not subprocess_mock.called
@pytest.mark.parametrize("cls,path,container,members,mock_actions", [
@pytest.mark.parametrize("cls,path,backup_name,members", [
(
cobbler.CobblerArchivator,
cobbler.CobblerSystemArchivator,
"/var/lib/cobbler/config/systems.d/",
"cobbler",
[
("cobbler/file", True, True),
("cobbler/dir/file", True, True),
],
),
(
cobbler.CobblerDistroArchivator,
"/var/lib/cobbler/config/distros.d/",
"cobbler_distros",
[
("octane.util.docker.stop_container", "cobbler"),
("octane.util.docker.start_container", "cobbler")
]
("cobbler_distros/file", True, True),
("cobbler_distros/dir/file", True, True),
],
),
(
cobbler.CobblerProfileArchivator,
"/var/lib/cobbler/config/profiles.d/",
"cobbler_profiles",
[
("cobbler_profiles/file", True, True),
("cobbler_profiles/dir/file", True, True),
],
),
])
def test_container_archivator(
mocker, cls, path, container, members, mock_actions):
docker = mocker.patch("octane.util.docker.write_data_in_docker_file")
extra_mocks = [(mocker.patch(n), p) for n, p in mock_actions]
def test_path_filter_restore(mocker, cls, path, backup_name, members):
members = [TestMember(n, f, e) for n, f, e in members]
archive = TestArchive(members, cls)
cls(archive).restore()
for member in members:
member.assert_extract()
path_restor = member.name[len(container) + 1:]
docker.assert_has_calls([
mock.call(container, os.path.join(path, path_restor), member.dump)
])
for extra_mock, param in extra_mocks:
extra_mock.assert_called_once_with(param)
@pytest.mark.parametrize("cls,db,sync_db_cmd,mocked_action_name", [
def test_cobbler_archivator(mocker, mock_subprocess):
mocker.patch.object(cobbler.CobblerSystemArchivator, "restore")
mocker.patch.object(cobbler.CobblerDistroArchivator, "restore")
mocker.patch.object(cobbler.CobblerProfileArchivator, "restore")
mock_puppet = mocker.patch("octane.util.puppet.apply_task")
cobbler.CobblerArchivator(mock.Mock(), mock.Mock()).restore()
mock_subprocess.assert_called_once_with(
["systemctl", "stop", "cobblerd"])
mock_puppet.assert_called_once_with("cobbler")
def test_databases_archivator(mocker):
mock_call = mock.Mock()
mocker.patch.object(postgres.NailgunArchivator, "restore",
new=mock_call.nailgun.restore)
mocker.patch.object(postgres.KeystoneArchivator, "restore",
new=mock_call.keystone.restore)
mocker.patch("octane.util.puppet.apply_task",
new=mock_call.puppet.apply_task)
archivator = postgres.DatabasesArchivator(mock.Mock(), mock.Mock())
archivator.restore()
assert mock_call.mock_calls == [
mock.call.puppet.apply_task("postgresql"),
mock.call.keystone.restore(),
mock.call.nailgun.restore(),
]
@pytest.mark.parametrize("cls,db,services", [
(
postgres.NailgunArchivator,
"nailgun",
["nailgun_syncdb"],
"_post_restore_action",
[
"nailgun",
"oswl_flavor_collectord",
"oswl_image_collectord",
"oswl_keystone_user_collectord",
"oswl_tenant_collectord",
"oswl_vm_collectord",
"oswl_volume_collectord",
"receiverd",
"statsenderd",
"assassind",
],
),
(
postgres.KeystoneArchivator,
"keystone",
["keystone-manage", "db_sync"],
None
["openstack-keystone"],
),
])
def test_postgres_restore(mocker, cls, db, sync_db_cmd, mocked_action_name):
patch_mock = mocker.patch("octane.util.docker.apply_patches")
if mocked_action_name:
mocked_action = mocker.patch.object(cls, mocked_action_name)
def test_postgres_restore(mocker, cls, db, services):
member = TestMember("postgres/{0}.sql".format(db), True, True)
archive = TestArchive([member], cls)
actions = []
def foo(action):
return_mock_object = mocker.Mock()
mock_keystone = mock.Mock()
mocker.patch("octane.util.keystone.unset_default_domain_id",
new=mock_keystone.unset)
mocker.patch("octane.util.keystone.add_admin_token_auth",
new=mock_keystone.add)
def mock_foo(*args, **kwargs):
actions.append(action)
return return_mock_object
mock_foo.return_value = return_mock_object
return mock_foo
mock_subprocess = mock.MagicMock()
mocker.patch("octane.util.subprocess.call", new=mock_subprocess.call)
mocker.patch("octane.util.subprocess.popen", new=mock_subprocess.popen)
call_mock = mocker.patch("octane.util.subprocess.call",
side_effect=foo("call"))
in_container_mock = mocker.patch("octane.util.docker.in_container")
side_effect_in_container = foo("in_container")
in_container_mock.return_value.__enter__.side_effect = \
side_effect_in_container
run_in_container = mocker.patch(
"octane.util.docker.run_in_container",
side_effect=foo("run_in_container"))
mocker.patch("octane.util.docker.stop_container",
side_effect=foo("stop_container"))
mocker.patch("octane.util.docker.start_container",
side_effect=foo("start_container"))
mocker.patch("octane.util.docker.wait_for_container",
side_effect=foo("wait_for_container"))
cls(archive).restore()
mock_patch = mocker.patch("octane.util.patch.applied_patch")
mock_copyfileobj = mocker.patch("shutil.copyfileobj")
mock_set_astute_password = mocker.patch(
"octane.util.auth.set_astute_password")
mock_apply_task = mocker.patch("octane.util.puppet.apply_task")
mock_context = mock.Mock()
cls(archive, mock_context).restore()
member.assert_extract()
args = ["call", "stop_container", "run_in_container", "in_container",
"start_container", "wait_for_container", "call"]
assert args == actions
if cls is postgres.NailgunArchivator:
assert [
mock.call(
'nailgun',
'/etc/puppet/modules/nailgun/manifests/',
os.path.join(magic_consts.CWD, "patches/timeout.patch")
),
mock.call(
'nailgun',
'/etc/puppet/modules/nailgun/manifests/',
os.path.join(magic_consts.CWD, "patches/timeout.patch"),
revert=True
),
] == patch_mock.call_args_list
else:
assert not patch_mock.called
call_mock.assert_has_calls([
mock.call(["systemctl", "stop", "docker-{0}.service".format(db)]),
mock.call(["systemctl", "start", "docker-{0}.service".format(db)])
])
in_container_mock.assert_called_once_with(
"postgres",
["sudo", "-u", "postgres", "psql"],
stdin=subprocess.PIPE
assert mock_subprocess.mock_calls == [
mock.call.call(["systemctl", "stop"] + services),
mock.call.call(["sudo", "-u", "postgres", "dropdb", "--if-exists",
db]),
mock.call.popen(["sudo", "-u", "postgres", "psql"],
stdin=subprocess.PIPE),
mock.call.popen().__enter__(),
mock.call.popen().__exit__(None, None, None),
]
mock_copyfileobj.assert_called_once_with(
member,
mock_subprocess.popen.return_value.__enter__.return_value.stdin,
)
run_in_container.assert_has_calls([
mock.call("postgres",
["sudo", "-u", "postgres", "dropdb", "--if-exists", db]),
])
side_effect_in_container.return_value.stdin.write.assert_called_once_with(
member.dump)
if mocked_action_name:
mocked_action.assert_called_once_with()
mock_apply_task.assert_called_once_with(db)
if cls is postgres.NailgunArchivator:
assert mock_patch.call_args_list == [
mock.call(
'/etc/puppet/modules',
os.path.join(magic_consts.CWD, "patches/timeout.patch"),
),
]
assert not mock_keystone.called
else:
assert not mock_patch.called
assert mock_keystone.mock_calls == [
mock.call.unset("/etc/keystone/keystone.conf"),
mock.call.add("/etc/keystone/keystone-paste.ini", [
"pipeline:public_api",
"pipeline:admin_api",
"pipeline:api_v3",
]),
]
mock_set_astute_password.assert_called_once_with(mock_context)
@pytest.mark.parametrize("keys_in_dump_file,restored", [
@ -413,22 +460,20 @@ def test_astute_restore(mocker, mock_open, keys_in_dump_file, restored):
mocker.patch("yaml.load", side_effect=[dump_dict, current_dict])
safe_dump = mocker.patch("yaml.safe_dump")
copy_mock = mocker.patch("shutil.copy")
copy_mock = mocker.patch("shutil.copy2")
move_mock = mocker.patch("shutil.move")
mock_puppet = mocker.patch("octane.util.puppet.apply_task")
cls = astute.AstuteArchivator
archive = TestArchive([member], cls)
post_restore_mock = mocker.patch.object(cls, "_post_restore_action")
try:
cls(archive).restore()
except Exception as exc:
if restored:
raise
assert str(exc).startswith("Not found values in backup for keys: ")
assert not post_restore_mock.called
else:
assert restored
member.assert_extract()
post_restore_mock.assert_called_once_with()
copy_mock.assert_called_once_with(
"/etc/fuel/astute.yaml", "/etc/fuel/astute.yaml.old")
move_mock.assert_called_once_with(
@ -436,140 +481,90 @@ def test_astute_restore(mocker, mock_open, keys_in_dump_file, restored):
safe_dump.assert_called_once_with(dict_to_restore,
mock_open.return_value,
default_flow_style=False)
assert mock_puppet.mock_calls == [
mock.call("hiera"),
mock.call("host"),
]
def test_post_restore_action_astute(mocker):
stopped = []
mocker.patch(
"octane.util.docker.get_docker_container_names",
return_value=["container_1", "container_2"]
)
start = mocker.patch("octane.util.docker.start_container",
side_effect=stopped.remove)
stop = mocker.patch("octane.util.docker.stop_container",
side_effect=stopped.append)
astute.AstuteArchivator(None)._post_restore_action()
assert start.called
assert stop.called
assert not stopped
FAKE_OPENSTACK_YAML = """\
---
- &base_release
fields: {"k": 0, "p": 2}
- pk: 1
extend: *base_release
fields: {"version": 1, "name": "first", "k": 1}
- &release2
pk: 2
extend: *base_release
fields: {"version": 1, "name": "second", "k": 2}
- pk: 3
extend: *release2
fields: {"name": "third", "p": 3}
"""
@pytest.mark.parametrize("dump, calls, data_for_update", [
@pytest.mark.parametrize(("content", "existing_releases", "calls"), [
(
[{"fields": {"k": 1, "p": 2}}, {"fields": {}}, {"fields": {"k": 3}}],
[{"p": 2, "k": 1}, {"p": 2, "k": 3}],
"1|{}",
),
(
[
{"fields": {"k": 1, "p": 2, "c": {"k": 1, "p": {"a": 1}}}},
{"fields": {}},
{"fields": {"k": 3, "c": {"k": 3, "p": {"c": 4}}}},
],
[
{"p": 2, "c": {"p": {"a": 1}, "k": 1}, "k": 1},
{'p': 2, 'c': {'p': {'a': 1, 'c': 4}, 'k': 3}, 'k': 3},
],
"1|{}",
FAKE_OPENSTACK_YAML,
[{"version": 1, "name": "second"}],
[{"version": 1, "name": "first", "k": 1, "p": 2},
{"version": 1, "name": "third", "k": 2, "p": 3}],
),
])
def test_post_restore_nailgun(mocker, mock_open, dump, calls, data_for_update):
mock_links = mocker.patch.object(
postgres.NailgunArchivator, "_create_links_on_remote_logs")
data = yaml.dump(dump)
def test_release_restore(mocker, mock_open, content, existing_releases, calls):
mock_open.return_value = io.BytesIO(content)
mock_subprocess_call = mocker.patch("octane.util.subprocess.call")
run_in_container_mock = mocker.patch(
"octane.util.docker.run_in_container", return_value=(data, None))
run_sql_mock = mocker.patch.object(
postgres.NailgunArchivator,
"_run_sql_in_container",
return_value=[data_for_update]
)
json_mock = mocker.patch("json.dumps")
token = "123"
fake_token = "123"
def mock_init(self, *args, **kwargs):
self.auth_token = token
self.auth_token = fake_token
mocker.patch.object(keystoneclient, "__init__", mock_init)
post_data = mocker.patch("requests.post")
mock_request = mocker.patch("requests.request")
mock_request.return_value.json.return_value = existing_releases
mocker.patch("os.environ", new_callable=mock.PropertyMock(return_value={}))
postgres.NailgunArchivator(
release.ReleaseArchivator(
None,
backup_restore.NailgunCredentialsContext(
user="admin", password="password")
)._post_restore_action()
).restore()
headers = {
"X-Auth-Token": token,
"X-Auth-Token": fake_token,
"Content-Type": "application/json"
}
post_url = 'http://127.0.0.1:8000/api/v1/releases/'
post_call = mock.call(post_url, json_mock.return_value, headers=headers)
for call in post_data.call_args_list:
assert post_call == call
json_mock.assert_has_calls([mock.call(d) for d in calls], any_order=True)
assert json_mock.call_count == 3
url = 'http://127.0.0.1:8000/api/v1/releases/'
expected_calls = [
mock.call("GET", url, json=None, headers=headers)
] + [
mock.call("POST", url, json=call, headers=headers)
for call in calls
]
assert mock_request.call_args_list == expected_calls
mock_subprocess_call.assert_called_once_with([
"fuel", "release", "--sync-deployment-tasks", "--dir", "/etc/puppet/"],
env={'KEYSTONE_PASS': 'password', 'KEYSTONE_USER': 'admin'}
env={'OS_PASSWORD': 'password', 'OS_USERNAME': 'admin'}
)
run_in_container_mock.assert_called_once_with(
"nailgun",
["cat", magic_consts.OPENSTACK_FIXTURES],
stdout=subprocess.PIPE
)
json_mock.assert_called_with({"deployed_before": {"value": True}})
mock_links.assert_called_once_with()
run_sql_mock.assert_has_calls([
mock.call("select id, generated from attributes;"),
])
mock_open.assert_called_once_with(magic_consts.OPENSTACK_FIXTURES)
@pytest.mark.parametrize("exc_on_apply", [True, False])
def test_post_restore_puppet_apply_host(mocker, mock_open, exc_on_apply):
class TestException(Exception):
pass
mkstemp_mock = mocker.patch(
"tempfile.mkstemp",
return_value=(1, "/etc/fuel/.astute.yaml.bac"))
mock_copy = mocker.patch("shutil.copy")
mock_move = mocker.patch("shutil.move")
yaml_load = mocker.patch(
"yaml.load", return_value={"FUEL_ACCESS": {"password": "dump_pswd"}})
yaml_dump = mocker.patch("yaml.safe_dump")
def test_post_restore_puppet_apply_tasks(mocker, mock_subprocess):
context = backup_restore.NailgunCredentialsContext(
user="admin", password="user_pswd")
archivator = puppet.PuppetApplyHost(None, context)
if exc_on_apply:
mock_apply = mocker.patch(
"octane.util.puppet.apply_host",
side_effect=TestException("test exception"))
pytest.raises(TestException, archivator.restore)
else:
mock_apply = mocker.patch("octane.util.puppet.apply_host")
archivator.restore()
mock_set_astute_password = mocker.patch(
"octane.util.auth.set_astute_password")
mock_apply = mocker.patch("octane.util.puppet.apply_all_tasks")
archivator = puppet.PuppetApplyTasks(None, context)
archivator.restore()
mock_subprocess.assert_called_once_with(["systemctl", "stop", "ostf"])
assert mock_apply.called
assert mock_open.call_args_list == [
mock.call("/etc/fuel/astute.yaml"),
mock.call("/etc/fuel/astute.yaml", "w"),
]
yaml_load.assert_called_once_with(mock_open.return_value)
yaml_dump.assert_called_once_with(
{'FUEL_ACCESS': {'password': 'user_pswd'}},
mock_open.return_value,
default_flow_style=False)
mock_copy.assert_called_once_with("/etc/fuel/astute.yaml",
"/etc/fuel/.astute.yaml.bac")
mock_move.assert_called_once_with("/etc/fuel/.astute.yaml.bac",
"/etc/fuel/astute.yaml")
mkstemp_mock.assert_called_once_with(
dir="/etc/fuel", prefix=".astute.yaml.octane")
mock_set_astute_password.assert_called_once_with(context)
@pytest.mark.parametrize("nodes", [
@ -580,8 +575,8 @@ def test_post_restore_puppet_apply_host(mocker, mock_open, exc_on_apply):
])
@pytest.mark.parametrize("is_dir", [True, False])
@pytest.mark.parametrize("exception", [True, False])
def test_create_links_on_remote_logs(
mocker, mock_open, nodes, is_dir, exception):
def test_logs_restore(
mocker, mock_open, mock_subprocess, nodes, is_dir, exception):
domain_name = "test_domain"
mocker.patch("yaml.load", return_value={"DNS_DOMAIN": domain_name})
domain_names = []
@ -608,16 +603,14 @@ def test_create_links_on_remote_logs(
moved_nodes.append((node_domain_name, ip_addr))
is_link_mock = mocker.patch("os.path.islink", side_effect=is_link_exists)
mocker.patch("os.path.isdir", return_value=is_dir)
mocker.patch("fuelclient.objects.node.Node.get_all",
mocker.patch("fuelclient.objects.Node.get_all",
return_value=fuel_client_values)
run_in_container_mock = mocker.patch(
"octane.util.docker.run_in_container")
rename_mock = mocker.patch("os.rename")
symlink_mock = mocker.patch("os.symlink")
mkdir_mock = mocker.patch("os.mkdir")
context = backup_restore.NailgunCredentialsContext(
user="admin", password="user_pswd")
archivator = backup_restore.postgres.NailgunArchivator(None, context)
archivator = logs.LogsArchivator(None, context)
if not exception:
class TestException(Exception):
@ -625,12 +618,12 @@ def test_create_links_on_remote_logs(
is_link_mock.side_effect = TestException("test exc")
with pytest.raises(TestException):
archivator._create_links_on_remote_logs()
archivator.restore()
assert not mkdir_mock.called
assert not rename_mock.called
else:
archivator._create_links_on_remote_logs()
path = "/var/log/docker-logs/remote/"
archivator.restore()
path = "/var/log/remote/"
path_pairs = [(os.path.join(path, d), os.path.join(path, i))
for d, i in moved_nodes]
sym_calls = [mock.call(d, os.path.join(path, i))
@ -644,30 +637,7 @@ def test_create_links_on_remote_logs(
mkdir_mock.call_args_list
assert not rename_mock.called
assert sym_calls == symlink_mock.call_args_list
assert [mock.call("rsyslog", ["service", "rsyslog", "stop"]),
mock.call("rsyslog", ["service", "rsyslog", "start"])] == \
run_in_container_mock.call_args_list
def test_run_sql(mocker):
archivator = postgres.NailgunArchivator(None)
run_mock = mocker.patch(
"octane.util.docker.run_in_container",
return_value=("row_1|val_1\nrow_2|val_1\n", None))
test_sql = "test_sql"
results = archivator._run_sql_in_container(test_sql)
run_mock.assert_called_once_with(
"postgres",
[
"sudo",
"-u",
"postgres",
"psql",
"nailgun",
"--tuples-only",
"-c",
test_sql,
],
stdout=subprocess.PIPE
)
assert ["row_1|val_1", "row_2|val_1"] == results
assert mock_subprocess.call_args_list == [
mock.call(["systemctl", "stop", "rsyslog"]),
mock.call(["systemctl", "start", "rsyslog"]),
]

View File

@ -0,0 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.handlers.backup_restore import base
@pytest.mark.parametrize("action", ["backup", "restore", "pre_restore_check"])
@pytest.mark.parametrize("archivators", [[mock.Mock(), mock.Mock()], []])
def test_collection_archivator(action, archivators):
class TestCollectionArchivator(base.CollectionArchivator):
archivators_classes = archivators
archive = mock.Mock()
context = mock.Mock()
getattr(TestCollectionArchivator(archive, context), action)()
for archivator in archivators:
getattr(archivator.return_value, action).assert_called_once_with()
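
The test above pins down the contract: a collection archivator instantiates each class in archivators_classes and fans every action out to the resulting children. A hypothetical reconstruction of that base class (the real one lives in octane.handlers.backup_restore.base):

class CollectionArchivator(object):
    archivators_classes = []

    def __init__(self, archive, context=None):
        self.archivators = [cls(archive, context)
                            for cls in self.archivators_classes]

    def backup(self):
        for archivator in self.archivators:
            archivator.backup()

    def restore(self):
        for archivator in self.archivators:
            archivator.restore()

    def pre_restore_check(self):
        for archivator in self.archivators:
            archivator.pre_restore_check()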

View File

@ -91,3 +91,29 @@ def test_wait_for_puppet_in_container(mocker, mock_subprocess):
]
docker._wait_for_puppet_in_container(test_container, attempts, delay)
assert 2 == mock_subprocess.call_count
@pytest.mark.parametrize(
"container_id,stop_by_docker",
[('\n', False), ("123", True), ("123\n", True)])
@pytest.mark.parametrize("container_name", ["container_name"])
def test_docker_stop(
mocker, mock_subprocess, container_id, container_name, stop_by_docker):
mock_subprocess_call_output = mocker.patch(
"octane.util.subprocess.call_output", return_value=container_id)
mock_stop_action = mocker.patch("octane.util.docker._container_action")
docker.stop_container(container_name)
mock_stop_action.assert_called_once_with(container_name, "stop")
mock_subprocess_call_output.assert_called_once_with([
'docker',
'ps',
'--filter',
'name={0}'.format(container_name),
'--format',
'{{.ID}}'
])
if stop_by_docker:
mock_subprocess.assert_called_once_with(
["docker", "stop", container_id.strip()])
else:
assert not mock_subprocess.called
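
Read together, the assertions above imply roughly this shape for docker.stop_container: query docker ps for a live container id and only issue docker stop when one is found, alongside the generic _container_action. A hedged reconstruction:

from octane.util import subprocess

def stop_container(container_name):
    # Empty output means the container is not running.
    container_id = subprocess.call_output(
        ["docker", "ps", "--filter", "name={0}".format(container_name),
         "--format", "{{.ID}}"]).strip()
    if container_id:
        subprocess.call(["docker", "stop", container_id])
    _container_action(container_name, "stop")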

View File

@ -0,0 +1,68 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from octane.commands.enable_release import enable_release
from octane import magic_consts
@pytest.mark.parametrize("release_id,password",
[('1', 'test_pass'),
('1', ''),
('', '')])
def test_parser(mocker, octane_app, release_id, password):
command = "enable-release"
get_context_mock = mocker.patch(
"octane.commands.enable_release.EnableReleaseCommand.get_context")
context_mock = mocker.patch(
"octane.handlers.backup_restore.NailgunCredentialsContext")
get_context_mock.return_value = context_mock
enable_release_mock = mocker.patch(
"octane.commands.enable_release.enable_release")
params = [command, "--id", release_id, "--admin-password", password]
if release_id and password:
octane_app.run(params)
enable_release_mock.assert_called_once_with(release_id,
context_mock)
else:
with pytest.raises(AssertionError):
octane_app.run(params)
@pytest.mark.parametrize("release_id,data", [
(1, {'state': 'manageonly', }),
(1, {'state': 'available', }),
(1, {'state': 'unavailable', }),
(1, {'nostate': '', }),
])
def test_enable_release(mocker, release_id, data):
release_url = "/releases/{0}".format(release_id)
context_class_mock = mocker.patch(
"octane.handlers.backup_restore.NailgunCredentialsContext")
context_mock = context_class_mock()
set_auth_context_mock = mocker.patch(
"octane.util.fuel_client.set_auth_context")
get_request_mock = mocker.patch(
"fuelclient.client.APIClient.get_request")
put_request_mock = mocker.patch(
"fuelclient.client.APIClient.put_request")
get_request_mock.return_value = data
if data.get("state") == magic_consts.RELEASE_STATUS_MANAGED:
enable_release(release_id, context_mock)
set_auth_context_mock.assert_called_once_with(context_mock)
expected_data = {'state': magic_consts.RELEASE_STATUS_ENABLED}
put_request_mock.assert_called_once_with(release_url, expected_data)
else:
with pytest.raises(Exception):
enable_release(release_id, context_mock)
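
The parametrization above implies enable_release only flips a release from manageonly to available and raises otherwise; a hedged reconstruction under that reading (set_auth_context assumed to be a context manager):

from fuelclient.client import APIClient
from octane import magic_consts
from octane.util import fuel_client

def enable_release(release_id, context):
    release_url = "/releases/{0}".format(release_id)
    with fuel_client.set_auth_context(context):
        data = APIClient.get_request(release_url)
        if data.get("state") == magic_consts.RELEASE_STATUS_MANAGED:
            data = {"state": magic_consts.RELEASE_STATUS_ENABLED}
            APIClient.put_request(release_url, data)
        else:
            raise Exception(
                "Cannot enable release {0}: state is not {1}".format(
                    release_id, magic_consts.RELEASE_STATUS_MANAGED))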

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import pytest
@ -100,11 +101,6 @@ DEPLOYMENT_INFO = [{
}]
def test_parse_tenant_get():
res = env_util.parse_tenant_get(TENANT_GET_SAMPLE, 'id')
assert res == 'e26c8079d61f46c48f9a6d606631ee5e'
def test_cache_service_tenant_id(mocker, mock_open, mock_os_path, node):
mock_open.return_value.readline.return_value = '111'
test_env = mock.Mock()
@ -114,13 +110,284 @@ def test_cache_service_tenant_id(mocker, mock_open, mock_os_path, node):
res = env_util.cache_service_tenant_id(test_env, node)
assert res == '111'
TENANT_GET_SAMPLE = """
+-------------+-----------------------------------+
| Property | Value |
+-------------+-----------------------------------+
| description | Tenant for the openstack services |
| enabled | True |
| id | e26c8079d61f46c48f9a6d606631ee5e |
| name | services |
+-------------+-----------------------------------+
def test_get_keystone_tenants(mocker):
env = mock.Mock()
node = mock.Mock()
mocker.patch("octane.util.env.get_admin_password", return_value="passwd")
mocker.patch("octane.util.ssh.call_output",
return_value=TENANT_LIST_SAMPLE)
tenants = env_util.get_keystone_tenants(env, node)
assert tenants == {"admin": "45632156d201479cb2c0171590435be1",
"services": "7dafd04613524cd4a34524bfa7533c8c"}
TENANT_LIST_SAMPLE = """
+----------------------------------+----------+---------+
| id | name | enabled |
+----------------------------------+----------+---------+
| 45632156d201479cb2c0171590435be1 | admin | True |
| 7dafd04613524cd4a34524bfa7533c8c | services | True |
+----------------------------------+----------+---------+
"""[1:]
def test_copy_vips(mock_subprocess):
env_id = -1
env = mock.Mock(data={'id': env_id})
env_util.copy_vips(env)
mock_subprocess.assert_called_once_with(
['fuel2', 'env', 'copy', 'vips', str(env_id)]
)
PROJECTS = {
"admin": "2aed71d8816f4e5f8d4ad06836521d49",
"services": "09f1c11740ba4bc399387f3995d5160e",
}
@pytest.mark.parametrize(("data", "expected"), [
(
'[{"ID": "2aed71d8816f4e5f8d4ad06836521d49", "Name": "admin"}, '
'{"ID": "09f1c11740ba4bc399387f3995d5160e", "Name": "services"}]',
PROJECTS,
),
(
'[{"id": "2aed71d8816f4e5f8d4ad06836521d49", "name": "admin"}, '
'{"id": "09f1c11740ba4bc399387f3995d5160e", "name": "services"}]',
PROJECTS,
),
(
'[{"ID": "2aed71d8816f4e5f8d4ad06836521d49", "NAME": "admin"}, '
'{"ID": "09f1c11740ba4bc399387f3995d5160e", "NAME": "services"}]',
PROJECTS,
),
(
'[{"ID": "2aed71d8816f4e5f8d4ad06836521d49", "NAME": "admin"}]',
{"admin": "2aed71d8816f4e5f8d4ad06836521d49"},
),
])
def test_openstack_project_value(mocker, data, expected):
env = mock.Mock()
node = mock.Mock()
mocker.patch("octane.util.env.get_admin_password", return_value="pswd")
mocker.patch("octane.util.ssh.call_output", return_value=data)
projects = env_util.get_openstack_projects(env, node)
assert projects == expected
@pytest.mark.parametrize(("version", "client"), [
("6.0", "keystone"),
("6.1", "keystone"),
("7.0", "openstack"),
("8.0", "openstack"),
])
def test_get_openstack_project_dict(mocker, version, client):
env = mock.Mock()
node = mock.Mock()
node.env.data.get.return_value = version
mocker.patch("octane.util.env.get_one_controller", return_value=node)
mocker.patch("octane.util.env.get_keystone_tenants",
return_value="keystone")
mocker.patch("octane.util.env.get_openstack_projects",
return_value="openstack")
result = env_util.get_openstack_project_dict(env)
assert result == client
@pytest.mark.parametrize(("data", "key", "exception"), [
({'admin': 'ADMINIS'}, 'services', True),
({'services': 'SERVICEID', 'admin': 'ADMINIS'}, 'services', False),
({'services': 'SERVICEID'}, 'SERVICES', False),
])
def test_get_openstack_project_value(mocker, data, key, exception):
env = mock.Mock()
node = mock.Mock()
mocker.patch("octane.util.env.get_openstack_project_dict",
return_value=data)
if exception:
with pytest.raises(Exception) as exc_info:
env_util.get_openstack_project_value(env, node, key)
assert "Field {0} not found in openstack project list".format(key) == \
exc_info.value.message
else:
project_id = env_util.get_openstack_project_value(env, node, key)
assert project_id == 'SERVICEID'
@pytest.mark.parametrize("node", [mock.Mock(), None])
def test_get_service_tenant_id(mocker, node):
mock_obj = mocker.patch("octane.util.env.get_openstack_project_value")
env = mock.Mock()
env_util.get_service_tenant_id(env, node)
mock_obj.assert_called_once_with(env, node, "services")
ENV_SETTINGS = {
'editable': {
'public_ssl': {
'horizon': {
'value': None
},
'services': {
'value': None
}
},
'external_ntp': {
'ntp_list': {
'value': None
}
},
'external_dns': {
'dns_list': {
'value': None
}
},
'provision': {
'method': {
'value': None
}
}
}
}
@pytest.mark.parametrize("env_id,master_ip", [(1, '10.0.0.1')])
@pytest.mark.parametrize("format_tuples", [
[
# (path, release_template, expected_result)
('/boot', "{settings.MASTER_IP}_{cluster.id}", "10.0.0.1_1"),
(
'/',
"{cluster.id}_{settings.MASTER_IP}_blabal.tar.gz",
"1_10.0.0.1_blabal.tar.gz"
),
]
])
def test_change_env_settings(mocker, env_id, master_ip, format_tuples):
env = mocker.patch("fuelclient.objects.environment.Environment")
env_dict = {
'provision': {
'image_data': {f[0]: {'uri': 'bad_value'} for f in format_tuples}}
}
expected_dict = {
'provision': {
'image_data': {f[0]: {'uri': f[2]} for f in format_tuples}}
}
release_dict = {
'generated': {
'provision': {
'image_data': {f[0]: {'uri': f[1]} for f in format_tuples}}
}
}
sql_call_mock = mocker.patch(
"octane.util.sql.run_psql_in_container",
side_effect=[
[json.dumps(env_dict)], [json.dumps(release_dict)], 1
]
)
mock_json_dumps = mocker.patch("json.dumps", return_value="generated_json")
mock_env = env.return_value = mock.Mock()
mock_env.data = {"release_id": 1}
mock_env.get_attributes.return_value = ENV_SETTINGS
env_util.change_env_settings(env_id, master_ip)
mock_env.update_attributes.assert_called_once_with({
'editable': {
'public_ssl': {
'horizon': {
'value': False
},
'services': {
'value': False
}
},
'external_ntp': {
'ntp_list': {
'value': master_ip
}
},
'external_dns': {
'dns_list': {
'value': master_ip
}
},
'provision': {
'method': {
'value': 'image'
}
}
}
})
mock_json_dumps.assert_called_once_with(expected_dict)
sql_call_mock.assert_called_with(
"update attributes set generated='{0}' where cluster_id={1}".format(
mock_json_dumps.return_value, env_id
),
'nailgun'
)
@pytest.mark.parametrize("mock_method,version,expected_result",
[("cobbler", "5.1.1", True),
("image", "6.0", False),
("cobbler", "6.0", True),
("image", "6.0", False),
("image", "7.0", False),
("image", "", False),
(None, None, False)])
def test_incompatible_provision_method(mocker,
mock_method,
version,
expected_result):
mock_env = mock.Mock()
mock_env.data = {"fuel_version": version, "id": "test"}
mock_get_method = mocker.patch("octane.util.env.get_env_provision_method")
mock_get_method.return_value = mock_method
if version:
result = env_util.incompatible_provision_method(mock_env)
assert expected_result == result
else:
with pytest.raises(Exception) as exc_info:
env_util.incompatible_provision_method(mock_env)
assert ("Cannot find version of environment {0}:"
" attribute 'fuel_version' missing or has incorrect value"
.format(mock_env.data["id"])) == exc_info.value.args[0]
@pytest.mark.parametrize("provision,compat", [
(True, True,),
(False, True),
])
def test_move_nodes(mocker, mock_subprocess, provision, compat):
env = mock.Mock()
env.data = {
'id': 'test-id',
}
nodes = [mock.Mock(), mock.Mock()]
for idx, node in enumerate(nodes):
node.data = {'id': str(idx)}
mock_create_configdrive = mocker.patch(
"octane.util.disk.create_configdrive_partition")
mock_update_node_partinfo = mocker.patch(
"octane.util.disk.update_node_partition_info")
mock_wait_for = mocker.patch(
"octane.util.env.wait_for_nodes")
mock_get_provision_method = mocker.patch(
"octane.util.env.incompatible_provision_method")
mock_get_provision_method.return_value = compat
env_util.move_nodes(env, nodes, provision)
if provision:
assert mock_create_configdrive.call_args_list == \
[mock.call(node) for node in nodes]
assert mock_update_node_partinfo.call_args_list == \
[mock.call(node.data["id"]) for node in nodes]
mock_wait_for.assert_called_once_with(nodes, 'provisioned')
else:
assert mock_create_configdrive.call_args_list == []
assert mock_update_node_partinfo.call_args_list == []
assert mock_wait_for.call_args_list == []

View File

@ -35,3 +35,28 @@ from octane.util import helpers
])
def test_merge_dicts(mocker, base, update, result):
assert result == helpers.merge_dicts(base, update)
@pytest.mark.parametrize(("source", "parameters"), [
([
"option1 = value1\n",
"[section1]\n",
"# some comment\n",
"option2= value2\n",
"[section2]\n",
" option3 =value3 \n",
], [
(None, "option1", "value1"),
("section1", None, None),
("section1", None, None),
("section1", "option2", "value2"),
("section2", None, None),
("section2", "option3", "value3"),
]),
])
def test_iterate_parameters(source, parameters):
expected_result = []
for line, params in zip(source, parameters):
expected_result.append((line,) + params)
result = list(helpers.iterate_parameters(source))
assert result == expected_result
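A minimal sketch of the contract this test pins down: helpers.iterate_parameters() walks an INI-like stream and yields (line, section, key, value) tuples, with None placeholders for headers and comments. Illustrative only, not the shipped octane.util.helpers code:

def iterate_parameters(lines):
    section = None
    for line in lines:
        stripped = line.strip()
        if stripped.startswith("["):
            # section header: remember it, no key/value on this line
            section = stripped.strip("[]")
            yield line, section, None, None
        elif "=" in stripped and not stripped.startswith("#"):
            key, _, value = stripped.partition("=")
            yield line, section, key.strip(), value.strip()
        else:
            # comments and blank lines carry the current section only
            yield line, section, None, None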

View File

@ -13,7 +13,7 @@ import subprocess
from mock import call
from mock import Mock
from octane.helpers import network
from octane.util import network
def test_create_overlay_network(mocker):
@ -107,10 +107,10 @@ def test_delete_overlay_network(mocker):
mock_ssh = mocker.patch('octane.util.ssh.call')
mock_ovs_tuns = mocker.patch('octane.helpers.network.list_tunnels_ovs')
mock_ovs_tuns = mocker.patch('octane.util.network.list_tunnels_ovs')
mock_ovs_tuns.return_value = ['br-ex--gre-10.10.10.2']
mock_lnx_tun = mocker.patch('octane.helpers.network.list_tunnels_lnx')
mock_lnx_tun = mocker.patch('octane.util.network.list_tunnels_lnx')
mock_lnx_tun.return_value = ['gre3-3']
expected_args = [

View File

@ -0,0 +1,168 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import pytest
from octane.commands import osd_upgrade
from octane import magic_consts
from octane.util import ssh
@pytest.mark.parametrize("env_id", [None, 1])
@pytest.mark.parametrize("admin_pswd", [None, "pswd"])
def test_osd_cmd_upgrade(mocker, octane_app, env_id, admin_pswd):
upgrade_osd_mock = mocker.patch("octane.commands.osd_upgrade.upgrade_osd")
params = ["upgrade-osd"]
if admin_pswd:
params += ["--admin-password", admin_pswd]
if env_id:
params += [str(env_id)]
if env_id and admin_pswd:
octane_app.run(params)
upgrade_osd_mock.assert_called_once_with(env_id, "admin", admin_pswd)
return
with pytest.raises(AssertionError):
octane_app.run(params)
assert not upgrade_osd_mock.called
@pytest.mark.parametrize("node_roles, exception_node", [
([('ceph-osd',)] * 10, None),
([('ceph-osd', 'compute')] * 10, None),
([('ceph-osd',), ('compute',)] * 10, None),
([('ceph-osd',), ('compute',), ('controller',)] * 10, None),
([], None),
([('compute',)] * 10, None),
([('ceph-osd',)] * 10, 0),
([('ceph-osd',), ('compute',)] * 10, 9),
])
@pytest.mark.parametrize("user", ["usr", "admin"])
@pytest.mark.parametrize("password", ["admin", "pswd"])
@pytest.mark.parametrize("env_id", [1, 2, 3])
@pytest.mark.parametrize("master_ip", ["10.21.10.2", "10.20.1.2"])
def test_upgrade_osd(
mocker, node_roles, user, password, exception_node, master_ip, env_id):
auth_mock_client = mocker.patch("octane.util.fuel_client.set_auth_context")
creds_mock = mocker.patch(
"octane.handlers.backup_restore.NailgunCredentialsContext")
mocker.patch(
"octane.commands.osd_upgrade._get_backup_path",
return_value="backup_path")
mocker.patch("octane.magic_consts.OSD_REPOS_UPDATE",
[("path", "{admin_ip}")])
ssh_call_mock = mocker.patch("octane.util.ssh.call")
preinstall_calls = []
rollback_calls = []
dpkg_rollback_calls = []
nodes = []
osd_nodes = []
hostnames = []
osd_node_idx = 0
call_node = None
class TestException(Exception):
pass
for roles in node_roles:
node = mocker.Mock()
hostname = "{0}_node.{1}".format("_".join(roles), osd_node_idx)
node.data = {"roles": roles, "hostname": hostname, "cluster": env_id}
nodes.append(node)
new_env_node = mocker.Mock()
new_env_node.data = {
"roles": roles,
"hostname": "{0}_env.{1}".format("_".join(roles), osd_node_idx),
"cluster": env_id + 1
}
nodes.append(new_env_node)
if 'ceph-osd' not in roles:
continue
osd_nodes.append(node)
hostnames.append(hostname)
call_node = call_node or node
for path, _ in magic_consts.OSD_REPOS_UPDATE:
preinstall_calls.append((
mock.call(["cp", path, "backup_path"], node=node),
exception_node == osd_node_idx,
))
if exception_node == osd_node_idx:
break
rollback_calls.append(
(mock.call(["mv", "backup_path", path], node=node), False))
if exception_node == osd_node_idx:
break
preinstall_calls.append(
(mock.call(["dpkg", "--configure", "-a"], node=node), False))
dpkg_rollback_calls.append(
(mock.call(["dpkg", "--configure", "-a"], node=node), False))
osd_node_idx += 1
mocker.patch("fuelclient.objects.node.Node.get_all", return_value=nodes)
file_mock = mock.Mock()
@contextlib.contextmanager
def update_file(*args, **kwargs):
yield (None, file_mock)
mocker.patch("octane.util.ssh.update_file", side_effect=update_file)
mocker.patch("octane.util.ssh.sftp")
mocker.patch(
"octane.util.helpers.get_astute_dict",
return_value={"ADMIN_NETWORK": {"ipaddress": master_ip}})
update_calls = []
if exception_node is None and osd_node_idx:
update_calls.append((
mock.call(["ceph", "osd", "set", "noout"], node=call_node), False))
update_calls.append((
mock.call(
['ceph-deploy', 'install', '--release', 'hammer'] + hostnames,
node=call_node,
stdout=ssh.PIPE,
stderr=ssh.PIPE,
),
False
))
for node in osd_nodes:
update_calls.append(
(mock.call(['restart', 'ceph-osd-all'], node=node), False))
update_calls.append((
mock.call(["ceph", "osd", "unset", "noout"], node=call_node),
False
))
update_calls.append((
mock.call(["ceph", "osd", "stat"], node=call_node),
False
))
calls = \
preinstall_calls + \
update_calls + \
rollback_calls + \
dpkg_rollback_calls
ssh_calls = [i[0] for i in calls]
mock_calls = [TestException() if i[1] else mock.DEFAULT for i in calls]
ssh_call_mock.side_effect = mock_calls
if exception_node is not None:
with pytest.raises(TestException):
osd_upgrade.upgrade_osd(env_id, user, password)
else:
osd_upgrade.upgrade_osd(env_id, user, password)
ssh_call_mock.assert_has_calls(ssh_calls, any_order=True)
assert ssh_call_mock.call_count == len(ssh_calls)
auth_mock_client.assert_called_once_with(creds_mock.return_value)
creds_mock.assert_called_once_with(user, password)
if exception_node is not None and osd_node_idx:
file_mock.write.assert_called_with(master_ip)

View File

@ -12,8 +12,8 @@
import mock
import pytest
from octane.commands import prepare
from octane import magic_consts
from octane.util import puppet
from octane.util import subprocess
@ -36,9 +36,9 @@ class MockFile(mock.MagicMock):
def assert_calls(self):
kwargs = {'stdin': self, 'cwd': magic_consts.PUPPET_DIR}
args = [((["patch", "-R", "-p3"], ), kwargs), ]
args = [((["patch", "-R", "-p1"], ), kwargs), ]
if not self.revert:
args.append(((["patch", "-N", "-p3"], ), kwargs))
args.append(((["patch", "-N", "-p1"], ), kwargs))
assert args == self.call_args
@ -83,7 +83,7 @@ def test_simple_patch(mocker,
mock_is_dir = mocker.patch("os.path.isdir", side_effect=is_dir_list)
mock_open.return_value.__enter__.side_effect = patch_files
mock_subprocess.side_effect = _read_in_subprocess
prepare.patch_puppet(revert)
puppet.patch_modules(revert)
path_arg = '/'.join([magic_consts.CWD, "patches", "puppet"])
mock_list_dir.assert_called_once_with(path_arg)
path_args = [mock.call('/'.join([path_arg, i])) for i in os_dirs]

View File

@ -1,27 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def test_prepare_parser(mocker, octane_app):
m = mocker.patch('octane.commands.prepare.prepare')
octane_app.run(["prepare"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with()
def test_revert_parser(mocker, octane_app):
mock_apply = mocker.patch('octane.commands.prepare.revert_prepare')
octane_app.run(["revert-prepare"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
mock_apply.assert_called_once_with()

View File

@ -16,7 +16,6 @@ import pytest
from octane.commands import restore
from octane.handlers import backup_restore
from octane.handlers.backup_restore import astute
from octane import magic_consts
@pytest.mark.parametrize("path,is_file", [
@ -80,9 +79,6 @@ def test_restore_data(mocker):
])
def test_astute_checker(
mocker, mock_open, backup_ip, current_ip):
mocker.patch(
"octane.util.docker.get_docker_container_names",
return_value=magic_consts.RUNNING_REQUIRED_CONTAINERS)
tar_mock = mocker.Mock()
mocker.patch.object(
astute.AstuteArchivator,

View File

@ -1,62 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.helpers import sync_glance_images
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.sync_images.sync_glance_images')
octane_app.run(['sync-images', '1', '2', 'br-mgmt'])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2, 'br-mgmt')
def test_prepare_parser(mocker, octane_app):
m = mocker.patch('octane.commands.sync_images.prepare')
octane_app.run(['sync-images-prepare', '1', '2'])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2)
@pytest.mark.parametrize("yaml,expected", [
({'network_scheme': {'endpoints': {'MY_EP': {'IP': ['1.2.3.4/24']}}}},
'1.2.3.4'),
({'network_scheme': {'endpoints': {'MY_EP1': {'IP': ['1.2.3.4/24']}}}},
None),
])
def test_get_endpoint_ip(yaml, expected):
result = sync_glance_images.get_endpoint_ip('MY_EP', yaml)
assert result == expected
def test_get_swift_object(mock_subprocess, mock_ssh_call_output, node):
mock_ssh_call_output.return_value = 'id1\nid2\n'
res = sync_glance_images.get_swift_objects(
node, 'tenant', 'user', 'password', 'token', 'container')
assert not mock_subprocess.called
assert mock_ssh_call_output.call_args_list == [
mock.call(["sh", "-c", mock.ANY], node=node)]
assert res == ['id1', 'id2']
def test_download_image(mock_subprocess, mock_ssh_call, node):
mock_ssh_call.return_value = 'id1\nid2\n'
sync_glance_images.download_image(
node, 'tenant', 'user', 'password', 'token', 'container', 'id')
assert not mock_subprocess.called
assert mock_ssh_call.call_args_list == [
mock.call(["sh", "-c", mock.ANY], node=node)]

View File

@ -1,27 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def test_parser(mocker, octane_app):
networks = [{'key': 'value'}]
env_cls = mocker.patch('fuelclient.objects.Environment')
m1 = mocker.patch('octane.util.env.get_env_networks')
m1.return_value = networks
m2 = mocker.patch('octane.commands.sync_networks.update_env_networks')
octane_app.run(["sync-networks", "1", "2"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m1.assert_called_once_with(env_cls.return_value)
m2.assert_called_once_with(2, networks)

View File

@ -1,49 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.commands import update_plugin_settings
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.update_plugin_settings'
'.transfer_plugins_settings')
plugins_str = ','.join(update_plugin_settings.PLUGINS)
octane_app.run(["update-plugin-settings", "--plugins", plugins_str,
"1", "2"])
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2, update_plugin_settings.PLUGINS.keys())
def test_transfer_plugin_settings(mocker):
plugin = mock.Mock()
mocker.patch.object(update_plugin_settings, 'PLUGINS', {'plugin': plugin})
env_cls = mocker.patch('fuelclient.objects.environment.Environment')
get_astute_yaml = mocker.patch('octane.util.env.get_astute_yaml')
attrs = {'editable': {'plugin': {}}}
env_cls.return_value.get_settings_data.return_value = attrs
update_plugin_settings.transfer_plugins_settings(1, 2, ['plugin'])
plugin.assert_called_once_with(get_astute_yaml.return_value, {})
def test_transfer_plugin_settings_fail(mocker):
plugin = mock.Mock()
mocker.patch.object(update_plugin_settings, 'PLUGINS', {'plugin': plugin})
env_cls = mocker.patch('fuelclient.objects.environment.Environment')
mocker.patch('octane.util.env.get_astute_yaml')
attrs = {'editable': {'plugin1': {}}}
env_cls.return_value.get_settings_data.return_value = attrs
with pytest.raises(update_plugin_settings.PluginNotConfigured):
update_plugin_settings.transfer_plugins_settings(1, 2, ['plugin'])

View File

@ -10,6 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.commands import upgrade_ceph
from octane.handlers.upgrade import ceph_osd
def test_parser(mocker, octane_app):
m = mocker.patch('octane.commands.upgrade_ceph.upgrade_ceph')
@ -17,3 +23,58 @@ def test_parser(mocker, octane_app):
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, 2)
@pytest.mark.parametrize("env_node_ids", [
# [(env_id, node_id), ... ]
[(1, 1)],
[(1, 1), (1, 2)],
[(1, 1), (2, 2)]
])
def test_patch_and_revert_only_once(mocker, env_node_ids):
patch_mock = mocker.patch("octane.util.puppet.patch_modules")
mocker.patch("octane.util.ceph.check_cluster")
mocker.patch("octane.util.node.preserve_partition")
set_ceph_noout_mock = mocker.patch("octane.util.ceph.set_osd_noout")
unset_ceph_noout_mock = mocker.patch("octane.util.ceph.unset_osd_noout")
handlers = []
envs = {}
for env_id, node_id in env_node_ids:
try:
env = envs[env_id]
except KeyError:
env = mock.Mock()
env.data = {
"id": env_id,
"fuel_version": "xxx"
}
envs[env_id] = env
node = mock.Mock()
node.env = env
node.data = {"id": node_id}
handlers.append(ceph_osd.CephOsdUpgrade(node, env, False, False))
for handler in handlers:
handler.preupgrade()
for handler in handlers:
handler.prepare()
for handler in handlers:
handler.postdeploy()
assert [mock.call(), mock.call(revert=True)] == patch_mock.call_args_list
env_calls = [mock.call(e) for e in envs.values()]
assert env_calls == set_ceph_noout_mock.call_args_list
assert env_calls == unset_ceph_noout_mock.call_args_list
CEPH_CONF_BASE = "key = value\n"
CEPH_CONF_KEYRING = CEPH_CONF_BASE + "[client.radosgw.gateway]\n"
CEPH_CONF_RGWFRONT = CEPH_CONF_KEYRING + \
"rgw_frontends = fastcgi socket_port=9000 socket_host=127.0.0.1\n"
@pytest.mark.parametrize("conf,expected_res", [
(CEPH_CONF_BASE, CEPH_CONF_BASE),
(CEPH_CONF_KEYRING, CEPH_CONF_RGWFRONT),
(CEPH_CONF_RGWFRONT, CEPH_CONF_RGWFRONT),
])
def test_add_rgw_frontends(mocker, conf, expected_res):
assert expected_res == upgrade_ceph.add_rgw_frontends(conf)

View File

@ -10,10 +10,136 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
import yaml
def test_parser(mocker, octane_app):
from octane.commands import upgrade_node
@pytest.mark.parametrize('cmd,env,nodes,provision,roles', [
(["upgrade-node", "--isolated", "1", "2", "3"], 1, [2, 3], True, None),
(["upgrade-node", "--isolated", "--no-provision", "4", "5"], 4, [5], False,
None),
(["upgrade-node", "--isolated", "--roles=role-a,role-b", "6", "7"], 6, [7],
True, ["role-a", "role-b"]),
(["upgrade-node", "--isolated", "--no-provision", "--roles=role-c,role-d",
"8", "9"], 8, [9], False, ["role-c", "role-d"]),
])
@pytest.mark.parametrize('live_migration', [True, False])
def test_parser(mocker, octane_app, cmd, env, nodes, provision, roles,
live_migration):
if not live_migration:
cmd = cmd + ["--no-live-migration"]
m = mocker.patch('octane.commands.upgrade_node.upgrade_node')
octane_app.run(["upgrade-node", "--isolated", "1", "2", "3"])
octane_app.run(cmd)
assert not octane_app.stdout.getvalue()
assert not octane_app.stderr.getvalue()
m.assert_called_once_with(1, [2, 3], isolated=True, network_template=None)
m.assert_called_once_with(env, nodes, isolated=True, network_template=None,
provision=provision, roles=roles,
live_migration=live_migration)
@pytest.mark.parametrize(
"node_ids,isolated,network_template,provision,roles",
[(['test-node-1', 'test-node-2', 'test-node-3'],
False, None, True, None), ])
def test_upgrade_node(mocker, node_ids, isolated, network_template,
provision, roles):
def _create_node(node_id):
node = mock.Mock('node', spec_set=['data', 'id'])
node.id = node_id
node.data = {}
node.data['id'] = node_id
node.data['cluster'] = None
node.data['roles'] = 'controller'
mock_nodes_list.append(node)
return node
mock_nodes_list = []
test_env_id = 'test-env'
mock_env_class = mocker.patch("fuelclient.objects.environment.Environment")
mock_env = mock_env_class.return_value
mock_env.id = test_env_id
mock_env.data = {}
mock_env.data['id'] = mock_env.id
mocker.patch("octane.util.patch.applied_patch")
mock_node = mocker.patch("fuelclient.objects.node.Node")
mock_node.side_effect = _create_node
mock_copy_patches = mocker.patch(
"octane.commands.upgrade_node.copy_patches_folder_to_nailgun")
mock_get_handlers = mocker.patch(
"octane.handlers.upgrade.get_nodes_handlers")
mock_handlers = mock_get_handlers.return_value
mock_move_nodes = mocker.patch("octane.util.env.move_nodes")
mock_copy_vips = mocker.patch("octane.util.env.copy_vips")
mock_load_network_template = mocker.patch(
"octane.commands.upgrade_node.load_network_template")
mock_deploy_nodes = mocker.patch("octane.util.env.deploy_nodes")
mock_deploy_changes = mocker.patch("octane.util.env.deploy_changes")
upgrade_node.upgrade_node(test_env_id, node_ids)
mock_copy_patches.assert_called_once_with()
mock_copy_vips.assert_called_once_with(mock_env)
mock_move_nodes.assert_called_once_with(mock_env, mock_nodes_list,
True, None)
assert mock_handlers.call_args_list == [
mock.call('preupgrade'), mock.call('prepare'),
mock.call('predeploy'), mock.call('postdeploy')]
if network_template:
mock_load_network_template.assert_called_once_with(network_template)
if isolated:
mock_deploy_nodes.assert_called_once_with(mock_env, mock_nodes_list)
else:
mock_deploy_changes.assert_called_once_with(mock_env, mock_nodes_list)
@pytest.mark.parametrize('node_data,expected_error', [
([{
'id': 'test-node',
'cluster': None,
}], None),
([{
'id': 'test-node',
'cluster': 'test-env',
}], Exception),
([{
'id': 'test-node',
'cluster': 'test-env-1',
}, {
'id': 'another-test-node',
'cluster': 'test-env-2'
}], Exception),
])
def test_check_sanity(mocker, node, node_data, expected_error):
test_env_id = "test-env"
mock_nodes = []
for data in node_data:
mock_node = mocker.Mock(data=data)
mock_nodes.append(mock_node)
if expected_error:
with pytest.raises(expected_error) as exc_info:
upgrade_node.check_sanity(test_env_id, mock_nodes)
if len(mock_nodes) == 1:
assert "Cannot upgrade node with ID %s:" \
in exc_info.value.args[0]
else:
assert "Not upgrading nodes from different clusters" \
in exc_info.value.args[0]
else:
assert upgrade_node.check_sanity(test_env_id, mock_nodes) is None
@pytest.mark.parametrize("return_value", [{'test': 'test'}, ])
@pytest.mark.parametrize("side_effect",
[None, yaml.parser.ParserError, IOError])
def test_load_network_template(mocker, return_value, side_effect):
mocker.patch("octane.util.helpers.load_yaml",
return_value=return_value,
side_effect=side_effect)
if side_effect:
with pytest.raises(side_effect):
upgrade_node.load_network_template("testfile")
else:
assert return_value == upgrade_node.load_network_template("testfile")

View File

@ -0,0 +1,60 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.handlers import backup_restore
from octane.util import auth
class TestException(Exception):
pass
@pytest.mark.parametrize("exc_on_apply", [True, False])
def test_set_astute_password(mocker, mock_open, exc_on_apply):
fd_mock = mock.Mock()
close_mock = mocker.patch("os.close")
mkstemp_mock = mocker.patch(
"tempfile.mkstemp",
return_value=(fd_mock, "/etc/fuel/.astute.yaml.bac"))
mock_copy = mocker.patch("shutil.copy2")
mock_move = mocker.patch("shutil.move")
yaml_load = mocker.patch(
"yaml.load", return_value={"FUEL_ACCESS": {"password": "dump_pswd"}})
yaml_dump = mocker.patch("yaml.safe_dump")
context = backup_restore.NailgunCredentialsContext(
user="admin", password="user_pswd")
if exc_on_apply:
with pytest.raises(TestException):
with auth.set_astute_password(context):
raise TestException("text exception")
else:
with auth.set_astute_password(context):
pass
assert mock_open.call_args_list == [
mock.call("/etc/fuel/astute.yaml", "r"),
mock.call("/etc/fuel/astute.yaml", "w"),
]
yaml_load.assert_called_once_with(mock_open.return_value)
yaml_dump.assert_called_once_with(
{'FUEL_ACCESS': {'password': 'user_pswd'}},
mock_open.return_value,
default_flow_style=False)
mock_copy.assert_called_once_with("/etc/fuel/astute.yaml",
"/etc/fuel/.astute.yaml.bac")
mock_move.assert_called_once_with("/etc/fuel/.astute.yaml.bac",
"/etc/fuel/astute.yaml")
mkstemp_mock.assert_called_once_with(
dir="/etc/fuel", prefix=".astute.yaml.octane")
close_mock.assert_called_once_with(fd_mock)

View File

@ -0,0 +1,79 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pytest
from octane import magic_consts
from octane.util import disk as disk_util
@pytest.mark.parametrize("disk", ["sda", "sdb", "sdc"])
@pytest.mark.parametrize("size,last_part,end_part", [
(10, 1024, 1035),
(0, 10, 11),
])
def test_create_partition(mocker, mock_ssh_call, mock_ssh_call_output, node,
size, last_part, end_part, disk):
mock_part_end = mocker.patch("octane.util.disk.parse_last_partition_end")
mock_part_end.return_value = last_part
disk_util.create_partition(disk, size, node)
mock_ssh_call_output.assert_called_once_with(
['parted', '/dev/%s' % disk, 'unit', 'MB', 'print'], node=node)
mock_ssh_call.assert_called_once_with(
['parted', '/dev/%s' % disk, 'unit', 'MB', 'mkpart',
'custom', 'ext4', str(last_part + 1), str(end_part)], node=node)
def test_update_partition_info(mocker, node):
test_node_id = 1
container = 'nailgun'
mock_run_in_container = mocker.patch(
"octane.util.docker.run_in_container")
expected_command = [
'python',
os.path.join('/tmp', 'update_node_partition_info.py'),
str(test_node_id),
]
disk_util.update_node_partition_info(test_node_id)
mock_run_in_container.assert_called_once_with(container, expected_command)
NODE_DISKS_ATTRIBUTE = [
{
'id': '1',
'name': 'disk1',
}, {
'id': '2',
'name': 'disk2',
}
]
@pytest.mark.parametrize("disk_attrs", [
NODE_DISKS_ATTRIBUTE,
None,
])
def test_create_configdrive_partition(mocker, node, disk_attrs):
name = 'disk1'
node.mock_add_spec(['get_attribute'])
node.data = {"id": "1"}
node.get_attribute.return_value = disk_attrs
mock_create_part = mocker.patch("octane.util.disk.create_partition")
if disk_attrs:
disk_util.create_configdrive_partition(node)
mock_create_part.assert_called_once_with(
name, magic_consts.CONFIGDRIVE_PART_SIZE, node)
else:
with pytest.raises(disk_util.NoDisksInfoError):
disk_util.create_configdrive_partition(node)

View File

@ -0,0 +1,38 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from octane.util import fuel_bootstrap
def test_get_images_uuids(mocker):
fake_output = [{"status": "active",
"uuid": "00000000-1111-2222-3333-444444444444",
"label": "00000000-1111-2222-3333-444444444444"},
{"status": "",
"uuid": "55555555-6666-7777-8888-999999999999",
"label": "55555555-6666-7777-8888-999999999999"}]
mocker.patch('octane.util.subprocess.call_output',
return_value=json.dumps(fake_output))
uuids = fuel_bootstrap.get_not_active_images_uuids()
assert uuids == [fake_output[1]['uuid']]
def test_delete_image(mocker):
fake_uuid = "00000000-1111-2222-3333-444444444444"
call = mocker.patch('octane.util.subprocess.call')
fuel_bootstrap.delete_image(fake_uuid)
call.assert_called_once_with(['fuel-bootstrap', 'delete', fake_uuid])

View File

@ -10,40 +10,68 @@
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient import client
from fuelclient import fuelclient_settings
import mock
import pytest
from octane.util import fuel_client
def test_simple_overwrite(mocker):
def mock_fuelclient_80(mocker, user, password):
client = mocker.patch("fuelclient.client.APIClient",
new_callable=mock.Mock)
client.mock_add_spec(
["user", "password", "_session", "_keystone_client"],
spec_set=True,
)
client.user = user
client.password = password
return client, None
class TestContext(object):
user = "test user"
password = "test password"
def mock_fuelclient_90(mocker, user, password):
config = {
'OS_USERNAME': user,
'OS_PASSWORD': password,
}
get_settings = mocker.patch("fuelclient.fuelclient_settings.get_settings")
get_settings.return_value.configure_mock(config=config, **config)
get_settings.return_value.mock_add_spec(
["config", "OS_USERNAME", "OS_PASSWORD"],
spec_set=True,
)
client = mocker.patch("fuelclient.client.APIClient",
new_callable=mock.Mock)
client.mock_add_spec(["_session", "_keystone_client"], spec_set=True)
return client, config
conf = fuelclient_settings.get_settings()
client_val = "Not empty val"
# NOTE(akscram): It's not possible to use fixtures as parameters in
# parametrized tests, so they are used here as common functions. For more
# information take a look at this: https://github.com/pytest-dev/pytest/issues/349
@pytest.mark.parametrize(("auth_context", "fuelclient_fixture", "legacy"), [
(fuel_client.set_auth_context_80, mock_fuelclient_80, True),
(fuel_client.set_auth_context_90, mock_fuelclient_90, False),
])
def test_simple_overwrite(mocker, auth_context, fuelclient_fixture, legacy):
def assert_client_state(user, password):
if legacy:
assert mock_client.user == user
assert mock_client.password == password
else:
assert mock_config['OS_USERNAME'] == user
assert mock_config['OS_PASSWORD'] == password
assert conf.KEYSTONE_USER == client.APIClient.user
assert conf.KEYSTONE_PASS == client.APIClient.password
assert client.APIClient._session is None
assert client.APIClient._keystone_client is None
assert mock_client._session is None
assert mock_client._keystone_client is None
client.APIClient._session = client.APIClient._keystone_client = client_val
mock_client, mock_config = fuelclient_fixture(mocker, "userA", "passwordA")
context = mock.Mock(user="userB", password="passwordB",
spec=["user", "password"])
with fuel_client.set_auth_context(TestContext()):
assert TestContext.user == client.APIClient.user
assert TestContext.password == client.APIClient.password
assert client.APIClient._session is None
assert client.APIClient._keystone_client is None
with auth_context(context):
assert_client_state(context.user, context.password)
client.APIClient._session = client_val
client.APIClient._keystone_client = client_val
mock_client._session = mock.Mock()
mock_client._keystone_client = mock.Mock()
assert conf.KEYSTONE_USER == client.APIClient.user
assert conf.KEYSTONE_PASS == client.APIClient.password
assert client.APIClient._session is None
assert client.APIClient._keystone_client is None
assert_client_state("userA", "passwordA")

View File

@ -0,0 +1,90 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import pytest
from octane.util import keystone
@contextlib.contextmanager
def verify_update_file(mocker, parameters, writes):
mock_old = mock.Mock()
mock_new = mock.Mock()
mock_update_file = mocker.patch("octane.util.subprocess.update_file")
mock_update_file.return_value.__enter__.return_value = (mock_old, mock_new)
mock_iter_params = mocker.patch("octane.util.helpers.iterate_parameters")
mock_iter_params.return_value = parameters
expected_writes = [mock.call(call) for call in writes]
yield mock_update_file
mock_iter_params.assert_called_once_with(mock_old)
assert mock_new.write.call_args_list == expected_writes
@pytest.mark.parametrize(("parameters", "writes"), [
([
("[identity]\n", "identity", None, None),
("default_domain_id = b5a5e858092d44ffbe2f3347831c5ca7\n",
"identity", "default_domain_id", "b5a5e858092d44ffbe2f3347831c5ca7"),
], [
"[identity]\n",
"#default_domain_id = b5a5e858092d44ffbe2f3347831c5ca7\n",
]),
([
("[identity]\n", "identity", None, None),
], [
"[identity]\n",
]),
])
def test_unset_default_domain_id(mocker, parameters, writes):
with verify_update_file(mocker, parameters, writes) as mock_update_file:
keystone.unset_default_domain_id("fakefilename")
mock_update_file.assert_called_once_with("fakefilename")
@pytest.mark.parametrize(("parameters", "writes"), [
([
("[pipeline:public_api]\n", "pipeline:public_api", None, None),
("pipeline = request_id admin_token_auth token_auth public_service\n",
"pipeline:public_api", "pipeline",
"request_id admin_token_auth token_auth public_service"),
("[pipeline:admin_api]\n", "pipeline:admin_api", None, None),
("pipeline = request_id token_auth admin_service\n",
"pipeline:admin_api", "pipeline",
"request_id token_auth admin_service"),
("[pipeline:api_v3]\n", "pipeline:api_v3", None, None),
("pipeline = request_id token_auth service_v3\n",
"pipeline:api_v3", "pipeline",
"request_id token_auth service_v3"),
], [
"[pipeline:public_api]\n",
"pipeline = request_id admin_token_auth token_auth public_service\n",
"[pipeline:admin_api]\n",
"pipeline = request_id admin_token_auth token_auth admin_service\n",
"[pipeline:api_v3]\n",
"pipeline = request_id token_auth service_v3\n",
])
])
def test_add_admin_token_auth(mocker, parameters, writes):
with verify_update_file(mocker, parameters, writes) as mock_update_file:
keystone.add_admin_token_auth("fakefilename", [
"pipeline:public_api",
"pipeline:admin_api",
])
mock_update_file.assert_called_once_with("fakefilename")
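Read together, the expected writes describe the transformation: in each named paste pipeline, inject the admin_token_auth filter right after request_id unless it is already present. A sketch of that behavior under those assumptions (the authoritative code lives in octane.util.keystone):

from octane.util import helpers
from octane.util import subprocess

def add_admin_token_auth(filename, sections):
    with subprocess.update_file(filename) as (old, new):
        for line, section, key, value in helpers.iterate_parameters(old):
            if section in sections and key == "pipeline":
                filters = value.split()
                if "admin_token_auth" not in filters:
                    # slot the filter in right after request_id
                    filters.insert(1, "admin_token_auth")
                    line = "pipeline = {0}\n".format(" ".join(filters))
            new.write(line)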

View File

@ -125,12 +125,17 @@ def _check_upgrade_levels(mocker, node, content, expected_content):
NOVA_DEFAULT = b"#\u0444\n[DEFAULT]\ndebug = True\n"
NOVA_WITH_EMPTY_LEVELS = NOVA_DEFAULT + b"[upgrade_levels]\n"
NOVA_WITH_JUNO_LEVELS = NOVA_WITH_EMPTY_LEVELS + b"compute=juno\n"
NOVA_WITH_KILO_LEVELS = NOVA_WITH_EMPTY_LEVELS + b"compute=kilo\n"
NOVA_BROKEN_LEVELS = NOVA_DEFAULT + b"compute=essex\n[upgrade_levels]\n"
NOVA_BROKEN_LEVELS_WITH_KILO = NOVA_BROKEN_LEVELS + b"compute=kilo\n"
@pytest.mark.parametrize("content,expected_content", [
(NOVA_DEFAULT, NOVA_DEFAULT),
(NOVA_DEFAULT, NOVA_WITH_KILO_LEVELS),
(NOVA_WITH_EMPTY_LEVELS, NOVA_WITH_KILO_LEVELS),
(NOVA_WITH_JUNO_LEVELS, NOVA_WITH_KILO_LEVELS),
(NOVA_BROKEN_LEVELS, NOVA_BROKEN_LEVELS_WITH_KILO),
])
def test_add_compute_upgrade_levels(mocker, node, content, expected_content):
with _check_upgrade_levels(mocker, node, content, expected_content):
@ -141,6 +146,7 @@ def test_add_compute_upgrade_levels(mocker, node, content, expected_content):
(NOVA_DEFAULT, NOVA_DEFAULT),
(NOVA_WITH_EMPTY_LEVELS, NOVA_WITH_EMPTY_LEVELS),
(NOVA_WITH_KILO_LEVELS, NOVA_WITH_EMPTY_LEVELS),
(NOVA_BROKEN_LEVELS_WITH_KILO, NOVA_WITH_EMPTY_LEVELS),
])
def test_remove_compute_upgrade_levels(mocker, node, content,
expected_content):

View File

@ -0,0 +1,38 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.util import patch
@pytest.mark.parametrize("patches", [("patch_1", ), ("patch_1", "patch_2")])
@pytest.mark.parametrize("cwd", ["test_dir"])
@pytest.mark.parametrize("is_exception", [True, False])
def test_applied_context_manager(mocker, patches, cwd, is_exception):
patch_mock = mocker.patch("octane.util.patch.patch_apply")
class TestException(Exception):
pass
if is_exception:
with pytest.raises(TestException):
with patch.applied_patch(cwd, *patches):
raise TestException
else:
with patch.applied_patch(cwd, *patches):
pass
assert [
mock.call(cwd, patches),
mock.call(cwd, patches, revert=True)
] == patch_mock.call_args_list
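The call list asserted above implies a symmetric apply/revert pair around the managed block, with the revert running even when the body raises. As a sketch (patch_apply is assumed to shell out to patch(1)):

import contextlib

@contextlib.contextmanager
def applied_patch(cwd, *patches):
    patch_apply(cwd, patches)
    try:
        yield
    finally:
        # revert unconditionally, including on exceptions
        patch_apply(cwd, patches, revert=True)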

View File

@ -16,13 +16,35 @@ from octane.util import puppet as puppet_util
from octane.util import subprocess
def test_apply_host(mock_subprocess):
puppet_util.apply_host()
assert mock_subprocess.call_count == 1
@pytest.mark.parametrize("name", ["cobbler", "nailgun"])
@pytest.mark.parametrize(("returncode", "is_error"), [
(0, False), (1, True), (2, False), (4, True), (6, True),
])
def test_apply_task(mock_subprocess, name, returncode, is_error):
filename = "/etc/puppet/modules/fuel/examples/{0}.pp".format(name)
cmd = ['puppet', 'apply', '-d', '-v', "--color", "false",
'--detailed-exitcodes', filename]
if is_error:
mock_subprocess.side_effect = \
subprocess.CalledProcessError(returncode, 'CMD')
with pytest.raises(subprocess.CalledProcessError):
puppet_util.apply_task(name)
else:
puppet_util.apply_task(name)
mock_subprocess.assert_called_once_with(cmd)
def test_apply_host_error(mock_subprocess):
def test_apply_all_tasks(mock_subprocess):
puppet_util.apply_all_tasks()
expected_filename = "/etc/puppet/modules/fuel/examples/deploy.sh"
mock_subprocess.assert_called_once_with([expected_filename])
def test_apply_all_tasks_error(mocker, mock_subprocess):
mock_log = mocker.patch("octane.util.puppet.LOG")
exc = subprocess.CalledProcessError(1, 'TEST_PROCESS')
mock_subprocess.side_effect = exc
with pytest.raises(type(exc)):
puppet_util.apply_host()
with pytest.raises(subprocess.CalledProcessError):
puppet_util.apply_all_tasks()
mock_log.error.assert_called_once_with(
"Cannot apply Puppet state on host: %s", exc)

View File

@ -0,0 +1,44 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from octane.util import sql
from octane.util import subprocess
@pytest.mark.parametrize("sql_raw, result_data", [
("row_1|val_1\nrow_2|val_1\n", ["row_1|val_1", "row_2|val_1"]),
("", [])
])
@pytest.mark.parametrize("db", ["nailgun", "keystone"])
def test_run_sql(mocker, sql_raw, result_data, db):
run_mock = mocker.patch(
"octane.util.docker.run_in_container",
return_value=(sql_raw, None))
test_sql = "test_sql"
results = sql.run_psql_in_container(test_sql, db)
run_mock.assert_called_once_with(
"postgres",
[
"sudo",
"-u",
"postgres",
"psql",
db,
"--tuples-only",
"--no-align",
"-c",
test_sql,
],
stdout=subprocess.PIPE
)
assert result_data == results
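For orientation, a hedged usage example of the helper under test; psql returns unaligned, pipe-separated tuples, one list entry per row (the table and columns here are hypothetical):

from octane.util import sql

rows = sql.run_psql_in_container(
    "select id, status from clusters", "nailgun")
for row in rows:                 # e.g. "1|operational"
    cluster_id, status = row.split("|", 1)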

View File

@ -0,0 +1,68 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.util import subprocess
class _TestException(Exception):
pass
@pytest.mark.parametrize(("exception", "reraise", "calls"), [
(None, False, [
mock.call.stat("/fake/filename"),
mock.call.chmod("/temp/filename", 0o640),
mock.call.chown("/temp/filename", 2, 3),
mock.call.rename("/fake/filename", "/fake/filename.bak"),
mock.call.rename("/temp/filename", "/fake/filename"),
mock.call.unlink("/fake/filename.bak"),
]),
(subprocess.DontUpdateException, False, [
mock.call.unlink("/temp/filename"),
]),
(_TestException, True, [
mock.call.unlink("/temp/filename"),
]),
])
def test_update_file(mocker, mock_open, exception, reraise, calls):
mock_tempfile = mocker.patch("octane.util.tempfile.get_tempname")
mock_tempfile.return_value = "/temp/filename"
mock_old = mock.MagicMock()
mock_new = mock.MagicMock()
mock_open.side_effect = [mock_old, mock_new]
mock_os = mock.Mock()
os_methods = ["unlink", "stat", "chmod", "chown", "rename"]
for method in os_methods:
mocker.patch("os." + method, new=getattr(mock_os, method))
mock_os.stat.return_value.configure_mock(
st_mode=0o640,
st_uid=2,
st_gid=3,
)
if reraise:
with pytest.raises(exception):
with subprocess.update_file("/fake/filename"):
raise exception
else:
with subprocess.update_file("/fake/filename"):
if exception is not None:
raise exception
assert mock_os.mock_calls == calls
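The expected os-call sequences spell out the whole update protocol: write to a temp file, mirror the original's mode and ownership, swap via a .bak rename, and discard the temp file when the caller bails out. A sketch under those assumptions (DontUpdateException really does live next to update_file in octane.util.subprocess):

import contextlib
import os

from octane.util import tempfile

class DontUpdateException(Exception):
    """Raised inside the with-block to discard the pending rewrite."""

@contextlib.contextmanager
def update_file(filename):
    temp = tempfile.get_tempname(dir=os.path.dirname(filename))
    with open(filename) as old, open(temp, "w") as new:
        try:
            yield old, new
        except DontUpdateException:
            os.unlink(temp)
            return
        except Exception:
            os.unlink(temp)
            raise
    stat = os.stat(filename)                 # preserve mode and ownership
    os.chmod(temp, stat.st_mode)
    os.chown(temp, stat.st_uid, stat.st_gid)
    os.rename(filename, filename + ".bak")   # keep a fallback copy
    os.rename(temp, filename)
    os.unlink(filename + ".bak")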

View File

@ -0,0 +1,53 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import pytest
from octane.util import tempfile
@pytest.mark.parametrize("dir", ["dir_1", "dir_2", None])
@pytest.mark.parametrize("prefix", ["prefix_1", "prefix_2", None])
def test_get_tempname(mocker, dir, prefix):
fd = mock.Mock()
tmp_file_name = mock.Mock()
mock_mkstemp = mocker.patch(
"tempfile.mkstemp",
return_value=(fd, tmp_file_name))
os_close_mock = mocker.patch("os.close")
assert tmp_file_name == tempfile.get_tempname(dir, prefix)
if prefix:
mock_mkstemp.assert_called_once_with(dir=dir, prefix=prefix)
else:
mock_mkstemp.assert_called_once_with(dir=dir)
os_close_mock.assert_called_once_with(fd)
@pytest.mark.parametrize("is_exception", [True, False])
def test_temp_dir(mocker, is_exception):
class TestException(Exception):
pass
temp_dir_name = mock.Mock()
mkdtemp_mock = mocker.patch("tempfile.mkdtemp", return_value=temp_dir_name)
rm_tree_mock = mocker.patch("shutil.rmtree")
if is_exception:
with pytest.raises(TestException):
with tempfile.temp_dir():
raise TestException
else:
with tempfile.temp_dir():
pass
mkdtemp_mock.assert_called_once_with()
rm_tree_mock.assert_called_once_with(temp_dir_name)
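Both tests fix a tiny surface, sketched below; the module wraps the stdlib so callers get either a plain path or a self-cleaning directory (illustrative, octane.util.tempfile is the real thing):

import contextlib
import os
import shutil
import tempfile as std_tempfile   # stdlib, aliased to avoid the name clash

def get_tempname(dir=None, prefix=None):
    kwargs = {"prefix": prefix} if prefix else {}
    fd, name = std_tempfile.mkstemp(dir=dir, **kwargs)
    os.close(fd)                  # caller only wants the path
    return name

@contextlib.contextmanager
def temp_dir():
    name = std_tempfile.mkdtemp()
    try:
        yield name
    finally:
        shutil.rmtree(name)       # removed even on exceptions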

View File

@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import contextlib
import io
import itertools
@ -61,6 +63,15 @@ def archivate_container_cmd_output(archive, container, cmd, filename):
archive.addfile(info, dump)
def archivate_cmd_output(archive, cmd, filename):
suffix = ".{0}".format(os.path.basename(filename))
with tempfile.NamedTemporaryFile(suffix=suffix) as f:
with subprocess.popen(cmd, stdout=subprocess.PIPE) as process:
shutil.copyfileobj(process.stdout, f)
f.flush()
archive.add(f.name, filename)
def filter_members(archive, dir_name):
if '/' not in dir_name:
dir_name = "{0}/".format(dir_name)

octane/util/auth.py Normal file
View File

@ -0,0 +1,34 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import yaml
import contextlib
from octane.util import helpers
from octane.util import tempfile
@contextlib.contextmanager
def set_astute_password(auth_context):
tmp_file_name = tempfile.get_tempname(
dir="/etc/fuel", prefix=".astute.yaml.octane")
shutil.copy2("/etc/fuel/astute.yaml", tmp_file_name)
try:
data = helpers.get_astute_dict()
data["FUEL_ACCESS"]["password"] = auth_context.password
with open("/etc/fuel/astute.yaml", "w") as current:
yaml.safe_dump(data, current, default_flow_style=False)
yield
finally:
shutil.move(tmp_file_name, "/etc/fuel/astute.yaml")
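A usage sketch: any object with a password attribute will do for the context argument (NailgunCredentialsContext is what the backup/restore code passes in practice):

from octane.handlers import backup_restore
from octane.util import auth

context = backup_restore.NailgunCredentialsContext("admin", "s3cret")
with auth.set_astute_password(context):
    # code here sees the CLI password in /etc/fuel/astute.yaml
    pass
# the pristine astute.yaml is moved back on exit, error or not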

View File

@ -11,10 +11,18 @@
# under the License.
import os.path
from octane import magic_consts
from octane.util import docker
from octane.util import ssh
class NoDisksInfoError(Exception):
message = "No disks info was found for node {0}"
def __init__(self, node_id):
super(NoDisksInfoError, self).__init__(self.message.format(node_id))
def get_node_disks(node):
return node.get_attribute('disks')
@ -29,9 +37,8 @@ def parse_last_partition_end(out):
# size in MB
def create_partition(disk_name, size, node):
out, _ = ssh.call(['parted', '/dev/%s' % disk_name, 'unit', 'MB', 'print'],
stdout=ssh.PIPE,
node=node)
out = ssh.call_output(
['parted', '/dev/%s' % disk_name, 'unit', 'MB', 'print'], node=node)
start = parse_last_partition_end(out) + 1
end = start + size
ssh.call(['parted', '/dev/%s' % disk_name, 'unit', 'MB', 'mkpart',
@ -39,13 +46,16 @@ def create_partition(disk_name, size, node):
node=node)
def update_partition_generator():
fname = 'update_release_partition_info.py'
command = ['python', os.path.join('/tmp', fname)]
docker.run_in_container('nailgun', command)
def update_node_partition_info(node_id):
fname = 'update_node_partition_info.py'
command = ['python', os.path.join('/tmp', fname), str(node_id)]
docker.run_in_container('nailgun', command)
def create_configdrive_partition(node):
disks = get_node_disks(node)
if not disks:
raise NoDisksInfoError(node.data['id'])
create_partition(disks[0]['name'],
magic_consts.CONFIGDRIVE_PART_SIZE,
node)

View File

@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import contextlib
import io
import logging
@ -210,6 +212,16 @@ def _container_action(container, action):
def stop_container(container):
_container_action(container, "stop")
container_id = subprocess.call_output([
'docker',
'ps',
'--filter',
'name={0}'.format(container),
'--format',
'{{.ID}}'
]).strip()
if container_id:
subprocess.call(["docker", "stop", container_id])
def start_container(container):

View File

@ -11,13 +11,18 @@
# under the License.
import fuelclient
import collections
import json
import logging
import os.path
import pipes
import time
import uuid
import yaml
from distutils import version
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
from fuelclient.objects import task as task_obj
@ -25,6 +30,8 @@ from fuelclient.objects import task as task_obj
from octane.helpers import tasks as tasks_helpers
from octane.helpers import transformations
from octane import magic_consts
from octane.util import disk
from octane.util import sql
from octane.util import ssh
from octane.util import subprocess
@ -77,8 +84,31 @@ def change_env_settings(env_id, master_ip=''):
attrs['editable']['public_ssl']['services']['value'] = False
attrs['editable']['external_ntp']['ntp_list']['value'] = master_ip
attrs['editable']['external_dns']['dns_list']['value'] = master_ip
if get_env_provision_method(env) != 'image':
attrs['editable']['provision']['method']['value'] = 'image'
env.update_attributes(attrs)
generated_data = sql.run_psql_in_container(
"select generated from attributes where cluster_id={0}".format(env_id),
"nailgun"
)[0]
generated_json = json.loads(generated_data)
release_data = sql.run_psql_in_container(
"select attributes_metadata from releases where id={0}".format(
env.data['release_id']),
"nailgun"
)[0]
release_json = json.loads(release_data)
release_image_dict = release_json['generated']['provision']['image_data']
settings_cls = collections.namedtuple("settings", ["MASTER_IP", "id"])
settings = settings_cls(master_ip, env_id)
for key, value in generated_json['provision']['image_data'].iteritems():
value['uri'] = release_image_dict[key]['uri'].format(settings=settings,
cluster=settings)
sql.run_psql_in_container(
"update attributes set generated='{0}' where cluster_id={1}".format(
json.dumps(generated_json), env_id),
"nailgun"
)
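The namedtuple is what makes the release templates renderable: image URIs are stored as Python format strings that reference both {settings.MASTER_IP} and {cluster.id}, so a single tuple exposing both attribute names can stand in for both objects. A worked example with the values from the unit test earlier in this change:

import collections

settings_cls = collections.namedtuple("settings", ["MASTER_IP", "id"])
settings = settings_cls("10.0.0.1", 1)
template = "{cluster.id}_{settings.MASTER_IP}_blabal.tar.gz"
assert template.format(settings=settings, cluster=settings) == \
    "1_10.0.0.1_blabal.tar.gz"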
def clone_env(env_id, release):
@ -117,30 +147,61 @@ def delete_fuel_resources(env):
)
def parse_tenant_get(output, field):
for line in output.splitlines()[3:-1]:
parts = line.split()
if parts[1] == field:
return parts[3]
raise Exception(
"Field {0} not found in output:\n{1}".format(field, output))
def get_service_tenant_id(env, node=None):
if node is None:
node = get_one_controller(env)
def get_keystone_tenants(env, node):
password = get_admin_password(env, node)
tenant_out = ssh.call_output(
[
'sh', '-c',
'. /root/openrc; keystone --os-password={0} tenant-get services'
.format(password),
'. /root/openrc; keystone --os-password={0} tenant-list'
.format(pipes.quote(password)),
],
node=node,
)
tenant_id = parse_tenant_get(tenant_out, 'id')
return tenant_id
tenants = {}
for line in tenant_out.splitlines()[3:-1]:
parts = line.split()
tenants[parts[3]] = parts[1]
return tenants
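The magic indices work because whitespace-splitting a pretty-printed row keeps each pipe as its own token; with a row from TENANT_LIST_SAMPLE above:

row = "| 45632156d201479cb2c0171590435be1 | admin    | True    |"
parts = row.split()
# parts == ['|', '45632156d201479cb2c0171590435be1', '|', 'admin',
#           '|', 'True', '|']
assert parts[1] == "45632156d201479cb2c0171590435be1"   # tenant id
assert parts[3] == "admin"                              # tenant name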
def get_openstack_projects(env, node):
password = get_admin_password(env, node)
out = ssh.call_output(
[
'sh', '-c',
'. /root/openrc; openstack --os-password {0} project list -f json'
.format(pipes.quote(password)),
],
node=node,
)
data = [{k.lower(): v for k, v in d.items()}
for d in json.loads(out)]
return {i["name"]: i["id"] for i in data}
def get_openstack_project_dict(env, node=None):
if node is None:
node = get_one_controller(env)
node_env_version = str(node.env.data.get('fuel_version'))
if node_env_version < version.StrictVersion("7.0"):
mapping = get_keystone_tenants(env, node)
else:
mapping = get_openstack_projects(env, node)
return mapping
def get_openstack_project_value(env, node, key):
data = get_openstack_project_dict(env, node)
try:
return data[key.lower()]
except KeyError:
raise Exception(
"Field {0} not found in openstack project list".format(key))
def get_service_tenant_id(env, node):
return get_openstack_project_value(env, node, "services")
def cache_service_tenant_id(env, node=None):
@ -225,14 +286,29 @@ def wait_for_nodes(nodes, status, timeout=60 * 60, check_freq=60):
wait_for_node(node, status, timeout, check_freq)
def move_nodes(env, nodes):
def move_nodes(env, nodes, provision=True, roles=None):
env_id = env.data['id']
cmd = ["fuel2", "env", "move", "node"]
if not provision:
cmd += ['--no-provision']
if roles:
cmd += ['--roles', ','.join(roles)]
for node in nodes:
node_id = node.data['id']
subprocess.call(
["fuel2", "env", "move", "node", str(node_id), str(env_id)])
LOG.info("Nodes provision started. Please wait...")
wait_for_nodes(nodes, "provisioned")
cmd_move_node = cmd + [str(node_id), str(env_id)]
if provision and incompatible_provision_method(env):
disk.create_configdrive_partition(node)
disk.update_node_partition_info(node.data["id"])
subprocess.call(cmd_move_node)
if provision:
LOG.info("Nodes provision started. Please wait...")
wait_for_nodes(nodes, "provisioned")
def copy_vips(env):
subprocess.call(
["fuel2", "env", "copy", "vips", str(env.data['id'])]
)
def provision_nodes(env, nodes):
@ -254,6 +330,21 @@ def deploy_changes(env, nodes):
wait_for_env(env, "operational", timeout=180 * 60)
def prepare_net_info(info):
quantum_settings = info["quantum_settings"]
pred_nets = quantum_settings["predefined_networks"]
phys_nets = quantum_settings["L2"]["phys_nets"]
if 'net04' in pred_nets and \
pred_nets['net04']['L2']['network_type'] == "vlan":
physnet = pred_nets["net04"]["L2"]["physnet"]
segment_id = phys_nets[physnet]["vlan_range"].split(":")[1]
pred_nets['net04']["L2"]["segment_id"] = segment_id
if 'net04_ext' in pred_nets:
pred_nets["net04_ext"]["L2"]["physnet"] = ""
pred_nets["net04_ext"]["L2"]["network_type"] = "local"
def get_deployment_info(env):
deployment_info = []
try:
@ -278,12 +369,6 @@ def get_admin_password(env, node=None):
return get_astute_yaml(env, node)['access']['password']
def set_network_template(env, filename):
with open(filename, 'r') as f:
data = f.read()
env.set_network_template_data(yaml.load(data))
def update_deployment_info(env, isolated):
default_info = env.get_default_facts('deployment')
network_data = env.get_network_data()
@ -312,7 +397,7 @@ def update_deployment_info(env, isolated):
transformations.reset_gw_admin(info, gw_admin)
# From run_ping_checker
info['run_ping_checker'] = False
transformations.remove_predefined_nets(info)
prepare_net_info(info)
deployment_info.append(info)
env.upload_facts('deployment', deployment_info)
@ -368,3 +453,18 @@ def iter_deployment_info(env, roles):
for node in controllers:
info = find_node_deployment_info(node, roles, full_info)
yield (node, info)
def incompatible_provision_method(env):
if env.data.get("fuel_version"):
env_version = version.StrictVersion(env.data["fuel_version"])
else:
error_message = ("Cannot find version of environment {0}:"
" attribute 'fuel_version' missing or has"
" incorrect value".format(env.data["id"]))
raise Exception(error_message)
provision_method = get_env_provision_method(env)
if env_version < version.StrictVersion(magic_consts.COBBLER_DROP_VERSION) \
and provision_method != 'image':
return True
return False


@ -0,0 +1,33 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from octane import magic_consts
from octane.util import subprocess
def get_not_active_images_uuids():
fuel_bootstrap_list = ["fuel-bootstrap", "list", "--format", "json"]
images = json.loads(subprocess.call_output(fuel_bootstrap_list))
return [img["uuid"] for img in images if img["status"] != "active"]
def delete_image(uuid):
subprocess.call(["fuel-bootstrap", "delete", uuid])
def delete_not_active_images():
    # Remove old images because they were created with an old SSH key pair
for image_uuid in get_not_active_images_uuids():
if image_uuid not in magic_consts.BOOTSTRAP_UNSUPPORTED_IMAGES:
delete_image(image_uuid)
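A hypothetical fuel-bootstrap list --format json payload, showing the only two keys the code above relies on:

import json

sample = ('[{"uuid": "ubuntu-old", "status": ""},'
          ' {"uuid": "ubuntu-new", "status": "active"}]')
images = json.loads(sample)
print([img["uuid"] for img in images if img["status"] != "active"])
# ['ubuntu-old'] -- only non-active images are candidates for deletion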


@ -12,11 +12,13 @@
import contextlib
import fuelclient
from fuelclient import client
from fuelclient import fuelclient_settings
@contextlib.contextmanager
def set_auth_context(auth_context):
def set_auth_context_80(auth_context):
old_credentials = (client.APIClient.user, client.APIClient.password)
client.APIClient.user = auth_context.user
client.APIClient.password = auth_context.password
@ -26,3 +28,27 @@ def set_auth_context(auth_context):
finally:
(client.APIClient.user, client.APIClient.password) = old_credentials
client.APIClient._session = client.APIClient._keystone_client = None
@contextlib.contextmanager
def set_auth_context_90(auth_context):
settings = fuelclient_settings.get_settings()
config = settings.config
old_credentials = (settings.OS_USERNAME, settings.OS_PASSWORD)
config['OS_USERNAME'] = auth_context.user
config['OS_PASSWORD'] = auth_context.password
client.APIClient._session = client.APIClient._keystone_client = None
try:
yield
finally:
(config['OS_USERNAME'], config['OS_PASSWORD']) = old_credentials
client.APIClient._session = client.APIClient._keystone_client = None
# NOTE(akscram): The 9.0.0 release of fuelclient is not yet available
# on PyPI, so this workaround is needed to test octane on master nodes
# running the 9.0 release.
if fuelclient.__version__ == "8.0.0":
set_auth_context = set_auth_context_80
else:
set_auth_context = set_auth_context_90
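A minimal usage sketch; AuthContext is a stand-in for whatever object octane passes in, since only .user and .password are accessed:

import collections

AuthContext = collections.namedtuple("AuthContext", ["user", "password"])

with set_auth_context(AuthContext("admin", "secret")):
    pass  # fuelclient calls here run with the substituted credentials
# the previous credentials and session are restored on exit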


@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import re
import yaml
def merge_dicts(base_dict, update_dict):
result = base_dict.copy()
@ -19,3 +23,30 @@ def merge_dicts(base_dict, update_dict):
else:
result[key] = merge_dicts(result[key], val)
return result
def get_astute_dict():
with open("/etc/fuel/astute.yaml", "r") as current:
return yaml.load(current)
def load_yaml(filename):
with open(filename, "r") as f:
return yaml.load(f)
def iterate_parameters(fp):
section = None
for line in fp:
match = re.match(r'^\s*\[(?P<section>[^\]]+)', line)
if match:
section = match.group('section')
yield line, section, None, None
continue
        match = re.match(r'^\s*(?P<parameter>[^=\s]+)\s*='
                         r'\s*(?P<value>[^\s.+](?:\s*[^\s.+])*)\s*$', line)
if match:
parameter, value = match.group("parameter", "value")
yield line, section, parameter, value
continue
yield line, section, None, None
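A quick illustration of the (line, section, parameter, value) tuples produced for a small INI-style fragment (any iterable of lines works as fp):

sample = [
    "[identity]\n",
    "default_domain_id = default\n",
    "# a comment\n",
]
for line, section, parameter, value in iterate_parameters(sample):
    print((section, parameter, value))
# ('identity', None, None)
# ('identity', 'default_domain_id', 'default')
# ('identity', None, None)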

octane/util/keystone.py (new file)

@ -0,0 +1,35 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import helpers
from octane.util import subprocess
def unset_default_domain_id(filename):
with subprocess.update_file(filename) as (old, new):
for line, section, parameter, value in helpers.iterate_parameters(old):
if section == "identity" and parameter == "default_domain_id":
line = "#{0}".format(line)
new.write(line)
def add_admin_token_auth(filename, pipelines):
with subprocess.update_file(filename) as (old, new):
for line, section, parameter, value in helpers.iterate_parameters(old):
if section in pipelines and parameter == "pipeline" and \
"admin_token_auth" not in value:
items = value.split()
token_auth_idx = items.index("token_auth")
items.insert(token_auth_idx, "admin_token_auth")
value = " ".join(items)
line = "{0} = {1}\n".format(parameter, value)
new.write(line)
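The insertion logic above, replayed on an illustrative pipeline value:

value = "cors token_auth json_body app_v3"
items = value.split()
items.insert(items.index("token_auth"), "admin_token_auth")
print(" ".join(items))
# cors admin_token_auth token_auth json_body app_v3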


@ -44,7 +44,7 @@ def disable_apis(env):
with ssh.update_file(sftp, f) as (old, new):
contents = old.read()
if not mode_tcp_re.search(contents):
raise ssh.DontUpdateException
raise subprocess.DontUpdateException
new.write(contents)
if not contents.endswith('\n'):
new.write('\n')


@ -135,10 +135,25 @@ def wait_for_mcollective_start(nodes, timeout=600):
def add_compute_upgrade_levels(node, version):
sftp = ssh.sftp(node)
with ssh.update_file(sftp, '/etc/nova/nova.conf') as (old, new):
add_upgrade_levels = True
in_section = False
for line in old:
new.write(line)
if line.startswith("[upgrade_levels]"):
add_upgrade_levels = False
in_section = True
new.write(line)
new.write("compute={0}\n".format(version))
continue
if in_section and line.startswith("["):
in_section = False
if in_section and line.startswith("compute="):
                LOG.warning(
                    "Skipping line so as not to duplicate the compute "
                    "upgrade level setting: %s" % line.rstrip())
continue
new.write(line)
if add_upgrade_levels:
new.write("[upgrade_levels]\ncompute={0}\n".format(version))
def remove_compute_upgrade_levels(node):

octane/util/patch.py (new file)

@ -0,0 +1,39 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from octane.util import subprocess
def patch_apply(cwd, patches, revert=False):
for path in patches:
with open(path, 'rb') as patch:
try:
subprocess.call(["patch", "-R", "-p1"], stdin=patch, cwd=cwd)
except subprocess.CalledProcessError:
if not revert:
pass
else:
raise
if not revert:
patch.seek(0)
subprocess.call(["patch", "-N", "-p1"], stdin=patch, cwd=cwd)
@contextlib.contextmanager
def applied_patch(cwd, *patches):
patch_apply(cwd, patches)
try:
yield
finally:
patch_apply(cwd, patches, revert=True)
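A minimal usage sketch (both paths are placeholders): the patches are applied on entry and reverted on exit, even if the body raises:

with applied_patch("/usr/share/repo", "/tmp/fix-foo.patch"):
    pass  # run code that needs the patched tree here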


@ -14,21 +14,49 @@ import logging
import os.path
from octane import magic_consts
from octane.util import patch
from octane.util import subprocess
LOG = logging.getLogger(__name__)
def apply_host():
cmd = ['puppet', 'apply', '-d', '-v']
path = os.path.join(magic_consts.PUPPET_DIR,
'nailgun',
'examples',
'host-only.pp')
cmd.append(path)
def apply_task(task):
filename = '{0}.pp'.format(task)
path = os.path.join(magic_consts.PUPPET_TASKS_DIR, filename)
cmd = ['puppet', 'apply', '-d', '-v', "--color", "false",
'--detailed-exitcodes', path]
try:
subprocess.call(cmd)
except subprocess.CalledProcessError as exc:
# NOTE(akscram): Detailed exit codes of puppet apply:
# 0: The run succeeded with no changes or failures; the system
# was already in the desired state.
# 1: The run failed, or wasn't attempted due to another run
# already in progress.
# 2: The run succeeded, and some resources were changed.
# 4: The run succeeded, and some resources failed.
# 6: The run succeeded, and included both changes and failures.
if exc.returncode != 2:
LOG.error("Cannot apply the Puppet task: %s, %s",
task, exc.message)
raise
def apply_all_tasks():
try:
subprocess.call([magic_consts.PUPPET_APPLY_TASKS_SCRIPT])
except subprocess.CalledProcessError as exc:
LOG.error("Cannot apply Puppet state on host: %s",
exc.message)
exc)
raise
def patch_modules(revert=False):
puppet_patch_dir = os.path.join(magic_consts.CWD, "patches", "puppet")
patches = []
for d in os.listdir(puppet_patch_dir):
d = os.path.join(puppet_patch_dir, d)
if not os.path.isdir(d):
continue
patches.append(os.path.join(d, "patch"))
patch.patch_apply(magic_consts.PUPPET_DIR, patches, revert=revert)

octane/util/sql.py (new file)

@ -0,0 +1,39 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octane.util import docker
from octane.util import subprocess
def run_psql_in_container(sql, db):
results, _ = docker.run_in_container(
"postgres",
[
"sudo",
"-u",
"postgres",
"psql",
db,
"--tuples-only",
"--no-align",
"-c",
sql,
],
stdout=subprocess.PIPE)
return results.strip().splitlines()
def run_psql(sql, db):
output = subprocess.call_output(
["sudo", "-u", "postgres", "psql", db, "--tuples-only", "--no-align",
"-c", sql])
return output.strip().splitlines()
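An illustrative call; with --tuples-only and --no-align, each returned element is one psql row with '|'-separated columns:

rows = run_psql("select id, name from clusters", "nailgun")
# e.g. rows == ['1|env-old', '2|env-new']
for row in rows:
    cluster_id, name = row.split("|", 1)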


@ -191,10 +191,6 @@ def sftp(node):
return _get_sftp(node)
class DontUpdateException(Exception):
pass
@contextlib.contextmanager
def update_file(sftp, filename):
old = sftp.open(filename, 'r')
@ -209,7 +205,7 @@ def update_file(sftp, filename):
with contextlib.nested(old, new):
try:
yield old, new
except DontUpdateException:
except subprocess.DontUpdateException:
sftp.unlink(temp_filename)
return
except Exception:


@ -22,6 +22,8 @@ import re
import subprocess
import threading
from octane.util import tempfile
LOG = logging.getLogger(__name__)
PIPE = subprocess.PIPE
CalledProcessError = subprocess.CalledProcessError
@ -202,3 +204,32 @@ def call(cmd, **kwargs):
def call_output(cmd, **kwargs):
return call(cmd, stdout=PIPE, **kwargs)[0]
class DontUpdateException(Exception):
pass
@contextlib.contextmanager
def update_file(filename):
old = open(filename, 'r')
dirname = os.path.dirname(filename)
prefix = ".{0}.".format(os.path.basename(filename))
temp_filename = tempfile.get_tempname(dir=dirname, prefix=prefix)
new = open(temp_filename, 'w')
with contextlib.nested(old, new):
try:
yield old, new
except DontUpdateException:
os.unlink(temp_filename)
return
except Exception:
os.unlink(temp_filename)
raise
stat = os.stat(filename)
os.chmod(temp_filename, stat.st_mode)
os.chown(temp_filename, stat.st_uid, stat.st_gid)
bak_filename = filename + '.bak'
os.rename(filename, bak_filename)
os.rename(temp_filename, filename)
os.unlink(bak_filename)
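A minimal usage sketch of the context manager above: copy a file line by line and bail out, leaving the original untouched, when there is nothing to change (the parameter handling is made up):

def comment_out(filename, parameter):
    with update_file(filename) as (old, new):
        changed = False
        for line in old:
            if line.startswith(parameter):
                line = "#" + line
                changed = True
            new.write(line)
        if not changed:
            # removes the temp file and keeps the original as-is
            raise DontUpdateException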


@ -10,17 +10,27 @@
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from __future__ import absolute_import
releases = db().query(models.Release)
for rel in releases:
meta = rel.volumes_metadata
for volume in meta['volumes']:
if volume['min_size']['generator'] == 'calc_min_log_size':
volume['min_size']['generator'] = 'calc_gb_to_mb'
volume['min_size']['generator_args'] = [2]
db().query(models.Release).filter_by(id=rel.id).update(
{"volumes_metadata": meta})
import contextlib
import os
import shutil
import tempfile
db().commit()
def get_tempname(dir=None, prefix=None):
kwargs = {}
if prefix is not None:
kwargs["prefix"] = prefix
fd, tmp_file_name = tempfile.mkstemp(dir=dir, **kwargs)
os.close(fd)
return tmp_file_name
@contextlib.contextmanager
def temp_dir():
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
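Usage sketch: the directory exists only inside the with-block and is removed afterwards, even on error:

with temp_dir() as tmp:
    dump = os.path.join(tmp, "dump.sql")
    # ... create and consume files under tmp ...
# tmp and everything in it is gone here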

other-requirements.txt (new file)

@ -0,0 +1,4 @@
# Due to a problem with the new version of cryptography==1.4 we have
# to add these binary dependencies.
libffi-dev
libssl-dev


@ -8,4 +8,4 @@ python-fuelclient # It pulls following libs:
#requests
#stevedore
paramiko==1.13.0
paramiko


@ -23,33 +23,24 @@ classifier =
packages =
octane
[extras]
zabbix =
pyzabbix==0.7.3
[entry_points]
console_scripts =
octane = octane.app:main
octane =
prepare = octane.commands.prepare:PrepareCommand
revert-prepare = octane.commands.prepare:RevertCommand
upgrade-env = octane.commands.upgrade_env:UpgradeEnvCommand
upgrade-node = octane.commands.upgrade_node:UpgradeNodeCommand
upgrade-db = octane.commands.upgrade_db:UpgradeDBCommand
upgrade-ceph = octane.commands.upgrade_ceph:UpgradeCephCommand
install-node = octane.commands.install_node:InstallNodeCommand
upgrade-control = octane.commands.upgrade_controlplane:UpgradeControlPlaneCommand
upgrade-osd = octane.commands.osd_upgrade:UpgradeOSDCommand
rollback-control = octane.commands.rollback_controlplane:RollbackControlPlaneCommand
sync-networks = octane.commands.sync_networks:SyncNetworksCommand
cleanup = octane.commands.cleanup:CleanupCommand
sync-images = octane.commands.sync_images:SyncImagesCommand
sync-images-prepare = octane.commands.sync_images:SyncImagesPrepareCommand
update-plugin-settings = octane.commands.update_plugin_settings:UpdatePluginSettingsCommand [zabbix]
fuel-backup = octane.commands.backup:BackupCommand
fuel-restore = octane.commands.restore:RestoreCommand
fuel-repo-backup = octane.commands.backup:BackupRepoCommand
fuel-repo-restore = octane.commands.restore:RestoreRepoCommand
update-bootstrap-centos = octane.commands.update_bootstrap:UpdateCentos
enable-release = octane.commands.enable_release:EnableReleaseCommand
octane.handlers.upgrade =
controller = octane.handlers.upgrade.controller:ControllerUpgrade
compute = octane.handlers.upgrade.compute:ComputeUpgrade
@ -58,4 +49,4 @@ octane.handlers.upgrade =
fuelclient =
env_clone = octane.fuelclient.clone_env:EnvClone
env_move_node = octane.fuelclient.move_node:EnvMoveNode
env_clone-ips = octane.fuelclient.clone_ips:CloneIPs
env_copy_vips = octane.fuelclient.copy_vips:CopyVIPs


@ -34,13 +34,14 @@ installations to version 9.0.
%setup -cq -n %{name}-%{version}
%build
cd %{_builddir}/%{name}-%{version} && OSLO_PACKAGE_VERSION=%{version} python setup.py egg_info && cp octane.egg-info/PKG-INFO . && python setup.py build
cd %{_builddir}/%{name}-%{version} && OSLO_PACKAGE_VERSION=%{version} %{__python2} setup.py egg_info && cp octane.egg-info/PKG-INFO . && %{__python2} setup.py build
%install
cd %{_builddir}/%{name}-%{version} && python setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=%{_builddir}/%{name}-%{version}/INSTALLED_FILES
cd %{_builddir}/%{name}-%{version} && %{__python} setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=%{_builddir}/%{name}-%{version}/INSTALLED_FILES
cp -vr %{_builddir}/%{name}-%{version}/octane/patches ${RPM_BUILD_ROOT}/%{python2_sitelib}/octane/
%files -f %{_builddir}/%{name}-%{version}/INSTALLED_FILES
%{python2_sitelib}/octane/patches/*
%defattr(-,root,root)


@ -1,5 +1,5 @@
hacking<0.11,>=0.10.0
pytest<2.8.0
pytest
pytest-cov
pytest-mock<0.10.0
pyzabbix==0.7.3


@ -5,7 +5,7 @@ skipsdist = False
[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -U {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/requirements.txt