Remove obsolete upgrade code

Since the upgrade procedure was changed and tarball-based upgrade is no longer available,
we can remove all tarball-related code from fuel-qa

Change-Id: Id62ad622a42bd35aeeee866cb20651e7086c7720
Partial-Bug: #1546943
(cherry picked from commit 4daa44e2b8)
This commit is contained in:
Vladimir Khlyunev 2016-02-17 15:11:55 +03:00
parent b54b6a58d8
commit 020a0ba941
6 changed files with 0 additions and 1431 deletions

View File

@ -581,16 +581,6 @@ Restart tests
Upgrade tests
=============
Test Upgrade
------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_upgrade
:members:
Test Upgrade Chains
-------------------
.. automodule:: fuelweb_test.tests.tests_upgrade.test_upgrade_chains
:members:
OS upgrade tests
================

View File

@ -217,27 +217,6 @@ def check_unallocated_space(disks, contr_img_ceph=False):
return True
@logwrap
def check_upgraded_containers(remote, version_from, version_to):
    """Verify the master node's docker containers after an upgrade.

    Asserts that no container still carries ``version_from``, that every
    container carries ``version_to``, and that the supervisord "current"
    symlink points at the ``version_to`` directory.
    """
    logger.info('Checking of containers')
    containers = remote.execute("docker ps | tail -n +2 |"
                                "awk '{ print $NF;}'")['stdout']
    symlink = remote.execute("readlink /etc/supervisord.d/current")['stdout']
    logger.debug('containers are {0}'.format(containers))
    logger.debug('symlinks are {0}'.format(symlink))
    # Container names look like "<prefix>-<component>-<version>..."; split
    # each comma-separated name on '-' so the version sits at index 2.
    components = [name.split('-')
                  for line in containers
                  for name in line.split(',')]
    for parts in components:
        assert_true(version_from != parts[2],
                    'There are {0} containers'.format(version_from))
    for parts in components:
        assert_true(version_to == parts[2],
                    'There are no {0} containers'.format(version_to))
    assert_true('/etc/supervisord.d/{0}'.format(version_to) in symlink[0],
                'Symlink is set not to {0}'.format(version_to))
@logwrap
def upload_tarball(node_ssh, tar_path, tar_target):
assert_true(tar_path, "Source path for uploading 'tar_path' is empty, "
@ -267,64 +246,6 @@ def check_file_exists(node_ssh, path):
logger.info('File {0} exists on {1}'.format(path, node_ssh.host))
@logwrap
def run_upgrade_script(node_ssh, script_path, script_name, password='admin',
                       rollback=False, exit_code=0):
    """Run the master-node upgrade script and assert its exit code.

    :param node_ssh: SSH connection to the master node
    :param script_path: directory on the node containing the script
    :param script_name: file name of the upgrade script
    :param password: keystone admin password passed to the script
    :param rollback: run with a forced failing upgrader to test rollback
    :param exit_code: exit code expected from the script
    """
    full_path = os.path.join(script_path, script_name)
    check_file_exists(node_ssh, full_path)
    chmod_result = node_ssh.execute('chmod 755 {0}'.format(full_path))
    logger.debug("Result of chmod is {0}".format(chmod_result))
    if rollback:
        # Inject a failing upgrader ("raise-error") to force the rollback
        # code path inside the upgrade script.
        cmd = "UPGRADERS='host-system docker openstack" \
              " raise-error' {0}/{1}" \
              " --password {2}".format(script_path, script_name, password)
    else:
        cmd = "{0}/{1} --no-rollback --password {2}".format(script_path,
                                                            script_name,
                                                            password)
    upgrade_result = run_on_remote_get_results(node_ssh, cmd,
                                               assert_ec_equal=[exit_code],
                                               raise_on_assert=False)
    # TODO: check that we really need this log from fuel_upgrade.log
    if upgrade_result['exit_code'] != exit_code:
        # Pull the last "UPGRADE FAILED" section from the upgrade log for
        # the error report.
        log = "".join(
            run_on_remote(node_ssh,
                          "awk -v p=\"UPGRADE FAILED\" 'BEGIN{m=\"\"}"
                          " {if ($0 ~ p) {m=$0} else m=m\"\\n\"$0}"
                          " END{if (m ~ p) print m}'"
                          " /var/log/fuel_upgrade.log",
                          raise_on_assert=False)
        )
        logger.error("Message from /var/log/fuel_upgrade.log:\n"
                     "{log}".format(log=log))
    assert_equal(
        upgrade_result['exit_code'],
        exit_code,
        "Upgrade script failed with exit code {exit_code}, "
        "please inspect logs for details.\n"
        "last output: \"{output}\""
        "".format(exit_code=upgrade_result['exit_code'],
                  output=''.join(upgrade_result['stdout'][-5:]) +
                  upgrade_result['stderr_str'])
    )
@logwrap
def wait_upgrade_is_done(node_ssh, timeout, phrase):
    """Block until *phrase* shows up in /var/log/fuel_upgrade.log.

    On timeout, re-run the grep once more and raise an assertion with the
    grep stderr so the failure is reported instead of swallowed.
    """
    logger.info('Waiting while upgrade is done')
    cmd = "grep '{0}' /var/log/fuel_upgrade.log".format(phrase)
    try:
        wait(lambda: not node_ssh.execute(cmd)['exit_code'],
             timeout=timeout)
    except Exception as e:
        logger.error(e)
        final = node_ssh.execute(cmd)
        assert_equal(0, final['exit_code'], final['stderr'])
@logwrap
def wait_phrase_in_log(node_ssh, timeout, interval, phrase, log_path):
cmd = "grep '{0}' '{1}'".format(phrase, log_path)
@ -335,15 +256,6 @@ def wait_phrase_in_log(node_ssh, timeout, interval, phrase, log_path):
"remote node".format(phrase, log_path))
@logwrap
def wait_rollback_is_done(node_ssh, timeout):
    """Wait until 'UPGRADE FAILED' appears in the upgrade log.

    That marker is what the upgrade script writes once the rollback
    machinery has kicked in.
    """
    logger.info('Waiting while rollback is done')
    cmd = "grep 'UPGRADE FAILED' /var/log/fuel_upgrade.log"
    wait(lambda: node_ssh.execute(cmd)['exit_code'] == 0, timeout=timeout)
@logwrap
def get_package_versions_from_node(remote, name, os_type):
if os_type and 'Ubuntu' in os_type:

View File

@ -26,7 +26,6 @@ from proboscis.asserts import assert_equal
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers.regenerate_repo import regenerate_centos_repo
@ -396,40 +395,6 @@ class AdminActions(BaseActions):
stderr))
assert_equal(result['exit_code'], 0)
def upgrade_master_node(self, rollback=False, file_upload=True):
    """Upgrade the master node with the current state.

    :param rollback: when True, run the upgrade with a forced failure
        (expected exit code 255) and verify the rollback restored the
        previous container set.
    :param file_upload: when True, upload and unpack the upgrade tarball
        to /var before running the script.
    """
    # TODO: It will be removed or changed
    master = self.admin_remote
    if file_upload:
        checkers.upload_tarball(master, hlp_data.TARBALL_PATH, '/var')
        checkers.check_file_exists(
            master,
            os.path.join('/var', os.path.basename(hlp_data.TARBALL_PATH)))
        self.untar(master, os.path.basename(hlp_data.TARBALL_PATH), '/var')
    keystone_pass = hlp_data.KEYSTONE_CREDS['password']
    checkers.run_upgrade_script(master, '/var', 'upgrade.sh',
                                password=keystone_pass,
                                rollback=rollback,
                                exit_code=255 if rollback else 0)
    # `if not rollback: ... elif rollback:` collapsed to a plain if/else —
    # the two branches are exhaustive and mutually exclusive.
    if rollback:
        checkers.wait_rollback_is_done(master, 3000)
        checkers.check_upgraded_containers(master,
                                           hlp_data.UPGRADE_FUEL_TO,
                                           hlp_data.UPGRADE_FUEL_FROM)
    else:
        checkers.wait_upgrade_is_done(master, 3000,
                                      phrase='***UPGRADING MASTER NODE'
                                             ' DONE SUCCESSFULLY')
        checkers.check_upgraded_containers(master,
                                           hlp_data.UPGRADE_FUEL_FROM,
                                           hlp_data.UPGRADE_FUEL_TO)
    logger.debug("all containers are ok")
def get_fuel_settings(self):
cmd = 'cat {cfg_file}'.format(cfg_file=hlp_data.FUEL_SETTINGS_YAML)
result = self.ssh_manager.execute(

View File

@ -373,9 +373,6 @@ MAKE_SNAPSHOT = get_var_as_bool('MAKE_SNAPSHOT', False)
FUEL_SETTINGS_YAML = os.environ.get('FUEL_SETTINGS_YAML',
'/etc/fuel/astute.yaml')
# TarBall data for updates and upgrades
TARBALL_PATH = os.environ.get('TARBALL_PATH')
UPGRADE_FUEL_FROM = os.environ.get('UPGRADE_FUEL_FROM', '7.0')
UPGRADE_FUEL_TO = os.environ.get('UPGRADE_FUEL_TO', '8.0')

File diff suppressed because it is too large Load Diff

View File

@ -1,205 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from proboscis import SkipTest
from fuelweb_test.settings import DEPLOYMENT_MODE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import logger
from fuelweb_test import settings as hlp_data
from fuelweb_test.tests import base_test_case as base_test_data
@test(groups=["upgrade_chains"])
class UpgradeFuelChains(base_test_data.TestBasic):
"""UpgradeChains.""" # TODO documentation
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
      groups=["prepare_upgrade_env", "prepare_upgrade_env_classic"])
@log_snapshot_after_test
def prepare_upgrade_env(self):
    """Deploy cluster in ha mode with 1 controller and Neutron VLAN

    Scenario:
        1. Create cluster
        2. Add 1 node with controller role
        3. Add 2 nodes with compute role
        4. Deploy the cluster
        5. Run network verification
        6. Run OSTF

    Duration 35m
    Snapshot deploy_neutron_vlan
    """
    self.env.revert_snapshot("ready_with_3_slaves")

    # Neutron/VLAN cluster with dedicated credentials so the tenant is
    # easy to identify after the upgrade.
    cluster_settings = {
        "net_provider": 'neutron',
        "net_segment_type": "vlan",
        'tenant': 'prepare_upgrade_env',
        'user': 'prepare_upgrade_env',
        'password': 'prepare_upgrade_env'
    }
    cluster_id = self.fuel_web.create_cluster(
        name=self.__class__.__name__,
        mode=DEPLOYMENT_MODE,
        settings=cluster_settings)

    node_roles = {
        'slave-01': ['controller'],
        'slave-02': ['compute', 'cinder'],
        'slave-03': ['compute', 'cinder']
    }
    self.fuel_web.update_nodes(cluster_id, node_roles)

    self.fuel_web.deploy_cluster_wait(cluster_id)
    self.fuel_web.verify_network(cluster_id)
    self.fuel_web.run_ostf(cluster_id=cluster_id)

    self.env.make_snapshot("prepare_upgrade_env", is_make=True)
@test(groups=["upgrade_first_stage", "upgrade_first_stage_classic"])
@log_snapshot_after_test
def upgrade_first_stage(self):
    """Upgrade ha one controller deployed cluster and deploy new one

    Scenario:
        1. Revert snapshot with neutron ha one controller
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification
        5. Run OSTF
        6. Deploy new ceph ha one controller neutron vlan cluster
        7. Run network verification
        8. Run OSTF

    """
    if not self.env.revert_snapshot('prepare_upgrade_env'):
        raise SkipTest()

    old_cluster_id = self.fuel_web.get_last_created_cluster()
    releases_before = self.fuel_web.get_releases_list_for_os(
        release_name=hlp_data.OPENSTACK_RELEASE)

    # Upgrade the master and make sure the pre-existing cluster survived.
    self.env.admin_actions.upgrade_master_node()
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.assert_nodes_in_ready_state(old_cluster_id)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:3])
    self.fuel_web.verify_network(old_cluster_id)
    self.fuel_web.run_ostf(cluster_id=old_cluster_id)

    # The upgrade registers a new release; find it by diffing the lists.
    releases_after = self.fuel_web.get_releases_list_for_os(
        release_name=hlp_data.OPENSTACK_RELEASE)
    added_release = [release_id for release_id in releases_after
                     if release_id not in releases_before]

    # Deploy a fresh ceph-backed cluster against the new release.
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[3:6])
    cluster_settings = {
        'tenant': 'upgrade_first_stage',
        'user': 'upgrade_first_stage',
        'password': 'upgrade_first_stage',
        'net_provider': 'neutron',
        'net_segment_type': 'vlan',
        'volumes_ceph': True,
        'images_ceph': True,
        'volumes_lvm': False
    }
    new_cluster_id = self.fuel_web.create_cluster(
        name='first_stage_upgrade',
        mode=hlp_data.DEPLOYMENT_MODE,
        settings=cluster_settings,
        release_id=added_release[0])
    self.fuel_web.update_nodes(
        new_cluster_id,
        {
            'slave-04': ['controller'],
            'slave-05': ['compute', 'ceph-osd'],
            'slave-06': ['compute', 'ceph-osd']
        })
    self.fuel_web.deploy_cluster_wait(new_cluster_id)
    self.fuel_web.verify_network(new_cluster_id)
    self.fuel_web.run_ostf(cluster_id=new_cluster_id)

    self.env.make_snapshot("upgrade_first_stage", is_make=True)
@test(groups=["upgrade_second_stage", "upgrade_second_stage_classic"])
@log_snapshot_after_test
def upgrade_second_stage(self):
    """Upgrade master second time with 2 available clusters

    Scenario:
        1. Revert snapshot upgrade_first_stage
        2. Run upgrade on master
        3. Check that upgrade was successful
        4. Run network verification on both clusters
        5. Run OSTF on both clusters
        6. Add 1 compute node to both clusters and
           re-deploy them one by one
        7. Run network verification on both clusters
        8. Run OSTF on both clusters

    """
    if not self.env.revert_snapshot('upgrade_first_stage'):
        raise SkipTest()

    # Drop leftovers of the previous upgrade run before starting a new one.
    with self.env.d_env.get_admin_remote() as remote:
        remote.execute("rm -rf /var/*upgrade*")

    self.env.admin_actions.upgrade_master_node()
    self.fuel_web.assert_fuel_version(hlp_data.UPGRADE_FUEL_TO)
    self.fuel_web.wait_nodes_get_online_state(
        self.env.d_env.nodes().slaves[:6])
    self.env.bootstrap_nodes(self.env.d_env.nodes().slaves[6:8])

    cluster_ids = [cluster['id']
                   for cluster in self.fuel_web.client.list_clusters()]
    for cluster_id in cluster_ids:
        self.fuel_web.verify_network(cluster_id)
        self.fuel_web.run_ostf(cluster_id=cluster_id)

    ordered_ids = sorted(cluster_ids)
    first_cluster_id = ordered_ids[0]
    second_cluster_id = ordered_ids[1]
    logger.debug("first cluster id {0}, second cluster id"
                 " {1}".format(first_cluster_id, second_cluster_id))

    # Grow and re-deploy each cluster in turn.
    self.fuel_web.update_nodes(
        first_cluster_id, {'slave-07': ['compute']},
        True, False)
    self.fuel_web.deploy_cluster_wait(first_cluster_id)
    self.fuel_web.verify_network(first_cluster_id)
    self.fuel_web.run_ostf(cluster_id=first_cluster_id)

    self.fuel_web.update_nodes(
        second_cluster_id, {'slave-08': ['compute', 'ceph-osd']},
        True, False)
    self.fuel_web.deploy_cluster_wait(second_cluster_id)
    self.fuel_web.verify_network(second_cluster_id)
    self.fuel_web.run_ostf(cluster_id=second_cluster_id)