Python 3 code compatibility: Stage 1

1. Fix imports (lowercase names imported as UPPERCASE)
2. Fix local variable names (CamelCase/UPPERCASE)
3. Use Python 3 compatible syntax where possible (see the sketch after this list)
4. Define attributes in __init__
5. Remove redundant parentheses
6. Add /logs to .gitignore (to keep logs from being published and to avoid spurious git status output)
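
For reference, the sketch below pairs the Python 2-only idioms removed by this change with their portable replacements. It is illustrative code only, not part of the diff:

    # Illustrative only: Python 2-only constructs replaced in this commit,
    # rewritten in forms that run under both Python 2 and Python 3.

    counts = {'a': 1, 'b': 2}

    # dict iteration: dict.iteritems() is gone in Python 3;
    # dict.items() works in both.
    for key, value in counts.items():
        print(key, value)

    # integer ranges: xrange() is gone in Python 3; range() works in both
    # (on Python 2 it builds a list, harmless for small retry counts).
    for attempt in range(3):
        pass

    # a comprehension over an unparenthesized tuple ("for x in a, b")
    # is a syntax error in Python 3; parenthesize the tuple.
    navicli, naviseccli = 'found', ''
    non_empty = any(out != '' for out in (navicli, naviseccli))

    # exception handling: "except Error, e" is a syntax error in Python 3,
    # and the three-argument "raise exc, None, trace" form is gone as well;
    # "except Error as e" plus a plain re-raise work in both.
    try:
        raise ValueError('boom')
    except ValueError as exc:
        last_error = exc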

blueprint fuel-qa-python3-compatibility
Change-Id: Ifd60646055d07f888b2e204083b99d9db507f93e
Alexey Stepanov 2016-02-05 16:45:27 +03:00
parent e4d4358006
commit 70106730d9
18 changed files with 201 additions and 192 deletions

.gitignore

@@ -47,6 +47,7 @@ local_settings.py
doc/_build/
# Logs
/logs
sys_test.log
# Certs


@@ -24,8 +24,8 @@ from novaclient.v2 import Client as NovaClient
import neutronclient.v2_0.client as neutronclient
from proboscis.asserts import assert_equal
from fuelweb_test import logger as LOGGER
from fuelweb_test import logwrap as LOGWRAP
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test.settings import DISABLE_SSL
from fuelweb_test.settings import PATH_TO_CERT
from fuelweb_test.settings import VERIFY_SSL
@@ -52,7 +52,7 @@ class Common(object):
insecure = not VERIFY_SSL
LOGGER.debug('Auth URL is {0}'.format(auth_url))
logger.debug('Auth URL is {0}'.format(auth_url))
keystone_args = {'username': user, 'password': password,
'tenant_name': tenant, 'auth_url': auth_url,
@@ -60,7 +60,7 @@ class Common(object):
self.keystone = self._get_keystoneclient(**keystone_args)
token = self.keystone.auth_token
LOGGER.debug('Token is {0}'.format(token))
logger.debug('Token is {0}'.format(token))
neutron_endpoint = self.keystone.service_catalog.url_for(
service_type='network', endpoint_type='publicURL')
@@ -90,7 +90,7 @@ class Common(object):
glance_endpoint = self.keystone.service_catalog.url_for(
service_type='image', endpoint_type='publicURL')
LOGGER.debug('Glance endpoint is {0}'.format(
logger.debug('Glance endpoint is {0}'.format(
make_endpoint(glance_endpoint)))
glance_args = {'endpoint': make_endpoint(glance_endpoint),
'token': token,
@@ -107,15 +107,15 @@ class Common(object):
os_auth_token=token,
ironic_url=make_endpoint(ironic_endpoint), insecure=True)
except ClientException as e:
LOGGER.warning('Could not initialize ironic client {0}'.format(e))
logger.warning('Could not initialize ironic client {0}'.format(e))
def goodbye_security(self):
secgroup_list = self.nova.security_groups.list()
LOGGER.debug("Security list is {0}".format(secgroup_list))
logger.debug("Security list is {0}".format(secgroup_list))
secgroup_id = [i.id for i in secgroup_list if i.name == 'default'][0]
LOGGER.debug("Id of security group default is {0}".format(
logger.debug("Id of security group default is {0}".format(
secgroup_id))
LOGGER.debug('Permit all TCP and ICMP in security group default')
logger.debug('Permit all TCP and ICMP in security group default')
self.nova.security_group_rules.create(secgroup_id,
ip_protocol='tcp',
from_port=1,
@@ -133,13 +133,13 @@ class Common(object):
return self.glance.images.delete(image_id)
def create_key(self, key_name):
LOGGER.debug('Try to create key {0}'.format(key_name))
logger.debug('Try to create key {0}'.format(key_name))
return self.nova.keypairs.create(key_name)
def create_instance(self, flavor_name='test_flavor', ram=64, vcpus=1,
disk=1, server_name='test_instance', image_name=None,
neutron_network=True, label=None):
LOGGER.debug('Try to create instance')
logger.debug('Try to create instance')
start_time = time.time()
while time.time() - start_time < 100:
@@ -161,16 +161,16 @@ class Common(object):
network = self.nova.networks.find(label=net_label)
kwargs['nics'] = [{'net-id': network.id, 'v4-fixed-ip': ''}]
LOGGER.info('image uuid is {0}'.format(image))
logger.info('image uuid is {0}'.format(image))
flavor = self.nova.flavors.create(
name=flavor_name, ram=ram, vcpus=vcpus, disk=disk)
LOGGER.info('flavor is {0}'.format(flavor.name))
logger.info('flavor is {0}'.format(flavor.name))
server = self.nova.servers.create(
name=server_name, image=image[0], flavor=flavor, **kwargs)
LOGGER.info('server is {0}'.format(server.name))
logger.info('server is {0}'.format(server.name))
return server
@LOGWRAP
@logwrap
def get_instance_detail(self, server):
details = self.nova.servers.get(server)
return details
@@ -183,13 +183,13 @@ class Common(object):
try:
_verify_instance_state()
except AssertionError:
LOGGER.debug('Instance is not {0}, lets provide it the last '
logger.debug('Instance is not {0}, lets provide it the last '
'chance and sleep 60 sec'.format(expected_state))
time.sleep(60)
_verify_instance_state()
def delete_instance(self, server):
LOGGER.debug('Try to delete instance')
logger.debug('Try to delete instance')
self.nova.servers.delete(server)
def create_flavor(self, name, ram, vcpus, disk, flavorid="auto",
@@ -203,27 +203,25 @@ class Common(object):
def _get_keystoneclient(self, username, password, tenant_name, auth_url,
retries=3, ca_cert=None, insecure=False):
keystone = None
for i in range(retries):
exception = None
for i in xrange(retries):
try:
if ca_cert:
keystone = KeystoneClient(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
cacert=ca_cert,
insecure=insecure)
return KeystoneClient(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url,
cacert=ca_cert,
insecure=insecure)
else:
keystone = KeystoneClient(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url)
break
except ClientException as e:
return KeystoneClient(username=username,
password=password,
tenant_name=tenant_name,
auth_url=auth_url)
except ClientException as exc:
err = "Try nr {0}. Could not get keystone client, error: {1}"
LOGGER.warning(err.format(i + 1, e))
logger.warning(err.format(i + 1, exc))
exception = exc
time.sleep(5)
if not keystone:
raise
return keystone
raise exception if exception else RuntimeError()
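
The rewritten _get_keystoneclient above drops the mutable accumulator in favor of returning on success and re-raising the last stored exception once the retries are exhausted. A standalone sketch of the same retry pattern, with a hypothetical connect callable standing in for the keystone client constructor:

    import time

    def call_with_retries(connect, retries=3, delay=5):
        """Call connect() up to retries times, re-raising the last error."""
        last_exc = None
        for attempt in range(retries):
            try:
                return connect()
            except Exception as exc:  # the diff catches ClientException
                last_exc = exc
                time.sleep(delay)
        raise last_exc if last_exc else RuntimeError()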


@@ -84,7 +84,6 @@ def log_snapshot_after_test(func):
except SkipTest:
raise SkipTest()
except Exception as test_exception:
exc_trace = sys.exc_traceback
name = 'error_%s' % func.__name__
description = "Failed in method '%s'." % func.__name__
if args[0].env is not None:
@@ -111,7 +110,7 @@ def log_snapshot_after_test(func):
" {0}".format(traceback.format_exc()))
logger.error(traceback.format_exc())
logger.info("<" * 5 + "*" * 100 + ">" * 5)
raise test_exception, None, exc_trace
raise test_exception
else:
if settings.ALWAYS_CREATE_DIAGNOSTIC_SNAPSHOT:
if args[0].env is None:


@@ -20,11 +20,11 @@ import json
from paramiko import RSAKey
from devops.models.node import SSHClient
from fuelweb_test import logger
from fuelweb_test.helpers import metaclasses
from fuelweb_test.helpers.metaclasses import SingletonMeta
class SSHManager(object):
__metaclass__ = metaclasses.SingletonMeta
__metaclass__ = SingletonMeta
# Slots is used to prevent uncontrolled attributes set or remove.
__slots__ = [
'__connections', 'admin_ip', 'admin_port', 'login', '__password'
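
One caveat: the __metaclass__ attribute kept here is honored only by Python 2; Python 3 ignores it, so SingletonMeta would not be applied there. A cross-compatible spelling, assuming the six library (which this diff does not introduce), would look like:

    import six

    from fuelweb_test.helpers.metaclasses import SingletonMeta

    @six.add_metaclass(SingletonMeta)
    class SSHManager(object):
        """Sketch of a metaclass declaration valid on Python 2 and 3."""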


@@ -693,7 +693,7 @@ def pretty_log(src, indent=0, invert=False):
if src and isinstance(src, dict):
max_len = len(max(src.values() if invert else src.keys(),
key=lambda x: len(str(x))))
for key, value in src.iteritems():
for key, value in src.items():
if (isinstance(value, dict) and value) or \
isinstance(value, list):
result += templates[1].format(indent=' ' * indent, item=key)


@@ -420,10 +420,10 @@ class EnvironmentModel(object):
# This is very rude implementation and it SHOULD be changes after
# implementation this feature in fuel-devops
name = "{}_{}".format(settings.ENV_NAME, self.d_env.nodes().admin.name)
NAME_SIZE = 80
if len(name) > NAME_SIZE:
name_size = 80
if len(name) > name_size:
hash_str = str(hash(name))
name = (hash_str + name)[:NAME_SIZE]
name = (hash_str + name)[:name_size]
cmd = """EDITOR="sed -i s/tray=\\'open\\'//" virsh edit {}""".format(
name)


@@ -249,21 +249,21 @@ def get_tests_results(systest_build, os):
run_test_data = test_build.test_data()
test_classes = {}
for one in run_test_data['suites'][0]['cases']:
className = one['className']
if className not in test_classes:
test_classes[className] = {}
test_classes[className]['child'] = []
test_classes[className]['duration'] = 0
test_classes[className]["failCount"] = 0
test_classes[className]["passCount"] = 0
test_classes[className]["skipCount"] = 0
class_name = one['className']
if class_name not in test_classes:
test_classes[class_name] = {}
test_classes[class_name]['child'] = []
test_classes[class_name]['duration'] = 0
test_classes[class_name]["failCount"] = 0
test_classes[class_name]["passCount"] = 0
test_classes[class_name]["skipCount"] = 0
else:
if one['className'] == one['name']:
logger.warning("Found duplicate test in run - {}".format(
one['className']))
continue
test_class = test_classes[className]
test_class = test_classes[class_name]
test_class['child'].append(one)
test_class['duration'] += float(one['duration'])
if one['status'].lower() in ('failed', 'error'):


@@ -20,7 +20,7 @@ from proboscis import test
from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test import settings as CONF
from fuelweb_test import settings
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@@ -75,12 +75,12 @@ class EMCPlugin(TestBasic):
navicli = checkers.get_package_versions_from_node(
remote=remote,
name='navicli',
os_type=CONF.OPENSTACK_RELEASE)
os_type=settings.OPENSTACK_RELEASE)
naviseccli = checkers.get_package_versions_from_node(
remote=remote,
name='naviseccli',
os_type=CONF.OPENSTACK_RELEASE)
return any([out != '' for out in navicli, naviseccli])
os_type=settings.OPENSTACK_RELEASE)
return bool(navicli + naviseccli)
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
groups=["deploy_emc_ha"])
@@ -108,16 +108,16 @@ class EMCPlugin(TestBasic):
# copy plugin to the master node
checkers.upload_tarball(
remote,
CONF.EMC_PLUGIN_PATH, '/var')
settings.EMC_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
remote,
plugin=os.path.basename(CONF.EMC_PLUGIN_PATH))
plugin=os.path.basename(settings.EMC_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=CONF.DEPLOYMENT_MODE,
mode=settings.DEPLOYMENT_MODE,
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
@@ -139,11 +139,11 @@ class EMCPlugin(TestBasic):
emc_options = attr["editable"]["emc_vnx"]
emc_options["metadata"]["enabled"] = True
emc_options["emc_sp_a_ip"]["value"] = CONF.EMC_SP_A_IP
emc_options["emc_sp_b_ip"]["value"] = CONF.EMC_SP_B_IP
emc_options["emc_username"]["value"] = CONF.EMC_USERNAME
emc_options["emc_password"]["value"] = CONF.EMC_PASSWORD
emc_options["emc_pool_name"]["value"] = CONF.EMC_POOL_NAME
emc_options["emc_sp_a_ip"]["value"] = settings.EMC_SP_A_IP
emc_options["emc_sp_b_ip"]["value"] = settings.EMC_SP_B_IP
emc_options["emc_username"]["value"] = settings.EMC_USERNAME
emc_options["emc_password"]["value"] = settings.EMC_PASSWORD
emc_options["emc_pool_name"]["value"] = settings.EMC_POOL_NAME
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)


@@ -29,6 +29,9 @@ from fuelweb_test.tests import base_test_case
@test(groups=["jumbo_frames"])
class TestJumboFrames(base_test_case.TestBasic):
def __init__(self):
self.os_conn = None
super(TestJumboFrames, self).__init__()
interfaces = {
iface_alias('eth0'): ['fuelweb_admin'],


@@ -28,7 +28,7 @@ from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings
from fuelweb_test import logger as LOGGER
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@@ -58,22 +58,22 @@ class RhHA(TestBasic):
timeout=timeout, timeout_msg="Node doesn't gone offline")
def warm_restart_nodes(self, devops_nodes):
LOGGER.info('Reboot (warm restart) nodes '
logger.info('Reboot (warm restart) nodes '
'{0}'.format([n.name for n in devops_nodes]))
self.warm_shutdown_nodes(devops_nodes)
self.warm_start_nodes(devops_nodes)
def warm_shutdown_nodes(self, devops_nodes):
LOGGER.info('Shutting down (warm) nodes '
logger.info('Shutting down (warm) nodes '
'{0}'.format([n.name for n in devops_nodes]))
for node in devops_nodes:
LOGGER.debug('Shutdown node {0}'.format(node.name))
logger.debug('Shutdown node {0}'.format(node.name))
with self.fuel_web.get_ssh_for_node(node.name) as remote:
remote.execute('/sbin/shutdown -Ph now & exit')
for node in devops_nodes:
ip = self.fuel_web.get_node_ip_by_devops_name(node.name)
LOGGER.info('Wait a {0} node offline status'.format(node.name))
logger.info('Wait a {0} node offline status'.format(node.name))
try:
self.wait_for_slave_network_down(ip)
except TimeoutError:
@@ -84,7 +84,7 @@ class RhHA(TestBasic):
node.destroy()
def warm_start_nodes(self, devops_nodes):
LOGGER.info('Starting nodes '
logger.info('Starting nodes '
'{0}'.format([n.name for n in devops_nodes]))
for node in devops_nodes:
node.start()
@@ -97,7 +97,7 @@ class RhHA(TestBasic):
tcp_ping(ip, 22),
'Node {0} has not become online '
'after warm start'.format(node.name))
LOGGER.debug('Node {0} became online.'.format(node.name))
logger.debug('Node {0} became online.'.format(node.name))
@staticmethod
def connect_rh_image(slave):
@@ -120,9 +120,9 @@ class RhHA(TestBasic):
try:
system_disk.volume.upload(path)
except Exception as e:
LOGGER.error(e)
LOGGER.debug("Volume path: {0}".format(vol_path))
LOGGER.debug("Image path: {0}".format(path))
logger.error(e)
logger.debug("Volume path: {0}".format(vol_path))
logger.debug("Image path: {0}".format(path))
@staticmethod
def verify_image_connected(remote):
@@ -132,7 +132,7 @@ class RhHA(TestBasic):
"""
cmd = "cat /etc/redhat-release"
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, "Image doesn't connected")
@staticmethod
@@ -166,19 +166,19 @@ class RhHA(TestBasic):
if settings.RH_POOL_HASH:
result = remote.execute(reg_command)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'RH registration failed')
reg_pool_cmd = ("/usr/sbin/subscription-manager "
"attach --pool={0}".format(settings.RH_POOL_HASH))
result = remote.execute(reg_pool_cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Can not attach node to subscription pool')
else:
cmd = reg_command + " --auto-attach"
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'RH registration with auto-attaching failed')
@@ -194,7 +194,7 @@ class RhHA(TestBasic):
.format(settings.RH_MAJOR_RELEASE))
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Enabling RH repos failed')
@@ -210,7 +210,7 @@ class RhHA(TestBasic):
"echo '{0}' > /etc/hostname".format(hostname))
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Setting up hostname for node failed')
@@ -221,15 +221,15 @@ class RhHA(TestBasic):
:param puppets: <list> of puppets.
:param remote: Remote node for proceed.
"""
LOGGER.debug("Applying puppets...")
logger.debug("Applying puppets...")
for puppet in puppets:
LOGGER.debug('Applying: {0}'.format(puppet))
logger.debug('Applying: {0}'.format(puppet))
result = remote.execute(
'puppet apply -vd -l /var/log/puppet.log {0}'.format(puppet))
if result['exit_code'] != 0:
LOGGER.debug("Failed on task: {0}".format(puppet))
LOGGER.debug("STDERR:\n {0}".format(result['stderr']))
LOGGER.debug("STDOUT:\n {0}".format(result['stdout']))
logger.debug("Failed on task: {0}".format(puppet))
logger.debug("STDERR:\n {0}".format(result['stderr']))
logger.debug("STDOUT:\n {0}".format(result['stdout']))
asserts.assert_equal(
result['exit_code'], 0, 'Puppet run failed. '
'Task: {0}'.format(puppet))
@@ -262,10 +262,10 @@ class RhHA(TestBasic):
if result['exit_code'] == 0:
remove_iface = "rm -f /etc/sysconfig/network-scripts/ifcfg-eth0"
result = remote.execute(remove_iface)
LOGGER.debug(result)
logger.debug(result)
prep = "screen -dmS netconf"
result = remote.execute(prep)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Can not create screen')
net_puppet = ('screen -r netconf -p 0 -X stuff '
'$"puppet apply -vd -l /var/log/puppet.log '
@@ -274,8 +274,8 @@ class RhHA(TestBasic):
result = remote.execute(net_puppet)
if result['exit_code'] != 0:
LOGGER.debug("STDERR:\n {0}".format(result['stderr']))
LOGGER.debug("STDOUT:\n {0}".format(result['stdout']))
logger.debug("STDERR:\n {0}".format(result['stderr']))
logger.debug("STDOUT:\n {0}".format(result['stdout']))
asserts.assert_equal(
result['exit_code'], 0, 'Can not create screen with '
'netconfig task')
@@ -291,7 +291,7 @@ class RhHA(TestBasic):
def file_checker(connection):
cmd = "test -f ~/success"
result = connection.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
if result['exit_code'] != 0:
return False
else:
@@ -329,17 +329,17 @@ class RhHA(TestBasic):
:param remote: Remote Fuel master node.
:param ip: Target node ip to back up from.
"""
LOGGER.debug('Target node ip: {0}'.format(ip))
logger.debug('Target node ip: {0}'.format(ip))
cmd = ("cd ~/ && mkdir rh_backup; "
"scp -r {0}:/root/.ssh rh_backup/. ; "
"scp {0}:/etc/astute.yaml rh_backup/ ; "
"scp -r {0}:/var/lib/astute/nova rh_backup/").format(ip)
result = remote.execute(cmd)
LOGGER.debug(result['stdout'])
LOGGER.debug(result['stderr'])
logger.debug(result['stdout'])
logger.debug(result['stderr'])
asserts.assert_equal(result['exit_code'], 0,
'Can not back up required information from node')
LOGGER.debug("Backed up ssh-keys and astute.yaml")
logger.debug("Backed up ssh-keys and astute.yaml")
@staticmethod
def clean_string(string):
@@ -368,7 +368,7 @@ class RhHA(TestBasic):
cmd = "cat ~/rh_backup/.ssh/authorized_keys"
result = remote_admin.execute(cmd)
key = result['stdout']
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Can not get backed up ssh key.')
@@ -376,32 +376,32 @@ class RhHA(TestBasic):
cmd = "mkdir ~/.ssh; echo '{0}' >> ~/.ssh/authorized_keys".format(key)
result = remote_slave.execute(cmd)
LOGGER.debug(result['stdout'])
LOGGER.debug(result['stderr'])
logger.debug(result['stdout'])
logger.debug(result['stderr'])
asserts.assert_equal(result['exit_code'], 0,
'Can not recover ssh key for node')
cmd = "cd ~/rh_backup && scp astute.yaml {0}@{1}:/etc/.".format(
settings.RH_IMAGE_USER, ip)
LOGGER.debug("Restoring astute.yaml for node with ip {0}".format(ip))
logger.debug("Restoring astute.yaml for node with ip {0}".format(ip))
result = remote_admin.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Can not restore astute.yaml')
cmd = "mkdir -p /var/lib/astute"
LOGGER.debug("Prepare node for restoring nova ssh-keys")
logger.debug("Prepare node for restoring nova ssh-keys")
result = remote_slave.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Preparation failed')
cmd = (
"cd ~/rh_backup && scp -r nova {0}@{1}:/var/lib/astute/.".format(
settings.RH_IMAGE_USER, ip)
)
LOGGER.debug("Restoring nova ssh-keys")
logger.debug("Restoring nova ssh-keys")
result = remote_admin.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Can not restore ssh-keys for nova')
@@ -413,7 +413,7 @@ class RhHA(TestBasic):
"""
cmd = "yum install yum-utils yum-priorities -y"
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Can not install required'
'yum components.')
@@ -427,7 +427,7 @@ class RhHA(TestBasic):
cmd = ("curl {0}".format(repo))
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Perestroika repos unavailable from node.')
@@ -441,7 +441,7 @@ class RhHA(TestBasic):
"/etc/yum.repos.d/mos.repo && "
"yum clean all".format(repo))
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Can not create config file for repo')
@@ -452,19 +452,19 @@ class RhHA(TestBasic):
:param remote: Remote node for proceed.
"""
cmd = "yum list installed | grep hiera"
LOGGER.debug('Checking hiera installation...')
logger.debug('Checking hiera installation...')
result = remote.execute(cmd)
if result['exit_code'] == 0:
cmd = "yum remove hiera -y"
LOGGER.debug('Found existing installation of hiera. Removing...')
logger.debug('Found existing installation of hiera. Removing...')
result = remote.execute(cmd)
asserts.assert_equal(result['exit_code'], 0, 'Can not remove '
'hiera')
cmd = "ls /etc/hiera"
LOGGER.debug('Checking hiera files for removal...')
logger.debug('Checking hiera files for removal...')
result = remote.execute(cmd)
if result['exit_code'] == 0:
LOGGER.debug('Found redundant hiera files. Removing...')
logger.debug('Found redundant hiera files. Removing...')
cmd = "rm -rf /etc/hiera"
result = remote.execute(cmd)
asserts.assert_equal(result['exit_code'], 0,
@@ -477,13 +477,13 @@ class RhHA(TestBasic):
:param remote: Remote node for proceed.
"""
cmd = "yum list installed | grep rsync"
LOGGER.debug("Checking rsync installation...")
logger.debug("Checking rsync installation...")
result = remote.execute(cmd)
if result['exit_code'] != 0:
LOGGER.debug("Rsync is not found. Installing rsync...")
logger.debug("Rsync is not found. Installing rsync...")
cmd = "yum clean all && yum install rsync -y"
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Can not install '
'rsync on node.')
@@ -498,7 +498,7 @@ class RhHA(TestBasic):
"awk '/%s/{print $2}'); do nova service-delete $i; "
"done" % hostname)
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Can not remove '
'old nova computes')
@@ -506,7 +506,7 @@ class RhHA(TestBasic):
"awk '/%s/{print $2}'); do neutron agent-delete $i; "
"done" % hostname)
result = remote.execute(cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0, 'Can not remove '
'old neutron agents')
@@ -518,7 +518,7 @@ class RhHA(TestBasic):
"""
puppet_install_cmd = "yum install puppet ruby -y"
result = remote.execute(puppet_install_cmd)
LOGGER.debug(result)
logger.debug(result)
asserts.assert_equal(result['exit_code'], 0,
'Ruby and puppet installation failed')
@@ -533,7 +533,7 @@ class RhHA(TestBasic):
"{0}@{1}:/etc/puppet/modules/".format(settings.RH_IMAGE_USER,
ip))
result = remote.execute(cmd)
LOGGER.debug(cmd)
logger.debug(cmd)
asserts.assert_equal(result['exit_code'], 0,
'Rsync puppet modules failed')
@@ -574,7 +574,7 @@ class RhHA(TestBasic):
"""
self.show_step(1, initialize=True)
LOGGER.debug('Check MD5 sum of RH 7 image')
logger.debug('Check MD5 sum of RH 7 image')
check_image = checkers.check_image(
settings.RH_IMAGE,
settings.RH_IMAGE_MD5,
@@ -587,7 +587,7 @@ class RhHA(TestBasic):
self.env.revert_snapshot("ready_with_5_slaves")
self.show_step(3)
LOGGER.debug('Create Fuel cluster RH-based compute tests')
logger.debug('Create Fuel cluster RH-based compute tests')
data = {
'net_provider': 'neutron',
'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
@@ -629,14 +629,14 @@ class RhHA(TestBasic):
controller_name = 'slave-01'
controller_ip = self.fuel_web.get_nailgun_node_by_name(
controller_name)['ip']
LOGGER.debug('Got node: {0}'.format(compute))
logger.debug('Got node: {0}'.format(compute))
target_node_name = compute['name'].split('_')[0]
LOGGER.debug('Target node name: {0}'.format(target_node_name))
logger.debug('Target node name: {0}'.format(target_node_name))
target_node = self.env.d_env.get_node(name=target_node_name)
LOGGER.debug('DevOps Node: {0}'.format(target_node))
logger.debug('DevOps Node: {0}'.format(target_node))
target_node_ip = self.fuel_web.get_nailgun_node_by_name(
target_node_name)['ip']
LOGGER.debug('Acquired ip: {0} for node: {1}'.format(
logger.debug('Acquired ip: {0} for node: {1}'.format(
target_node_ip, target_node_name))
with self.env.d_env.get_ssh_to_remote(target_node_ip) as remote:


@@ -22,7 +22,7 @@ from fuelweb_test.helpers import checkers
from fuelweb_test.helpers.decorators import log_snapshot_after_test
from fuelweb_test.helpers import os_actions
from fuelweb_test import settings
from fuelweb_test import logger as LOGGER
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@@ -55,7 +55,7 @@ class SaharaHAOneController(TestBasic):
self.env.revert_snapshot("ready_with_3_slaves")
LOGGER.debug('Create Fuel cluster for Sahara tests')
logger.debug('Create Fuel cluster for Sahara tests')
data = {
'sahara': True,
'net_provider': 'neutron',
@@ -82,7 +82,7 @@ class SaharaHAOneController(TestBasic):
data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=5)
LOGGER.debug('Verify Sahara service on controller')
logger.debug('Verify Sahara service on controller')
_ip = self.fuel_web.get_nailgun_node_by_name("slave-01")['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
checkers.verify_service(
@@ -92,14 +92,14 @@ class SaharaHAOneController(TestBasic):
remote,
service_name='sahara-engine')
LOGGER.debug('Check MD5 sum of Vanilla2 image')
logger.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image)
LOGGER.debug('Run all sanity and smoke tests')
logger.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates',
'HDPTwoTemplatesTest.test_hdp_two_templates']
@@ -109,7 +109,7 @@ class SaharaHAOneController(TestBasic):
for test_name in test_names]
)
LOGGER.debug('Import Vanilla2 image for Sahara')
logger.debug('Import Vanilla2 image for Sahara')
with open('{0}/{1}'.format(
settings.SERVTEST_LOCAL_PATH,
@@ -125,7 +125,7 @@ class SaharaHAOneController(TestBasic):
path_to_tests = 'fuel_health.tests.tests_platform.test_sahara.'
test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster']
for test_name in test_names:
LOGGER.debug('Run platform test {0} for Sahara'.format(test_name))
logger.debug('Run platform test {0} for Sahara'.format(test_name))
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['tests_platform'],
test_name=path_to_tests + test_name, timeout=60 * 200)
@@ -162,7 +162,7 @@ class SaharaHA(TestBasic):
self.env.revert_snapshot("ready_with_5_slaves")
LOGGER.debug('Create Fuel cluster for Sahara tests')
logger.debug('Create Fuel cluster for Sahara tests')
data = {
'sahara': True,
'net_provider': 'neutron',
@@ -191,7 +191,7 @@ class SaharaHA(TestBasic):
cluster_vip, data['user'], data['password'], data['tenant'])
self.fuel_web.assert_cluster_ready(os_conn, smiles_count=13)
LOGGER.debug('Verify Sahara service on all controllers')
logger.debug('Verify Sahara service on all controllers')
for slave in ["slave-01", "slave-02", "slave-03"]:
_ip = self.fuel_web.get_nailgun_node_by_name(slave)['ip']
with self.env.d_env.get_ssh_to_remote(_ip) as remote:
@@ -202,14 +202,14 @@ class SaharaHA(TestBasic):
remote,
service_name='sahara-engine')
LOGGER.debug('Check MD5 sum of Vanilla2 image')
logger.debug('Check MD5 sum of Vanilla2 image')
check_image = checkers.check_image(
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE,
settings.SERVTEST_SAHARA_VANILLA_2_IMAGE_MD5,
settings.SERVTEST_LOCAL_PATH)
asserts.assert_true(check_image)
LOGGER.debug('Run all sanity and smoke tests')
logger.debug('Run all sanity and smoke tests')
path_to_tests = 'fuel_health.tests.sanity.test_sanity_sahara.'
test_names = ['VanillaTwoTemplatesTest.test_vanilla_two_templates',
'HDPTwoTemplatesTest.test_hdp_two_templates']
@@ -219,7 +219,7 @@ class SaharaHA(TestBasic):
for test_name in test_names]
)
LOGGER.debug('Import Vanilla2 image for Sahara')
logger.debug('Import Vanilla2 image for Sahara')
with open('{0}/{1}'.format(
settings.SERVTEST_LOCAL_PATH,
@@ -235,7 +235,7 @@ class SaharaHA(TestBasic):
path_to_tests = 'fuel_health.tests.tests_platform.test_sahara.'
test_names = ['VanillaTwoClusterTest.test_vanilla_two_cluster']
for test_name in test_names:
LOGGER.debug('Run platform test {0} for Sahara'.format(test_name))
logger.debug('Run platform test {0} for Sahara'.format(test_name))
self.fuel_web.run_single_ostf_test(
cluster_id=cluster_id, test_sets=['tests_platform'],
test_name=path_to_tests + test_name, timeout=60 * 200)
@@ -301,7 +301,7 @@ class MuranoHAOneController(TestBasic):
remote,
service_name='murano-api')
LOGGER.debug('Run sanity and functional Murano OSTF tests')
logger.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
cluster_id=self.fuel_web.get_last_created_cluster(),
test_sets=['sanity'],
@@ -309,7 +309,7 @@ class MuranoHAOneController(TestBasic):
'MuranoSanityTests.test_create_and_delete_service')
)
LOGGER.debug('Run OSTF platform tests')
logger.debug('Run OSTF platform tests')
test_class_main = ('fuel_health.tests.tests_platform'
'.test_murano_linux.MuranoDeployLinuxServicesTests')
@@ -390,7 +390,7 @@ class MuranoHA(TestBasic):
remote,
service_name='murano-api')
LOGGER.debug('Run sanity and functional Murano OSTF tests')
logger.debug('Run sanity and functional Murano OSTF tests')
self.fuel_web.run_single_ostf_test(
cluster_id=self.fuel_web.get_last_created_cluster(),
test_sets=['sanity'],
@@ -398,7 +398,7 @@ class MuranoHA(TestBasic):
'MuranoSanityTests.test_create_and_delete_service')
)
LOGGER.debug('Run OSTF platform tests')
logger.debug('Run OSTF platform tests')
test_class_main = ('fuel_health.tests.tests_platform'
'.test_murano_linux.MuranoDeployLinuxServicesTests')
@@ -423,14 +423,14 @@ class OSTFCeilometerHelper(TestBasic):
def run_tests(self, cluster_id, skip_tests=None):
"""Method run smoke, sanity and platform Ceilometer tests."""
LOGGER.debug('Run sanity and smoke tests')
logger.debug('Run sanity and smoke tests')
self.fuel_web.run_ostf(
cluster_id=cluster_id,
test_sets=['smoke', 'sanity'],
timeout=60 * 15
)
LOGGER.debug('Run platform OSTF Ceilometer tests')
logger.debug('Run platform OSTF Ceilometer tests')
test_class_main = ('fuel_health.tests.tests_platform.'
'test_ceilometer.'
@@ -532,7 +532,7 @@ class CeilometerHAOneControllerMongo(OSTFCeilometerHelper):
disk_mb = self.fuel_web.get_node_disk_size(node.get('id'),
"vda")
LOGGER.debug('disk size is {0}'.format(disk_mb))
logger.debug('disk size is {0}'.format(disk_mb))
mongo_disk_mb = 11116
os_disk_mb = disk_mb - mongo_disk_mb
mongo_disk_gb = ("{0}G".format(round(mongo_disk_mb / 1024, 1)))
@@ -887,7 +887,7 @@ class HeatHAOneController(TestBasic):
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
LOGGER.debug('Run Heat OSTF platform tests')
logger.debug('Run Heat OSTF platform tests')
test_class_main = ('fuel_health.tests.tests_platform.'
'test_heat.'
@@ -979,7 +979,7 @@ class HeatHA(TestBasic):
service_name='ceilometer-api',
ignore_count_of_proccesses=True)
LOGGER.debug('Run Heat OSTF platform tests')
logger.debug('Run Heat OSTF platform tests')
test_class_main = ('fuel_health.tests.tests_platform.'
'test_heat.'


@@ -32,7 +32,7 @@ class HaScaleGroup1(TestBasic):
def expected_fail_stop_deployment(self, cluster_id):
try:
self.fuel_web.client.stop_deployment(cluster_id)
except urllib2.HTTPError, e:
except urllib2.HTTPError as e:
asserts.assert_equal(
400,
e.code,


@@ -17,11 +17,10 @@ from devops.helpers.helpers import tcp_ping
from devops.helpers.helpers import wait
import netaddr
from proboscis import test
from fuelweb_test.settings import LOGS_DIR
from fuelweb_test.helpers import decorators
from fuelweb_test.helpers import nessus
from fuelweb_test import settings as CONF
from fuelweb_test import settings
from fuelweb_test.tests import base_test_case
from fuelweb_test.tests.test_neutron_tun_base import NeutronTunHaBase
@@ -112,16 +111,16 @@ class TestNessus(NeutronTunHaBase):
"""
self.env.revert_snapshot("deploy_neutron_tun_ha_nessus")
if CONF.NESSUS_ADDRESS is None:
CONF.NESSUS_ADDRESS = \
if settings.NESSUS_ADDRESS is None:
settings.NESSUS_ADDRESS = \
self.find_nessus_address(nessus_net_name='admin',
nessus_port=CONF.NESSUS_PORT)
nessus_port=settings.NESSUS_PORT)
nessus_client = nessus.NessusClient(CONF.NESSUS_ADDRESS,
CONF.NESSUS_PORT,
CONF.NESSUS_USERNAME,
CONF.NESSUS_PASSWORD,
CONF.NESSUS_SSL_VERIFY)
nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS,
settings.NESSUS_PORT,
settings.NESSUS_USERNAME,
settings.NESSUS_PASSWORD,
settings.NESSUS_SSL_VERIFY)
scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
@@ -133,10 +132,10 @@ class TestNessus(NeutronTunHaBase):
policies_list)[0]
policy_id = nessus_client.add_cpa_policy(
scan_name, CONF.ENV_NAME, cpa_policy_template['uuid'])
scan_name, settings.ENV_NAME, cpa_policy_template['uuid'])
scan_id = nessus_client.create_scan(
scan_name, CONF.ENV_NAME, self.fuel_web.admin_node_ip,
scan_name, settings.ENV_NAME, self.fuel_web.admin_node_ip,
policy_id, cpa_policy_template['uuid'])
scan_uuid = nessus_client.launch_scan(scan_id)
history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid]
@@ -146,8 +145,8 @@ class TestNessus(NeutronTunHaBase):
wait(check_scan_complete, interval=10, timeout=60 * 30)
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(scan_id, file_id,
'master_cpa', 'html', LOGS_DIR)
nessus_client.download_scan_result(
scan_id, file_id, 'master_cpa', 'html', settings.LOGS_DIR)
self.env.make_snapshot("nessus_fuel_master_cpa")
@@ -168,16 +167,16 @@ class TestNessus(NeutronTunHaBase):
"""
self.env.revert_snapshot("deploy_neutron_tun_ha_nessus")
if CONF.NESSUS_ADDRESS is None:
CONF.NESSUS_ADDRESS = \
if settings.NESSUS_ADDRESS is None:
settings.NESSUS_ADDRESS = \
self.find_nessus_address(nessus_net_name='admin',
nessus_port=CONF.NESSUS_PORT)
nessus_port=settings.NESSUS_PORT)
nessus_client = nessus.NessusClient(CONF.NESSUS_ADDRESS,
CONF.NESSUS_PORT,
CONF.NESSUS_USERNAME,
CONF.NESSUS_PASSWORD,
CONF.NESSUS_SSL_VERIFY)
nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS,
settings.NESSUS_PORT,
settings.NESSUS_USERNAME,
settings.NESSUS_PASSWORD,
settings.NESSUS_SSL_VERIFY)
scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
@@ -189,10 +188,10 @@ class TestNessus(NeutronTunHaBase):
policies_list)[0]
policy_id = nessus_client.add_wat_policy(
scan_name, CONF.ENV_NAME, wat_policy_template['uuid'])
scan_name, settings.ENV_NAME, wat_policy_template['uuid'])
scan_id = nessus_client.create_scan(
scan_name, CONF.ENV_NAME, self.fuel_web.admin_node_ip,
scan_name, settings.ENV_NAME, self.fuel_web.admin_node_ip,
policy_id, wat_policy_template['uuid'])
scan_uuid = nessus_client.launch_scan(scan_id)
@@ -203,8 +202,8 @@ class TestNessus(NeutronTunHaBase):
wait(check_scan_complete, interval=10, timeout=60 * 30)
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(scan_id, file_id,
'master_wat', 'html', LOGS_DIR)
nessus_client.download_scan_result(
scan_id, file_id, 'master_wat', 'html', settings.LOGS_DIR)
self.env.make_snapshot("nessus_fuel_master_wat")
@@ -227,16 +226,16 @@ class TestNessus(NeutronTunHaBase):
self.enable_password_login_for_ssh_on_slaves(['slave-01'])
if CONF.NESSUS_ADDRESS is None:
CONF.NESSUS_ADDRESS = \
if settings.NESSUS_ADDRESS is None:
settings.NESSUS_ADDRESS = \
self.find_nessus_address(nessus_net_name='admin',
nessus_port=CONF.NESSUS_PORT)
nessus_port=settings.NESSUS_PORT)
nessus_client = nessus.NessusClient(CONF.NESSUS_ADDRESS,
CONF.NESSUS_PORT,
CONF.NESSUS_USERNAME,
CONF.NESSUS_PASSWORD,
CONF.NESSUS_SSL_VERIFY)
nessus_client = nessus.NessusClient(settings.NESSUS_ADDRESS,
settings.NESSUS_PORT,
settings.NESSUS_USERNAME,
settings.NESSUS_PASSWORD,
settings.NESSUS_SSL_VERIFY)
scan_start_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
@@ -248,13 +247,13 @@ class TestNessus(NeutronTunHaBase):
policies_list)[0]
policy_id = nessus_client.add_cpa_policy(
scan_name, CONF.ENV_NAME, cpa_policy_template['uuid'])
scan_name, settings.ENV_NAME, cpa_policy_template['uuid'])
slave_address = \
self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']
scan_id = nessus_client.create_scan(
scan_name, CONF.ENV_NAME, slave_address,
scan_name, settings.ENV_NAME, slave_address,
policy_id, cpa_policy_template['uuid'])
scan_uuid = nessus_client.launch_scan(scan_id)
history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid]
@@ -264,7 +263,7 @@ class TestNessus(NeutronTunHaBase):
wait(check_scan_complete, interval=10, timeout=60 * 30)
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
nessus_client.download_scan_result(scan_id, file_id,
'controller_cpa', 'html', LOGS_DIR)
nessus_client.download_scan_result(
scan_id, file_id, 'controller_cpa', 'html', settings.LOGS_DIR)
self.env.make_snapshot("nessus_controller_ubuntu_cpa")


@@ -141,7 +141,7 @@ def patch_and_assemble_ubuntu_bootstrap(environment):
# renew code in bootstrap
# Step 1 - install squashfs-tools
cmd = ("yum install -y squashfs-tools")
cmd = "yum install -y squashfs-tools"
result = remote.execute(cmd)
assert_equal(result['exit_code'], 0,
('Failed to install squashfs-tools {}'


@@ -146,7 +146,7 @@ def explain_group(**kwargs):
def show_all_groups(**kwargs):
"""Show all Proboscis groups"""
groups_nums = get_groups()
out = {k: len(v) for k, v in groups_nums.iteritems()}
out = {k: len(v) for k, v in groups_nums.items()}
print(pretty_log(out))
@@ -154,7 +154,7 @@ def show_fuelweb_groups(**kwargs):
"""Show Proboscis groups defined in fuelweb suite"""
groups_nums = get_groups()
out = {k: len(v) for k, v in groups_nums.iteritems()
out = {k: len(v) for k, v in groups_nums.items()
if not k.startswith('system_test')}
print(pretty_log(out))
@@ -163,7 +163,7 @@ def show_systest_groups(**kwargs):
"""Show Proboscis groups defined in Systest suite"""
groups_nums = get_groups()
out = {k: len(v) for k, v in groups_nums.iteritems()
out = {k: len(v) for k, v in groups_nums.items()
if k.startswith('system_test')}
print(pretty_log(out))


@@ -14,7 +14,6 @@
import functools
import traceback
import sys
import hashlib
import inspect
import collections
@@ -62,7 +61,6 @@ def make_snapshot_if_step_fail(func):
except SkipTest:
raise SkipTest()
except Exception as test_exception:
exc_trace = sys.exc_traceback
name = 'error_%s' % func.__name__
case_name = getattr(func, '_base_class', None)
step_num = getattr(func, '_step_num', None)
@@ -101,7 +99,7 @@ def make_snapshot_if_step_fail(func):
except:
logger.error("Error making the environment snapshot:"
" {0}".format(traceback.format_exc()))
raise test_exception, None, exc_trace
raise test_exception
return result
return wrapper
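
The deleted sys.exc_traceback and three-argument raise are both Python 2-only: sys.exc_traceback no longer exists in Python 3, and a Python 3 exception object carries its traceback in __traceback__, so re-raising it from the except block preserves the original frames without manual bookkeeping. A minimal illustration of the pattern used above:

    import traceback

    def flaky():
        raise ValueError('original failure')

    try:
        try:
            flaky()
        except Exception as test_exception:
            # snapshot/logging work would happen here
            raise test_exception  # the traceback still reaches into flaky()
    except ValueError:
        traceback.print_exc()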


@@ -115,6 +115,17 @@ class FillRootBaseActions(actions_base.ActionsBase):
def __init__(self, config=None):
super(FillRootBaseActions, self).__init__(config)
self.ostf_tests_should_failed = 0
self.primary_controller = None
self.primary_controller_fqdn = None
self.primary_controller_space_on_root = 0
self.disk_monitor_limit = 512
self.rabbit_disk_free_limit = 5
self.pacemaker_restart_timeout = 600
self.pcs_check_timeout = 300
self.primary_controller_space_to_filled = 0
self.pcs_status = None
self.slave_nodes_fqdn = None
self.slave_node_running_resources = None
@deferred_decorator([make_snapshot_if_step_fail])
@action


@@ -127,4 +127,4 @@ class FuelMasterMigrate(ActionsBase, FuelMasterActions):
with self.env.d_env.get_admin_remote() as remote:
wait(lambda: not remote.exists("/notready"),
timeout=900,
timeout_msg=("File wasn't removed in 900 sec"))
timeout_msg="File wasn't removed in 900 sec")