Refactor & fix incorrect default args
Fix: 1. Default arguments should not be mutable 2. Sets can be created with literal syntax instead of a set() call (slightly faster) 3. Redundant parentheses removed (readability) 4. Lambdas are discouraged by PEP 8, especially as class variables 5. Fix old-style classes (not inheriting from object) Update-reason: rebase Closes-Bug: #1512671 Change-Id: Id8314b76848278b67da898600240b59cf76a7328
This commit is contained in:
parent
672ce759ed
commit
b63dd9848b
|
@ -858,8 +858,11 @@ def check_swift_ring(remote):
|
|||
|
||||
def check_oswl_stat(postgres_actions, remote_collector, master_uid,
|
||||
operation='current',
|
||||
resources=['vm', 'flavor', 'volume', 'image',
|
||||
'tenant', 'keystone_user']):
|
||||
resources=None):
|
||||
if resources is None:
|
||||
resources = [
|
||||
'vm', 'flavor', 'volume', 'image', 'tenant', 'keystone_user'
|
||||
]
|
||||
logger.info("Checking that all resources were collected...")
|
||||
expected_resource_count = {
|
||||
'current':
|
||||
|
@ -1040,7 +1043,7 @@ def check_auto_mode(remote):
|
|||
|
||||
def is_ntpd_active(remote, ntpd_ip):
|
||||
cmd = 'ntpdate -d -p 4 -t 0.2 -u {0}'.format(ntpd_ip)
|
||||
return (not remote.execute(cmd)['exit_code'])
|
||||
return not remote.execute(cmd)['exit_code']
|
||||
|
||||
|
||||
def check_repo_managment(remote):
|
||||
|
@ -1168,7 +1171,7 @@ def check_log_lines_order(remote, log_file_path, line_matcher):
|
|||
|
||||
current_line_pos = int(result['stdout'][0].split(':')[0])
|
||||
|
||||
previous_line_pos = previous_line_pos + current_line_pos
|
||||
previous_line_pos += current_line_pos
|
||||
previous_line = current_line
|
||||
|
||||
|
||||
|
|
|
@ -106,7 +106,7 @@ class BaseActions(object):
|
|||
def is_container_ready(self):
|
||||
result = self.admin_remote.execute("timeout 5 dockerctl check {0}"
|
||||
.format(self.container))
|
||||
return (result['exit_code'] == 0)
|
||||
return result['exit_code'] == 0
|
||||
|
||||
def wait_for_ready_container(self, timeout=300):
|
||||
wait(lambda: self.is_container_ready, timeout=timeout)
|
||||
|
@ -440,9 +440,11 @@ class NailgunActions(BaseActions):
|
|||
"s in {0} for details.").format(log_file))
|
||||
raise
|
||||
|
||||
def force_oswl_collect(self, resources=['vm', 'flavor', 'volume',
|
||||
'image', 'tenant',
|
||||
'keystone_user']):
|
||||
def force_oswl_collect(self, resources=None):
|
||||
if resources is None:
|
||||
resources = [
|
||||
'vm', 'flavor', 'volume', 'image', 'tenant', 'keystone_user'
|
||||
]
|
||||
for resource in resources:
|
||||
cmd = 'supervisorctl restart oswl' \
|
||||
'_{0}_collectord'.format(resource)
|
||||
|
|
|
@ -208,10 +208,10 @@ def get_package_test_info(package, pkg_type, tests_path, patch_target):
|
|||
target, project = _get_target_and_project(package, all_packages)
|
||||
if patch_target == 'master':
|
||||
if target not in ['master', 'bootstrap']:
|
||||
return set([None])
|
||||
return {None}
|
||||
if patch_target == 'environment':
|
||||
if target not in ['deployment', 'provisioning']:
|
||||
return set([None])
|
||||
return {None}
|
||||
target_tests_path = "/".join((tests_path, pkg_type, target, tests_file))
|
||||
project_tests_path = "/".join((tests_path, pkg_type, target, project,
|
||||
tests_file))
|
||||
|
|
|
@ -105,7 +105,9 @@ class CustomRepo(object):
|
|||
self.regenerate_repo(self.centos_script, self.local_mirror_centos)
|
||||
|
||||
# Install tools to masternode
|
||||
def install_tools(self, master_tools=[]):
|
||||
def install_tools(self, master_tools=None):
|
||||
if master_tools is None:
|
||||
master_tools = []
|
||||
logger.info("Installing necessary tools for {0}"
|
||||
.format(settings.OPENSTACK_RELEASE))
|
||||
for master_tool in master_tools:
|
||||
|
|
|
@ -183,7 +183,7 @@ def get_current_env(args):
|
|||
|
||||
|
||||
@logwrap
|
||||
def update_yaml(yaml_tree=[], yaml_value='', is_uniq=True,
|
||||
def update_yaml(yaml_tree=None, yaml_value='', is_uniq=True,
|
||||
yaml_file=settings.TIMESTAT_PATH_YAML):
|
||||
"""Store/update a variable in YAML file.
|
||||
|
||||
|
@ -191,6 +191,8 @@ def update_yaml(yaml_tree=[], yaml_value='', is_uniq=True,
|
|||
yaml_value - value of the variable, will be overwritten if exists,
|
||||
is_uniq - If false, add the unique two-digit suffix to the variable name.
|
||||
"""
|
||||
if yaml_tree is None:
|
||||
yaml_tree = []
|
||||
yaml_data = {}
|
||||
if os.path.isfile(yaml_file):
|
||||
with open(yaml_file, 'r') as f:
|
||||
|
@ -334,7 +336,7 @@ def run_on_remote(*args, **kwargs):
|
|||
|
||||
@logwrap
|
||||
def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
|
||||
jsonify=False, assert_ec_equal=[0],
|
||||
jsonify=False, assert_ec_equal=None,
|
||||
raise_on_assert=True):
|
||||
# TODO(ivankliuk): move it to devops.helpers.SSHClient
|
||||
"""Execute ``cmd`` on ``remote`` and return result.
|
||||
|
@ -348,6 +350,8 @@ def run_on_remote_get_results(remote, cmd, clear=False, err_msg=None,
|
|||
:return: dict
|
||||
:raise: Exception
|
||||
"""
|
||||
if assert_ec_equal is None:
|
||||
assert_ec_equal = [0]
|
||||
result = remote.execute(cmd)
|
||||
if result['exit_code'] not in assert_ec_equal:
|
||||
error_details = {
|
||||
|
@ -397,7 +401,7 @@ def json_deserialize(json_string):
|
|||
:return: obj
|
||||
:raise: Exception
|
||||
"""
|
||||
if isinstance(json_string, (list)):
|
||||
if isinstance(json_string, list):
|
||||
json_string = ''.join(json_string)
|
||||
|
||||
try:
|
||||
|
@ -446,7 +450,7 @@ def get_net_settings(remote, skip_interfaces=set()):
|
|||
bond_mode_cmd = 'awk \'{{print $1}}\' /sys/class/net/{0}/bonding/mode'
|
||||
bond_slaves_cmd = ('awk \'{{gsub(" ","\\n"); print}}\' '
|
||||
'/sys/class/net/{0}/bonding/slaves')
|
||||
bridge_slaves_cmd = ('ls -1 /sys/class/net/{0}/brif/')
|
||||
bridge_slaves_cmd = 'ls -1 /sys/class/net/{0}/brif/'
|
||||
|
||||
node_interfaces = [l.strip() for l in run_on_remote(remote, interface_cmd)
|
||||
if not any(re.search(regex, l.strip()) for regex
|
||||
|
|
|
@ -167,7 +167,7 @@ class EnvironmentModel(object):
|
|||
'build_images': '1' if build_images else '0'
|
||||
}
|
||||
keys = ''
|
||||
if(iso_connect_as == 'usb'):
|
||||
if iso_connect_as == 'usb':
|
||||
keys = (
|
||||
"<Wait>\n" # USB boot uses boot_menu=yes for master node
|
||||
"<F12>\n"
|
||||
|
@ -360,7 +360,7 @@ class EnvironmentModel(object):
|
|||
security=settings.SECURITY_TEST):
|
||||
# start admin node
|
||||
admin = self.d_env.nodes().admin
|
||||
if(iso_connect_as == 'usb'):
|
||||
if iso_connect_as == 'usb':
|
||||
admin.disk_devices.get(device='disk',
|
||||
bus='usb').volume.upload(settings.ISO_PATH)
|
||||
else: # cdrom is default
|
||||
|
@ -460,10 +460,11 @@ class EnvironmentModel(object):
|
|||
run_on_remote(remote, check_cmd)
|
||||
|
||||
@retry(count=3, delay=60)
|
||||
def sync_time(self, nailgun_nodes=[]):
|
||||
def sync_time(self, nailgun_nodes=None):
|
||||
# with @retry, failure on any step of time synchronization causes
|
||||
# restart the time synchronization starting from the admin node
|
||||
|
||||
if nailgun_nodes is None:
|
||||
nailgun_nodes = []
|
||||
controller_nodes = [
|
||||
n for n in nailgun_nodes if "controller" in n['roles']]
|
||||
other_nodes = [
|
||||
|
@ -593,7 +594,9 @@ class EnvironmentModel(object):
|
|||
# its original content.
|
||||
# * adds 'nameservers' at start of resolv.conf if merge=True
|
||||
# * replaces resolv.conf with 'nameservers' if merge=False
|
||||
def modify_resolv_conf(self, nameservers=[], merge=True):
|
||||
def modify_resolv_conf(self, nameservers=None, merge=True):
|
||||
if nameservers is None:
|
||||
nameservers = []
|
||||
with self.d_env.get_admin_remote() as remote:
|
||||
resolv_conf = remote.execute('cat /etc/resolv.conf')
|
||||
assert_equal(0, resolv_conf['exit_code'], 'Executing "{0}" on the '
|
||||
|
|
|
@ -463,9 +463,9 @@ class FuelWebClient(object):
|
|||
section = 'access'
|
||||
if option == 'assign_to_all_nodes':
|
||||
section = 'public_network_assignment'
|
||||
if option in ('dns_list'):
|
||||
if option in 'dns_list':
|
||||
section = 'external_dns'
|
||||
if option in ('ntp_list'):
|
||||
if option in 'ntp_list':
|
||||
section = 'external_ntp'
|
||||
if section:
|
||||
attributes['editable'][section][option]['value'] =\
|
||||
|
@ -1261,7 +1261,9 @@ class FuelWebClient(object):
|
|||
raise
|
||||
|
||||
@logwrap
|
||||
def update_nodes_interfaces(self, cluster_id, nailgun_nodes=[]):
|
||||
def update_nodes_interfaces(self, cluster_id, nailgun_nodes=None):
|
||||
if nailgun_nodes is None:
|
||||
nailgun_nodes = []
|
||||
net_provider = self.client.get_cluster(cluster_id)['net_provider']
|
||||
if NEUTRON == net_provider:
|
||||
assigned_networks = {
|
||||
|
|
|
@ -73,7 +73,7 @@ def get_build_artifact(url, artifact):
|
|||
return s
|
||||
|
||||
|
||||
class Build():
|
||||
class Build(object):
|
||||
def __init__(self, name, number):
|
||||
"""Get build info via Jenkins API, get test info via direct HTTP
|
||||
request.
|
||||
|
|
|
@ -36,7 +36,7 @@ from settings import TestRailSettings
|
|||
from testrail_client import TestRailProject
|
||||
|
||||
|
||||
class TestResult():
|
||||
class TestResult(object):
|
||||
"""TestResult.""" # TODO documentation
|
||||
|
||||
def __init__(self, name, group, status, duration, url=None,
|
||||
|
|
|
@ -28,7 +28,7 @@ import json
|
|||
import urllib2
|
||||
|
||||
|
||||
class APIClient:
|
||||
class APIClient(object):
|
||||
"""APIClient.""" # TODO documentation
|
||||
|
||||
def __init__(self, base_url):
|
||||
|
|
|
@ -17,7 +17,7 @@ from testrail import APIClient
|
|||
from testrail import APIError
|
||||
|
||||
|
||||
class TestRailProject():
|
||||
class TestRailProject(object):
|
||||
"""TestRailProject.""" # TODO documentation
|
||||
|
||||
def __init__(self, url, user, password, project):
|
||||
|
|
|
@ -298,7 +298,7 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=2,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection')]
|
||||
'Launch instance with file injection']
|
||||
)
|
||||
|
||||
logger.info(self._ostf_msg)
|
||||
|
@ -391,8 +391,8 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=3,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection'),
|
||||
('Check that required services are running')]
|
||||
'Launch instance with file injection',
|
||||
'Check that required services are running']
|
||||
)
|
||||
|
||||
logger.info(self._ostf_msg)
|
||||
|
@ -470,7 +470,7 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=2,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection')]
|
||||
'Launch instance with file injection']
|
||||
)
|
||||
|
||||
logger.info(self._ostf_msg)
|
||||
|
@ -503,7 +503,7 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=2,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection')]
|
||||
'Launch instance with file injection']
|
||||
)
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
|
||||
|
@ -594,8 +594,8 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=3,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection'),
|
||||
('Check that required services are running')]
|
||||
'Launch instance with file injection',
|
||||
'Check that required services are running']
|
||||
)
|
||||
|
||||
logger.info(self._ostf_msg)
|
||||
|
@ -676,7 +676,7 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=2,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection')])
|
||||
'Launch instance with file injection'])
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_5],
|
||||
groups=["check_bonding_with_contrail"])
|
||||
|
@ -770,6 +770,6 @@ class ContrailPlugin(TestBasic):
|
|||
should_fail=3,
|
||||
failed_test_name=[('Check network connectivity '
|
||||
'from instance via floating IP'),
|
||||
('Launch instance with file injection'),
|
||||
('Check that required services are running')]
|
||||
'Launch instance with file injection',
|
||||
'Check that required services are running']
|
||||
)
|
||||
|
|
|
@ -63,11 +63,11 @@ class BondingTest(TestBasic):
|
|||
|
||||
def check_interfaces_config_after_reboot(self, cluster_id):
|
||||
network_settings = dict()
|
||||
skip_interfaces = set([r'^pub-base$', r'^vr_pub-base$', r'^vr-base$',
|
||||
r'^mgmt-base$', r'^vr-host-base$',
|
||||
r'^mgmt-conntrd$', r'^hapr-host$',
|
||||
r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',
|
||||
r'^v_(management|public)$'])
|
||||
skip_interfaces = {
|
||||
r'^pub-base$', r'^vr_pub-base$', r'^vr-base$', r'^mgmt-base$',
|
||||
r'^vr-host-base$', r'^mgmt-conntrd$', r'^hapr-host$',
|
||||
r'^(tap|qr-|qg-|p_).*$', r'^v_vrouter.*$',
|
||||
r'^v_(management|public)$'}
|
||||
|
||||
nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
|
||||
|
||||
|
|
|
@ -463,6 +463,12 @@ class CephRadosGW(TestBasic):
|
|||
Snapshot ceph_rados_gw
|
||||
|
||||
"""
|
||||
def radosgw_started(remote):
|
||||
return len(
|
||||
remote.check_call(
|
||||
'ps aux | grep "/usr/bin/radosgw -n '
|
||||
'client.radosgw.gateway"')['stdout']) == 3
|
||||
|
||||
self.env.revert_snapshot("ready")
|
||||
self.env.bootstrap_nodes(
|
||||
self.env.d_env.nodes().slaves[:6])
|
||||
|
@ -522,10 +528,7 @@ class CephRadosGW(TestBasic):
|
|||
|
||||
# Check the radosqw daemon is started
|
||||
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
|
||||
radosgw_started = lambda: len(remote.check_call(
|
||||
'ps aux | grep "/usr/bin/radosgw -n '
|
||||
'client.radosgw.gateway"')['stdout']) == 3
|
||||
assert_true(radosgw_started(), 'radosgw daemon started')
|
||||
assert_true(radosgw_started(remote), 'radosgw daemon started')
|
||||
|
||||
self.env.make_snapshot("ceph_rados_gw")
|
||||
|
||||
|
|
|
@ -570,8 +570,7 @@ class FloatingIPs(TestBasic):
|
|||
}
|
||||
)
|
||||
|
||||
floating_list = []
|
||||
floating_list.append(self.fuel_web.get_floating_ranges()[0][0])
|
||||
floating_list = [self.fuel_web.get_floating_ranges()[0][0]]
|
||||
networking_parameters = {
|
||||
"floating_ranges": floating_list}
|
||||
|
||||
|
|
|
@ -301,6 +301,9 @@ class TestNetworkTemplates(TestNetworkTemplatesBase):
|
|||
Duration 120m
|
||||
Snapshot two_nodegroups_network_templates
|
||||
"""
|
||||
def get_network(x):
|
||||
return self.env.d_env.get_network(name=x).ip_network
|
||||
|
||||
if not MULTIPLE_NETWORKS:
|
||||
raise SkipTest()
|
||||
|
||||
|
@ -309,7 +312,7 @@ class TestNetworkTemplates(TestNetworkTemplatesBase):
|
|||
# TODO(akostrikov) This should be refactored.
|
||||
admin_net = self.env.d_env.admin_net
|
||||
admin_net2 = self.env.d_env.admin_net2
|
||||
get_network = lambda x: self.env.d_env.get_network(name=x).ip_network
|
||||
|
||||
networks = ['.'.join(get_network(n).split('.')[0:-1])
|
||||
for n in [admin_net, admin_net2]]
|
||||
nodes_addresses = ['.'.join(node['ip'].split('.')[0:-1]) for node in
|
||||
|
|
|
@ -168,7 +168,7 @@ class TestNetworkTemplatesBase(TestBasic):
|
|||
"corresponds to used networking template...")
|
||||
# Network for Neutron is configured in namespaces (l3/dhcp agents)
|
||||
# and a bridge for it doesn't have IP, so skipping it for now
|
||||
skip_roles = set(['neutron/private'])
|
||||
skip_roles = {'neutron/private'}
|
||||
for node in self.fuel_web.client.list_cluster_nodes(cluster_id):
|
||||
node_networks = set()
|
||||
node_group_name = [ng['name'] for ng in
|
||||
|
|
|
@ -29,7 +29,7 @@ class TestOffloading(TestBasic):
|
|||
modes = None
|
||||
updated_offloads = None
|
||||
for i in interfaces:
|
||||
if (i['name'] == interface_to_update):
|
||||
if i['name'] == interface_to_update:
|
||||
modes = i['offloading_modes']
|
||||
|
||||
for k in update_values:
|
||||
|
|
|
@ -32,8 +32,8 @@ from fuelweb_test.helpers import os_actions
|
|||
class VcenterDeploy(TestBasic):
|
||||
"""VcenterDeploy.""" # TODO documentation
|
||||
|
||||
node_name = lambda self, name_node: self.fuel_web. \
|
||||
get_nailgun_node_by_name(name_node)['hostname']
|
||||
def node_name(self, name_node):
|
||||
return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']
|
||||
|
||||
def create_vm(self, os_conn=None, vm_count=None):
|
||||
# Get list of available images,flavors and hipervisors
|
||||
|
|
|
@ -66,6 +66,15 @@ class TestNessus(NeutronTunHaBase):
|
|||
if tcp_ping(address.format(), nessus_port):
|
||||
return address.format()
|
||||
|
||||
@staticmethod
|
||||
def get_check_scan_complete(nessus_client, scan_id, history_id):
|
||||
def check_scan_complete():
|
||||
return (
|
||||
nessus_client.get_scan_status(
|
||||
scan_id,
|
||||
history_id) == 'completed')
|
||||
return check_scan_complete
|
||||
|
||||
@test(depends_on=[base_test_case.SetupEnvironment.prepare_slaves_5],
|
||||
groups=["deploy_neutron_tun_ha_nessus"])
|
||||
@decorators.log_snapshot_after_test
|
||||
|
@ -132,9 +141,8 @@ class TestNessus(NeutronTunHaBase):
|
|||
scan_uuid = nessus_client.launch_scan(scan_id)
|
||||
history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid]
|
||||
|
||||
check_scan_complete = \
|
||||
lambda: (nessus_client.get_scan_status(scan_id, history_id) ==
|
||||
'completed')
|
||||
check_scan_complete = self.get_check_scan_complete(
|
||||
nessus_client, scan_id, history_id)
|
||||
wait(check_scan_complete, interval=10, timeout=60 * 30)
|
||||
|
||||
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
|
||||
|
@ -190,9 +198,8 @@ class TestNessus(NeutronTunHaBase):
|
|||
scan_uuid = nessus_client.launch_scan(scan_id)
|
||||
history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid]
|
||||
|
||||
check_scan_complete = \
|
||||
lambda: (nessus_client.get_scan_status(scan_id, history_id) ==
|
||||
'completed')
|
||||
check_scan_complete = self.get_check_scan_complete(
|
||||
nessus_client, scan_id, history_id)
|
||||
wait(check_scan_complete, interval=10, timeout=60 * 30)
|
||||
|
||||
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
|
||||
|
@ -252,9 +259,8 @@ class TestNessus(NeutronTunHaBase):
|
|||
scan_uuid = nessus_client.launch_scan(scan_id)
|
||||
history_id = nessus_client.list_scan_history_ids(scan_id)[scan_uuid]
|
||||
|
||||
check_scan_complete = \
|
||||
lambda: (nessus_client.get_scan_status(scan_id, history_id) ==
|
||||
'completed')
|
||||
check_scan_complete = self.get_check_scan_complete(
|
||||
nessus_client, scan_id, history_id)
|
||||
wait(check_scan_complete, interval=10, timeout=60 * 30)
|
||||
|
||||
file_id = nessus_client.export_scan(scan_id, history_id, 'html')
|
||||
|
|
|
@ -230,7 +230,7 @@ class CICMaintenanceMode(TestBasic):
|
|||
format(command1, result))
|
||||
|
||||
logger.info('Unexpected reboot on node %s', devops_node.name)
|
||||
command2 = ('reboot --force >/dev/null & ')
|
||||
command2 = 'reboot --force >/dev/null & '
|
||||
result = remote.execute(command2)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
'Failed to execute "{0}" on remote host: {1}'.
|
||||
|
@ -431,7 +431,7 @@ class CICMaintenanceMode(TestBasic):
|
|||
"Maintenance mode should not be available")
|
||||
|
||||
logger.info('Unexpected reboot on node %s', devops_node.name)
|
||||
command2 = ('reboot --force >/dev/null & ')
|
||||
command2 = 'reboot --force >/dev/null & '
|
||||
result = remote.execute(command2)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
'Failed to execute "{0}" on remote host: {1}'.
|
||||
|
|
|
@ -138,7 +138,7 @@ class TestHaFailoverBase(TestBasic):
|
|||
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
|
||||
p_d_ctrl = self.fuel_web.get_nailgun_primary_node(d_ctrls[0])
|
||||
ret.append(p_d_ctrl)
|
||||
ret.append((set(d_ctrls) - set([p_d_ctrl])).pop())
|
||||
ret.append((set(d_ctrls) - {p_d_ctrl}).pop())
|
||||
|
||||
return ret
|
||||
|
||||
|
@ -168,8 +168,8 @@ class TestHaFailoverBase(TestBasic):
|
|||
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
|
||||
|
||||
self.fuel_web.assert_pacemaker(
|
||||
(set(d_ctrls) - set([devops_node])).pop().name,
|
||||
set(d_ctrls) - set([devops_node]),
|
||||
(set(d_ctrls) - {devops_node}).pop().name,
|
||||
set(d_ctrls) - {devops_node},
|
||||
[devops_node])
|
||||
|
||||
# Wait until Nailgun marked suspended controller as offline
|
||||
|
@ -189,7 +189,7 @@ class TestHaFailoverBase(TestBasic):
|
|||
|
||||
# Wait until MySQL Galera is UP on online controllers
|
||||
self.fuel_web.wait_mysql_galera_is_up(
|
||||
[n.name for n in set(d_ctrls) - set([devops_node])],
|
||||
[n.name for n in set(d_ctrls) - {devops_node}],
|
||||
timeout=300)
|
||||
|
||||
# STEP: Run OSTF
|
||||
|
@ -913,7 +913,7 @@ class TestHaFailoverBase(TestBasic):
|
|||
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
|
||||
|
||||
rabbit_status = self.fuel_web.get_rabbit_running_nodes(
|
||||
list((set(d_ctrls) - set([p_d_ctrl])))[0].name)
|
||||
list((set(d_ctrls) - {p_d_ctrl}))[0].name)
|
||||
logger.debug("rabbit status is {}".format(rabbit_status))
|
||||
for rabbit_node in rabbit_nodes:
|
||||
assert_true(rabbit_node in rabbit_status,
|
||||
|
|
|
@ -392,7 +392,7 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
|
|||
d_ctrls = self.fuel_web.get_devops_nodes_by_nailgun_nodes(n_ctrls)
|
||||
|
||||
online_controllers_names = [n.name for n in
|
||||
set(d_ctrls) - set([devops_node_with_l3])]
|
||||
set(d_ctrls) - {devops_node_with_l3}]
|
||||
self.fuel_web.wait_mysql_galera_is_up(online_controllers_names)
|
||||
|
||||
# Wait reschedule l3 agent
|
||||
|
|
|
@ -67,11 +67,12 @@ class DeployCheckRadosGW(actions_base.ActionsBase):
|
|||
@action
|
||||
def check_rados_daemon(self):
|
||||
"""Check the radosqw daemon is started"""
|
||||
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
|
||||
radosgw_started = lambda: len(remote.check_call(
|
||||
def radosgw_started(remote):
|
||||
return len(remote.check_call(
|
||||
'ps aux | grep "/usr/bin/radosgw -n '
|
||||
'client.radosgw.gateway"')['stdout']) == 3
|
||||
assert_true(radosgw_started(), 'radosgw daemon started')
|
||||
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
|
||||
assert_true(radosgw_started(remote), 'radosgw daemon started')
|
||||
|
||||
|
||||
@factory
|
||||
|
|
Loading…
Reference in New Issue