Eliminate run_on_remote, execute_remote_cmd and run_on_remote_get_results

Use SSHClient().check_call instead.
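
The replacement pattern, as a minimal sketch distilled from the hunks below (`remote` stands for an already-connected SSHClient; the command is taken from this diff):

# Before: module-level helpers wrapped the SSH session.
lines = run_on_remote(remote, 'ceph osd ls -f json')
parsed = run_on_remote(remote, 'ceph osd ls -f json', jsonify=True)

# After: one method on the client; the returned result object exposes
# the output in whichever representation the caller needs.
result = remote.check_call('ceph osd ls -f json')
lines = result.stdout          # list of output lines
text = result.stdout_str       # output joined into a single string
parsed = result.stdout_json    # output parsed as JSON

Custom failure messages move from run_on_remote's err_msg keyword to check_call's error_info keyword, and check_call verifies the exit code itself (note the dropped exit_code=0 argument below).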

Change-Id: I39f505c860d54558734bfe1efa173cefe54cf097
(cherry picked from commit aeb268c)
Alexey Stepanov 2016-08-11 12:31:36 +03:00
parent ceee4237bf
commit f8d4f2c5c3
11 changed files with 77 additions and 78 deletions

View File

@ -16,7 +16,7 @@ import pytest
from devops.helpers.helpers import get_admin_remote
from devops.helpers.helpers import icmp_ping
from devops.helpers.helpers import _wait
from devops.helpers.helpers import wait_pass
from devops.helpers.helpers import wait
from fuelweb_test import logger
@ -193,8 +193,8 @@ class TestFuelMasterMigrate(object):
fuel_web = self.manager.fuel_web
_wait(fuel_web.get_nailgun_version,
timeout=60 * 20)
wait_pass(fuel_web.get_nailgun_version,
timeout=60 * 20)
def compute_hard_restart(self):
"""Hard restart compute with Fuel Master node"""

View File

@ -16,7 +16,6 @@ from proboscis.asserts import assert_equal
from fuelweb_test import logger
from fuelweb_test.helpers.utils import check_distribution
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test.settings import DNS_SUFFIX
from fuelweb_test.settings import OPENSTACK_RELEASE
from fuelweb_test.settings import OPENSTACK_RELEASE_CENTOS
@ -34,9 +33,9 @@ def start_monitor(remote):
logger.debug("Starting Ceph monitor on {0}".format(remote.host))
check_distribution()
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
run_on_remote(remote, 'start ceph-mon-all')
remote.check_call('start ceph-mon-all')
if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE:
run_on_remote(remote, '/etc/init.d/ceph start')
remote.check_call('/etc/init.d/ceph start')
def stop_monitor(remote):
@ -49,9 +48,9 @@ def stop_monitor(remote):
logger.debug("Stopping Ceph monitor on {0}".format(remote.host))
check_distribution()
if OPENSTACK_RELEASE_UBUNTU in OPENSTACK_RELEASE:
run_on_remote(remote, 'stop ceph-mon-all')
remote.check_call('stop ceph-mon-all')
if OPENSTACK_RELEASE_CENTOS in OPENSTACK_RELEASE:
run_on_remote(remote, '/etc/init.d/ceph stop')
remote.check_call('/etc/init.d/ceph stop')
def restart_monitor(remote):
@ -68,7 +67,7 @@ def restart_monitor(remote):
def get_health(remote):
logger.debug("Checking Ceph cluster health on {0}".format(remote.host))
cmd = 'ceph health -f json'
return run_on_remote(remote, cmd, jsonify=True)
return remote.check_call(cmd).stdout_json
def get_monitor_node_fqdns(remote):
@ -78,7 +77,7 @@ def get_monitor_node_fqdns(remote):
:return: list of FQDNs
"""
cmd = 'ceph mon_status -f json'
result = run_on_remote(remote, cmd, jsonify=True)
result = remote.check_call(cmd).stdout_json
fqdns = [i['name'] + DNS_SUFFIX for i in result['monmap']['mons']]
msg = "Ceph monitor service is running on {0}".format(', '.join(fqdns))
logger.debug(msg)
@ -242,7 +241,7 @@ def get_osd_tree(remote):
"""
logger.debug("Fetching Ceph OSD tree")
cmd = 'ceph osd tree -f json'
return run_on_remote(remote, cmd, jsonify=True)
return remote.check_call(cmd).stdout_json
def get_osd_ids(remote):
@ -253,7 +252,7 @@ def get_osd_ids(remote):
"""
logger.debug("Fetching Ceph OSD ids")
cmd = 'ceph osd ls -f json'
return run_on_remote(remote, cmd, jsonify=True)
return remote.check_call(cmd).stdout_json
def get_rbd_images_list(remote, pool):
@ -264,7 +263,7 @@ def get_rbd_images_list(remote, pool):
:return: JSON-like object
"""
cmd = 'rbd --pool {pool} --format json ls -l'.format(pool=pool)
return run_on_remote(remote, cmd, jsonify=True)
return remote.check_call(cmd).stdout_json
def get_version(remote):
@ -274,4 +273,4 @@ def get_version(remote):
:return: str
"""
cmd = 'ceph --version'
return run_on_remote(remote, cmd)[0].split(' ')[2]
return remote.check_call(cmd).stdout[0].split(' ')[2]

View File

@ -13,7 +13,6 @@
# under the License.
from fuelweb_test import logger
from fuelweb_test.helpers.utils import run_on_remote
def ovs_get_data(remote, table, columns=None):
@ -32,7 +31,7 @@ def ovs_get_data(remote, table, columns=None):
col = ''
cmd = ('ovs-vsctl --oneline --format=json {columns} list {table}'
.format(columns=col, table=table))
res = run_on_remote(remote, cmd, jsonify=True)
res = remote.check_call(cmd).stdout_json
logger.debug("OVS output of the command '{0}': {1}".format(cmd, res))
return res

View File

@ -360,7 +360,7 @@ def update_packages(environment, remote, packages, exclude_packages=None):
' '.join(packages), ','.join(exclude_packages or []))
]
for cmd in cmds:
environment.execute_remote_cmd(remote, cmd, exit_code=0)
remote.check_call(cmd)
def update_packages_on_slaves(environment, slaves, packages=None,
@ -591,7 +591,7 @@ def run_actions(environment, target, slaves, action_type='patch-scenario'):
tasks, timeout)
continue
for remote in remotes:
environment.execute_remote_cmd(remote, command)
remote.check_call(command)
if devops_action == 'down':
environment.fuel_web.warm_shutdown_nodes(devops_nodes)
elif devops_action == 'up':

View File

@ -609,12 +609,13 @@ def get_net_settings(remote, skip_interfaces=None):
'/sys/class/net/{0}/bonding/slaves')
bridge_slaves_cmd = 'ls -1 /sys/class/net/{0}/brif/'
node_interfaces = [l.strip() for l in run_on_remote(remote, interface_cmd)
if not any(re.search(regex, l.strip()) for regex
in skip_interfaces)]
node_vlans = [l.strip() for l in run_on_remote(remote, vlan_cmd)]
node_bonds = [l.strip() for l in run_on_remote(remote, bond_cmd)]
node_bridges = [l.strip() for l in run_on_remote(remote, bridge_cmd)]
node_interfaces = [
l.strip() for l in remote.check_call(interface_cmd).stdout
if not any(re.search(regex, l.strip())
for regex in skip_interfaces)]
node_vlans = [l.strip() for l in remote.check_call(vlan_cmd).stdout]
node_bonds = [l.strip() for l in remote.check_call(bond_cmd).stdout]
node_bridges = [l.strip() for l in remote.check_call(bridge_cmd).stdout]
for interface in node_interfaces:
bond_mode = None
@ -626,16 +627,16 @@ def get_net_settings(remote, skip_interfaces=None):
if_type = 'bond'
bond_mode = ''.join(
[l.strip() for l in
run_on_remote(remote, bond_mode_cmd.format(interface))])
remote.check_call(bond_mode_cmd.format(interface)).stdout])
bond_slaves = set(
[l.strip() for l in
run_on_remote(remote, bond_slaves_cmd.format(interface))]
remote.check_call(bond_slaves_cmd.format(interface)).stdout]
)
elif interface in node_bridges:
if_type = 'bridge'
bridge_slaves = set(
[l.strip() for l in
run_on_remote(remote, bridge_slaves_cmd.format(interface))
remote.check_call(bridge_slaves_cmd.format(interface)).stdout
if not any(re.search(regex, l.strip())
for regex in skip_interfaces)]
)
@ -643,7 +644,7 @@ def get_net_settings(remote, skip_interfaces=None):
if_type = 'common'
if_ips = set(
[l.strip()
for l in run_on_remote(remote, ip_cmd.format(interface))]
for l in remote.check_call(ip_cmd.format(interface)).stdout]
)
net_settings[interface] = {
@ -660,8 +661,8 @@ def get_net_settings(remote, skip_interfaces=None):
def get_ip_listen_stats(remote, proto='tcp'):
# If bindv6only is disabled, then IPv6 sockets listen on IPv4 too
check_v6_bind_cmd = 'cat /proc/sys/net/ipv6/bindv6only'
bindv6only = ''.join([l.strip()
for l in run_on_remote(remote, check_v6_bind_cmd)])
bindv6only = ''.join(
[l.strip() for l in remote.check_call(check_v6_bind_cmd).stdout])
check_v6 = bindv6only == '0'
if check_v6:
cmd = ("awk '$4 == \"0A\" {{gsub(\"00000000000000000000000000000000\","
@ -669,7 +670,7 @@ def get_ip_listen_stats(remote, proto='tcp'):
"/proc/net/{0} /proc/net/{0}6").format(proto)
else:
cmd = "awk '$4 == \"0A\" {{print $2}}' /proc/net/{0}".format(proto)
return [l.strip() for l in run_on_remote(remote, cmd)]
return [l.strip() for l in remote.check_call(cmd).stdout]
@logwrap
@ -686,8 +687,8 @@ def node_freemem(remote, unit='MB'):
denominator = denominators.get(unit, denominators['MB'])
cmd_mem_free = 'free -k | grep Mem:'
cmd_swap_free = 'free -k | grep Swap:'
mem_free = run_on_remote(remote, cmd_mem_free)[0]
swap_free = run_on_remote(remote, cmd_swap_free)[0]
mem_free = remote.check_call(cmd_mem_free).stdout[0]
swap_free = remote.check_call(cmd_swap_free).stdout[0]
ret = {
"mem": {
"total": int(mem_free.split()[1]) // denominator,
@ -747,7 +748,7 @@ def get_node_hiera_roles(remote, fqdn=None):
cmd = 'hiera roles'
if fqdn:
cmd += ' fqdn={}'.format(fqdn)
roles = ''.join(run_on_remote(remote, cmd)).strip()
roles = remote.check_call(cmd).stdout_str
# Convert a string with roles like ["ceph-osd", "controller"] into a list
return [role.strip('" ') for role in roles.strip("[]").split(',')]
@ -1024,7 +1025,7 @@ def erase_data_from_hdd(remote,
commands.append("sync")
for cmd in commands:
run_on_remote(remote, cmd)
remote.check_call(cmd)
@logwrap

View File

@ -66,7 +66,6 @@ from fuelweb_test.helpers.uca import change_cluster_uca_config
from fuelweb_test.helpers.utils import get_node_hiera_roles
from fuelweb_test.helpers.utils import node_freemem
from fuelweb_test.helpers.utils import pretty_log
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test.models.nailgun_client import NailgunClient
import fuelweb_test.settings as help_data
from fuelweb_test.settings import ATTEMPTS
@ -2432,16 +2431,19 @@ class FuelWebClient29(object):
# FIXME(kozhukalov): This approach is outdated
# due to getting rid of docker containers.
logger.info("Backup of the master node is started.")
run_on_remote(remote, "echo CALC_MY_MD5SUM > /etc/fuel/data",
err_msg='command calc_my_mdsum failed')
run_on_remote(remote, "iptables-save > /etc/fuel/iptables-backup",
err_msg='can not save iptables in iptables-backup')
run_on_remote(remote,
"md5sum /etc/fuel/data | cut -d' ' -f1 > /etc/fuel/sum",
err_msg='failed to create sum file')
run_on_remote(remote, 'dockerctl backup')
run_on_remote(remote, 'rm -f /etc/fuel/data',
err_msg='Can not remove /etc/fuel/data')
remote.check_call(
"echo CALC_MY_MD5SUM > /etc/fuel/data",
error_info='command calc_my_md5sum failed')
remote.check_call(
"iptables-save > /etc/fuel/iptables-backup",
error_info='cannot save iptables in iptables-backup')
remote.check_call(
"md5sum /etc/fuel/data | cut -d' ' -f1 > /etc/fuel/sum",
error_info='failed to create sum file')
remote.check_call('dockerctl backup')
remote.check_call(
'rm -f /etc/fuel/data',
error_info='Cannot remove /etc/fuel/data')
logger.info("Backup of the master node is complete.")
@logwrap

View File

@ -267,10 +267,12 @@ class CommandLineTest(test_cli_base.CommandLine):
with open(PATH_TO_PEM) as pem_file:
old_ssl_keypair = pem_file.read().strip()
current_ssl_keypair = self.get_current_ssl_keypair(controller_node)
logger.info(("SSL keypair before cluster deploy {0} \
and after deploy {1}".format(old_ssl_keypair,
current_ssl_keypair)
))
logger.info(
"SSL keypair before cluster deploy:\n"
"{0}\n"
"and after deploy:\n"
"{1}".format(old_ssl_keypair, current_ssl_keypair)
)
assert_equal(old_ssl_keypair, current_ssl_keypair,
message="SSL keypairs are not equal")
self.show_step(16)

View File

@ -353,10 +353,12 @@ class CommandLine(TestBasic):
@logwrap
def get_current_ssl_keypair(self, controller_ip):
cmd = "cat /var/lib/astute/haproxy/public_haproxy.pem"
current_ssl_keypair = self.ssh_manager.execute_on_remote(
ip=controller_ip,
cmd=cmd)['stdout_str']
path = "/var/lib/astute/haproxy/public_haproxy.pem"
with self.ssh_manager.open_on_remote(
ip=controller_ip,
path=path
) as f:
current_ssl_keypair = f.read().strip()
return current_ssl_keypair
@logwrap

View File

@ -18,7 +18,6 @@ from proboscis import SkipTest
from fuelweb_test import logger
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
from fuelweb_test.helpers.utils import run_on_remote
@test(groups=['fuel-mirror'])
@ -55,8 +54,8 @@ class TestCreateMirror(TestBasic):
self.env.revert_snapshot('empty')
logger.info('Prepare environment for mirror checks.')
with self.env.d_env.get_admin_remote() as remote:
run_on_remote(remote, 'docker pull ubuntu')
run_on_remote(remote, 'docker pull nginx')
remote.check_call('docker pull ubuntu')
remote.check_call('docker pull nginx')
# TODO(akostrikov) add check that images are present.
self.env.make_snapshot(snapshot_name, is_make=True)

View File

@ -21,7 +21,6 @@ from proboscis import SkipTest
from fuelweb_test.helpers.decorators import retry
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers.utils import run_on_remote
from fuelweb_test import logger
from fuelweb_test import logwrap
from fuelweb_test import settings
@ -85,13 +84,14 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
def check_instance_connectivity(remote, dhcp_namespace, instance_ip,
instance_keypair):
cmd_check_ns = 'ip netns list'
namespaces = [l.strip() for l in run_on_remote(remote, cmd_check_ns)]
namespaces = [
l.strip() for l in remote.check_call(cmd_check_ns).stdout]
logger.debug('Net namespaces on remote: {0}.'.format(namespaces))
assert_true(dhcp_namespace in namespaces,
"Network namespace '{0}' doesn't exist on "
"remote slave!".format(dhcp_namespace))
instance_key_path = '/root/.ssh/instancekey_rsa'
run_on_remote(remote, 'echo "{0}" > {1} && chmod 400 {1}'.format(
remote.check_call('echo "{0}" > {1} && chmod 400 {1}'.format(
instance_keypair.private_key, instance_key_path))
cmd = (". openrc; ip netns exec {0} ssh -i {1}"

View File

@ -12,13 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import time
from devops.helpers.helpers import wait
from proboscis.asserts import assert_true
from devops.helpers.helpers import wait
from fuelweb_test.helpers.utils import run_on_remote_get_results
from fuelweb_test.helpers.pacemaker import get_pacemaker_nodes_attributes
from fuelweb_test.helpers.pacemaker import get_pcs_nodes
from fuelweb_test.helpers.pacemaker import parse_pcs_status_xml
@ -146,8 +146,8 @@ class FillRootActions(object):
with self.fuel_web.get_ssh_for_node(
self.primary_controller.name) as remote:
root_free = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')['stdout_str']
root_free = remote.check_call(
'cibadmin --query --scope status').stdout_str
self.primary_controller_space_on_root = get_pacemaker_nodes_attributes(
root_free)[self.primary_controller_fqdn]['root_free']
@ -258,9 +258,8 @@ class FillRootActions(object):
def checking_health_disk_attribute():
logger.info("Checking for '#health_disk' attribute")
cibadmin_status_xml = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')[
'stdout_str']
cibadmin_status_xml = remote.check_call(
'cibadmin --query --scope status').stdout_str
pcs_attribs = get_pacemaker_nodes_attributes(
cibadmin_status_xml)
return '#health_disk' in pcs_attribs[
@ -269,9 +268,8 @@ class FillRootActions(object):
def checking_for_red_in_health_disk_attribute():
logger.info(
"Checking for '#health_disk' attribute have 'red' value")
cibadmin_status_xml = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')[
'stdout_str']
cibadmin_status_xml = remote.check_call(
'cibadmin --query --scope status').stdout_str
pcs_attribs = get_pacemaker_nodes_attributes(
cibadmin_status_xml)
return pcs_attribs[self.primary_controller_fqdn][
@ -319,11 +317,9 @@ class FillRootActions(object):
with self.fuel_web.get_ssh_for_node(
self.primary_controller.name) as remote:
run_on_remote_get_results(
remote, 'rm /root/bigfile /root/bigfile2')
remote.check_call('rm /root/bigfile /root/bigfile2')
run_on_remote_get_results(
remote,
remote.check_call(
'crm node status-attr {} delete "#health_disk"'.format(
self.primary_controller_fqdn))
@ -346,9 +342,8 @@ class FillRootActions(object):
"Checking for '#health_disk' attribute "
"is not present on node {}".format(
self.primary_controller_fqdn))
cibadmin_status_xml = run_on_remote_get_results(
remote, 'cibadmin --query --scope status')[
'stdout_str']
cibadmin_status_xml = remote.check_call(
'cibadmin --query --scope status').stdout_str
pcs_attribs = get_pacemaker_nodes_attributes(
cibadmin_status_xml)
return '#health_disk' not in pcs_attribs[