Do not sync time after restarting a node

We have a weird bug where the HA mechanism can spawn two ntpd
processes at the same time. Time actually needs to be synced strictly
only after reverting a snapshot, so drop the sync on node restart.

Change-Id: Ic73eb942eaacb436629841f9b12df844f3ac736d
Related-Bug: #1656020
This commit is contained in:
Vladimir Khlyunev 2017-03-30 14:21:52 +04:00
parent e1488101c5
commit 0c0a935969
4 changed files with 6 additions and 1 deletion

View File

@ -2112,7 +2112,6 @@ class FuelWebClient29(object):
if wait_online:
for node in devops_nodes:
self.wait_node_is_online(node, timeout=timeout)
self.environment.sync_time()
@logwrap
def ip_address_show(self, node_name, interface, namespace=None):

View File

@ -921,6 +921,7 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.show_step(10, node)
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)
self.show_step(11, node)
@ -944,6 +945,7 @@ class CheckCephPartitionsAfterReboot(TestBasic):
self.show_step(13, node)
logger.info("Check Ceph health is ok after reboot")
self.fuel_web.assert_ha_services_ready(cluster_id)
self.fuel_web.check_ceph_status(cluster_id)

View File

@ -479,7 +479,9 @@ class TestCgroupHa(TestBasic):
self.show_step(3)
target_controller = self.fuel_web.get_nailgun_primary_node(
self.fuel_web.get_devops_node_by_nailgun_node(n_ctrls[0]))
self.fuel_web.cold_restart_nodes([target_controller])
self.fuel_web.assert_ha_services_ready(cluster_id)
self.show_step(4)
self.check_cgroups_on_node(n_ctrls[0], cgroups)

View File

@ -451,6 +451,8 @@ class SupportHugepages(TestBasic):
self.fuel_web.get_nailgun_node_by_name(mixed_host))
self.fuel_web.cold_restart_nodes([target_compute])
self.fuel_web.assert_ha_services_ready(cluster_id)
self.boot_instance_with_hugepage(
target_compute_name=mixed_host,
flavor_name="h1.small_mixed.hpgs",