more probe test refactoring

* move get_to_final_state into ProbeTest
* get rid of kill_servers
* add replicators manager and updaters manager to ProbeTest

(this is all going someplace, i promise)

Change-Id: I8393a2ebc0d04051cae48cc3c49580f70818dbf2
Leah Klearman 2015-02-13 16:55:45 -08:00
parent 9196c31f64
commit ca0fce8542
9 changed files with 41 additions and 47 deletions
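
In short: the module-level get_to_final_state() and kill_servers() helpers leave test/probe/common.py, and ProbeTest grows self.replicators and self.updaters managers plus a get_to_final_state() method. A minimal sketch of the resulting usage (the test class below is a hypothetical illustration, not part of this commit):

from test.probe.common import ReplProbeTest


class TestExample(ReplProbeTest):

    def test_settles(self):
        # Formerly the module-level get_to_final_state(): run the
        # replicators, then the updaters, then the replicators again.
        self.get_to_final_state()
        # The managers built in ProbeTest.setUp() are also available
        # directly for single passes:
        self.replicators.once()
        self.updaters.once()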

.gitignore

@@ -14,3 +14,5 @@ ChangeLog
 pycscope.*
 .idea
 MANIFEST
+
+test/probe/.noseids

test/probe/common.py

@@ -124,10 +124,6 @@ def kill_server(port, port2server, pids):
         sleep(0.1)
 
 
-def kill_servers(port2server, pids):
-    Manager(['all']).kill()
-
-
 def kill_nonprimary_server(primary_nodes, port2server, pids):
     primary_ports = [n['port'] for n in primary_nodes]
     for port, server in port2server.iteritems():
@@ -217,18 +213,6 @@ def get_policy(**kwargs):
     raise SkipTest('No policy matching %s' % kwargs)
 
 
-def get_to_final_state():
-    replicators = Manager(['account-replicator', 'container-replicator',
-                           'object-replicator'])
-    replicators.stop()
-    updaters = Manager(['container-updater', 'object-updater'])
-    updaters.stop()
-    replicators.once()
-    updaters.once()
-    replicators.once()
-
-
 class ProbeTest(unittest.TestCase):
     """
     Don't instantiate this directly, use a child class instead.
@@ -273,17 +257,31 @@ class ProbeTest(unittest.TestCase):
             for server in Manager([server_name]):
                 for i, conf in enumerate(server.conf_files(), 1):
                     self.configs[server.server][i] = conf
+            self.replicators = Manager(
+                ['account-replicator', 'container-replicator',
+                 'object-replicator'])
+            self.updaters = Manager(['container-updater', 'object-updater'])
         except BaseException:
             try:
                 raise
             finally:
                 try:
-                    kill_servers(self.port2server, self.pids)
+                    Manager(['all']).kill()
                 except Exception:
                     pass
 
     def tearDown(self):
-        kill_servers(self.port2server, self.pids)
+        Manager(['all']).kill()
+
+    def get_to_final_state(self):
+        # these .stop()s are probably not strictly necessary,
+        # but may prevent race conditions
+        self.replicators.stop()
+        self.updaters.stop()
+
+        self.replicators.once()
+        self.updaters.once()
+        self.replicators.once()
 
 
 class ReplProbeTest(ProbeTest):
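
Two details of the new ProbeTest code above are worth noting. The trailing self.replicators.once() presumably exists so that container and account rows generated by the updater pass are themselves replicated, and setUp() re-raises inside a finally block so that a failure of the Manager(['all']).kill() cleanup cannot mask the original setup exception. A standalone sketch of that re-raise idiom (the helper names here are hypothetical stand-ins, not Swift code):

def risky_initialization():
    raise RuntimeError('setup failed')  # stands in for setUp()'s real body


def best_effort_cleanup():
    pass  # stands in for Manager(['all']).kill()


def setup_with_cleanup():
    try:
        risky_initialization()
    except BaseException:
        try:
            raise  # re-raise the original exception...
        finally:
            try:
                best_effort_cleanup()
            except Exception:
                pass  # ...even if cleanup itself fails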

test/probe/test_account_failures.py

@@ -20,7 +20,7 @@ from swiftclient import client
 from swift.common import direct_client
 from swift.common.manager import Manager
-from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+from test.probe.common import kill_nonprimary_server, \
     kill_server, ReplProbeTest, start_server
@@ -75,7 +75,7 @@ class TestAccountFailures(ReplProbeTest):
         self.assert_(found2)
 
         # Get to final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert account level now sees the container2/object1
         headers, containers = client.get_account(self.url, self.token)
@@ -168,7 +168,7 @@ class TestAccountFailures(ReplProbeTest):
         self.assert_(found2)
 
         # Get to final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert that server is now up to date
         headers, containers = \

test/probe/test_account_reaper.py

@@ -21,8 +21,7 @@ from swift.common.storage_policy import POLICIES
 from swift.common.manager import Manager
 from swift.common.direct_client import direct_delete_account, \
     direct_get_object, direct_head_container, ClientException
-from test.probe.common import ReplProbeTest, \
-    get_to_final_state, ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 
 
 class TestAccountReaper(ReplProbeTest):
@@ -56,7 +55,7 @@ class TestAccountReaper(ReplProbeTest):
         Manager(['account-reaper']).once()
 
-        get_to_final_state()
+        self.get_to_final_state()
 
         for policy, container, obj in all_objects:
             cpart, cnodes = self.container_ring.get_nodes(

test/probe/test_container_failures.py

@@ -27,7 +27,7 @@ from swiftclient import client
 from swift.common import direct_client
 from swift.common.exceptions import ClientException
 from swift.common.utils import hash_path, readconf
-from test.probe.common import get_to_final_state, kill_nonprimary_server, \
+from test.probe.common import kill_nonprimary_server, \
     kill_server, ReplProbeTest, start_server
 
 eventlet.monkey_patch(all=False, socket=True)
@@ -63,7 +63,7 @@ class TestContainerFailures(ReplProbeTest):
         client.put_object(self.url, self.token, container1, 'object1', '123')
 
         # Get to a final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert all container1 servers indicate container1 is alive and
         # well with object1
@@ -101,7 +101,7 @@ class TestContainerFailures(ReplProbeTest):
         start_server(cnp_port, self.port2server, self.pids)
 
         # Get to a final state
-        get_to_final_state()
+        self.get_to_final_state()
 
         # Assert all container1 servers indicate container1 is gone (happens
         # because the one node that knew about the delete replicated to the

test/probe/test_container_merge_policy_index.py

@@ -26,8 +26,7 @@ from swift.common import utils, direct_client
 from swift.common.storage_policy import POLICIES
 from swift.common.http import HTTP_NOT_FOUND
 from test.probe.brain import BrainSplitter
-from test.probe.common import ReplProbeTest, get_to_final_state, \
-    ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 from swiftclient import client, ClientException
@@ -91,7 +90,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # validate containers
         head_responses = []
@@ -196,7 +195,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
            self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # validate containers
         head_responses = []
@@ -313,7 +312,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
                break  # one should do it...
 
         self.brain.start_handoff_half()
-        get_to_final_state()
+        self.get_to_final_state()
         Manager(['container-reconciler']).once()
         # clear proxy cache
         client.post_container(self.url, self.token, self.container_name, {})
@@ -424,7 +423,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
 
-        get_to_final_state()
+        self.get_to_final_state()
 
         # verify entry in the queue
         client = InternalClient(conf_file, 'probe-test', 3)
@@ -448,7 +447,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
 
         # make sure the queue is settled
-        get_to_final_state()
+        self.get_to_final_state()
         for container in client.iter_containers('.misplaced_objects'):
             for obj in client.iter_objects('.misplaced_objects',
                                            container['name']):

test/probe/test_object_expirer.py

@@ -22,8 +22,7 @@ from swift.common.internal_client import InternalClient
 from swift.common.manager import Manager
 from swift.common.utils import Timestamp
-from test.probe.common import ReplProbeTest, get_to_final_state, \
-    ENABLED_POLICIES
+from test.probe.common import ReplProbeTest, ENABLED_POLICIES
 from test.probe.test_container_merge_policy_index import BrainSplitter
 from swiftclient import client
@@ -80,7 +79,7 @@ class TestObjectExpirer(ReplProbeTest):
         self.expirer.once()
 
         self.brain.start_handoff_half()
-        get_to_final_state()
+        self.get_to_final_state()
 
         # validate object is expired
         found_in_policy = None

test/probe/test_object_metadata_replication.py

@@ -26,7 +26,7 @@ import uuid
 from swift.common import internal_client, utils
 from test.probe.brain import BrainSplitter
-from test.probe.common import ReplProbeTest, get_to_final_state
+from test.probe.common import ReplProbeTest
 
 
 def _sync_methods(object_server_config_paths):
@@ -143,7 +143,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check object deletion has been replicated on first server set
         self.brain.stop_primary_half()
@@ -159,7 +159,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check new object has been replicated on first server set
         self.brain.stop_primary_half()
@@ -198,7 +198,7 @@ class Test(ReplProbeTest):
         self.brain.start_handoff_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check user metadata has been replicated to first server subset
         # and sysmeta is unchanged
@@ -244,7 +244,7 @@ class Test(ReplProbeTest):
         self.brain.start_primary_half()
 
         # run replicator
-        get_to_final_state()
+        self.get_to_final_state()
 
         # check stale user metadata is not replicated to first server subset
         # and sysmeta is unchanged

test/probe/test_replicator_functions.py

@@ -26,7 +26,6 @@ from swift.obj.diskfile import get_data_dir
 from test.probe.common import ReplProbeTest
 from swift.common.utils import readconf
-from swift.common.manager import Manager
 
 
 def collect_info(path_list):
@@ -120,8 +119,7 @@ class TestReplicatorFunctions(ReplProbeTest):
                 test_node_dir_list.append(d)
         # Run all replicators
         try:
-            Manager(['object-replicator', 'container-replicator',
-                     'account-replicator']).start()
+            self.replicators.start()
 
             # Delete some files
             for directory in os.listdir(test_node):
@@ -195,8 +193,7 @@ class TestReplicatorFunctions(ReplProbeTest):
                    raise
                time.sleep(1)
         finally:
-            Manager(['object-replicator', 'container-replicator',
-                     'account-replicator']).stop()
+            self.replicators.stop()
 
 
if __name__ == '__main__':