From 9c33bbde6923b26f111572ae967a3b97a8ab12f2 Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Tue, 20 Jan 2015 12:14:32 +0530
Subject: [PATCH 01/98] Allow rsync to use compression
From rsync's man page:
-z, --compress
With this option, rsync compresses the file data as it is sent to the
destination machine, which reduces the amount of data being transmitted --
something that is useful over a slow connection.
A configurable option has been added to allow rsync to compress, but only
if the remote node is in a different region than the local one.
NOTE: Objects that are already compressed (for example: .tar.gz, .mp3)
might slow down the syncing process.
On wire compression can also be extended to ssync later in a different
change if required. In case of ssync, we could explore faster
compression libraries like lz4. rsync uses zlib which is slow but offers
higher compression ratio.
Change-Id: Ic9b9cbff9b5e68bef8257b522cc352fc3544db3c
Signed-off-by: Prashanth Pai
---
etc/account-server.conf-sample | 5 +++
etc/container-server.conf-sample | 5 +++
etc/object-server.conf-sample | 7 +++
swift/common/db_replicator.py | 62 +++++++++++++++++++++-----
swift/container/replicator.py | 5 ++-
swift/obj/replicator.py | 7 +++
test/unit/common/test_db_replicator.py | 59 +++++++++++++++++++-----
test/unit/obj/test_replicator.py | 37 +++++++++++++++
8 files changed, 163 insertions(+), 24 deletions(-)
diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample
index 6a7fcb929b..98c97acf6f 100644
--- a/etc/account-server.conf-sample
+++ b/etc/account-server.conf-sample
@@ -114,6 +114,11 @@ use = egg:swift#recon
# of run_pause.
# run_pause = 30
#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
# recon_cache_path = /var/cache/swift
[account-auditor]
diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample
index de511368ad..7405a3d250 100644
--- a/etc/container-server.conf-sample
+++ b/etc/container-server.conf-sample
@@ -115,6 +115,11 @@ use = egg:swift#recon
# of run_pause.
# run_pause = 30
#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# rsync_compress = no
+#
# recon_cache_path = /var/cache/swift
[container-updater]
diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
index b594a9576f..933f30f2f1 100644
--- a/etc/object-server.conf-sample
+++ b/etc/object-server.conf-sample
@@ -174,6 +174,13 @@ use = egg:swift#recon
# passed to rsync for io op timeout
# rsync_io_timeout = 30
#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
+# slow down the syncing process.
+# rsync_compress = no
+#
# node_timeout =
# max duration of an http request; this is for REPLICATE finalization calls and
# so should be longer than node_timeout
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index e456beed75..334cf74347 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -167,6 +167,8 @@ class Replicator(Daemon):
self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
+ self.rsync_compress = config_true_value(
+ conf.get('rsync_compress', 'no'))
self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
@@ -209,13 +211,16 @@ class Replicator(Daemon):
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
'empty', 'diff_capped')]))
- def _rsync_file(self, db_file, remote_file, whole_file=True):
+ def _rsync_file(self, db_file, remote_file, whole_file=True,
+ different_region=False):
"""
Sync a single file using rsync. Used by _rsync_db to handle syncing.
:param db_file: file to be synced
:param remote_file: remote location to sync the DB file to
:param whole-file: if True, uses rsync's --whole-file flag
+ :param different_region: if True, the destination node is in a
+ different region
:returns: True if the sync was successful, False otherwise
"""
@@ -224,6 +229,12 @@ class Replicator(Daemon):
'--contimeout=%s' % int(math.ceil(self.conn_timeout))]
if whole_file:
popen_args.append('--whole-file')
+
+ if self.rsync_compress and different_region:
+ # Allow for compression, but only if the remote node is in
+ # a different region than the local one.
+ popen_args.append('--compress')
+
popen_args.extend([db_file, remote_file])
proc = subprocess.Popen(popen_args)
proc.communicate()
@@ -233,7 +244,8 @@ class Replicator(Daemon):
return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
- replicate_method='complete_rsync', replicate_timeout=None):
+ replicate_method='complete_rsync', replicate_timeout=None,
+ different_region=False):
"""
Sync a whole db using rsync.
@@ -243,6 +255,8 @@ class Replicator(Daemon):
:param local_id: unique ID of the local database replica
:param replicate_method: remote operation to perform after rsync
:param replicate_timeout: timeout to wait in seconds
+ :param different_region: if True, the destination node is in a
+ different region
"""
device_ip = rsync_ip(device['replication_ip'])
if self.vm_test_mode:
@@ -253,14 +267,17 @@ class Replicator(Daemon):
remote_file = '%s::%s/%s/tmp/%s' % (
device_ip, self.server_type, device['device'], local_id)
mtime = os.path.getmtime(broker.db_file)
- if not self._rsync_file(broker.db_file, remote_file):
+ if not self._rsync_file(broker.db_file, remote_file,
+ different_region=different_region):
return False
# perform block-level sync if the db was modified during the first sync
if os.path.exists(broker.db_file + '-journal') or \
os.path.getmtime(broker.db_file) > mtime:
# grab a lock so nobody else can modify it
with broker.lock():
- if not self._rsync_file(broker.db_file, remote_file, False):
+ if not self._rsync_file(broker.db_file, remote_file,
+ whole_file=False,
+ different_region=different_region):
return False
with Timeout(replicate_timeout or self.node_timeout):
response = http.replicate(replicate_method, local_id)
@@ -363,7 +380,8 @@ class Replicator(Daemon):
'put_timestamp', 'delete_timestamp', 'metadata')
return tuple(info[key] for key in sync_args_order)
- def _repl_to_node(self, node, broker, partition, info):
+ def _repl_to_node(self, node, broker, partition, info,
+ different_region=False):
"""
Replicate a database to a node.
@@ -373,6 +391,8 @@ class Replicator(Daemon):
:param info: DB info as a dictionary of {'max_row', 'hash', 'id',
'created_at', 'put_timestamp', 'delete_timestamp',
'metadata'}
+ :param different_region: if True, the destination node is in a
+ different region
:returns: True if successful, False otherwise
"""
@@ -382,13 +402,16 @@ class Replicator(Daemon):
response = http.replicate('sync', *sync_args)
if not response:
return False
- return self._handle_sync_response(node, response, info, broker, http)
+ return self._handle_sync_response(node, response, info, broker, http,
+ different_region=different_region)
- def _handle_sync_response(self, node, response, info, broker, http):
+ def _handle_sync_response(self, node, response, info, broker, http,
+ different_region=False):
if response.status == HTTP_NOT_FOUND: # completely missing, rsync
self.stats['rsync'] += 1
self.logger.increment('rsyncs')
- return self._rsync_db(broker, node, http, info['id'])
+ return self._rsync_db(broker, node, http, info['id'],
+ different_region=different_region)
elif response.status == HTTP_INSUFFICIENT_STORAGE:
raise DriveNotMounted()
elif response.status >= 200 and response.status < 300:
@@ -403,7 +426,8 @@ class Replicator(Daemon):
self.logger.increment('remote_merges')
return self._rsync_db(broker, node, http, info['id'],
replicate_method='rsync_then_merge',
- replicate_timeout=(info['count'] / 2000))
+ replicate_timeout=(info['count'] / 2000),
+ different_region=different_region)
# else send diffs over to the remote server
return self._usync_db(max(rinfo['point'], local_sync),
broker, http, rinfo['id'], info['id'])
@@ -470,6 +494,11 @@ class Replicator(Daemon):
return
responses = []
nodes = self.ring.get_part_nodes(int(partition))
+ local_dev = None
+ for node in nodes:
+ if node['id'] == node_id:
+ local_dev = node
+ break
if shouldbehere:
shouldbehere = bool([n for n in nodes if n['id'] == node_id])
# See Footnote [1] for an explanation of the repl_nodes assignment.
@@ -478,10 +507,23 @@ class Replicator(Daemon):
i += 1
repl_nodes = nodes[i + 1:] + nodes[:i]
more_nodes = self.ring.get_more_nodes(int(partition))
+ if not local_dev:
+ # Check further if local device is a handoff node
+ for node in more_nodes:
+ if node['id'] == node_id:
+ local_dev = node
+ break
for node in repl_nodes:
+ different_region = False
+ if local_dev and local_dev['region'] != node['region']:
+ # This additional information will help later if we
+ # want to handle syncing to a node in different
+ # region with some optimizations.
+ different_region = True
success = False
try:
- success = self._repl_to_node(node, broker, partition, info)
+ success = self._repl_to_node(node, broker, partition, info,
+ different_region)
except DriveNotMounted:
repl_nodes.append(more_nodes.next())
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
diff --git a/swift/container/replicator.py b/swift/container/replicator.py
index 8974535251..8d3bfce7f8 100644
--- a/swift/container/replicator.py
+++ b/swift/container/replicator.py
@@ -59,7 +59,8 @@ class ContainerReplicator(db_replicator.Replicator):
'storage_policy_index'))
return sync_args
- def _handle_sync_response(self, node, response, info, broker, http):
+ def _handle_sync_response(self, node, response, info, broker, http,
+ different_region):
parent = super(ContainerReplicator, self)
if is_success(response.status):
remote_info = json.loads(response.data)
@@ -74,7 +75,7 @@ class ContainerReplicator(db_replicator.Replicator):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
rv = parent._handle_sync_response(
- node, response, info, broker, http)
+ node, response, info, broker, http, different_region)
return rv
def find_local_handoff_for_part(self, part):
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index b3df0ce28f..eb65eb3879 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -76,6 +76,8 @@ class ObjectReplicator(Daemon):
self.rsync_timeout = int(conf.get('rsync_timeout', 900))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
+ self.rsync_compress = config_true_value(
+ conf.get('rsync_compress', 'no'))
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
@@ -183,6 +185,11 @@ class ObjectReplicator(Daemon):
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
]
+ if self.rsync_compress and \
+ job['region'] != node['region']:
+ # Allow for compression, but only if the remote node is in
+ # a different region than the local one.
+ args.append('--compress')
node_ip = rsync_ip(node['replication_ip'])
if self.vm_test_mode:
rsync_module = '%s::object%s' % (node_ip, node['replication_port'])
diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py
index 0f3cc72e94..e50aa68dae 100644
--- a/test/unit/common/test_db_replicator.py
+++ b/test/unit/common/test_db_replicator.py
@@ -92,22 +92,23 @@ class FakeRingWithNodes(object):
class Ring(object):
devs = [dict(
id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6000, device='sdb',
- meta=''
+ meta='', replication_ip='1.1.1.1', replication_port=6000, region=1
), dict(
id=2, weight=10.0, zone=2, ip='1.1.1.2', port=6000, device='sdb',
- meta=''
+ meta='', replication_ip='1.1.1.2', replication_port=6000, region=2
), dict(
id=3, weight=10.0, zone=3, ip='1.1.1.3', port=6000, device='sdb',
- meta=''
+ meta='', replication_ip='1.1.1.3', replication_port=6000, region=1
), dict(
id=4, weight=10.0, zone=4, ip='1.1.1.4', port=6000, device='sdb',
- meta=''
+ meta='', replication_ip='1.1.1.4', replication_port=6000, region=2
), dict(
id=5, weight=10.0, zone=5, ip='1.1.1.5', port=6000, device='sdb',
- meta=''
+ meta='', replication_ip='1.1.1.5', replication_port=6000, region=1
), dict(
id=6, weight=10.0, zone=6, ip='1.1.1.6', port=6000, device='sdb',
- meta='')]
+ meta='', replication_ip='1.1.1.6', replication_port=6000, region=2
+ )]
def __init__(self, path, reload_time=15, ring_name=None):
pass
@@ -334,9 +335,26 @@ class TestDBReplicator(unittest.TestCase):
'/some/file', 'remote:/some_file'],)
self.assertEqual(exp_args, process.args)
+ def test_rsync_file_popen_args_different_region_and_rsync_compress(self):
+ replicator = TestReplicator({})
+ for rsync_compress in (False, True):
+ replicator.rsync_compress = rsync_compress
+ for different_region in (False, True):
+ with _mock_process(0) as process:
+ replicator._rsync_file('/some/file', 'remote:/some_file',
+ False, different_region)
+ if rsync_compress and different_region:
+ # --compress arg should be passed to rsync binary
+ # only when rsync_compress option is enabled
+ # AND destination node is in a different
+ # region
+ self.assertTrue('--compress' in process.args[0])
+ else:
+ self.assertFalse('--compress' in process.args[0])
+
def test_rsync_db(self):
replicator = TestReplicator({})
- replicator._rsync_file = lambda *args: True
+ replicator._rsync_file = lambda *args, **kwargs: True
fake_device = {'replication_ip': '127.0.0.1', 'device': 'sda1'}
replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd')
@@ -355,7 +373,8 @@ class TestDBReplicator(unittest.TestCase):
self.db_file = db_file
self.remote_file = remote_file
- def _rsync_file(self_, db_file, remote_file, whole_file=True):
+ def _rsync_file(self_, db_file, remote_file, whole_file=True,
+ different_region=False):
self.assertEqual(self_.db_file, db_file)
self.assertEqual(self_.remote_file, remote_file)
self_._rsync_file_called = True
@@ -403,7 +422,8 @@ class TestDBReplicator(unittest.TestCase):
self.broker = broker
self._rsync_file_call_count = 0
- def _rsync_file(self_, db_file, remote_file, whole_file=True):
+ def _rsync_file(self_, db_file, remote_file, whole_file=True,
+ different_region=False):
self_._rsync_file_call_count += 1
if self_._rsync_file_call_count == 1:
self.assertEquals(True, whole_file)
@@ -630,6 +650,20 @@ class TestDBReplicator(unittest.TestCase):
[(('Found /path/to/file for /a%20c%20t/c%20o%20n when it should '
'be on partition 0; will replicate out and remove.',), {})])
+ def test_replicate_object_different_region(self):
+ db_replicator.ring = FakeRingWithNodes()
+ replicator = TestReplicator({})
+ replicator._repl_to_node = mock.Mock()
+ # For node_id = 1, one replica in same region(1) and other is in a
+ # different region(2). Refer: FakeRingWithNodes
+ replicator._replicate_object('0', '/path/to/file', 1)
+ # different_region was set True and passed to _repl_to_node()
+ self.assertEqual(replicator._repl_to_node.call_args_list[0][0][-1],
+ True)
+ # different_region was set False and passed to _repl_to_node()
+ self.assertEqual(replicator._repl_to_node.call_args_list[1][0][-1],
+ False)
+
def test_delete_db(self):
db_replicator.lock_parent_directory = lock_parent_directory
replicator = TestReplicator({}, logger=unit.FakeLogger())
@@ -1202,7 +1236,8 @@ class TestReplToNode(unittest.TestCase):
mock.call(self.broker, self.fake_node, self.http,
self.fake_info['id'],
replicate_method='rsync_then_merge',
- replicate_timeout=(self.fake_info['count'] / 2000))
+ replicate_timeout=(self.fake_info['count'] / 2000),
+ different_region=False)
])
def test_repl_to_node_already_in_sync(self):
@@ -1217,13 +1252,13 @@ class TestReplToNode(unittest.TestCase):
def test_repl_to_node_not_found(self):
self.http = ReplHttp('{"id": 3, "point": -1}', set_status=404)
self.assertEquals(self.replicator._repl_to_node(
- self.fake_node, self.broker, '0', self.fake_info), True)
+ self.fake_node, self.broker, '0', self.fake_info, False), True)
self.replicator.logger.increment.assert_has_calls([
mock.call.increment('rsyncs')
])
self.replicator._rsync_db.assert_has_calls([
mock.call(self.broker, self.fake_node, self.http,
- self.fake_info['id'])
+ self.fake_info['id'], different_region=False)
])
def test_repl_to_node_drive_not_mounted(self):
diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
index bf1c5bcb52..0bb86794ee 100644
--- a/test/unit/obj/test_replicator.py
+++ b/test/unit/obj/test_replicator.py
@@ -1116,6 +1116,43 @@ class TestObjectReplicator(unittest.TestCase):
'/a83', headers=self.headers))
mock_http.assert_has_calls(reqs, any_order=True)
+ def test_rsync_compress_different_region(self):
+ self.assertEqual(self.replicator.sync_method, self.replicator.rsync)
+ jobs = self.replicator.collect_jobs()
+ _m_rsync = mock.Mock(return_value=0)
+ _m_os_path_exists = mock.Mock(return_value=True)
+ with mock.patch.object(self.replicator, '_rsync', _m_rsync):
+ with mock.patch('os.path.exists', _m_os_path_exists):
+ for job in jobs:
+ self.assertTrue('region' in job)
+ for node in job['nodes']:
+ for rsync_compress in (True, False):
+ self.replicator.rsync_compress = rsync_compress
+ ret = \
+ self.replicator.sync(node, job,
+ ['fake_suffix'])
+ self.assertTrue(ret)
+ if node['region'] != job['region']:
+ if rsync_compress:
+ # --compress arg should be passed to rsync
+ # binary only when rsync_compress option is
+ # enabled AND destination node is in a
+ # different region
+ self.assertTrue('--compress' in
+ _m_rsync.call_args[0][0])
+ else:
+ self.assertFalse('--compress' in
+ _m_rsync.call_args[0][0])
+ else:
+ self.assertFalse('--compress' in
+ _m_rsync.call_args[0][0])
+ self.assertEqual(
+ _m_os_path_exists.call_args_list[-1][0][0],
+ os.path.join(job['path'], 'fake_suffix'))
+ self.assertEqual(
+ _m_os_path_exists.call_args_list[-2][0][0],
+ os.path.join(job['path']))
+
if __name__ == '__main__':
unittest.main()
From 38ae7bb89c95292c171cb89c1d62859cee0cc24f Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Wed, 15 Apr 2015 23:03:21 +0100
Subject: [PATCH 02/98] Make the reaper use same timestamp for replica deletes
The account reaper is using a unique timestamp when deleting
replicas of the same resource. This will result in unnecessary
replication traffic after reaping.
This patch makes the reaper use a single timestamp per resource.
Probe test is modified to check that delete times are equal
across replicas before replicators run.
test_direct_client.py is modified to check that it uses explicit
timestamp when passed to direct_delete_[object|container]
methods.
Drive-by bug fixes in the probe test e.g. it was not sending
X-Backend-Storage-Policy-Index when doing a direct GET to check
object state, so the 404s being verified could in fact be due
to diskfile not existing rather than diskfile being deleted.
Closes-Bug: 1442879
Change-Id: I8bab22d66308bb9d3294e1e0def017c784228423
---
swift/account/reaper.py | 9 +++-
swift/common/direct_client.py | 3 +-
test/probe/test_account_reaper.py | 73 ++++++++++++++++++++++++--
test/unit/account/test_reaper.py | 71 +++++++++++++++++--------
test/unit/common/test_direct_client.py | 26 +++++++++
5 files changed, 153 insertions(+), 29 deletions(-)
diff --git a/swift/account/reaper.py b/swift/account/reaper.py
index 06a0085352..9eaee561ec 100644
--- a/swift/account/reaper.py
+++ b/swift/account/reaper.py
@@ -376,6 +376,7 @@ class AccountReaper(Daemon):
break
successes = 0
failures = 0
+ timestamp = Timestamp(time())
for node in nodes:
anode = account_nodes.pop()
try:
@@ -386,7 +387,8 @@ class AccountReaper(Daemon):
headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
'X-Account-Partition': str(account_partition),
'X-Account-Device': anode['device'],
- 'X-Account-Override-Deleted': 'yes'})
+ 'X-Account-Override-Deleted': 'yes',
+ 'X-Timestamp': timestamp.internal})
successes += 1
self.stats_return_codes[2] = \
self.stats_return_codes.get(2, 0) + 1
@@ -443,6 +445,8 @@ class AccountReaper(Daemon):
part, nodes = ring.get_nodes(account, container, obj)
successes = 0
failures = 0
+ timestamp = Timestamp(time())
+
for node in nodes:
cnode = next(cnodes)
try:
@@ -453,7 +457,8 @@ class AccountReaper(Daemon):
headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode,
'X-Container-Partition': str(container_partition),
'X-Container-Device': cnode['device'],
- 'X-Backend-Storage-Policy-Index': policy_index})
+ 'X-Backend-Storage-Policy-Index': policy_index,
+ 'X-Timestamp': timestamp.internal})
successes += 1
self.stats_return_codes[2] = \
self.stats_return_codes.get(2, 0) + 1
diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py
index 35ca24a64c..dd36e3b499 100644
--- a/swift/common/direct_client.py
+++ b/swift/common/direct_client.py
@@ -204,10 +204,11 @@ def direct_delete_container(node, part, account, container, conn_timeout=5,
headers = {}
path = '/%s/%s' % (account, container)
+ add_timestamp = 'x-timestamp' not in (k.lower() for k in headers)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path,
- headers=gen_headers(headers, True))
+ headers=gen_headers(headers, add_timestamp))
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
diff --git a/test/probe/test_account_reaper.py b/test/probe/test_account_reaper.py
index 8368a59206..7da9dcd39d 100644
--- a/test/probe/test_account_reaper.py
+++ b/test/probe/test_account_reaper.py
@@ -53,33 +53,96 @@ class TestAccountReaper(ReplProbeTest):
for node in nodes:
direct_delete_account(node, part, self.account)
+ # run the reaper
Manager(['account-reaper']).once()
- self.get_to_final_state()
-
for policy, container, obj in all_objects:
+ # verify that any container deletes were at same timestamp
cpart, cnodes = self.container_ring.get_nodes(
self.account, container)
+ delete_times = set()
for cnode in cnodes:
try:
direct_head_container(cnode, cpart, self.account,
container)
except ClientException as err:
self.assertEquals(err.http_status, 404)
+ delete_time = err.http_headers.get(
+ 'X-Backend-DELETE-Timestamp')
+ # 'X-Backend-DELETE-Timestamp' confirms it was deleted
+ self.assertTrue(delete_time)
+ delete_times.add(delete_time)
+
else:
- self.fail('Found un-reaped /%s/%s on %r' %
- (self.account, container, node))
+ # Container replicas may not yet be deleted if we have a
+ # policy with object replicas < container replicas, so
+ # ignore successful HEAD. We'll check for all replicas to
+ # be deleted again after running the replicators.
+ pass
+ self.assertEqual(1, len(delete_times), delete_times)
+
+ # verify that all object deletes were at same timestamp
object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
part, nodes = object_ring.get_nodes(self.account, container, obj)
+ headers = {'X-Backend-Storage-Policy-Index': int(policy)}
+ delete_times = set()
for node in nodes:
try:
direct_get_object(node, part, self.account,
- container, obj)
+ container, obj, headers=headers)
except ClientException as err:
self.assertEquals(err.http_status, 404)
+ delete_time = err.http_headers.get('X-Backend-Timestamp')
+ # 'X-Backend-Timestamp' confirms obj was deleted
+ self.assertTrue(delete_time)
+ delete_times.add(delete_time)
else:
self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
(self.account, container, obj, node, policy))
+ self.assertEqual(1, len(delete_times))
+
+ # run replicators and updaters
+ self.get_to_final_state()
+
+ for policy, container, obj in all_objects:
+ # verify that ALL container replicas are now deleted
+ cpart, cnodes = self.container_ring.get_nodes(
+ self.account, container)
+ delete_times = set()
+ for cnode in cnodes:
+ try:
+ direct_head_container(cnode, cpart, self.account,
+ container)
+ except ClientException as err:
+ self.assertEquals(err.http_status, 404)
+ delete_time = err.http_headers.get(
+ 'X-Backend-DELETE-Timestamp')
+ # 'X-Backend-DELETE-Timestamp' confirms it was deleted
+ self.assertTrue(delete_time)
+ delete_times.add(delete_time)
+ else:
+ self.fail('Found un-reaped /%s/%s on %r' %
+ (self.account, container, cnode))
+
+ # sanity check that object state is still consistent...
+ object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
+ part, nodes = object_ring.get_nodes(self.account, container, obj)
+ headers = {'X-Backend-Storage-Policy-Index': int(policy)}
+ delete_times = set()
+ for node in nodes:
+ try:
+ direct_get_object(node, part, self.account,
+ container, obj, headers=headers)
+ except ClientException as err:
+ self.assertEquals(err.http_status, 404)
+ delete_time = err.http_headers.get('X-Backend-Timestamp')
+ # 'X-Backend-Timestamp' confirms obj was deleted
+ self.assertTrue(delete_time)
+ delete_times.add(delete_time)
+ else:
+ self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
+ (self.account, container, obj, node, policy))
+ self.assertEqual(1, len(delete_times))
if __name__ == "__main__":
diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py
index d81b565fc4..b413a646a1 100644
--- a/test/unit/account/test_reaper.py
+++ b/test/unit/account/test_reaper.py
@@ -278,30 +278,34 @@ class TestReaper(unittest.TestCase):
'mount_check': 'false',
}
r = reaper.AccountReaper(conf, logger=unit.debug_logger())
- ring = unit.FakeRing()
mock_path = 'swift.account.reaper.direct_delete_object'
for policy in POLICIES:
r.reset_stats()
with patch(mock_path) as fake_direct_delete:
- r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
- policy.idx)
- for i, call_args in enumerate(
- fake_direct_delete.call_args_list):
- cnode = cont_nodes[i % len(cont_nodes)]
- host = '%(ip)s:%(port)s' % cnode
- device = cnode['device']
- headers = {
- 'X-Container-Host': host,
- 'X-Container-Partition': 'partition',
- 'X-Container-Device': device,
- 'X-Backend-Storage-Policy-Index': policy.idx
- }
- ring = r.get_object_ring(policy.idx)
- expected = call(dict(ring.devs[i], index=i), 0,
- 'a', 'c', 'o',
- headers=headers, conn_timeout=0.5,
- response_timeout=10)
- self.assertEqual(call_args, expected)
+ with patch('swift.account.reaper.time') as mock_time:
+ mock_time.return_value = 1429117638.86767
+ r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
+ policy.idx)
+ mock_time.assert_called_once_with()
+ for i, call_args in enumerate(
+ fake_direct_delete.call_args_list):
+ cnode = cont_nodes[i % len(cont_nodes)]
+ host = '%(ip)s:%(port)s' % cnode
+ device = cnode['device']
+ headers = {
+ 'X-Container-Host': host,
+ 'X-Container-Partition': 'partition',
+ 'X-Container-Device': device,
+ 'X-Backend-Storage-Policy-Index': policy.idx,
+ 'X-Timestamp': '1429117638.86767'
+ }
+ ring = r.get_object_ring(policy.idx)
+ expected = call(dict(ring.devs[i], index=i), 0,
+ 'a', 'c', 'o',
+ headers=headers, conn_timeout=0.5,
+ response_timeout=10)
+ self.assertEqual(call_args, expected)
+ self.assertEqual(policy.object_ring.replicas - 1, i)
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas)
@@ -366,7 +370,11 @@ class TestReaper(unittest.TestCase):
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
- r.reap_container('a', 'partition', acc_nodes, 'c')
+ with patch('swift.account.reaper.time') as mock_time:
+ mock_time.side_effect = [1429117638.86767, 1429117639.67676]
+ r.reap_container('a', 'partition', acc_nodes, 'c')
+
+ # verify calls to direct_delete_object
mock_calls = mocks['direct_delete_object'].call_args_list
self.assertEqual(policy.object_ring.replicas, len(mock_calls))
for call_args in mock_calls:
@@ -374,8 +382,29 @@ class TestReaper(unittest.TestCase):
self.assertEqual(kwargs['headers']
['X-Backend-Storage-Policy-Index'],
policy.idx)
+ self.assertEqual(kwargs['headers']
+ ['X-Timestamp'],
+ '1429117638.86767')
+ # verify calls to direct_delete_container
self.assertEquals(mocks['direct_delete_container'].call_count, 3)
+ for i, call_args in enumerate(
+ mocks['direct_delete_container'].call_args_list):
+ anode = acc_nodes[i % len(acc_nodes)]
+ host = '%(ip)s:%(port)s' % anode
+ device = anode['device']
+ headers = {
+ 'X-Account-Host': host,
+ 'X-Account-Partition': 'partition',
+ 'X-Account-Device': device,
+ 'X-Account-Override-Deleted': 'yes',
+ 'X-Timestamp': '1429117639.67676'
+ }
+ ring = r.get_object_ring(policy.idx)
+ expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c',
+ headers=headers, conn_timeout=0.5,
+ response_timeout=10)
+ self.assertEqual(call_args, expected)
self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas)
def test_reap_container_get_object_fail(self):
diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py
index d41a7c9672..6f7660cdf3 100644
--- a/test/unit/common/test_direct_client.py
+++ b/test/unit/common/test_direct_client.py
@@ -341,6 +341,19 @@ class TestDirectClient(unittest.TestCase):
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
+ def test_direct_delete_container_with_timestamp(self):
+ # ensure timestamp is different from any that might be auto-generated
+ timestamp = Timestamp(time.time() - 100)
+ headers = {'X-Timestamp': timestamp.internal}
+ with mocked_http_conn(200) as conn:
+ direct_client.direct_delete_container(
+ self.node, self.part, self.account, self.container,
+ headers=headers)
+ self.assertEqual(conn.method, 'DELETE')
+ self.assertEqual(conn.path, self.container_path)
+ self.assertTrue('X-Timestamp' in conn.req_headers)
+ self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
+
def test_direct_delete_container_error(self):
with mocked_http_conn(500) as conn:
try:
@@ -536,6 +549,19 @@ class TestDirectClient(unittest.TestCase):
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(resp, None)
+ def test_direct_delete_object_with_timestamp(self):
+ # ensure timestamp is different from any that might be auto-generated
+ timestamp = Timestamp(time.time() - 100)
+ headers = {'X-Timestamp': timestamp.internal}
+ with mocked_http_conn(200) as conn:
+ direct_client.direct_delete_object(
+ self.node, self.part, self.account, self.container, self.obj,
+ headers=headers)
+ self.assertEqual(conn.method, 'DELETE')
+ self.assertEqual(conn.path, self.obj_path)
+ self.assertTrue('X-Timestamp' in conn.req_headers)
+ self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
+
def test_direct_delete_object_error(self):
with mocked_http_conn(503) as conn:
try:
From e4d326b5a7dd186d762726faa45733ff2900343d Mon Sep 17 00:00:00 2001
From: Kazuhiro MIYAHARA
Date: Thu, 19 Feb 2015 17:38:10 +0900
Subject: [PATCH 03/98] Fix conflict SLO response
This patch fixes Swift to respond "409 Conflict"
when a segment object path in the manifest of a PUT SLO
request is the same as the requested object path. This is
because the request would overwrite the segment, which
would then inevitably cause "409 Conflict" on GET SLO.
e.g.:
request:
PUT "http://hostname/v1/AUTH_account/container/segment_object_00?multipart-manifest=put"
manifest file:
[{"path" : "container/segment_object_00", "etag" : "", "size_bytes" : },
{"path" : "container/segment_object_01", "etag" : "", "size_bytes" : },
{"path" : "container/segment_object_02", "etag" : "", "size_bytes" : }]
Change-Id: I4f4f7b9dbeb6a7c355b801c7e0ae560aa19a70b4
Closes-Bug: 1417936
---
AUTHORS | 1 +
swift/common/middleware/slo.py | 5 +++
test/unit/common/middleware/test_slo.py | 52 ++++++++++++++++++++++++-
3 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/AUTHORS b/AUTHORS
index be3c5deeb9..fa2cee7458 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -223,3 +223,4 @@ Hua Zhang (zhuadl@cn.ibm.com)
Jian Zhang (jian.zhang@intel.com)
Ning Zhang (ning@zmanda.com)
Yuan Zhou (yuan.zhou@intel.com)
+Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp)
diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py
index e8f1707e28..d8df829981 100644
--- a/swift/common/middleware/slo.py
+++ b/swift/common/middleware/slo.py
@@ -586,6 +586,11 @@ class StaticLargeObject(object):
if isinstance(obj_name, unicode):
obj_name = obj_name.encode('utf-8')
obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])
+ if req.path == quote(obj_path):
+ raise HTTPConflict(
+ 'Manifest object name "%s" '
+ 'cannot be included in the manifest'
+ % obj_name)
try:
seg_size = int(seg_dict['size_bytes'])
except (ValueError, TypeError):
diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py
index 4160d91d46..d70a25ccc4 100644
--- a/test/unit/common/middleware/test_slo.py
+++ b/test/unit/common/middleware/test_slo.py
@@ -24,7 +24,7 @@ from swift.common import swob, utils
from swift.common.exceptions import ListingIterError, SegmentError
from swift.common.middleware import slo
from swift.common.swob import Request, Response, HTTPException
-from swift.common.utils import json
+from swift.common.utils import quote, json
from test.unit.common.middleware.helpers import FakeSwift
@@ -139,6 +139,11 @@ class TestSloPutManifest(SloTestCase):
swob.HTTPOk,
{'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'},
None)
+ self.app.register(
+ 'HEAD', '/v1/AUTH_test/cont/object2',
+ swob.HTTPOk,
+ {'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'},
+ None)
self.app.register(
'HEAD', '/v1/AUTH_test/cont/object\xe2\x99\xa1',
swob.HTTPOk,
@@ -149,6 +154,11 @@ class TestSloPutManifest(SloTestCase):
swob.HTTPOk,
{'Content-Length': '10', 'Etag': 'etagoftheobjectsegment'},
None)
+ self.app.register(
+ 'HEAD', u'/v1/AUTH_test/cont/あ_1',
+ swob.HTTPOk,
+ {'Content-Length': '1', 'Etag': 'a'},
+ None)
self.app.register(
'PUT', '/v1/AUTH_test/c/man', swob.HTTPCreated, {}, None)
self.app.register(
@@ -391,6 +401,46 @@ class TestSloPutManifest(SloTestCase):
self.assertEquals(errors[4][0], '/checktest/slob')
self.assertEquals(errors[4][1], 'Etag Mismatch')
+ def test_handle_multipart_put_manifest_equal_slo(self):
+ test_json_data = json.dumps([{'path': '/cont/object',
+ 'etag': 'etagoftheobjectsegment',
+ 'size_bytes': 100}])
+ req = Request.blank(
+ '/v1/AUTH_test/cont/object?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
+ body=test_json_data)
+ status, headers, body = self.call_slo(req)
+ self.assertEqual(status, '409 Conflict')
+ self.assertEqual(self.app.call_count, 0)
+
+ def test_handle_multipart_put_manifest_equal_slo_non_ascii(self):
+ test_json_data = json.dumps([{'path': u'/cont/あ_1',
+ 'etag': 'a',
+ 'size_bytes': 1}])
+ path = quote(u'/v1/AUTH_test/cont/あ_1')
+ req = Request.blank(
+ path + '?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
+ body=test_json_data)
+ status, headers, body = self.call_slo(req)
+ self.assertEqual(status, '409 Conflict')
+ self.assertEqual(self.app.call_count, 0)
+
+ def test_handle_multipart_put_manifest_equal_last_segment(self):
+ test_json_data = json.dumps([{'path': '/cont/object',
+ 'etag': 'etagoftheobjectsegment',
+ 'size_bytes': 100},
+ {'path': '/cont/object2',
+ 'etag': 'etagoftheobjectsegment',
+ 'size_bytes': 100}])
+ req = Request.blank(
+ '/v1/AUTH_test/cont/object2?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
+ body=test_json_data)
+ status, headers, body = self.call_slo(req)
+ self.assertEqual(status, '409 Conflict')
+ self.assertEqual(self.app.call_count, 1)
+
class TestSloDeleteManifest(SloTestCase):
From a2a5b6aa6664b61fa31d25f329426ff089372f17 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Thu, 16 Apr 2015 11:42:12 -0700
Subject: [PATCH 04/98] Functional test for SLO PUT overwriting one of its own
segments
Change-Id: I4855816848f4fdb148d0b82735cf79bc68429617
---
test/functional/tests.py | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/test/functional/tests.py b/test/functional/tests.py
index 95f168e6e8..3fbbdd784e 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -2259,6 +2259,26 @@ class TestSlo(Base):
else:
self.fail("Expected ResponseError but didn't get it")
+ def test_slo_overwrite_segment_with_manifest(self):
+ file_item = self.env.container.file("seg_b")
+ try:
+ file_item.write(
+ json.dumps([
+ {'size_bytes': 1024 * 1024,
+ 'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_a')},
+ {'size_bytes': 1024 * 1024,
+ 'etag': hashlib.md5('b' * 1024 * 1024).hexdigest(),
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_b')},
+ {'size_bytes': 1024 * 1024,
+ 'etag': hashlib.md5('c' * 1024 * 1024).hexdigest(),
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_c')}]),
+ parms={'multipart-manifest': 'put'})
+ except ResponseError as err:
+ self.assertEqual(409, err.status)
+ else:
+ self.fail("Expected ResponseError but didn't get it")
+
def test_slo_copy(self):
file_item = self.env.container.file("manifest-abcde")
file_item.copy(self.env.container.name, "copied-abcde")
From f6482bdece27144ee083a53d696469528d7940c2 Mon Sep 17 00:00:00 2001
From: Thierry Carrez
Date: Thu, 16 Apr 2015 22:08:47 +0200
Subject: [PATCH 05/98] Set default branch to stable/kilo
Open stable/kilo branch by setting defaultbranch for git-review.
Change-Id: I81bcda30f99173416eaaa3f1d42da32f3ab5b6d2
---
.gitreview | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitreview b/.gitreview
index d7c52c0593..94552c9bcb 100644
--- a/.gitreview
+++ b/.gitreview
@@ -2,3 +2,4 @@
host=review.openstack.org
port=29418
project=openstack/swift.git
+defaultbranch=stable/kilo
From 2203b46e3f14ef68a090aaea284f0a0442bbb86f Mon Sep 17 00:00:00 2001
From: Tushar Gohad
Date: Wed, 15 Apr 2015 17:34:48 -0700
Subject: [PATCH 06/98] Bump PyECLib version from 1.0.3 to 1.0.7
In addition to fixing several bugs, 1.0.7 eliminates the need for
some work-around code in Swift. This code was only there to hide issues
in the current version, but it also ends up breaking some third-party
integration. In order to enable expected functionality and to avoid
dealing with deprecation issues right from the beginning, we need to
bump the minimum PyECLib requirement to 1.0.7.
Change-Id: I03e059e7335656c22be28ffd6157b56e13bdfc1b
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 27d507901a..9f81b844ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
simplejson>=2.0.9
xattr>=0.4
-PyECLib>=1.0.3
+PyECLib>=1.0.7
From c5c281ba6dd97a301ba80511e3356d6ca536d701 Mon Sep 17 00:00:00 2001
From: Minwoo B
Date: Mon, 20 Apr 2015 17:03:25 -0500
Subject: [PATCH 07/98] Included step in development_saio.rst for installing
dependencies in requirements.txt.
Change-Id: I6ed1704148e5ae1e3164d10080c350d81856f7a9
---
doc/source/development_saio.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index 3bd94872dd..49ef6eede0 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -176,7 +176,7 @@ Getting the code
#. Build a development installation of swift::
- cd $HOME/swift; sudo python setup.py develop; cd -
+ cd $HOME/swift; sudo pip install -r requirements.txt; sudo python setup.py develop; cd -
Fedora 19 or later users might have to perform the following if development
installation of swift fails::
From 0c391d6daffe1943f6def803db42e08e6d6846d2 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 20 Apr 2015 16:47:10 -0700
Subject: [PATCH 08/98] SAIO instructions: ensure ~/bin exists before copying
into it
Change-Id: I16cd211b00b529ccc4b46f6b10497c32b6741896
---
doc/source/development_saio.rst | 1 +
1 file changed, 1 insertion(+)
diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index 3bd94872dd..dc952df3ab 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -409,6 +409,7 @@ Setting up scripts for running Swift
#. Copy the SAIO scripts for resetting the environment::
+ mkdir -p $HOME/bin
cd $HOME/swift/doc; cp saio/bin/* $HOME/bin; cd -
chmod +x $HOME/bin/*
From 27f6fba5c3b9e0461d52c1deffe32130e7e9af51 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 20 Apr 2015 00:18:25 -0700
Subject: [PATCH 09/98] Use reconstruct instead of decode/encode
With bumping PyECLib up to 1.0.7 on global requirements,
we can use the "reconstruct" function directly instead
of the current hack doing decode/encode on reconstructor.
That is because the hack was to work around a PyECLib < 1.0.7
(strictly, jerasure scheme) reconstruction bug, so we don't
have to do decode/encode anymore.
Co-Authored-By: Clay Gerrard
Change-Id: I69aae495670e3d0bdebe665f73915547a4d56f99
---
swift/obj/reconstructor.py | 10 ++------
test/unit/obj/test_reconstructor.py | 39 +++++++++++++++++++++++++++++
2 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index db078de2fc..4385e42cc9 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -276,14 +276,8 @@ class ObjectReconstructor(Daemon):
rebuilt_fragment_iter)
def _reconstruct(self, policy, fragment_payload, frag_index):
- # XXX with jerasure this doesn't work if we need to rebuild a
- # parity fragment, and not all data fragments are available
- # segment = policy.pyeclib_driver.reconstruct(
- # fragment_payload, [frag_index])[0]
-
- # for safety until pyeclib 1.0.7 we'll just use decode and encode
- segment = policy.pyeclib_driver.decode(fragment_payload)
- return policy.pyeclib_driver.encode(segment)[frag_index]
+ return policy.pyeclib_driver.reconstruct(fragment_payload,
+ [frag_index])[0]
def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index):
"""
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index b7254f4343..23e70543f7 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -2347,6 +2347,45 @@ class TestObjectReconstructor(unittest.TestCase):
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
+ def test_reconstruct_parity_fa_with_data_node_failure(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[-4]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ # make up some data (trim some amount to make it unaligned with
+ # segment size)
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-454]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ # the scheme is 10+4, so this gets a parity node
+ broken_body = ec_archive_bodies.pop(-4)
+
+ base_responses = list((200, body) for body in ec_archive_bodies)
+ for error in (Timeout(), 404, Exception('kaboom!')):
+ responses = list(base_responses)
+ # grab a data node index
+ error_index = random.randint(0, self.policy.ec_ndata - 1)
+ responses[error_index] = (error, '')
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ codes, body_iter = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter,
+ headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, dict(metadata))
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
def test_reconstruct_fa_errors_fails(self):
job = {
'partition': 0,
From 2080f7dbd897d6130542dbf88e37641a41625eb5 Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Thu, 26 Feb 2015 15:16:22 +0000
Subject: [PATCH 10/98] Fix tempauth acl checks when simplejson has no speedups
As documented in linked bug report, tempauth unit tests
were seen to fail on a system where simplejson was
installed but without the speedups extension. This
is because the tempauth account acl validation checks
that values are type str, but without the speedups
extension the json parser is returning unicode objects.
Fix is to have the acl validator tolerate those objects
being unicode or str.
Also change common/bufferedhttp.py to coerce ring device
to type str when constructing a path, in order to avoid
a UnicodeDecodeError when httplib sends a message that
has non-ascii header values.
Change-Id: I01524282cbaa25dc4b6dfa09f3f4723516cdba99
Closes-Bug: 1425776
---
swift/common/bufferedhttp.py | 5 ++
swift/common/middleware/tempauth.py | 8 +--
test/unit/common/middleware/test_tempauth.py | 30 +++++++---
test/unit/common/test_bufferedhttp.py | 60 ++++++++++++++------
4 files changed, 73 insertions(+), 30 deletions(-)
diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py
index d4a977c21e..2b3ec1609d 100644
--- a/swift/common/bufferedhttp.py
+++ b/swift/common/bufferedhttp.py
@@ -155,6 +155,11 @@ def http_connect(ipaddr, port, device, partition, method, path,
path = path.encode("utf-8")
except UnicodeError as e:
logging.exception(_('Error encoding to UTF-8: %s'), str(e))
+ if isinstance(device, unicode):
+ try:
+ device = device.encode("utf-8")
+ except UnicodeError as e:
+ logging.exception(_('Error encoding to UTF-8: %s'), str(e))
path = quote('/' + device + '/' + str(partition) + path)
return http_connect_raw(
ipaddr, port, method, path, headers, query_string, ssl)
diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py
index 93f55ff031..dfde519f42 100644
--- a/swift/common/middleware/tempauth.py
+++ b/swift/common/middleware/tempauth.py
@@ -447,16 +447,16 @@ class TempAuth(object):
# on ACLs, TempAuth is not such an auth system. At this point,
# it thinks it is authoritative.
if key not in tempauth_acl_keys:
- return 'Key %r not recognized' % key
+ return "Key '%s' not recognized" % key
for key in tempauth_acl_keys:
if key not in result:
continue
if not isinstance(result[key], list):
- return 'Value for key %r must be a list' % key
+ return "Value for key '%s' must be a list" % key
for grantee in result[key]:
- if not isinstance(grantee, str):
- return 'Elements of %r list must be strings' % key
+ if not isinstance(grantee, basestring):
+ return "Elements of '%s' list must be strings" % key
# Everything looks fine, no errors found
internal_hdr = get_sys_meta_prefix('account') + 'core-access-control'
diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py
index b9be84bb92..e8af310c82 100644
--- a/test/unit/common/middleware/test_tempauth.py
+++ b/test/unit/common/middleware/test_tempauth.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2011-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import unittest
from contextlib import contextmanager, nested
from base64 import b64encode
@@ -22,7 +24,7 @@ import mock
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
from swift.common.swob import Request, Response
-from swift.common.utils import split_path, get_swift_info
+from swift.common.utils import split_path
NO_CONTENT_RESP = (('204 No Content', {}, ''),) # mock server response
@@ -111,10 +113,6 @@ class TestAuth(unittest.TestCase):
def setUp(self):
self.test_auth = auth.filter_factory({})(FakeApp())
- def test_swift_info(self):
- info = get_swift_info()
- self.assertTrue(info['tempauth']['account_acls'])
-
def _make_request(self, path, **kwargs):
req = Request.blank(path, **kwargs)
req.environ['swift.cache'] = FakeMemcache()
@@ -1200,7 +1198,8 @@ class TestAccountAcls(unittest.TestCase):
user_groups = test_auth._get_user_groups('admin', 'admin:user',
'AUTH_admin')
good_headers = {'X-Auth-Token': 'AUTH_t'}
- good_acl = '{"read-only":["a","b"]}'
+ good_acl = json.dumps({"read-only": [u"á", "b"]})
+ bad_list_types = '{"read-only": ["a", 99]}'
bad_acl = 'syntactically invalid acl -- this does not parse as JSON'
wrong_acl = '{"other-auth-system":["valid","json","but","wrong"]}'
bad_value_acl = '{"read-write":["fine"],"admin":"should be a list"}'
@@ -1220,7 +1219,9 @@ class TestAccountAcls(unittest.TestCase):
req = self._make_request(target, user_groups=user_groups,
headers=dict(good_headers, **update))
resp = req.get_response(test_auth)
- self.assertEquals(resp.status_int, 204)
+ self.assertEquals(resp.status_int, 204,
+ 'Expected 204, got %s, response body: %s'
+ % (resp.status_int, resp.body))
# syntactically valid empty acls should go through
for acl in empty_acls:
@@ -1243,14 +1244,25 @@ class TestAccountAcls(unittest.TestCase):
req = self._make_request(target, headers=dict(good_headers, **update))
resp = req.get_response(test_auth)
self.assertEquals(resp.status_int, 400)
- self.assertEquals(errmsg % "Key '", resp.body[:39])
+ self.assertTrue(resp.body.startswith(
+ errmsg % "Key 'other-auth-system' not recognized"), resp.body)
# acls with good keys but bad values also get a 400
update = {'x-account-access-control': bad_value_acl}
req = self._make_request(target, headers=dict(good_headers, **update))
resp = req.get_response(test_auth)
self.assertEquals(resp.status_int, 400)
- self.assertEquals(errmsg % "Value", resp.body[:39])
+ self.assertTrue(resp.body.startswith(
+ errmsg % "Value for key 'admin' must be a list"), resp.body)
+
+ # acls with non-string-types in list also get a 400
+ update = {'x-account-access-control': bad_list_types}
+ req = self._make_request(target, headers=dict(good_headers, **update))
+ resp = req.get_response(test_auth)
+ self.assertEquals(resp.status_int, 400)
+ self.assertTrue(resp.body.startswith(
+ errmsg % "Elements of 'read-only' list must be strings"),
+ resp.body)
# acls with wrong json structure also get a 400
update = {'x-account-access-control': not_dict_acl}
diff --git a/test/unit/common/test_bufferedhttp.py b/test/unit/common/test_bufferedhttp.py
index a663a3d121..6e51973147 100644
--- a/test/unit/common/test_bufferedhttp.py
+++ b/test/unit/common/test_bufferedhttp.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2010-2012 OpenStack Foundation
+# -*- coding: utf-8 -*-
+# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +13,7 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import mock
import unittest
@@ -22,6 +24,24 @@ from eventlet import spawn, Timeout, listen
from swift.common import bufferedhttp
+class MockHTTPSConnection(object):
+
+ def __init__(self, hostport):
+ pass
+
+ def putrequest(self, method, path, skip_host=0):
+ self.path = path
+ pass
+
+ def putheader(self, header, *values):
+ # Verify that path and values can be safely joined
+ # Essentially what Python 2.7 does that caused us problems.
+ '\r\n\t'.join((self.path,) + values)
+
+ def endheaders(self):
+ pass
+
+
class TestBufferedHTTP(unittest.TestCase):
def test_http_connect(self):
@@ -76,22 +96,6 @@ class TestBufferedHTTP(unittest.TestCase):
raise Exception(err)
def test_nonstr_header_values(self):
-
- class MockHTTPSConnection(object):
-
- def __init__(self, hostport):
- pass
-
- def putrequest(self, method, path, skip_host=0):
- pass
-
- def putheader(self, header, *values):
- # Essentially what Python 2.7 does that caused us problems.
- '\r\n\t'.join(values)
-
- def endheaders(self):
- pass
-
origHTTPSConnection = bufferedhttp.HTTPSConnection
bufferedhttp.HTTPSConnection = MockHTTPSConnection
try:
@@ -106,6 +110,28 @@ class TestBufferedHTTP(unittest.TestCase):
finally:
bufferedhttp.HTTPSConnection = origHTTPSConnection
+ def test_unicode_values(self):
+ # simplejson may decode the ring devices as str or unicode
+ # depending on whether speedups is installed and/or the values are
+ # non-ascii. Verify all types are tolerated in combination with
+ # whatever type path might be and possible encoded non-ascii in
+ # a header value.
+ with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
+ MockHTTPSConnection):
+ for dev in ('sda', u'sda', u'sdá', u'sdá'.encode('utf-8')):
+ for path in (
+ '/v1/a', u'/v1/a', u'/v1/á', u'/v1/á'.encode('utf-8')):
+ for header in ('abc', u'abc', u'ábc'.encode('utf-8')):
+ try:
+ bufferedhttp.http_connect(
+ '127.0.0.1', 8080, dev, 1, 'GET', path,
+ headers={'X-Container-Meta-Whatever': header},
+ ssl=True)
+ except Exception as e:
+ self.fail(
+ 'Exception %r for device=%r path=%r header=%r'
+ % (e, dev, path, header))
+
if __name__ == '__main__':
unittest.main()
From 51e31c5c7147f3ba61437e132b12b491ca718ce4 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Wed, 15 Apr 2015 15:31:06 -0700
Subject: [PATCH 11/98] Don't apply the wrong Etag validation to rebuilt
fragments
Because of the object-server's interaction with ssync sender's
X-Backend-Replication-Headers, when an object (or fragment archive) is
pushed unmodified to another node its ETag value is duped into the
receiving end's metadata as Etag. This interacts poorly with the
reconstructor's RebuildingECDiskFileStream which cannot know ahead of
time the ETag of the fragment archive being rebuilt.
Don't send the Etag from the local source fragment archive being used as
the basis for the rebuilt fragment archive's metadata along to ssync.
Closes-Bug: 1446800
Change-Id: Ie59ad93a67a7f439c9a84cd9cff31540f97f334a
---
swift/obj/reconstructor.py | 34 +++----
test/probe/common.py | 5 +
test/probe/test_reconstructor_revert.py | 120 +++++++++++++++++++++++-
test/unit/obj/test_reconstructor.py | 18 +---
4 files changed, 143 insertions(+), 34 deletions(-)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 0ee2afbf6d..db078de2fc 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -49,6 +49,21 @@ SYNC, REVERT = ('sync_only', 'sync_revert')
hubs.use_hub(get_hub())
+def _get_partners(frag_index, part_nodes):
+ """
+ Returns the left and right partners of the node whose index is
+ equal to the given frag_index.
+
+ :param frag_index: a fragment index
+ :param part_nodes: a list of primary nodes
+ :returns: [, ]
+ """
+ return [
+ part_nodes[(frag_index - 1) % len(part_nodes)],
+ part_nodes[(frag_index + 1) % len(part_nodes)],
+ ]
+
+
class RebuildingECDiskFileStream(object):
"""
This class wraps the the reconstructed fragment archive data and
@@ -65,7 +80,8 @@ class RebuildingECDiskFileStream(object):
# update the FI and delete the ETag, the obj server will
# recalc on the other side...
self.metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
- del self.metadata['ETag']
+ for etag_key in ('ETag', 'Etag'):
+ self.metadata.pop(etag_key, None)
self.frag_index = frag_index
self.rebuilt_fragment_iter = rebuilt_fragment_iter
@@ -382,20 +398,6 @@ class ObjectReconstructor(Daemon):
self.kill_coros()
self.last_reconstruction_count = self.reconstruction_count
- def _get_partners(self, frag_index, part_nodes):
- """
- Returns the left and right partners of the node whose index is
- equal to the given frag_index.
-
- :param frag_index: a fragment index
- :param part_nodes: a list of primary nodes
- :returns: [, ]
- """
- return [
- part_nodes[(frag_index - 1) % len(part_nodes)],
- part_nodes[(frag_index + 1) % len(part_nodes)],
- ]
-
def _get_hashes(self, policy, path, recalculate=None, do_listdir=False):
df_mgr = self._df_router[policy]
hashed, suffix_hashes = tpool_reraise(
@@ -715,7 +717,7 @@ class ObjectReconstructor(Daemon):
job_type=SYNC,
frag_index=frag_index,
suffixes=suffixes,
- sync_to=self._get_partners(frag_index, part_nodes),
+ sync_to=_get_partners(frag_index, part_nodes),
)
# ssync callback to rebuild missing fragment_archives
sync_job['sync_diskfile_builder'] = self.reconstruct_fa
diff --git a/test/probe/common.py b/test/probe/common.py
index 1311cc178a..7d1e754014 100644
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -299,6 +299,11 @@ class ProbeTest(unittest.TestCase):
path_parts.append(str(part))
return os.path.join(*path_parts)
+ def config_number(self, node):
+ _server_type, config_number = get_server_number(
+ node['port'], self.port2server)
+ return config_number
+
def get_to_final_state(self):
# these .stop()s are probably not strictly necessary,
# but may prevent race conditions
diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py
index 2a7bd7c834..39739b617d 100755
--- a/test/probe/test_reconstructor_revert.py
+++ b/test/probe/test_reconstructor_revert.py
@@ -18,6 +18,9 @@ from hashlib import md5
import unittest
import uuid
import os
+import random
+import shutil
+from collections import defaultdict
from test.probe.common import ECProbeTest
@@ -25,6 +28,7 @@ from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY
from swift.common.manager import Manager
from swift.common.utils import renamer
+from swift.obj import reconstructor
from swiftclient import client
@@ -233,7 +237,7 @@ class TestReconstructorRevert(ECProbeTest):
# fire up reconstructor on handoff nodes only
for hnode in hnodes:
hnode_id = (hnode['port'] - 6000) / 10
- self.reconstructor.once(number=hnode_id, override_devices=['sdb8'])
+ self.reconstructor.once(number=hnode_id)
# check the first node to make sure its gone
try:
@@ -253,6 +257,120 @@ class TestReconstructorRevert(ECProbeTest):
self.fail('Node data on %r was not fully destoryed!' %
(onodes[0]))
+ def test_reconstruct_from_reverted_fragment_archive(self):
+ headers = {'X-Storage-Policy': self.policy.name}
+ client.put_container(self.url, self.token, self.container_name,
+ headers=headers)
+
+ # get our node lists
+ opart, onodes = self.object_ring.get_nodes(
+ self.account, self.container_name, self.object_name)
+
+ # find a primary server that only has one of it's devices in the
+ # primary node list
+ group_nodes_by_config = defaultdict(list)
+ for n in onodes:
+ group_nodes_by_config[self.config_number(n)].append(n)
+ for config_number, node_list in group_nodes_by_config.items():
+ if len(node_list) == 1:
+ break
+ else:
+ self.fail('ring balancing did not use all available nodes')
+ primary_node = node_list[0]
+ primary_device = self.device_dir('object', primary_node)
+ self.kill_drive(primary_device)
+
+ # PUT object
+ contents = Body()
+ etag = client.put_object(self.url, self.token, self.container_name,
+ self.object_name, contents=contents)
+ self.assertEqual(contents.etag, etag)
+
+ # fix the primary device and sanity GET
+ self.revive_drive(primary_device)
+ self.assertEqual(etag, self.proxy_get())
+
+ # find a handoff holding the fragment
+ for hnode in self.object_ring.get_more_nodes(opart):
+ try:
+ reverted_fragment_etag = self.direct_get(hnode, opart)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ else:
+ break
+ else:
+ self.fail('Unable to find handoff fragment!')
+
+ # we'll force the handoff device to revert instead of potentially
+ # racing with rebuild by deleting any other fragments that may be on
+ # the same server
+ handoff_fragment_etag = None
+ for node in onodes:
+ if node['port'] == hnode['port']:
+ # we'll keep track of the etag of this fragment we're removing
+ # in case we need it later (queue forshadowing music)...
+ try:
+ handoff_fragment_etag = self.direct_get(node, opart)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ # this just means our handoff device was on the same
+ # machine as the primary!
+ continue
+ # use the primary nodes device - not the hnode device
+ part_dir = self.storage_dir('object', node, part=opart)
+ shutil.rmtree(part_dir, True)
+
+ # revert from handoff device with reconstructor
+ self.reconstructor.once(number=self.config_number(hnode))
+
+ # verify fragment reverted to primary server
+ self.assertEqual(reverted_fragment_etag,
+ self.direct_get(primary_node, opart))
+
+ # now we'll remove some data on one of the primary node's partners
+ partner = random.choice(reconstructor._get_partners(
+ primary_node['index'], onodes))
+
+ try:
+ rebuilt_fragment_etag = self.direct_get(partner, opart)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ # partner already had it's fragment removed
+ if (handoff_fragment_etag is not None and
+ hnode['port'] == partner['port']):
+ # oh, well that makes sense then...
+ rebuilt_fragment_etag = handoff_fragment_etag
+ else:
+ # I wonder what happened?
+ self.fail('Partner inexplicably missing fragment!')
+ part_dir = self.storage_dir('object', partner, part=opart)
+ shutil.rmtree(part_dir, True)
+
+ # sanity, it's gone
+ try:
+ self.direct_get(partner, opart)
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ else:
+ self.fail('successful GET of removed partner fragment archive!?')
+
+ # and force the primary node to do a rebuild
+ self.reconstructor.once(number=self.config_number(primary_node))
+
+ # and validate the partners rebuilt_fragment_etag
+ try:
+ self.assertEqual(rebuilt_fragment_etag,
+ self.direct_get(partner, opart))
+ except direct_client.DirectClientException as err:
+ if err.http_status != 404:
+ raise
+ else:
+ self.fail('Did not find rebuilt fragment on partner node')
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index 93a50e84de..b7254f4343 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -293,22 +293,6 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
writer.commit(timestamp)
return df
- def debug_wtf(self):
- # won't include this in the final, just handy reminder of where
- # things are...
- for pol in [p for p in POLICIES if p.policy_type == EC_POLICY]:
- obj_ring = pol.object_ring
- for part_num in self.part_nums:
- print "\n part_num %s " % part_num
- part_nodes = obj_ring.get_part_nodes(int(part_num))
- print "\n part_nodes %s " % part_nodes
- for local_dev in obj_ring.devs:
- partners = self.reconstructor._get_partners(
- local_dev['id'], obj_ring, part_num)
- if partners:
- print "\n local_dev %s \n partners %s " % (local_dev,
- partners)
-
def assert_expected_jobs(self, part_num, jobs):
for job in jobs:
del job['path']
@@ -702,7 +686,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
part_nodes = obj_ring.get_part_nodes(int(part_num))
primary_ids = [n['id'] for n in part_nodes]
for node in part_nodes:
- partners = self.reconstructor._get_partners(
+ partners = object_reconstructor._get_partners(
node['index'], part_nodes)
left = partners[0]['id']
right = partners[1]['id']
From 281cb1c210f0292bad190cabaae447145fa5eade Mon Sep 17 00:00:00 2001
From: Tushar Gohad
Date: Wed, 15 Apr 2015 17:34:48 -0700
Subject: [PATCH 12/98] Bump PyECLib version from 1.0.3 to 1.0.7
In addition to fixing several bugs, 1.0.7 eliminates the need for
some work-around code in Swift. This code was only there to hide issues
in the current version, but it also ends up breaking some third-party
integration. In order to enable expected functionality and to avoid
dealing with deprecation issues right from the beginning, we need to
bump the minimum PyECLib requirement to 1.0.7.
Closes-Bug: 1446727
Change-Id: I03e059e7335656c22be28ffd6157b56e13bdfc1b
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 27d507901a..9f81b844ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ netifaces>=0.5,!=0.10.0,!=0.10.1
pastedeploy>=1.3.3
simplejson>=2.0.9
xattr>=0.4
-PyECLib>=1.0.3
+PyECLib>=1.0.7
From cd7c58e93690fd25f5266754d0593c656dd51e2e Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 20 Apr 2015 00:18:25 -0700
Subject: [PATCH 13/98] Use reconstruct instead of decode/encode
With bumping PyECLib up to 1.0.7 on global requirements,
we can use the "reconstruct" function directly instead
of the current hack doing decode/encode on reconstructor.
That is because the hack was for working around a PyECLib < 1.0.7
(strictly the jerasure scheme) reconstruction bug, so we don't
have to do decode/encode anymore.
Closes-Bug: 1446801
Co-Authored-By: Clay Gerrard
Change-Id: I69aae495670e3d0bdebe665f73915547a4d56f99
---
swift/obj/reconstructor.py | 10 ++------
test/unit/obj/test_reconstructor.py | 39 +++++++++++++++++++++++++++++
2 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index db078de2fc..4385e42cc9 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -276,14 +276,8 @@ class ObjectReconstructor(Daemon):
rebuilt_fragment_iter)
def _reconstruct(self, policy, fragment_payload, frag_index):
- # XXX with jerasure this doesn't work if we need to rebuild a
- # parity fragment, and not all data fragments are available
- # segment = policy.pyeclib_driver.reconstruct(
- # fragment_payload, [frag_index])[0]
-
- # for safety until pyeclib 1.0.7 we'll just use decode and encode
- segment = policy.pyeclib_driver.decode(fragment_payload)
- return policy.pyeclib_driver.encode(segment)[frag_index]
+ return policy.pyeclib_driver.reconstruct(fragment_payload,
+ [frag_index])[0]
def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index):
"""
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index b7254f4343..23e70543f7 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -2347,6 +2347,45 @@ class TestObjectReconstructor(unittest.TestCase):
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
+ def test_reconstruct_parity_fa_with_data_node_failure(self):
+ job = {
+ 'partition': 0,
+ 'policy': self.policy,
+ }
+ part_nodes = self.policy.object_ring.get_part_nodes(0)
+ node = part_nodes[-4]
+ metadata = {
+ 'name': '/a/c/o',
+ 'Content-Length': 0,
+ 'ETag': 'etag',
+ }
+
+ # make up some data (trim some amount to make it unaligned with
+ # segment size)
+ test_data = ('rebuild' * self.policy.ec_segment_size)[:-454]
+ etag = md5(test_data).hexdigest()
+ ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+ # the scheme is 10+4, so this gets a parity node
+ broken_body = ec_archive_bodies.pop(-4)
+
+ base_responses = list((200, body) for body in ec_archive_bodies)
+ for error in (Timeout(), 404, Exception('kaboom!')):
+ responses = list(base_responses)
+ # grab a data node index
+ error_index = random.randint(0, self.policy.ec_ndata - 1)
+ responses[error_index] = (error, '')
+ headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+ codes, body_iter = zip(*responses)
+ with mocked_http_conn(*codes, body_iter=body_iter,
+ headers=headers):
+ df = self.reconstructor.reconstruct_fa(
+ job, node, dict(metadata))
+ fixed_body = ''.join(df.reader())
+ self.assertEqual(len(fixed_body), len(broken_body))
+ self.assertEqual(md5(fixed_body).hexdigest(),
+ md5(broken_body).hexdigest())
+
def test_reconstruct_fa_errors_fails(self):
job = {
'partition': 0,
From f8dee761bd36f857aa1288c27e095907032fad68 Mon Sep 17 00:00:00 2001
From: Andreas Jaeger
Date: Mon, 20 Apr 2015 11:15:35 +0200
Subject: [PATCH 14/98] Release Import of Translations from Transifex
Manual import of Translations from Transifex. This change also removes
all po files that are less than 66 per cent translated since such
partially translated files will not help users.
This update also recreates all pot (translation source files) to
reflect the state of the repository.
This change needs to be done manually since the automatic import does
not handle the proposed branches and we need to sync with latest
translations.
Note: This is part of importing of translations, there are no new
translations for this project, thus only the pot file gets updated.
Change-Id: I0cbfdae3bd1662da54c58e91a13f49419eba9b2d
---
swift/locale/swift.pot | 503 ++++++++++++++----------
swift/locale/zh_CN/LC_MESSAGES/swift.po | 479 +++++++++++++---------
2 files changed, 588 insertions(+), 394 deletions(-)
diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot
index f7a79f7239..4845819076 100644
--- a/swift/locale/swift.pot
+++ b/swift/locale/swift.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.2.2.post136\n"
+"Project-Id-Version: swift 2.3.0rc1.1.gf6482bd\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-03-24 06:06+0000\n"
+"POT-Creation-Date: 2015-04-20 11:15+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -63,98 +63,98 @@ msgstr ""
msgid "ERROR Could not get account info %s"
msgstr ""
-#: swift/account/reaper.py:133 swift/common/utils.py:2127
-#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130
+#: swift/account/reaper.py:134 swift/common/utils.py:2127
+#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr ""
-#: swift/account/reaper.py:137
+#: swift/account/reaper.py:138
msgid "Exception in top-level account reaper loop"
msgstr ""
-#: swift/account/reaper.py:140
+#: swift/account/reaper.py:141
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr ""
-#: swift/account/reaper.py:237
+#: swift/account/reaper.py:238
#, python-format
msgid "Beginning pass on account %s"
msgstr ""
-#: swift/account/reaper.py:254
+#: swift/account/reaper.py:255
#, python-format
msgid "Exception with containers for account %s"
msgstr ""
-#: swift/account/reaper.py:261
+#: swift/account/reaper.py:262
#, python-format
msgid "Exception with account %s"
msgstr ""
-#: swift/account/reaper.py:262
+#: swift/account/reaper.py:263
#, python-format
msgid "Incomplete pass on account %s"
msgstr ""
-#: swift/account/reaper.py:264
+#: swift/account/reaper.py:265
#, python-format
msgid ", %s containers deleted"
msgstr ""
-#: swift/account/reaper.py:266
+#: swift/account/reaper.py:267
#, python-format
msgid ", %s objects deleted"
msgstr ""
-#: swift/account/reaper.py:268
+#: swift/account/reaper.py:269
#, python-format
msgid ", %s containers remaining"
msgstr ""
-#: swift/account/reaper.py:271
+#: swift/account/reaper.py:272
#, python-format
msgid ", %s objects remaining"
msgstr ""
-#: swift/account/reaper.py:273
+#: swift/account/reaper.py:274
#, python-format
msgid ", %s containers possibly remaining"
msgstr ""
-#: swift/account/reaper.py:276
+#: swift/account/reaper.py:277
#, python-format
msgid ", %s objects possibly remaining"
msgstr ""
-#: swift/account/reaper.py:279
+#: swift/account/reaper.py:280
msgid ", return codes: "
msgstr ""
-#: swift/account/reaper.py:283
+#: swift/account/reaper.py:284
#, python-format
msgid ", elapsed: %.02fs"
msgstr ""
-#: swift/account/reaper.py:289
+#: swift/account/reaper.py:290
#, python-format
msgid "Account %s has not been reaped since %s"
msgstr ""
-#: swift/account/reaper.py:348 swift/account/reaper.py:396
-#: swift/account/reaper.py:463 swift/container/updater.py:306
+#: swift/account/reaper.py:349 swift/account/reaper.py:397
+#: swift/account/reaper.py:464 swift/container/updater.py:306
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr ""
-#: swift/account/reaper.py:368
+#: swift/account/reaper.py:369
#, python-format
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr ""
#: swift/account/server.py:275 swift/container/server.py:582
-#: swift/obj/server.py:730
+#: swift/obj/server.py:910
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr ""
@@ -270,90 +270,90 @@ msgstr ""
msgid "Unexpected response: %s"
msgstr ""
-#: swift/common/manager.py:62
+#: swift/common/manager.py:63
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
msgstr ""
-#: swift/common/manager.py:69
+#: swift/common/manager.py:70
msgid "WARNING: Unable to modify memory limit. Running as non-root?"
msgstr ""
-#: swift/common/manager.py:76
+#: swift/common/manager.py:77
msgid "WARNING: Unable to modify max process limit. Running as non-root?"
msgstr ""
-#: swift/common/manager.py:194
+#: swift/common/manager.py:195
msgid ""
"\n"
"user quit"
msgstr ""
-#: swift/common/manager.py:231 swift/common/manager.py:543
+#: swift/common/manager.py:232 swift/common/manager.py:544
#, python-format
msgid "No %s running"
msgstr ""
-#: swift/common/manager.py:244
+#: swift/common/manager.py:245
#, python-format
msgid "%s (%s) appears to have stopped"
msgstr ""
-#: swift/common/manager.py:254
+#: swift/common/manager.py:255
#, python-format
msgid "Waited %s seconds for %s to die; giving up"
msgstr ""
-#: swift/common/manager.py:437
+#: swift/common/manager.py:438
#, python-format
msgid "Unable to locate config %sfor %s"
msgstr ""
-#: swift/common/manager.py:441
+#: swift/common/manager.py:442
msgid "Found configs:"
msgstr ""
-#: swift/common/manager.py:485
+#: swift/common/manager.py:486
#, python-format
msgid "Signal %s pid: %s signal: %s"
msgstr ""
-#: swift/common/manager.py:492
+#: swift/common/manager.py:493
#, python-format
msgid "Removing stale pid file %s"
msgstr ""
-#: swift/common/manager.py:495
+#: swift/common/manager.py:496
#, python-format
msgid "No permission to signal PID %d"
msgstr ""
-#: swift/common/manager.py:540
+#: swift/common/manager.py:541
#, python-format
msgid "%s #%d not running (%s)"
msgstr ""
-#: swift/common/manager.py:547 swift/common/manager.py:640
-#: swift/common/manager.py:643
+#: swift/common/manager.py:548 swift/common/manager.py:641
+#: swift/common/manager.py:644
#, python-format
msgid "%s running (%s - %s)"
msgstr ""
-#: swift/common/manager.py:646
+#: swift/common/manager.py:647
#, python-format
msgid "%s already started..."
msgstr ""
-#: swift/common/manager.py:655
+#: swift/common/manager.py:656
#, python-format
msgid "Running %s once"
msgstr ""
-#: swift/common/manager.py:657
+#: swift/common/manager.py:658
#, python-format
msgid "Starting %s"
msgstr ""
-#: swift/common/manager.py:664
+#: swift/common/manager.py:665
#, python-format
msgid "%s does not exist"
msgstr ""
@@ -373,7 +373,12 @@ msgstr ""
msgid "Error limiting server %s"
msgstr ""
-#: swift/common/request_helpers.py:387
+#: swift/common/request_helpers.py:102
+#, python-format
+msgid "No policy with index %s"
+msgstr ""
+
+#: swift/common/request_helpers.py:395
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""
@@ -436,51 +441,51 @@ msgstr ""
msgid "Unable to find %s config section in %s"
msgstr ""
-#: swift/common/utils.py:2348
+#: swift/common/utils.py:2353
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr ""
-#: swift/common/utils.py:2353
+#: swift/common/utils.py:2358
#, python-format
msgid "No realm key for %r"
msgstr ""
-#: swift/common/utils.py:2357
+#: swift/common/utils.py:2362
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr ""
-#: swift/common/utils.py:2366
+#: swift/common/utils.py:2371
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr ""
-#: swift/common/utils.py:2370
+#: swift/common/utils.py:2375
msgid "Path required in X-Container-Sync-To"
msgstr ""
-#: swift/common/utils.py:2373
+#: swift/common/utils.py:2378
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr ""
-#: swift/common/utils.py:2378
+#: swift/common/utils.py:2383
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr ""
-#: swift/common/utils.py:2570
+#: swift/common/utils.py:2575
msgid "Exception dumping recon cache"
msgstr ""
-#: swift/common/wsgi.py:175
+#: swift/common/wsgi.py:197
#, python-format
msgid "Could not bind to %s:%s after trying for %s seconds"
msgstr ""
-#: swift/common/wsgi.py:185
+#: swift/common/wsgi.py:207
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external "
"SSL termination for a production deployment."
@@ -521,27 +526,27 @@ msgstr ""
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr ""
-#: swift/common/middleware/recon.py:78
+#: swift/common/middleware/recon.py:80
msgid "Error reading recon cache file"
msgstr ""
-#: swift/common/middleware/recon.py:80
+#: swift/common/middleware/recon.py:82
msgid "Error parsing recon cache file"
msgstr ""
-#: swift/common/middleware/recon.py:82
+#: swift/common/middleware/recon.py:84
msgid "Error retrieving recon data"
msgstr ""
-#: swift/common/middleware/recon.py:151
+#: swift/common/middleware/recon.py:158
msgid "Error listing devices"
msgstr ""
-#: swift/common/middleware/recon.py:247
+#: swift/common/middleware/recon.py:254
msgid "Error reading ringfile"
msgstr ""
-#: swift/common/middleware/recon.py:261
+#: swift/common/middleware/recon.py:268
msgid "Error reading swift.conf"
msgstr ""
@@ -648,52 +653,61 @@ msgid ""
"later)"
msgstr ""
-#: swift/container/sync.py:193
+#: swift/container/sync.py:217
+msgid ""
+"Configuration option internal_client_conf_path not defined. Using default"
+" configuration, See internal-client.conf-sample for options"
+msgstr ""
+
+#: swift/container/sync.py:230
+#, python-format
+msgid "Unable to load internal client from config: %r (%s)"
+msgstr ""
+
+#: swift/container/sync.py:264
msgid "Begin container sync \"once\" mode"
msgstr ""
-#: swift/container/sync.py:205
+#: swift/container/sync.py:276
#, python-format
msgid "Container sync \"once\" mode completed: %.02fs"
msgstr ""
-#: swift/container/sync.py:213
+#: swift/container/sync.py:284
#, python-format
msgid ""
"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], "
"%(skip)s skipped, %(fail)s failed"
msgstr ""
-#: swift/container/sync.py:266
+#: swift/container/sync.py:337
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr ""
-#: swift/container/sync.py:322
+#: swift/container/sync.py:393
#, python-format
msgid "ERROR Syncing %s"
msgstr ""
-#: swift/container/sync.py:410
+#: swift/container/sync.py:476
#, python-format
-msgid ""
-"Unknown exception trying to GET: %(node)r %(account)r %(container)r "
-"%(object)r"
+msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
msgstr ""
-#: swift/container/sync.py:444
+#: swift/container/sync.py:510
#, python-format
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr ""
-#: swift/container/sync.py:450
+#: swift/container/sync.py:516
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr ""
-#: swift/container/sync.py:457 swift/container/sync.py:464
+#: swift/container/sync.py:523 swift/container/sync.py:530
#, python-format
msgid "ERROR Syncing %(db_file)s %(row)s"
msgstr ""
@@ -703,8 +717,8 @@ msgstr ""
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr ""
-#: swift/container/updater.py:91 swift/obj/replicator.py:484
-#: swift/obj/replicator.py:570
+#: swift/container/updater.py:91 swift/obj/reconstructor.py:788
+#: swift/obj/replicator.py:487 swift/obj/replicator.py:575
#, python-format
msgid "%s is not mounted"
msgstr ""
@@ -816,42 +830,57 @@ msgstr ""
msgid "ERROR auditing: %s"
msgstr ""
-#: swift/obj/diskfile.py:318
+#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
-#: swift/obj/diskfile.py:409
+#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373
msgid "Error hashing suffix"
msgstr ""
-#: swift/obj/diskfile.py:484 swift/obj/updater.py:169
+#: swift/obj/diskfile.py:486 swift/obj/updater.py:162
#, python-format
-msgid "Directory %s does not map to a valid policy"
+msgid "Directory %r does not map to a valid policy (%s)"
msgstr ""
-#: swift/obj/diskfile.py:678
+#: swift/obj/diskfile.py:737
#, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr ""
-#: swift/obj/diskfile.py:869
+#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795
#, python-format
msgid "Problem cleaning up %s"
msgstr ""
-#: swift/obj/diskfile.py:1168
+#: swift/obj/diskfile.py:1253
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr ""
-#: swift/obj/diskfile.py:1449
+#: swift/obj/diskfile.py:1543
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata "
"%(meta)s"
msgstr ""
+#: swift/obj/diskfile.py:1797
+#, python-format
+msgid "Problem fsyncing durable state file: %s"
+msgstr ""
+
+#: swift/obj/diskfile.py:1802
+#, python-format
+msgid "No space left on device for %s"
+msgstr ""
+
+#: swift/obj/diskfile.py:1806
+#, python-format
+msgid "Problem writing durable state file: %s"
+msgstr ""
+
#: swift/obj/expirer.py:79
#, python-format
msgid "Pass completed in %ds; %d objects expired"
@@ -881,326 +910,394 @@ msgstr ""
msgid "Exception while deleting object %s %s %s"
msgstr ""
-#: swift/obj/mem_server.py:87
+#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472
+#, python-format
+msgid "Invalid response %(resp)s from %(full_path)s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:195
+#, python-format
+msgid "Trying to GET %(full_path)s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:301
+#, python-format
+msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:324
#, python-format
msgid ""
-"ERROR Container update failed: %(status)d response from "
-"%(ip)s:%(port)s/%(dev)s"
+"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed"
+" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
msgstr ""
-#: swift/obj/mem_server.py:93
-#, python-format
-msgid "ERROR container update failed with %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-
-#: swift/obj/replicator.py:138
-#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr ""
-
-#: swift/obj/replicator.py:152
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr ""
-
-#: swift/obj/replicator.py:159 swift/obj/replicator.py:163
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr ""
-
-#: swift/obj/replicator.py:278
-#, python-format
-msgid "Removing %s objects"
-msgstr ""
-
-#: swift/obj/replicator.py:286
-msgid "Error syncing handoff partition"
-msgstr ""
-
-#: swift/obj/replicator.py:292
-#, python-format
-msgid "Removing partition: %s"
-msgstr ""
-
-#: swift/obj/replicator.py:347
-#, python-format
-msgid "%(ip)s/%(device)s responded as unmounted"
-msgstr ""
-
-#: swift/obj/replicator.py:352
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr ""
-
-#: swift/obj/replicator.py:387
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr ""
-
-#: swift/obj/replicator.py:391
-msgid "Error syncing partition"
-msgstr ""
-
-#: swift/obj/replicator.py:404
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-
-#: swift/obj/replicator.py:415
+#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419
#, python-format
msgid ""
"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
"synced"
msgstr ""
-#: swift/obj/replicator.py:422
+#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
-#: swift/obj/replicator.py:430
+#: swift/obj/reconstructor.py:352
+#, python-format
+msgid "Nothing reconstructed for %s seconds."
+msgstr ""
+
+#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463
+msgid "Lockup detected.. killing live coros."
+msgstr ""
+
+#: swift/obj/reconstructor.py:442
+#, python-format
+msgid "Trying to sync suffixes with %s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:467
+#, python-format
+msgid "%s responded as unmounted"
+msgstr ""
+
+#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295
+#, python-format
+msgid "Removing partition: %s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:865
+msgid "Ring change detected. Aborting current reconstruction pass."
+msgstr ""
+
+#: swift/obj/reconstructor.py:884
+msgid "Exception in top-levelreconstruction loop"
+msgstr ""
+
+#: swift/obj/reconstructor.py:894
+msgid "Running object reconstructor in script mode."
+msgstr ""
+
+#: swift/obj/reconstructor.py:903
+#, python-format
+msgid "Object reconstruction complete (once). (%.02f minutes)"
+msgstr ""
+
+#: swift/obj/reconstructor.py:910
+msgid "Starting object reconstructor in daemon mode."
+msgstr ""
+
+#: swift/obj/reconstructor.py:914
+msgid "Starting object reconstruction pass."
+msgstr ""
+
+#: swift/obj/reconstructor.py:919
+#, python-format
+msgid "Object reconstruction complete. (%.02f minutes)"
+msgstr ""
+
+#: swift/obj/replicator.py:139
+#, python-format
+msgid "Killing long-running rsync: %s"
+msgstr ""
+
+#: swift/obj/replicator.py:153
+#, python-format
+msgid "Bad rsync return code: %(ret)d <- %(args)s"
+msgstr ""
+
+#: swift/obj/replicator.py:160 swift/obj/replicator.py:164
+#, python-format
+msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
+msgstr ""
+
+#: swift/obj/replicator.py:281
+#, python-format
+msgid "Removing %s objects"
+msgstr ""
+
+#: swift/obj/replicator.py:289
+msgid "Error syncing handoff partition"
+msgstr ""
+
+#: swift/obj/replicator.py:351
+#, python-format
+msgid "%(ip)s/%(device)s responded as unmounted"
+msgstr ""
+
+#: swift/obj/replicator.py:356
+#, python-format
+msgid "Invalid response %(resp)s from %(ip)s"
+msgstr ""
+
+#: swift/obj/replicator.py:391
+#, python-format
+msgid "Error syncing with node: %s"
+msgstr ""
+
+#: swift/obj/replicator.py:395
+msgid "Error syncing partition"
+msgstr ""
+
+#: swift/obj/replicator.py:408
+#, python-format
+msgid ""
+"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
+"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
+msgstr ""
+
+#: swift/obj/replicator.py:434
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr ""
-#: swift/obj/replicator.py:459
-msgid "Lockup detected.. killing live coros."
-msgstr ""
-
-#: swift/obj/replicator.py:573
+#: swift/obj/replicator.py:578
msgid "Ring change detected. Aborting current replication pass."
msgstr ""
-#: swift/obj/replicator.py:594
+#: swift/obj/replicator.py:599
msgid "Exception in top-level replication loop"
msgstr ""
-#: swift/obj/replicator.py:603
+#: swift/obj/replicator.py:608
msgid "Running object replicator in script mode."
msgstr ""
-#: swift/obj/replicator.py:621
+#: swift/obj/replicator.py:626
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr ""
-#: swift/obj/replicator.py:628
+#: swift/obj/replicator.py:633
msgid "Starting object replicator in daemon mode."
msgstr ""
-#: swift/obj/replicator.py:632
+#: swift/obj/replicator.py:637
msgid "Starting object replication pass."
msgstr ""
-#: swift/obj/replicator.py:637
+#: swift/obj/replicator.py:642
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr ""
-#: swift/obj/server.py:202
+#: swift/obj/server.py:231
#, python-format
msgid ""
"ERROR Container update failed (saving for async update later): %(status)d"
" response from %(ip)s:%(port)s/%(dev)s"
msgstr ""
-#: swift/obj/server.py:209
+#: swift/obj/server.py:238
#, python-format
msgid ""
"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for "
"async update later)"
msgstr ""
-#: swift/obj/server.py:244
+#: swift/obj/server.py:273
#, python-format
msgid ""
"ERROR Container update failed: different numbers of hosts and devices in "
"request: \"%s\" vs \"%s\""
msgstr ""
-#: swift/obj/updater.py:62
+#: swift/obj/updater.py:63
#, python-format
msgid "ERROR: Unable to access %(path)s: %(error)s"
msgstr ""
-#: swift/obj/updater.py:77
+#: swift/obj/updater.py:78
msgid "Begin object update sweep"
msgstr ""
-#: swift/obj/updater.py:103
+#: swift/obj/updater.py:104
#, python-format
msgid ""
"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s"
" successes, %(fail)s failures"
msgstr ""
-#: swift/obj/updater.py:112
+#: swift/obj/updater.py:113
#, python-format
msgid "Object update sweep completed: %.02fs"
msgstr ""
-#: swift/obj/updater.py:121
+#: swift/obj/updater.py:122
msgid "Begin object update single threaded sweep"
msgstr ""
-#: swift/obj/updater.py:135
+#: swift/obj/updater.py:136
#, python-format
msgid ""
"Object update single threaded sweep completed: %(elapsed).02fs, "
"%(success)s successes, %(fail)s failures"
msgstr ""
-#: swift/obj/updater.py:187
+#: swift/obj/updater.py:179
#, python-format
msgid "ERROR async pending file with unexpected name %s"
msgstr ""
-#: swift/obj/updater.py:217
+#: swift/obj/updater.py:209
#, python-format
msgid "ERROR Pickle problem, quarantining %s"
msgstr ""
-#: swift/obj/updater.py:282
+#: swift/obj/updater.py:274
#, python-format
msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
msgstr ""
-#: swift/proxy/server.py:380
+#: swift/proxy/server.py:405
msgid "ERROR Unhandled exception in request"
msgstr ""
-#: swift/proxy/server.py:435
+#: swift/proxy/server.py:460
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr ""
-#: swift/proxy/server.py:452 swift/proxy/server.py:470
+#: swift/proxy/server.py:477 swift/proxy/server.py:495
#, python-format
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr ""
-#: swift/proxy/server.py:540
+#: swift/proxy/server.py:571
#, python-format
msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
msgstr ""
-#: swift/proxy/controllers/account.py:63
+#: swift/proxy/controllers/account.py:64
msgid "Account"
msgstr ""
-#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
-#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
-#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
-#: swift/proxy/controllers/obj.py:593
+#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814
+#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411
+#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643
+#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591
+#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908
+#: swift/proxy/controllers/obj.py:2093
msgid "Object"
msgstr ""
-#: swift/proxy/controllers/base.py:699
+#: swift/proxy/controllers/base.py:753
msgid "Trying to read during GET (retrying)"
msgstr ""
-#: swift/proxy/controllers/base.py:732
+#: swift/proxy/controllers/base.py:815
msgid "Trying to read during GET"
msgstr ""
-#: swift/proxy/controllers/base.py:736
+#: swift/proxy/controllers/base.py:819
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr ""
-#: swift/proxy/controllers/base.py:741
+#: swift/proxy/controllers/base.py:824
msgid "Client disconnected on read"
msgstr ""
-#: swift/proxy/controllers/base.py:743
+#: swift/proxy/controllers/base.py:826
msgid "Trying to send to client"
msgstr ""
-#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049
+#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr ""
-#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
-#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
+#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129
+#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450
+#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138
msgid "ERROR Insufficient Storage"
msgstr ""
-#: swift/proxy/controllers/base.py:820
+#: swift/proxy/controllers/base.py:905
#, python-format
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr ""
-#: swift/proxy/controllers/base.py:1040
+#: swift/proxy/controllers/base.py:1132
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
msgstr ""
-#: swift/proxy/controllers/base.py:1152
+#: swift/proxy/controllers/base.py:1260
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr ""
-#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
+#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161
msgid "Container"
msgstr ""
-#: swift/proxy/controllers/obj.py:320
+#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592
#, python-format
msgid "Trying to write to %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:361
+#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
-#: swift/proxy/controllers/obj.py:367
+#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909
#, python-format
msgid "Expect: 100-continue on %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:383
+#: swift/proxy/controllers/obj.py:428
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr ""
-#: swift/proxy/controllers/obj.py:406
+#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr ""
-#: swift/proxy/controllers/obj.py:663
+#: swift/proxy/controllers/obj.py:716
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:672
+#: swift/proxy/controllers/obj.py:725
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:755
+#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr ""
-#: swift/proxy/controllers/obj.py:762
+#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055
msgid "ERROR Exception causing client disconnect"
msgstr ""
-#: swift/proxy/controllers/obj.py:767
+#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060
msgid "Client disconnected without sending enough data"
msgstr ""
-#: swift/proxy/controllers/obj.py:813
+#: swift/proxy/controllers/obj.py:869
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr ""
-#: swift/proxy/controllers/obj.py:817
+#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218
msgid "Object PUT"
msgstr ""
+#: swift/proxy/controllers/obj.py:2035
+#, python-format
+msgid "Not enough object servers ack'ed (got %d)"
+msgstr ""
+
+#: swift/proxy/controllers/obj.py:2094
+#, python-format
+msgid "Trying to get %s status of PUT to %s"
+msgstr ""
+
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index b123396e0d..8c239cc188 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-03-24 06:06+0000\n"
-"PO-Revision-Date: 2015-03-24 04:20+0000\n"
+"POT-Creation-Date: 2015-04-20 11:15+0200\n"
+"PO-Revision-Date: 2015-04-15 12:48+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Chinese (China) "
"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n"
@@ -65,98 +65,98 @@ msgstr "审计失败%s: %s"
msgid "ERROR Could not get account info %s"
msgstr "错误:无法获取账号信息%s"
-#: swift/account/reaper.py:133 swift/common/utils.py:2127
-#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130
+#: swift/account/reaper.py:134 swift/common/utils.py:2127
+#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "挂载失败 跳过%s"
-#: swift/account/reaper.py:137
+#: swift/account/reaper.py:138
msgid "Exception in top-level account reaper loop"
msgstr "异常出现在top-level账号reaper环"
-#: swift/account/reaper.py:140
+#: swift/account/reaper.py:141
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "设备通过完成: %.02fs"
-#: swift/account/reaper.py:237
+#: swift/account/reaper.py:238
#, python-format
msgid "Beginning pass on account %s"
msgstr "账号%s开始通过"
-#: swift/account/reaper.py:254
+#: swift/account/reaper.py:255
#, python-format
msgid "Exception with containers for account %s"
msgstr "账号%s内容器出现异常"
-#: swift/account/reaper.py:261
+#: swift/account/reaper.py:262
#, python-format
msgid "Exception with account %s"
msgstr "账号%s出现异常"
-#: swift/account/reaper.py:262
+#: swift/account/reaper.py:263
#, python-format
msgid "Incomplete pass on account %s"
msgstr "账号%s未完成通过"
-#: swift/account/reaper.py:264
+#: swift/account/reaper.py:265
#, python-format
msgid ", %s containers deleted"
msgstr ",删除容器%s"
-#: swift/account/reaper.py:266
+#: swift/account/reaper.py:267
#, python-format
msgid ", %s objects deleted"
msgstr ",删除对象%s"
-#: swift/account/reaper.py:268
+#: swift/account/reaper.py:269
#, python-format
msgid ", %s containers remaining"
msgstr ",剩余容器%s"
-#: swift/account/reaper.py:271
+#: swift/account/reaper.py:272
#, python-format
msgid ", %s objects remaining"
msgstr ",剩余对象%s"
-#: swift/account/reaper.py:273
+#: swift/account/reaper.py:274
#, python-format
msgid ", %s containers possibly remaining"
msgstr ",可能剩余容器%s"
-#: swift/account/reaper.py:276
+#: swift/account/reaper.py:277
#, python-format
msgid ", %s objects possibly remaining"
msgstr ",可能剩余对象%s"
-#: swift/account/reaper.py:279
+#: swift/account/reaper.py:280
msgid ", return codes: "
msgstr ",返回代码:"
-#: swift/account/reaper.py:283
+#: swift/account/reaper.py:284
#, python-format
msgid ", elapsed: %.02fs"
msgstr ",耗时:%.02fs"
-#: swift/account/reaper.py:289
+#: swift/account/reaper.py:290
#, python-format
msgid "Account %s has not been reaped since %s"
msgstr "账号%s自%s起未被reaped"
-#: swift/account/reaper.py:348 swift/account/reaper.py:396
-#: swift/account/reaper.py:463 swift/container/updater.py:306
+#: swift/account/reaper.py:349 swift/account/reaper.py:397
+#: swift/account/reaper.py:464 swift/container/updater.py:306
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s出现异常"
-#: swift/account/reaper.py:368
+#: swift/account/reaper.py:369
#, python-format
msgid "Exception with objects for container %(container)s for account %(account)s"
msgstr "账号%(account)s容器%(container)s的对象出现异常"
#: swift/account/server.py:275 swift/container/server.py:582
-#: swift/obj/server.py:730
+#: swift/obj/server.py:910
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr "%(method)s %(path)s出现错误__call__ error"
@@ -272,19 +272,19 @@ msgstr "尝试复制时发生错误"
msgid "Unexpected response: %s"
msgstr "意外响应:%s"
-#: swift/common/manager.py:62
+#: swift/common/manager.py:63
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
msgstr "警告:无法修改文件描述限制。是否按非root运行?"
-#: swift/common/manager.py:69
+#: swift/common/manager.py:70
msgid "WARNING: Unable to modify memory limit. Running as non-root?"
msgstr "警告:无法修改内存极限,是否按非root运行?"
-#: swift/common/manager.py:76
+#: swift/common/manager.py:77
msgid "WARNING: Unable to modify max process limit. Running as non-root?"
msgstr "警告:无法修改最大运行极限,是否按非root运行?"
-#: swift/common/manager.py:194
+#: swift/common/manager.py:195
msgid ""
"\n"
"user quit"
@@ -292,72 +292,72 @@ msgstr ""
"\n"
"用户退出"
-#: swift/common/manager.py:231 swift/common/manager.py:543
+#: swift/common/manager.py:232 swift/common/manager.py:544
#, python-format
msgid "No %s running"
msgstr "无%s账号运行"
-#: swift/common/manager.py:244
+#: swift/common/manager.py:245
#, python-format
msgid "%s (%s) appears to have stopped"
msgstr "%s (%s)显示已停止"
-#: swift/common/manager.py:254
+#: swift/common/manager.py:255
#, python-format
msgid "Waited %s seconds for %s to die; giving up"
msgstr "等待%s秒直到%s停止;放弃"
-#: swift/common/manager.py:437
+#: swift/common/manager.py:438
#, python-format
msgid "Unable to locate config %sfor %s"
-msgstr "无法找到配置%s的%s"
+msgstr ""
-#: swift/common/manager.py:441
+#: swift/common/manager.py:442
msgid "Found configs:"
msgstr "找到配置"
-#: swift/common/manager.py:485
+#: swift/common/manager.py:486
#, python-format
msgid "Signal %s pid: %s signal: %s"
msgstr "发出信号%s pid: %s 信号: %s"
-#: swift/common/manager.py:492
+#: swift/common/manager.py:493
#, python-format
msgid "Removing stale pid file %s"
msgstr "移除原有pid文件%s"
-#: swift/common/manager.py:495
+#: swift/common/manager.py:496
#, python-format
msgid "No permission to signal PID %d"
msgstr "无权限发送信号PID%d"
-#: swift/common/manager.py:540
+#: swift/common/manager.py:541
#, python-format
msgid "%s #%d not running (%s)"
msgstr "%s #%d无法运行(%s)"
-#: swift/common/manager.py:547 swift/common/manager.py:640
-#: swift/common/manager.py:643
+#: swift/common/manager.py:548 swift/common/manager.py:641
+#: swift/common/manager.py:644
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s运行(%s - %s)"
-#: swift/common/manager.py:646
+#: swift/common/manager.py:647
#, python-format
msgid "%s already started..."
msgstr "%s已启动..."
-#: swift/common/manager.py:655
+#: swift/common/manager.py:656
#, python-format
msgid "Running %s once"
msgstr "运行%s一次"
-#: swift/common/manager.py:657
+#: swift/common/manager.py:658
#, python-format
msgid "Starting %s"
msgstr "启动%s"
-#: swift/common/manager.py:664
+#: swift/common/manager.py:665
#, python-format
msgid "%s does not exist"
msgstr "%s不存在"
@@ -377,7 +377,12 @@ msgstr "%(action)s错误 高性能内存对象缓存: %(server)s"
msgid "Error limiting server %s"
msgstr "服务器出现错误%s "
-#: swift/common/request_helpers.py:387
+#: swift/common/request_helpers.py:102
+#, python-format
+msgid "No policy with index %s"
+msgstr ""
+
+#: swift/common/request_helpers.py:395
msgid "ERROR: An error occurred while retrieving segments"
msgstr ""
@@ -440,51 +445,51 @@ msgstr "无法从%s读取设置"
msgid "Unable to find %s config section in %s"
msgstr "无法在%s中查找到%s设置部分"
-#: swift/common/utils.py:2348
+#: swift/common/utils.py:2353
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "无效的X-Container-Sync-To格式%r"
-#: swift/common/utils.py:2353
+#: swift/common/utils.py:2358
#, python-format
msgid "No realm key for %r"
msgstr "%r权限key不存在"
-#: swift/common/utils.py:2357
+#: swift/common/utils.py:2362
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr "%r %r的集群节点不存在"
-#: swift/common/utils.py:2366
+#: swift/common/utils.py:2371
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
msgstr "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
-#: swift/common/utils.py:2370
+#: swift/common/utils.py:2375
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
-#: swift/common/utils.py:2373
+#: swift/common/utils.py:2378
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许"
-#: swift/common/utils.py:2378
+#: swift/common/utils.py:2383
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr "X-Container-Sync-To中无效主机%r"
-#: swift/common/utils.py:2570
+#: swift/common/utils.py:2575
msgid "Exception dumping recon cache"
msgstr "执行dump recon的时候出现异常"
-#: swift/common/wsgi.py:175
+#: swift/common/wsgi.py:197
#, python-format
msgid "Could not bind to %s:%s after trying for %s seconds"
msgstr "尝试过%s秒后无法捆绑%s:%s"
-#: swift/common/wsgi.py:185
+#: swift/common/wsgi.py:207
msgid ""
"WARNING: SSL should only be enabled for testing purposes. Use external "
"SSL termination for a production deployment."
@@ -527,27 +532,27 @@ msgstr ""
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "警告:缺失缓存客户端 无法控制流量 "
-#: swift/common/middleware/recon.py:78
+#: swift/common/middleware/recon.py:80
msgid "Error reading recon cache file"
msgstr "读取recon cache file时出现错误"
-#: swift/common/middleware/recon.py:80
+#: swift/common/middleware/recon.py:82
msgid "Error parsing recon cache file"
msgstr "解析recon cache file时出现错误"
-#: swift/common/middleware/recon.py:82
+#: swift/common/middleware/recon.py:84
msgid "Error retrieving recon data"
msgstr "检索recon data时出现错误"
-#: swift/common/middleware/recon.py:151
+#: swift/common/middleware/recon.py:158
msgid "Error listing devices"
msgstr "设备列表时出现错误"
-#: swift/common/middleware/recon.py:247
+#: swift/common/middleware/recon.py:254
msgid "Error reading ringfile"
msgstr "读取ringfile时出现错误"
-#: swift/common/middleware/recon.py:261
+#: swift/common/middleware/recon.py:268
msgid "Error reading swift.conf"
msgstr "读取swift.conf时出现错误"
@@ -654,16 +659,27 @@ msgid ""
"later)"
msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)"
-#: swift/container/sync.py:193
+#: swift/container/sync.py:217
+msgid ""
+"Configuration option internal_client_conf_path not defined. Using default"
+" configuration, See internal-client.conf-sample for options"
+msgstr ""
+
+#: swift/container/sync.py:230
+#, python-format
+msgid "Unable to load internal client from config: %r (%s)"
+msgstr ""
+
+#: swift/container/sync.py:264
msgid "Begin container sync \"once\" mode"
msgstr "开始容器同步\"once\"模式"
-#: swift/container/sync.py:205
+#: swift/container/sync.py:276
#, python-format
msgid "Container sync \"once\" mode completed: %.02fs"
msgstr "容器同步\"once\"模式完成:%.02fs"
-#: swift/container/sync.py:213
+#: swift/container/sync.py:284
#, python-format
msgid ""
"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], "
@@ -672,36 +688,34 @@ msgstr ""
"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n"
"\"%(skip)s 跳过, %(fail)s 失败"
-#: swift/container/sync.py:266
+#: swift/container/sync.py:337
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
-#: swift/container/sync.py:322
+#: swift/container/sync.py:393
#, python-format
msgid "ERROR Syncing %s"
msgstr "同步时发生错误%s"
-#: swift/container/sync.py:410
+#: swift/container/sync.py:476
#, python-format
-msgid ""
-"Unknown exception trying to GET: %(node)r %(account)r %(container)r "
-"%(object)r"
-msgstr "尝试获取时发生未知的异常%(node)r %(account)r %(container)r %(object)r"
+msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
+msgstr ""
-#: swift/container/sync.py:444
+#: swift/container/sync.py:510
#, python-format
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr "未授权%(sync_from)r => %(sync_to)r"
-#: swift/container/sync.py:450
+#: swift/container/sync.py:516
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
-#: swift/container/sync.py:457 swift/container/sync.py:464
+#: swift/container/sync.py:523 swift/container/sync.py:530
#, python-format
msgid "ERROR Syncing %(db_file)s %(row)s"
msgstr "同步错误 %(db_file)s %(row)s"
@@ -711,8 +725,8 @@ msgstr "同步错误 %(db_file)s %(row)s"
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr "%s未挂载"
-#: swift/container/updater.py:91 swift/obj/replicator.py:484
-#: swift/obj/replicator.py:570
+#: swift/container/updater.py:91 swift/obj/reconstructor.py:788
+#: swift/obj/replicator.py:487 swift/obj/replicator.py:575
#, python-format
msgid "%s is not mounted"
msgstr "%s未挂载"
@@ -834,42 +848,57 @@ msgstr "错误:无法执行审计:%s"
msgid "ERROR auditing: %s"
msgstr "审计错误:%s"
-#: swift/obj/diskfile.py:318
+#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
-#: swift/obj/diskfile.py:409
+#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373
msgid "Error hashing suffix"
msgstr "执行Hashing后缀时发生错误"
-#: swift/obj/diskfile.py:484 swift/obj/updater.py:169
+#: swift/obj/diskfile.py:486 swift/obj/updater.py:162
#, python-format
-msgid "Directory %s does not map to a valid policy"
-msgstr "目录%s无法映射到一个有效的policy"
+msgid "Directory %r does not map to a valid policy (%s)"
+msgstr ""
-#: swift/obj/diskfile.py:678
+#: swift/obj/diskfile.py:737
#, python-format
msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
-#: swift/obj/diskfile.py:869
+#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795
#, python-format
msgid "Problem cleaning up %s"
msgstr "问题清除%s"
-#: swift/obj/diskfile.py:1168
+#: swift/obj/diskfile.py:1253
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
-#: swift/obj/diskfile.py:1449
+#: swift/obj/diskfile.py:1543
#, python-format
msgid ""
"Client path %(client)s does not match path stored in object metadata "
"%(meta)s"
msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
+#: swift/obj/diskfile.py:1797
+#, python-format
+msgid "Problem fsyncing durable state file: %s"
+msgstr ""
+
+#: swift/obj/diskfile.py:1802
+#, python-format
+msgid "No space left on device for %s"
+msgstr ""
+
+#: swift/obj/diskfile.py:1806
+#, python-format
+msgid "Problem writing durable state file: %s"
+msgstr ""
+
#: swift/obj/expirer.py:79
#, python-format
msgid "Pass completed in %ds; %d objects expired"
@@ -899,67 +928,138 @@ msgstr "未处理的异常"
msgid "Exception while deleting object %s %s %s"
msgstr "执行删除对象时发生异常%s %s %s"
-#: swift/obj/mem_server.py:87
+#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472
#, python-format
-msgid ""
-"ERROR Container update failed: %(status)d response from "
-"%(ip)s:%(port)s/%(dev)s"
-msgstr "错误 容器更新失败:%(status)d 从%(ip)s:%(port)s/%(dev)s得到回应"
-
-#: swift/obj/mem_server.py:93
-#, python-format
-msgid "ERROR container update failed with %(ip)s:%(port)s/%(dev)s"
-msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s"
-
-#: swift/obj/replicator.py:138
-#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "终止long-running同步: %s"
-
-#: swift/obj/replicator.py:152
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
-
-#: swift/obj/replicator.py:159 swift/obj/replicator.py:163
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
-
-#: swift/obj/replicator.py:278
-#, python-format
-msgid "Removing %s objects"
+msgid "Invalid response %(resp)s from %(full_path)s"
msgstr ""
-#: swift/obj/replicator.py:286
-msgid "Error syncing handoff partition"
-msgstr "执行同步切换分区时发生错误"
+#: swift/obj/reconstructor.py:195
+#, python-format
+msgid "Trying to GET %(full_path)s"
+msgstr ""
-#: swift/obj/replicator.py:292
+#: swift/obj/reconstructor.py:301
+#, python-format
+msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:324
+#, python-format
+msgid ""
+"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed"
+" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
+msgstr ""
+
+#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419
+#, python-format
+msgid ""
+"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
+"synced"
+msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
+
+#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426
+#, python-format
+msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
+msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
+
+#: swift/obj/reconstructor.py:352
+#, python-format
+msgid "Nothing reconstructed for %s seconds."
+msgstr ""
+
+#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463
+msgid "Lockup detected.. killing live coros."
+msgstr "检测到lockup。终止正在执行的coros"
+
+#: swift/obj/reconstructor.py:442
+#, python-format
+msgid "Trying to sync suffixes with %s"
+msgstr ""
+
+#: swift/obj/reconstructor.py:467
+#, python-format
+msgid "%s responded as unmounted"
+msgstr ""
+
+#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295
#, python-format
msgid "Removing partition: %s"
msgstr "移除分区:%s"
-#: swift/obj/replicator.py:347
+#: swift/obj/reconstructor.py:865
+msgid "Ring change detected. Aborting current reconstruction pass."
+msgstr ""
+
+#: swift/obj/reconstructor.py:884
+msgid "Exception in top-levelreconstruction loop"
+msgstr ""
+
+#: swift/obj/reconstructor.py:894
+msgid "Running object reconstructor in script mode."
+msgstr ""
+
+#: swift/obj/reconstructor.py:903
+#, python-format
+msgid "Object reconstruction complete (once). (%.02f minutes)"
+msgstr ""
+
+#: swift/obj/reconstructor.py:910
+msgid "Starting object reconstructor in daemon mode."
+msgstr ""
+
+#: swift/obj/reconstructor.py:914
+msgid "Starting object reconstruction pass."
+msgstr ""
+
+#: swift/obj/reconstructor.py:919
+#, python-format
+msgid "Object reconstruction complete. (%.02f minutes)"
+msgstr ""
+
+#: swift/obj/replicator.py:139
+#, python-format
+msgid "Killing long-running rsync: %s"
+msgstr "终止long-running同步: %s"
+
+#: swift/obj/replicator.py:153
+#, python-format
+msgid "Bad rsync return code: %(ret)d <- %(args)s"
+msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
+
+#: swift/obj/replicator.py:160 swift/obj/replicator.py:164
+#, python-format
+msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
+msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
+
+#: swift/obj/replicator.py:281
+#, python-format
+msgid "Removing %s objects"
+msgstr ""
+
+#: swift/obj/replicator.py:289
+msgid "Error syncing handoff partition"
+msgstr "执行同步切换分区时发生错误"
+
+#: swift/obj/replicator.py:351
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr "%(ip)s/%(device)s的回应为未挂载"
-#: swift/obj/replicator.py:352
+#: swift/obj/replicator.py:356
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "无效的回应%(resp)s来自%(ip)s"
-#: swift/obj/replicator.py:387
+#: swift/obj/replicator.py:391
#, python-format
msgid "Error syncing with node: %s"
msgstr "执行同步时节点%s发生错误"
-#: swift/obj/replicator.py:391
+#: swift/obj/replicator.py:395
msgid "Error syncing partition"
msgstr "执行同步分区时发生错误"
-#: swift/obj/replicator.py:404
+#: swift/obj/replicator.py:408
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@@ -968,259 +1068,256 @@ msgstr ""
"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-#: swift/obj/replicator.py:415
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
-"synced"
-msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
-
-#: swift/obj/replicator.py:422
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-
-#: swift/obj/replicator.py:430
+#: swift/obj/replicator.py:434
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s秒无复制"
-#: swift/obj/replicator.py:459
-msgid "Lockup detected.. killing live coros."
-msgstr "检测到lockup。终止正在执行的coros"
-
-#: swift/obj/replicator.py:573
+#: swift/obj/replicator.py:578
msgid "Ring change detected. Aborting current replication pass."
msgstr "Ring改变被检测到。退出现有的复制通过"
-#: swift/obj/replicator.py:594
+#: swift/obj/replicator.py:599
msgid "Exception in top-level replication loop"
msgstr "top-level复制圈出现异常"
-#: swift/obj/replicator.py:603
+#: swift/obj/replicator.py:608
msgid "Running object replicator in script mode."
msgstr "在加密模式下执行对象复制"
-#: swift/obj/replicator.py:621
+#: swift/obj/replicator.py:626
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
-#: swift/obj/replicator.py:628
+#: swift/obj/replicator.py:633
msgid "Starting object replicator in daemon mode."
msgstr "在守护模式下开始对象复制"
-#: swift/obj/replicator.py:632
+#: swift/obj/replicator.py:637
msgid "Starting object replication pass."
msgstr "开始通过对象复制"
-#: swift/obj/replicator.py:637
+#: swift/obj/replicator.py:642
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr "对象复制完成。(%.02f minutes)"
-#: swift/obj/server.py:202
+#: swift/obj/server.py:231
#, python-format
msgid ""
"ERROR Container update failed (saving for async update later): %(status)d"
" response from %(ip)s:%(port)s/%(dev)s"
msgstr "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/%(dev)s"
-#: swift/obj/server.py:209
+#: swift/obj/server.py:238
#, python-format
msgid ""
"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for "
"async update later)"
msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)"
-#: swift/obj/server.py:244
+#: swift/obj/server.py:273
#, python-format
msgid ""
"ERROR Container update failed: different numbers of hosts and devices in "
"request: \"%s\" vs \"%s\""
msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\""
-#: swift/obj/updater.py:62
+#: swift/obj/updater.py:63
#, python-format
msgid "ERROR: Unable to access %(path)s: %(error)s"
msgstr ""
-#: swift/obj/updater.py:77
+#: swift/obj/updater.py:78
msgid "Begin object update sweep"
msgstr "开始对象更新扫除"
-#: swift/obj/updater.py:103
+#: swift/obj/updater.py:104
#, python-format
msgid ""
"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s"
" successes, %(fail)s failures"
msgstr "%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败"
-#: swift/obj/updater.py:112
+#: swift/obj/updater.py:113
#, python-format
msgid "Object update sweep completed: %.02fs"
msgstr "对象更新扫除完成:%.02fs"
-#: swift/obj/updater.py:121
+#: swift/obj/updater.py:122
msgid "Begin object update single threaded sweep"
msgstr "开始对象更新单线程扫除"
-#: swift/obj/updater.py:135
+#: swift/obj/updater.py:136
#, python-format
msgid ""
"Object update single threaded sweep completed: %(elapsed).02fs, "
"%(success)s successes, %(fail)s failures"
msgstr "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败"
-#: swift/obj/updater.py:187
+#: swift/obj/updater.py:179
#, python-format
msgid "ERROR async pending file with unexpected name %s"
msgstr "执行同步等待文件 文件名不可知%s"
-#: swift/obj/updater.py:217
+#: swift/obj/updater.py:209
#, python-format
msgid "ERROR Pickle problem, quarantining %s"
msgstr "错误 Pickle问题 隔离%s"
-#: swift/obj/updater.py:282
+#: swift/obj/updater.py:274
#, python-format
msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s"
-#: swift/proxy/server.py:380
+#: swift/proxy/server.py:405
msgid "ERROR Unhandled exception in request"
msgstr "错误 未处理的异常发出请求"
-#: swift/proxy/server.py:435
+#: swift/proxy/server.py:460
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
-#: swift/proxy/server.py:452 swift/proxy/server.py:470
+#: swift/proxy/server.py:477 swift/proxy/server.py:495
#, python-format
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-#: swift/proxy/server.py:540
+#: swift/proxy/server.py:571
#, python-format
msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s"
-#: swift/proxy/controllers/account.py:63
+#: swift/proxy/controllers/account.py:64
msgid "Account"
msgstr "账号"
-#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731
-#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319
-#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382
-#: swift/proxy/controllers/obj.py:593
+#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814
+#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411
+#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643
+#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591
+#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908
+#: swift/proxy/controllers/obj.py:2093
msgid "Object"
msgstr "对象"
-#: swift/proxy/controllers/base.py:699
+#: swift/proxy/controllers/base.py:753
msgid "Trying to read during GET (retrying)"
msgstr "执行GET时尝试读取(重新尝试)"
-#: swift/proxy/controllers/base.py:732
+#: swift/proxy/controllers/base.py:815
msgid "Trying to read during GET"
msgstr "执行GET时尝试读取"
-#: swift/proxy/controllers/base.py:736
+#: swift/proxy/controllers/base.py:819
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "客户尚未从代理处读取%ss"
-#: swift/proxy/controllers/base.py:741
+#: swift/proxy/controllers/base.py:824
msgid "Client disconnected on read"
msgstr "客户读取时中断"
-#: swift/proxy/controllers/base.py:743
+#: swift/proxy/controllers/base.py:826
msgid "Trying to send to client"
msgstr "尝试发送到客户端"
-#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049
+#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
-#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037
-#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402
+#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129
+#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450
+#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138
msgid "ERROR Insufficient Storage"
msgstr "错误 存储空间不足"
-#: swift/proxy/controllers/base.py:820
+#: swift/proxy/controllers/base.py:905
#, python-format
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
-#: swift/proxy/controllers/base.py:1040
+#: swift/proxy/controllers/base.py:1132
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
msgstr ""
-#: swift/proxy/controllers/base.py:1152
+#: swift/proxy/controllers/base.py:1260
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s 返回 503 在 %(statuses)s"
-#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118
+#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161
msgid "Container"
msgstr "容器"
-#: swift/proxy/controllers/obj.py:320
+#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
-#: swift/proxy/controllers/obj.py:361
+#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903
#, python-format
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr ""
-#: swift/proxy/controllers/obj.py:367
+#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "已知:100-continue on %s"
-#: swift/proxy/controllers/obj.py:383
+#: swift/proxy/controllers/obj.py:428
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "尝试执行获取最后的PUT状态%s"
-#: swift/proxy/controllers/obj.py:406
+#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
-#: swift/proxy/controllers/obj.py:663
+#: swift/proxy/controllers/obj.py:716
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr "对象PUT返还 412,%(statuses)r "
-#: swift/proxy/controllers/obj.py:672
+#: swift/proxy/controllers/obj.py:725
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
-#: swift/proxy/controllers/obj.py:755
+#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr "错误 客户读取超时(%ss)"
-#: swift/proxy/controllers/obj.py:762
+#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055
msgid "ERROR Exception causing client disconnect"
msgstr "错误 异常导致客户端中断连接"
-#: swift/proxy/controllers/obj.py:767
+#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060
msgid "Client disconnected without sending enough data"
msgstr "客户中断 尚未发送足够"
-#: swift/proxy/controllers/obj.py:813
+#: swift/proxy/controllers/obj.py:869
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr "对象服务器返还%s不匹配etags"
-#: swift/proxy/controllers/obj.py:817
+#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218
msgid "Object PUT"
msgstr "对象上传"
+#: swift/proxy/controllers/obj.py:2035
+#, python-format
+msgid "Not enough object servers ack'ed (got %d)"
+msgstr ""
+
+#: swift/proxy/controllers/obj.py:2094
+#, python-format
+msgid "Trying to get %s status of PUT to %s"
+msgstr ""
+
From 215cd551df8be066edafd2a1e16d0bd143ec214b Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Tue, 21 Apr 2015 17:38:04 -0700
Subject: [PATCH 15/98] Bulk upload: treat user xattrs as object metadata
Currently, if you PUT a single object, then you can also associate
metadata with it by putting it in the request headers, prefixed with
"X-Object-Meta". However, if you're bulk-uploading objects, then you
have no way to assign any metadata.
The tar file format* allows for arbitrary UTF-8 key/value pairs to be
associated with each file in an archive (as well as with the archive
itself, but we don't care about that here). If a file has extended
attributes, then tar will store those as key/value pairs.
This commit makes bulk upload read those extended attributes, if
present, and convert those to Swift object metadata. Attributes
starting with "user.meta" are converted to object metadata, and
"user.mime_type"** is converted to Content-Type.
For example, if you have a file "setup.py":
$ setfattr -n user.mime_type -v "application/python-setup" setup.py
$ setfattr -n user.meta.lunch -v "burger and fries" setup.py
$ setfattr -n user.meta.dinner -v "baked ziti" setup.py
$ setfattr -n user.stuff -v "whee" setup.py
This will get translated to headers:
Content-Type: application/python-setup
X-Object-Meta-Lunch: burger and fries
X-Object-Meta-Dinner: baked ziti
Swift will handle xattrs stored by both GNU and BSD tar***. Only
xattrs user.mime_type and user.meta.* are processed; others are
ignored.
This brings bulk upload much closer to feature-parity with non-bulk upload.
* The POSIX 1003.1-2001 (pax) format, at least. There are a few
different, mutually-incompatible tar formats out there, because of
course there are. This is the default format on GNU tar 1.27.1 or
later.
** http://standards.freedesktop.org/shared-mime-info-spec/latest/ar01s02.html#idm140622087713936
*** Even with pax-format tarballs, different encoders store xattrs
slightly differently; for example, GNU tar stores the xattr
"user.rubberducky" as pax header "SCHILY.xattr.user.rubberducky",
while BSD tar (which uses libarchive) stores it as
"LIBARCHIVE.xattr.user.rubberducky". One might wonder if this is
some programmer's attempt at job security.
Change-Id: I5e3ce87d31054f5239e86d47c45adbde2bb93640
---
swift/common/middleware/bulk.py | 27 ++++++
test/unit/common/middleware/test_bulk.py | 103 ++++++++++++++++++++++-
2 files changed, 129 insertions(+), 1 deletion(-)
diff --git a/swift/common/middleware/bulk.py b/swift/common/middleware/bulk.py
index 7dc69b6ff1..888ff2356a 100644
--- a/swift/common/middleware/bulk.py
+++ b/swift/common/middleware/bulk.py
@@ -75,6 +75,23 @@ def get_response_body(data_format, data_dict, error_list):
return output
+def pax_key_to_swift_header(pax_key):
+ if (pax_key == u"SCHILY.xattr.user.mime_type" or
+ pax_key == u"LIBARCHIVE.xattr.user.mime_type"):
+ return "Content-Type"
+ elif pax_key.startswith(u"SCHILY.xattr.user.meta."):
+ useful_part = pax_key[len(u"SCHILY.xattr.user.meta."):]
+ return "X-Object-Meta-" + useful_part.encode("utf-8")
+ elif pax_key.startswith(u"LIBARCHIVE.xattr.user.meta."):
+ useful_part = pax_key[len(u"LIBARCHIVE.xattr.user.meta."):]
+ return "X-Object-Meta-" + useful_part.encode("utf-8")
+ else:
+ # You can get things like atime/mtime/ctime or filesystem ACLs in
+ # pax headers; those aren't really user metadata. The same goes for
+ # other, non-user metadata.
+ return None
+
+
class Bulk(object):
"""
Middleware that will do many operations on a single request.
@@ -464,6 +481,16 @@ class Bulk(object):
new_env['HTTP_USER_AGENT'] = \
'%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
create_obj_req = Request.blank(destination, new_env)
+
+ for pax_key, pax_value in tar_info.pax_headers.items():
+ header_name = pax_key_to_swift_header(pax_key)
+ if header_name:
+ # Both pax_key and pax_value are unicode
+ # strings; the key is already UTF-8 encoded, but
+ # we still have to encode the value.
+ create_obj_req.headers[header_name] = \
+ pax_value.encode("utf-8")
+
resp = create_obj_req.get_response(self.app)
containers_accessed.add(container)
if resp.is_success:
diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py
index 0f0b83a7d4..2bd0b78158 100644
--- a/test/unit/common/middleware/test_bulk.py
+++ b/test/unit/common/middleware/test_bulk.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,9 +26,11 @@ from tempfile import mkdtemp
from StringIO import StringIO
from eventlet import sleep
from mock import patch, call
+from test.unit.common.middleware.helpers import FakeSwift
from swift.common import utils, constraints
from swift.common.middleware import bulk
-from swift.common.swob import Request, Response, HTTPException
+from swift.common.swob import Request, Response, HTTPException, \
+ HTTPNoContent, HTTPCreated, HeaderKeyDict
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
@@ -126,6 +129,104 @@ def build_tar_tree(tar, start_path, tree_obj, base_path=''):
tar.addfile(tar_info)
+class TestUntarMetadata(unittest.TestCase):
+ def setUp(self):
+ self.app = FakeSwift()
+ self.bulk = bulk.filter_factory({})(self.app)
+ self.testdir = mkdtemp(suffix='tmp_test_bulk')
+
+ def tearDown(self):
+ rmtree(self.testdir, ignore_errors=1)
+
+ def test_extract_metadata(self):
+ self.app.register('HEAD', '/v1/a/c?extract-archive=tar',
+ HTTPNoContent, {}, None)
+ self.app.register('PUT', '/v1/a/c/obj1?extract-archive=tar',
+ HTTPCreated, {}, None)
+ self.app.register('PUT', '/v1/a/c/obj2?extract-archive=tar',
+ HTTPCreated, {}, None)
+
+ # It's a real pain to instantiate TarInfo objects directly; they
+ # really want to come from a file on disk or a tarball. So, we write
+ # out some files and add pax headers to them as they get placed into
+ # the tarball.
+ with open(os.path.join(self.testdir, "obj1"), "w") as fh1:
+ fh1.write("obj1 contents\n")
+ with open(os.path.join(self.testdir, "obj2"), "w") as fh2:
+ fh2.write("obj2 contents\n")
+
+ tar_ball = StringIO()
+ tar_file = tarfile.TarFile.open(fileobj=tar_ball, mode="w",
+ format=tarfile.PAX_FORMAT)
+
+ # With GNU tar 1.27.1 or later (possibly 1.27 as well), a file with
+ # extended attribute user.thingy = dingy gets put into the tarfile
+ # with pax_headers containing key/value pair
+ # (SCHILY.xattr.user.thingy, dingy), both unicode strings (py2: type
+ # unicode, not type str).
+ #
+ # With BSD tar (libarchive), you get key/value pair
+ # (LIBARCHIVE.xattr.user.thingy, dingy), which strikes me as
+ # gratuitous incompatibility.
+ #
+ # Still, we'll support uploads with both. Just heap more code on the
+ # problem until you can forget it's under there.
+ with open(os.path.join(self.testdir, "obj1")) as fh1:
+ tar_info1 = tar_file.gettarinfo(fileobj=fh1,
+ arcname="obj1")
+ tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \
+ u'application/food-diary'
+ tar_info1.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = \
+ u'sopa de albóndigas'
+ tar_info1.pax_headers[
+ u'SCHILY.xattr.user.meta.afternoon-snack'] = \
+ u'gigantic bucket of coffee'
+ tar_file.addfile(tar_info1, fh1)
+
+ with open(os.path.join(self.testdir, "obj2")) as fh2:
+ tar_info2 = tar_file.gettarinfo(fileobj=fh2,
+ arcname="obj2")
+ tar_info2.pax_headers[
+ u'LIBARCHIVE.xattr.user.meta.muppet'] = u'bert'
+ tar_info2.pax_headers[
+ u'LIBARCHIVE.xattr.user.meta.cat'] = u'fluffy'
+ tar_info2.pax_headers[
+ u'LIBARCHIVE.xattr.user.notmeta'] = u'skipped'
+ tar_file.addfile(tar_info2, fh2)
+
+ tar_ball.seek(0)
+
+ req = Request.blank('/v1/a/c?extract-archive=tar')
+ req.environ['REQUEST_METHOD'] = 'PUT'
+ req.environ['wsgi.input'] = tar_ball
+ req.headers['transfer-encoding'] = 'chunked'
+ req.headers['accept'] = 'application/json;q=1.0'
+
+ resp = req.get_response(self.bulk)
+ self.assertEqual(resp.status_int, 200)
+
+ # sanity check to make sure the upload worked
+ upload_status = utils.json.loads(resp.body)
+ self.assertEqual(upload_status['Number Files Created'], 2)
+
+ put1_headers = HeaderKeyDict(self.app.calls_with_headers[1][2])
+ self.assertEqual(
+ put1_headers.get('Content-Type'),
+ 'application/food-diary')
+ self.assertEqual(
+ put1_headers.get('X-Object-Meta-Lunch'),
+ 'sopa de alb\xc3\xb3ndigas')
+ self.assertEqual(
+ put1_headers.get('X-Object-Meta-Afternoon-Snack'),
+ 'gigantic bucket of coffee')
+
+ put2_headers = HeaderKeyDict(self.app.calls_with_headers[2][2])
+ self.assertEqual(put2_headers.get('X-Object-Meta-Muppet'), 'bert')
+ self.assertEqual(put2_headers.get('X-Object-Meta-Cat'), 'fluffy')
+ self.assertEqual(put2_headers.get('Content-Type'), None)
+ self.assertEqual(put2_headers.get('X-Object-Meta-Blah'), None)
+
+
class TestUntar(unittest.TestCase):
def setUp(self):
From 43ace3c62893364b6e3c130df56438995627598d Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 20 Apr 2015 12:17:56 -0700
Subject: [PATCH 16/98] Make RingBuilders deep-copy-able
We used to be able to deep-copy RingBuilder objects, but the addition
of debug logging (8d3b3b2) broke that since you can't deep-copy a
Python logger. This commit fixes that.
Swift doesn't really deep-copy RingBuilders anywhere, but third-party
code might.
Change-Id: If8bdadd93d9980db3d8a093f32d76ca604de9301
---
swift/cli/ringbuilder.py | 3 +--
swift/common/ring/builder.py | 12 ++++++++++++
test/unit/common/ring/test_builder.py | 21 +++++++++++++++++++++
3 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py
index eac586e267..f5c8c14792 100755
--- a/swift/cli/ringbuilder.py
+++ b/swift/cli/ringbuilder.py
@@ -1073,8 +1073,7 @@ swift-ring-builder write_builder [min_part_hours]
'_last_part_gather_start': 0,
'_remove_devs': [],
}
- builder = RingBuilder(1, 1, 1)
- builder.copy_from(builder_dict)
+ builder = RingBuilder.from_dict(builder_dict)
for parts in builder._replica2part2dev:
for dev_id in parts:
builder.devs[dev_id]['parts'] += 1
diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index 6672fdbecc..c0a37f8a54 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -21,6 +21,7 @@ import logging
import math
import random
import cPickle as pickle
+from copy import deepcopy
from array import array
from collections import defaultdict
@@ -125,6 +126,12 @@ class RingBuilder(object):
'ring, or all devices have been '
'deleted')
+ @classmethod
+ def from_dict(cls, builder_data):
+ b = cls(1, 1, 1) # Dummy values
+ b.copy_from(builder_data)
+ return b
+
def copy_from(self, builder):
"""
Reinitializes this RingBuilder instance from data obtained from the
@@ -173,6 +180,11 @@ class RingBuilder(object):
for dev in self._iter_devs():
dev.setdefault("region", 1)
+ def __deepcopy__(self, memo):
+ the_copy = type(self).from_dict(deepcopy(self.to_dict(), memo))
+ memo[id(self)] = the_copy
+ return the_copy
+
def to_dict(self):
"""
Returns a dict that can be used later with copy_from to
diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py
index e2dc80824c..a05823368c 100644
--- a/test/unit/common/ring/test_builder.py
+++ b/test/unit/common/ring/test_builder.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import copy
import errno
import mock
import operator
@@ -84,6 +85,26 @@ class TestRingBuilder(unittest.TestCase):
ring.RingBuilder(8, 3, 0) # passes by not crashing
self.assertRaises(ValueError, ring.RingBuilder, 8, 3, -1)
+ def test_deepcopy(self):
+ rb = ring.RingBuilder(8, 3, 1)
+ rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
+ 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
+ rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
+ 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
+ rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
+ 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
+ rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
+ 'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
+ rb.rebalance()
+ rb_copy = copy.deepcopy(rb)
+
+ self.assertEqual(rb.to_dict(), rb_copy.to_dict())
+ self.assertTrue(rb.devs is not rb_copy.devs)
+ self.assertTrue(rb._replica2part2dev is not rb_copy._replica2part2dev)
+ self.assertTrue(rb._last_part_moves is not rb_copy._last_part_moves)
+ self.assertTrue(rb._remove_devs is not rb_copy._remove_devs)
+ self.assertTrue(rb._dispersion_graph is not rb_copy._dispersion_graph)
+
def test_get_ring(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
From 03380380efb5d5c0b2a3acfaa64b486ee2cb6e64 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Thu, 23 Apr 2015 19:39:16 -0700
Subject: [PATCH 17/98] Simplify ring.builder.RingBuilder.__deepcopy__
Only container classes (lists, sets, dicts, graphs, collections,
etc) need to track objects they deepcopy in the memo dict -
particularly when they may contain other containers! As they
recreate a new container with the same items as themselves, they'll
reference the memo for each item they contain before making a
deepcopy of it, and place a reference to the copied item into memo
after they do. Trying to help out some other container class in
this endeavor by attempting to add ourselves to the memo dict in
some useful manner on their behalf, however, is not helpful.
All we need to do to make sure we're being a good __deepcopy__
implementation is make sure we pass on memo to our calls of deepcopy
so that other container classes can avoid making additional
deepcopies of our containers if they already have a memoized copy
(which would be odd since unique instances of RingBuilders aren't
expected to share state, but hey - python doesn't have private
attributes so you never know!)
Change-Id: Ifac444dffbf79d650b2d858f6282e05d8ea741a0
---
swift/common/ring/builder.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index c0a37f8a54..e51ef71c9f 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -181,9 +181,7 @@ class RingBuilder(object):
dev.setdefault("region", 1)
def __deepcopy__(self, memo):
- the_copy = type(self).from_dict(deepcopy(self.to_dict(), memo))
- memo[id(self)] = the_copy
- return the_copy
+ return type(self).from_dict(deepcopy(self.to_dict(), memo))
def to_dict(self):
"""
From 8cdf0fdebe9eb782322fccfc11253dc959cf321d Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Mon, 27 Apr 2015 13:29:50 -0700
Subject: [PATCH 18/98] Fix account replication during pre-storage-policy
upgrade
Old account schemas don't send the storage_policy_index key for container rows
during replication, and if the recieving end is already running an upgraded
server it is surprised with a KeyError. Normally this would work itself out
if the old schema recieved any updates from container layer, or a new
container is created, or requires a row sync from another account database -
but if the account databases have rows out of sync and there's no activity in
the account otherwise, there's nothing to force the old schemas to be
upgraded.
Rather than force the old schema that already has a complete set of container
rows to migrate even in the absence of activity we can just fill in default
legacy value for the storage policy index and allow the accounts to get back
in sync and migrate the next time a container update occurs.
FWIW, I was never able to get a cluster upgrade to get stuck in this state without
some sort of account failure that forced them to get their rows out of sync
(in my case I just unlinked a pending file and then made sure to force all my
account databases to commit pending files before upgrading - leading to an
upgraded cluster that absolutely needed account-replication to solve a row
mismatch for inactive accounts with old schemas)
Closes-Bug #1424108
Change-Id: Iaf4ef834eb24f0e11a52cc22b93a864574fabf83
---
swift/account/backend.py | 1 +
test/unit/account/test_backend.py | 41 ++++++++++++++++++++++++++++++-
2 files changed, 41 insertions(+), 1 deletion(-)
diff --git a/swift/account/backend.py b/swift/account/backend.py
index 3ff42518d2..ec28394626 100644
--- a/swift/account/backend.py
+++ b/swift/account/backend.py
@@ -460,6 +460,7 @@ class AccountBroker(DatabaseBroker):
max_rowid = -1
curs = conn.cursor()
for rec in item_list:
+ rec.setdefault('storage_policy_index', 0) # legacy
record = [rec['name'], rec['put_timestamp'],
rec['delete_timestamp'], rec['object_count'],
rec['bytes_used'], rec['deleted'],
diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py
index d231fea741..d262689e87 100644
--- a/test/unit/account/test_backend.py
+++ b/test/unit/account/test_backend.py
@@ -32,7 +32,7 @@ import random
from swift.account.backend import AccountBroker
from swift.common.utils import Timestamp
-from test.unit import patch_policies, with_tempdir
+from test.unit import patch_policies, with_tempdir, make_timestamp_iter
from swift.common.db import DatabaseConnectionError
from swift.common.storage_policy import StoragePolicy, POLICIES
@@ -1120,6 +1120,45 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
conn.execute('SELECT * FROM policy_stat')
conn.execute('SELECT storage_policy_index FROM container')
+ @with_tempdir
+ def test_pre_storage_policy_replication(self, tempdir):
+ ts = make_timestamp_iter()
+
+ # make two account database "replicas"
+ old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'),
+ account='a')
+ old_broker.initialize(ts.next().internal)
+ new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'),
+ account='a')
+ new_broker.initialize(ts.next().internal)
+
+ # manually insert an existing row to avoid migration for old database
+ with old_broker.get() as conn:
+ conn.execute('''
+ INSERT INTO container (name, put_timestamp,
+ delete_timestamp, object_count, bytes_used,
+ deleted)
+ VALUES (?, ?, ?, ?, ?, ?)
+ ''', ('test_name', ts.next().internal, 0, 1, 2, 0))
+ conn.commit()
+
+ # get replication info and rows from old database
+ info = old_broker.get_info()
+ rows = old_broker.get_items_since(0, 10)
+
+ # "send" replication rows to new database
+ new_broker.merge_items(rows, info['id'])
+
+ # make sure "test_name" container is in new database
+ self.assertEqual(new_broker.get_info()['container_count'], 1)
+ for c in new_broker.list_containers_iter(1, None, None, None, None):
+ self.assertEqual(c, ('test_name', 1, 2, 0))
+
+ # full migration successful
+ with new_broker.get() as conn:
+ conn.execute('SELECT * FROM policy_stat')
+ conn.execute('SELECT storage_policy_index FROM container')
+
def pre_track_containers_create_policy_stat(self, conn):
"""
From dbb9d4b7938f24ee588d260c9b51ca65d5095749 Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Mon, 27 Apr 2015 08:52:18 +0200
Subject: [PATCH 19/98] Add missing docstring in direct_client
Added a missing docstring to the direct_delete_container method.
Also checked other docstrings in the same file and fixed a wrong docstring
element in direct_delete_object. Added raises: docstring entry to all methods
that raise an Exception.
Change-Id: If463a0f9ddff3fe2d13f6d97fcfa955e91d0f01f
---
swift/common/direct_client.py | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py
index 35ca24a64c..c95bc44128 100644
--- a/swift/common/direct_client.py
+++ b/swift/common/direct_client.py
@@ -153,6 +153,7 @@ def direct_head_container(node, part, account, container, conn_timeout=5,
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers in a HeaderKeyDict
+ :raises ClientException: HTTP HEAD request failed
"""
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
@@ -200,6 +201,18 @@ def direct_get_container(node, part, account, container, marker=None,
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
+ """
+ Delete container directly from the container server.
+
+ :param node: node dictionary from the ring
+ :param part: partition the container is on
+ :param account: account name
+ :param container: container name
+ :param conn_timeout: timeout in seconds for establishing the connection
+ :param response_timeout: timeout in seconds for getting the response
+ :param headers: dict to be passed into HTTPConnection headers
+ :raises ClientException: HTTP DELETE request failed
+ """
if headers is None:
headers = {}
@@ -274,6 +287,7 @@ def direct_head_object(node, part, account, container, obj, conn_timeout=5,
:param response_timeout: timeout in seconds for getting the response
:param headers: dict to be passed into HTTPConnection headers
:returns: a dict containing the response's headers in a HeaderKeyDict
+ :raises ClientException: HTTP HEAD request failed
"""
if headers is None:
headers = {}
@@ -312,6 +326,7 @@ def direct_get_object(node, part, account, container, obj, conn_timeout=5,
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a HeaderKeyDict.
+ :raises ClientException: HTTP GET request failed
"""
if headers is None:
headers = {}
@@ -363,6 +378,7 @@ def direct_put_object(node, part, account, container, name, contents,
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
+ :raises ClientException: HTTP PUT request failed
"""
path = '/%s/%s/%s' % (account, container, name)
@@ -462,7 +478,7 @@ def direct_delete_object(node, part, account, container, obj,
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
- :returns: response from server
+ :raises ClientException: HTTP DELETE request failed
"""
if headers is None:
headers = {}
@@ -493,7 +509,8 @@ def retry(func, *args, **kwargs):
:param kwargs: keyward arguments to send to func (if retries or
error_log are sent, they will be deleted from kwargs
before sending on to func)
- :returns: restult of func
+ :returns: result of func
+ :raises ClientException: all retries failed
"""
retries = 5
if 'retries' in kwargs:
From bfbc94c3cb34eb9ff288fb817dee667cc870d9eb Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Tue, 28 Apr 2015 10:45:50 +0100
Subject: [PATCH 20/98] Fix intermittent container replicator test failure
Intermittent failure of this test could be due to
insufficient time elapsing between either the local
and remote db's being created or during the
debug_timing calls. This patch forces greater timestamp
separation and forces debug_timing to always log timings.
Also add a message to the failing assertion so if this does
fail again we get some clue as to why.
Closes-Bug: 1369663
Change-Id: I4b69b2e759d586a14abd0931a68dbdf256d57c32
---
test/unit/container/test_replicator.py | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py
index 399bb8bb19..49fea253cc 100644
--- a/test/unit/container/test_replicator.py
+++ b/test/unit/container/test_replicator.py
@@ -30,7 +30,7 @@ from swift.common.utils import Timestamp
from swift.common.storage_policy import POLICIES
from test.unit.common import test_db_replicator
-from test.unit import patch_policies
+from test.unit import patch_policies, make_timestamp_iter
from contextlib import contextmanager
@@ -99,24 +99,26 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
self.assertEqual(1, daemon.stats['no_change'])
def test_sync_remote_with_timings(self):
+ ts_iter = make_timestamp_iter()
# setup a local container
broker = self._get_broker('a', 'c', node_index=0)
- put_timestamp = time.time()
- broker.initialize(put_timestamp, POLICIES.default.idx)
+ put_timestamp = ts_iter.next()
+ broker.initialize(put_timestamp.internal, POLICIES.default.idx)
broker.update_metadata(
- {'x-container-meta-test': ('foo', put_timestamp)})
+ {'x-container-meta-test': ('foo', put_timestamp.internal)})
# setup remote container
remote_broker = self._get_broker('a', 'c', node_index=1)
- remote_broker.initialize(time.time(), POLICIES.default.idx)
- timestamp = time.time()
+ remote_broker.initialize(ts_iter.next().internal, POLICIES.default.idx)
+ timestamp = ts_iter.next()
for db in (broker, remote_broker):
- db.put_object('/a/c/o', timestamp, 0, 'content-type', 'etag',
- storage_policy_index=db.storage_policy_index)
+ db.put_object(
+ '/a/c/o', timestamp.internal, 0, 'content-type', 'etag',
+ storage_policy_index=db.storage_policy_index)
# replicate
daemon = replicator.ContainerReplicator({})
part, node = self._get_broker_part_node(remote_broker)
info = broker.get_replication_info()
- with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', 0):
+ with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', -1):
success = daemon._repl_to_node(node, broker, part, info)
# nothing to do
self.assertTrue(success)
@@ -124,7 +126,10 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
expected_timings = ('info', 'update_metadata', 'merge_timestamps',
'get_sync', 'merge_syncs')
debug_lines = self.rpc.logger.logger.get_lines_for_level('debug')
- self.assertEqual(len(expected_timings), len(debug_lines))
+ self.assertEqual(len(expected_timings), len(debug_lines),
+ 'Expected %s debug lines but only got %s: %s' %
+ (len(expected_timings), len(debug_lines),
+ debug_lines))
for metric in expected_timings:
expected = 'replicator-rpc-sync time for %s:' % metric
self.assert_(any(expected in line for line in debug_lines),
From 03536dbb55c219b94389e635babec2df2e2759fa Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Wed, 29 Apr 2015 12:11:59 +0530
Subject: [PATCH 21/98] Fix incorrect passing of file object to fsync()
swift.common.utils.fsync() requires a file descriptor as argument
but file object handle was being passed.
Change-Id: I316b58f6bc37de0945eff551e4e50565653664f5
Signed-off-by: Prashanth Pai
---
swift/obj/diskfile.py | 4 ++--
test/unit/obj/test_diskfile.py | 3 +--
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 39eff67bde..3920315551 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -1786,8 +1786,8 @@ class ECDiskFileWriter(DiskFileWriter):
def _finalize_durable(self, durable_file_path):
exc = msg = None
try:
- with open(durable_file_path, 'w') as _fd:
- fsync(_fd)
+ with open(durable_file_path, 'w') as _fp:
+ fsync(_fp.fileno())
try:
self.manager.hash_cleanup_listdir(self._datadir)
except OSError:
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 2ccf3b1364..a84cafc8b4 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -2650,8 +2650,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
}[policy.policy_type]
self.assertEqual(expected, mock_fsync.call_count)
if policy.policy_type == EC_POLICY:
- durable_file = '%s.durable' % timestamp.internal
- self.assertTrue(durable_file in str(mock_fsync.call_args[0]))
+ self.assertTrue(isinstance(mock_fsync.call_args[0][0], int))
def test_commit_ignores_hash_cleanup_listdir_error(self):
for policy in POLICIES:
From 94215049fd37f810ddf4e6b8122cce02aea3e6e3 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 4 May 2015 15:08:51 -0700
Subject: [PATCH 22/98] Bump up a timeout in a test
Got a slow crappy VM like I do? You might see this fail
occasionally. Bump up the timeout a little to help it out.
Change-Id: I8c0e5b99012830ea3525fa55b0811268db3da2a2
---
test/unit/common/test_utils.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 22aa3db5e1..48610c1a7b 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -4482,7 +4482,7 @@ class TestGreenAsyncPile(unittest.TestCase):
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
- self.assertEqual(pile.waitall(0.2), [0.1])
+ self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
From c77c79b2c9b9ca7790c29577341cf36c5e9012cf Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Tue, 5 May 2015 06:08:09 +0000
Subject: [PATCH 23/98] Imported Translations from Transifex
For more information about this automatic import see:
https://wiki.openstack.org/wiki/Translations/Infrastructure
Change-Id: I9b0156c7fc315182d80604bf353586455bbc34d1
---
swift/locale/zh_CN/LC_MESSAGES/swift.po | 523 +++---------------------
1 file changed, 61 insertions(+), 462 deletions(-)
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index 36f2767712..1352c93f83 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -8,283 +8,226 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-04-16 06:06+0000\n"
+"POT-Creation-Date: 2015-05-05 06:08+0000\n"
"PO-Revision-Date: 2015-04-15 12:48+0000\n"
"Last-Translator: openstackjenkins \n"
-"Language-Team: Chinese (China) "
-"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n"
+"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/"
+"language/zh_CN/)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: swift/account/auditor.py:59
#, python-format
msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed"
-" audit"
+"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
+"audit"
msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败"
-#: swift/account/auditor.py:82
msgid "Begin account audit pass."
msgstr "开始账号审计通过"
-#: swift/account/auditor.py:88 swift/container/auditor.py:86
msgid "ERROR auditing"
msgstr "错误 审计"
-#: swift/account/auditor.py:93
#, python-format
msgid "Account audit pass completed: %.02fs"
msgstr "账号审计完成:%.02fs"
-#: swift/account/auditor.py:99
msgid "Begin account audit \"once\" mode"
msgstr "开始账号审计\"once\"模式"
-#: swift/account/auditor.py:104
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "账号审计\"once\"模式完成: %.02fs"
-#: swift/account/auditor.py:123
#, python-format
msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of"
-" %(key)s across policies (%(sum)s)"
+"The total %(key)s for the container (%(total)s) does not match the sum of "
+"%(key)s across policies (%(sum)s)"
msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
-#: swift/account/auditor.py:149
#, python-format
msgid "Audit Failed for %s: %s"
msgstr "审计失败%s: %s"
-#: swift/account/auditor.py:153
#, python-format
msgid "ERROR Could not get account info %s"
msgstr "错误:无法获取账号信息%s"
-#: swift/account/reaper.py:134 swift/common/utils.py:2127
-#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "挂载失败 跳过%s"
-#: swift/account/reaper.py:138
msgid "Exception in top-level account reaper loop"
msgstr "异常出现在top-level账号reaper环"
-#: swift/account/reaper.py:141
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "设备通过完成: %.02fs"
-#: swift/account/reaper.py:238
#, python-format
msgid "Beginning pass on account %s"
msgstr "账号%s开始通过"
-#: swift/account/reaper.py:255
#, python-format
msgid "Exception with containers for account %s"
msgstr "账号%s内容器出现异常"
-#: swift/account/reaper.py:262
#, python-format
msgid "Exception with account %s"
msgstr "账号%s出现异常"
-#: swift/account/reaper.py:263
#, python-format
msgid "Incomplete pass on account %s"
msgstr "账号%s未完成通过"
-#: swift/account/reaper.py:265
#, python-format
msgid ", %s containers deleted"
msgstr ",删除容器%s"
-#: swift/account/reaper.py:267
#, python-format
msgid ", %s objects deleted"
msgstr ",删除对象%s"
-#: swift/account/reaper.py:269
#, python-format
msgid ", %s containers remaining"
msgstr ",剩余容器%s"
-#: swift/account/reaper.py:272
#, python-format
msgid ", %s objects remaining"
msgstr ",剩余对象%s"
-#: swift/account/reaper.py:274
#, python-format
msgid ", %s containers possibly remaining"
msgstr ",可能剩余容器%s"
-#: swift/account/reaper.py:277
#, python-format
msgid ", %s objects possibly remaining"
msgstr ",可能剩余对象%s"
-#: swift/account/reaper.py:280
msgid ", return codes: "
msgstr ",返回代码:"
-#: swift/account/reaper.py:284
#, python-format
msgid ", elapsed: %.02fs"
msgstr ",耗时:%.02fs"
-#: swift/account/reaper.py:290
#, python-format
msgid "Account %s has not been reaped since %s"
msgstr "账号%s自%s起未被reaped"
-#: swift/account/reaper.py:349 swift/account/reaper.py:397
-#: swift/account/reaper.py:464 swift/container/updater.py:306
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s出现异常"
-#: swift/account/reaper.py:369
#, python-format
-msgid "Exception with objects for container %(container)s for account %(account)s"
+msgid ""
+"Exception with objects for container %(container)s for account %(account)s"
msgstr "账号%(account)s容器%(container)s的对象出现异常"
-#: swift/account/server.py:275 swift/container/server.py:582
-#: swift/obj/server.py:910
#, python-format
msgid "ERROR __call__ error with %(method)s %(path)s "
msgstr "%(method)s %(path)s出现错误__call__ error"
-#: swift/common/bufferedhttp.py:157
#, python-format
msgid "Error encoding to UTF-8: %s"
msgstr "UTF-8编码错误:%s"
-#: swift/common/container_sync_realms.py:59
-#: swift/common/container_sync_realms.py:68
#, python-format
msgid "Could not load %r: %s"
msgstr "无法下载%r: %s"
-#: swift/common/container_sync_realms.py:81
#, python-format
msgid "Error in %r with mtime_check_interval: %s"
msgstr "%r中mtime_check_interval出现错误:%s"
-#: swift/common/db.py:347
#, python-format
msgid "Quarantined %s to %s due to %s database"
msgstr "隔离%s和%s 因为%s数据库"
-#: swift/common/db.py:402
msgid "Broker error trying to rollback locked connection"
msgstr "服务器错误并尝试去回滚已经锁住的链接"
-#: swift/common/db.py:605
#, python-format
msgid "Invalid pending entry %(file)s: %(entry)s"
msgstr "不可用的等待输入%(file)s: %(entry)s"
-#: swift/common/db_replicator.py:143
#, python-format
msgid "ERROR reading HTTP response from %s"
msgstr "读取HTTP错误 响应来源%s"
-#: swift/common/db_replicator.py:193
#, python-format
-msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
+msgid ""
+"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs"
-#: swift/common/db_replicator.py:199
#, python-format
msgid "Removed %(remove)d dbs"
msgstr "删除%(remove)d dbs"
-#: swift/common/db_replicator.py:200
#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s成功,%(failure)s失败"
-#: swift/common/db_replicator.py:231
#, python-format
msgid "ERROR rsync failed with %(code)s: %(args)s"
msgstr "错误 rsync失败 %(code)s: %(args)s"
-#: swift/common/db_replicator.py:294
#, python-format
msgid "ERROR Bad response %(status)s from %(host)s"
msgstr "失败响应错误%(status)s来自%(host)s"
-#: swift/common/db_replicator.py:453 swift/common/db_replicator.py:678
#, python-format
msgid "Quarantining DB %s"
msgstr "隔离DB%s"
-#: swift/common/db_replicator.py:456
#, python-format
msgid "ERROR reading db %s"
msgstr "错误 读取db %s"
-#: swift/common/db_replicator.py:487
#, python-format
msgid "ERROR Remote drive not mounted %s"
msgstr "错误 远程驱动器无法挂载 %s"
-#: swift/common/db_replicator.py:489
#, python-format
msgid "ERROR syncing %(file)s with node %(node)s"
msgstr "错误 同步 %(file)s 和 节点%(node)s"
-#: swift/common/db_replicator.py:517
#, python-format
msgid "ERROR while trying to clean up %s"
msgstr "清理时出现错误%s"
-#: swift/common/db_replicator.py:543
msgid "ERROR Failed to get my own IPs?"
msgstr "错误 无法获得我方IPs?"
-#: swift/common/db_replicator.py:553
#, python-format
msgid "Skipping %(device)s as it is not mounted"
msgstr "因无法挂载跳过%(device)s"
-#: swift/common/db_replicator.py:562
msgid "Beginning replication run"
msgstr "开始运行复制"
-#: swift/common/db_replicator.py:567
msgid "Replication run OVER"
msgstr "复制运行结束"
-#: swift/common/db_replicator.py:580
msgid "ERROR trying to replicate"
msgstr "尝试复制时发生错误"
-#: swift/common/internal_client.py:193
#, python-format
msgid "Unexpected response: %s"
msgstr "意外响应:%s"
-#: swift/common/manager.py:63
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
msgstr "警告:无法修改文件描述限制。是否按非root运行?"
-#: swift/common/manager.py:70
msgid "WARNING: Unable to modify memory limit. Running as non-root?"
msgstr "警告:无法修改内存极限,是否按非root运行?"
-#: swift/common/manager.py:77
msgid "WARNING: Unable to modify max process limit. Running as non-root?"
msgstr "警告:无法修改最大运行极限,是否按非root运行?"
-#: swift/common/manager.py:195
msgid ""
"\n"
"user quit"
@@ -292,239 +235,164 @@ msgstr ""
"\n"
"用户退出"
-#: swift/common/manager.py:232 swift/common/manager.py:547
#, python-format
msgid "No %s running"
msgstr "无%s账号运行"
-#: swift/common/manager.py:245
#, python-format
msgid "%s (%s) appears to have stopped"
msgstr "%s (%s)显示已停止"
-#: swift/common/manager.py:255
#, python-format
msgid "Waited %s seconds for %s to die; giving up"
msgstr "等待%s秒直到%s停止;放弃"
-#: swift/common/manager.py:439
-#, python-format
-msgid "Unable to locate config number %s for %s"
-msgstr ""
-
-#: swift/common/manager.py:442
-#, python-format
-msgid "Unable to locate config for %s"
-msgstr ""
-
-#: swift/common/manager.py:445
msgid "Found configs:"
msgstr "找到配置"
-#: swift/common/manager.py:489
#, python-format
msgid "Signal %s pid: %s signal: %s"
msgstr "发出信号%s pid: %s 信号: %s"
-#: swift/common/manager.py:496
#, python-format
msgid "Removing stale pid file %s"
msgstr "移除原有pid文件%s"
-#: swift/common/manager.py:499
#, python-format
msgid "No permission to signal PID %d"
msgstr "无权限发送信号PID%d"
-#: swift/common/manager.py:544
#, python-format
msgid "%s #%d not running (%s)"
msgstr "%s #%d无法运行(%s)"
-#: swift/common/manager.py:551 swift/common/manager.py:644
-#: swift/common/manager.py:647
#, python-format
msgid "%s running (%s - %s)"
msgstr "%s运行(%s - %s)"
-#: swift/common/manager.py:650
#, python-format
msgid "%s already started..."
msgstr "%s已启动..."
-#: swift/common/manager.py:659
#, python-format
msgid "Running %s once"
msgstr "运行%s一次"
-#: swift/common/manager.py:661
#, python-format
msgid "Starting %s"
msgstr "启动%s"
-#: swift/common/manager.py:668
#, python-format
msgid "%s does not exist"
msgstr "%s不存在"
-#: swift/common/memcached.py:191
#, python-format
msgid "Timeout %(action)s to memcached: %(server)s"
msgstr "%(action)s超时 高性能内存对象缓存: %(server)s"
-#: swift/common/memcached.py:194
#, python-format
msgid "Error %(action)s to memcached: %(server)s"
msgstr "%(action)s错误 高性能内存对象缓存: %(server)s"
-#: swift/common/memcached.py:219
#, python-format
msgid "Error limiting server %s"
msgstr "服务器出现错误%s "
-#: swift/common/request_helpers.py:102
-#, python-format
-msgid "No policy with index %s"
-msgstr ""
-
-#: swift/common/request_helpers.py:395
-msgid "ERROR: An error occurred while retrieving segments"
-msgstr ""
-
-#: swift/common/utils.py:388
#, python-format
msgid "Unable to locate %s in libc. Leaving as a no-op."
msgstr "无法查询到%s 保留为no-op"
-#: swift/common/utils.py:578
-msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
+msgid ""
+"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
msgstr "无法查询到fallocate, posix_fallocate。保存为no-op"
-#: swift/common/utils.py:662
-#, python-format
-msgid "Unable to perform fsync() on directory %s: %s"
-msgstr ""
-
-#: swift/common/utils.py:1074
-#, python-format
-msgid "%s: Connection reset by peer"
-msgstr ""
-
-#: swift/common/utils.py:1076 swift/common/utils.py:1079
-#, python-format
-msgid "%s: %s"
-msgstr ""
-
-#: swift/common/utils.py:1314
msgid "Connection refused"
msgstr "连接被拒绝"
-#: swift/common/utils.py:1316
msgid "Host unreachable"
msgstr "无法连接到主机"
-#: swift/common/utils.py:1318
msgid "Connection timeout"
msgstr "连接超时"
-#: swift/common/utils.py:1620
msgid "UNCAUGHT EXCEPTION"
msgstr "未捕获的异常"
-#: swift/common/utils.py:1675
msgid "Error: missing config path argument"
msgstr "错误:设置路径信息丢失"
-#: swift/common/utils.py:1680
#, python-format
msgid "Error: unable to locate %s"
msgstr "错误:无法查询到 %s"
-#: swift/common/utils.py:1988
#, python-format
msgid "Unable to read config from %s"
msgstr "无法从%s读取设置"
-#: swift/common/utils.py:1994
#, python-format
msgid "Unable to find %s config section in %s"
msgstr "无法在%s中查找到%s设置部分"
-#: swift/common/utils.py:2353
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "无效的X-Container-Sync-To格式%r"
-#: swift/common/utils.py:2358
#, python-format
msgid "No realm key for %r"
msgstr "%r权限key不存在"
-#: swift/common/utils.py:2362
#, python-format
msgid "No cluster endpoint for %r %r"
msgstr "%r %r的集群节点不存在"
-#: swift/common/utils.py:2371
#, python-format
msgid ""
"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
"\"https\"."
-msgstr "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
+msgstr ""
+"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
-#: swift/common/utils.py:2375
msgid "Path required in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中路径是必须的"
-#: swift/common/utils.py:2378
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许"
-#: swift/common/utils.py:2383
#, python-format
msgid "Invalid host %r in X-Container-Sync-To"
msgstr "X-Container-Sync-To中无效主机%r"
-#: swift/common/utils.py:2575
msgid "Exception dumping recon cache"
msgstr "执行dump recon的时候出现异常"
-#: swift/common/wsgi.py:197
#, python-format
msgid "Could not bind to %s:%s after trying for %s seconds"
msgstr "尝试过%s秒后无法捆绑%s:%s"
-#: swift/common/wsgi.py:207
msgid ""
-"WARNING: SSL should only be enabled for testing purposes. Use external "
-"SSL termination for a production deployment."
+"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
+"termination for a production deployment."
msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端"
-#: swift/common/middleware/catch_errors.py:43
msgid "Error: An error occurred"
msgstr "错误:一个错误发生了"
-#: swift/common/middleware/cname_lookup.py:144
#, python-format
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "集合%(given_domain)s到%(found_domain)s"
-#: swift/common/middleware/cname_lookup.py:156
#, python-format
msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s"
-#: swift/common/middleware/ratelimit.py:248
#, python-format
msgid "Returning 497 because of blacklisting: %s"
msgstr "返回497因为黑名单:%s"
-#: swift/common/middleware/ratelimit.py:263
#, python-format
msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s"
msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s"
-#: swift/common/middleware/ratelimit.py:271
#, python-format
msgid ""
"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
@@ -533,538 +401,348 @@ msgstr ""
"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n"
"\"Sleep) %(e)s"
-#: swift/common/middleware/ratelimit.py:293
msgid "Warning: Cannot ratelimit without a memcached client"
msgstr "警告:缺失缓存客户端 无法控制流量 "
-#: swift/common/middleware/recon.py:80
msgid "Error reading recon cache file"
msgstr "读取recon cache file时出现错误"
-#: swift/common/middleware/recon.py:82
msgid "Error parsing recon cache file"
msgstr "解析recon cache file时出现错误"
-#: swift/common/middleware/recon.py:84
msgid "Error retrieving recon data"
msgstr "检索recon data时出现错误"
-#: swift/common/middleware/recon.py:158
msgid "Error listing devices"
msgstr "设备列表时出现错误"
-#: swift/common/middleware/recon.py:254
msgid "Error reading ringfile"
msgstr "读取ringfile时出现错误"
-#: swift/common/middleware/recon.py:268
msgid "Error reading swift.conf"
msgstr "读取swift.conf时出现错误"
-#: swift/common/middleware/xprofile.py:243
#, python-format
msgid "Error on render profiling results: %s"
msgstr "给予分析结果时发生错误:%s"
-#: swift/common/middleware/x_profile/exceptions.py:25
#, python-format
msgid "Profiling Error: %s"
msgstr "分析代码时出现错误:%s"
-#: swift/common/middleware/x_profile/html_viewer.py:306
#, python-format
msgid "method %s is not allowed."
msgstr "方法%s不被允许"
-#: swift/common/middleware/x_profile/html_viewer.py:317
#, python-format
msgid "Can not load profile data from %s."
msgstr "无法从%s下载分析数据"
-#: swift/common/middleware/x_profile/html_viewer.py:369
-#: swift/common/middleware/x_profile/html_viewer.py:399
msgid "no log file found"
msgstr "日志文件丢失"
-#: swift/common/middleware/x_profile/html_viewer.py:392
#, python-format
msgid "Data download error: %s"
msgstr "数据下载错误:%s"
-#: swift/common/middleware/x_profile/html_viewer.py:397
msgid "python-matplotlib not installed."
msgstr "python-matplotlib未安装"
-#: swift/common/middleware/x_profile/html_viewer.py:433
#, python-format
msgid "plotting results failed due to %s"
msgstr "绘制结果图标时失败因为%s"
-#: swift/common/middleware/x_profile/html_viewer.py:444
msgid "The file type are forbidden to access!"
msgstr "该文件类型被禁止访问!"
-#: swift/common/middleware/x_profile/html_viewer.py:465
#, python-format
msgid "Can not access the file %s."
msgstr "无法访问文件%s"
-#: swift/common/middleware/x_profile/profile_model.py:128
msgid "odfpy not installed."
msgstr "odfpy未安装"
-#: swift/container/auditor.py:58
#, python-format
msgid ""
"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
"audit"
msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败"
-#: swift/container/auditor.py:80
msgid "Begin container audit pass."
msgstr "开始通过容器审计"
-#: swift/container/auditor.py:91
#, python-format
msgid "Container audit pass completed: %.02fs"
msgstr "容器审计通过完成: %.02fs"
-#: swift/container/auditor.py:97
msgid "Begin container audit \"once\" mode"
msgstr "开始容器审计\"once\" 模式"
-#: swift/container/auditor.py:102
#, python-format
msgid "Container audit \"once\" mode completed: %.02fs"
msgstr "容器审计\"once\"模式完成:%.02fs"
-#: swift/container/auditor.py:123
#, python-format
msgid "ERROR Could not get container info %s"
msgstr "错误:无法获取容器%s信息"
-#: swift/container/server.py:180
#, python-format
msgid ""
"ERROR Account update failed: different numbers of hosts and devices in "
"request: \"%s\" vs \"%s\""
msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\""
-#: swift/container/server.py:221
#, python-format
msgid ""
"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later): Response %(status)s %(reason)s"
-msgstr "出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 %(status)s %(reason)s"
+msgstr ""
+"出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 "
+"%(status)s %(reason)s"
-#: swift/container/server.py:230
#, python-format
msgid ""
"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later)"
msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)"
-#: swift/container/sync.py:217
-msgid ""
-"Configuration option internal_client_conf_path not defined. Using default"
-" configuration, See internal-client.conf-sample for options"
-msgstr ""
-
-#: swift/container/sync.py:230
-#, python-format
-msgid "Unable to load internal client from config: %r (%s)"
-msgstr ""
-
-#: swift/container/sync.py:264
msgid "Begin container sync \"once\" mode"
msgstr "开始容器同步\"once\"模式"
-#: swift/container/sync.py:276
#, python-format
msgid "Container sync \"once\" mode completed: %.02fs"
msgstr "容器同步\"once\"模式完成:%.02fs"
-#: swift/container/sync.py:284
#, python-format
msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], "
-"%(skip)s skipped, %(fail)s failed"
+"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
+"skipped, %(fail)s failed"
msgstr ""
"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n"
"\"%(skip)s 跳过, %(fail)s 失败"
-#: swift/container/sync.py:337
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
-#: swift/container/sync.py:393
#, python-format
msgid "ERROR Syncing %s"
msgstr "同步时发生错误%s"
-#: swift/container/sync.py:476
-#, python-format
-msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r"
-msgstr ""
-
-#: swift/container/sync.py:510
#, python-format
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr "未授权%(sync_from)r => %(sync_to)r"
-#: swift/container/sync.py:516
#, python-format
msgid ""
"Not found %(sync_from)r => %(sync_to)r - object "
"%(obj_name)r"
msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
-#: swift/container/sync.py:523 swift/container/sync.py:530
#, python-format
msgid "ERROR Syncing %(db_file)s %(row)s"
msgstr "同步错误 %(db_file)s %(row)s"
-#: swift/container/updater.py:77
#, python-format
msgid "ERROR: Failed to get paths to drive partitions: %s"
msgstr "%s未挂载"
-#: swift/container/updater.py:91 swift/obj/reconstructor.py:788
-#: swift/obj/replicator.py:487 swift/obj/replicator.py:575
#, python-format
msgid "%s is not mounted"
msgstr "%s未挂载"
-#: swift/container/updater.py:110
#, python-format
msgid "ERROR with loading suppressions from %s: "
msgstr "执行下载压缩时发生错误%s"
-#: swift/container/updater.py:120
msgid "Begin container update sweep"
msgstr "开始容器更新扫除"
-#: swift/container/updater.py:154
#, python-format
msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
+"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
+"successes, %(fail)s failures, %(no_change)s with no changes"
msgstr ""
-"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, "
-"%(no_change)s 无更改"
+"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, "
+"%(fail)s 失败, %(no_change)s 无更改"
-#: swift/container/updater.py:168
#, python-format
msgid "Container update sweep completed: %.02fs"
msgstr "容器更新扫除完成:%.02fs"
-#: swift/container/updater.py:180
msgid "Begin container update single threaded sweep"
msgstr "开始容器更新单线程扫除"
-#: swift/container/updater.py:188
#, python-format
msgid ""
"Container update single threaded sweep completed: %(elapsed).02fs, "
"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
msgstr ""
-"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, %(no_change)s "
-"无更改"
+"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, "
+"%(no_change)s 无更改"
-#: swift/container/updater.py:243
#, python-format
msgid "Update report sent for %(container)s %(dbfile)s"
msgstr "更新报告发至%(container)s %(dbfile)s"
-#: swift/container/updater.py:252
#, python-format
msgid "Update report failed for %(container)s %(dbfile)s"
msgstr "%(container)s %(dbfile)s更新报告失败"
-#: swift/container/updater.py:294
#, python-format
msgid ""
"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
"later): "
msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):"
-#: swift/obj/auditor.py:78
#, python-format
msgid " - parallel, %s"
msgstr "-平行,%s"
-#: swift/obj/auditor.py:80
#, python-format
msgid " - %s"
msgstr "- %s"
-#: swift/obj/auditor.py:81
#, python-format
msgid "Begin object audit \"%s\" mode (%s%s)"
msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)"
-#: swift/obj/auditor.py:100
#, python-format
msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d "
-"passed, %(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f ,"
-" bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: "
-"%(audit).2f, Rate: %(audit_rate).2f"
+"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
+"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
+"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
+"%(audit_rate).2f"
msgstr ""
-"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d 隔离, "
-"%(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: %(total).2f,"
-" 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
+"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d "
+"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: "
+"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
-#: swift/obj/auditor.py:134
#, python-format
msgid ""
-"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. "
-"Total quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
-"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, "
-"Rate: %(audit_rate).2f"
+"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
+"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: "
+"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: "
+"%(audit_rate).2f"
msgstr ""
-"对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: %(quars)d, "
-"错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: %(brate).2f, 审计时间: "
-"%(audit).2f, 速率: %(audit_rate).2f"
+"对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: "
+"%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: "
+"%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
-#: swift/obj/auditor.py:149
#, python-format
msgid "Object audit stats: %s"
msgstr "对象审计统计:%s"
-#: swift/obj/auditor.py:177
#, python-format
msgid "ERROR Trying to audit %s"
msgstr "错误 尝试开始审计%s"
-#: swift/obj/auditor.py:213
#, python-format
msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s"
-#: swift/obj/auditor.py:263
#, python-format
msgid "ERROR: Unable to run auditing: %s"
msgstr "错误:无法执行审计:%s"
-#: swift/obj/auditor.py:334 swift/obj/auditor.py:355
#, python-format
msgid "ERROR auditing: %s"
msgstr "审计错误:%s"
-#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
-#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373
msgid "Error hashing suffix"
msgstr "执行Hashing后缀时发生错误"
-#: swift/obj/diskfile.py:486 swift/obj/updater.py:162
#, python-format
-msgid "Directory %r does not map to a valid policy (%s)"
-msgstr ""
-
-#: swift/obj/diskfile.py:737
-#, python-format
-msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
+msgid ""
+"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
-#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795
#, python-format
msgid "Problem cleaning up %s"
msgstr "问题清除%s"
-#: swift/obj/diskfile.py:1253
#, python-format
msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
-#: swift/obj/diskfile.py:1543
#, python-format
msgid ""
-"Client path %(client)s does not match path stored in object metadata "
-"%(meta)s"
+"Client path %(client)s does not match path stored in object metadata %(meta)s"
msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
-#: swift/obj/diskfile.py:1797
-#, python-format
-msgid "Problem fsyncing durable state file: %s"
-msgstr ""
-
-#: swift/obj/diskfile.py:1802
-#, python-format
-msgid "No space left on device for %s"
-msgstr ""
-
-#: swift/obj/diskfile.py:1806
-#, python-format
-msgid "Problem writing durable state file: %s"
-msgstr ""
-
-#: swift/obj/expirer.py:79
#, python-format
msgid "Pass completed in %ds; %d objects expired"
msgstr "%ds通过完成; %d对象过期"
-#: swift/obj/expirer.py:86
#, python-format
msgid "Pass so far %ds; %d objects expired"
msgstr "%ds目前通过;%d对象过期"
-#: swift/obj/expirer.py:170
#, python-format
msgid "Pass beginning; %s possible containers; %s possible objects"
msgstr "开始通过;%s可能容器;%s可能对象"
-#: swift/obj/expirer.py:196
#, python-format
msgid "Exception while deleting container %s %s"
msgstr "执行删除容器时出现异常 %s %s"
-#: swift/obj/expirer.py:201 swift/obj/expirer.py:218
msgid "Unhandled exception"
msgstr "未处理的异常"
-#: swift/obj/expirer.py:268
#, python-format
msgid "Exception while deleting object %s %s %s"
msgstr "执行删除对象时发生异常%s %s %s"
-#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472
-#, python-format
-msgid "Invalid response %(resp)s from %(full_path)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:195
-#, python-format
-msgid "Trying to GET %(full_path)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:301
-#, python-format
-msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:324
#, python-format
msgid ""
-"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed"
-" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-
-#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% "
-"synced"
+"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
-#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426
#, python-format
msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-#: swift/obj/reconstructor.py:352
-#, python-format
-msgid "Nothing reconstructed for %s seconds."
-msgstr ""
-
-#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463
msgid "Lockup detected.. killing live coros."
msgstr "检测到lockup。终止正在执行的coros"
-#: swift/obj/reconstructor.py:442
-#, python-format
-msgid "Trying to sync suffixes with %s"
-msgstr ""
-
-#: swift/obj/reconstructor.py:467
-#, python-format
-msgid "%s responded as unmounted"
-msgstr ""
-
-#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295
#, python-format
msgid "Removing partition: %s"
msgstr "移除分区:%s"
-#: swift/obj/reconstructor.py:865
-msgid "Ring change detected. Aborting current reconstruction pass."
-msgstr ""
-
-#: swift/obj/reconstructor.py:884
-msgid "Exception in top-levelreconstruction loop"
-msgstr ""
-
-#: swift/obj/reconstructor.py:894
-msgid "Running object reconstructor in script mode."
-msgstr ""
-
-#: swift/obj/reconstructor.py:903
-#, python-format
-msgid "Object reconstruction complete (once). (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/reconstructor.py:910
-msgid "Starting object reconstructor in daemon mode."
-msgstr ""
-
-#: swift/obj/reconstructor.py:914
-msgid "Starting object reconstruction pass."
-msgstr ""
-
-#: swift/obj/reconstructor.py:919
-#, python-format
-msgid "Object reconstruction complete. (%.02f minutes)"
-msgstr ""
-
-#: swift/obj/replicator.py:139
#, python-format
msgid "Killing long-running rsync: %s"
msgstr "终止long-running同步: %s"
-#: swift/obj/replicator.py:153
#, python-format
msgid "Bad rsync return code: %(ret)d <- %(args)s"
msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
-#: swift/obj/replicator.py:160 swift/obj/replicator.py:164
#, python-format
msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
-#: swift/obj/replicator.py:281
-#, python-format
-msgid "Removing %s objects"
-msgstr ""
-
-#: swift/obj/replicator.py:289
msgid "Error syncing handoff partition"
msgstr "执行同步切换分区时发生错误"
-#: swift/obj/replicator.py:351
#, python-format
msgid "%(ip)s/%(device)s responded as unmounted"
msgstr "%(ip)s/%(device)s的回应为未挂载"
-#: swift/obj/replicator.py:356
#, python-format
msgid "Invalid response %(resp)s from %(ip)s"
msgstr "无效的回应%(resp)s来自%(ip)s"
-#: swift/obj/replicator.py:391
#, python-format
msgid "Error syncing with node: %s"
msgstr "执行同步时节点%s发生错误"
-#: swift/obj/replicator.py:395
msgid "Error syncing partition"
msgstr "执行同步分区时发生错误"
-#: swift/obj/replicator.py:408
#, python-format
msgid ""
"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
@@ -1073,256 +751,177 @@ msgstr ""
"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-#: swift/obj/replicator.py:434
#, python-format
msgid "Nothing replicated for %s seconds."
msgstr "%s秒无复制"
-#: swift/obj/replicator.py:578
msgid "Ring change detected. Aborting current replication pass."
msgstr "Ring改变被检测到。退出现有的复制通过"
-#: swift/obj/replicator.py:599
msgid "Exception in top-level replication loop"
msgstr "top-level复制圈出现异常"
-#: swift/obj/replicator.py:608
msgid "Running object replicator in script mode."
msgstr "在加密模式下执行对象复制"
-#: swift/obj/replicator.py:626
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
-#: swift/obj/replicator.py:633
msgid "Starting object replicator in daemon mode."
msgstr "在守护模式下开始对象复制"
-#: swift/obj/replicator.py:637
msgid "Starting object replication pass."
msgstr "开始通过对象复制"
-#: swift/obj/replicator.py:642
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr "对象复制完成。(%.02f minutes)"
-#: swift/obj/server.py:231
#, python-format
msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d"
-" response from %(ip)s:%(port)s/%(dev)s"
-msgstr "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/%(dev)s"
+"ERROR Container update failed (saving for async update later): %(status)d "
+"response from %(ip)s:%(port)s/%(dev)s"
+msgstr ""
+"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/"
+"%(dev)s"
-#: swift/obj/server.py:238
#, python-format
msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for "
-"async update later)"
+"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
+"update later)"
msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)"
-#: swift/obj/server.py:273
#, python-format
msgid ""
"ERROR Container update failed: different numbers of hosts and devices in "
"request: \"%s\" vs \"%s\""
msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\""
-#: swift/obj/updater.py:63
-#, python-format
-msgid "ERROR: Unable to access %(path)s: %(error)s"
-msgstr ""
-
-#: swift/obj/updater.py:78
msgid "Begin object update sweep"
msgstr "开始对象更新扫除"
-#: swift/obj/updater.py:104
#, python-format
msgid ""
-"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s"
-" successes, %(fail)s failures"
-msgstr "%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败"
+"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s "
+"successes, %(fail)s failures"
+msgstr ""
+"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败"
-#: swift/obj/updater.py:113
#, python-format
msgid "Object update sweep completed: %.02fs"
msgstr "对象更新扫除完成:%.02fs"
-#: swift/obj/updater.py:122
msgid "Begin object update single threaded sweep"
msgstr "开始对象更新单线程扫除"
-#: swift/obj/updater.py:136
#, python-format
msgid ""
-"Object update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures"
-msgstr "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败"
+"Object update single threaded sweep completed: %(elapsed).02fs, %(success)s "
+"successes, %(fail)s failures"
+msgstr ""
+"对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败"
-#: swift/obj/updater.py:179
#, python-format
msgid "ERROR async pending file with unexpected name %s"
msgstr "执行同步等待文件 文件名不可知%s"
-#: swift/obj/updater.py:209
#, python-format
msgid "ERROR Pickle problem, quarantining %s"
msgstr "错误 Pickle问题 隔离%s"
-#: swift/obj/updater.py:274
#, python-format
msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s"
-#: swift/proxy/server.py:405
msgid "ERROR Unhandled exception in request"
msgstr "错误 未处理的异常发出请求"
-#: swift/proxy/server.py:460
#, python-format
msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
-#: swift/proxy/server.py:477 swift/proxy/server.py:495
#, python-format
msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
-#: swift/proxy/server.py:571
#, python-format
msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s"
-#: swift/proxy/controllers/account.py:64
msgid "Account"
msgstr "账号"
-#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814
-#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411
-#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643
-#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591
-#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908
-#: swift/proxy/controllers/obj.py:2093
msgid "Object"
msgstr "对象"
-#: swift/proxy/controllers/base.py:753
msgid "Trying to read during GET (retrying)"
msgstr "执行GET时尝试读取(重新尝试)"
-#: swift/proxy/controllers/base.py:815
msgid "Trying to read during GET"
msgstr "执行GET时尝试读取"
-#: swift/proxy/controllers/base.py:819
#, python-format
msgid "Client did not read from proxy within %ss"
msgstr "客户尚未从代理处读取%ss"
-#: swift/proxy/controllers/base.py:824
msgid "Client disconnected on read"
msgstr "客户读取时中断"
-#: swift/proxy/controllers/base.py:826
msgid "Trying to send to client"
msgstr "尝试发送到客户端"
-#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
-#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129
-#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450
-#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138
msgid "ERROR Insufficient Storage"
msgstr "错误 存储空间不足"
-#: swift/proxy/controllers/base.py:905
#, python-format
msgid "ERROR %(status)d %(body)s From %(type)s Server"
msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
-#: swift/proxy/controllers/base.py:1132
-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
-msgstr ""
-
-#: swift/proxy/controllers/base.py:1260
#, python-format
msgid "%(type)s returning 503 for %(statuses)s"
msgstr "%(type)s 返回 503 在 %(statuses)s"
-#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161
msgid "Container"
msgstr "容器"
-#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
-#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903
-#, python-format
-msgid "ERROR %(status)d Expect: 100-continue From Object Server"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "已知:100-continue on %s"
-#: swift/proxy/controllers/obj.py:428
#, python-format
msgid "Trying to get final status of PUT to %s"
msgstr "尝试执行获取最后的PUT状态%s"
-#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143
#, python-format
msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
-#: swift/proxy/controllers/obj.py:716
#, python-format
msgid "Object PUT returning 412, %(statuses)r"
msgstr "对象PUT返还 412,%(statuses)r "
-#: swift/proxy/controllers/obj.py:725
-#, python-format
-msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048
#, python-format
msgid "ERROR Client read timeout (%ss)"
msgstr "错误 客户读取超时(%ss)"
-#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055
msgid "ERROR Exception causing client disconnect"
msgstr "错误 异常导致客户端中断连接"
-#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060
msgid "Client disconnected without sending enough data"
msgstr "客户中断 尚未发送足够"
-#: swift/proxy/controllers/obj.py:869
#, python-format
msgid "Object servers returned %s mismatched etags"
msgstr "对象服务器返还%s不匹配etags"
-#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218
msgid "Object PUT"
msgstr "对象上传"
-
-#: swift/proxy/controllers/obj.py:2035
-#, python-format
-msgid "Not enough object servers ack'ed (got %d)"
-msgstr ""
-
-#: swift/proxy/controllers/obj.py:2094
-#, python-format
-msgid "Trying to get %s status of PUT to %s"
-msgstr ""
-
From 55dd705a863c4500330cbd2b8c2fec46d618dc71 Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Wed, 6 May 2015 19:53:09 +0200
Subject: [PATCH 24/98] Add missing statsd metrics section for
object-reconstructor
Change-Id: Id3f98e5f637ff537a387262b40f21c05876fca91
---
doc/source/admin_guide.rst | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst
index 5b7a02850a..50eb9bd5e6 100644
--- a/doc/source/admin_guide.rst
+++ b/doc/source/admin_guide.rst
@@ -896,6 +896,31 @@ Metric Name Description
including ones resulting in an error.
======================== ====================================================
+Metrics for `object-reconstructor`:
+
+====================================================== ======================================================
+Metric Name Description
+------------------------------------------------------ ------------------------------------------------------
+`object-reconstructor.partition.delete.count.<device>` A count of partitions on <device> which were
+ reconstructed and synced to another node because they
+ didn't belong on this node. This metric is tracked
+ per-device to allow for "quiescence detection" for
+ object reconstruction activity on each device.
+`object-reconstructor.partition.delete.timing` Timing data for partitions reconstructed and synced to
+ another node because they didn't belong on this node.
+ This metric is not tracked per device.
+`object-reconstructor.partition.update.count.<device>` A count of partitions on <device> which were
+ reconstructed and synced to another node, but also
+ belong on this node. As with delete.count, this metric
+ is tracked per-device.
+`object-reconstructor.partition.update.timing` Timing data for partitions reconstructed which also
+ belong on this node. This metric is not tracked
+ per-device.
+`object-reconstructor.suffix.hashes` Count of suffix directories whose hash (of filenames)
+ was recalculated.
+`object-reconstructor.suffix.syncs` Count of suffix directories reconstructed with ssync.
+====================================================== ======================================================
+
Metrics for `object-replicator`:
=================================================== ====================================================
From 1faad248f833735585aa8f6135babceb46fbb6f8 Mon Sep 17 00:00:00 2001
From: Emmanuel Cazenave
Date: Tue, 5 May 2015 12:31:22 +0200
Subject: [PATCH 25/98] X-Auth-Token should be a bytestring.
Change-Id: I2aa941d74883e17e9548b0144a4a2e2db33aba95
Closes-Bug: 1451773
---
test/functional/swift_test_client.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py
index f68dc035f0..4d77bcced0 100644
--- a/test/functional/swift_test_client.py
+++ b/test/functional/swift_test_client.py
@@ -181,7 +181,11 @@ class Connection(object):
self.storage_url = str('/%s/%s' % (x[3], x[4]))
self.account_name = str(x[4])
self.auth_user = auth_user
- self.storage_token = storage_token
+ # With v2 keystone, storage_token is unicode.
+ # We want it to be string otherwise this would cause
+ # troubles when doing query with already encoded
+ # non ascii characters in its headers.
+ self.storage_token = str(storage_token)
self.user_acl = '%s:%s' % (self.account, self.username)
self.http_connect()
From 0b20a18e5216b991855c461e41e9ef32e17fadb4 Mon Sep 17 00:00:00 2001
From: Pete Zaitcev
Date: Thu, 7 May 2015 13:16:15 -0600
Subject: [PATCH 26/98] Spell "rebalance" right in swift-ring-builder.1
See Red Hat bug #1218269.
Change-Id: I814eb4b3c0821f5a8df5feea2bda3a964aace536
---
doc/manpages/swift-ring-builder.1 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/manpages/swift-ring-builder.1 b/doc/manpages/swift-ring-builder.1
index 46516d5c4e..6bff7e8e67 100644
--- a/doc/manpages/swift-ring-builder.1
+++ b/doc/manpages/swift-ring-builder.1
@@ -142,7 +142,7 @@ could take a while to run.
.RE
-.IP "\fBrebalence\fR"
+.IP "\fBrebalance\fR"
.RS 5
Attempts to rebalance the ring by reassigning partitions that haven't been recently reassigned.
.RE
From 664a632c01f8c5c80826b223b9ade774bfe2ed9a Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Fri, 8 May 2015 08:41:39 +0200
Subject: [PATCH 27/98] Update my mailmap entry
Change-Id: I5d21a55d0fa4cab6eaa6ff426819aa1dc997de2f
---
.mailmap | 3 ++-
AUTHORS | 2 +-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/.mailmap b/.mailmap
index c45be7c95f..172c761c06 100644
--- a/.mailmap
+++ b/.mailmap
@@ -51,7 +51,8 @@ Tom Fifield Tom Fifield
Sascha Peilicke Sascha Peilicke
Zhenguo Niu
Peter Portante
-Christian Schwede
+Christian Schwede
+Christian Schwede
Constantine Peresypkin
Madhuri Kumari madhuri
Morgan Fainberg
diff --git a/AUTHORS b/AUTHORS
index fa2cee7458..ce680dae15 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -172,7 +172,7 @@ Brent Roskos (broskos@internap.com)
Shilla Saebi (shilla.saebi@gmail.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com)
Sarvesh Ranjan (saranjan@cisco.com)
-Christian Schwede (christian.schwede@enovance.com)
+Christian Schwede (cschwede@redhat.com)
Mark Seger (Mark.Seger@hp.com)
Andrew Clay Shafer (acs@parvuscaptus.com)
Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp)
From 90b84d3a699811a99c97ebbe4f71a14d2f76a0e5 Mon Sep 17 00:00:00 2001
From: Tim Burke
Date: Fri, 8 May 2015 11:45:12 -0700
Subject: [PATCH 28/98] Properly re-raise exceptions in proxy_logging
Previously, this could encounter TypeErrors, presumably because
sys.exc_clear() was called somewhere in the block of code between
catching the exception and re-raising.
Related-Bug: 1181146
Change-Id: Iadeea3f61e70bf83dc0eb063fdb27edd16f3ca32
---
swift/common/middleware/proxy_logging.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/swift/common/middleware/proxy_logging.py b/swift/common/middleware/proxy_logging.py
index 66487502d5..968af2dd71 100644
--- a/swift/common/middleware/proxy_logging.py
+++ b/swift/common/middleware/proxy_logging.py
@@ -71,6 +71,7 @@ if this is a middleware subrequest or not. A log processor calculating
bandwidth usage will want to only sum up logs with no swift.source.
"""
+import sys
import time
from urllib import quote, unquote
@@ -296,12 +297,13 @@ class ProxyLoggingMiddleware(object):
try:
iterable = self.app(env, my_start_response)
except Exception:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
req = Request(env)
status_int = status_int_for_logging(start_status=500)
self.log_request(
req, status_int, input_proxy.bytes_received, 0, start_time,
time.time())
- raise
+ raise exc_type, exc_value, exc_traceback
else:
return iter_response(iterable)
From 29f4393d88426fd6c34f2cfe43a8c434bfad8d47 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Fri, 8 May 2015 15:55:14 -0700
Subject: [PATCH 29/98] Remove workaround for old eventlet version
Swift now requires eventlet >= 0.16.1, so we can get rid of this
workaround for a bug in eventlet 0.9.16.
Change-Id: I4a1200b9bd9266896a704a840fda0d1b720bc86d
---
swift/common/memcached.py | 35 ++----------------
test/unit/common/test_memcached.py | 58 ------------------------------
2 files changed, 2 insertions(+), 91 deletions(-)
diff --git a/swift/common/memcached.py b/swift/common/memcached.py
index 8a2cfa19ca..2e1ed4c08d 100644
--- a/swift/common/memcached.py
+++ b/swift/common/memcached.py
@@ -50,11 +50,10 @@ import time
from bisect import bisect
from swift import gettext_ as _
from hashlib import md5
-from distutils.version import StrictVersion
from eventlet.green import socket
from eventlet.pools import Pool
-from eventlet import Timeout, __version__ as eventlet_version
+from eventlet import Timeout
from swift.common.utils import json
@@ -107,14 +106,6 @@ class MemcacheConnPool(Pool):
Pool.__init__(self, max_size=size)
self.server = server
self._connect_timeout = connect_timeout
- self._parent_class_getter = super(MemcacheConnPool, self).get
- try:
- # call the patched .get() if eventlet is older than 0.9.17
- if StrictVersion(eventlet_version) < StrictVersion('0.9.17'):
- self._parent_class_getter = self._upstream_fixed_get
- except ValueError:
- # "invalid" version number or otherwise error parsing version
- pass
def create(self):
if ':' in self.server:
@@ -129,34 +120,12 @@ class MemcacheConnPool(Pool):
return (sock.makefile(), sock)
def get(self):
- fp, sock = self._parent_class_getter()
+ fp, sock = super(MemcacheConnPool, self).get()
if fp is None:
# An error happened previously, so we need a new connection
fp, sock = self.create()
return fp, sock
- # The following method is from eventlet post 0.9.16. This version
- # properly keeps track of pool size accounting, and therefore doesn't
- # let the pool grow without bound. This patched version is the result
- # of commit f5e5b2bda7b442f0262ee1084deefcc5a1cc0694 in eventlet and is
- # documented at https://bitbucket.org/eventlet/eventlet/issue/91
- def _upstream_fixed_get(self):
- """Return an item from the pool, when one is available. This may
- cause the calling greenthread to block.
- """
- if self.free_items:
- return self.free_items.popleft()
- self.current_size += 1
- if self.current_size <= self.max_size:
- try:
- created = self.create()
- except: # noqa
- self.current_size -= 1
- raise
- return created
- self.current_size -= 1 # did not create
- return self.channel.get()
-
class MemcacheRing(object):
"""
diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py
index f3df46c404..cd251f15d0 100644
--- a/test/unit/common/test_memcached.py
+++ b/test/unit/common/test_memcached.py
@@ -410,64 +410,6 @@ class TestMemcached(unittest.TestCase):
connections.get_nowait()
self.assertTrue(connections.empty())
- # Ensure we exercise the backported-for-pre-eventlet-version-0.9.17 get()
- # code, even if the executing eventlet's version is already newer.
- @patch.object(memcached, 'eventlet_version', '0.9.16')
- def test_connection_pooling_pre_0_9_17(self):
- with patch('swift.common.memcached.socket') as mock_module:
- connected = []
- count = [0]
-
- def _slow_yielding_connector(addr):
- count[0] += 1
- if count[0] % 3 == 0:
- raise ValueError('whoops!')
- sleep(0.1)
- connected.append(addr)
-
- mock_module.socket.return_value.connect.side_effect = \
- _slow_yielding_connector
-
- # If POOL_SIZE is not small enough relative to USER_COUNT, the
- # "free_items" business in the eventlet.pools.Pool will cause
- # spurious failures below. I found these values to work well on a
- # VM running in VirtualBox on a late 2013 Retina MacbookPro:
- POOL_SIZE = 5
- USER_COUNT = 50
-
- pool = memcached.MemcacheConnPool('1.2.3.4:11211', size=POOL_SIZE,
- connect_timeout=10)
- self.assertEqual(POOL_SIZE, pool.max_size)
-
- def _user():
- got = None
- while not got:
- try:
- got = pool.get()
- except: # noqa
- pass
- pool.put(got)
-
- # make a bunch of requests "at the same time"
- p = GreenPool()
- for i in range(USER_COUNT):
- p.spawn(_user)
- p.waitall()
-
- # If the except block after the "created = self.create()" call
- # doesn't correctly decrement self.current_size, this test will
- # fail by having some number less than POOL_SIZE connections (in my
- # testing, anyway).
- self.assertEqual(POOL_SIZE, len(connected))
-
- # Subsequent requests should get and use the existing
- # connections, not creating any more.
- for i in range(USER_COUNT):
- p.spawn(_user)
- p.waitall()
-
- self.assertEqual(POOL_SIZE, len(connected))
-
def test_connection_pool_timeout(self):
orig_conn_pool = memcached.MemcacheConnPool
try:
From 518262ab6ecd8faa2b915df118ffc70a30112a18 Mon Sep 17 00:00:00 2001
From: paul luse
Date: Tue, 12 May 2015 15:21:13 -0700
Subject: [PATCH 30/98] Remove 1 line of dead code from EC reconstructor
Assuming nobody intentionally left this in here for some reason...
Change-Id: I4bf43bb3828e062c0342557243076ed62d6790f4
---
swift/obj/reconstructor.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 4385e42cc9..2dd743fa9a 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -193,7 +193,6 @@ class ObjectReconstructor(Daemon):
:returns: response
"""
resp = None
- headers['X-Backend-Node-Index'] = node['index']
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
From 98b725fec639c5501c645ce4e4dc9d12c686f91d Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Fri, 1 May 2015 13:02:29 +0100
Subject: [PATCH 31/98] Cleanup and extend end to end ssync tests
Extends the existing end to end ssync tests with
a test using replication policy.
Also some cleanup and improvements to the test framework e.g. rather
than faking the connection between sender and receiver, use a real
connection and wrap it to capture traffic for verification.
Change-Id: Id71d2eb3fb8fa15c016ef151aacf95f97196a902
---
test/unit/obj/test_ssync_sender.py | 340 ++++++++++++++++++++---------
1 file changed, 235 insertions(+), 105 deletions(-)
diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py
index 42bd610eb6..fa38b658b2 100644
--- a/test/unit/obj/test_ssync_sender.py
+++ b/test/unit/obj/test_ssync_sender.py
@@ -29,9 +29,8 @@ from swift.common import exceptions, utils
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileDeleted
-from swift.common.swob import Request
-from swift.common.utils import Timestamp, FileLikeIter
-from swift.obj import ssync_sender, diskfile, server, ssync_receiver
+from swift.common.utils import Timestamp
+from swift.obj import ssync_sender, diskfile, server
from swift.obj.reconstructor import RebuildingECDiskFileStream
from test.unit import debug_logger, patch_policies
@@ -1245,67 +1244,52 @@ class TestSender(BaseTestSender):
self.assertTrue(self.sender.connection.closed)
-@patch_policies(with_ec_default=True)
-class TestSsync(BaseTestSender):
+class TestBaseSsync(BaseTestSender):
"""
- Test interactions between sender and receiver. The basis for each test is
- actual diskfile state on either side - the connection between sender and
- receiver is faked. Assertions are made about the final state of the sender
- and receiver diskfiles.
+ Provides a framework to test end to end interactions between sender and
+ receiver. The basis for each test is actual diskfile state on either side.
+ The connection between sender and receiver is wrapped to capture ssync
+ traffic for subsequent verification of the protocol. Assertions are made
+ about the final state of the sender and receiver diskfiles.
"""
- def make_fake_ssync_connect(self, sender, rx_obj_controller, device,
- partition, policy):
- trace = []
+ def make_connect_wrapper(self, sender):
+ """
+ Make a wrapper function for the ssync_sender.Sender.connect() method
+ that will in turn wrap the HTTConnection.send() and the
+ Sender.readline() so that ssync protocol messages can be captured.
+ """
+ orig_connect = sender.connect
+ trace = dict(messages=[])
def add_trace(type, msg):
# record a protocol event for later analysis
if msg.strip():
- trace.append((type, msg.strip()))
+ trace['messages'].append((type, msg.strip()))
- def start_response(status, headers, exc_info=None):
- assert(status == '200 OK')
+ def make_send_wrapper(send):
+ def wrapped_send(msg):
+ _msg = msg.split('\r\n', 1)[1]
+ _msg = _msg.rsplit('\r\n', 1)[0]
+ add_trace('tx', _msg)
+ send(msg)
+ return wrapped_send
- class FakeConnection:
- def __init__(self, trace):
- self.trace = trace
- self.queue = []
- self.src = FileLikeIter(self.queue)
+ def make_readline_wrapper(readline):
+ def wrapped_readline():
+ data = readline()
+ add_trace('rx', data)
+ bytes_read = trace.setdefault('readline_bytes', 0)
+ trace['readline_bytes'] = bytes_read + len(data)
+ return data
+ return wrapped_readline
- def send(self, msg):
- msg = msg.split('\r\n', 1)[1]
- msg = msg.rsplit('\r\n', 1)[0]
- add_trace('tx', msg)
- self.queue.append(msg)
-
- def close(self):
- pass
-
- def wrap_gen(gen):
- # Strip response head and tail
- while True:
- try:
- msg = gen.next()
- if msg:
- add_trace('rx', msg)
- msg = '%x\r\n%s\r\n' % (len(msg), msg)
- yield msg
- except StopIteration:
- break
-
- def fake_connect():
- sender.connection = FakeConnection(trace)
- headers = {'Transfer-Encoding': 'chunked',
- 'X-Backend-Storage-Policy-Index': str(int(policy))}
- env = {'REQUEST_METHOD': 'SSYNC'}
- path = '/%s/%s' % (device, partition)
- req = Request.blank(path, environ=env, headers=headers)
- req.environ['wsgi.input'] = sender.connection.src
- resp = rx_obj_controller(req.environ, start_response)
- wrapped_gen = wrap_gen(resp)
- sender.response = FileLikeIter(wrapped_gen)
- sender.response.fp = sender.response
- return fake_connect
+ def wrapped_connect():
+ orig_connect()
+ sender.connection.send = make_send_wrapper(
+ sender.connection.send)
+ sender.readline = make_readline_wrapper(sender.readline)
+ return wrapped_connect, trace
def setUp(self):
self.device = 'dev'
@@ -1325,19 +1309,24 @@ class TestSsync(BaseTestSender):
'replication_one_per_device': 'false',
'log_requests': 'false'}
self.rx_controller = server.ObjectController(conf)
- self.orig_ensure_flush = ssync_receiver.Receiver._ensure_flush
- ssync_receiver.Receiver._ensure_flush = lambda *args: ''
self.ts_iter = (Timestamp(t)
for t in itertools.count(int(time.time())))
+ self.rx_ip = '127.0.0.1'
+ sock = eventlet.listen((self.rx_ip, 0))
+ self.rx_server = eventlet.spawn(
+ eventlet.wsgi.server, sock, self.rx_controller, utils.NullLogger())
+ self.rx_port = sock.getsockname()[1]
+ self.rx_node = {'replication_ip': self.rx_ip,
+ 'replication_port': self.rx_port,
+ 'device': self.device}
def tearDown(self):
- if self.orig_ensure_flush:
- ssync_receiver.Receiver._ensure_flush = self.orig_ensure_flush
+ self.rx_server.kill()
shutil.rmtree(self.tmpdir, ignore_errors=True)
def _create_ondisk_files(self, df_mgr, obj_name, policy, timestamp,
frag_indexes=None):
- frag_indexes = [] if frag_indexes is None else frag_indexes
+ frag_indexes = [None] if frag_indexes is None else frag_indexes
metadata = {'Content-Type': 'plain/text'}
diskfiles = []
for frag_index in frag_indexes:
@@ -1372,22 +1361,28 @@ class TestSsync(BaseTestSender):
df.open()
return df
- def _verify_diskfile_sync(self, tx_df, rx_df, frag_index):
+ def _verify_diskfile_sync(self, tx_df, rx_df, frag_index, same_etag=False):
# verify that diskfiles' metadata match
# sanity check, they are not the same ondisk files!
self.assertNotEqual(tx_df._datadir, rx_df._datadir)
rx_metadata = dict(rx_df.get_metadata())
for k, v in tx_df.get_metadata().iteritems():
- self.assertEqual(v, rx_metadata.pop(k))
+ if k == 'X-Object-Sysmeta-Ec-Frag-Index':
+ # if tx_df had a frag_index then rx_df should also have one
+ self.assertTrue(k in rx_metadata)
+ self.assertEqual(frag_index, int(rx_metadata.pop(k)))
+ elif k == 'ETag' and not same_etag:
+ self.assertNotEqual(v, rx_metadata.pop(k, None))
+ continue
+ else:
+ self.assertEqual(v, rx_metadata.pop(k), k)
# ugh, ssync duplicates ETag with Etag so have to clear it out here
if 'Etag' in rx_metadata:
rx_metadata.pop('Etag')
self.assertFalse(rx_metadata)
- if frag_index:
- rx_metadata = rx_df.get_metadata()
- fi_key = 'X-Object-Sysmeta-Ec-Frag-Index'
- self.assertTrue(fi_key in rx_metadata)
- self.assertEqual(frag_index, int(rx_metadata[fi_key]))
+ expected_body = '%s___%s' % (tx_df._name, frag_index)
+ actual_body = ''.join([chunk for chunk in rx_df.reader()])
+ self.assertEqual(expected_body, actual_body)
def _analyze_trace(self, trace):
"""
@@ -1445,7 +1440,7 @@ class TestSsync(BaseTestSender):
phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates')
results = dict((k, []) for k in phases)
handler = unexpected
- lines = list(trace)
+ lines = list(trace.get('messages', []))
lines.reverse()
while lines:
line = lines.pop()
@@ -1471,27 +1466,35 @@ class TestSsync(BaseTestSender):
'Message outside of a phase: %s' % results.get(None))
return results
- def _verify_ondisk_files(self, tx_objs, policy, rx_node_index):
- # verify tx and rx files that should be in sync
+ def _verify_ondisk_files(self, tx_objs, policy, tx_frag_index=None,
+ rx_frag_index=None):
+ """
+ Verify tx and rx files that should be in sync.
+ :param tx_objs: sender diskfiles
+ :param policy: storage policy instance
+ :param tx_frag_index: the fragment index of tx diskfiles that should
+ have been used as a source for sync'ing
+ :param rx_frag_index: the fragment index of expected rx diskfiles
+ """
for o_name, diskfiles in tx_objs.iteritems():
for tx_df in diskfiles:
- frag_index = tx_df._frag_index
- if frag_index == rx_node_index:
- # this frag_index should have been sync'd,
+ if tx_frag_index is None or tx_df._frag_index == tx_frag_index:
+ # this diskfile should have been sync'd,
# check rx file is ok
- rx_df = self._open_rx_diskfile(o_name, policy, frag_index)
- self._verify_diskfile_sync(tx_df, rx_df, frag_index)
- expected_body = '/a/c/%s___%s' % (o_name, rx_node_index)
- actual_body = ''.join([chunk for chunk in rx_df.reader()])
- self.assertEqual(expected_body, actual_body)
+ rx_df = self._open_rx_diskfile(
+ o_name, policy, rx_frag_index)
+ # for EC revert job or replication etags should match
+ match_etag = (tx_frag_index == rx_frag_index)
+ self._verify_diskfile_sync(
+ tx_df, rx_df, rx_frag_index, match_etag)
else:
- # this frag_index should not have been sync'd,
+ # this diskfile should not have been sync'd,
# check no rx file,
- self.assertRaises(DiskFileNotExist,
- self._open_rx_diskfile,
- o_name, policy, frag_index=frag_index)
+ self.assertRaises(DiskFileNotExist, self._open_rx_diskfile,
+ o_name, policy,
+ frag_index=tx_df._frag_index)
# check tx file still intact - ssync does not do any cleanup!
- self._open_tx_diskfile(o_name, policy, frag_index)
+ tx_df.open()
def _verify_tombstones(self, tx_objs, policy):
# verify tx and rx tombstones that should be in sync
@@ -1509,13 +1512,17 @@ class TestSsync(BaseTestSender):
rx_delete_time = exc.timestamp
self.assertEqual(tx_delete_time, rx_delete_time)
+
+@patch_policies(with_ec_default=True)
+class TestSsyncEC(TestBaseSsync):
def test_handoff_fragment_revert(self):
# test that a sync_revert type job does send the correct frag archives
- # to the receiver, and that those frag archives are then removed from
- # local node.
+ # to the receiver
policy = POLICIES.default
rx_node_index = 0
tx_node_index = 1
+ # for a revert job we iterate over frag index that belongs on
+ # remote node
frag_index = rx_node_index
# create sender side diskfiles...
@@ -1557,20 +1564,18 @@ class TestSsync(BaseTestSender):
job = {'device': self.device,
'partition': self.partition,
'policy': policy,
- 'frag_index': frag_index,
- 'purge': True}
- node = {'index': rx_node_index}
- self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
- # fake connection from tx to rx...
- self.sender.connect = self.make_fake_ssync_connect(
- self.sender, self.rx_controller, self.device, self.partition,
- policy)
+ 'frag_index': frag_index}
+ node = dict(self.rx_node)
+ node.update({'index': rx_node_index})
+ sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
+ # wrap connection from tx to rx to capture ssync messages...
+ sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
- self.sender()
+ sender()
# verify protocol
- results = self._analyze_trace(self.sender.connection.trace)
+ results = self._analyze_trace(trace)
# sender has handoff frags for o1, o3 and o4 and ts for o5
self.assertEqual(4, len(results['tx_missing']))
# receiver is missing frags for o1, o3 and ts for o5
@@ -1591,7 +1596,8 @@ class TestSsync(BaseTestSender):
self.assertEqual(['/a/c/o1', '/a/c/o3', '/a/c/o5'], sorted(sync_paths))
# verify on disk files...
- self._verify_ondisk_files(tx_objs, policy, rx_node_index)
+ self._verify_ondisk_files(
+ tx_objs, policy, frag_index, rx_node_index)
self._verify_tombstones(tx_tombstones, policy)
def test_fragment_sync(self):
@@ -1656,19 +1662,17 @@ class TestSsync(BaseTestSender):
'policy': policy,
'frag_index': frag_index,
'sync_diskfile_builder': fake_reconstruct_fa}
- node = {'index': rx_node_index}
- self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
-
- # fake connection from tx to rx...
- self.sender.connect = self.make_fake_ssync_connect(
- self.sender, self.rx_controller, self.device, self.partition,
- policy)
+ node = dict(self.rx_node)
+ node.update({'index': rx_node_index})
+ sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
+ # wrap connection from tx to rx to capture ssync messages...
+ sender.connect, trace = self.make_connect_wrapper(sender)
# run the sync protocol...
- self.sender()
+ sender()
# verify protocol
- results = self._analyze_trace(self.sender.connection.trace)
+ results = self._analyze_trace(trace)
# sender has primary for o1, o2 and o3, o4 and ts for o5
self.assertEqual(5, len(results['tx_missing']))
# receiver is missing o1, o2 and o3 and ts for o5
@@ -1702,9 +1706,135 @@ class TestSsync(BaseTestSender):
# verify on disk files...
self.assertEqual(sorted(expect_sync_paths), sorted(actual_sync_paths))
- self._verify_ondisk_files(tx_objs, policy, rx_node_index)
+ self._verify_ondisk_files(
+ tx_objs, policy, frag_index, rx_node_index)
self._verify_tombstones(tx_tombstones, policy)
+@patch_policies
+class TestSsyncReplication(TestBaseSsync):
+ def test_sync(self):
+ policy = POLICIES.default
+ rx_node_index = 0
+
+ # create sender side diskfiles...
+ tx_objs = {}
+ rx_objs = {}
+ tx_tombstones = {}
+ rx_tombstones = {}
+ tx_df_mgr = self.daemon._diskfile_router[policy]
+ rx_df_mgr = self.rx_controller._diskfile_router[policy]
+ # o1 and o2 are on tx only
+ t1 = self.ts_iter.next()
+ tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1)
+ t2 = self.ts_iter.next()
+ tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2)
+ # o3 is on tx and older copy on rx
+ t3a = self.ts_iter.next()
+ rx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a)
+ t3b = self.ts_iter.next()
+ tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3b)
+ # o4 in sync on rx and tx
+ t4 = self.ts_iter.next()
+ tx_objs['o4'] = self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4)
+ rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4)
+ # o5 is a tombstone, missing on receiver
+ t5 = self.ts_iter.next()
+ tx_tombstones['o5'] = self._create_ondisk_files(
+ tx_df_mgr, 'o5', policy, t5)
+ tx_tombstones['o5'][0].delete(t5)
+ # o6 is a tombstone, in sync on tx and rx
+ t6 = self.ts_iter.next()
+ tx_tombstones['o6'] = self._create_ondisk_files(
+ tx_df_mgr, 'o6', policy, t6)
+ tx_tombstones['o6'][0].delete(t6)
+ rx_tombstones['o6'] = self._create_ondisk_files(
+ rx_df_mgr, 'o6', policy, t6)
+ rx_tombstones['o6'][0].delete(t6)
+ # o7 is a tombstone on tx, older data on rx
+ t7a = self.ts_iter.next()
+ rx_objs['o7'] = self._create_ondisk_files(rx_df_mgr, 'o7', policy, t7a)
+ t7b = self.ts_iter.next()
+ tx_tombstones['o7'] = self._create_ondisk_files(
+ tx_df_mgr, 'o7', policy, t7b)
+ tx_tombstones['o7'][0].delete(t7b)
+
+ suffixes = set()
+ for diskfiles in (tx_objs.values() + tx_tombstones.values()):
+ for df in diskfiles:
+ suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
+
+ # create ssync sender instance...
+ job = {'device': self.device,
+ 'partition': self.partition,
+ 'policy': policy}
+ node = dict(self.rx_node)
+ node.update({'index': rx_node_index})
+ sender = ssync_sender.Sender(self.daemon, node, job, suffixes)
+ # wrap connection from tx to rx to capture ssync messages...
+ sender.connect, trace = self.make_connect_wrapper(sender)
+
+ # run the sync protocol...
+ success, in_sync_objs = sender()
+
+ self.assertEqual(7, len(in_sync_objs))
+ self.assertTrue(success)
+
+ # verify protocol
+ results = self._analyze_trace(trace)
+ self.assertEqual(7, len(results['tx_missing']))
+ self.assertEqual(5, len(results['rx_missing']))
+ self.assertEqual(5, len(results['tx_updates']))
+ self.assertFalse(results['rx_updates'])
+ sync_paths = []
+ for subreq in results.get('tx_updates'):
+ if subreq.get('method') == 'PUT':
+ self.assertTrue(
+ subreq['path'] in ('/a/c/o1', '/a/c/o2', '/a/c/o3'))
+ expected_body = '%s___None' % subreq['path']
+ self.assertEqual(expected_body, subreq['body'])
+ elif subreq.get('method') == 'DELETE':
+ self.assertTrue(subreq['path'] in ('/a/c/o5', '/a/c/o7'))
+ sync_paths.append(subreq.get('path'))
+ self.assertEqual(
+ ['/a/c/o1', '/a/c/o2', '/a/c/o3', '/a/c/o5', '/a/c/o7'],
+ sorted(sync_paths))
+
+ # verify on disk files...
+ self._verify_ondisk_files(tx_objs, policy)
+ self._verify_tombstones(tx_tombstones, policy)
+
+ def test_nothing_to_sync(self):
+ job = {'device': self.device,
+ 'partition': self.partition,
+ 'policy': POLICIES.default}
+ node = {'replication_ip': self.rx_ip,
+ 'replication_port': self.rx_port,
+ 'device': self.device,
+ 'index': 0}
+ sender = ssync_sender.Sender(self.daemon, node, job, ['abc'])
+ # wrap connection from tx to rx to capture ssync messages...
+ sender.connect, trace = self.make_connect_wrapper(sender)
+
+ result, in_sync_objs = sender()
+
+ self.assertTrue(result)
+ self.assertFalse(in_sync_objs)
+ results = self._analyze_trace(trace)
+ self.assertFalse(results['tx_missing'])
+ self.assertFalse(results['rx_missing'])
+ self.assertFalse(results['tx_updates'])
+ self.assertFalse(results['rx_updates'])
+ # Minimal receiver response as read by sender:
+ # 2 * 4098 <-- _ensure_flush() twice
+ # + 23 <-- :MISSING CHECK START\r\n
+ # + 2 <-- \r\n (minimal missing check response)
+ # + 21 <-- :MISSING CHECK END\r\n
+ # + 17 <-- :UPDATES START\r\n
+ # + 15 <-- :UPDATES END\r\n
+ # TOTAL = 8274
+ self.assertEqual(8274, trace.get('readline_bytes'))
+
+
if __name__ == '__main__':
unittest.main()
From 025c4c4339e7ace2f5be8cb3a3cddf6c38ceff37 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Fri, 24 Apr 2015 02:15:36 -0700
Subject: [PATCH 32/98] Remove confusable query string on post as copy
Current post as copy routine (i.e. POST object with post_as_copy option
turned on) on Object Controller uses "multipart-manifest" query string
which is feeded to env['copy_hook'] to decide which data (the manifest or
object pointed by the manifest) should be copied.
However, the way using the query string will confuse operators looking at
logging system (or analyzing the log) because whole POST object requests
have 'multipart-manifest=get' like as:
POST /v1/AUTH_test/d4c816b24d38489082f5118599a67920/manifest-abcde%3Fmultipart-manifest%3Dget
We cannot know whether the query string was added by hand
(from user) or not. In addition, the query isn't needed by the
backend conversation between proxy-server and object-server.
(Just needed by "copy_hook" on the proxy controller!)
To remove the confusable query string and to keep the log to be clean,
this patch introduces new environment variable "swift.post_as_copy"
and changes proxy controller and the copy_hook to use the new env.
This item was originally discussed at
https://review.openstack.org/#/c/177132/
Co-Authored-By: Alistair Coles
Change-Id: I0cd37520eea1825a10ebd27ccdc7e9162647233e
---
swift/common/middleware/slo.py | 3 +-
swift/proxy/controllers/obj.py | 16 ++++----
test/functional/swift_test_client.py | 5 ++-
test/functional/tests.py | 53 +++++++++++++++++++++++++
test/unit/proxy/controllers/test_obj.py | 25 +++++++++++-
5 files changed, 89 insertions(+), 13 deletions(-)
diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py
index d8df829981..6a6b8294b8 100644
--- a/swift/common/middleware/slo.py
+++ b/swift/common/middleware/slo.py
@@ -537,7 +537,8 @@ class StaticLargeObject(object):
def slo_hook(source_req, source_resp, sink_req):
x_slo = source_resp.headers.get('X-Static-Large-Object')
if (config_true_value(x_slo)
- and source_req.params.get('multipart-manifest') != 'get'):
+ and source_req.params.get('multipart-manifest') != 'get'
+ and 'swift.post_as_copy' not in source_req.environ):
source_resp = SloGetContext(self).get_or_head_response(
source_req, source_resp.headers.items(),
source_resp.app_iter)
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index a83242b5f0..5b7c00c4aa 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -268,12 +268,8 @@ class BaseObjectController(Controller):
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
- req.headers['X-Fresh-Metadata'] = 'true'
+ req.environ['swift.post_as_copy'] = True
req.environ['swift_versioned_copy'] = True
- if req.environ.get('QUERY_STRING'):
- req.environ['QUERY_STRING'] += '&multipart-manifest=get'
- else:
- req.environ['QUERY_STRING'] = 'multipart-manifest=get'
resp = self.PUT(req)
# Older editions returned 202 Accepted on object POSTs, so we'll
# convert any 201 Created responses to that for compatibility with
@@ -577,8 +573,11 @@ class BaseObjectController(Controller):
if not req.content_type_manually_set:
sink_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
- if config_true_value(
- sink_req.headers.get('x-fresh-metadata', 'false')):
+
+ fresh_meta_flag = config_true_value(
+ sink_req.headers.get('x-fresh-metadata', 'false'))
+
+ if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ:
# post-as-copy: ignore new sysmeta, copy existing sysmeta
condition = lambda k: is_sys_meta('object', k)
remove_items(sink_req.headers, condition)
@@ -590,7 +589,8 @@ class BaseObjectController(Controller):
# copy over x-static-large-object for POSTs and manifest copies
if 'X-Static-Large-Object' in source_resp.headers and \
- req.params.get('multipart-manifest') == 'get':
+ (req.params.get('multipart-manifest') == 'get' or
+ 'swift.post_as_copy' in req.environ):
sink_req.headers['X-Static-Large-Object'] = \
source_resp.headers['X-Static-Large-Object']
diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py
index 4d77bcced0..695ea202d7 100644
--- a/test/functional/swift_test_client.py
+++ b/test/functional/swift_test_client.py
@@ -851,7 +851,7 @@ class File(Base):
finally:
fobj.close()
- def sync_metadata(self, metadata=None, cfg=None):
+ def sync_metadata(self, metadata=None, cfg=None, parms=None):
if metadata is None:
metadata = {}
if cfg is None:
@@ -868,7 +868,8 @@ class File(Base):
else:
headers['Content-Length'] = 0
- self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
+ self.conn.make_request('POST', self.path, hdrs=headers,
+ parms=parms, cfg=cfg)
if self.conn.response.status not in (201, 202):
raise ResponseError(self.conn.response, 'POST',
diff --git a/test/functional/tests.py b/test/functional/tests.py
index 3fbbdd784e..3f6f08b8d8 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -2151,6 +2151,7 @@ class TestSloEnv(object):
'manifest-bcd-submanifest')},
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
+ cls.seg_info = seg_info
class TestSlo(Base):
@@ -2356,6 +2357,58 @@ class TestSlo(Base):
except ValueError:
self.fail("COPY didn't copy the manifest (invalid json on GET)")
+ def _make_manifest(self):
+ # To avoid bug 1453807 on fast-post, make a new manifest
+ # for post test.
+ file_item = self.env.container.file("manifest-post")
+ seg_info = self.env.seg_info
+ file_item.write(
+ json.dumps([seg_info['seg_a'], seg_info['seg_b'],
+ seg_info['seg_c'], seg_info['seg_d'],
+ seg_info['seg_e']]),
+ parms={'multipart-manifest': 'put'})
+ return file_item
+
+ def test_slo_post_the_manifest_metadata_update(self):
+ file_item = self._make_manifest()
+ # sanity check, check the object is an SLO manifest
+ file_item.info()
+ file_item.header_fields([('slo', 'x-static-large-object')])
+
+ # POST a user metadata (i.e. x-object-meta-post)
+ file_item.sync_metadata({'post': 'update'})
+
+ updated = self.env.container.file("manifest-post")
+ updated.info()
+ updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity
+ updated_contents = updated.read(parms={'multipart-manifest': 'get'})
+ try:
+ json.loads(updated_contents)
+ except ValueError:
+ self.fail("Unexpected content on GET, expected a json body")
+
+ def test_slo_post_the_manifest_metadata_update_with_qs(self):
+ # multipart-manifest query should be ignored on post
+ for verb in ('put', 'get', 'delete'):
+ file_item = self._make_manifest()
+ # sanity check, check the object is an SLO manifest
+ file_item.info()
+ file_item.header_fields([('slo', 'x-static-large-object')])
+ # POST a user metadata (i.e. x-object-meta-post)
+ file_item.sync_metadata(metadata={'post': 'update'},
+ parms={'multipart-manifest': verb})
+ updated = self.env.container.file("manifest-post")
+ updated.info()
+ updated.header_fields(
+ [('user-meta', 'x-object-meta-post')]) # sanity
+ updated_contents = updated.read(
+ parms={'multipart-manifest': 'get'})
+ try:
+ json.loads(updated_contents)
+ except ValueError:
+ self.fail(
+ "Unexpected content on GET, expected a json body")
+
def test_slo_get_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_body = manifest.read(parms={'multipart-manifest': 'get'})
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index a38e753ae0..b0e614a0dd 100755
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -598,13 +598,31 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
def test_POST_as_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
- head_resp = [200] * self.obj_ring.replicas + \
+ get_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
- codes = head_resp + put_resp
+ codes = get_resp + put_resp
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 202)
+ self.assertEquals(req.environ['QUERY_STRING'], '')
+ self.assertTrue('swift.post_as_copy' in req.environ)
+
+ def test_POST_as_COPY_static_large_object(self):
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
+ get_resp = [200] * self.obj_ring.replicas + \
+ [404] * self.obj_ring.max_more_nodes
+ put_resp = [201] * self.obj_ring.replicas
+ codes = get_resp + put_resp
+ slo_headers = \
+ [{'X-Static-Large-Object': True}] * self.obj_ring.replicas
+ get_headers = slo_headers + [{}] * (len(codes) - len(slo_headers))
+ headers = {'headers': get_headers}
+ with set_http_connect(*codes, **headers):
+ resp = req.get_response(self.app)
+ self.assertEquals(resp.status_int, 202)
+ self.assertEquals(req.environ['QUERY_STRING'], '')
+ self.assertTrue('swift.post_as_copy' in req.environ)
def test_POST_delete_at(self):
t = str(int(time.time() + 100))
@@ -624,6 +642,9 @@ class TestReplicatedObjController(BaseObjectControllerMixin,
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEquals(resp.status_int, 200)
+ self.assertEquals(req.environ['QUERY_STRING'], '') # sanity
+ self.assertTrue('swift.post_as_copy' in req.environ)
+
for given_headers in post_headers:
self.assertEquals(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
From aa4866eb6014d7fbb8531eb5d927d5611ebfd06d Mon Sep 17 00:00:00 2001
From: Thiago da Silva
Date: Wed, 13 May 2015 20:10:59 +0000
Subject: [PATCH 33/98] move replication code to ReplicatedObjectController
Moving _connect_put_node, send_file, _transfer_data and _store_object
methods to ReplicatedObjectController. Each one of these methods is
specific to the replication policy. The EC policy implements its own
version of them.
Of these four methods, only _store_object and _connect_put_node are
required to be implemented by the policy specific Object Controllers.
Change-Id: Ifc72461b77dbfdaae9d63417f1286e8b5da3ca4e
Signed-off-by: Thiago da Silva
---
swift/proxy/controllers/obj.py | 380 ++++++++++++++++++---------------
1 file changed, 210 insertions(+), 170 deletions(-)
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index a83242b5f0..c61986e1dd 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -350,67 +350,6 @@ class BaseObjectController(Controller):
return headers
- def _send_file(self, conn, path):
- """Method for a file PUT coro"""
- while True:
- chunk = conn.queue.get()
- if not conn.failed:
- try:
- with ChunkWriteTimeout(self.app.node_timeout):
- conn.send(chunk)
- except (Exception, ChunkWriteTimeout):
- conn.failed = True
- self.app.exception_occurred(
- conn.node, _('Object'),
- _('Trying to write to %s') % path)
- conn.queue.task_done()
-
- def _connect_put_node(self, nodes, part, path, headers,
- logger_thread_locals):
- """
- Make a connection for a replicated object.
-
- Connects to the first working node that it finds in node_iter
- and sends over the request headers. Returns an HTTPConnection
- object to handle the rest of the streaming.
- """
- self.app.logger.thread_locals = logger_thread_locals
- for node in nodes:
- try:
- start_time = time.time()
- with ConnectionTimeout(self.app.conn_timeout):
- conn = http_connect(
- node['ip'], node['port'], node['device'], part, 'PUT',
- path, headers)
- self.app.set_node_timing(node, time.time() - start_time)
- with Timeout(self.app.node_timeout):
- resp = conn.getexpect()
- if resp.status == HTTP_CONTINUE:
- conn.resp = None
- conn.node = node
- return conn
- elif is_success(resp.status) or resp.status == HTTP_CONFLICT:
- conn.resp = resp
- conn.node = node
- return conn
- elif headers['If-None-Match'] is not None and \
- resp.status == HTTP_PRECONDITION_FAILED:
- conn.resp = resp
- conn.node = node
- return conn
- elif resp.status == HTTP_INSUFFICIENT_STORAGE:
- self.app.error_limit(node, _('ERROR Insufficient Storage'))
- elif is_server_error(resp.status):
- self.app.error_occurred(
- node,
- _('ERROR %(status)d Expect: 100-continue '
- 'From Object Server') % {
- 'status': resp.status})
- except (Exception, Timeout):
- self.app.exception_occurred(
- node, _('Object'),
- _('Expect: 100-continue on %s') % path)
-
def _await_response(self, conn, **kwargs):
with Timeout(self.app.node_timeout):
if conn.resp:
@@ -730,6 +669,28 @@ class BaseObjectController(Controller):
self._check_min_conn(req, conns, min_conns)
+ def _connect_put_node(self, nodes, part, path, headers,
+ logger_thread_locals):
+ """
+ Make connection to storage nodes
+
+ Connects to the first working node that it finds in nodes iter
+ and sends over the request headers. Returns an HTTPConnection
+ object to handle the rest of the streaming.
+
+ This method must be implemented by each policy ObjectController.
+
+ :param nodes: an iterator of the target storage nodes
+ :param partition: ring partition number
+ :param path: the object path to send to the storage node
+ :param headers: request headers
+ :param logger_thread_locals: The thread local values to be set on the
+ self.app.logger to retain transaction
+ logging information.
+ :return: HTTPConnection object
+ """
+ raise NotImplementedError()
+
def _get_put_connections(self, req, nodes, partition, outgoing_headers,
policy, expect):
"""
@@ -760,120 +721,23 @@ class BaseObjectController(Controller):
{'conns': len(conns), 'nodes': min_conns})
raise HTTPServiceUnavailable(request=req)
- def _transfer_data(self, req, data_source, conns, nodes):
- """
- Transfer data for a replicated object.
-
- This method was added in the PUT method extraction change
- """
- min_conns = quorum_size(len(nodes))
- bytes_transferred = 0
- try:
- with ContextPool(len(nodes)) as pool:
- for conn in conns:
- conn.failed = False
- conn.queue = Queue(self.app.put_queue_depth)
- pool.spawn(self._send_file, conn, req.path)
- while True:
- with ChunkReadTimeout(self.app.client_timeout):
- try:
- chunk = next(data_source)
- except StopIteration:
- if req.is_chunked:
- for conn in conns:
- conn.queue.put('0\r\n\r\n')
- break
- bytes_transferred += len(chunk)
- if bytes_transferred > constraints.MAX_FILE_SIZE:
- raise HTTPRequestEntityTooLarge(request=req)
- for conn in list(conns):
- if not conn.failed:
- conn.queue.put(
- '%x\r\n%s\r\n' % (len(chunk), chunk)
- if req.is_chunked else chunk)
- else:
- conn.close()
- conns.remove(conn)
- self._check_min_conn(
- req, conns, min_conns,
- msg='Object PUT exceptions during'
- ' send, %(conns)s/%(nodes)s required connections')
- for conn in conns:
- if conn.queue.unfinished_tasks:
- conn.queue.join()
- conns = [conn for conn in conns if not conn.failed]
- self._check_min_conn(
- req, conns, min_conns,
- msg='Object PUT exceptions after last send, '
- '%(conns)s/%(nodes)s required connections')
- except ChunkReadTimeout as err:
- self.app.logger.warn(
- _('ERROR Client read timeout (%ss)'), err.seconds)
- self.app.logger.increment('client_timeouts')
- raise HTTPRequestTimeout(request=req)
- except HTTPException:
- raise
- except (Exception, Timeout):
- self.app.logger.exception(
- _('ERROR Exception causing client disconnect'))
- raise HTTPClientDisconnect(request=req)
- if req.content_length and bytes_transferred < req.content_length:
- req.client_disconnect = True
- self.app.logger.warn(
- _('Client disconnected without sending enough data'))
- self.app.logger.increment('client_disconnects')
- raise HTTPClientDisconnect(request=req)
-
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
- Store a replicated object.
-
This method is responsible for establishing connection
- with storage nodes and sending object to each one of those
- nodes. After sending the data, the "best" response will be
- returned based on statuses from all connections
+ with storage nodes and sending the data to each one of those
+ nodes. The process of transferring data is specific to each
+ Storage Policy, thus it is required for each policy specific
+ ObjectController to provide their own implementation of this method.
+
+ :param req: the PUT Request
+ :param data_source: an iterator of the source of the data
+ :param nodes: an iterator of the target storage nodes
+ :param partition: ring partition number
+ :param outgoing_headers: system headers to storage nodes
+ :return: Response object
"""
- policy_index = req.headers.get('X-Backend-Storage-Policy-Index')
- policy = POLICIES.get_by_index(policy_index)
- if not nodes:
- return HTTPNotFound()
-
- # RFC2616:8.2.3 disallows 100-continue without a body
- if (req.content_length > 0) or req.is_chunked:
- expect = True
- else:
- expect = False
- conns = self._get_put_connections(req, nodes, partition,
- outgoing_headers, policy, expect)
- min_conns = quorum_size(len(nodes))
- try:
- # check that a minimum number of connections were established and
- # meet all the correct conditions set in the request
- self._check_failure_put_connections(conns, req, nodes, min_conns)
-
- # transfer data
- self._transfer_data(req, data_source, conns, nodes)
-
- # get responses
- statuses, reasons, bodies, etags = self._get_put_responses(
- req, conns, nodes)
- except HTTPException as resp:
- return resp
- finally:
- for conn in conns:
- conn.close()
-
- if len(etags) > 1:
- self.app.logger.error(
- _('Object servers returned %s mismatched etags'), len(etags))
- return HTTPServerError(request=req)
- etag = etags.pop() if len(etags) else None
- resp = self.best_response(req, statuses, reasons, bodies,
- _('Object PUT'), etag=etag)
- resp.last_modified = math.ceil(
- float(Timestamp(req.headers['X-Timestamp'])))
- return resp
+ raise NotImplementedError()
@public
@cors_validation
@@ -1131,6 +995,182 @@ class ReplicatedObjectController(BaseObjectController):
req.swift_entity_path)
return resp
+ def _connect_put_node(self, nodes, part, path, headers,
+ logger_thread_locals):
+ """
+ Make a connection for a replicated object.
+
+ Connects to the first working node that it finds in node_iter
+ and sends over the request headers. Returns an HTTPConnection
+ object to handle the rest of the streaming.
+ """
+ self.app.logger.thread_locals = logger_thread_locals
+ for node in nodes:
+ try:
+ start_time = time.time()
+ with ConnectionTimeout(self.app.conn_timeout):
+ conn = http_connect(
+ node['ip'], node['port'], node['device'], part, 'PUT',
+ path, headers)
+ self.app.set_node_timing(node, time.time() - start_time)
+ with Timeout(self.app.node_timeout):
+ resp = conn.getexpect()
+ if resp.status == HTTP_CONTINUE:
+ conn.resp = None
+ conn.node = node
+ return conn
+ elif is_success(resp.status) or resp.status == HTTP_CONFLICT:
+ conn.resp = resp
+ conn.node = node
+ return conn
+ elif headers['If-None-Match'] is not None and \
+ resp.status == HTTP_PRECONDITION_FAILED:
+ conn.resp = resp
+ conn.node = node
+ return conn
+ elif resp.status == HTTP_INSUFFICIENT_STORAGE:
+ self.app.error_limit(node, _('ERROR Insufficient Storage'))
+ elif is_server_error(resp.status):
+ self.app.error_occurred(
+ node,
+ _('ERROR %(status)d Expect: 100-continue '
+ 'From Object Server') % {
+ 'status': resp.status})
+ except (Exception, Timeout):
+ self.app.exception_occurred(
+ node, _('Object'),
+ _('Expect: 100-continue on %s') % path)
+
+ def _send_file(self, conn, path):
+ """Method for a file PUT coro"""
+ while True:
+ chunk = conn.queue.get()
+ if not conn.failed:
+ try:
+ with ChunkWriteTimeout(self.app.node_timeout):
+ conn.send(chunk)
+ except (Exception, ChunkWriteTimeout):
+ conn.failed = True
+ self.app.exception_occurred(
+ conn.node, _('Object'),
+ _('Trying to write to %s') % path)
+ conn.queue.task_done()
+
+ def _transfer_data(self, req, data_source, conns, nodes):
+ """
+ Transfer data for a replicated object.
+
+ This method was added in the PUT method extraction change
+ """
+ min_conns = quorum_size(len(nodes))
+ bytes_transferred = 0
+ try:
+ with ContextPool(len(nodes)) as pool:
+ for conn in conns:
+ conn.failed = False
+ conn.queue = Queue(self.app.put_queue_depth)
+ pool.spawn(self._send_file, conn, req.path)
+ while True:
+ with ChunkReadTimeout(self.app.client_timeout):
+ try:
+ chunk = next(data_source)
+ except StopIteration:
+ if req.is_chunked:
+ for conn in conns:
+ conn.queue.put('0\r\n\r\n')
+ break
+ bytes_transferred += len(chunk)
+ if bytes_transferred > constraints.MAX_FILE_SIZE:
+ raise HTTPRequestEntityTooLarge(request=req)
+ for conn in list(conns):
+ if not conn.failed:
+ conn.queue.put(
+ '%x\r\n%s\r\n' % (len(chunk), chunk)
+ if req.is_chunked else chunk)
+ else:
+ conn.close()
+ conns.remove(conn)
+ self._check_min_conn(
+ req, conns, min_conns,
+ msg='Object PUT exceptions during'
+ ' send, %(conns)s/%(nodes)s required connections')
+ for conn in conns:
+ if conn.queue.unfinished_tasks:
+ conn.queue.join()
+ conns = [conn for conn in conns if not conn.failed]
+ self._check_min_conn(
+ req, conns, min_conns,
+ msg='Object PUT exceptions after last send, '
+ '%(conns)s/%(nodes)s required connections')
+ except ChunkReadTimeout as err:
+ self.app.logger.warn(
+ _('ERROR Client read timeout (%ss)'), err.seconds)
+ self.app.logger.increment('client_timeouts')
+ raise HTTPRequestTimeout(request=req)
+ except HTTPException:
+ raise
+ except (Exception, Timeout):
+ self.app.logger.exception(
+ _('ERROR Exception causing client disconnect'))
+ raise HTTPClientDisconnect(request=req)
+ if req.content_length and bytes_transferred < req.content_length:
+ req.client_disconnect = True
+ self.app.logger.warn(
+ _('Client disconnected without sending enough data'))
+ self.app.logger.increment('client_disconnects')
+ raise HTTPClientDisconnect(request=req)
+
+ def _store_object(self, req, data_source, nodes, partition,
+ outgoing_headers):
+ """
+ Store a replicated object.
+
+ This method is responsible for establishing connection
+ with storage nodes and sending object to each one of those
+ nodes. After sending the data, the "best" response will be
+ returned based on statuses from all connections
+ """
+ policy_index = req.headers.get('X-Backend-Storage-Policy-Index')
+ policy = POLICIES.get_by_index(policy_index)
+ if not nodes:
+ return HTTPNotFound()
+
+ # RFC2616:8.2.3 disallows 100-continue without a body
+ if (req.content_length > 0) or req.is_chunked:
+ expect = True
+ else:
+ expect = False
+ conns = self._get_put_connections(req, nodes, partition,
+ outgoing_headers, policy, expect)
+ min_conns = quorum_size(len(nodes))
+ try:
+ # check that a minimum number of connections were established and
+ # meet all the correct conditions set in the request
+ self._check_failure_put_connections(conns, req, nodes, min_conns)
+
+ # transfer data
+ self._transfer_data(req, data_source, conns, nodes)
+
+ # get responses
+ statuses, reasons, bodies, etags = self._get_put_responses(
+ req, conns, nodes)
+ except HTTPException as resp:
+ return resp
+ finally:
+ for conn in conns:
+ conn.close()
+
+ if len(etags) > 1:
+ self.app.logger.error(
+ _('Object servers returned %s mismatched etags'), len(etags))
+ return HTTPServerError(request=req)
+ etag = etags.pop() if len(etags) else None
+ resp = self.best_response(req, statuses, reasons, bodies,
+ _('Object PUT'), etag=etag)
+ resp.last_modified = math.ceil(
+ float(Timestamp(req.headers['X-Timestamp'])))
+ return resp
+
class ECAppIter(object):
"""
From ab9f63402de6d554528699a02955854ac28264c5 Mon Sep 17 00:00:00 2001
From: Takashi Kajinami
Date: Mon, 24 Nov 2014 21:44:03 +0900
Subject: [PATCH 34/98] Add process name checking into swift-init
Swift-init uses pid files to detect existing swift processes by pid.
However, it can mistake an unrelated process for a swift process and make
a wrong decision when that unrelated process happens to be running with a
pid recorded in a swift pid file.
This patch adds process name checking to swift-init and enables it to remove
invalid pid files in such a situation.
Change-Id: Ibca026bdfbdacdd92c8763e1eb15d98293c70656
Closes-Bug: #1327106
---
swift/common/exceptions.py | 4 +
swift/common/manager.py | 32 ++-
test/unit/common/test_manager.py | 323 ++++++++++++++++++++-----------
3 files changed, 241 insertions(+), 118 deletions(-)
diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py
index dab0777d6d..b1edadee39 100644
--- a/swift/common/exceptions.py
+++ b/swift/common/exceptions.py
@@ -256,3 +256,7 @@ class ClientException(Exception):
b += ' [first 60 chars of response] %s' \
% self.http_response_content[:60]
return b and '%s: %s' % (a, b) or a
+
+
+class InvalidPidFileException(Exception):
+ pass
diff --git a/swift/common/manager.py b/swift/common/manager.py
index ba4832ee00..afed0bb8ca 100644
--- a/swift/common/manager.py
+++ b/swift/common/manager.py
@@ -24,9 +24,11 @@ import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
+from swift.common.exceptions import InvalidPidFileException
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
+PROC_DIR = '/proc'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
@@ -134,6 +136,29 @@ def watch_server_pids(server_pids, interval=1, **kwargs):
time.sleep(0.1)
+def safe_kill(pid, sig, name):
+ """Send signal to process and check process name
+
+ : param pid: process id
+ : param sig: signal to send
+ : param name: name to ensure target process
+ """
+
+ # check process name for SIG_DFL
+ if sig == signal.SIG_DFL:
+ try:
+ proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
+ if os.path.exists(proc_file):
+ with open(proc_file, 'r') as fd:
+ if name not in fd.read():
+ # unknown process is using the pid
+ raise InvalidPidFileException()
+ except IOError:
+ pass
+
+ os.kill(pid, sig)
+
+
class UnknownCommandError(Exception):
pass
@@ -488,7 +513,12 @@ class Server(object):
if sig != signal.SIG_DFL:
print _('Signal %s pid: %s signal: %s') % (self.server,
pid, sig)
- os.kill(pid, sig)
+ safe_kill(pid, sig, 'swift-%s' % self.server)
+ except InvalidPidFileException as e:
+ if kwargs.get('verbose'):
+ print _('Removing pid file %s with wrong pid %d') \
+ % (pid_file, pid)
+ remove_file(pid_file)
except OSError as e:
if e.errno == errno.ESRCH:
# pid does not exist
diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py
index 8896fc138a..5a9b3a6629 100644
--- a/test/unit/common/test_manager.py
+++ b/test/unit/common/test_manager.py
@@ -26,6 +26,7 @@ from threading import Thread
from time import sleep, time
from swift.common import manager
+from swift.common.exceptions import InvalidPidFileException
DUMMY_SIG = 1
@@ -63,7 +64,6 @@ def pop_stream(f):
output = f.read()
f.seek(0)
f.truncate()
- #print >> sys.stderr, output
return output
@@ -257,6 +257,23 @@ class TestManagerModule(unittest.TestCase):
manager.time = _orig_time
manager.Server = _orig_server
+ def test_safe_kill(self):
+ manager.os = MockOs([1, 2, 3, 4])
+
+ proc_files = (
+ ('1/cmdline', 'same-procname'),
+ ('2/cmdline', 'another-procname'),
+ ('4/cmdline', 'another-procname'),
+ )
+ files, contents = zip(*proc_files)
+ with temptree(files, contents) as t:
+ manager.PROC_DIR = t
+ manager.safe_kill(1, signal.SIG_DFL, 'same-procname')
+ self.assertRaises(InvalidPidFileException, manager.safe_kill,
+ 2, signal.SIG_DFL, 'same-procname')
+ manager.safe_kill(3, signal.SIG_DFL, 'same-procname')
+ manager.safe_kill(4, signal.SIGHUP, 'same-procname')
+
def test_exc(self):
self.assert_(issubclass(manager.UnknownCommandError, Exception))
@@ -680,17 +697,19 @@ class TestServer(unittest.TestCase):
self.assertEquals(pid_file, pid_two)
def test_signal_pids(self):
- pid_files = (
- ('proxy-server.pid', 1),
- ('auth-server.pid', 2),
- ('object-server.pid', 3),
+ temp_files = (
+ ('var/run/proxy-server.pid', 1),
+ ('var/run/auth-server.pid', 2),
+ ('var/run/one-server.pid', 3),
+ ('var/run/object-server.pid', 4),
+ ('proc/3/cmdline', 'swift-another-server')
)
- files, pids = zip(*pid_files)
- with temptree(files, pids) as t:
- manager.RUN_DIR = t
- # mock os with both pids running
+ with temptree(*zip(*temp_files)) as t:
+ manager.RUN_DIR = os.path.join(t, 'var/run')
+ manager.PROC_DIR = os.path.join(t, 'proc')
+ # mock os so that both the first and second pids are running
manager.os = MockOs([1, 2])
- server = manager.Server('proxy', run_dir=t)
+ server = manager.Server('proxy', run_dir=manager.RUN_DIR)
pids = server.signal_pids(DUMMY_SIG)
self.assertEquals(len(pids), 1)
self.assert_(1 in pids)
@@ -703,7 +722,7 @@ class TestServer(unittest.TestCase):
try:
with open(os.path.join(t, 'output'), 'w+') as f:
sys.stdout = f
- #test print details
+ # test print details
pids = server.signal_pids(DUMMY_SIG)
output = pop_stream(f)
self.assert_('pid: %s' % 1 in output)
@@ -711,7 +730,7 @@ class TestServer(unittest.TestCase):
# test no details on signal.SIG_DFL
pids = server.signal_pids(signal.SIG_DFL)
self.assertEquals(pop_stream(f), '')
- # reset mock os so only the other server is running
+ # reset mock os so only the second server is running
manager.os = MockOs([2])
# test pid not running
pids = server.signal_pids(signal.SIG_DFL)
@@ -722,42 +741,63 @@ class TestServer(unittest.TestCase):
self.join_run_dir('proxy-server.pid')))
# reset mock os with no running pids
manager.os = MockOs([])
- server = manager.Server('auth', run_dir=t)
- # test verbose warns on removing pid file
+ server = manager.Server('auth', run_dir=manager.RUN_DIR)
+ # test verbose warns on removing stale pid file
pids = server.signal_pids(signal.SIG_DFL, verbose=True)
output = pop_stream(f)
self.assert_('stale pid' in output.lower())
auth_pid = self.join_run_dir('auth-server.pid')
self.assert_(auth_pid in output)
+ # reset mock os so only the third server is running
+ manager.os = MockOs([3])
+ server = manager.Server('one', run_dir=manager.RUN_DIR)
+ # test verbose warns on removing invalid pid file
+ pids = server.signal_pids(signal.SIG_DFL, verbose=True)
+ output = pop_stream(f)
+ old_stdout.write('output %s' % output)
+ self.assert_('removing pid file' in output.lower())
+ one_pid = self.join_run_dir('one-server.pid')
+ self.assert_(one_pid in output)
+ # reset mock os with no running pids
+ manager.os = MockOs([])
# test warning with insufficient permissions
- server = manager.Server('object', run_dir=t)
+ server = manager.Server('object', run_dir=manager.RUN_DIR)
pids = server.signal_pids(manager.os.RAISE_EPERM_SIG)
output = pop_stream(f)
- self.assert_('no permission to signal pid 3' in
+ self.assert_('no permission to signal pid 4' in
output.lower(), output)
finally:
sys.stdout = old_stdout
def test_get_running_pids(self):
# test only gets running pids
- pid_files = (
- ('test-server1.pid', 1),
- ('test-server2.pid', 2),
+ temp_files = (
+ ('var/run/test-server1.pid', 1),
+ ('var/run/test-server2.pid', 2),
+ ('var/run/test-server3.pid', 3),
+ ('proc/1/cmdline', 'swift-test-server'),
+ ('proc/3/cmdline', 'swift-another-server')
)
- with temptree(*zip(*pid_files)) as t:
- manager.RUN_DIR = t
- server = manager.Server('test-server', run_dir=t)
+ with temptree(*zip(*temp_files)) as t:
+ manager.RUN_DIR = os.path.join(t, 'var/run')
+ manager.PROC_DIR = os.path.join(t, 'proc')
+ server = manager.Server(
+ 'test-server', run_dir=manager.RUN_DIR)
# mock os, only pid '1' is running
- manager.os = MockOs([1])
+ manager.os = MockOs([1, 3])
running_pids = server.get_running_pids()
self.assertEquals(len(running_pids), 1)
self.assert_(1 in running_pids)
self.assert_(2 not in running_pids)
+ self.assert_(3 not in running_pids)
# test persistent running pid files
- self.assert_(os.path.exists(os.path.join(t, 'test-server1.pid')))
+ self.assert_(os.path.exists(
+ os.path.join(manager.RUN_DIR, 'test-server1.pid')))
# test clean up stale pids
pid_two = self.join_swift_dir('test-server2.pid')
self.assertFalse(os.path.exists(pid_two))
+ pid_three = self.join_swift_dir('test-server3.pid')
+ self.assertFalse(os.path.exists(pid_three))
# reset mock os, no pids running
manager.os = MockOs([])
running_pids = server.get_running_pids()
@@ -765,7 +805,7 @@ class TestServer(unittest.TestCase):
# and now all pid files are cleaned out
pid_one = self.join_run_dir('test-server1.pid')
self.assertFalse(os.path.exists(pid_one))
- all_pids = os.listdir(t)
+ all_pids = os.listdir(manager.RUN_DIR)
self.assertEquals(len(all_pids), 0)
# test only get pids for right server
@@ -883,40 +923,68 @@ class TestServer(unittest.TestCase):
sys.stdout = f
# test status for all running
manager.os = MockOs(pids)
- self.assertEquals(server.status(), 0)
- output = pop_stream(f).strip().splitlines()
- self.assertEquals(len(output), 4)
- for line in output:
- self.assert_('test-server running' in line)
+ proc_files = (
+ ('1/cmdline', 'swift-test-server'),
+ ('2/cmdline', 'swift-test-server'),
+ ('3/cmdline', 'swift-test-server'),
+ ('4/cmdline', 'swift-test-server'),
+ )
+ files, contents = zip(*proc_files)
+ with temptree(files, contents) as t:
+ manager.PROC_DIR = t
+ self.assertEquals(server.status(), 0)
+ output = pop_stream(f).strip().splitlines()
+ self.assertEquals(len(output), 4)
+ for line in output:
+ self.assert_('test-server running' in line)
# test get single server by number
- self.assertEquals(server.status(number=4), 0)
- output = pop_stream(f).strip().splitlines()
- self.assertEquals(len(output), 1)
- line = output[0]
- self.assert_('test-server running' in line)
- conf_four = self.join_swift_dir(conf_files[3])
- self.assert_('4 - %s' % conf_four in line)
+ with temptree([], []) as t:
+ manager.PROC_DIR = t
+ self.assertEquals(server.status(number=4), 0)
+ output = pop_stream(f).strip().splitlines()
+ self.assertEquals(len(output), 1)
+ line = output[0]
+ self.assert_('test-server running' in line)
+ conf_four = self.join_swift_dir(conf_files[3])
+ self.assert_('4 - %s' % conf_four in line)
# test some servers not running
manager.os = MockOs([1, 2, 3])
- self.assertEquals(server.status(), 0)
- output = pop_stream(f).strip().splitlines()
- self.assertEquals(len(output), 3)
- for line in output:
- self.assert_('test-server running' in line)
+ proc_files = (
+ ('1/cmdline', 'swift-test-server'),
+ ('2/cmdline', 'swift-test-server'),
+ ('3/cmdline', 'swift-test-server'),
+ )
+ files, contents = zip(*proc_files)
+ with temptree(files, contents) as t:
+ manager.PROC_DIR = t
+ self.assertEquals(server.status(), 0)
+ output = pop_stream(f).strip().splitlines()
+ self.assertEquals(len(output), 3)
+ for line in output:
+ self.assert_('test-server running' in line)
# test single server not running
manager.os = MockOs([1, 2])
- self.assertEquals(server.status(number=3), 1)
- output = pop_stream(f).strip().splitlines()
- self.assertEquals(len(output), 1)
- line = output[0]
- self.assert_('not running' in line)
- conf_three = self.join_swift_dir(conf_files[2])
- self.assert_(conf_three in line)
+ proc_files = (
+ ('1/cmdline', 'swift-test-server'),
+ ('2/cmdline', 'swift-test-server'),
+ )
+ files, contents = zip(*proc_files)
+ with temptree(files, contents) as t:
+ manager.PROC_DIR = t
+ self.assertEquals(server.status(number=3), 1)
+ output = pop_stream(f).strip().splitlines()
+ self.assertEquals(len(output), 1)
+ line = output[0]
+ self.assert_('not running' in line)
+ conf_three = self.join_swift_dir(conf_files[2])
+ self.assert_(conf_three in line)
# test no running pids
manager.os = MockOs([])
- self.assertEquals(server.status(), 1)
- output = pop_stream(f).lower()
- self.assert_('no test-server running' in output)
+ with temptree([], []) as t:
+ manager.PROC_DIR = t
+ self.assertEquals(server.status(), 1)
+ output = pop_stream(f).lower()
+ self.assert_('no test-server running' in output)
# test use provided pids
pids = {
1: '1.pid',
@@ -1210,7 +1278,7 @@ class TestServer(unittest.TestCase):
('proxy-server/2.pid', 2),
)
- #mocks
+ # mocks
class MockSpawn(object):
def __init__(self, pids=None):
@@ -1247,76 +1315,97 @@ class TestServer(unittest.TestCase):
self.assertFalse(server.launch())
# start mock os running all pids
manager.os = MockOs(pids)
- server = manager.Server('proxy', run_dir=t)
- # can't start server if it's already running
- self.assertFalse(server.launch())
- output = pop_stream(f)
- self.assert_('running' in output)
- conf_file = self.join_swift_dir('proxy-server.conf')
- self.assert_(conf_file in output)
- pid_file = self.join_run_dir('proxy-server/2.pid')
- self.assert_(pid_file in output)
- self.assert_('already started' in output)
+ proc_files = (
+ ('1/cmdline', 'swift-proxy-server'),
+ ('2/cmdline', 'swift-proxy-server'),
+ )
+ files, contents = zip(*proc_files)
+ with temptree(files, contents) as proc_dir:
+ manager.PROC_DIR = proc_dir
+ server = manager.Server('proxy', run_dir=t)
+ # can't start server if it's already running
+ self.assertFalse(server.launch())
+ output = pop_stream(f)
+ self.assert_('running' in output)
+ conf_file = self.join_swift_dir(
+ 'proxy-server.conf')
+ self.assert_(conf_file in output)
+ pid_file = self.join_run_dir('proxy-server/2.pid')
+ self.assert_(pid_file in output)
+ self.assert_('already started' in output)
# no running pids
manager.os = MockOs([])
- # test ignore once for non-start-once server
- mock_spawn = MockSpawn([1])
- server.spawn = mock_spawn
- conf_file = self.join_swift_dir('proxy-server.conf')
- expected = {
- 1: conf_file,
- }
- self.assertEquals(server.launch(once=True), expected)
- self.assertEquals(mock_spawn.conf_files, [conf_file])
- expected = {
- 'once': False,
- }
- self.assertEquals(mock_spawn.kwargs, [expected])
- output = pop_stream(f)
- self.assert_('Starting' in output)
- self.assert_('once' not in output)
+ with temptree([], []) as proc_dir:
+ manager.PROC_DIR = proc_dir
+ # test ignore once for non-start-once server
+ mock_spawn = MockSpawn([1])
+ server.spawn = mock_spawn
+ conf_file = self.join_swift_dir(
+ 'proxy-server.conf')
+ expected = {
+ 1: conf_file,
+ }
+ self.assertEquals(server.launch(once=True),
+ expected)
+ self.assertEquals(mock_spawn.conf_files,
+ [conf_file])
+ expected = {
+ 'once': False,
+ }
+ self.assertEquals(mock_spawn.kwargs, [expected])
+ output = pop_stream(f)
+ self.assert_('Starting' in output)
+ self.assert_('once' not in output)
# test multi-server kwarg once
server = manager.Server('object-replicator')
- mock_spawn = MockSpawn([1, 2, 3, 4])
- server.spawn = mock_spawn
- conf1 = self.join_swift_dir('object-server/1.conf')
- conf2 = self.join_swift_dir('object-server/2.conf')
- conf3 = self.join_swift_dir('object-server/3.conf')
- conf4 = self.join_swift_dir('object-server/4.conf')
- expected = {
- 1: conf1,
- 2: conf2,
- 3: conf3,
- 4: conf4,
- }
- self.assertEquals(server.launch(once=True), expected)
- self.assertEquals(mock_spawn.conf_files, [
- conf1, conf2, conf3, conf4])
- expected = {
- 'once': True,
- }
- self.assertEquals(len(mock_spawn.kwargs), 4)
- for kwargs in mock_spawn.kwargs:
- self.assertEquals(kwargs, expected)
- # test number kwarg
- mock_spawn = MockSpawn([4])
- server.spawn = mock_spawn
- expected = {
- 4: conf4,
- }
- self.assertEquals(server.launch(number=4), expected)
- self.assertEquals(mock_spawn.conf_files, [conf4])
- expected = {
- 'number': 4
- }
- self.assertEquals(mock_spawn.kwargs, [expected])
+ with temptree([], []) as proc_dir:
+ manager.PROC_DIR = proc_dir
+ mock_spawn = MockSpawn([1, 2, 3, 4])
+ server.spawn = mock_spawn
+ conf1 = self.join_swift_dir('object-server/1.conf')
+ conf2 = self.join_swift_dir('object-server/2.conf')
+ conf3 = self.join_swift_dir('object-server/3.conf')
+ conf4 = self.join_swift_dir('object-server/4.conf')
+ expected = {
+ 1: conf1,
+ 2: conf2,
+ 3: conf3,
+ 4: conf4,
+ }
+ self.assertEquals(server.launch(once=True),
+ expected)
+ self.assertEquals(mock_spawn.conf_files, [
+ conf1, conf2, conf3, conf4])
+ expected = {
+ 'once': True,
+ }
+ self.assertEquals(len(mock_spawn.kwargs), 4)
+ for kwargs in mock_spawn.kwargs:
+ self.assertEquals(kwargs, expected)
+ # test number kwarg
+ mock_spawn = MockSpawn([4])
+ manager.PROC_DIR = proc_dir
+ server.spawn = mock_spawn
+ expected = {
+ 4: conf4,
+ }
+ self.assertEquals(server.launch(number=4),
+ expected)
+ self.assertEquals(mock_spawn.conf_files, [conf4])
+ expected = {
+ 'number': 4
+ }
+ self.assertEquals(mock_spawn.kwargs, [expected])
# test cmd does not exist
server = manager.Server('auth')
- mock_spawn = MockSpawn([OSError(errno.ENOENT, 'blah')])
- server.spawn = mock_spawn
- self.assertEquals(server.launch(), {})
- self.assert_('swift-auth-server does not exist' in
- pop_stream(f))
+ with temptree([], []) as proc_dir:
+ manager.PROC_DIR = proc_dir
+ mock_spawn = MockSpawn([OSError(errno.ENOENT,
+ 'blah')])
+ server.spawn = mock_spawn
+ self.assertEquals(server.launch(), {})
+ self.assert_('swift-auth-server does not exist' in
+ pop_stream(f))
finally:
sys.stdout = old_stdout
From e54781a2aa29808aff654861d8a0aafd24b6620c Mon Sep 17 00:00:00 2001
From: Thiago da Silva
Date: Tue, 19 May 2015 20:27:06 +0000
Subject: [PATCH 35/98] add object post and delete methods to
BaseObjectController
Adding post and delete methods to BaseObjectController that can
be overridden by ObjectController subclasses. These methods are
similar to the PUT and GET methods that were introduced as part
of the EC work
Change-Id: I197364bc3e2f2287c0afc8948863e3cdeab90383
Signed-off-by: Thiago da Silva
---
swift/proxy/controllers/obj.py | 45 ++++++++++++++++++++++++++--------
1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index c61986e1dd..e84eafea13 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -313,10 +313,7 @@ class BaseObjectController(Controller):
headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
-
- resp = self.make_requests(req, obj_ring, partition,
- 'POST', req.swift_entity_path, headers)
- return resp
+ return self._post_object(req, obj_ring, partition, headers)
def _backend_requests(self, req, n_outgoing,
container_partition, containers,
@@ -739,6 +736,39 @@ class BaseObjectController(Controller):
"""
raise NotImplementedError()
+ def _delete_object(self, req, obj_ring, partition, headers):
+ """
+ send object DELETE request to storage nodes. Subclasses of
+ the BaseObjectController can provide their own implementation
+ of this method.
+
+ :param req: the DELETE Request
+ :param obj_ring: the object ring
+ :param partition: ring partition number
+ :param headers: system headers to storage nodes
+ :return: Response object
+ """
+ # When deleting objects treat a 404 status as 204.
+ status_overrides = {404: 204}
+ resp = self.make_requests(req, obj_ring,
+ partition, 'DELETE', req.swift_entity_path,
+ headers, overrides=status_overrides)
+ return resp
+
+ def _post_object(self, req, obj_ring, partition, headers):
+ """
+ send object POST request to storage nodes.
+
+ :param req: the POST Request
+ :param obj_ring: the object ring
+ :param partition: ring partition number
+ :param headers: system headers to storage nodes
+ :return: Response object
+ """
+ resp = self.make_requests(req, obj_ring, partition,
+ 'POST', req.swift_entity_path, headers)
+ return resp
+
@public
@cors_validation
@delay_denial
@@ -928,12 +958,7 @@ class BaseObjectController(Controller):
headers = self._backend_requests(
req, len(nodes), container_partition, containers)
- # When deleting objects treat a 404 status as 204.
- status_overrides = {404: 204}
- resp = self.make_requests(req, obj_ring,
- partition, 'DELETE', req.swift_entity_path,
- headers, overrides=status_overrides)
- return resp
+ return self._delete_object(req, obj_ring, partition, headers)
def _reroute(self, policy):
"""
From f864092455ebcd40b3568633c3524cc5c64d3309 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Wed, 20 May 2015 17:50:07 -0700
Subject: [PATCH 36/98] Add Swift Inspector to associated projects
Change-Id: I5b5448674ea455119a51509ab5e7cd11a764b5a7
---
doc/source/associated_projects.rst | 1 +
1 file changed, 1 insertion(+)
diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst
index c0f8cf7e5d..762971ca4f 100644
--- a/doc/source/associated_projects.rst
+++ b/doc/source/associated_projects.rst
@@ -48,6 +48,7 @@ Monitoring & Statistics
-----------------------
* `Swift Informant `_ - Swift Proxy Middleware to send events to a statsd instance.
+* `Swift Inspector `_ - Swift middleware to relay information about a request back to the client.
Content Distribution Network Integration
From f11d92d566757a54ff1e3800ec0bfac098347a68 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Fri, 22 May 2015 16:58:04 -0700
Subject: [PATCH 37/98] Add swift-durability-calculator line to docs
This commit adds a line (link and small doc) for
swift-durability-calculator which provides a browser-based
durability calculation tool to docs as an associated project.
Change-Id: I4ea8015f512616dc25072080bef79b8734971ccf
---
doc/source/associated_projects.rst | 1 +
1 file changed, 1 insertion(+)
diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst
index c0f8cf7e5d..d706334670 100644
--- a/doc/source/associated_projects.rst
+++ b/doc/source/associated_projects.rst
@@ -108,3 +108,4 @@ Other
* `liberasurecode `_ - Low Level Erasure Code library used by PyECLib
* `Swift Browser `_ - JavaScript interface for Swift
* `swift-ui `_ - OpenStack Swift web browser
+* `Swift Durability Calculator `_ - Data Durability Calculation Tool for Swift
From af8d842076ba269fed7f4128d0c7503ab5d1a94a Mon Sep 17 00:00:00 2001
From: "Joanna H. Huang"
Date: Tue, 21 Oct 2014 09:24:25 +0000
Subject: [PATCH 38/98] Replaced setting run_pause with standard interval
The deprecated directive `run_pause` should be replaced with the more
standard one `interval`. The `run_pause` should be still supported for
backward compatibility. This patch updates object replicator to use
`interval` and support `run_pause`. It also updates its sample config
and documentation.
Co-Authored-By: Joanna H. Huang
Co-Authored-By: Kamil Rykowski
Change-Id: Ie2a3414a96a94efb9273ff53a80b9d90c74fff09
Closes-Bug: #1364735
---
doc/manpages/account-server.conf.5 | 2 +-
doc/manpages/container-server.conf.5 | 2 +-
doc/manpages/object-server.conf.5 | 4 +++-
doc/source/deployment_guide.rst | 6 +++---
etc/account-server.conf-sample | 9 ++++-----
etc/container-server.conf-sample | 10 +++++-----
etc/object-server.conf-sample | 10 ++++++++++
swift/obj/reconstructor.py | 7 ++++---
swift/obj/replicator.py | 7 ++++---
9 files changed, 35 insertions(+), 22 deletions(-)
diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5
index c98b679b44..b60baeb60d 100644
--- a/doc/manpages/account-server.conf.5
+++ b/doc/manpages/account-server.conf.5
@@ -185,7 +185,7 @@ This caps how long the replicator will spend trying to sync a given database per
.IP \fBconcurrency\fR
Number of replication workers to spawn. The default is 8.
.IP "\fBrun_pause [deprecated]\fR"
-Time in seconds to wait between replication passes. The default is 10.
+Time in seconds to wait between replication passes. The default is 30.
.IP \fBinterval\fR
Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30.
.IP \fBerror_suppression_interval\fR
diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5
index 93408cf7ad..2cd1623dc1 100644
--- a/doc/manpages/container-server.conf.5
+++ b/doc/manpages/container-server.conf.5
@@ -191,7 +191,7 @@ This caps how long the replicator will spend trying to sync a given database per
.IP \fBconcurrency\fR
Number of replication workers to spawn. The default is 8.
.IP "\fBrun_pause [deprecated]\fR"
-Time in seconds to wait between replication passes. The default is 10.
+Time in seconds to wait between replication passes. The default is 30.
.IP \fBinterval\fR
Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30.
.IP \fBnode_timeout\fR
diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5
index 14e8a58b3b..fb2297421a 100644
--- a/doc/manpages/object-server.conf.5
+++ b/doc/manpages/object-server.conf.5
@@ -187,7 +187,9 @@ Logging address. The default is /dev/log.
Indicates that you are using a VM environment. The default is no.
.IP \fBdaemonize\fR
Whether or not to run replication as a daemon. The default is yes.
-.IP \fBrun_pause\fR
+.IP "\fBrun_pause [deprecated]\fR"
+Time in seconds to wait between replication passes. The default is 30.
+.IP \fBinterval\fR
Time in seconds to wait between replication passes. The default is 30.
.IP \fBconcurrency\fR
Number of replication workers to spawn. The default is 1.
diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst
index 0b40bb8568..552dfea314 100644
--- a/doc/source/deployment_guide.rst
+++ b/doc/source/deployment_guide.rst
@@ -465,7 +465,7 @@ log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
daemonize yes Whether or not to run replication as a
daemon
-run_pause 30 Time in seconds to wait between
+interval 30 Time in seconds to wait between
replication passes
concurrency 1 Number of replication workers to spawn
timeout 5 Timeout value sent to rsync --timeout
@@ -614,7 +614,7 @@ log_level INFO Logging level
per_diff 1000
concurrency 8 Number of replication workers to
spawn
-run_pause 30 Time in seconds to wait between
+interval 30 Time in seconds to wait between
replication passes
node_timeout 10 Request timeout to external services
conn_timeout 0.5 Connection timeout to external
@@ -742,7 +742,7 @@ log_facility LOG_LOCAL0 Syslog log facility
log_level INFO Logging level
per_diff 1000
concurrency 8 Number of replication workers to spawn
-run_pause 30 Time in seconds to wait between
+interval 30 Time in seconds to wait between
replication passes
node_timeout 10 Request timeout to external services
conn_timeout 0.5 Connection timeout to external services
diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample
index 98c97acf6f..3631986fa2 100644
--- a/etc/account-server.conf-sample
+++ b/etc/account-server.conf-sample
@@ -94,7 +94,11 @@ use = egg:swift#recon
# per_diff = 1000
# max_diffs = 100
# concurrency = 8
+#
+# Time in seconds to wait between replication passes
# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
@@ -109,11 +113,6 @@ use = egg:swift#recon
# The replicator also performs reclamation
# reclaim_age = 604800
#
-# Time in seconds to wait between replication passes
-# Note: if the parameter 'interval' is defined then it will be used in place
-# of run_pause.
-# run_pause = 30
-#
# Allow rsync to compress data which is transmitted to destination node
# during sync. However, this is applicable only when destination node is in
# a different region than the local one.
diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample
index f09aa66fd7..54daee1e8e 100644
--- a/etc/container-server.conf-sample
+++ b/etc/container-server.conf-sample
@@ -103,18 +103,18 @@ use = egg:swift#recon
# per_diff = 1000
# max_diffs = 100
# concurrency = 8
+#
+# Time in seconds to wait between replication passes
# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
-# Time in seconds to wait between replication passes
-# Note: if the parameter 'interval' is defined then it will be used in place
-# of run_pause.
-# run_pause = 30
-#
# Allow rsync to compress data which is transmitted to destination node
# during sync. However, this is applicable only when destination node is in
# a different region than the local one.
diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
index a3f76ceb92..4fafa7c18b 100644
--- a/etc/object-server.conf-sample
+++ b/etc/object-server.conf-sample
@@ -155,7 +155,12 @@ use = egg:swift#recon
#
# vm_test_mode = no
# daemonize = on
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
# run_pause = 30
+#
# concurrency = 1
# stats_interval = 300
#
@@ -230,7 +235,12 @@ use = egg:swift#recon
# log_address = /dev/log
#
# daemonize = on
+#
+# Time in seconds to wait between reconstruction passes
+# interval = 30
+# run_pause is deprecated, use interval instead
# run_pause = 30
+#
# concurrency = 1
# stats_interval = 300
# node_timeout = 10
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 2dd743fa9a..44920d54f7 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -126,7 +126,8 @@ class ObjectReconstructor(Daemon):
self.next_check = time.time() + self.ring_check_interval
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.partition_times = []
- self.run_pause = int(conf.get('run_pause', 30))
+ self.interval = int(conf.get('interval') or
+ conf.get('run_pause') or 30)
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
@@ -916,5 +917,5 @@ class ObjectReconstructor(Daemon):
'object_reconstruction_last': time.time()},
self.rcache, self.logger)
self.logger.debug('reconstruction sleeping for %s seconds.',
- self.run_pause)
- sleep(self.run_pause)
+ self.interval)
+ sleep(self.interval)
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index 402de63af3..d23624b382 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -72,7 +72,8 @@ class ObjectReplicator(Daemon):
self.next_check = time.time() + self.ring_check_interval
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.partition_times = []
- self.run_pause = int(conf.get('run_pause', 30))
+ self.interval = int(conf.get('interval') or
+ conf.get('run_pause') or 30)
self.rsync_timeout = int(conf.get('rsync_timeout', 900))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
@@ -651,5 +652,5 @@ class ObjectReplicator(Daemon):
'object_replication_last': time.time()},
self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.',
- self.run_pause)
- sleep(self.run_pause)
+ self.interval)
+ sleep(self.interval)
From e7c8c578d9e5b0aa7e56b02bd9c39baa99d2d6ae Mon Sep 17 00:00:00 2001
From: Michael MATUR
Date: Mon, 25 May 2015 15:13:01 +0200
Subject: [PATCH 39/98] fixup!Patch of "parse_content_disposition" method to
meet RFC2183
The spec of Content-Disposition does not require a space character after
comma: http://www.ietf.org/rfc/rfc2183.txt
Change-Id: Iff438dc36ce78c6a79bb66ab3d889a8dae7c0e1f
Closes-Bug: #1458497
---
swift/common/utils.py | 4 ++--
test/unit/common/test_utils.py | 6 ++++++
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/swift/common/utils.py b/swift/common/utils.py
index 19dcfd3d61..11a97d126b 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -3355,8 +3355,8 @@ def parse_content_disposition(header):
"""
attributes = {}
attrs = ''
- if '; ' in header:
- header, attrs = header.split('; ', 1)
+ if ';' in header:
+ header, attrs = [x.strip() for x in header.split(';', 1)]
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index 48610c1a7b..113b712ab1 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -4629,6 +4629,12 @@ class TestParseContentDisposition(unittest.TestCase):
self.assertEquals(name, 'form-data')
self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'})
+ def test_content_disposition_without_white_space(self):
+ name, attrs = utils.parse_content_disposition(
+ 'form-data;name="somefile";filename="test.html"')
+ self.assertEquals(name, 'form-data')
+ self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'})
+
class TestIterMultipartMimeDocuments(unittest.TestCase):
From 666bf06c26bc9e0d6256d054835386e50e67b8a2 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Wed, 6 May 2015 16:29:06 -0700
Subject: [PATCH 40/98] EC: don't 503 on marginally-successful PUT
On EC PUT in an M+K scheme, we require M+1 fragment archives to
durably land on disk. If we get that, then we go ahead and ask the
object servers to "commit" the object by writing out .durable
files. We only require 2 of those.
When we got exactly M+1 fragment archives on disk, and then one
connection timed out while writing .durable files, we should still be
okay (provided M is at least 3). However, we'd take our M > 2
remaining successful responses and pass that off to best_response()
with a quorum size of M+1, thus getting a 503 even though everything
worked well enough.
Now we pass 2 to best_response() to avoid that false negative.
There was also a spot where we were getting the quorum size wrong. If
we wrote out 3 fragment archives for a 2+1 policy, we were only
requiring 2 successful backend PUTs. That's wrong; the right number is
3, which is what the policy's .quorum() method says. There was a spot
where the right number wasn't getting plumbed through, but it is now.
Change-Id: Ic658a199e952558db329268f4d7b4009f47c6d03
Co-Authored-By: Clay Gerrard
Closes-Bug: 1452468
---
swift/proxy/controllers/base.py | 6 +-
swift/proxy/controllers/obj.py | 11 ++-
test/unit/__init__.py | 108 +++++++++++++++++-------
test/unit/proxy/controllers/test_obj.py | 31 ++++++-
test/unit/proxy/test_server.py | 18 ++--
5 files changed, 125 insertions(+), 49 deletions(-)
diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py
index 6bf7ea0ef6..2fb7cd945e 100644
--- a/swift/proxy/controllers/base.py
+++ b/swift/proxy/controllers/base.py
@@ -1197,16 +1197,18 @@ class Controller(object):
"""
return quorum_size(n)
- def have_quorum(self, statuses, node_count):
+ def have_quorum(self, statuses, node_count, quorum=None):
"""
Given a list of statuses from several requests, determine if
a quorum response can already be decided.
:param statuses: list of statuses returned
:param node_count: number of nodes being queried (basically ring count)
+ :param quorum: number of statuses required for quorum
:returns: True or False, depending on if quorum is established
"""
- quorum = self._quorum_size(node_count)
+ if quorum is None:
+ quorum = self._quorum_size(node_count)
if len(statuses) >= quorum:
for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES,
HTTP_BAD_REQUEST):
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index a83242b5f0..b04f9c3161 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -2171,7 +2171,7 @@ class ECObjectController(BaseObjectController):
else:
# intermediate response phase - set return value to true only
# if there are enough 100-continue acknowledgements
- if self.have_quorum(statuses, num_nodes):
+ if self.have_quorum(statuses, num_nodes, quorum=min_responses):
quorum = True
return statuses, reasons, bodies, etags, quorum
@@ -2203,12 +2203,17 @@ class ECObjectController(BaseObjectController):
nodes, min_conns, etag_hasher)
final_phase = True
need_quorum = False
- min_resp = 2
+ # The .durable file will propagate in a replicated fashion; if
+ # one exists, the reconstructor will spread it around. Thus, we
+ # don't require as many .durable files to be successfully
+ # written as we do fragment archives in order to call the PUT a
+ # success.
+ min_conns = 2
putters = [p for p in putters if not p.failed]
# ignore response etags, and quorum boolean
statuses, reasons, bodies, _etags, _quorum = \
self._get_put_responses(req, putters, len(nodes),
- final_phase, min_resp,
+ final_phase, min_conns,
need_quorum=need_quorum)
except HTTPException as resp:
return resp
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 372fb58bbf..e839e5568c 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -704,6 +704,74 @@ def mock(update):
delattr(module, attr)
+class FakeStatus(object):
+ """
+ This will work with our fake_http_connect, if you hand in one of these
+ instead of a status int or status int tuple to the "codes" iter you can
+ add some eventlet sleep to the expect and response stages of the
+ connection.
+ """
+
+ def __init__(self, status, expect_sleep=None, response_sleep=None):
+ """
+ :param status: the response status int, or a tuple of
+ ([expect_status, ...], response_status)
+ :param expect_sleep: float, time to eventlet sleep during expect, can
+ be a iter of floats
+ :param response_sleep: float, time to eventlet sleep during response
+ """
+ # connect exception
+ if isinstance(status, (Exception, eventlet.Timeout)):
+ raise status
+ if isinstance(status, tuple):
+ self.expect_status = list(status[:-1])
+ self.status = status[-1]
+ self.explicit_expect_list = True
+ else:
+ self.expect_status, self.status = ([], status)
+ self.explicit_expect_list = False
+ if not self.expect_status:
+ # when a swift backend service returns a status before reading
+ # from the body (mostly an error response) eventlet.wsgi will
+ # respond with that status line immediately instead of 100
+ # Continue, even if the client sent the Expect 100 header.
+ # BufferedHttp and the proxy both see these error statuses
+ # when they call getexpect, so our FakeConn tries to act like
+ # our backend services and return certain types of responses
+ # as expect statuses just like a real backend server would do.
+ if self.status in (507, 412, 409):
+ self.expect_status = [status]
+ else:
+ self.expect_status = [100, 100]
+
+ # setup sleep attributes
+ if not isinstance(expect_sleep, (list, tuple)):
+ expect_sleep = [expect_sleep] * len(self.expect_status)
+ self.expect_sleep_list = list(expect_sleep)
+ while len(self.expect_sleep_list) < len(self.expect_status):
+ self.expect_sleep_list.append(None)
+ self.response_sleep = response_sleep
+
+ def get_response_status(self):
+ if self.response_sleep is not None:
+ eventlet.sleep(self.response_sleep)
+ if self.expect_status and self.explicit_expect_list:
+ raise Exception('Test did not consume all fake '
+ 'expect status: %r' % (self.expect_status,))
+ if isinstance(self.status, (Exception, eventlet.Timeout)):
+ raise self.status
+ return self.status
+
+ def get_expect_status(self):
+ expect_sleep = self.expect_sleep_list.pop(0)
+ if expect_sleep is not None:
+ eventlet.sleep(expect_sleep)
+ expect_status = self.expect_status.pop(0)
+ if isinstance(expect_status, (Exception, eventlet.Timeout)):
+ raise expect_status
+ return expect_status
+
+
class SlowBody(object):
"""
This will work with our fake_http_connect, if you hand in these
@@ -741,29 +809,9 @@ def fake_http_connect(*code_iter, **kwargs):
def __init__(self, status, etag=None, body='', timestamp='1',
headers=None, expect_headers=None, connection_id=None,
give_send=None):
- # connect exception
- if isinstance(status, (Exception, eventlet.Timeout)):
- raise status
- if isinstance(status, tuple):
- self.expect_status = list(status[:-1])
- self.status = status[-1]
- self.explicit_expect_list = True
- else:
- self.expect_status, self.status = ([], status)
- self.explicit_expect_list = False
- if not self.expect_status:
- # when a swift backend service returns a status before reading
- # from the body (mostly an error response) eventlet.wsgi will
- # respond with that status line immediately instead of 100
- # Continue, even if the client sent the Expect 100 header.
- # BufferedHttp and the proxy both see these error statuses
- # when they call getexpect, so our FakeConn tries to act like
- # our backend services and return certain types of responses
- # as expect statuses just like a real backend server would do.
- if self.status in (507, 412, 409):
- self.expect_status = [status]
- else:
- self.expect_status = [100, 100]
+ if not isinstance(status, FakeStatus):
+ status = FakeStatus(status)
+ self._status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
@@ -785,11 +833,6 @@ def fake_http_connect(*code_iter, **kwargs):
eventlet.sleep()
def getresponse(self):
- if self.expect_status and self.explicit_expect_list:
- raise Exception('Test did not consume all fake '
- 'expect status: %r' % (self.expect_status,))
- if isinstance(self.status, (Exception, eventlet.Timeout)):
- raise self.status
exc = kwargs.get('raise_exc')
if exc:
if isinstance(exc, (Exception, eventlet.Timeout)):
@@ -797,16 +840,17 @@ def fake_http_connect(*code_iter, **kwargs):
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
raise eventlet.Timeout()
+ self.status = self._status.get_response_status()
return self
def getexpect(self):
- expect_status = self.expect_status.pop(0)
- if isinstance(self.expect_status, (Exception, eventlet.Timeout)):
- raise self.expect_status
+ expect_status = self._status.get_expect_status()
headers = dict(self.expect_headers)
if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
- return FakeConn(expect_status, headers=headers)
+ response = FakeConn(expect_status, headers=headers)
+ response.status = expect_status
+ return response
def getheaders(self):
etag = self.etag
diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py
index a38e753ae0..04cb57d934 100755
--- a/test/unit/proxy/controllers/test_obj.py
+++ b/test/unit/proxy/controllers/test_obj.py
@@ -35,7 +35,7 @@ from swift.proxy.controllers.base import get_info as _real_get_info
from swift.common.storage_policy import POLICIES, ECDriverError
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
- debug_logger, patch_policies, SlowBody
+ debug_logger, patch_policies, SlowBody, FakeStatus
from test.unit.proxy.test_server import node_error_count
@@ -1406,6 +1406,35 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
self.assertEqual(1, len(error_lines))
self.assertTrue('retrying' in error_lines[0])
+ def test_PUT_with_slow_commits(self):
+ # It's important that this timeout be much less than the delay in
+ # the slow commit responses so that the slow commits are not waited
+ # for.
+ self.app.post_quorum_timeout = 0.01
+ req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
+ body='')
+ # plenty of slow commits
+ response_sleep = 5.0
+ codes = [FakeStatus(201, response_sleep=response_sleep)
+ for i in range(self.replicas())]
+ # swap out some with regular fast responses
+ number_of_fast_responses_needed_to_be_quick_enough = 2
+ fast_indexes = random.sample(
+ xrange(self.replicas()),
+ number_of_fast_responses_needed_to_be_quick_enough)
+ for i in fast_indexes:
+ codes[i] = 201
+ expect_headers = {
+ 'X-Obj-Metadata-Footer': 'yes',
+ 'X-Obj-Multiphase-Commit': 'yes'
+ }
+ with set_http_connect(*codes, expect_headers=expect_headers):
+ start = time.time()
+ resp = req.get_response(self.app)
+ response_time = time.time() - start
+ self.assertEquals(resp.status_int, 201)
+ self.assertTrue(response_time < response_sleep)
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 3319696eb7..3b0115bbfc 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -6217,7 +6217,8 @@ class TestECMismatchedFA(unittest.TestCase):
# pyeclib has checks for unequal-length; we don't want to trip those
self.assertEqual(len(obj1), len(obj2))
- # Servers obj1 and obj2 will have the first version of the object
+ # Server obj1 will have the first version of the object (obj2 also
+ # gets it, but that gets stepped on later)
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj3srv, 'PUT', bad_disk),
@@ -6227,18 +6228,13 @@ class TestECMismatchedFA(unittest.TestCase):
resp = put_req1.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
- # Server obj3 (and, in real life, some handoffs) will have the
- # second version of the object.
+ # Servers obj2 and obj3 will have the second version of the object.
prosrv._error_limiting = {}
with nested(
mock.patch.object(obj1srv, 'PUT', bad_disk),
- mock.patch.object(obj2srv, 'PUT', bad_disk),
mock.patch(
- 'swift.common.storage_policy.ECStoragePolicy.quorum'),
- mock.patch(
- 'swift.proxy.controllers.base.Controller._quorum_size',
- lambda *a, **kw: 1)):
- type(ec_policy).quorum = mock.PropertyMock(return_value=1)
+ 'swift.common.storage_policy.ECStoragePolicy.quorum')):
+ type(ec_policy).quorum = mock.PropertyMock(return_value=2)
resp = put_req2.get_response(prosrv)
self.assertEqual(resp.status_int, 201)
@@ -6258,10 +6254,10 @@ class TestECMismatchedFA(unittest.TestCase):
environ={"REQUEST_METHOD": "GET"},
headers={"X-Auth-Token": "t"})
prosrv._error_limiting = {}
- with mock.patch.object(obj3srv, 'GET', bad_disk):
+ with mock.patch.object(obj1srv, 'GET', bad_disk):
resp = get_req.get_response(prosrv)
self.assertEqual(resp.status_int, 200)
- self.assertEqual(resp.body, obj1)
+ self.assertEqual(resp.body, obj2)
# A GET that sees 2 mismatching FAs will fail
get_req = Request.blank("/v1/a/ec-crazytown/obj",
From a3559edc2342e2cf92a5188336ab263ffd038554 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Fri, 17 Apr 2015 16:30:30 -0700
Subject: [PATCH 41/98] Exclude local_dev from sync partners on failure
If the primary left or right hand partners are down, the next best thing
is to validate the rest of the primary nodes. Where the rest should
exclude not just the left and right hand partners, but ourselves as well.
This fixes an accidental noop when partner node is unavailable and
another node is missing data.
Validation:
Add probetests to cover ssync failures for the primary sync_to nodes for
sync jobs.
Drive-by:
Make additional plumbing for the check_mount and check_dir constraints into
the remaining daemons.
Change-Id: I4d1c047106c242bca85c94b569d98fd59bb255f4
---
swift/obj/reconstructor.py | 22 +++++----
swift/obj/ssync_receiver.py | 4 +-
test/probe/common.py | 15 ++++++-
test/probe/test_reconstructor_rebuild.py | 57 ++++++++++++++++++++++++
test/probe/test_reconstructor_revert.py | 17 +------
test/unit/obj/test_reconstructor.py | 42 ++++++++++-------
test/unit/obj/test_ssync_receiver.py | 10 ++---
7 files changed, 118 insertions(+), 49 deletions(-)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 4385e42cc9..734958ec2d 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -29,8 +29,8 @@ from eventlet.support.greenlets import GreenletExit
from swift import gettext_ as _
from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
- dump_recon_cache, ismount, mkdirs, config_true_value, list_from_csv,
- get_hub, tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
+ dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub,
+ tpool_reraise, GreenAsyncPile, Timestamp, remove_file)
from swift.common.swob import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
@@ -569,9 +569,12 @@ class ObjectReconstructor(Daemon):
job['sync_to'],
# I think we could order these based on our index to better
# protect against a broken chain
- itertools.ifilter(
- lambda n: n['id'] not in (n['id'] for n in job['sync_to']),
- job['policy'].object_ring.get_part_nodes(job['partition'])),
+ [
+ n for n in
+ job['policy'].object_ring.get_part_nodes(job['partition'])
+ if n['id'] != job['local_dev']['id'] and
+ n['id'] not in (m['id'] for m in job['sync_to'])
+ ],
)
syncd_with = 0
for node in dest_nodes:
@@ -777,13 +780,14 @@ class ObjectReconstructor(Daemon):
if override_devices and (local_dev['device'] not in
override_devices):
continue
- dev_path = join(self.devices_dir, local_dev['device'])
- obj_path = join(dev_path, data_dir)
- tmp_path = join(dev_path, get_tmp_dir(int(policy)))
- if self.mount_check and not ismount(dev_path):
+ dev_path = self._df_router[policy].get_dev_path(
+ local_dev['device'])
+ if not dev_path:
self.logger.warn(_('%s is not mounted'),
local_dev['device'])
continue
+ obj_path = join(dev_path, data_dir)
+ tmp_path = join(dev_path, get_tmp_dir(int(policy)))
unlink_older_than(tmp_path, time.time() -
self.reclaim_age)
if not os.path.exists(obj_path):
diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py
index b636a16245..aa685211ae 100644
--- a/swift/obj/ssync_receiver.py
+++ b/swift/obj/ssync_receiver.py
@@ -19,7 +19,6 @@ import eventlet
import eventlet.wsgi
import eventlet.greenio
-from swift.common import constraints
from swift.common import exceptions
from swift.common import http
from swift.common import swob
@@ -176,8 +175,7 @@ class Receiver(object):
self.frag_index = None
utils.validate_device_partition(self.device, self.partition)
self.diskfile_mgr = self.app._diskfile_router[self.policy]
- if self.diskfile_mgr.mount_check and not constraints.check_mount(
- self.diskfile_mgr.devices, self.device):
+ if not self.diskfile_mgr.get_dev_path(self.device):
raise swob.HTTPInsufficientStorage(drive=self.device)
self.fp = self.request.environ['wsgi.input']
for data in self._ensure_flush():
diff --git a/test/probe/common.py b/test/probe/common.py
index 7d1e754014..ca1225f9fb 100644
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -26,7 +26,7 @@ from swiftclient import get_auth, head_account
from swift.obj.diskfile import get_data_dir
from swift.common.ring import Ring
-from swift.common.utils import readconf
+from swift.common.utils import readconf, renamer
from swift.common.manager import Manager
from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY
@@ -314,6 +314,19 @@ class ProbeTest(unittest.TestCase):
self.updaters.once()
self.replicators.once()
+ def kill_drive(self, device):
+ if os.path.ismount(device):
+ os.system('sudo umount %s' % device)
+ else:
+ renamer(device, device + "X")
+
+ def revive_drive(self, device):
+ disabled_name = device + "X"
+ if os.path.isdir(disabled_name):
+ renamer(device + "X", device)
+ else:
+ os.system('sudo mount %s' % device)
+
class ReplProbeTest(ProbeTest):
diff --git a/test/probe/test_reconstructor_rebuild.py b/test/probe/test_reconstructor_rebuild.py
index 5edfcc52d1..bf568ccc68 100644
--- a/test/probe/test_reconstructor_rebuild.py
+++ b/test/probe/test_reconstructor_rebuild.py
@@ -19,12 +19,14 @@ import unittest
import uuid
import shutil
import random
+from collections import defaultdict
from test.probe.common import ECProbeTest
from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY
from swift.common.manager import Manager
+from swift.obj.reconstructor import _get_partners
from swiftclient import client
@@ -165,6 +167,61 @@ class TestReconstructorRebuild(ECProbeTest):
self._format_node(onode),
[self._format_node(n) for n in node_list]))
+ def test_rebuild_partner_down(self):
+ # create EC container
+ headers = {'X-Storage-Policy': self.policy.name}
+ client.put_container(self.url, self.token, self.container_name,
+ headers=headers)
+
+ # PUT object
+ contents = Body()
+ client.put_object(self.url, self.token,
+ self.container_name,
+ self.object_name,
+ contents=contents)
+
+ opart, onodes = self.object_ring.get_nodes(
+ self.account, self.container_name, self.object_name)
+
+ # find a primary server that only has one of its devices in the
+ # primary node list
+ group_nodes_by_config = defaultdict(list)
+ for n in onodes:
+ group_nodes_by_config[self.config_number(n)].append(n)
+ for config_number, node_list in group_nodes_by_config.items():
+ if len(node_list) == 1:
+ break
+ else:
+ self.fail('ring balancing did not use all available nodes')
+ primary_node = node_list[0]
+
+ # pick one of its partners to fail randomly
+ partner_node = random.choice(_get_partners(
+ primary_node['index'], onodes))
+
+ # 507 the partner device
+ device_path = self.device_dir('object', partner_node)
+ self.kill_drive(device_path)
+
+ # select another primary sync_to node to fail
+ failed_primary = [n for n in onodes if n['id'] not in
+ (primary_node['id'], partner_node['id'])][0]
+ # ... capture its fragment etag
+ failed_primary_etag = self.direct_get(failed_primary, opart)
+ # ... and delete it
+ part_dir = self.storage_dir('object', failed_primary, part=opart)
+ shutil.rmtree(part_dir, True)
+
+ # reconstruct from the primary, while one of its partners is 507'd
+ self.reconstructor.once(number=self.config_number(primary_node))
+
+ # the other failed primary will get its fragment rebuilt instead
+ self.assertEqual(failed_primary_etag,
+ self.direct_get(failed_primary, opart))
+
+ # just to be nice
+ self.revive_drive(device_path)
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py
index 39739b617d..249a6b5d62 100755
--- a/test/probe/test_reconstructor_revert.py
+++ b/test/probe/test_reconstructor_revert.py
@@ -17,7 +17,6 @@
from hashlib import md5
import unittest
import uuid
-import os
import random
import shutil
from collections import defaultdict
@@ -27,7 +26,6 @@ from test.probe.common import ECProbeTest
from swift.common import direct_client
from swift.common.storage_policy import EC_POLICY
from swift.common.manager import Manager
-from swift.common.utils import renamer
from swift.obj import reconstructor
from swiftclient import client
@@ -70,19 +68,6 @@ class TestReconstructorRevert(ECProbeTest):
self.assertEqual(self.policy.policy_type, EC_POLICY)
self.reconstructor = Manager(["object-reconstructor"])
- def kill_drive(self, device):
- if os.path.ismount(device):
- os.system('sudo umount %s' % device)
- else:
- renamer(device, device + "X")
-
- def revive_drive(self, device):
- disabled_name = device + "X"
- if os.path.isdir(disabled_name):
- renamer(device + "X", device)
- else:
- os.system('sudo mount %s' % device)
-
def proxy_get(self):
# GET object
headers, body = client.get_object(self.url, self.token,
@@ -277,6 +262,8 @@ class TestReconstructorRevert(ECProbeTest):
else:
self.fail('ring balancing did not use all available nodes')
primary_node = node_list[0]
+
+ # ... and 507 its device
primary_device = self.device_dir('object', primary_node)
self.kill_drive(primary_device)
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index 23e70543f7..321ea3751d 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -932,7 +932,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
def test_process_job_all_insufficient_storage(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
- with mocked_http_conn(*[507] * 10):
+ with mocked_http_conn(*[507] * 8):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
@@ -954,7 +954,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
def test_process_job_all_client_error(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
- with mocked_http_conn(*[400] * 10):
+ with mocked_http_conn(*[400] * 8):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
@@ -976,7 +976,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
def test_process_job_all_timeout(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
- with nested(mocked_http_conn(*[Timeout()] * 10)):
+ with nested(mocked_http_conn(*[Timeout()] * 8)):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
@@ -1012,6 +1012,13 @@ class TestObjectReconstructor(unittest.TestCase):
'bind_port': self.port,
}
self.logger = debug_logger('object-reconstructor')
+ self._configure_reconstructor()
+ self.policy.object_ring.max_more_nodes = \
+ self.policy.object_ring.replicas
+ self.ts_iter = make_timestamp_iter()
+
+ def _configure_reconstructor(self, **kwargs):
+ self.conf.update(kwargs)
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.reconstructor._reset_stats()
@@ -1019,9 +1026,6 @@ class TestObjectReconstructor(unittest.TestCase):
# directly, so you end up with a /0 when you try to show the
# percentage of complete jobs as ratio of the total job count
self.reconstructor.job_count = 1
- self.policy.object_ring.max_more_nodes = \
- self.policy.object_ring.replicas
- self.ts_iter = make_timestamp_iter()
def tearDown(self):
self.reconstructor.stats_line()
@@ -1115,16 +1119,16 @@ class TestObjectReconstructor(unittest.TestCase):
paths = []
- def fake_ismount(path):
- paths.append(path)
+ def fake_check_mount(devices, device):
+ paths.append(os.path.join(devices, device))
return False
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
- mock.patch('swift.obj.reconstructor.ismount',
- fake_ismount)):
+ mock.patch('swift.obj.diskfile.check_mount',
+ fake_check_mount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity, same jobs
self.assertEqual(set(int(p['partition']) for p in part_infos),
@@ -1134,13 +1138,16 @@ class TestObjectReconstructor(unittest.TestCase):
self.assertEqual(paths, [])
# ... now with mount check
- self.reconstructor.mount_check = True
+ self._configure_reconstructor(mount_check=True)
+ self.assertTrue(self.reconstructor.mount_check)
+ for policy in POLICIES:
+ self.assertTrue(self.reconstructor._df_router[policy].mount_check)
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
- mock.patch('swift.obj.reconstructor.ismount',
- fake_ismount)):
+ mock.patch('swift.obj.diskfile.check_mount',
+ fake_check_mount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual([], part_infos) # sanity, no jobs
@@ -1148,7 +1155,8 @@ class TestObjectReconstructor(unittest.TestCase):
self.assertEqual(set(paths), set([
os.path.join(self.devices, dev) for dev in local_devs]))
- def fake_ismount(path):
+ def fake_check_mount(devices, device):
+ path = os.path.join(devices, device)
if path.endswith('sda'):
return True
else:
@@ -1158,8 +1166,8 @@ class TestObjectReconstructor(unittest.TestCase):
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
- mock.patch('swift.obj.reconstructor.ismount',
- fake_ismount)):
+ mock.patch('swift.obj.diskfile.check_mount',
+ fake_check_mount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(1, len(part_infos)) # only sda picked up (part 0)
self.assertEqual(part_infos[0]['partition'], 0)
@@ -1171,6 +1179,8 @@ class TestObjectReconstructor(unittest.TestCase):
'replication_ip': self.ip,
'replication_port': self.port
} for dev in local_devs]
+ for device in local_devs:
+ utils.mkdirs(os.path.join(self.devices, device))
fake_unlink = mock.MagicMock()
self.reconstructor.reclaim_age = 1000
now = time.time()
diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py
index 4a030c821d..9fdfe7d102 100644
--- a/test/unit/obj/test_ssync_receiver.py
+++ b/test/unit/obj/test_ssync_receiver.py
@@ -23,7 +23,6 @@ import unittest
import eventlet
import mock
-from swift.common import constraints
from swift.common import exceptions
from swift.common import swob
from swift.common import utils
@@ -53,6 +52,7 @@ class TestReceiver(unittest.TestCase):
'mount_check': 'false',
'replication_one_per_device': 'false',
'log_requests': 'false'}
+ utils.mkdirs(os.path.join(self.testdir, 'device', 'partition'))
self.controller = server.ObjectController(self.conf)
self.controller.bytes_per_sync = 1
@@ -285,8 +285,8 @@ class TestReceiver(unittest.TestCase):
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', False),
- mock.patch.object(
- constraints, 'check_mount', return_value=False)) as (
+ mock.patch('swift.obj.diskfile.check_mount',
+ return_value=False)) as (
mocked_replication_semaphore,
mocked_mount_check,
mocked_check_mount):
@@ -305,8 +305,8 @@ class TestReceiver(unittest.TestCase):
mock.patch.object(
self.controller._diskfile_router[POLICIES.legacy],
'mount_check', True),
- mock.patch.object(
- constraints, 'check_mount', return_value=False)) as (
+ mock.patch('swift.obj.diskfile.check_mount',
+ return_value=False)) as (
mocked_replication_semaphore,
mocked_mount_check,
mocked_check_mount):
From a1c327022c70907ccc159d6203de26b37c3a4586 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Tue, 26 May 2015 16:43:55 -0700
Subject: [PATCH 42/98] Remove simplejson from swift-recon
Since we're dropping Python 2.6 support, we can rely on stdlib's json
and get rid of our dependency on simplejson.
All swift-recon was doing with json was decoding a JSON response (from
the recon middleware) and printing it to the terminal. This still
works just fine.
Change-Id: I28cf25a7c2856f230d4642c62fb8bf9c4d37e9e5
---
swift/cli/recon.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/swift/cli/recon.py b/swift/cli/recon.py
index 8c2042cb53..a0fcdf7835 100755
--- a/swift/cli/recon.py
+++ b/swift/cli/recon.py
@@ -22,12 +22,9 @@ from eventlet.green import urllib2
from swift.common.utils import SWIFT_CONF_FILE
from swift.common.ring import Ring
from urlparse import urlparse
-try:
- import simplejson as json
-except ImportError:
- import json
from hashlib import md5
import eventlet
+import json
import optparse
import time
import sys
From bb716573ab5c8455348ec013feb894421e0e1f1c Mon Sep 17 00:00:00 2001
From: Tim Burke
Date: Wed, 20 May 2015 00:39:41 -0700
Subject: [PATCH 43/98] Allow SLO PUTs to forgo per-segment integrity checks
While manifests still require 'etag' and 'size_bytes' fields for each
segment (to catch user errors like 'etaf' or 'size_btyes'), an explicit
null for either will skip that particular integrity check and instead
use whatever value is retrieved when HEADing the segment. So, if a user
uploads a manifest like:
[{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576},
{"path": "/con/obj_seg_2", "etag": "etag2", "size_bytes": null},
{"path": "/con/obj_seg_3", "etag": null, "size_bytes": null}]
then the etag will only be verified for the /con/obj_seg_2 segment,
and the segment size will only be verified for the /con/obj_seg_1
segment. However, the manifest that's ultimately stored (and can be
retrieved with a ?multipart-manifest=get query-string) will still look
like:
[{"name": "/con/obj_seg_1", "hash": "etag1", "bytes": 1048576, ...},
{"name": "/con/obj_seg_2", "hash": "etag2", "bytes": 1048576, ...},
{"name": "/con/obj_seg_3", "hash": "etag3", "bytes": 1234, ...}]
This allows the middleware to continue performing integrity checks on
object GET.
Change-Id: I2c4e585221387dd02a8679a50398d6b614407b12
DocImpact
---
swift/common/middleware/slo.py | 54 ++++++++++++-----
test/functional/tests.py | 79 +++++++++++++++++++++++++
test/unit/common/middleware/test_slo.py | 55 +++++++++++++++++
3 files changed, 172 insertions(+), 16 deletions(-)
diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py
index d8df829981..3c3ad7feac 100644
--- a/swift/common/middleware/slo.py
+++ b/swift/common/middleware/slo.py
@@ -36,8 +36,8 @@ json data format. The data to be supplied for each segment is::
path: the path to the segment (not including account)
/container/object_name
- etag: the etag given back when the segment was PUT
- size_bytes: the size of the segment in bytes
+ etag: the etag given back when the segment was PUT, or null
+ size_bytes: the size of the segment in bytes, or null
The format of the list will be::
@@ -48,15 +48,25 @@ The format of the list will be::
The number of object segments is limited to a configurable amount, default
1000. Each segment, except for the final one, must be at least 1 megabyte
-(configurable). On upload, the middleware will head every segment passed in and
-verify the size and etag of each. If any of the objects do not match (not
+(configurable). On upload, the middleware will head every segment passed in to
+verify:
+
+ 1. the segment exists (i.e. the HEAD was successful);
+ 2. the segment meets minimum size requirements (if not the last segment);
+ 3. if the user provided a non-null etag, the etag matches; and
+ 4. if the user provided a non-null size_bytes, the size_bytes matches.
+
+Note that the etag and size_bytes keys are still required; this acts as a guard
+against user errors such as typos. If any of the objects fail to verify (not
found, size/etag mismatch, below minimum size) then the user will receive a 4xx
error response. If everything does match, the user will receive a 2xx response
and the SLO object is ready for downloading.
Behind the scenes, on success, a json manifest generated from the user input is
sent to object servers with an extra "X-Static-Large-Object: True" header
-and a modified Content-Type. The parameter: swift_bytes=$total_size will be
+and a modified Content-Type. The items in this manifest will include the etag
+and size_bytes for each segment, regardless of whether the client specified
+them for verification. The parameter: swift_bytes=$total_size will be
appended to the existing Content-Type, where total_size is the sum of all
the included segments' size_bytes. This extra parameter will be hidden from
the user.
@@ -73,9 +83,11 @@ Retrieving a Large Object
A GET request to the manifest object will return the concatenation of the
objects from the manifest much like DLO. If any of the segments from the
-manifest are not found or their Etag/Content Length no longer match the
-connection will drop. In this case a 409 Conflict will be logged in the proxy
-logs and the user will receive incomplete results.
+manifest are not found or their Etag/Content Length have changed since upload,
+the connection will drop. In this case a 409 Conflict will be logged in the
+proxy logs and the user will receive incomplete results. Note that this will be
+enforced regardless of whether the user performed per-segment validation during
+upload.
The headers from this GET or HEAD request will return the metadata attached
to the manifest object itself with some exceptions::
@@ -594,8 +606,11 @@ class StaticLargeObject(object):
try:
seg_size = int(seg_dict['size_bytes'])
except (ValueError, TypeError):
- raise HTTPBadRequest('Invalid Manifest File')
- if seg_size < self.min_segment_size and \
+ if seg_dict['size_bytes'] is None:
+ seg_size = None
+ else:
+ raise HTTPBadRequest('Invalid Manifest File')
+ if seg_size is not None and seg_size < self.min_segment_size and \
index < len(parsed_data) - 1:
raise HTTPBadRequest(
'Each segment, except the last, must be at least '
@@ -613,11 +628,18 @@ class StaticLargeObject(object):
head_seg_resp = \
Request.blank(obj_path, new_env).get_response(self)
if head_seg_resp.is_success:
- total_size += seg_size
- if seg_size != head_seg_resp.content_length:
+ if head_seg_resp.content_length < self.min_segment_size and \
+ index < len(parsed_data) - 1:
+ raise HTTPBadRequest(
+ 'Each segment, except the last, must be at least '
+ '%d bytes.' % self.min_segment_size)
+ total_size += head_seg_resp.content_length
+ if seg_size is not None and \
+ seg_size != head_seg_resp.content_length:
problem_segments.append([quote(obj_name), 'Size Mismatch'])
- if seg_dict['etag'] == head_seg_resp.etag:
- slo_etag.update(seg_dict['etag'])
+ if seg_dict['etag'] is None or \
+ seg_dict['etag'] == head_seg_resp.etag:
+ slo_etag.update(head_seg_resp.etag)
else:
problem_segments.append([quote(obj_name), 'Etag Mismatch'])
if head_seg_resp.last_modified:
@@ -629,8 +651,8 @@ class StaticLargeObject(object):
last_modified_formatted = \
last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f')
seg_data = {'name': '/' + seg_dict['path'].lstrip('/'),
- 'bytes': seg_size,
- 'hash': seg_dict['etag'],
+ 'bytes': head_seg_resp.content_length,
+ 'hash': head_seg_resp.etag,
'content_type': head_seg_resp.content_type,
'last_modified': last_modified_formatted}
if config_true_value(
diff --git a/test/functional/tests.py b/test/functional/tests.py
index 3fbbdd784e..df96e5c4da 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -2152,6 +2152,15 @@ class TestSloEnv(object):
seg_info['seg_e']]),
parms={'multipart-manifest': 'put'})
+ file_item = cls.container.file("manifest-db")
+ file_item.write(
+ json.dumps([
+ {'path': seg_info['seg_d']['path'], 'etag': None,
+ 'size_bytes': None},
+ {'path': seg_info['seg_b']['path'], 'etag': None,
+ 'size_bytes': None},
+ ]), parms={'multipart-manifest': 'put'})
+
class TestSlo(Base):
env = TestSloEnv
@@ -2259,6 +2268,52 @@ class TestSlo(Base):
else:
self.fail("Expected ResponseError but didn't get it")
+ def test_slo_unspecified_etag(self):
+ file_item = self.env.container.file("manifest-a-unspecified-etag")
+ file_item.write(
+ json.dumps([{
+ 'size_bytes': 1024 * 1024,
+ 'etag': None,
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
+ parms={'multipart-manifest': 'put'})
+ self.assert_status(201)
+
+ def test_slo_unspecified_size(self):
+ file_item = self.env.container.file("manifest-a-unspecified-size")
+ file_item.write(
+ json.dumps([{
+ 'size_bytes': None,
+ 'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
+ parms={'multipart-manifest': 'put'})
+ self.assert_status(201)
+
+ def test_slo_missing_etag(self):
+ file_item = self.env.container.file("manifest-a-missing-etag")
+ try:
+ file_item.write(
+ json.dumps([{
+ 'size_bytes': 1024 * 1024,
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
+ parms={'multipart-manifest': 'put'})
+ except ResponseError as err:
+ self.assertEqual(400, err.status)
+ else:
+ self.fail("Expected ResponseError but didn't get it")
+
+ def test_slo_missing_size(self):
+ file_item = self.env.container.file("manifest-a-missing-size")
+ try:
+ file_item.write(
+ json.dumps([{
+ 'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
+ 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]),
+ parms={'multipart-manifest': 'put'})
+ except ResponseError as err:
+ self.assertEqual(400, err.status)
+ else:
+ self.fail("Expected ResponseError but didn't get it")
+
def test_slo_overwrite_segment_with_manifest(self):
file_item = self.env.container.file("seg_b")
try:
@@ -2367,6 +2422,30 @@ class TestSlo(Base):
except ValueError:
self.fail("GET with multipart-manifest=get got invalid json")
+ def test_slo_get_the_manifest_with_details_from_server(self):
+ manifest = self.env.container.file("manifest-db")
+ got_body = manifest.read(parms={'multipart-manifest': 'get'})
+
+ self.assertEqual('application/json; charset=utf-8',
+ manifest.content_type)
+ try:
+ value = json.loads(got_body)
+ except ValueError:
+ self.fail("GET with multipart-manifest=get got invalid json")
+
+ self.assertEqual(len(value), 2)
+ self.assertEqual(value[0]['bytes'], 1024 * 1024)
+ self.assertEqual(value[0]['hash'],
+ hashlib.md5('d' * 1024 * 1024).hexdigest())
+ self.assertEqual(value[0]['name'],
+ '/%s/seg_d' % self.env.container.name.decode("utf-8"))
+
+ self.assertEqual(value[1]['bytes'], 1024 * 1024)
+ self.assertEqual(value[1]['hash'],
+ hashlib.md5('b' * 1024 * 1024).hexdigest())
+ self.assertEqual(value[1]['name'],
+ '/%s/seg_b' % self.env.container.name.decode("utf-8"))
+
def test_slo_head_the_manifest(self):
manifest = self.env.container.file("manifest-abcde")
got_info = manifest.info(parms={'multipart-manifest': 'get'})
diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py
index d70a25ccc4..86a11734d3 100644
--- a/test/unit/common/middleware/test_slo.py
+++ b/test/unit/common/middleware/test_slo.py
@@ -441,6 +441,61 @@ class TestSloPutManifest(SloTestCase):
self.assertEqual(status, '409 Conflict')
self.assertEqual(self.app.call_count, 1)
+ def test_handle_multipart_put_skip_size_check(self):
+ good_data = json.dumps(
+ [{'path': '/checktest/a_1', 'etag': 'a', 'size_bytes': None},
+ {'path': '/checktest/b_2', 'etag': 'b', 'size_bytes': None}])
+ req = Request.blank(
+ '/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
+ status, headers, body = self.call_slo(req)
+ self.assertEquals(self.app.call_count, 3)
+
+ # Check that we still populated the manifest properly from our HEADs
+ req = Request.blank(
+ # this string looks weird, but it's just an artifact
+ # of FakeSwift
+ '/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'GET'})
+ status, headers, body = self.call_app(req)
+ manifest_data = json.loads(body)
+ self.assertEquals(1, manifest_data[0]['bytes'])
+ self.assertEquals(2, manifest_data[1]['bytes'])
+
+ def test_handle_multipart_put_skip_size_check_still_uses_min_size(self):
+ with patch.object(self.slo, 'min_segment_size', 50):
+ test_json_data = json.dumps([{'path': '/cont/small_object',
+ 'etag': 'etagoftheobjectsegment',
+ 'size_bytes': None},
+ {'path': '/cont/small_object',
+ 'etag': 'etagoftheobjectsegment',
+ 'size_bytes': 100}])
+ req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data)
+ with self.assertRaises(HTTPException) as cm:
+ self.slo.handle_multipart_put(req, fake_start_response)
+ self.assertEquals(cm.exception.status_int, 400)
+
+ def test_handle_multipart_put_skip_etag_check(self):
+ good_data = json.dumps(
+ [{'path': '/checktest/a_1', 'etag': None, 'size_bytes': 1},
+ {'path': '/checktest/b_2', 'etag': None, 'size_bytes': 2}])
+ req = Request.blank(
+ '/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'PUT'}, body=good_data)
+ status, headers, body = self.call_slo(req)
+ self.assertEquals(self.app.call_count, 3)
+
+ # Check that we still populated the manifest properly from our HEADs
+ req = Request.blank(
+ # this string looks weird, but it's just an artifact
+ # of FakeSwift
+ '/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
+ environ={'REQUEST_METHOD': 'GET'})
+ status, headers, body = self.call_app(req)
+ manifest_data = json.loads(body)
+ self.assertEquals('a', manifest_data[0]['hash'])
+ self.assertEquals('b', manifest_data[1]['hash'])
+
class TestSloDeleteManifest(SloTestCase):
From 3aa06f185ac4256a8883c565bdc90b1ffbd519ca Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Mon, 27 Apr 2015 09:17:46 +0100
Subject: [PATCH 44/98] Make SSYNC receiver return a response when initial
checks fail
The ssync Receiver performs some checks on request parameters
in initialize_request() before starting the exchange of missing
hashes and updates e.g. the destination device must be available;
the policy must be valid. Currently if any of these checks fails
then the receiver just closes the connection, so the Sender gets
no useful response code and noise is generated in logs by httplib
and wsgi Exceptions.
This change moves the request parameter checks to the Receiver
constructor so that the HTTPExceptions raised are actually sent
as responses. (The 'connection close' exception handling still
applies once the 'missing_check' and 'updates' handshakes are in
progress.)
Moving initialize_request() revealed the following lurking bug:
* initialize_request() sets
req.environ['eventlet.minimum_write_chunk_size'] = 0
* this was previously ineffective because the Response environ
had already been copied from Request environ before this value
was set, so the Response never used the value :/
* Now that it is effective (a good thing) it causes the empty string
yielded by the receiver when there are no missing hashes in
missing_checks() to be sent to the sender immediately. This makes
the Sender.readline() think there has been an early disconnect
and raise an Exception (a bad thing), as revealed by
test/unit/obj/test_ssync_sender.py:TestSsync.test_nothing_to_sync
The fix for this is to simply make the receiver skip sending the empty
string if there are no missing object_hashes.
Change-Id: I036a6919fead6e970505dccbb0da7bfbdf8cecc3
---
swift/obj/ssync_receiver.py | 10 ++-
test/unit/obj/test_server.py | 2 +-
test/unit/obj/test_ssync_receiver.py | 102 +++++++++++++++++++++++----
test/unit/obj/test_ssync_sender.py | 18 +++--
4 files changed, 105 insertions(+), 27 deletions(-)
diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py
index aa685211ae..b907ebf563 100644
--- a/swift/obj/ssync_receiver.py
+++ b/swift/obj/ssync_receiver.py
@@ -69,6 +69,7 @@ class Receiver(object):
# raised during processing because otherwise the sender could send for
# quite some time before realizing it was all in vain.
self.disconnect = True
+ self.initialize_request()
def __call__(self):
"""
@@ -88,9 +89,7 @@ class Receiver(object):
try:
# Double try blocks in case our main error handlers fail.
try:
- # initialize_request is for preamble items that can be done
- # outside a replication semaphore lock.
- for data in self.initialize_request():
+ for data in self._ensure_flush():
yield data
# If semaphore is in use, try to acquire it, non-blocking, and
# return a 503 if it fails.
@@ -178,8 +177,6 @@ class Receiver(object):
if not self.diskfile_mgr.get_dev_path(self.device):
raise swob.HTTPInsufficientStorage(drive=self.device)
self.fp = self.request.environ['wsgi.input']
- for data in self._ensure_flush():
- yield data
def missing_check(self):
"""
@@ -249,7 +246,8 @@ class Receiver(object):
if want:
object_hashes.append(object_hash)
yield ':MISSING_CHECK: START\r\n'
- yield '\r\n'.join(object_hashes)
+ if object_hashes:
+ yield '\r\n'.join(object_hashes)
yield '\r\n'
yield ':MISSING_CHECK: END\r\n'
for data in self._ensure_flush():
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 52a34347ac..4c669a874c 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -4410,7 +4410,7 @@ class TestObjectController(unittest.TestCase):
self.assertEqual(resp.status_int, 507)
def test_SSYNC_can_be_called(self):
- req = Request.blank('/sda1/p/other/suff',
+ req = Request.blank('/sda1/0',
environ={'REQUEST_METHOD': 'SSYNC'},
headers={})
resp = req.get_response(self.object_controller)
diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py
index 9fdfe7d102..8b652ad2ec 100644
--- a/test/unit/obj/test_ssync_receiver.py
+++ b/test/unit/obj/test_ssync_receiver.py
@@ -23,15 +23,19 @@ import unittest
import eventlet
import mock
+from swift.common import bufferedhttp
from swift.common import exceptions
from swift.common import swob
-from swift.common import utils
from swift.common.storage_policy import POLICIES
+from swift.common import utils
+from swift.common.swob import HTTPException
from swift.obj import diskfile
from swift.obj import server
from swift.obj import ssync_receiver
+from swift.obj.reconstructor import ObjectReconstructor
from test import unit
+from test.unit import debug_logger, patch_policies
@unit.patch_policies()
@@ -176,9 +180,12 @@ class TestReceiver(unittest.TestCase):
':MISSING_CHECK: END\r\n'
':UPDATES: START\r\n:UPDATES: END\r\n')
self.controller.logger = mock.MagicMock()
- receiver = ssync_receiver.Receiver(self.controller, req)
- body_lines = [chunk.strip() for chunk in receiver() if chunk.strip()]
- self.assertEqual(body_lines, [":ERROR: 503 'No policy with index 2'"])
+ try:
+ ssync_receiver.Receiver(self.controller, req)
+ self.fail('Expected HTTPException to be raised.')
+ except HTTPException as err:
+ self.assertEqual('503 Service Unavailable', err.status)
+ self.assertEqual('No policy with index 2', err.body)
@unit.patch_policies()
def test_Receiver_with_frag_index_header(self):
@@ -233,8 +240,8 @@ class TestReceiver(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 400 'Invalid path: /device'"])
- self.assertEqual(resp.status_int, 200)
+ ["Invalid path: /device"])
+ self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
@@ -246,8 +253,8 @@ class TestReceiver(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 400 'Invalid path: /device/'"])
- self.assertEqual(resp.status_int, 200)
+ ["Invalid path: /device/"])
+ self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
@@ -273,8 +280,8 @@ class TestReceiver(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 400 'Invalid path: /device/partition/junk'"])
- self.assertEqual(resp.status_int, 200)
+ ["Invalid path: /device/partition/junk"])
+ self.assertEqual(resp.status_int, 400)
self.assertFalse(mocked_replication_semaphore.acquire.called)
self.assertFalse(mocked_replication_semaphore.release.called)
@@ -315,10 +322,10 @@ class TestReceiver(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(
self.body_lines(resp.body),
- [":ERROR: 507 'Insufficient Storage
There "
+ ["
Insufficient Storage
There "
"was not enough space to save the resource. Drive: "
- "device
'"])
- self.assertEqual(resp.status_int, 200)
+ "device
"])
+ self.assertEqual(resp.status_int, 507)
mocked_check_mount.assert_called_once_with(
self.controller._diskfile_router[POLICIES.legacy].devices,
'device')
@@ -1476,5 +1483,74 @@ class TestReceiver(unittest.TestCase):
self.assertEqual(_requests, [])
+@patch_policies(with_ec_default=True)
+class TestSsyncRxServer(unittest.TestCase):
+ # Tests to verify behavior of SSYNC requests sent to an object
+ # server socket.
+
+ def setUp(self):
+ self.rx_ip = '127.0.0.1'
+ # dirs
+ self.tmpdir = tempfile.mkdtemp()
+ self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server')
+
+ self.devices = os.path.join(self.tempdir, 'srv/node')
+ for device in ('sda1', 'sdb1'):
+ os.makedirs(os.path.join(self.devices, device))
+
+ self.conf = {
+ 'devices': self.devices,
+ 'swift_dir': self.tempdir,
+ }
+ self.rx_logger = debug_logger('test-object-server')
+ rx_server = server.ObjectController(self.conf, logger=self.rx_logger)
+ sock = eventlet.listen((self.rx_ip, 0))
+ self.rx_server = eventlet.spawn(
+ eventlet.wsgi.server, sock, rx_server, utils.NullLogger())
+ self.rx_port = sock.getsockname()[1]
+ self.tx_logger = debug_logger('test-reconstructor')
+ self.daemon = ObjectReconstructor(self.conf, self.tx_logger)
+ self.daemon._diskfile_mgr = self.daemon._df_router[POLICIES[0]]
+
+ def tearDown(self):
+ shutil.rmtree(self.tmpdir)
+
+ def test_SSYNC_device_not_available(self):
+ with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
+ as mock_missing_check:
+ self.connection = bufferedhttp.BufferedHTTPConnection(
+ '127.0.0.1:%s' % self.rx_port)
+ self.connection.putrequest('SSYNC', '/sdc1/0')
+ self.connection.putheader('Transfer-Encoding', 'chunked')
+ self.connection.putheader('X-Backend-Storage-Policy-Index',
+ int(POLICIES[0]))
+ self.connection.endheaders()
+ resp = self.connection.getresponse()
+ self.assertEqual(507, resp.status)
+ resp.read()
+ resp.close()
+ # sanity check that the receiver did not proceed to missing_check
+ self.assertFalse(mock_missing_check.called)
+
+ def test_SSYNC_invalid_policy(self):
+ valid_indices = sorted([int(policy) for policy in POLICIES])
+ bad_index = valid_indices[-1] + 1
+ with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\
+ as mock_missing_check:
+ self.connection = bufferedhttp.BufferedHTTPConnection(
+ '127.0.0.1:%s' % self.rx_port)
+ self.connection.putrequest('SSYNC', '/sda1/0')
+ self.connection.putheader('Transfer-Encoding', 'chunked')
+ self.connection.putheader('X-Backend-Storage-Policy-Index',
+ bad_index)
+ self.connection.endheaders()
+ resp = self.connection.getresponse()
+ self.assertEqual(503, resp.status)
+ resp.read()
+ resp.close()
+ # sanity check that the receiver did not proceed to missing_check
+ self.assertFalse(mock_missing_check.called)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py
index fa38b658b2..19fea0be2b 100644
--- a/test/unit/obj/test_ssync_sender.py
+++ b/test/unit/obj/test_ssync_sender.py
@@ -497,8 +497,6 @@ class TestSender(BaseTestSender):
node = dict(replication_ip='1.2.3.4', replication_port=5678,
device='sda1', index=0)
job = dict(partition='9', policy=POLICIES.legacy)
- self.sender = ssync_sender.Sender(self.daemon, node, job, None)
- self.sender.suffixes = ['abc']
class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
def getresponse(*args, **kwargs):
@@ -506,16 +504,22 @@ class TestSender(BaseTestSender):
response.status = 503
return response
- with mock.patch.object(
+ missing_check_fn = 'swift.obj.ssync_sender.Sender.missing_check'
+ with mock.patch(missing_check_fn) as mock_missing_check:
+ with mock.patch.object(
ssync_sender.bufferedhttp, 'BufferedHTTPConnection',
- FakeBufferedHTTPConnection):
- success, candidates = self.sender()
- self.assertFalse(success)
- self.assertEquals(candidates, {})
+ FakeBufferedHTTPConnection):
+ self.sender = ssync_sender.Sender(
+ self.daemon, node, job, ['abc'])
+ success, candidates = self.sender()
+ self.assertFalse(success)
+ self.assertEquals(candidates, {})
error_lines = self.daemon.logger.get_lines_for_level('error')
for line in error_lines:
self.assertTrue(line.startswith(
'1.2.3.4:5678/sda1/9 Expected status 200; got 503'))
+ # sanity check that Sender did not proceed to missing_check exchange
+ self.assertFalse(mock_missing_check.called)
def test_readline_newline_in_buffer(self):
self.sender.response_buffer = 'Has a newline already.\r\nOkay.'
From 191f2a00bd9121fddc03d8f07f15e5e34790541e Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Mon, 27 Apr 2015 16:39:23 +0100
Subject: [PATCH 45/98] Remove _ensure_flush() from SSYNC receiver
The Receiver._ensure_flush() method in ssync_receiver.py has
the following comment:
Sends a blank line sufficient to flush buffers.
This is to ensure Eventlet versions that don't support
eventlet.minimum_write_chunk_size will send any previous data
buffered.
If https://bitbucket.org/eventlet/eventlet/pull-request/37
ever gets released in an Eventlet version, we should make
this yield only for versions older than that.
The reference pull request was included with eventlet 0.14 [1] and
swift now requires >=0.16.1 so it is safe to remove _ensure_flush()
and save > 8k bytes per SSYNC response.
[1] https://bitbucket.org/eventlet/eventlet/commits/4bd654205a4217970a57a7c4802fed7ff2c8b770
Change-Id: I367e9a6e92b7ea75fe7e5795cded212657de57ed
---
swift/obj/ssync_receiver.py | 27 ++++++---------------------
test/unit/obj/test_ssync_sender.py | 6 +++---
2 files changed, 9 insertions(+), 24 deletions(-)
diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py
index b907ebf563..5f2461d62e 100644
--- a/swift/obj/ssync_receiver.py
+++ b/swift/obj/ssync_receiver.py
@@ -89,8 +89,9 @@ class Receiver(object):
try:
# Double try blocks in case our main error handlers fail.
try:
- for data in self._ensure_flush():
- yield data
+ # Need to send something to trigger wsgi to return response
+ # headers and kick off the ssync exchange.
+ yield '\r\n'
# If semaphore is in use, try to acquire it, non-blocking, and
# return a 503 if it fails.
if self.app.replication_semaphore:
@@ -142,20 +143,6 @@ class Receiver(object):
except Exception:
pass # We're okay with the above failing.
- def _ensure_flush(self):
- """
- Sends a blank line sufficient to flush buffers.
-
- This is to ensure Eventlet versions that don't support
- eventlet.minimum_write_chunk_size will send any previous data
- buffered.
-
- If https://bitbucket.org/eventlet/eventlet/pull-request/37
- ever gets released in an Eventlet version, we should make
- this yield only for versions older than that.
- """
- yield ' ' * eventlet.wsgi.MINIMUM_CHUNK_SIZE + '\r\n'
-
def initialize_request(self):
"""
Basic validation of request and mount check.
@@ -163,7 +150,9 @@ class Receiver(object):
This function will be called before attempting to acquire a
replication semaphore lock, so contains only quick checks.
"""
- # The following is the setting we talk about above in _ensure_flush.
+ # This environ override has been supported since eventlet 0.14:
+ # https://bitbucket.org/eventlet/eventlet/commits/ \
+ # 4bd654205a4217970a57a7c4802fed7ff2c8b770
self.request.environ['eventlet.minimum_write_chunk_size'] = 0
self.device, self.partition, self.policy = \
request_helpers.get_name_and_placement(self.request, 2, 2, False)
@@ -250,8 +239,6 @@ class Receiver(object):
yield '\r\n'.join(object_hashes)
yield '\r\n'
yield ':MISSING_CHECK: END\r\n'
- for data in self._ensure_flush():
- yield data
def updates(self):
"""
@@ -385,5 +372,3 @@ class Receiver(object):
(failures, successes))
yield ':UPDATES: START\r\n'
yield ':UPDATES: END\r\n'
- for data in self._ensure_flush():
- yield data
diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py
index 19fea0be2b..c48a239351 100644
--- a/test/unit/obj/test_ssync_sender.py
+++ b/test/unit/obj/test_ssync_sender.py
@@ -1830,14 +1830,14 @@ class TestSsyncReplication(TestBaseSsync):
self.assertFalse(results['tx_updates'])
self.assertFalse(results['rx_updates'])
# Minimal receiver response as read by sender:
- # 2 * 4098 <-- _ensure_flush() twice
+ # 2 <-- initial \r\n to start ssync exchange
# + 23 <-- :MISSING CHECK START\r\n
# + 2 <-- \r\n (minimal missing check response)
# + 21 <-- :MISSING CHECK END\r\n
# + 17 <-- :UPDATES START\r\n
# + 15 <-- :UPDATES END\r\n
- # TOTAL = 8274
- self.assertEqual(8274, trace.get('readline_bytes'))
+ # TOTAL = 80
+ self.assertEqual(80, trace.get('readline_bytes'))
if __name__ == '__main__':
From 5374ba3a80a5b895542196502eac4d9300ba53d2 Mon Sep 17 00:00:00 2001
From: John Dickinson
Date: Wed, 27 May 2015 12:28:04 -0700
Subject: [PATCH 46/98] drop Python 2.6 testing support
Change-Id: I78f21e5794e8ba7a095f03d279247516a241f555
---
doc/source/development_guidelines.rst | 2 +-
doc/source/getting_started.rst | 3 +--
tox.ini | 2 +-
3 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst
index 241eda6cf5..1da8457682 100644
--- a/doc/source/development_guidelines.rst
+++ b/doc/source/development_guidelines.rst
@@ -49,7 +49,7 @@ To execute the unit tests:
* Optionally, run only specific tox builds:
- - `tox -e pep8,py26`
+ - `tox -e pep8,py27`
The functional tests may be executed against a :doc:`development_saio` or
other running Swift cluster using the command:
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index b3b201d461..0e3b408ad4 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -16,8 +16,7 @@ Swift is written in Python and has these dependencies:
* The Python packages listed in `the requirements file `_
* Testing additionally requires `the test dependencies `_
-Python 2.6 should work, but it's not actively tested. There is no current
-support for Python 3.
+There is no current support for Python 3.
-------------
Getting Swift
diff --git a/tox.ini b/tox.ini
index 96e32f87a8..de72f26950 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,pep8
+envlist = py27,pep8
minversion = 1.6
skipsdist = True
From 68c30b80b47c281c549d101c43d79e718e91d21d Mon Sep 17 00:00:00 2001
From: OpenStack Proposal Bot
Date: Thu, 28 May 2015 06:08:12 +0000
Subject: [PATCH 47/98] Imported Translations from Transifex
For more information about this automatic import see:
https://wiki.openstack.org/wiki/Translations/Infrastructure
Change-Id: I0c7b2bdb0edcc3bb3fa06903ec8457ca5e2dd688
---
swift/locale/zh_CN/LC_MESSAGES/swift.po | 1400 +++++++++++------------
1 file changed, 700 insertions(+), 700 deletions(-)
diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po
index 1352c93f83..48f5ded42c 100644
--- a/swift/locale/zh_CN/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Swift\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-05-05 06:08+0000\n"
+"POT-Creation-Date: 2015-05-28 06:08+0000\n"
"PO-Revision-Date: 2015-04-15 12:48+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/"
@@ -19,215 +19,6 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#, python-format
-msgid ""
-"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
-"audit"
-msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败"
-
-msgid "Begin account audit pass."
-msgstr "开始账号审计通过"
-
-msgid "ERROR auditing"
-msgstr "错误 审计"
-
-#, python-format
-msgid "Account audit pass completed: %.02fs"
-msgstr "账号审计完成:%.02fs"
-
-msgid "Begin account audit \"once\" mode"
-msgstr "开始账号审计\"once\"模式"
-
-#, python-format
-msgid "Account audit \"once\" mode completed: %.02fs"
-msgstr "账号审计\"once\"模式完成: %.02fs"
-
-#, python-format
-msgid ""
-"The total %(key)s for the container (%(total)s) does not match the sum of "
-"%(key)s across policies (%(sum)s)"
-msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
-
-#, python-format
-msgid "Audit Failed for %s: %s"
-msgstr "审计失败%s: %s"
-
-#, python-format
-msgid "ERROR Could not get account info %s"
-msgstr "错误:无法获取账号信息%s"
-
-#, python-format
-msgid "Skipping %s as it is not mounted"
-msgstr "挂载失败 跳过%s"
-
-msgid "Exception in top-level account reaper loop"
-msgstr "异常出现在top-level账号reaper环"
-
-#, python-format
-msgid "Devices pass completed: %.02fs"
-msgstr "设备通过完成: %.02fs"
-
-#, python-format
-msgid "Beginning pass on account %s"
-msgstr "账号%s开始通过"
-
-#, python-format
-msgid "Exception with containers for account %s"
-msgstr "账号%s内容器出现异常"
-
-#, python-format
-msgid "Exception with account %s"
-msgstr "账号%s出现异常"
-
-#, python-format
-msgid "Incomplete pass on account %s"
-msgstr "账号%s未完成通过"
-
-#, python-format
-msgid ", %s containers deleted"
-msgstr ",删除容器%s"
-
-#, python-format
-msgid ", %s objects deleted"
-msgstr ",删除对象%s"
-
-#, python-format
-msgid ", %s containers remaining"
-msgstr ",剩余容器%s"
-
-#, python-format
-msgid ", %s objects remaining"
-msgstr ",剩余对象%s"
-
-#, python-format
-msgid ", %s containers possibly remaining"
-msgstr ",可能剩余容器%s"
-
-#, python-format
-msgid ", %s objects possibly remaining"
-msgstr ",可能剩余对象%s"
-
-msgid ", return codes: "
-msgstr ",返回代码:"
-
-#, python-format
-msgid ", elapsed: %.02fs"
-msgstr ",耗时:%.02fs"
-
-#, python-format
-msgid "Account %s has not been reaped since %s"
-msgstr "账号%s自%s起未被reaped"
-
-#, python-format
-msgid "Exception with %(ip)s:%(port)s/%(device)s"
-msgstr "%(ip)s:%(port)s/%(device)s出现异常"
-
-#, python-format
-msgid ""
-"Exception with objects for container %(container)s for account %(account)s"
-msgstr "账号%(account)s容器%(container)s的对象出现异常"
-
-#, python-format
-msgid "ERROR __call__ error with %(method)s %(path)s "
-msgstr "%(method)s %(path)s出现错误__call__ error"
-
-#, python-format
-msgid "Error encoding to UTF-8: %s"
-msgstr "UTF-8编码错误:%s"
-
-#, python-format
-msgid "Could not load %r: %s"
-msgstr "无法下载%r: %s"
-
-#, python-format
-msgid "Error in %r with mtime_check_interval: %s"
-msgstr "%r中mtime_check_interval出现错误:%s"
-
-#, python-format
-msgid "Quarantined %s to %s due to %s database"
-msgstr "隔离%s和%s 因为%s数据库"
-
-msgid "Broker error trying to rollback locked connection"
-msgstr "服务器错误并尝试去回滚已经锁住的链接"
-
-#, python-format
-msgid "Invalid pending entry %(file)s: %(entry)s"
-msgstr "不可用的等待输入%(file)s: %(entry)s"
-
-#, python-format
-msgid "ERROR reading HTTP response from %s"
-msgstr "读取HTTP错误 响应来源%s"
-
-#, python-format
-msgid ""
-"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
-msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs"
-
-#, python-format
-msgid "Removed %(remove)d dbs"
-msgstr "删除%(remove)d dbs"
-
-#, python-format
-msgid "%(success)s successes, %(failure)s failures"
-msgstr "%(success)s成功,%(failure)s失败"
-
-#, python-format
-msgid "ERROR rsync failed with %(code)s: %(args)s"
-msgstr "错误 rsync失败 %(code)s: %(args)s"
-
-#, python-format
-msgid "ERROR Bad response %(status)s from %(host)s"
-msgstr "失败响应错误%(status)s来自%(host)s"
-
-#, python-format
-msgid "Quarantining DB %s"
-msgstr "隔离DB%s"
-
-#, python-format
-msgid "ERROR reading db %s"
-msgstr "错误 读取db %s"
-
-#, python-format
-msgid "ERROR Remote drive not mounted %s"
-msgstr "错误 远程驱动器无法挂载 %s"
-
-#, python-format
-msgid "ERROR syncing %(file)s with node %(node)s"
-msgstr "错误 同步 %(file)s 和 节点%(node)s"
-
-#, python-format
-msgid "ERROR while trying to clean up %s"
-msgstr "清理时出现错误%s"
-
-msgid "ERROR Failed to get my own IPs?"
-msgstr "错误 无法获得我方IPs?"
-
-#, python-format
-msgid "Skipping %(device)s as it is not mounted"
-msgstr "因无法挂载跳过%(device)s"
-
-msgid "Beginning replication run"
-msgstr "开始运行复制"
-
-msgid "Replication run OVER"
-msgstr "复制运行结束"
-
-msgid "ERROR trying to replicate"
-msgstr "尝试复制时发生错误"
-
-#, python-format
-msgid "Unexpected response: %s"
-msgstr "意外响应:%s"
-
-msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
-msgstr "警告:无法修改文件描述限制。是否按非root运行?"
-
-msgid "WARNING: Unable to modify memory limit. Running as non-root?"
-msgstr "警告:无法修改内存极限,是否按非root运行?"
-
-msgid "WARNING: Unable to modify max process limit. Running as non-root?"
-msgstr "警告:无法修改最大运行极限,是否按非root运行?"
-
msgid ""
"\n"
"user quit"
@@ -236,261 +27,257 @@ msgstr ""
"用户退出"
#, python-format
-msgid "No %s running"
-msgstr "无%s账号运行"
+msgid " - %s"
+msgstr "- %s"
#, python-format
-msgid "%s (%s) appears to have stopped"
-msgstr "%s (%s)显示已停止"
+msgid " - parallel, %s"
+msgstr "-平行,%s"
#, python-format
-msgid "Waited %s seconds for %s to die; giving up"
-msgstr "等待%s秒直到%s停止;放弃"
-
-msgid "Found configs:"
-msgstr "找到配置"
+msgid ""
+"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
+msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
#, python-format
-msgid "Signal %s pid: %s signal: %s"
-msgstr "发出信号%s pid: %s 信号: %s"
+msgid "%(ip)s/%(device)s responded as unmounted"
+msgstr "%(ip)s/%(device)s的回应为未挂载"
#, python-format
-msgid "Removing stale pid file %s"
-msgstr "移除原有pid文件%s"
+msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
+msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
#, python-format
-msgid "No permission to signal PID %d"
-msgstr "无权限发送信号PID%d"
+msgid ""
+"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
+"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
+msgstr ""
+"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
+"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
+
+#, python-format
+msgid "%(success)s successes, %(failure)s failures"
+msgstr "%(success)s成功,%(failure)s失败"
+
+#, python-format
+msgid "%(type)s returning 503 for %(statuses)s"
+msgstr "%(type)s 返回 503 在 %(statuses)s"
#, python-format
msgid "%s #%d not running (%s)"
msgstr "%s #%d无法运行(%s)"
#, python-format
-msgid "%s running (%s - %s)"
-msgstr "%s运行(%s - %s)"
+msgid "%s (%s) appears to have stopped"
+msgstr "%s (%s)显示已停止"
#, python-format
msgid "%s already started..."
msgstr "%s已启动..."
-#, python-format
-msgid "Running %s once"
-msgstr "运行%s一次"
-
-#, python-format
-msgid "Starting %s"
-msgstr "启动%s"
-
#, python-format
msgid "%s does not exist"
msgstr "%s不存在"
#, python-format
-msgid "Timeout %(action)s to memcached: %(server)s"
-msgstr "%(action)s超时 高性能内存对象缓存: %(server)s"
+msgid "%s is not mounted"
+msgstr "%s未挂载"
#, python-format
-msgid "Error %(action)s to memcached: %(server)s"
-msgstr "%(action)s错误 高性能内存对象缓存: %(server)s"
+msgid "%s running (%s - %s)"
+msgstr "%s运行(%s - %s)"
#, python-format
-msgid "Error limiting server %s"
-msgstr "服务器出现错误%s "
+msgid ", %s containers deleted"
+msgstr ",删除容器%s"
#, python-format
-msgid "Unable to locate %s in libc. Leaving as a no-op."
-msgstr "无法查询到%s 保留为no-op"
-
-msgid ""
-"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
-msgstr "无法查询到fallocate, posix_fallocate。保存为no-op"
-
-msgid "Connection refused"
-msgstr "连接被拒绝"
-
-msgid "Host unreachable"
-msgstr "无法连接到主机"
-
-msgid "Connection timeout"
-msgstr "连接超时"
-
-msgid "UNCAUGHT EXCEPTION"
-msgstr "未捕获的异常"
-
-msgid "Error: missing config path argument"
-msgstr "错误:设置路径信息丢失"
+msgid ", %s containers possibly remaining"
+msgstr ",可能剩余容器%s"
#, python-format
-msgid "Error: unable to locate %s"
-msgstr "错误:无法查询到 %s"
+msgid ", %s containers remaining"
+msgstr ",剩余容器%s"
#, python-format
-msgid "Unable to read config from %s"
-msgstr "无法从%s读取设置"
+msgid ", %s objects deleted"
+msgstr ",删除对象%s"
#, python-format
-msgid "Unable to find %s config section in %s"
-msgstr "无法在%s中查找到%s设置部分"
+msgid ", %s objects possibly remaining"
+msgstr ",可能剩余对象%s"
#, python-format
-msgid "Invalid X-Container-Sync-To format %r"
-msgstr "无效的X-Container-Sync-To格式%r"
+msgid ", %s objects remaining"
+msgstr ",剩余对象%s"
#, python-format
-msgid "No realm key for %r"
-msgstr "%r权限key不存在"
+msgid ", elapsed: %.02fs"
+msgstr ",耗时:%.02fs"
+
+msgid ", return codes: "
+msgstr ",返回代码:"
+
+msgid "Account"
+msgstr "账号"
#, python-format
-msgid "No cluster endpoint for %r %r"
-msgstr "%r %r的集群节点不存在"
+msgid "Account %s has not been reaped since %s"
+msgstr "账号%s自%s起未被reaped"
+
+#, python-format
+msgid "Account audit \"once\" mode completed: %.02fs"
+msgstr "账号审计\"once\"模式完成: %.02fs"
+
+#, python-format
+msgid "Account audit pass completed: %.02fs"
+msgstr "账号审计完成:%.02fs"
#, python-format
msgid ""
-"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
-"\"https\"."
-msgstr ""
-"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
-
-msgid "Path required in X-Container-Sync-To"
-msgstr "在X-Container-Sync-To中路径是必须的"
-
-msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
-msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许"
+"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
+msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs"
#, python-format
-msgid "Invalid host %r in X-Container-Sync-To"
-msgstr "X-Container-Sync-To中无效主机%r"
-
-msgid "Exception dumping recon cache"
-msgstr "执行dump recon的时候出现异常"
+msgid "Audit Failed for %s: %s"
+msgstr "审计失败%s: %s"
#, python-format
-msgid "Could not bind to %s:%s after trying for %s seconds"
-msgstr "尝试过%s秒后无法捆绑%s:%s"
+msgid "Bad rsync return code: %(ret)d <- %(args)s"
+msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
-msgid ""
-"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
-"termination for a production deployment."
-msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端"
+msgid "Begin account audit \"once\" mode"
+msgstr "开始账号审计\"once\"模式"
-msgid "Error: An error occurred"
-msgstr "错误:一个错误发生了"
+msgid "Begin account audit pass."
+msgstr "开始账号审计通过"
+
+msgid "Begin container audit \"once\" mode"
+msgstr "开始容器审计\"once\" 模式"
+
+msgid "Begin container audit pass."
+msgstr "开始通过容器审计"
+
+msgid "Begin container sync \"once\" mode"
+msgstr "开始容器同步\"once\"模式"
+
+msgid "Begin container update single threaded sweep"
+msgstr "开始容器更新单线程扫除"
+
+msgid "Begin container update sweep"
+msgstr "开始容器更新扫除"
#, python-format
-msgid "Mapped %(given_domain)s to %(found_domain)s"
-msgstr "集合%(given_domain)s到%(found_domain)s"
+msgid "Begin object audit \"%s\" mode (%s%s)"
+msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)"
+
+msgid "Begin object update single threaded sweep"
+msgstr "开始对象更新单线程扫除"
+
+msgid "Begin object update sweep"
+msgstr "开始对象更新扫除"
#, python-format
-msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
-msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s"
+msgid "Beginning pass on account %s"
+msgstr "账号%s开始通过"
-#, python-format
-msgid "Returning 497 because of blacklisting: %s"
-msgstr "返回497因为黑名单:%s"
+msgid "Beginning replication run"
+msgstr "开始运行复制"
-#, python-format
-msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s"
-msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s"
-
-#, python-format
-msgid ""
-"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
-"Sleep) %(e)s"
-msgstr ""
-"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n"
-"\"Sleep) %(e)s"
-
-msgid "Warning: Cannot ratelimit without a memcached client"
-msgstr "警告:缺失缓存客户端 无法控制流量 "
-
-msgid "Error reading recon cache file"
-msgstr "读取recon cache file时出现错误"
-
-msgid "Error parsing recon cache file"
-msgstr "解析recon cache file时出现错误"
-
-msgid "Error retrieving recon data"
-msgstr "检索recon data时出现错误"
-
-msgid "Error listing devices"
-msgstr "设备列表时出现错误"
-
-msgid "Error reading ringfile"
-msgstr "读取ringfile时出现错误"
-
-msgid "Error reading swift.conf"
-msgstr "读取swift.conf时出现错误"
-
-#, python-format
-msgid "Error on render profiling results: %s"
-msgstr "给予分析结果时发生错误:%s"
-
-#, python-format
-msgid "Profiling Error: %s"
-msgstr "分析代码时出现错误:%s"
-
-#, python-format
-msgid "method %s is not allowed."
-msgstr "方法%s不被允许"
-
-#, python-format
-msgid "Can not load profile data from %s."
-msgstr "无法从%s下载分析数据"
-
-msgid "no log file found"
-msgstr "日志文件丢失"
-
-#, python-format
-msgid "Data download error: %s"
-msgstr "数据下载错误:%s"
-
-msgid "python-matplotlib not installed."
-msgstr "python-matplotlib未安装"
-
-#, python-format
-msgid "plotting results failed due to %s"
-msgstr "绘制结果图标时失败因为%s"
-
-msgid "The file type are forbidden to access!"
-msgstr "该文件类型被禁止访问!"
+msgid "Broker error trying to rollback locked connection"
+msgstr "服务器错误并尝试去回滚已经锁住的链接"
#, python-format
msgid "Can not access the file %s."
msgstr "无法访问文件%s"
-msgid "odfpy not installed."
-msgstr "odfpy未安装"
+#, python-format
+msgid "Can not load profile data from %s."
+msgstr "无法从%s下载分析数据"
+
+#, python-format
+msgid "Client did not read from proxy within %ss"
+msgstr "客户尚未从代理处读取%ss"
+
+msgid "Client disconnected on read"
+msgstr "客户读取时中断"
+
+msgid "Client disconnected without sending enough data"
+msgstr "客户中断 尚未发送足够"
#, python-format
msgid ""
-"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
-"audit"
-msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败"
+"Client path %(client)s does not match path stored in object metadata %(meta)s"
+msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
-msgid "Begin container audit pass."
-msgstr "开始通过容器审计"
+msgid "Connection refused"
+msgstr "连接被拒绝"
-#, python-format
-msgid "Container audit pass completed: %.02fs"
-msgstr "容器审计通过完成: %.02fs"
+msgid "Connection timeout"
+msgstr "连接超时"
-msgid "Begin container audit \"once\" mode"
-msgstr "开始容器审计\"once\" 模式"
+msgid "Container"
+msgstr "容器"
#, python-format
msgid "Container audit \"once\" mode completed: %.02fs"
msgstr "容器审计\"once\"模式完成:%.02fs"
#, python-format
-msgid "ERROR Could not get container info %s"
-msgstr "错误:无法获取容器%s信息"
+msgid "Container audit pass completed: %.02fs"
+msgstr "容器审计通过完成: %.02fs"
+
+#, python-format
+msgid "Container sync \"once\" mode completed: %.02fs"
+msgstr "容器同步\"once\"模式完成:%.02fs"
#, python-format
msgid ""
-"ERROR Account update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\""
+"Container update single threaded sweep completed: %(elapsed).02fs, "
+"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
+msgstr ""
+"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, "
+"%(no_change)s 无更改"
+
+#, python-format
+msgid "Container update sweep completed: %.02fs"
+msgstr "容器更新扫除完成:%.02fs"
+
+#, python-format
+msgid ""
+"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
+"successes, %(fail)s failures, %(no_change)s with no changes"
+msgstr ""
+"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, "
+"%(fail)s 失败, %(no_change)s 无更改"
+
+#, python-format
+msgid "Could not bind to %s:%s after trying for %s seconds"
+msgstr "尝试过%s秒后无法捆绑%s:%s"
+
+#, python-format
+msgid "Could not load %r: %s"
+msgstr "无法下载%r: %s"
+
+#, python-format
+msgid "Data download error: %s"
+msgstr "数据下载错误:%s"
+
+#, python-format
+msgid "Devices pass completed: %.02fs"
+msgstr "设备通过完成: %.02fs"
+
+#, python-format
+msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
+msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
+
+#, python-format
+msgid "ERROR %(status)d %(body)s From %(type)s Server"
+msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
+
+#, python-format
+msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
+msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
#, python-format
msgid ""
@@ -502,92 +289,89 @@ msgstr ""
#, python-format
msgid ""
-"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
-"later)"
-msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)"
-
-msgid "Begin container sync \"once\" mode"
-msgstr "开始容器同步\"once\"模式"
+"ERROR Account update failed: different numbers of hosts and devices in "
+"request: \"%s\" vs \"%s\""
+msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\""
#, python-format
-msgid "Container sync \"once\" mode completed: %.02fs"
-msgstr "容器同步\"once\"模式完成:%.02fs"
+msgid "ERROR Bad response %(status)s from %(host)s"
+msgstr "失败响应错误%(status)s来自%(host)s"
+
+#, python-format
+msgid "ERROR Client read timeout (%ss)"
+msgstr "错误 客户读取超时(%ss)"
#, python-format
msgid ""
-"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
-"skipped, %(fail)s failed"
+"ERROR Container update failed (saving for async update later): %(status)d "
+"response from %(ip)s:%(port)s/%(dev)s"
msgstr ""
-"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n"
-"\"%(skip)s 跳过, %(fail)s 失败"
-
-#, python-format
-msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
-msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
-
-#, python-format
-msgid "ERROR Syncing %s"
-msgstr "同步时发生错误%s"
-
-#, python-format
-msgid "Unauth %(sync_from)r => %(sync_to)r"
-msgstr "未授权%(sync_from)r => %(sync_to)r"
+"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/"
+"%(dev)s"
#, python-format
msgid ""
-"Not found %(sync_from)r => %(sync_to)r - object "
-"%(obj_name)r"
-msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
+"ERROR Container update failed: different numbers of hosts and devices in "
+"request: \"%s\" vs \"%s\""
+msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\""
+
+#, python-format
+msgid "ERROR Could not get account info %s"
+msgstr "错误:无法获取账号信息%s"
+
+#, python-format
+msgid "ERROR Could not get container info %s"
+msgstr "错误:无法获取容器%s信息"
+
+#, python-format
+msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
+msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
+
+msgid "ERROR Exception causing client disconnect"
+msgstr "错误 异常导致客户端中断连接"
+
+msgid "ERROR Failed to get my own IPs?"
+msgstr "错误 无法获得我方IPs?"
+
+msgid "ERROR Insufficient Storage"
+msgstr "错误 存储空间不足"
+
+#, python-format
+msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
+msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s"
+
+#, python-format
+msgid "ERROR Pickle problem, quarantining %s"
+msgstr "错误 Pickle问题 隔离%s"
+
+#, python-format
+msgid "ERROR Remote drive not mounted %s"
+msgstr "错误 远程驱动器无法挂载 %s"
#, python-format
msgid "ERROR Syncing %(db_file)s %(row)s"
msgstr "同步错误 %(db_file)s %(row)s"
#, python-format
-msgid "ERROR: Failed to get paths to drive partitions: %s"
-msgstr "%s未挂载"
+msgid "ERROR Syncing %s"
+msgstr "同步时发生错误%s"
#, python-format
-msgid "%s is not mounted"
-msgstr "%s未挂载"
+msgid "ERROR Trying to audit %s"
+msgstr "错误 尝试开始审计%s"
+
+msgid "ERROR Unhandled exception in request"
+msgstr "错误 未处理的异常发出请求"
#, python-format
-msgid "ERROR with loading suppressions from %s: "
-msgstr "执行下载压缩时发生错误%s"
-
-msgid "Begin container update sweep"
-msgstr "开始容器更新扫除"
+msgid "ERROR __call__ error with %(method)s %(path)s "
+msgstr "%(method)s %(path)s出现错误__call__ error"
#, python-format
msgid ""
-"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, "
-"%(fail)s 失败, %(no_change)s 无更改"
-
-#, python-format
-msgid "Container update sweep completed: %.02fs"
-msgstr "容器更新扫除完成:%.02fs"
-
-msgid "Begin container update single threaded sweep"
-msgstr "开始容器更新单线程扫除"
-
-#, python-format
-msgid ""
-"Container update single threaded sweep completed: %(elapsed).02fs, "
-"%(success)s successes, %(fail)s failures, %(no_change)s with no changes"
-msgstr ""
-"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, "
-"%(no_change)s 无更改"
-
-#, python-format
-msgid "Update report sent for %(container)s %(dbfile)s"
-msgstr "更新报告发至%(container)s %(dbfile)s"
-
-#, python-format
-msgid "Update report failed for %(container)s %(dbfile)s"
-msgstr "%(container)s %(dbfile)s更新报告失败"
+"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry "
+"later)"
+msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)"
#, python-format
msgid ""
@@ -596,27 +380,251 @@ msgid ""
msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):"
#, python-format
-msgid " - parallel, %s"
-msgstr "-平行,%s"
+msgid "ERROR async pending file with unexpected name %s"
+msgstr "执行同步等待文件 文件名不可知%s"
+
+msgid "ERROR auditing"
+msgstr "错误 审计"
#, python-format
-msgid " - %s"
-msgstr "- %s"
-
-#, python-format
-msgid "Begin object audit \"%s\" mode (%s%s)"
-msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)"
+msgid "ERROR auditing: %s"
+msgstr "审计错误:%s"
#, python-format
msgid ""
-"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
-"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
-"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
-"%(audit_rate).2f"
+"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
+"update later)"
+msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)"
+
+#, python-format
+msgid "ERROR reading HTTP response from %s"
+msgstr "读取HTTP错误 响应来源%s"
+
+#, python-format
+msgid "ERROR reading db %s"
+msgstr "错误 读取db %s"
+
+#, python-format
+msgid "ERROR rsync failed with %(code)s: %(args)s"
+msgstr "错误 rsync失败 %(code)s: %(args)s"
+
+#, python-format
+msgid "ERROR syncing %(file)s with node %(node)s"
+msgstr "错误 同步 %(file)s 和 节点%(node)s"
+
+msgid "ERROR trying to replicate"
+msgstr "尝试复制时发生错误"
+
+#, python-format
+msgid "ERROR while trying to clean up %s"
+msgstr "清理时出现错误%s"
+
+#, python-format
+msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
+msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s"
+
+#, python-format
+msgid "ERROR with loading suppressions from %s: "
+msgstr "执行下载压缩时发生错误%s"
+
+#, python-format
+msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
+msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s"
+
+#, python-format
+msgid "ERROR: Failed to get paths to drive partitions: %s"
+msgstr "%s未挂载"
+
+#, python-format
+msgid "ERROR: Unable to run auditing: %s"
+msgstr "错误:无法执行审计:%s"
+
+#, python-format
+msgid "Error %(action)s to memcached: %(server)s"
+msgstr "%(action)s错误 高性能内存对象缓存: %(server)s"
+
+#, python-format
+msgid "Error encoding to UTF-8: %s"
+msgstr "UTF-8编码错误:%s"
+
+msgid "Error hashing suffix"
+msgstr "执行Hashing后缀时发生错误"
+
+#, python-format
+msgid "Error in %r with mtime_check_interval: %s"
+msgstr "%r中mtime_check_interval出现错误:%s"
+
+#, python-format
+msgid "Error limiting server %s"
+msgstr "服务器出现错误%s "
+
+msgid "Error listing devices"
+msgstr "设备列表时出现错误"
+
+#, python-format
+msgid "Error on render profiling results: %s"
+msgstr "给予分析结果时发生错误:%s"
+
+msgid "Error parsing recon cache file"
+msgstr "解析recon cache file时出现错误"
+
+msgid "Error reading recon cache file"
+msgstr "读取recon cache file时出现错误"
+
+msgid "Error reading ringfile"
+msgstr "读取ringfile时出现错误"
+
+msgid "Error reading swift.conf"
+msgstr "读取swift.conf时出现错误"
+
+msgid "Error retrieving recon data"
+msgstr "检索recon data时出现错误"
+
+msgid "Error syncing handoff partition"
+msgstr "执行同步切换分区时发生错误"
+
+msgid "Error syncing partition"
+msgstr "执行同步分区时发生错误"
+
+#, python-format
+msgid "Error syncing with node: %s"
+msgstr "执行同步时节点%s发生错误"
+
+msgid "Error: An error occurred"
+msgstr "错误:一个错误发生了"
+
+msgid "Error: missing config path argument"
+msgstr "错误:设置路径信息丢失"
+
+#, python-format
+msgid "Error: unable to locate %s"
+msgstr "错误:无法查询到 %s"
+
+msgid "Exception dumping recon cache"
+msgstr "执行dump recon的时候出现异常"
+
+msgid "Exception in top-level account reaper loop"
+msgstr "异常出现在top-level账号reaper环"
+
+msgid "Exception in top-level replication loop"
+msgstr "top-level复制圈出现异常"
+
+#, python-format
+msgid "Exception while deleting container %s %s"
+msgstr "执行删除容器时出现异常 %s %s"
+
+#, python-format
+msgid "Exception while deleting object %s %s %s"
+msgstr "执行删除对象时发生异常%s %s %s"
+
+#, python-format
+msgid "Exception with %(ip)s:%(port)s/%(device)s"
+msgstr "%(ip)s:%(port)s/%(device)s出现异常"
+
+#, python-format
+msgid "Exception with account %s"
+msgstr "账号%s出现异常"
+
+#, python-format
+msgid "Exception with containers for account %s"
+msgstr "账号%s内容器出现异常"
+
+#, python-format
+msgid ""
+"Exception with objects for container %(container)s for account %(account)s"
+msgstr "账号%(account)s容器%(container)s的对象出现异常"
+
+#, python-format
+msgid "Expect: 100-continue on %s"
+msgstr "已知:100-continue on %s"
+
+#, python-format
+msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s"
+msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s"
+
+msgid "Found configs:"
+msgstr "找到配置"
+
+msgid "Host unreachable"
+msgstr "无法连接到主机"
+
+#, python-format
+msgid "Incomplete pass on account %s"
+msgstr "账号%s未完成通过"
+
+#, python-format
+msgid "Invalid X-Container-Sync-To format %r"
+msgstr "无效的X-Container-Sync-To格式%r"
+
+#, python-format
+msgid "Invalid host %r in X-Container-Sync-To"
+msgstr "X-Container-Sync-To中无效主机%r"
+
+#, python-format
+msgid "Invalid pending entry %(file)s: %(entry)s"
+msgstr "不可用的等待输入%(file)s: %(entry)s"
+
+#, python-format
+msgid "Invalid response %(resp)s from %(ip)s"
+msgstr "无效的回应%(resp)s来自%(ip)s"
+
+#, python-format
+msgid ""
+"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or "
+"\"https\"."
msgstr ""
-"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d "
-"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: "
-"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
+"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。"
+
+#, python-format
+msgid "Killing long-running rsync: %s"
+msgstr "终止long-running同步: %s"
+
+msgid "Lockup detected.. killing live coros."
+msgstr "检测到lockup。终止正在执行的coros"
+
+#, python-format
+msgid "Mapped %(given_domain)s to %(found_domain)s"
+msgstr "集合%(given_domain)s到%(found_domain)s"
+
+#, python-format
+msgid "No %s running"
+msgstr "无%s账号运行"
+
+#, python-format
+msgid "No cluster endpoint for %r %r"
+msgstr "%r %r的集群节点不存在"
+
+#, python-format
+msgid "No permission to signal PID %d"
+msgstr "无权限发送信号PID%d"
+
+#, python-format
+msgid "No realm key for %r"
+msgstr "%r权限key不存在"
+
+#, python-format
+msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
+msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
+
+#, python-format
+msgid ""
+"Not found %(sync_from)r => %(sync_to)r - object "
+"%(obj_name)r"
+msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r"
+
+#, python-format
+msgid "Nothing replicated for %s seconds."
+msgstr "%s秒无复制"
+
+msgid "Object"
+msgstr "对象"
+
+msgid "Object PUT"
+msgstr "对象上传"
+
+#, python-format
+msgid "Object PUT returning 412, %(statuses)r"
+msgstr "对象PUT返还 412,%(statuses)r "
#, python-format
msgid ""
@@ -629,191 +637,32 @@ msgstr ""
"%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: "
"%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
+#, python-format
+msgid ""
+"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, "
+"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: "
+"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: "
+"%(audit_rate).2f"
+msgstr ""
+"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d "
+"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: "
+"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f"
+
#, python-format
msgid "Object audit stats: %s"
msgstr "对象审计统计:%s"
-#, python-format
-msgid "ERROR Trying to audit %s"
-msgstr "错误 尝试开始审计%s"
-
-#, python-format
-msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s"
-msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s"
-
-#, python-format
-msgid "ERROR: Unable to run auditing: %s"
-msgstr "错误:无法执行审计:%s"
-
-#, python-format
-msgid "ERROR auditing: %s"
-msgstr "审计错误:%s"
-
-#, python-format
-msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
-msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
-
-msgid "Error hashing suffix"
-msgstr "执行Hashing后缀时发生错误"
-
-#, python-format
-msgid ""
-"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
-msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
-
-#, python-format
-msgid "Problem cleaning up %s"
-msgstr "问题清除%s"
-
-#, python-format
-msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s"
-msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s"
-
-#, python-format
-msgid ""
-"Client path %(client)s does not match path stored in object metadata %(meta)s"
-msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符"
-
-#, python-format
-msgid "Pass completed in %ds; %d objects expired"
-msgstr "%ds通过完成; %d对象过期"
-
-#, python-format
-msgid "Pass so far %ds; %d objects expired"
-msgstr "%ds目前通过;%d对象过期"
-
-#, python-format
-msgid "Pass beginning; %s possible containers; %s possible objects"
-msgstr "开始通过;%s可能容器;%s可能对象"
-
-#, python-format
-msgid "Exception while deleting container %s %s"
-msgstr "执行删除容器时出现异常 %s %s"
-
-msgid "Unhandled exception"
-msgstr "未处理的异常"
-
-#, python-format
-msgid "Exception while deleting object %s %s %s"
-msgstr "执行删除对象时发生异常%s %s %s"
-
-#, python-format
-msgid ""
-"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced"
-msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced"
-
-#, python-format
-msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
-
-msgid "Lockup detected.. killing live coros."
-msgstr "检测到lockup。终止正在执行的coros"
-
-#, python-format
-msgid "Removing partition: %s"
-msgstr "移除分区:%s"
-
-#, python-format
-msgid "Killing long-running rsync: %s"
-msgstr "终止long-running同步: %s"
-
-#, python-format
-msgid "Bad rsync return code: %(ret)d <- %(args)s"
-msgstr "Bad rsync返还代码:%(ret)d <- %(args)s"
-
-#, python-format
-msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
-msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
-
-msgid "Error syncing handoff partition"
-msgstr "执行同步切换分区时发生错误"
-
-#, python-format
-msgid "%(ip)s/%(device)s responded as unmounted"
-msgstr "%(ip)s/%(device)s的回应为未挂载"
-
-#, python-format
-msgid "Invalid response %(resp)s from %(ip)s"
-msgstr "无效的回应%(resp)s来自%(ip)s"
-
-#, python-format
-msgid "Error syncing with node: %s"
-msgstr "执行同步时节点%s发生错误"
-
-msgid "Error syncing partition"
-msgstr "执行同步分区时发生错误"
-
-#, python-format
-msgid ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in "
-"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-msgstr ""
-"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n"
-"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)"
-
-#, python-format
-msgid "Nothing replicated for %s seconds."
-msgstr "%s秒无复制"
-
-msgid "Ring change detected. Aborting current replication pass."
-msgstr "Ring改变被检测到。退出现有的复制通过"
-
-msgid "Exception in top-level replication loop"
-msgstr "top-level复制圈出现异常"
-
-msgid "Running object replicator in script mode."
-msgstr "在加密模式下执行对象复制"
-
#, python-format
msgid "Object replication complete (once). (%.02f minutes)"
msgstr "对象复制完成(一次)。(%.02f minutes)"
-msgid "Starting object replicator in daemon mode."
-msgstr "在守护模式下开始对象复制"
-
-msgid "Starting object replication pass."
-msgstr "开始通过对象复制"
-
#, python-format
msgid "Object replication complete. (%.02f minutes)"
msgstr "对象复制完成。(%.02f minutes)"
#, python-format
-msgid ""
-"ERROR Container update failed (saving for async update later): %(status)d "
-"response from %(ip)s:%(port)s/%(dev)s"
-msgstr ""
-"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/"
-"%(dev)s"
-
-#, python-format
-msgid ""
-"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async "
-"update later)"
-msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)"
-
-#, python-format
-msgid ""
-"ERROR Container update failed: different numbers of hosts and devices in "
-"request: \"%s\" vs \"%s\""
-msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\""
-
-msgid "Begin object update sweep"
-msgstr "开始对象更新扫除"
-
-#, python-format
-msgid ""
-"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s "
-"successes, %(fail)s failures"
-msgstr ""
-"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败"
-
-#, python-format
-msgid "Object update sweep completed: %.02fs"
-msgstr "对象更新扫除完成:%.02fs"
-
-msgid "Begin object update single threaded sweep"
-msgstr "开始对象更新单线程扫除"
+msgid "Object servers returned %s mismatched etags"
+msgstr "对象服务器返还%s不匹配etags"
#, python-format
msgid ""
@@ -823,105 +672,256 @@ msgstr ""
"对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败"
#, python-format
-msgid "ERROR async pending file with unexpected name %s"
-msgstr "执行同步等待文件 文件名不可知%s"
+msgid "Object update sweep completed: %.02fs"
+msgstr "对象更新扫除完成:%.02fs"
#, python-format
-msgid "ERROR Pickle problem, quarantining %s"
-msgstr "错误 Pickle问题 隔离%s"
+msgid ""
+"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s "
+"successes, %(fail)s failures"
+msgstr ""
+"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败"
+
+msgid "Params, queries, and fragments not allowed in X-Container-Sync-To"
+msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许"
#, python-format
-msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s"
-msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s"
-
-msgid "ERROR Unhandled exception in request"
-msgstr "错误 未处理的异常发出请求"
+msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
+msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs"
#, python-format
-msgid "Node error limited %(ip)s:%(port)s (%(device)s)"
-msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)"
+msgid "Pass beginning; %s possible containers; %s possible objects"
+msgstr "开始通过;%s可能容器;%s可能对象"
#, python-format
-msgid "%(msg)s %(ip)s:%(port)s/%(device)s"
-msgstr "%(msg)s %(ip)s:%(port)s/%(device)s"
+msgid "Pass completed in %ds; %d objects expired"
+msgstr "%ds通过完成; %d对象过期"
#, python-format
-msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s"
-msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s"
+msgid "Pass so far %ds; %d objects expired"
+msgstr "%ds目前通过;%d对象过期"
-msgid "Account"
-msgstr "账号"
-
-msgid "Object"
-msgstr "对象"
-
-msgid "Trying to read during GET (retrying)"
-msgstr "执行GET时尝试读取(重新尝试)"
-
-msgid "Trying to read during GET"
-msgstr "执行GET时尝试读取"
+msgid "Path required in X-Container-Sync-To"
+msgstr "在X-Container-Sync-To中路径是必须的"
#, python-format
-msgid "Client did not read from proxy within %ss"
-msgstr "客户尚未从代理处读取%ss"
+msgid "Problem cleaning up %s"
+msgstr "问题清除%s"
-msgid "Client disconnected on read"
-msgstr "客户读取时中断"
+#, python-format
+msgid "Profiling Error: %s"
+msgstr "分析代码时出现错误:%s"
-msgid "Trying to send to client"
-msgstr "尝试发送到客户端"
+#, python-format
+msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
+msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录"
+
+#, python-format
+msgid ""
+"Quarantined %(object_path)s to %(quar_path)s because it is not a directory"
+msgstr "隔离%(object_path)s和%(quar_path)s因为非目录"
+
+#, python-format
+msgid "Quarantined %s to %s due to %s database"
+msgstr "隔离%s和%s 因为%s数据库"
+
+#, python-format
+msgid "Quarantining DB %s"
+msgstr "隔离DB%s"
+
+#, python-format
+msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s"
+msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s"
+
+#, python-format
+msgid "Removed %(remove)d dbs"
+msgstr "删除%(remove)d dbs"
+
+#, python-format
+msgid "Removing partition: %s"
+msgstr "移除分区:%s"
+
+#, python-format
+msgid "Removing stale pid file %s"
+msgstr "移除原有pid文件%s"
+
+msgid "Replication run OVER"
+msgstr "复制运行结束"
+
+#, python-format
+msgid "Returning 497 because of blacklisting: %s"
+msgstr "返回497因为黑名单:%s"
+
+#, python-format
+msgid ""
+"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max "
+"Sleep) %(e)s"
+msgstr ""
+"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n"
+"\"Sleep) %(e)s"
+
+msgid "Ring change detected. Aborting current replication pass."
+msgstr "Ring改变被检测到。退出现有的复制通过"
+
+#, python-format
+msgid "Running %s once"
+msgstr "运行%s一次"
+
+msgid "Running object replicator in script mode."
+msgstr "在加密模式下执行对象复制"
+
+#, python-format
+msgid "Signal %s pid: %s signal: %s"
+msgstr "发出信号%s pid: %s 信号: %s"
+
+#, python-format
+msgid ""
+"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s "
+"skipped, %(fail)s failed"
+msgstr ""
+"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n"
+"\"%(skip)s 跳过, %(fail)s 失败"
+
+#, python-format
+msgid ""
+"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed "
+"audit"
+msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败"
+
+#, python-format
+msgid ""
+"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed "
+"audit"
+msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败"
+
+#, python-format
+msgid "Skipping %(device)s as it is not mounted"
+msgstr "因无法挂载跳过%(device)s"
+
+#, python-format
+msgid "Skipping %s as it is not mounted"
+msgstr "挂载失败 跳过%s"
+
+#, python-format
+msgid "Starting %s"
+msgstr "启动%s"
+
+msgid "Starting object replication pass."
+msgstr "开始通过对象复制"
+
+msgid "Starting object replicator in daemon mode."
+msgstr "在守护模式下开始对象复制"
+
+#, python-format
+msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)"
+msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)"
+
+msgid "The file type are forbidden to access!"
+msgstr "该文件类型被禁止访问!"
+
+#, python-format
+msgid ""
+"The total %(key)s for the container (%(total)s) does not match the sum of "
+"%(key)s across policies (%(sum)s)"
+msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
+
+#, python-format
+msgid "Timeout %(action)s to memcached: %(server)s"
+msgstr "%(action)s超时 高性能内存对象缓存: %(server)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
-msgid "ERROR Insufficient Storage"
-msgstr "错误 存储空间不足"
-
#, python-format
-msgid "ERROR %(status)d %(body)s From %(type)s Server"
-msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器"
+msgid "Trying to get final status of PUT to %s"
+msgstr "尝试执行获取最后的PUT状态%s"
-#, python-format
-msgid "%(type)s returning 503 for %(statuses)s"
-msgstr "%(type)s 返回 503 在 %(statuses)s"
+msgid "Trying to read during GET"
+msgstr "执行GET时尝试读取"
-msgid "Container"
-msgstr "容器"
+msgid "Trying to read during GET (retrying)"
+msgstr "执行GET时尝试读取(重新尝试)"
+
+msgid "Trying to send to client"
+msgstr "尝试发送到客户端"
#, python-format
msgid "Trying to write to %s"
msgstr "尝试执行书写%s"
-#, python-format
-msgid "Expect: 100-continue on %s"
-msgstr "已知:100-continue on %s"
+msgid "UNCAUGHT EXCEPTION"
+msgstr "未捕获的异常"
#, python-format
-msgid "Trying to get final status of PUT to %s"
-msgstr "尝试执行获取最后的PUT状态%s"
+msgid "Unable to find %s config section in %s"
+msgstr "无法在%s中查找到%s设置部分"
#, python-format
-msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s"
-msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
+msgid "Unable to locate %s in libc. Leaving as a no-op."
+msgstr "无法查询到%s 保留为no-op"
+
+msgid ""
+"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op."
+msgstr "无法查询到fallocate, posix_fallocate。保存为no-op"
#, python-format
-msgid "Object PUT returning 412, %(statuses)r"
-msgstr "对象PUT返还 412,%(statuses)r "
+msgid "Unable to read config from %s"
+msgstr "无法从%s读取设置"
#, python-format
-msgid "ERROR Client read timeout (%ss)"
-msgstr "错误 客户读取超时(%ss)"
-
-msgid "ERROR Exception causing client disconnect"
-msgstr "错误 异常导致客户端中断连接"
-
-msgid "Client disconnected without sending enough data"
-msgstr "客户中断 尚未发送足够"
+msgid "Unauth %(sync_from)r => %(sync_to)r"
+msgstr "未授权%(sync_from)r => %(sync_to)r"
#, python-format
-msgid "Object servers returned %s mismatched etags"
-msgstr "对象服务器返还%s不匹配etags"
+msgid "Unexpected response: %s"
+msgstr "意外响应:%s"
-msgid "Object PUT"
-msgstr "对象上传"
+msgid "Unhandled exception"
+msgstr "未处理的异常"
+
+#, python-format
+msgid "Update report failed for %(container)s %(dbfile)s"
+msgstr "%(container)s %(dbfile)s更新报告失败"
+
+#, python-format
+msgid "Update report sent for %(container)s %(dbfile)s"
+msgstr "更新报告发至%(container)s %(dbfile)s"
+
+msgid ""
+"WARNING: SSL should only be enabled for testing purposes. Use external SSL "
+"termination for a production deployment."
+msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端"
+
+msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
+msgstr "警告:无法修改文件描述限制。是否按非root运行?"
+
+msgid "WARNING: Unable to modify max process limit. Running as non-root?"
+msgstr "警告:无法修改最大运行极限,是否按非root运行?"
+
+msgid "WARNING: Unable to modify memory limit. Running as non-root?"
+msgstr "警告:无法修改内存极限,是否按非root运行?"
+
+#, python-format
+msgid "Waited %s seconds for %s to die; giving up"
+msgstr "等待%s秒直到%s停止;放弃"
+
+msgid "Warning: Cannot ratelimit without a memcached client"
+msgstr "警告:缺失缓存客户端 无法控制流量 "
+
+#, python-format
+msgid "method %s is not allowed."
+msgstr "方法%s不被允许"
+
+msgid "no log file found"
+msgstr "日志文件丢失"
+
+msgid "odfpy not installed."
+msgstr "odfpy未安装"
+
+#, python-format
+msgid "plotting results failed due to %s"
+msgstr "绘制结果图标时失败因为%s"
+
+msgid "python-matplotlib not installed."
+msgstr "python-matplotlib未安装"
From 736cf54adf3ee85d2f473e5e5374f9833422967c Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Thu, 28 May 2015 15:30:47 -0700
Subject: [PATCH 48/98] Remove simplejson from tests
Since we're dropping Python 2.6 support, we can rely on stdlib's json
and get rid of our dependency on simplejson.
This commit just takes simplejson out of the unit and functional
tests. They still pass.
Change-Id: I96f17df81fa5d265395a938b19213d2638682106
---
test/functional/test_object.py | 2 +-
test/unit/common/middleware/test_list_endpoints.py | 3 ++-
test/unit/common/test_direct_client.py | 3 ++-
test/unit/proxy/controllers/test_info.py | 2 +-
4 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/test/functional/test_object.py b/test/functional/test_object.py
index e74a7f632e..4a62da1a77 100755
--- a/test/functional/test_object.py
+++ b/test/functional/test_object.py
@@ -15,11 +15,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import unittest
from nose import SkipTest
from uuid import uuid4
-from swift.common.utils import json
from test.functional import check_response, retry, requires_acls, \
requires_policies
diff --git a/test/unit/common/middleware/test_list_endpoints.py b/test/unit/common/middleware/test_list_endpoints.py
index 3ec0379586..2537d0ffdc 100644
--- a/test/unit/common/middleware/test_list_endpoints.py
+++ b/test/unit/common/middleware/test_list_endpoints.py
@@ -14,6 +14,7 @@
# limitations under the License.
import array
+import json
import unittest
from tempfile import mkdtemp
from shutil import rmtree
@@ -21,7 +22,7 @@ from shutil import rmtree
import os
import mock
from swift.common import ring, utils
-from swift.common.utils import json, split_path
+from swift.common.utils import split_path
from swift.common.swob import Request, Response
from swift.common.middleware import list_endpoints
from swift.common.storage_policy import StoragePolicy, POLICIES
diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py
index 6f7660cdf3..145ac83c08 100644
--- a/test/unit/common/test_direct_client.py
+++ b/test/unit/common/test_direct_client.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import unittest
import os
import urllib
@@ -25,7 +26,7 @@ import mock
from swift.common import direct_client
from swift.common.exceptions import ClientException
-from swift.common.utils import json, Timestamp
+from swift.common.utils import Timestamp
from swift.common.swob import HeaderKeyDict, RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
diff --git a/test/unit/proxy/controllers/test_info.py b/test/unit/proxy/controllers/test_info.py
index f33beba024..adf3329683 100644
--- a/test/unit/proxy/controllers/test_info.py
+++ b/test/unit/proxy/controllers/test_info.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import unittest
import time
from mock import Mock
@@ -20,7 +21,6 @@ from mock import Mock
from swift.proxy.controllers import InfoController
from swift.proxy.server import Application as ProxyApp
from swift.common import utils
-from swift.common.utils import json
from swift.common.swob import Request, HTTPException
From 38787d0fb5102e41b153e4629d2ef374a02965e9 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Tue, 26 May 2015 16:19:54 -0700
Subject: [PATCH 49/98] Remove simplejson from staticweb
Since we're dropping Python 2.6 support, we can rely on stdlib's json
and get rid of our dependency on simplejson.
This lets us get rid of some redundant Unicode encoding. Before, we
would take the container-listing response off the wire,
JSON-deserialize it (str -> unicode), then pass each of several fields
from each entry to get_valid_utf8_str(), which would encode it,
(unicode -> str), decode it (str -> unicode), and then encode it again
(unicode -> str) for good measure.
The net effect was that each object's name would, in the proxy server,
go str -> unicode -> str -> unicode -> str.
By replacing simplejson with stdlib json, we get a guarantee that each
container-listing entry's name, hash, content_type, and last_modified
are unicodes, so we can stop worrying about them being valid UTF-8 or
not. This takes an encode and decode out of the path, so we just have
str -> unicode -> str. While it'd be ideal to avoid this, the first
transform (str -> unicode) happens when we decode the
container-listing response body (json.loads()), so there's no way out.
Change-Id: I00aedf952d691a809c23025b89131ea0f02b6431
---
swift/common/middleware/staticweb.py | 18 +++++++------
test/unit/common/middleware/test_staticweb.py | 26 +------------------
2 files changed, 11 insertions(+), 33 deletions(-)
diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py
index 34b102ea53..d16b5ae3e7 100644
--- a/swift/common/middleware/staticweb.py
+++ b/swift/common/middleware/staticweb.py
@@ -117,10 +117,11 @@ Example usage of this middleware via ``swift``:
import cgi
+import json
import time
from swift.common.utils import human_readable, split_path, config_true_value, \
- json, quote, get_valid_utf8_str, register_swift_info
+ quote, register_swift_info
from swift.common.wsgi import make_pre_authed_env, WSGIContext
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
@@ -289,7 +290,7 @@ class _StaticWebContext(WSGIContext):
' \n'
for item in listing:
if 'subdir' in item:
- subdir = get_valid_utf8_str(item['subdir'])
+ subdir = item['subdir'].encode("utf-8")
if prefix:
subdir = subdir[len(prefix):]
body += ' \n' \
@@ -300,13 +301,14 @@ class _StaticWebContext(WSGIContext):
(quote(subdir), cgi.escape(subdir))
for item in listing:
if 'name' in item:
- name = get_valid_utf8_str(item['name'])
+ name = item['name'].encode("utf-8")
if prefix:
name = name[len(prefix):]
- content_type = get_valid_utf8_str(item['content_type'])
- bytes = get_valid_utf8_str(human_readable(item['bytes']))
- last_modified = (cgi.escape(item['last_modified']).
- split('.')[0].replace('T', ' '))
+ content_type = item['content_type'].encode("utf-8")
+ bytes = human_readable(item['bytes'])
+ last_modified = (
+ cgi.escape(item['last_modified'].encode("utf-8")).
+ split('.')[0].replace('T', ' '))
body += '
\n' \
' %s | \n' \
' %s | \n' \
@@ -315,7 +317,7 @@ class _StaticWebContext(WSGIContext):
(' '.join('type-' + cgi.escape(t.lower(), quote=True)
for t in content_type.split('/')),
quote(name), cgi.escape(name),
- bytes, get_valid_utf8_str(last_modified))
+ bytes, last_modified)
body += ' \n' \
'