From 9c33bbde6923b26f111572ae967a3b97a8ab12f2 Mon Sep 17 00:00:00 2001 From: Prashanth Pai Date: Tue, 20 Jan 2015 12:14:32 +0530 Subject: [PATCH 01/98] Allow rsync to use compression From rsync's man page: -z, --compress With this option, rsync compresses the file data as it is sent to the destination machine, which reduces the amount of data being transmitted -- something that is useful over a slow connection. A configurable option has been added to allow rsync to compress, but only if the remote node is in a different region than the local one. NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might slow down the syncing process. On wire compression can also be extended to ssync later in a different change if required. In case of ssync, we could explore faster compression libraries like lz4. rsync uses zlib which is slow but offers higher compression ratio. Change-Id: Ic9b9cbff9b5e68bef8257b522cc352fc3544db3c Signed-off-by: Prashanth Pai --- etc/account-server.conf-sample | 5 +++ etc/container-server.conf-sample | 5 +++ etc/object-server.conf-sample | 7 +++ swift/common/db_replicator.py | 62 +++++++++++++++++++++----- swift/container/replicator.py | 5 ++- swift/obj/replicator.py | 7 +++ test/unit/common/test_db_replicator.py | 59 +++++++++++++++++++----- test/unit/obj/test_replicator.py | 37 +++++++++++++++ 8 files changed, 163 insertions(+), 24 deletions(-) diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 6a7fcb929b..98c97acf6f 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -114,6 +114,11 @@ use = egg:swift#recon # of run_pause. # run_pause = 30 # +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# # recon_cache_path = /var/cache/swift [account-auditor] diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index de511368ad..7405a3d250 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -115,6 +115,11 @@ use = egg:swift#recon # of run_pause. # run_pause = 30 # +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# # recon_cache_path = /var/cache/swift [container-updater] diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index b594a9576f..933f30f2f1 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -174,6 +174,13 @@ use = egg:swift#recon # passed to rsync for io op timeout # rsync_io_timeout = 30 # +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might +# slow down the syncing process. 
+# rsync_compress = no +# # node_timeout = # max duration of an http request; this is for REPLICATE finalization calls and # so should be longer than node_timeout diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index e456beed75..334cf74347 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -167,6 +167,8 @@ class Replicator(Daemon): self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.node_timeout = int(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) + self.rsync_compress = config_true_value( + conf.get('rsync_compress', 'no')) self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) @@ -209,13 +211,16 @@ class Replicator(Daemon): ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty', 'diff_capped')])) - def _rsync_file(self, db_file, remote_file, whole_file=True): + def _rsync_file(self, db_file, remote_file, whole_file=True, + different_region=False): """ Sync a single file using rsync. Used by _rsync_db to handle syncing. :param db_file: file to be synced :param remote_file: remote location to sync the DB file to :param whole-file: if True, uses rsync's --whole-file flag + :param different_region: if True, the destination node is in a + different region :returns: True if the sync was successful, False otherwise """ @@ -224,6 +229,12 @@ class Replicator(Daemon): '--contimeout=%s' % int(math.ceil(self.conn_timeout))] if whole_file: popen_args.append('--whole-file') + + if self.rsync_compress and different_region: + # Allow for compression, but only if the remote node is in + # a different region than the local one. + popen_args.append('--compress') + popen_args.extend([db_file, remote_file]) proc = subprocess.Popen(popen_args) proc.communicate() @@ -233,7 +244,8 @@ class Replicator(Daemon): return proc.returncode == 0 def _rsync_db(self, broker, device, http, local_id, - replicate_method='complete_rsync', replicate_timeout=None): + replicate_method='complete_rsync', replicate_timeout=None, + different_region=False): """ Sync a whole db using rsync. 
@@ -243,6 +255,8 @@ class Replicator(Daemon): :param local_id: unique ID of the local database replica :param replicate_method: remote operation to perform after rsync :param replicate_timeout: timeout to wait in seconds + :param different_region: if True, the destination node is in a + different region """ device_ip = rsync_ip(device['replication_ip']) if self.vm_test_mode: @@ -253,14 +267,17 @@ class Replicator(Daemon): remote_file = '%s::%s/%s/tmp/%s' % ( device_ip, self.server_type, device['device'], local_id) mtime = os.path.getmtime(broker.db_file) - if not self._rsync_file(broker.db_file, remote_file): + if not self._rsync_file(broker.db_file, remote_file, + different_region=different_region): return False # perform block-level sync if the db was modified during the first sync if os.path.exists(broker.db_file + '-journal') or \ os.path.getmtime(broker.db_file) > mtime: # grab a lock so nobody else can modify it with broker.lock(): - if not self._rsync_file(broker.db_file, remote_file, False): + if not self._rsync_file(broker.db_file, remote_file, + whole_file=False, + different_region=different_region): return False with Timeout(replicate_timeout or self.node_timeout): response = http.replicate(replicate_method, local_id) @@ -363,7 +380,8 @@ class Replicator(Daemon): 'put_timestamp', 'delete_timestamp', 'metadata') return tuple(info[key] for key in sync_args_order) - def _repl_to_node(self, node, broker, partition, info): + def _repl_to_node(self, node, broker, partition, info, + different_region=False): """ Replicate a database to a node. @@ -373,6 +391,8 @@ class Replicator(Daemon): :param info: DB info as a dictionary of {'max_row', 'hash', 'id', 'created_at', 'put_timestamp', 'delete_timestamp', 'metadata'} + :param different_region: if True, the destination node is in a + different region :returns: True if successful, False otherwise """ @@ -382,13 +402,16 @@ class Replicator(Daemon): response = http.replicate('sync', *sync_args) if not response: return False - return self._handle_sync_response(node, response, info, broker, http) + return self._handle_sync_response(node, response, info, broker, http, + different_region=different_region) - def _handle_sync_response(self, node, response, info, broker, http): + def _handle_sync_response(self, node, response, info, broker, http, + different_region=False): if response.status == HTTP_NOT_FOUND: # completely missing, rsync self.stats['rsync'] += 1 self.logger.increment('rsyncs') - return self._rsync_db(broker, node, http, info['id']) + return self._rsync_db(broker, node, http, info['id'], + different_region=different_region) elif response.status == HTTP_INSUFFICIENT_STORAGE: raise DriveNotMounted() elif response.status >= 200 and response.status < 300: @@ -403,7 +426,8 @@ class Replicator(Daemon): self.logger.increment('remote_merges') return self._rsync_db(broker, node, http, info['id'], replicate_method='rsync_then_merge', - replicate_timeout=(info['count'] / 2000)) + replicate_timeout=(info['count'] / 2000), + different_region=different_region) # else send diffs over to the remote server return self._usync_db(max(rinfo['point'], local_sync), broker, http, rinfo['id'], info['id']) @@ -470,6 +494,11 @@ class Replicator(Daemon): return responses = [] nodes = self.ring.get_part_nodes(int(partition)) + local_dev = None + for node in nodes: + if node['id'] == node_id: + local_dev = node + break if shouldbehere: shouldbehere = bool([n for n in nodes if n['id'] == node_id]) # See Footnote [1] for an explanation of the repl_nodes 
assignment. @@ -478,10 +507,23 @@ class Replicator(Daemon): i += 1 repl_nodes = nodes[i + 1:] + nodes[:i] more_nodes = self.ring.get_more_nodes(int(partition)) + if not local_dev: + # Check further if local device is a handoff node + for node in more_nodes: + if node['id'] == node_id: + local_dev = node + break for node in repl_nodes: + different_region = False + if local_dev and local_dev['region'] != node['region']: + # This additional information will help later if we + # want to handle syncing to a node in different + # region with some optimizations. + different_region = True success = False try: - success = self._repl_to_node(node, broker, partition, info) + success = self._repl_to_node(node, broker, partition, info, + different_region) except DriveNotMounted: repl_nodes.append(more_nodes.next()) self.logger.error(_('ERROR Remote drive not mounted %s'), node) diff --git a/swift/container/replicator.py b/swift/container/replicator.py index 8974535251..8d3bfce7f8 100644 --- a/swift/container/replicator.py +++ b/swift/container/replicator.py @@ -59,7 +59,8 @@ class ContainerReplicator(db_replicator.Replicator): 'storage_policy_index')) return sync_args - def _handle_sync_response(self, node, response, info, broker, http): + def _handle_sync_response(self, node, response, info, broker, http, + different_region): parent = super(ContainerReplicator, self) if is_success(response.status): remote_info = json.loads(response.data) @@ -74,7 +75,7 @@ class ContainerReplicator(db_replicator.Replicator): broker.merge_timestamps(*(remote_info[key] for key in sync_timestamps)) rv = parent._handle_sync_response( - node, response, info, broker, http) + node, response, info, broker, http, different_region) return rv def find_local_handoff_for_part(self, part): diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index b3df0ce28f..eb65eb3879 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -76,6 +76,8 @@ class ObjectReplicator(Daemon): self.rsync_timeout = int(conf.get('rsync_timeout', 900)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') + self.rsync_compress = config_true_value( + conf.get('rsync_compress', 'no')) self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) self.recon_cache_path = conf.get('recon_cache_path', @@ -183,6 +185,11 @@ class ObjectReplicator(Daemon): '--contimeout=%s' % self.rsync_io_timeout, '--bwlimit=%s' % self.rsync_bwlimit, ] + if self.rsync_compress and \ + job['region'] != node['region']: + # Allow for compression, but only if the remote node is in + # a different region than the local one. 
+ args.append('--compress') node_ip = rsync_ip(node['replication_ip']) if self.vm_test_mode: rsync_module = '%s::object%s' % (node_ip, node['replication_port']) diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 0f3cc72e94..e50aa68dae 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -92,22 +92,23 @@ class FakeRingWithNodes(object): class Ring(object): devs = [dict( id=1, weight=10.0, zone=1, ip='1.1.1.1', port=6000, device='sdb', - meta='' + meta='', replication_ip='1.1.1.1', replication_port=6000, region=1 ), dict( id=2, weight=10.0, zone=2, ip='1.1.1.2', port=6000, device='sdb', - meta='' + meta='', replication_ip='1.1.1.2', replication_port=6000, region=2 ), dict( id=3, weight=10.0, zone=3, ip='1.1.1.3', port=6000, device='sdb', - meta='' + meta='', replication_ip='1.1.1.3', replication_port=6000, region=1 ), dict( id=4, weight=10.0, zone=4, ip='1.1.1.4', port=6000, device='sdb', - meta='' + meta='', replication_ip='1.1.1.4', replication_port=6000, region=2 ), dict( id=5, weight=10.0, zone=5, ip='1.1.1.5', port=6000, device='sdb', - meta='' + meta='', replication_ip='1.1.1.5', replication_port=6000, region=1 ), dict( id=6, weight=10.0, zone=6, ip='1.1.1.6', port=6000, device='sdb', - meta='')] + meta='', replication_ip='1.1.1.6', replication_port=6000, region=2 + )] def __init__(self, path, reload_time=15, ring_name=None): pass @@ -334,9 +335,26 @@ class TestDBReplicator(unittest.TestCase): '/some/file', 'remote:/some_file'],) self.assertEqual(exp_args, process.args) + def test_rsync_file_popen_args_different_region_and_rsync_compress(self): + replicator = TestReplicator({}) + for rsync_compress in (False, True): + replicator.rsync_compress = rsync_compress + for different_region in (False, True): + with _mock_process(0) as process: + replicator._rsync_file('/some/file', 'remote:/some_file', + False, different_region) + if rsync_compress and different_region: + # --compress arg should be passed to rsync binary + # only when rsync_compress option is enabled + # AND destination node is in a different + # region + self.assertTrue('--compress' in process.args[0]) + else: + self.assertFalse('--compress' in process.args[0]) + def test_rsync_db(self): replicator = TestReplicator({}) - replicator._rsync_file = lambda *args: True + replicator._rsync_file = lambda *args, **kwargs: True fake_device = {'replication_ip': '127.0.0.1', 'device': 'sda1'} replicator._rsync_db(FakeBroker(), fake_device, ReplHttp(), 'abcd') @@ -355,7 +373,8 @@ class TestDBReplicator(unittest.TestCase): self.db_file = db_file self.remote_file = remote_file - def _rsync_file(self_, db_file, remote_file, whole_file=True): + def _rsync_file(self_, db_file, remote_file, whole_file=True, + different_region=False): self.assertEqual(self_.db_file, db_file) self.assertEqual(self_.remote_file, remote_file) self_._rsync_file_called = True @@ -403,7 +422,8 @@ class TestDBReplicator(unittest.TestCase): self.broker = broker self._rsync_file_call_count = 0 - def _rsync_file(self_, db_file, remote_file, whole_file=True): + def _rsync_file(self_, db_file, remote_file, whole_file=True, + different_region=False): self_._rsync_file_call_count += 1 if self_._rsync_file_call_count == 1: self.assertEquals(True, whole_file) @@ -630,6 +650,20 @@ class TestDBReplicator(unittest.TestCase): [(('Found /path/to/file for /a%20c%20t/c%20o%20n when it should ' 'be on partition 0; will replicate out and remove.',), {})]) + def 
test_replicate_object_different_region(self): + db_replicator.ring = FakeRingWithNodes() + replicator = TestReplicator({}) + replicator._repl_to_node = mock.Mock() + # For node_id = 1, one replica in same region(1) and other is in a + # different region(2). Refer: FakeRingWithNodes + replicator._replicate_object('0', '/path/to/file', 1) + # different_region was set True and passed to _repl_to_node() + self.assertEqual(replicator._repl_to_node.call_args_list[0][0][-1], + True) + # different_region was set False and passed to _repl_to_node() + self.assertEqual(replicator._repl_to_node.call_args_list[1][0][-1], + False) + def test_delete_db(self): db_replicator.lock_parent_directory = lock_parent_directory replicator = TestReplicator({}, logger=unit.FakeLogger()) @@ -1202,7 +1236,8 @@ class TestReplToNode(unittest.TestCase): mock.call(self.broker, self.fake_node, self.http, self.fake_info['id'], replicate_method='rsync_then_merge', - replicate_timeout=(self.fake_info['count'] / 2000)) + replicate_timeout=(self.fake_info['count'] / 2000), + different_region=False) ]) def test_repl_to_node_already_in_sync(self): @@ -1217,13 +1252,13 @@ class TestReplToNode(unittest.TestCase): def test_repl_to_node_not_found(self): self.http = ReplHttp('{"id": 3, "point": -1}', set_status=404) self.assertEquals(self.replicator._repl_to_node( - self.fake_node, self.broker, '0', self.fake_info), True) + self.fake_node, self.broker, '0', self.fake_info, False), True) self.replicator.logger.increment.assert_has_calls([ mock.call.increment('rsyncs') ]) self.replicator._rsync_db.assert_has_calls([ mock.call(self.broker, self.fake_node, self.http, - self.fake_info['id']) + self.fake_info['id'], different_region=False) ]) def test_repl_to_node_drive_not_mounted(self): diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index bf1c5bcb52..0bb86794ee 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -1116,6 +1116,43 @@ class TestObjectReplicator(unittest.TestCase): '/a83', headers=self.headers)) mock_http.assert_has_calls(reqs, any_order=True) + def test_rsync_compress_different_region(self): + self.assertEqual(self.replicator.sync_method, self.replicator.rsync) + jobs = self.replicator.collect_jobs() + _m_rsync = mock.Mock(return_value=0) + _m_os_path_exists = mock.Mock(return_value=True) + with mock.patch.object(self.replicator, '_rsync', _m_rsync): + with mock.patch('os.path.exists', _m_os_path_exists): + for job in jobs: + self.assertTrue('region' in job) + for node in job['nodes']: + for rsync_compress in (True, False): + self.replicator.rsync_compress = rsync_compress + ret = \ + self.replicator.sync(node, job, + ['fake_suffix']) + self.assertTrue(ret) + if node['region'] != job['region']: + if rsync_compress: + # --compress arg should be passed to rsync + # binary only when rsync_compress option is + # enabled AND destination node is in a + # different region + self.assertTrue('--compress' in + _m_rsync.call_args[0][0]) + else: + self.assertFalse('--compress' in + _m_rsync.call_args[0][0]) + else: + self.assertFalse('--compress' in + _m_rsync.call_args[0][0]) + self.assertEqual( + _m_os_path_exists.call_args_list[-1][0][0], + os.path.join(job['path'], 'fake_suffix')) + self.assertEqual( + _m_os_path_exists.call_args_list[-2][0][0], + os.path.join(job['path'])) + if __name__ == '__main__': unittest.main() From 38ae7bb89c95292c171cb89c1d62859cee0cc24f Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 15 Apr 2015 23:03:21 +0100 Subject: [PATCH 
02/98] Make the reaper use same timestamp for replica deletes The account reaper is using a unique timestamp when deleting replicas of the same resource. This will result in unnecessary replication traffic after reaping. This patch makes the reaper use a single timestamp per resource. Probe test is modified to check that delete times are equal across replicas before replicators run. test_direct_client.py is modified to check that it uses explicit timestamp when passed to direct_delete_[object|container] methods. Drive-by bug fixes in the probe test e.g. it was not sending X-Backend-Storage-Policy-Index when doing a direct GET to check object state, so the 404s being verified could in fact be due to diskfile not existing rather than diskfile being deleted. Closes-Bug: 1442879 Change-Id: I8bab22d66308bb9d3294e1e0def017c784228423 --- swift/account/reaper.py | 9 +++- swift/common/direct_client.py | 3 +- test/probe/test_account_reaper.py | 73 ++++++++++++++++++++++++-- test/unit/account/test_reaper.py | 71 +++++++++++++++++-------- test/unit/common/test_direct_client.py | 26 +++++++++ 5 files changed, 153 insertions(+), 29 deletions(-) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 06a0085352..9eaee561ec 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -376,6 +376,7 @@ class AccountReaper(Daemon): break successes = 0 failures = 0 + timestamp = Timestamp(time()) for node in nodes: anode = account_nodes.pop() try: @@ -386,7 +387,8 @@ class AccountReaper(Daemon): headers={'X-Account-Host': '%(ip)s:%(port)s' % anode, 'X-Account-Partition': str(account_partition), 'X-Account-Device': anode['device'], - 'X-Account-Override-Deleted': 'yes'}) + 'X-Account-Override-Deleted': 'yes', + 'X-Timestamp': timestamp.internal}) successes += 1 self.stats_return_codes[2] = \ self.stats_return_codes.get(2, 0) + 1 @@ -443,6 +445,8 @@ class AccountReaper(Daemon): part, nodes = ring.get_nodes(account, container, obj) successes = 0 failures = 0 + timestamp = Timestamp(time()) + for node in nodes: cnode = next(cnodes) try: @@ -453,7 +457,8 @@ class AccountReaper(Daemon): headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode, 'X-Container-Partition': str(container_partition), 'X-Container-Device': cnode['device'], - 'X-Backend-Storage-Policy-Index': policy_index}) + 'X-Backend-Storage-Policy-Index': policy_index, + 'X-Timestamp': timestamp.internal}) successes += 1 self.stats_return_codes[2] = \ self.stats_return_codes.get(2, 0) + 1 diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 35ca24a64c..dd36e3b499 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -204,10 +204,11 @@ def direct_delete_container(node, part, account, container, conn_timeout=5, headers = {} path = '/%s/%s' % (account, container) + add_timestamp = 'x-timestamp' not in (k.lower() for k in headers) with Timeout(conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, 'DELETE', path, - headers=gen_headers(headers, True)) + headers=gen_headers(headers, add_timestamp)) with Timeout(response_timeout): resp = conn.getresponse() resp.read() diff --git a/test/probe/test_account_reaper.py b/test/probe/test_account_reaper.py index 8368a59206..7da9dcd39d 100644 --- a/test/probe/test_account_reaper.py +++ b/test/probe/test_account_reaper.py @@ -53,33 +53,96 @@ class TestAccountReaper(ReplProbeTest): for node in nodes: direct_delete_account(node, part, self.account) + # run the reaper Manager(['account-reaper']).once() - 
self.get_to_final_state() - for policy, container, obj in all_objects: + # verify that any container deletes were at same timestamp cpart, cnodes = self.container_ring.get_nodes( self.account, container) + delete_times = set() for cnode in cnodes: try: direct_head_container(cnode, cpart, self.account, container) except ClientException as err: self.assertEquals(err.http_status, 404) + delete_time = err.http_headers.get( + 'X-Backend-DELETE-Timestamp') + # 'X-Backend-DELETE-Timestamp' confirms it was deleted + self.assertTrue(delete_time) + delete_times.add(delete_time) + else: - self.fail('Found un-reaped /%s/%s on %r' % - (self.account, container, node)) + # Container replicas may not yet be deleted if we have a + # policy with object replicas < container replicas, so + # ignore successful HEAD. We'll check for all replicas to + # be deleted again after running the replicators. + pass + self.assertEqual(1, len(delete_times), delete_times) + + # verify that all object deletes were at same timestamp object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/') part, nodes = object_ring.get_nodes(self.account, container, obj) + headers = {'X-Backend-Storage-Policy-Index': int(policy)} + delete_times = set() for node in nodes: try: direct_get_object(node, part, self.account, - container, obj) + container, obj, headers=headers) except ClientException as err: self.assertEquals(err.http_status, 404) + delete_time = err.http_headers.get('X-Backend-Timestamp') + # 'X-Backend-Timestamp' confirms obj was deleted + self.assertTrue(delete_time) + delete_times.add(delete_time) else: self.fail('Found un-reaped /%s/%s/%s on %r in %s!' % (self.account, container, obj, node, policy)) + self.assertEqual(1, len(delete_times)) + + # run replicators and updaters + self.get_to_final_state() + + for policy, container, obj in all_objects: + # verify that ALL container replicas are now deleted + cpart, cnodes = self.container_ring.get_nodes( + self.account, container) + delete_times = set() + for cnode in cnodes: + try: + direct_head_container(cnode, cpart, self.account, + container) + except ClientException as err: + self.assertEquals(err.http_status, 404) + delete_time = err.http_headers.get( + 'X-Backend-DELETE-Timestamp') + # 'X-Backend-DELETE-Timestamp' confirms it was deleted + self.assertTrue(delete_time) + delete_times.add(delete_time) + else: + self.fail('Found un-reaped /%s/%s on %r' % + (self.account, container, cnode)) + + # sanity check that object state is still consistent... + object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/') + part, nodes = object_ring.get_nodes(self.account, container, obj) + headers = {'X-Backend-Storage-Policy-Index': int(policy)} + delete_times = set() + for node in nodes: + try: + direct_get_object(node, part, self.account, + container, obj, headers=headers) + except ClientException as err: + self.assertEquals(err.http_status, 404) + delete_time = err.http_headers.get('X-Backend-Timestamp') + # 'X-Backend-Timestamp' confirms obj was deleted + self.assertTrue(delete_time) + delete_times.add(delete_time) + else: + self.fail('Found un-reaped /%s/%s/%s on %r in %s!' 
% + (self.account, container, obj, node, policy)) + self.assertEqual(1, len(delete_times)) if __name__ == "__main__": diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index d81b565fc4..b413a646a1 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -278,30 +278,34 @@ class TestReaper(unittest.TestCase): 'mount_check': 'false', } r = reaper.AccountReaper(conf, logger=unit.debug_logger()) - ring = unit.FakeRing() mock_path = 'swift.account.reaper.direct_delete_object' for policy in POLICIES: r.reset_stats() with patch(mock_path) as fake_direct_delete: - r.reap_object('a', 'c', 'partition', cont_nodes, 'o', - policy.idx) - for i, call_args in enumerate( - fake_direct_delete.call_args_list): - cnode = cont_nodes[i % len(cont_nodes)] - host = '%(ip)s:%(port)s' % cnode - device = cnode['device'] - headers = { - 'X-Container-Host': host, - 'X-Container-Partition': 'partition', - 'X-Container-Device': device, - 'X-Backend-Storage-Policy-Index': policy.idx - } - ring = r.get_object_ring(policy.idx) - expected = call(dict(ring.devs[i], index=i), 0, - 'a', 'c', 'o', - headers=headers, conn_timeout=0.5, - response_timeout=10) - self.assertEqual(call_args, expected) + with patch('swift.account.reaper.time') as mock_time: + mock_time.return_value = 1429117638.86767 + r.reap_object('a', 'c', 'partition', cont_nodes, 'o', + policy.idx) + mock_time.assert_called_once_with() + for i, call_args in enumerate( + fake_direct_delete.call_args_list): + cnode = cont_nodes[i % len(cont_nodes)] + host = '%(ip)s:%(port)s' % cnode + device = cnode['device'] + headers = { + 'X-Container-Host': host, + 'X-Container-Partition': 'partition', + 'X-Container-Device': device, + 'X-Backend-Storage-Policy-Index': policy.idx, + 'X-Timestamp': '1429117638.86767' + } + ring = r.get_object_ring(policy.idx) + expected = call(dict(ring.devs[i], index=i), 0, + 'a', 'c', 'o', + headers=headers, conn_timeout=0.5, + response_timeout=10) + self.assertEqual(call_args, expected) + self.assertEqual(policy.object_ring.replicas - 1, i) self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas) @@ -366,7 +370,11 @@ class TestReaper(unittest.TestCase): return headers, obj_list mocks['direct_get_container'].side_effect = fake_get_container - r.reap_container('a', 'partition', acc_nodes, 'c') + with patch('swift.account.reaper.time') as mock_time: + mock_time.side_effect = [1429117638.86767, 1429117639.67676] + r.reap_container('a', 'partition', acc_nodes, 'c') + + # verify calls to direct_delete_object mock_calls = mocks['direct_delete_object'].call_args_list self.assertEqual(policy.object_ring.replicas, len(mock_calls)) for call_args in mock_calls: @@ -374,8 +382,29 @@ class TestReaper(unittest.TestCase): self.assertEqual(kwargs['headers'] ['X-Backend-Storage-Policy-Index'], policy.idx) + self.assertEqual(kwargs['headers'] + ['X-Timestamp'], + '1429117638.86767') + # verify calls to direct_delete_container self.assertEquals(mocks['direct_delete_container'].call_count, 3) + for i, call_args in enumerate( + mocks['direct_delete_container'].call_args_list): + anode = acc_nodes[i % len(acc_nodes)] + host = '%(ip)s:%(port)s' % anode + device = anode['device'] + headers = { + 'X-Account-Host': host, + 'X-Account-Partition': 'partition', + 'X-Account-Device': device, + 'X-Account-Override-Deleted': 'yes', + 'X-Timestamp': '1429117639.67676' + } + ring = r.get_object_ring(policy.idx) + expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c', + headers=headers, 
conn_timeout=0.5,
+                                response_timeout=10)
+                self.assertEqual(call_args, expected)
 
         self.assertEqual(r.stats_objects_deleted,
                          policy.object_ring.replicas)
 
     def test_reap_container_get_object_fail(self):
diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py
index d41a7c9672..6f7660cdf3 100644
--- a/test/unit/common/test_direct_client.py
+++ b/test/unit/common/test_direct_client.py
@@ -341,6 +341,19 @@ class TestDirectClient(unittest.TestCase):
         self.assertEqual(conn.method, 'DELETE')
         self.assertEqual(conn.path, self.container_path)
 
+    def test_direct_delete_container_with_timestamp(self):
+        # ensure timestamp is different from any that might be auto-generated
+        timestamp = Timestamp(time.time() - 100)
+        headers = {'X-Timestamp': timestamp.internal}
+        with mocked_http_conn(200) as conn:
+            direct_client.direct_delete_container(
+                self.node, self.part, self.account, self.container,
+                headers=headers)
+        self.assertEqual(conn.method, 'DELETE')
+        self.assertEqual(conn.path, self.container_path)
+        self.assertTrue('X-Timestamp' in conn.req_headers)
+        self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
+
     def test_direct_delete_container_error(self):
         with mocked_http_conn(500) as conn:
             try:
@@ -536,6 +549,19 @@ class TestDirectClient(unittest.TestCase):
         self.assertEqual(conn.path, self.obj_path)
         self.assertEqual(resp, None)
 
+    def test_direct_delete_object_with_timestamp(self):
+        # ensure timestamp is different from any that might be auto-generated
+        timestamp = Timestamp(time.time() - 100)
+        headers = {'X-Timestamp': timestamp.internal}
+        with mocked_http_conn(200) as conn:
+            direct_client.direct_delete_object(
+                self.node, self.part, self.account, self.container, self.obj,
+                headers=headers)
+        self.assertEqual(conn.method, 'DELETE')
+        self.assertEqual(conn.path, self.obj_path)
+        self.assertTrue('X-Timestamp' in conn.req_headers)
+        self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
+
     def test_direct_delete_object_error(self):
         with mocked_http_conn(503) as conn:
             try:

From e4d326b5a7dd186d762726faa45733ff2900343d Mon Sep 17 00:00:00 2001
From: Kazuhiro MIYAHARA
Date: Thu, 19 Feb 2015 17:38:10 +0900
Subject: [PATCH 03/98] Fix SLO conflict response

This patch fixes Swift to respond "409 Conflict" when a segment object
path in the manifest of an SLO PUT is the same as the requested object
path. This is because the request would overwrite the segment, which
would then inevitably cause "409 Conflict" on GET of the SLO.
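In essence, the added validation compares each segment path in the
submitted manifest against the path of the PUT request itself. A minimal
sketch of that check as a standalone function (the function name is
hypothetical; HTTPConflict and quote are the helpers the actual slo.py
change uses):

    from swift.common.swob import HTTPConflict
    from swift.common.utils import quote

    def reject_self_referencing_segment(req_path, vrs, account, obj_name):
        # build the segment's full object path the way slo.py does, and
        # refuse a manifest that lists the object being PUT as a segment
        obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')])
        if req_path == quote(obj_path):
            raise HTTPConflict(
                'Manifest object name "%s" cannot be included in the '
                'manifest' % obj_name)
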
e.g.: request: PUT "http://hostname/v1/AUTH_account/container/segment_object_00?multipart-manifest=put" manifest file: [{"path" : "container/segment_object_00", "etag" : "", "size_bytes" : }, {"path" : "container/segment_object_01", "etag" : "", "size_bytes" : }, {"path" : "container/segment_object_02", "etag" : "", "size_bytes" : }] Change-Id: I4f4f7b9dbeb6a7c355b801c7e0ae560aa19a70b4 Closes-Bug: 1417936 --- AUTHORS | 1 + swift/common/middleware/slo.py | 5 +++ test/unit/common/middleware/test_slo.py | 52 ++++++++++++++++++++++++- 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index be3c5deeb9..fa2cee7458 100644 --- a/AUTHORS +++ b/AUTHORS @@ -223,3 +223,4 @@ Hua Zhang (zhuadl@cn.ibm.com) Jian Zhang (jian.zhang@intel.com) Ning Zhang (ning@zmanda.com) Yuan Zhou (yuan.zhou@intel.com) +Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index e8f1707e28..d8df829981 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -586,6 +586,11 @@ class StaticLargeObject(object): if isinstance(obj_name, unicode): obj_name = obj_name.encode('utf-8') obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')]) + if req.path == quote(obj_path): + raise HTTPConflict( + 'Manifest object name "%s" ' + 'cannot be included in the manifest' + % obj_name) try: seg_size = int(seg_dict['size_bytes']) except (ValueError, TypeError): diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 4160d91d46..d70a25ccc4 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -24,7 +24,7 @@ from swift.common import swob, utils from swift.common.exceptions import ListingIterError, SegmentError from swift.common.middleware import slo from swift.common.swob import Request, Response, HTTPException -from swift.common.utils import json +from swift.common.utils import quote, json from test.unit.common.middleware.helpers import FakeSwift @@ -139,6 +139,11 @@ class TestSloPutManifest(SloTestCase): swob.HTTPOk, {'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'}, None) + self.app.register( + 'HEAD', '/v1/AUTH_test/cont/object2', + swob.HTTPOk, + {'Content-Length': '100', 'Etag': 'etagoftheobjectsegment'}, + None) self.app.register( 'HEAD', '/v1/AUTH_test/cont/object\xe2\x99\xa1', swob.HTTPOk, @@ -149,6 +154,11 @@ class TestSloPutManifest(SloTestCase): swob.HTTPOk, {'Content-Length': '10', 'Etag': 'etagoftheobjectsegment'}, None) + self.app.register( + 'HEAD', u'/v1/AUTH_test/cont/あ_1', + swob.HTTPOk, + {'Content-Length': '1', 'Etag': 'a'}, + None) self.app.register( 'PUT', '/v1/AUTH_test/c/man', swob.HTTPCreated, {}, None) self.app.register( @@ -391,6 +401,46 @@ class TestSloPutManifest(SloTestCase): self.assertEquals(errors[4][0], '/checktest/slob') self.assertEquals(errors[4][1], 'Etag Mismatch') + def test_handle_multipart_put_manifest_equal_slo(self): + test_json_data = json.dumps([{'path': '/cont/object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}]) + req = Request.blank( + '/v1/AUTH_test/cont/object?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'}, + body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '409 Conflict') + self.assertEqual(self.app.call_count, 0) + + def test_handle_multipart_put_manifest_equal_slo_non_ascii(self): + test_json_data = json.dumps([{'path': u'/cont/あ_1', + 'etag': 'a', + 
'size_bytes': 1}])
+        path = quote(u'/v1/AUTH_test/cont/あ_1')
+        req = Request.blank(
+            path + '?multipart-manifest=put',
+            environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
+            body=test_json_data)
+        status, headers, body = self.call_slo(req)
+        self.assertEqual(status, '409 Conflict')
+        self.assertEqual(self.app.call_count, 0)
+
+    def test_handle_multipart_put_manifest_equal_last_segment(self):
+        test_json_data = json.dumps([{'path': '/cont/object',
+                                      'etag': 'etagoftheobjectsegment',
+                                      'size_bytes': 100},
+                                     {'path': '/cont/object2',
+                                      'etag': 'etagoftheobjectsegment',
+                                      'size_bytes': 100}])
+        req = Request.blank(
+            '/v1/AUTH_test/cont/object2?multipart-manifest=put',
+            environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
+            body=test_json_data)
+        status, headers, body = self.call_slo(req)
+        self.assertEqual(status, '409 Conflict')
+        self.assertEqual(self.app.call_count, 1)
+
 
 class TestSloDeleteManifest(SloTestCase):

From a2a5b6aa6664b61fa31d25f329426ff089372f17 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Thu, 16 Apr 2015 11:42:12 -0700
Subject: [PATCH 04/98] Functional test for SLO PUT overwriting one of its own
 segments

Change-Id: I4855816848f4fdb148d0b82735cf79bc68429617
---
 test/functional/tests.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/test/functional/tests.py b/test/functional/tests.py
index 95f168e6e8..3fbbdd784e 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -2259,6 +2259,26 @@ class TestSlo(Base):
         else:
             self.fail("Expected ResponseError but didn't get it")
 
+    def test_slo_overwrite_segment_with_manifest(self):
+        file_item = self.env.container.file("seg_b")
+        try:
+            file_item.write(
+                json.dumps([
+                    {'size_bytes': 1024 * 1024,
+                     'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(),
+                     'path': '/%s/%s' % (self.env.container.name, 'seg_a')},
+                    {'size_bytes': 1024 * 1024,
+                     'etag': hashlib.md5('b' * 1024 * 1024).hexdigest(),
+                     'path': '/%s/%s' % (self.env.container.name, 'seg_b')},
+                    {'size_bytes': 1024 * 1024,
+                     'etag': hashlib.md5('c' * 1024 * 1024).hexdigest(),
+                     'path': '/%s/%s' % (self.env.container.name, 'seg_c')}]),
+                parms={'multipart-manifest': 'put'})
+        except ResponseError as err:
+            self.assertEqual(409, err.status)
+        else:
+            self.fail("Expected ResponseError but didn't get it")
+
     def test_slo_copy(self):
         file_item = self.env.container.file("manifest-abcde")
         file_item.copy(self.env.container.name, "copied-abcde")

From f6482bdece27144ee083a53d696469528d7940c2 Mon Sep 17 00:00:00 2001
From: Thierry Carrez
Date: Thu, 16 Apr 2015 22:08:47 +0200
Subject: [PATCH 05/98] Set default branch to stable/kilo

Open stable/kilo branch by setting defaultbranch for git-review.

Change-Id: I81bcda30f99173416eaaa3f1d42da32f3ab5b6d2
---
 .gitreview | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitreview b/.gitreview
index d7c52c0593..94552c9bcb 100644
--- a/.gitreview
+++ b/.gitreview
@@ -2,3 +2,4 @@
 host=review.openstack.org
 port=29418
 project=openstack/swift.git
+defaultbranch=stable/kilo

From 2203b46e3f14ef68a090aaea284f0a0442bbb86f Mon Sep 17 00:00:00 2001
From: Tushar Gohad
Date: Wed, 15 Apr 2015 17:34:48 -0700
Subject: [PATCH 06/98] Bump PyECLib version from 1.0.3 to 1.0.7

In addition to fixing several bugs, 1.0.7 eliminates the need for some
work-around code in Swift. This code was only there to hide issues in
the current version, but it also ends up breaking some third-party
integration.
In order to enable the expected functionality and to avoid dealing with
deprecation issues right from the beginning, we need to bump the minimum
PyECLib requirement to 1.0.7.

Change-Id: I03e059e7335656c22be28ffd6157b56e13bdfc1b
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 27d507901a..9f81b844ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ netifaces>=0.5,!=0.10.0,!=0.10.1
 pastedeploy>=1.3.3
 simplejson>=2.0.9
 xattr>=0.4
-PyECLib>=1.0.3
+PyECLib>=1.0.7

From c5c281ba6dd97a301ba80511e3356d6ca536d701 Mon Sep 17 00:00:00 2001
From: Minwoo B
Date: Mon, 20 Apr 2015 17:03:25 -0500
Subject: [PATCH 07/98] Included step in development_saio.rst for installing
 dependencies in requirements.txt.

Change-Id: I6ed1704148e5ae1e3164d10080c350d81856f7a9
---
 doc/source/development_saio.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index 3bd94872dd..49ef6eede0 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -176,7 +176,7 @@ Getting the code
 
 #. Build a development installation of swift::
 
-      cd $HOME/swift; sudo python setup.py develop; cd -
+      cd $HOME/swift; sudo pip install -r requirements.txt; sudo python setup.py develop; cd -
 
    Fedora 19 or later users might have to perform the following if
    development installation of swift fails::

From 0c391d6daffe1943f6def803db42e08e6d6846d2 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 20 Apr 2015 16:47:10 -0700
Subject: [PATCH 08/98] SAIO instructions: ensure ~/bin exists before copying
 into it

Change-Id: I16cd211b00b529ccc4b46f6b10497c32b6741896
---
 doc/source/development_saio.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst
index 3bd94872dd..dc952df3ab 100644
--- a/doc/source/development_saio.rst
+++ b/doc/source/development_saio.rst
@@ -409,6 +409,7 @@ Setting up scripts for running Swift
 
 #. Copy the SAIO scripts for resetting the environment::
 
+      mkdir -p $HOME/bin
       cd $HOME/swift/doc; cp saio/bin/* $HOME/bin; cd -
       chmod +x $HOME/bin/*

From 27f6fba5c3b9e0461d52c1deffe32130e7e9af51 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 20 Apr 2015 00:18:25 -0700
Subject: [PATCH 09/98] Use reconstruct instead of decode/encode

With PyECLib bumped up to 1.0.7 on global requirements, we can use the
"reconstruct" function directly instead of the current hack doing
decode/encode in the reconstructor. The hack was only there to work
around a reconstruction bug in PyECLib < 1.0.7 (strictly, in the
jerasure scheme), so we don't have to do decode/encode anymore.
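To put the change in concrete terms: with PyECLib >= 1.0.7 a single
missing fragment can be rebuilt directly, instead of decoding the whole
segment and re-encoding every fragment. A minimal sketch against
PyECLib's ECDriver (the k/m values and ec_type here are illustrative,
not Swift's actual policy configuration):

    from pyeclib.ec_iface import ECDriver

    driver = ECDriver(k=10, m=4, ec_type='jerasure_rs_vand')
    fragments = driver.encode('x' * 1024 * 1024)

    # suppose one fragment archive is lost
    missing = 3
    available = fragments[:missing] + fragments[missing + 1:]

    # old work-around: full decode, then re-encode to recover one fragment
    rebuilt = driver.encode(driver.decode(available))[missing]

    # with >= 1.0.7: rebuild just the missing fragment directly
    rebuilt = driver.reconstruct(available, [missing])[0]
    assert rebuilt == fragments[missing]
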
Co-Authored-By: Clay Gerrard Change-Id: I69aae495670e3d0bdebe665f73915547a4d56f99 --- swift/obj/reconstructor.py | 10 ++------ test/unit/obj/test_reconstructor.py | 39 +++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index db078de2fc..4385e42cc9 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -276,14 +276,8 @@ class ObjectReconstructor(Daemon): rebuilt_fragment_iter) def _reconstruct(self, policy, fragment_payload, frag_index): - # XXX with jerasure this doesn't work if we need to rebuild a - # parity fragment, and not all data fragments are available - # segment = policy.pyeclib_driver.reconstruct( - # fragment_payload, [frag_index])[0] - - # for safety until pyeclib 1.0.7 we'll just use decode and encode - segment = policy.pyeclib_driver.decode(fragment_payload) - return policy.pyeclib_driver.encode(segment)[frag_index] + return policy.pyeclib_driver.reconstruct(fragment_payload, + [frag_index])[0] def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index): """ diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index b7254f4343..23e70543f7 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -2347,6 +2347,45 @@ class TestObjectReconstructor(unittest.TestCase): self.assertEqual(md5(fixed_body).hexdigest(), md5(broken_body).hexdigest()) + def test_reconstruct_parity_fa_with_data_node_failure(self): + job = { + 'partition': 0, + 'policy': self.policy, + } + part_nodes = self.policy.object_ring.get_part_nodes(0) + node = part_nodes[-4] + metadata = { + 'name': '/a/c/o', + 'Content-Length': 0, + 'ETag': 'etag', + } + + # make up some data (trim some amount to make it unaligned with + # segment size) + test_data = ('rebuild' * self.policy.ec_segment_size)[:-454] + etag = md5(test_data).hexdigest() + ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data) + + # the scheme is 10+4, so this gets a parity node + broken_body = ec_archive_bodies.pop(-4) + + base_responses = list((200, body) for body in ec_archive_bodies) + for error in (Timeout(), 404, Exception('kaboom!')): + responses = list(base_responses) + # grab a data node index + error_index = random.randint(0, self.policy.ec_ndata - 1) + responses[error_index] = (error, '') + headers = {'X-Object-Sysmeta-Ec-Etag': etag} + codes, body_iter = zip(*responses) + with mocked_http_conn(*codes, body_iter=body_iter, + headers=headers): + df = self.reconstructor.reconstruct_fa( + job, node, dict(metadata)) + fixed_body = ''.join(df.reader()) + self.assertEqual(len(fixed_body), len(broken_body)) + self.assertEqual(md5(fixed_body).hexdigest(), + md5(broken_body).hexdigest()) + def test_reconstruct_fa_errors_fails(self): job = { 'partition': 0, From 2080f7dbd897d6130542dbf88e37641a41625eb5 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 26 Feb 2015 15:16:22 +0000 Subject: [PATCH 10/98] Fix tempauth acl checks when simplejson has no speedups As documented in linked bug report, tempauth unit tests were seen to fail on a system where simplejson was installed but without the speedups extension. This is because the tempauth account acl validation checks that values are type str, but without the speedups extension the json parser is returning unicode objects. Fix is to have the acl validator tolerate those objects being unicode or str. 
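The type difference at the root of this bug is easy to see in a
Python 2 session; whether simplejson's C speedups extension is
available changes the string type returned for ASCII values (a sketch
of the behavior, as observed on Python 2):

    import simplejson

    acl = simplejson.loads('{"read-only": ["a", "b"]}')
    grantee = acl['read-only'][0]
    # with the C speedups extension: type(grantee) is str
    # with the pure-python decoder: type(grantee) is unicode
    # so the validator must accept either one:
    assert isinstance(grantee, basestring)
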
Also change common/bufferedhttp.py to coerce ring device to type str when constructing a path, in order to avoid a UnicodeDecodeError when httplib sends a message that has non-ascii header values. Change-Id: I01524282cbaa25dc4b6dfa09f3f4723516cdba99 Closes-Bug: 1425776 --- swift/common/bufferedhttp.py | 5 ++ swift/common/middleware/tempauth.py | 8 +-- test/unit/common/middleware/test_tempauth.py | 30 +++++++--- test/unit/common/test_bufferedhttp.py | 60 ++++++++++++++------ 4 files changed, 73 insertions(+), 30 deletions(-) diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index d4a977c21e..2b3ec1609d 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -155,6 +155,11 @@ def http_connect(ipaddr, port, device, partition, method, path, path = path.encode("utf-8") except UnicodeError as e: logging.exception(_('Error encoding to UTF-8: %s'), str(e)) + if isinstance(device, unicode): + try: + device = device.encode("utf-8") + except UnicodeError as e: + logging.exception(_('Error encoding to UTF-8: %s'), str(e)) path = quote('/' + device + '/' + str(partition) + path) return http_connect_raw( ipaddr, port, method, path, headers, query_string, ssl) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index 93f55ff031..dfde519f42 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -447,16 +447,16 @@ class TempAuth(object): # on ACLs, TempAuth is not such an auth system. At this point, # it thinks it is authoritative. if key not in tempauth_acl_keys: - return 'Key %r not recognized' % key + return "Key '%s' not recognized" % key for key in tempauth_acl_keys: if key not in result: continue if not isinstance(result[key], list): - return 'Value for key %r must be a list' % key + return "Value for key '%s' must be a list" % key for grantee in result[key]: - if not isinstance(grantee, str): - return 'Elements of %r list must be strings' % key + if not isinstance(grantee, basestring): + return "Elements of '%s' list must be strings" % key # Everything looks fine, no errors found internal_hdr = get_sys_meta_prefix('account') + 'core-access-control' diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py index b9be84bb92..e8af310c82 100644 --- a/test/unit/common/middleware/test_tempauth.py +++ b/test/unit/common/middleware/test_tempauth.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2011-2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import json import unittest from contextlib import contextmanager, nested from base64 import b64encode @@ -22,7 +24,7 @@ import mock from swift.common.middleware import tempauth as auth from swift.common.middleware.acl import format_acl from swift.common.swob import Request, Response -from swift.common.utils import split_path, get_swift_info +from swift.common.utils import split_path NO_CONTENT_RESP = (('204 No Content', {}, ''),) # mock server response @@ -111,10 +113,6 @@ class TestAuth(unittest.TestCase): def setUp(self): self.test_auth = auth.filter_factory({})(FakeApp()) - def test_swift_info(self): - info = get_swift_info() - self.assertTrue(info['tempauth']['account_acls']) - def _make_request(self, path, **kwargs): req = Request.blank(path, **kwargs) req.environ['swift.cache'] = FakeMemcache() @@ -1200,7 +1198,8 @@ class TestAccountAcls(unittest.TestCase): user_groups = test_auth._get_user_groups('admin', 'admin:user', 'AUTH_admin') good_headers = {'X-Auth-Token': 'AUTH_t'} - good_acl = '{"read-only":["a","b"]}' + good_acl = json.dumps({"read-only": [u"á", "b"]}) + bad_list_types = '{"read-only": ["a", 99]}' bad_acl = 'syntactically invalid acl -- this does not parse as JSON' wrong_acl = '{"other-auth-system":["valid","json","but","wrong"]}' bad_value_acl = '{"read-write":["fine"],"admin":"should be a list"}' @@ -1220,7 +1219,9 @@ class TestAccountAcls(unittest.TestCase): req = self._make_request(target, user_groups=user_groups, headers=dict(good_headers, **update)) resp = req.get_response(test_auth) - self.assertEquals(resp.status_int, 204) + self.assertEquals(resp.status_int, 204, + 'Expected 204, got %s, response body: %s' + % (resp.status_int, resp.body)) # syntactically valid empty acls should go through for acl in empty_acls: @@ -1243,14 +1244,25 @@ class TestAccountAcls(unittest.TestCase): req = self._make_request(target, headers=dict(good_headers, **update)) resp = req.get_response(test_auth) self.assertEquals(resp.status_int, 400) - self.assertEquals(errmsg % "Key '", resp.body[:39]) + self.assertTrue(resp.body.startswith( + errmsg % "Key 'other-auth-system' not recognized"), resp.body) # acls with good keys but bad values also get a 400 update = {'x-account-access-control': bad_value_acl} req = self._make_request(target, headers=dict(good_headers, **update)) resp = req.get_response(test_auth) self.assertEquals(resp.status_int, 400) - self.assertEquals(errmsg % "Value", resp.body[:39]) + self.assertTrue(resp.body.startswith( + errmsg % "Value for key 'admin' must be a list"), resp.body) + + # acls with non-string-types in list also get a 400 + update = {'x-account-access-control': bad_list_types} + req = self._make_request(target, headers=dict(good_headers, **update)) + resp = req.get_response(test_auth) + self.assertEquals(resp.status_int, 400) + self.assertTrue(resp.body.startswith( + errmsg % "Elements of 'read-only' list must be strings"), + resp.body) # acls with wrong json structure also get a 400 update = {'x-account-access-control': not_dict_acl} diff --git a/test/unit/common/test_bufferedhttp.py b/test/unit/common/test_bufferedhttp.py index a663a3d121..6e51973147 100644 --- a/test/unit/common/test_bufferedhttp.py +++ b/test/unit/common/test_bufferedhttp.py @@ -1,4 +1,5 @@ -# Copyright (c) 2010-2012 OpenStack Foundation +# -*- coding: utf-8 -*- +# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +13,7 @@ # implied. 
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import mock
 import unittest
 
@@ -22,6 +24,24 @@ from eventlet import spawn, Timeout, listen
 
 from swift.common import bufferedhttp
 
+
+class MockHTTPSConnection(object):
+
+    def __init__(self, hostport):
+        pass
+
+    def putrequest(self, method, path, skip_host=0):
+        self.path = path
+        pass
+
+    def putheader(self, header, *values):
+        # Verify that path and values can be safely joined
+        # Essentially what Python 2.7 does that caused us problems.
+        '\r\n\t'.join((self.path,) + values)
+
+    def endheaders(self):
+        pass
+
+
 class TestBufferedHTTP(unittest.TestCase):
 
     def test_http_connect(self):
@@ -76,22 +96,6 @@ class TestBufferedHTTP(unittest.TestCase):
             raise Exception(err)
 
     def test_nonstr_header_values(self):
-
-        class MockHTTPSConnection(object):
-
-            def __init__(self, hostport):
-                pass
-
-            def putrequest(self, method, path, skip_host=0):
-                pass
-
-            def putheader(self, header, *values):
-                # Essentially what Python 2.7 does that caused us problems.
-                '\r\n\t'.join(values)
-
-            def endheaders(self):
-                pass
-
         origHTTPSConnection = bufferedhttp.HTTPSConnection
         bufferedhttp.HTTPSConnection = MockHTTPSConnection
         try:
@@ -106,6 +110,28 @@ class TestBufferedHTTP(unittest.TestCase):
         finally:
             bufferedhttp.HTTPSConnection = origHTTPSConnection
 
+    def test_unicode_values(self):
+        # simplejson may decode the ring devices as str or unicode
+        # depending on whether speedups is installed and/or the values are
+        # non-ascii. Verify all types are tolerated in combination with
+        # whatever type path might be and possible encoded non-ascii in
+        # a header value.
+        with mock.patch('swift.common.bufferedhttp.HTTPSConnection',
+                        MockHTTPSConnection):
+            for dev in ('sda', u'sda', u'sdá', u'sdá'.encode('utf-8')):
+                for path in (
+                        '/v1/a', u'/v1/a', u'/v1/á', u'/v1/á'.encode('utf-8')):
+                    for header in ('abc', u'abc', u'ábc'.encode('utf-8')):
+                        try:
+                            bufferedhttp.http_connect(
+                                '127.0.0.1', 8080, dev, 1, 'GET', path,
+                                headers={'X-Container-Meta-Whatever': header},
+                                ssl=True)
+                        except Exception as e:
+                            self.fail(
+                                'Exception %r for device=%r path=%r header=%r'
+                                % (e, dev, path, header))
+
 
 if __name__ == '__main__':
     unittest.main()

From 51e31c5c7147f3ba61437e132b12b491ca718ce4 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Wed, 15 Apr 2015 15:31:06 -0700
Subject: [PATCH 11/98] Don't apply the wrong Etag validation to rebuilt
 fragments

Because of the object-server's interaction with the ssync sender's
X-Backend-Replication-Headers, when an object (or fragment archive) is
pushed unmodified to another node, its ETag value is duplicated into
the receiving end's metadata as Etag. This interacts poorly with the
reconstructor's RebuildingECDiskFileStream, which cannot know ahead of
time the ETag of the fragment archive being rebuilt.

Don't send the Etag from the local source fragment archive being used
as the basis for the rebuilt fragment archive's metadata along to
ssync.
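The fix itself is small: when the metadata for a rebuilt fragment
archive is put together, both spellings of the etag key are dropped so
the receiving object-server recalculates it. A sketch of that metadata
handling (the helper name is hypothetical; the real change lives in
RebuildingECDiskFileStream, shown in the diff below):

    def rebuilt_fragment_metadata(source_metadata, frag_index):
        # copy the local source fragment archive's metadata, point it at
        # the fragment index being rebuilt, and drop any etag so the
        # receiving end recomputes it instead of inheriting a stale value
        metadata = dict(source_metadata)
        metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index
        for etag_key in ('ETag', 'Etag'):
            metadata.pop(etag_key, None)
        return metadata
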
Closes-Bug: 1446800 Change-Id: Ie59ad93a67a7f439c9a84cd9cff31540f97f334a --- swift/obj/reconstructor.py | 34 +++---- test/probe/common.py | 5 + test/probe/test_reconstructor_revert.py | 120 +++++++++++++++++++++++- test/unit/obj/test_reconstructor.py | 18 +--- 4 files changed, 143 insertions(+), 34 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 0ee2afbf6d..db078de2fc 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -49,6 +49,21 @@ SYNC, REVERT = ('sync_only', 'sync_revert') hubs.use_hub(get_hub()) +def _get_partners(frag_index, part_nodes): + """ + Returns the left and right partners of the node whose index is + equal to the given frag_index. + + :param frag_index: a fragment index + :param part_nodes: a list of primary nodes + :returns: [, ] + """ + return [ + part_nodes[(frag_index - 1) % len(part_nodes)], + part_nodes[(frag_index + 1) % len(part_nodes)], + ] + + class RebuildingECDiskFileStream(object): """ This class wraps the the reconstructed fragment archive data and @@ -65,7 +80,8 @@ class RebuildingECDiskFileStream(object): # update the FI and delete the ETag, the obj server will # recalc on the other side... self.metadata['X-Object-Sysmeta-Ec-Frag-Index'] = frag_index - del self.metadata['ETag'] + for etag_key in ('ETag', 'Etag'): + self.metadata.pop(etag_key, None) self.frag_index = frag_index self.rebuilt_fragment_iter = rebuilt_fragment_iter @@ -382,20 +398,6 @@ class ObjectReconstructor(Daemon): self.kill_coros() self.last_reconstruction_count = self.reconstruction_count - def _get_partners(self, frag_index, part_nodes): - """ - Returns the left and right partners of the node whose index is - equal to the given frag_index. - - :param frag_index: a fragment index - :param part_nodes: a list of primary nodes - :returns: [, ] - """ - return [ - part_nodes[(frag_index - 1) % len(part_nodes)], - part_nodes[(frag_index + 1) % len(part_nodes)], - ] - def _get_hashes(self, policy, path, recalculate=None, do_listdir=False): df_mgr = self._df_router[policy] hashed, suffix_hashes = tpool_reraise( @@ -715,7 +717,7 @@ class ObjectReconstructor(Daemon): job_type=SYNC, frag_index=frag_index, suffixes=suffixes, - sync_to=self._get_partners(frag_index, part_nodes), + sync_to=_get_partners(frag_index, part_nodes), ) # ssync callback to rebuild missing fragment_archives sync_job['sync_diskfile_builder'] = self.reconstruct_fa diff --git a/test/probe/common.py b/test/probe/common.py index 1311cc178a..7d1e754014 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -299,6 +299,11 @@ class ProbeTest(unittest.TestCase): path_parts.append(str(part)) return os.path.join(*path_parts) + def config_number(self, node): + _server_type, config_number = get_server_number( + node['port'], self.port2server) + return config_number + def get_to_final_state(self): # these .stop()s are probably not strictly necessary, # but may prevent race conditions diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 2a7bd7c834..39739b617d 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -18,6 +18,9 @@ from hashlib import md5 import unittest import uuid import os +import random +import shutil +from collections import defaultdict from test.probe.common import ECProbeTest @@ -25,6 +28,7 @@ from swift.common import direct_client from swift.common.storage_policy import EC_POLICY from swift.common.manager import Manager from swift.common.utils import 
+from swift.obj import reconstructor

 from swiftclient import client

@@ -233,7 +237,7 @@ class TestReconstructorRevert(ECProbeTest):
         # fire up reconstructor on handoff nodes only
         for hnode in hnodes:
             hnode_id = (hnode['port'] - 6000) / 10
-            self.reconstructor.once(number=hnode_id, override_devices=['sdb8'])
+            self.reconstructor.once(number=hnode_id)

         # check the first node to make sure it's gone
         try:
@@ -253,6 +257,120 @@ class TestReconstructorRevert(ECProbeTest):
             self.fail('Node data on %r was not fully destroyed!' %
                       (onodes[0]))

+    def test_reconstruct_from_reverted_fragment_archive(self):
+        headers = {'X-Storage-Policy': self.policy.name}
+        client.put_container(self.url, self.token, self.container_name,
+                             headers=headers)
+
+        # get our node lists
+        opart, onodes = self.object_ring.get_nodes(
+            self.account, self.container_name, self.object_name)
+
+        # find a primary server that only has one of its devices in the
+        # primary node list
+        group_nodes_by_config = defaultdict(list)
+        for n in onodes:
+            group_nodes_by_config[self.config_number(n)].append(n)
+        for config_number, node_list in group_nodes_by_config.items():
+            if len(node_list) == 1:
+                break
+        else:
+            self.fail('ring balancing did not use all available nodes')
+        primary_node = node_list[0]
+        primary_device = self.device_dir('object', primary_node)
+        self.kill_drive(primary_device)
+
+        # PUT object
+        contents = Body()
+        etag = client.put_object(self.url, self.token, self.container_name,
+                                 self.object_name, contents=contents)
+        self.assertEqual(contents.etag, etag)
+
+        # fix the primary device and sanity GET
+        self.revive_drive(primary_device)
+        self.assertEqual(etag, self.proxy_get())
+
+        # find a handoff holding the fragment
+        for hnode in self.object_ring.get_more_nodes(opart):
+            try:
+                reverted_fragment_etag = self.direct_get(hnode, opart)
+            except direct_client.DirectClientException as err:
+                if err.http_status != 404:
+                    raise
+            else:
+                break
+        else:
+            self.fail('Unable to find handoff fragment!')
+
+        # we'll force the handoff device to revert instead of potentially
+        # racing with rebuild by deleting any other fragments that may be on
+        # the same server
+        handoff_fragment_etag = None
+        for node in onodes:
+            if node['port'] == hnode['port']:
+                # we'll keep track of the etag of this fragment we're removing
+                # in case we need it later (cue foreshadowing music)...
+                try:
+                    handoff_fragment_etag = self.direct_get(node, opart)
+                except direct_client.DirectClientException as err:
+                    if err.http_status != 404:
+                        raise
+                    # this just means our handoff device was on the same
+                    # machine as the primary!
+                    continue
+                # use the primary node's device - not the hnode device
+                part_dir = self.storage_dir('object', node, part=opart)
+                shutil.rmtree(part_dir, True)
+
+        # revert from handoff device with reconstructor
+        self.reconstructor.once(number=self.config_number(hnode))
+
+        # verify fragment reverted to primary server
+        self.assertEqual(reverted_fragment_etag,
+                         self.direct_get(primary_node, opart))
+
+        # now we'll remove some data on one of the primary node's partners
+        partner = random.choice(reconstructor._get_partners(
+            primary_node['index'], onodes))
+
+        try:
+            rebuilt_fragment_etag = self.direct_get(partner, opart)
+        except direct_client.DirectClientException as err:
+            if err.http_status != 404:
+                raise
+            # partner already had its fragment removed
+            if (handoff_fragment_etag is not None and
+                    hnode['port'] == partner['port']):
+                # oh, well that makes sense then...
+                rebuilt_fragment_etag = handoff_fragment_etag
+            else:
+                # I wonder what happened?
+                self.fail('Partner inexplicably missing fragment!')
+        part_dir = self.storage_dir('object', partner, part=opart)
+        shutil.rmtree(part_dir, True)
+
+        # sanity, it's gone
+        try:
+            self.direct_get(partner, opart)
+        except direct_client.DirectClientException as err:
+            if err.http_status != 404:
+                raise
+        else:
+            self.fail('successful GET of removed partner fragment archive!?')
+
+        # and force the primary node to do a rebuild
+        self.reconstructor.once(number=self.config_number(primary_node))
+
+        # and validate the partner's rebuilt_fragment_etag
+        try:
+            self.assertEqual(rebuilt_fragment_etag,
+                             self.direct_get(partner, opart))
+        except direct_client.DirectClientException as err:
+            if err.http_status != 404:
+                raise
+            else:
+                self.fail('Did not find rebuilt fragment on partner node')
+

 if __name__ == "__main__":
     unittest.main()

diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index 93a50e84de..b7254f4343 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -293,22 +293,6 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
             writer.commit(timestamp)
         return df

-    def debug_wtf(self):
-        # won't include this in the final, just handy reminder of where
-        # things are...
-        for pol in [p for p in POLICIES if p.policy_type == EC_POLICY]:
-            obj_ring = pol.object_ring
-            for part_num in self.part_nums:
-                print "\n part_num %s " % part_num
-                part_nodes = obj_ring.get_part_nodes(int(part_num))
-                print "\n part_nodes %s " % part_nodes
-                for local_dev in obj_ring.devs:
-                    partners = self.reconstructor._get_partners(
-                        local_dev['id'], obj_ring, part_num)
-                    if partners:
-                        print "\n local_dev %s \n partners %s " % (local_dev,
-                                                                   partners)
-
     def assert_expected_jobs(self, part_num, jobs):
         for job in jobs:
             del job['path']
@@ -702,7 +686,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
             part_nodes = obj_ring.get_part_nodes(int(part_num))
             primary_ids = [n['id'] for n in part_nodes]
             for node in part_nodes:
-                partners = self.reconstructor._get_partners(
+                partners = object_reconstructor._get_partners(
                     node['index'], part_nodes)
                 left = partners[0]['id']
                 right = partners[1]['id']

From 281cb1c210f0292bad190cabaae447145fa5eade Mon Sep 17 00:00:00 2001
From: Tushar Gohad
Date: Wed, 15 Apr 2015 17:34:48 -0700
Subject: [PATCH 12/98] Bump PyECLib version from 1.0.3 to 1.0.7

In addition to fixing several bugs, 1.0.7 eliminates the need for some
workaround code in Swift. That code existed only to hide issues in
earlier PyECLib versions, but it also ended up breaking some
third-party integrations. In order to enable the expected functionality
and to avoid dealing with deprecation issues right from the beginning,
we need to bump the minimum PyECLib requirement to 1.0.7.
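
For context, a minimal sketch of the PyECLib API that this bump
unblocks (illustrative only; assumes a 10+4 jerasure scheme, in which
fragment index 12 is one of the parity fragments):

    from pyeclib.ec_iface import ECDriver

    driver = ECDriver(k=10, m=4, ec_type='jerasure_rs_vand')
    fragments = driver.encode('some object data')  # 14 fragments

    # Pre-1.0.7 workaround: decode back to the full data, then
    # re-encode every fragment just to recover the one that is missing.
    data = driver.decode(fragments[:10])
    rebuilt = driver.encode(data)[12]

    # With >= 1.0.7, reconstruct the missing fragment directly from
    # the surviving ones.
    available = fragments[:12] + fragments[13:]
    rebuilt = driver.reconstruct(available, [12])[0]
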
Closes-Bug: 1446727
Change-Id: I03e059e7335656c22be28ffd6157b56e13bdfc1b
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 27d507901a..9f81b844ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ netifaces>=0.5,!=0.10.0,!=0.10.1
 pastedeploy>=1.3.3
 simplejson>=2.0.9
 xattr>=0.4
-PyECLib>=1.0.3
+PyECLib>=1.0.7

From cd7c58e93690fd25f5266754d0593c656dd51e2e Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 20 Apr 2015 00:18:25 -0700
Subject: [PATCH 13/98] Use reconstruct instead of decode/encode

With PyECLib bumped up to 1.0.7 in global requirements, we can use the
"reconstruct" function directly instead of the current hack doing
decode/encode in the reconstructor. The hack was only there to work
around a reconstruction bug in PyECLib < 1.0.7 (specifically in the
jerasure scheme), so we don't have to do decode/encode anymore.

Closes-Bug: 1446801
Co-Authored-By: Clay Gerrard
Change-Id: I69aae495670e3d0bdebe665f73915547a4d56f99
---
 swift/obj/reconstructor.py          | 10 ++------
 test/unit/obj/test_reconstructor.py | 39 +++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 8 deletions(-)

diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index db078de2fc..4385e42cc9 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -276,14 +276,8 @@ class ObjectReconstructor(Daemon):
                                             rebuilt_fragment_iter)

     def _reconstruct(self, policy, fragment_payload, frag_index):
-        # XXX with jerasure this doesn't work if we need to rebuild a
-        # parity fragment, and not all data fragments are available
-        # segment = policy.pyeclib_driver.reconstruct(
-        #     fragment_payload, [frag_index])[0]
-
-        # for safety until pyeclib 1.0.7 we'll just use decode and encode
-        segment = policy.pyeclib_driver.decode(fragment_payload)
-        return policy.pyeclib_driver.encode(segment)[frag_index]
+        return policy.pyeclib_driver.reconstruct(fragment_payload,
+                                                 [frag_index])[0]

     def make_rebuilt_fragment_iter(self, responses, path, policy, frag_index):
         """
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index b7254f4343..23e70543f7 100755
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -2347,6 +2347,45 @@ class TestObjectReconstructor(unittest.TestCase):
         self.assertEqual(md5(fixed_body).hexdigest(),
                          md5(broken_body).hexdigest())

+    def test_reconstruct_parity_fa_with_data_node_failure(self):
+        job = {
+            'partition': 0,
+            'policy': self.policy,
+        }
+        part_nodes = self.policy.object_ring.get_part_nodes(0)
+        node = part_nodes[-4]
+        metadata = {
+            'name': '/a/c/o',
+            'Content-Length': 0,
+            'ETag': 'etag',
+        }
+
+        # make up some data (trim some amount to make it unaligned with
+        # segment size)
+        test_data = ('rebuild' * self.policy.ec_segment_size)[:-454]
+        etag = md5(test_data).hexdigest()
+        ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
+
+        # the scheme is 10+4, so this gets a parity node
+        broken_body = ec_archive_bodies.pop(-4)
+
+        base_responses = list((200, body) for body in ec_archive_bodies)
+        for error in (Timeout(), 404, Exception('kaboom!')):
+            responses = list(base_responses)
+            # grab a data node index
+            error_index = random.randint(0, self.policy.ec_ndata - 1)
+            responses[error_index] = (error, '')
+            headers = {'X-Object-Sysmeta-Ec-Etag': etag}
+            codes, body_iter = zip(*responses)
+            with mocked_http_conn(*codes, body_iter=body_iter,
+                                  headers=headers):
+                df = self.reconstructor.reconstruct_fa(
+                    job, node, dict(metadata))
+                fixed_body = ''.join(df.reader())
+                self.assertEqual(len(fixed_body), len(broken_body))
+                self.assertEqual(md5(fixed_body).hexdigest(),
+                                 md5(broken_body).hexdigest())
+
     def test_reconstruct_fa_errors_fails(self):
         job = {
             'partition': 0,

From f8dee761bd36f857aa1288c27e095907032fad68 Mon Sep 17 00:00:00 2001
From: Andreas Jaeger
Date: Mon, 20 Apr 2015 11:15:35 +0200
Subject: [PATCH 14/98] Release Import of Translations from Transifex

Manual import of translations from Transifex. This change also removes
all po files that are less than 66 per cent translated, since such
partially translated files will not help users.

This update also recreates all pot files (translation source files) to
reflect the state of the repository.

This change needs to be done manually since the automatic import does
not handle the proposed branches, and we need to sync with the latest
translations.

Note: This is part of the import of translations; there are no new
translations for this project, thus only the pot file gets updated.

Change-Id: I0cbfdae3bd1662da54c58e91a13f49419eba9b2d
---
 swift/locale/swift.pot                  | 503 ++++++++++++++----------
 swift/locale/zh_CN/LC_MESSAGES/swift.po | 479 +++++++++++++---------
 2 files changed, 588 insertions(+), 394 deletions(-)

diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot
index f7a79f7239..4845819076 100644
--- a/swift/locale/swift.pot
+++ b/swift/locale/swift.pot
@@ -6,9 +6,9 @@
 #, fuzzy
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.2.2.post136\n"
+"Project-Id-Version: swift 2.3.0rc1.1.gf6482bd\n"
 "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-03-24 06:06+0000\n"
+"POT-Creation-Date: 2015-04-20 11:15+0200\n"
 "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
 "Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
 "Language-Team: LANGUAGE <LL@li.org>\n"
@@ -63,98 +63,98 @@ msgstr ""
 msgid "ERROR Could not get account info %s"
 msgstr ""

-#: swift/account/reaper.py:133 swift/common/utils.py:2127
-#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130
+#: swift/account/reaper.py:134 swift/common/utils.py:2127
+#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131
 #, python-format
 msgid "Skipping %s as it is not mounted"
 msgstr ""

-#: swift/account/reaper.py:137
+#: swift/account/reaper.py:138
 msgid "Exception in top-level account reaper loop"
 msgstr ""

-#: swift/account/reaper.py:140
+#: swift/account/reaper.py:141
 #, python-format
 msgid "Devices pass completed: %.02fs"
 msgstr ""

-#: swift/account/reaper.py:237
+#: swift/account/reaper.py:238
 #, python-format
 msgid "Beginning pass on account %s"
 msgstr ""

-#: swift/account/reaper.py:254
+#: swift/account/reaper.py:255
 #, python-format
 msgid "Exception with containers for account %s"
 msgstr ""

-#: swift/account/reaper.py:261
+#: swift/account/reaper.py:262
 #, python-format
 msgid "Exception with account %s"
 msgstr ""

-#: swift/account/reaper.py:262
+#: swift/account/reaper.py:263
 #, python-format
 msgid "Incomplete pass on account %s"
 msgstr ""

-#: swift/account/reaper.py:264
+#: swift/account/reaper.py:265
 #, python-format
 msgid ", %s containers deleted"
 msgstr ""

-#: swift/account/reaper.py:266
+#: swift/account/reaper.py:267
 #, python-format
 msgid ", %s objects deleted"
 msgstr ""

-#: swift/account/reaper.py:268
+#: swift/account/reaper.py:269
 #, python-format
 msgid ", %s containers remaining"
 msgstr ""

-#: swift/account/reaper.py:271
+#: swift/account/reaper.py:272
 #, python-format
 msgid ", %s objects remaining"
 msgstr ""

-#: swift/account/reaper.py:273
+#: swift/account/reaper.py:274
 #,
python-format msgid ", %s containers possibly remaining" msgstr "" -#: swift/account/reaper.py:276 +#: swift/account/reaper.py:277 #, python-format msgid ", %s objects possibly remaining" msgstr "" -#: swift/account/reaper.py:279 +#: swift/account/reaper.py:280 msgid ", return codes: " msgstr "" -#: swift/account/reaper.py:283 +#: swift/account/reaper.py:284 #, python-format msgid ", elapsed: %.02fs" msgstr "" -#: swift/account/reaper.py:289 +#: swift/account/reaper.py:290 #, python-format msgid "Account %s has not been reaped since %s" msgstr "" -#: swift/account/reaper.py:348 swift/account/reaper.py:396 -#: swift/account/reaper.py:463 swift/container/updater.py:306 +#: swift/account/reaper.py:349 swift/account/reaper.py:397 +#: swift/account/reaper.py:464 swift/container/updater.py:306 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/account/reaper.py:368 +#: swift/account/reaper.py:369 #, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "" #: swift/account/server.py:275 swift/container/server.py:582 -#: swift/obj/server.py:730 +#: swift/obj/server.py:910 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "" @@ -270,90 +270,90 @@ msgstr "" msgid "Unexpected response: %s" msgstr "" -#: swift/common/manager.py:62 +#: swift/common/manager.py:63 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:69 +#: swift/common/manager.py:70 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:76 +#: swift/common/manager.py:77 msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:194 +#: swift/common/manager.py:195 msgid "" "\n" "user quit" msgstr "" -#: swift/common/manager.py:231 swift/common/manager.py:543 +#: swift/common/manager.py:232 swift/common/manager.py:544 #, python-format msgid "No %s running" msgstr "" -#: swift/common/manager.py:244 +#: swift/common/manager.py:245 #, python-format msgid "%s (%s) appears to have stopped" msgstr "" -#: swift/common/manager.py:254 +#: swift/common/manager.py:255 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "" -#: swift/common/manager.py:437 +#: swift/common/manager.py:438 #, python-format msgid "Unable to locate config %sfor %s" msgstr "" -#: swift/common/manager.py:441 +#: swift/common/manager.py:442 msgid "Found configs:" msgstr "" -#: swift/common/manager.py:485 +#: swift/common/manager.py:486 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "" -#: swift/common/manager.py:492 +#: swift/common/manager.py:493 #, python-format msgid "Removing stale pid file %s" msgstr "" -#: swift/common/manager.py:495 +#: swift/common/manager.py:496 #, python-format msgid "No permission to signal PID %d" msgstr "" -#: swift/common/manager.py:540 +#: swift/common/manager.py:541 #, python-format msgid "%s #%d not running (%s)" msgstr "" -#: swift/common/manager.py:547 swift/common/manager.py:640 -#: swift/common/manager.py:643 +#: swift/common/manager.py:548 swift/common/manager.py:641 +#: swift/common/manager.py:644 #, python-format msgid "%s running (%s - %s)" msgstr "" -#: swift/common/manager.py:646 +#: swift/common/manager.py:647 #, python-format msgid "%s already started..." 
msgstr "" -#: swift/common/manager.py:655 +#: swift/common/manager.py:656 #, python-format msgid "Running %s once" msgstr "" -#: swift/common/manager.py:657 +#: swift/common/manager.py:658 #, python-format msgid "Starting %s" msgstr "" -#: swift/common/manager.py:664 +#: swift/common/manager.py:665 #, python-format msgid "%s does not exist" msgstr "" @@ -373,7 +373,12 @@ msgstr "" msgid "Error limiting server %s" msgstr "" -#: swift/common/request_helpers.py:387 +#: swift/common/request_helpers.py:102 +#, python-format +msgid "No policy with index %s" +msgstr "" + +#: swift/common/request_helpers.py:395 msgid "ERROR: An error occurred while retrieving segments" msgstr "" @@ -436,51 +441,51 @@ msgstr "" msgid "Unable to find %s config section in %s" msgstr "" -#: swift/common/utils.py:2348 +#: swift/common/utils.py:2353 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2353 +#: swift/common/utils.py:2358 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2357 +#: swift/common/utils.py:2362 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2366 +#: swift/common/utils.py:2371 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" -#: swift/common/utils.py:2370 +#: swift/common/utils.py:2375 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2373 +#: swift/common/utils.py:2378 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2378 +#: swift/common/utils.py:2383 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2570 +#: swift/common/utils.py:2575 msgid "Exception dumping recon cache" msgstr "" -#: swift/common/wsgi.py:175 +#: swift/common/wsgi.py:197 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "" -#: swift/common/wsgi.py:185 +#: swift/common/wsgi.py:207 msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external " "SSL termination for a production deployment." @@ -521,27 +526,27 @@ msgstr "" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" -#: swift/common/middleware/recon.py:78 +#: swift/common/middleware/recon.py:80 msgid "Error reading recon cache file" msgstr "" -#: swift/common/middleware/recon.py:80 +#: swift/common/middleware/recon.py:82 msgid "Error parsing recon cache file" msgstr "" -#: swift/common/middleware/recon.py:82 +#: swift/common/middleware/recon.py:84 msgid "Error retrieving recon data" msgstr "" -#: swift/common/middleware/recon.py:151 +#: swift/common/middleware/recon.py:158 msgid "Error listing devices" msgstr "" -#: swift/common/middleware/recon.py:247 +#: swift/common/middleware/recon.py:254 msgid "Error reading ringfile" msgstr "" -#: swift/common/middleware/recon.py:261 +#: swift/common/middleware/recon.py:268 msgid "Error reading swift.conf" msgstr "" @@ -648,52 +653,61 @@ msgid "" "later)" msgstr "" -#: swift/container/sync.py:193 +#: swift/container/sync.py:217 +msgid "" +"Configuration option internal_client_conf_path not defined. 
Using default" +" configuration, See internal-client.conf-sample for options" +msgstr "" + +#: swift/container/sync.py:230 +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" + +#: swift/container/sync.py:264 msgid "Begin container sync \"once\" mode" msgstr "" -#: swift/container/sync.py:205 +#: swift/container/sync.py:276 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" -#: swift/container/sync.py:213 +#: swift/container/sync.py:284 #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " "%(skip)s skipped, %(fail)s failed" msgstr "" -#: swift/container/sync.py:266 +#: swift/container/sync.py:337 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "" -#: swift/container/sync.py:322 +#: swift/container/sync.py:393 #, python-format msgid "ERROR Syncing %s" msgstr "" -#: swift/container/sync.py:410 +#: swift/container/sync.py:476 #, python-format -msgid "" -"Unknown exception trying to GET: %(node)r %(account)r %(container)r " -"%(object)r" +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" -#: swift/container/sync.py:444 +#: swift/container/sync.py:510 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "" -#: swift/container/sync.py:450 +#: swift/container/sync.py:516 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" -#: swift/container/sync.py:457 swift/container/sync.py:464 +#: swift/container/sync.py:523 swift/container/sync.py:530 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "" @@ -703,8 +717,8 @@ msgstr "" msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" -#: swift/container/updater.py:91 swift/obj/replicator.py:484 -#: swift/obj/replicator.py:570 +#: swift/container/updater.py:91 swift/obj/reconstructor.py:788 +#: swift/obj/replicator.py:487 swift/obj/replicator.py:575 #, python-format msgid "%s is not mounted" msgstr "" @@ -816,42 +830,57 @@ msgstr "" msgid "ERROR auditing: %s" msgstr "" -#: swift/obj/diskfile.py:318 +#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:409 +#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:484 swift/obj/updater.py:169 +#: swift/obj/diskfile.py:486 swift/obj/updater.py:162 #, python-format -msgid "Directory %s does not map to a valid policy" +msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:678 +#: swift/obj/diskfile.py:737 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:869 +#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1168 +#: swift/obj/diskfile.py:1253 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:1449 +#: swift/obj/diskfile.py:1543 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" +#: swift/obj/diskfile.py:1797 +#, python-format +msgid "Problem fsyncing durable state file: %s" +msgstr "" + +#: swift/obj/diskfile.py:1802 +#, python-format +msgid "No space left on device for %s" +msgstr "" + +#: 
swift/obj/diskfile.py:1806 +#, python-format +msgid "Problem writing durable state file: %s" +msgstr "" + #: swift/obj/expirer.py:79 #, python-format msgid "Pass completed in %ds; %d objects expired" @@ -881,326 +910,394 @@ msgstr "" msgid "Exception while deleting object %s %s %s" msgstr "" -#: swift/obj/mem_server.py:87 +#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472 +#, python-format +msgid "Invalid response %(resp)s from %(full_path)s" +msgstr "" + +#: swift/obj/reconstructor.py:195 +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "" + +#: swift/obj/reconstructor.py:301 +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" + +#: swift/obj/reconstructor.py:324 #, python-format msgid "" -"ERROR Container update failed: %(status)d response from " -"%(ip)s:%(port)s/%(dev)s" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed" +" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/mem_server.py:93 -#, python-format -msgid "ERROR container update failed with %(ip)s:%(port)s/%(dev)s" -msgstr "" - -#: swift/obj/replicator.py:138 -#, python-format -msgid "Killing long-running rsync: %s" -msgstr "" - -#: swift/obj/replicator.py:152 -#, python-format -msgid "Bad rsync return code: %(ret)d <- %(args)s" -msgstr "" - -#: swift/obj/replicator.py:159 swift/obj/replicator.py:163 -#, python-format -msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" -msgstr "" - -#: swift/obj/replicator.py:278 -#, python-format -msgid "Removing %s objects" -msgstr "" - -#: swift/obj/replicator.py:286 -msgid "Error syncing handoff partition" -msgstr "" - -#: swift/obj/replicator.py:292 -#, python-format -msgid "Removing partition: %s" -msgstr "" - -#: swift/obj/replicator.py:347 -#, python-format -msgid "%(ip)s/%(device)s responded as unmounted" -msgstr "" - -#: swift/obj/replicator.py:352 -#, python-format -msgid "Invalid response %(resp)s from %(ip)s" -msgstr "" - -#: swift/obj/replicator.py:387 -#, python-format -msgid "Error syncing with node: %s" -msgstr "" - -#: swift/obj/replicator.py:391 -msgid "Error syncing partition" -msgstr "" - -#: swift/obj/replicator.py:404 -#, python-format -msgid "" -"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " -"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" -msgstr "" - -#: swift/obj/replicator.py:415 +#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/replicator.py:422 +#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" -#: swift/obj/replicator.py:430 +#: swift/obj/reconstructor.py:352 +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "" + +#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463 +msgid "Lockup detected.. killing live coros." +msgstr "" + +#: swift/obj/reconstructor.py:442 +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "" + +#: swift/obj/reconstructor.py:467 +#, python-format +msgid "%s responded as unmounted" +msgstr "" + +#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295 +#, python-format +msgid "Removing partition: %s" +msgstr "" + +#: swift/obj/reconstructor.py:865 +msgid "Ring change detected. Aborting current reconstruction pass." 
+msgstr "" + +#: swift/obj/reconstructor.py:884 +msgid "Exception in top-levelreconstruction loop" +msgstr "" + +#: swift/obj/reconstructor.py:894 +msgid "Running object reconstructor in script mode." +msgstr "" + +#: swift/obj/reconstructor.py:903 +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "" + +#: swift/obj/reconstructor.py:910 +msgid "Starting object reconstructor in daemon mode." +msgstr "" + +#: swift/obj/reconstructor.py:914 +msgid "Starting object reconstruction pass." +msgstr "" + +#: swift/obj/reconstructor.py:919 +#, python-format +msgid "Object reconstruction complete. (%.02f minutes)" +msgstr "" + +#: swift/obj/replicator.py:139 +#, python-format +msgid "Killing long-running rsync: %s" +msgstr "" + +#: swift/obj/replicator.py:153 +#, python-format +msgid "Bad rsync return code: %(ret)d <- %(args)s" +msgstr "" + +#: swift/obj/replicator.py:160 swift/obj/replicator.py:164 +#, python-format +msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" +msgstr "" + +#: swift/obj/replicator.py:281 +#, python-format +msgid "Removing %s objects" +msgstr "" + +#: swift/obj/replicator.py:289 +msgid "Error syncing handoff partition" +msgstr "" + +#: swift/obj/replicator.py:351 +#, python-format +msgid "%(ip)s/%(device)s responded as unmounted" +msgstr "" + +#: swift/obj/replicator.py:356 +#, python-format +msgid "Invalid response %(resp)s from %(ip)s" +msgstr "" + +#: swift/obj/replicator.py:391 +#, python-format +msgid "Error syncing with node: %s" +msgstr "" + +#: swift/obj/replicator.py:395 +msgid "Error syncing partition" +msgstr "" + +#: swift/obj/replicator.py:408 +#, python-format +msgid "" +"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " +"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" + +#: swift/obj/replicator.py:434 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:459 -msgid "Lockup detected.. killing live coros." -msgstr "" - -#: swift/obj/replicator.py:573 +#: swift/obj/replicator.py:578 msgid "Ring change detected. Aborting current replication pass." msgstr "" -#: swift/obj/replicator.py:594 +#: swift/obj/replicator.py:599 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:603 +#: swift/obj/replicator.py:608 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:621 +#: swift/obj/replicator.py:626 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:628 +#: swift/obj/replicator.py:633 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:632 +#: swift/obj/replicator.py:637 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:637 +#: swift/obj/replicator.py:642 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "" -#: swift/obj/server.py:202 +#: swift/obj/server.py:231 #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d" " response from %(ip)s:%(port)s/%(dev)s" msgstr "" -#: swift/obj/server.py:209 +#: swift/obj/server.py:238 #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " "async update later)" msgstr "" -#: swift/obj/server.py:244 +#: swift/obj/server.py:273 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" -#: swift/obj/updater.py:62 +#: swift/obj/updater.py:63 #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "" -#: swift/obj/updater.py:77 +#: swift/obj/updater.py:78 msgid "Begin object update sweep" msgstr "" -#: swift/obj/updater.py:103 +#: swift/obj/updater.py:104 #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" " successes, %(fail)s failures" msgstr "" -#: swift/obj/updater.py:112 +#: swift/obj/updater.py:113 #, python-format msgid "Object update sweep completed: %.02fs" msgstr "" -#: swift/obj/updater.py:121 +#: swift/obj/updater.py:122 msgid "Begin object update single threaded sweep" msgstr "" -#: swift/obj/updater.py:135 +#: swift/obj/updater.py:136 #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures" msgstr "" -#: swift/obj/updater.py:187 +#: swift/obj/updater.py:179 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" -#: swift/obj/updater.py:217 +#: swift/obj/updater.py:209 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "" -#: swift/obj/updater.py:282 +#: swift/obj/updater.py:274 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:380 +#: swift/proxy/server.py:405 msgid "ERROR Unhandled exception in request" msgstr "" -#: swift/proxy/server.py:435 +#: swift/proxy/server.py:460 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" -#: swift/proxy/server.py:452 swift/proxy/server.py:470 +#: swift/proxy/server.py:477 swift/proxy/server.py:495 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "" -#: swift/proxy/server.py:540 +#: swift/proxy/server.py:571 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" -#: swift/proxy/controllers/account.py:63 +#: swift/proxy/controllers/account.py:64 msgid "Account" msgstr "" -#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731 -#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319 -#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382 -#: swift/proxy/controllers/obj.py:593 +#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814 +#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411 +#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643 +#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591 +#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908 +#: swift/proxy/controllers/obj.py:2093 msgid "Object" msgstr "" -#: swift/proxy/controllers/base.py:699 +#: swift/proxy/controllers/base.py:753 msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:732 +#: 
swift/proxy/controllers/base.py:815 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:736 +#: swift/proxy/controllers/base.py:819 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:741 +#: swift/proxy/controllers/base.py:824 msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:743 +#: swift/proxy/controllers/base.py:826 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049 +#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037 -#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402 +#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129 +#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450 +#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138 msgid "ERROR Insufficient Storage" msgstr "" -#: swift/proxy/controllers/base.py:820 +#: swift/proxy/controllers/base.py:905 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1040 +#: swift/proxy/controllers/base.py:1132 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1152 +#: swift/proxy/controllers/base.py:1260 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" -#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118 +#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161 msgid "Container" msgstr "" -#: swift/proxy/controllers/obj.py:320 +#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592 #, python-format msgid "Trying to write to %s" msgstr "" -#: swift/proxy/controllers/obj.py:361 +#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:367 +#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909 #, python-format msgid "Expect: 100-continue on %s" msgstr "" -#: swift/proxy/controllers/obj.py:383 +#: swift/proxy/controllers/obj.py:428 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:406 +#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" -#: swift/proxy/controllers/obj.py:663 +#: swift/proxy/controllers/obj.py:716 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" -#: swift/proxy/controllers/obj.py:672 +#: swift/proxy/controllers/obj.py:725 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:755 +#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:762 +#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:767 +#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060 msgid "Client 
disconnected without sending enough data" msgstr "" -#: swift/proxy/controllers/obj.py:813 +#: swift/proxy/controllers/obj.py:869 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:817 +#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218 msgid "Object PUT" msgstr "" +#: swift/proxy/controllers/obj.py:2035 +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "" + +#: swift/proxy/controllers/obj.py:2094 +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "" + diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index b123396e0d..8c239cc188 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Swift\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-03-24 06:06+0000\n" -"PO-Revision-Date: 2015-03-24 04:20+0000\n" +"POT-Creation-Date: 2015-04-20 11:15+0200\n" +"PO-Revision-Date: 2015-04-15 12:48+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) " "(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n" @@ -65,98 +65,98 @@ msgstr "审计失败%s: %s" msgid "ERROR Could not get account info %s" msgstr "错误:无法获取账号信息%s" -#: swift/account/reaper.py:133 swift/common/utils.py:2127 -#: swift/obj/diskfile.py:470 swift/obj/updater.py:87 swift/obj/updater.py:130 +#: swift/account/reaper.py:134 swift/common/utils.py:2127 +#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "挂载失败 跳过%s" -#: swift/account/reaper.py:137 +#: swift/account/reaper.py:138 msgid "Exception in top-level account reaper loop" msgstr "异常出现在top-level账号reaper环" -#: swift/account/reaper.py:140 +#: swift/account/reaper.py:141 #, python-format msgid "Devices pass completed: %.02fs" msgstr "设备通过完成: %.02fs" -#: swift/account/reaper.py:237 +#: swift/account/reaper.py:238 #, python-format msgid "Beginning pass on account %s" msgstr "账号%s开始通过" -#: swift/account/reaper.py:254 +#: swift/account/reaper.py:255 #, python-format msgid "Exception with containers for account %s" msgstr "账号%s内容器出现异常" -#: swift/account/reaper.py:261 +#: swift/account/reaper.py:262 #, python-format msgid "Exception with account %s" msgstr "账号%s出现异常" -#: swift/account/reaper.py:262 +#: swift/account/reaper.py:263 #, python-format msgid "Incomplete pass on account %s" msgstr "账号%s未完成通过" -#: swift/account/reaper.py:264 +#: swift/account/reaper.py:265 #, python-format msgid ", %s containers deleted" msgstr ",删除容器%s" -#: swift/account/reaper.py:266 +#: swift/account/reaper.py:267 #, python-format msgid ", %s objects deleted" msgstr ",删除对象%s" -#: swift/account/reaper.py:268 +#: swift/account/reaper.py:269 #, python-format msgid ", %s containers remaining" msgstr ",剩余容器%s" -#: swift/account/reaper.py:271 +#: swift/account/reaper.py:272 #, python-format msgid ", %s objects remaining" msgstr ",剩余对象%s" -#: swift/account/reaper.py:273 +#: swift/account/reaper.py:274 #, python-format msgid ", %s containers possibly remaining" msgstr ",可能剩余容器%s" -#: swift/account/reaper.py:276 +#: swift/account/reaper.py:277 #, python-format msgid ", %s objects possibly remaining" msgstr ",可能剩余对象%s" -#: swift/account/reaper.py:279 +#: swift/account/reaper.py:280 msgid ", return codes: " msgstr ",返回代码:" -#: swift/account/reaper.py:283 +#: swift/account/reaper.py:284 #, python-format msgid ", elapsed: 
%.02fs" msgstr ",耗时:%.02fs" -#: swift/account/reaper.py:289 +#: swift/account/reaper.py:290 #, python-format msgid "Account %s has not been reaped since %s" msgstr "账号%s自%s起未被reaped" -#: swift/account/reaper.py:348 swift/account/reaper.py:396 -#: swift/account/reaper.py:463 swift/container/updater.py:306 +#: swift/account/reaper.py:349 swift/account/reaper.py:397 +#: swift/account/reaper.py:464 swift/container/updater.py:306 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" -#: swift/account/reaper.py:368 +#: swift/account/reaper.py:369 #, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "账号%(account)s容器%(container)s的对象出现异常" #: swift/account/server.py:275 swift/container/server.py:582 -#: swift/obj/server.py:730 +#: swift/obj/server.py:910 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "%(method)s %(path)s出现错误__call__ error" @@ -272,19 +272,19 @@ msgstr "尝试复制时发生错误" msgid "Unexpected response: %s" msgstr "意外响应:%s" -#: swift/common/manager.py:62 +#: swift/common/manager.py:63 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:无法修改文件描述限制。是否按非root运行?" -#: swift/common/manager.py:69 +#: swift/common/manager.py:70 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存极限,是否按非root运行?" -#: swift/common/manager.py:76 +#: swift/common/manager.py:77 msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:无法修改最大运行极限,是否按非root运行?" -#: swift/common/manager.py:194 +#: swift/common/manager.py:195 msgid "" "\n" "user quit" @@ -292,72 +292,72 @@ msgstr "" "\n" "用户退出" -#: swift/common/manager.py:231 swift/common/manager.py:543 +#: swift/common/manager.py:232 swift/common/manager.py:544 #, python-format msgid "No %s running" msgstr "无%s账号运行" -#: swift/common/manager.py:244 +#: swift/common/manager.py:245 #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s)显示已停止" -#: swift/common/manager.py:254 +#: swift/common/manager.py:255 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "等待%s秒直到%s停止;放弃" -#: swift/common/manager.py:437 +#: swift/common/manager.py:438 #, python-format msgid "Unable to locate config %sfor %s" -msgstr "无法找到配置%s的%s" +msgstr "" -#: swift/common/manager.py:441 +#: swift/common/manager.py:442 msgid "Found configs:" msgstr "找到配置" -#: swift/common/manager.py:485 +#: swift/common/manager.py:486 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "发出信号%s pid: %s 信号: %s" -#: swift/common/manager.py:492 +#: swift/common/manager.py:493 #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" -#: swift/common/manager.py:495 +#: swift/common/manager.py:496 #, python-format msgid "No permission to signal PID %d" msgstr "无权限发送信号PID%d" -#: swift/common/manager.py:540 +#: swift/common/manager.py:541 #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d无法运行(%s)" -#: swift/common/manager.py:547 swift/common/manager.py:640 -#: swift/common/manager.py:643 +#: swift/common/manager.py:548 swift/common/manager.py:641 +#: swift/common/manager.py:644 #, python-format msgid "%s running (%s - %s)" msgstr "%s运行(%s - %s)" -#: swift/common/manager.py:646 +#: swift/common/manager.py:647 #, python-format msgid "%s already started..." msgstr "%s已启动..." 
-#: swift/common/manager.py:655 +#: swift/common/manager.py:656 #, python-format msgid "Running %s once" msgstr "运行%s一次" -#: swift/common/manager.py:657 +#: swift/common/manager.py:658 #, python-format msgid "Starting %s" msgstr "启动%s" -#: swift/common/manager.py:664 +#: swift/common/manager.py:665 #, python-format msgid "%s does not exist" msgstr "%s不存在" @@ -377,7 +377,12 @@ msgstr "%(action)s错误 高性能内存对象缓存: %(server)s" msgid "Error limiting server %s" msgstr "服务器出现错误%s " -#: swift/common/request_helpers.py:387 +#: swift/common/request_helpers.py:102 +#, python-format +msgid "No policy with index %s" +msgstr "" + +#: swift/common/request_helpers.py:395 msgid "ERROR: An error occurred while retrieving segments" msgstr "" @@ -440,51 +445,51 @@ msgstr "无法从%s读取设置" msgid "Unable to find %s config section in %s" msgstr "无法在%s中查找到%s设置部分" -#: swift/common/utils.py:2348 +#: swift/common/utils.py:2353 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "无效的X-Container-Sync-To格式%r" -#: swift/common/utils.py:2353 +#: swift/common/utils.py:2358 #, python-format msgid "No realm key for %r" msgstr "%r权限key不存在" -#: swift/common/utils.py:2357 +#: swift/common/utils.py:2362 #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r的集群节点不存在" -#: swift/common/utils.py:2366 +#: swift/common/utils.py:2371 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" -#: swift/common/utils.py:2370 +#: swift/common/utils.py:2375 msgid "Path required in X-Container-Sync-To" msgstr "在X-Container-Sync-To中路径是必须的" -#: swift/common/utils.py:2373 +#: swift/common/utils.py:2378 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许" -#: swift/common/utils.py:2378 +#: swift/common/utils.py:2383 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To中无效主机%r" -#: swift/common/utils.py:2570 +#: swift/common/utils.py:2575 msgid "Exception dumping recon cache" msgstr "执行dump recon的时候出现异常" -#: swift/common/wsgi.py:175 +#: swift/common/wsgi.py:197 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "尝试过%s秒后无法捆绑%s:%s" -#: swift/common/wsgi.py:185 +#: swift/common/wsgi.py:207 msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external " "SSL termination for a production deployment." 
@@ -527,27 +532,27 @@ msgstr "" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制流量 " -#: swift/common/middleware/recon.py:78 +#: swift/common/middleware/recon.py:80 msgid "Error reading recon cache file" msgstr "读取recon cache file时出现错误" -#: swift/common/middleware/recon.py:80 +#: swift/common/middleware/recon.py:82 msgid "Error parsing recon cache file" msgstr "解析recon cache file时出现错误" -#: swift/common/middleware/recon.py:82 +#: swift/common/middleware/recon.py:84 msgid "Error retrieving recon data" msgstr "检索recon data时出现错误" -#: swift/common/middleware/recon.py:151 +#: swift/common/middleware/recon.py:158 msgid "Error listing devices" msgstr "设备列表时出现错误" -#: swift/common/middleware/recon.py:247 +#: swift/common/middleware/recon.py:254 msgid "Error reading ringfile" msgstr "读取ringfile时出现错误" -#: swift/common/middleware/recon.py:261 +#: swift/common/middleware/recon.py:268 msgid "Error reading swift.conf" msgstr "读取swift.conf时出现错误" @@ -654,16 +659,27 @@ msgid "" "later)" msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" -#: swift/container/sync.py:193 +#: swift/container/sync.py:217 +msgid "" +"Configuration option internal_client_conf_path not defined. Using default" +" configuration, See internal-client.conf-sample for options" +msgstr "" + +#: swift/container/sync.py:230 +#, python-format +msgid "Unable to load internal client from config: %r (%s)" +msgstr "" + +#: swift/container/sync.py:264 msgid "Begin container sync \"once\" mode" msgstr "开始容器同步\"once\"模式" -#: swift/container/sync.py:205 +#: swift/container/sync.py:276 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "容器同步\"once\"模式完成:%.02fs" -#: swift/container/sync.py:213 +#: swift/container/sync.py:284 #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " @@ -672,36 +688,34 @@ msgstr "" "自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n" "\"%(skip)s 跳过, %(fail)s 失败" -#: swift/container/sync.py:266 +#: swift/container/sync.py:337 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" -#: swift/container/sync.py:322 +#: swift/container/sync.py:393 #, python-format msgid "ERROR Syncing %s" msgstr "同步时发生错误%s" -#: swift/container/sync.py:410 +#: swift/container/sync.py:476 #, python-format -msgid "" -"Unknown exception trying to GET: %(node)r %(account)r %(container)r " -"%(object)r" -msgstr "尝试获取时发生未知的异常%(node)r %(account)r %(container)r %(object)r" +msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" +msgstr "" -#: swift/container/sync.py:444 +#: swift/container/sync.py:510 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未授权%(sync_from)r => %(sync_to)r" -#: swift/container/sync.py:450 +#: swift/container/sync.py:516 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" -#: swift/container/sync.py:457 swift/container/sync.py:464 +#: swift/container/sync.py:523 swift/container/sync.py:530 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步错误 %(db_file)s %(row)s" @@ -711,8 +725,8 @@ msgstr "同步错误 %(db_file)s %(row)s" msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "%s未挂载" -#: swift/container/updater.py:91 swift/obj/replicator.py:484 -#: swift/obj/replicator.py:570 +#: swift/container/updater.py:91 swift/obj/reconstructor.py:788 +#: swift/obj/replicator.py:487 
swift/obj/replicator.py:575 #, python-format msgid "%s is not mounted" msgstr "%s未挂载" @@ -834,42 +848,57 @@ msgstr "错误:无法执行审计:%s" msgid "ERROR auditing: %s" msgstr "审计错误:%s" -#: swift/obj/diskfile.py:318 +#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录" -#: swift/obj/diskfile.py:409 +#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373 msgid "Error hashing suffix" msgstr "执行Hashing后缀时发生错误" -#: swift/obj/diskfile.py:484 swift/obj/updater.py:169 +#: swift/obj/diskfile.py:486 swift/obj/updater.py:162 #, python-format -msgid "Directory %s does not map to a valid policy" -msgstr "目录%s无法映射到一个有效的policy" +msgid "Directory %r does not map to a valid policy (%s)" +msgstr "" -#: swift/obj/diskfile.py:678 +#: swift/obj/diskfile.py:737 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" -#: swift/obj/diskfile.py:869 +#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795 #, python-format msgid "Problem cleaning up %s" msgstr "问题清除%s" -#: swift/obj/diskfile.py:1168 +#: swift/obj/diskfile.py:1253 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" -#: swift/obj/diskfile.py:1449 +#: swift/obj/diskfile.py:1543 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" +#: swift/obj/diskfile.py:1797 +#, python-format +msgid "Problem fsyncing durable state file: %s" +msgstr "" + +#: swift/obj/diskfile.py:1802 +#, python-format +msgid "No space left on device for %s" +msgstr "" + +#: swift/obj/diskfile.py:1806 +#, python-format +msgid "Problem writing durable state file: %s" +msgstr "" + #: swift/obj/expirer.py:79 #, python-format msgid "Pass completed in %ds; %d objects expired" @@ -899,67 +928,138 @@ msgstr "未处理的异常" msgid "Exception while deleting object %s %s %s" msgstr "执行删除对象时发生异常%s %s %s" -#: swift/obj/mem_server.py:87 +#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472 #, python-format -msgid "" -"ERROR Container update failed: %(status)d response from " -"%(ip)s:%(port)s/%(dev)s" -msgstr "错误 容器更新失败:%(status)d 从%(ip)s:%(port)s/%(dev)s得到回应" - -#: swift/obj/mem_server.py:93 -#, python-format -msgid "ERROR container update failed with %(ip)s:%(port)s/%(dev)s" -msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s" - -#: swift/obj/replicator.py:138 -#, python-format -msgid "Killing long-running rsync: %s" -msgstr "终止long-running同步: %s" - -#: swift/obj/replicator.py:152 -#, python-format -msgid "Bad rsync return code: %(ret)d <- %(args)s" -msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" - -#: swift/obj/replicator.py:159 swift/obj/replicator.py:163 -#, python-format -msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" -msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" - -#: swift/obj/replicator.py:278 -#, python-format -msgid "Removing %s objects" +msgid "Invalid response %(resp)s from %(full_path)s" msgstr "" -#: swift/obj/replicator.py:286 -msgid "Error syncing handoff partition" -msgstr "执行同步切换分区时发生错误" +#: swift/obj/reconstructor.py:195 +#, python-format +msgid "Trying to GET %(full_path)s" +msgstr "" -#: swift/obj/replicator.py:292 +#: swift/obj/reconstructor.py:301 +#, python-format +msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" +msgstr "" + 
+#: swift/obj/reconstructor.py:324 +#, python-format +msgid "" +"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed" +" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" + +#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419 +#, python-format +msgid "" +"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " +"synced" +msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" + +#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426 +#, python-format +msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" +msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" + +#: swift/obj/reconstructor.py:352 +#, python-format +msgid "Nothing reconstructed for %s seconds." +msgstr "" + +#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463 +msgid "Lockup detected.. killing live coros." +msgstr "检测到lockup。终止正在执行的coros" + +#: swift/obj/reconstructor.py:442 +#, python-format +msgid "Trying to sync suffixes with %s" +msgstr "" + +#: swift/obj/reconstructor.py:467 +#, python-format +msgid "%s responded as unmounted" +msgstr "" + +#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295 #, python-format msgid "Removing partition: %s" msgstr "移除分区:%s" -#: swift/obj/replicator.py:347 +#: swift/obj/reconstructor.py:865 +msgid "Ring change detected. Aborting current reconstruction pass." +msgstr "" + +#: swift/obj/reconstructor.py:884 +msgid "Exception in top-levelreconstruction loop" +msgstr "" + +#: swift/obj/reconstructor.py:894 +msgid "Running object reconstructor in script mode." +msgstr "" + +#: swift/obj/reconstructor.py:903 +#, python-format +msgid "Object reconstruction complete (once). (%.02f minutes)" +msgstr "" + +#: swift/obj/reconstructor.py:910 +msgid "Starting object reconstructor in daemon mode." +msgstr "" + +#: swift/obj/reconstructor.py:914 +msgid "Starting object reconstruction pass." +msgstr "" + +#: swift/obj/reconstructor.py:919 +#, python-format +msgid "Object reconstruction complete. 
(%.02f minutes)" +msgstr "" + +#: swift/obj/replicator.py:139 +#, python-format +msgid "Killing long-running rsync: %s" +msgstr "终止long-running同步: %s" + +#: swift/obj/replicator.py:153 +#, python-format +msgid "Bad rsync return code: %(ret)d <- %(args)s" +msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" + +#: swift/obj/replicator.py:160 swift/obj/replicator.py:164 +#, python-format +msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" +msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" + +#: swift/obj/replicator.py:281 +#, python-format +msgid "Removing %s objects" +msgstr "" + +#: swift/obj/replicator.py:289 +msgid "Error syncing handoff partition" +msgstr "执行同步切换分区时发生错误" + +#: swift/obj/replicator.py:351 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s的回应为未挂载" -#: swift/obj/replicator.py:352 +#: swift/obj/replicator.py:356 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)s来自%(ip)s" -#: swift/obj/replicator.py:387 +#: swift/obj/replicator.py:391 #, python-format msgid "Error syncing with node: %s" msgstr "执行同步时节点%s发生错误" -#: swift/obj/replicator.py:391 +#: swift/obj/replicator.py:395 msgid "Error syncing partition" msgstr "执行同步分区时发生错误" -#: swift/obj/replicator.py:404 +#: swift/obj/replicator.py:408 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -968,259 +1068,256 @@ msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n" "\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" -#: swift/obj/replicator.py:415 -#, python-format -msgid "" -"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " -"synced" -msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" - -#: swift/obj/replicator.py:422 -#, python-format -msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" -msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" - -#: swift/obj/replicator.py:430 +#: swift/obj/replicator.py:434 #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s秒无复制" -#: swift/obj/replicator.py:459 -msgid "Lockup detected.. killing live coros." -msgstr "检测到lockup。终止正在执行的coros" - -#: swift/obj/replicator.py:573 +#: swift/obj/replicator.py:578 msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改变被检测到。退出现有的复制通过" -#: swift/obj/replicator.py:594 +#: swift/obj/replicator.py:599 msgid "Exception in top-level replication loop" msgstr "top-level复制圈出现异常" -#: swift/obj/replicator.py:603 +#: swift/obj/replicator.py:608 msgid "Running object replicator in script mode." msgstr "在加密模式下执行对象复制" -#: swift/obj/replicator.py:621 +#: swift/obj/replicator.py:626 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象复制完成(一次)。(%.02f minutes)" -#: swift/obj/replicator.py:628 +#: swift/obj/replicator.py:633 msgid "Starting object replicator in daemon mode." msgstr "在守护模式下开始对象复制" -#: swift/obj/replicator.py:632 +#: swift/obj/replicator.py:637 msgid "Starting object replication pass." msgstr "开始通过对象复制" -#: swift/obj/replicator.py:637 +#: swift/obj/replicator.py:642 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "对象复制完成。(%.02f minutes)" -#: swift/obj/server.py:202 +#: swift/obj/server.py:231 #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d" " response from %(ip)s:%(port)s/%(dev)s" msgstr "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/%(dev)s" -#: swift/obj/server.py:209 +#: swift/obj/server.py:238 #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " "async update later)" msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" -#: swift/obj/server.py:244 +#: swift/obj/server.py:273 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\"" -#: swift/obj/updater.py:62 +#: swift/obj/updater.py:63 #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "" -#: swift/obj/updater.py:77 +#: swift/obj/updater.py:78 msgid "Begin object update sweep" msgstr "开始对象更新扫除" -#: swift/obj/updater.py:103 +#: swift/obj/updater.py:104 #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" " successes, %(fail)s failures" msgstr "%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" -#: swift/obj/updater.py:112 +#: swift/obj/updater.py:113 #, python-format msgid "Object update sweep completed: %.02fs" msgstr "对象更新扫除完成:%.02fs" -#: swift/obj/updater.py:121 +#: swift/obj/updater.py:122 msgid "Begin object update single threaded sweep" msgstr "开始对象更新单线程扫除" -#: swift/obj/updater.py:135 +#: swift/obj/updater.py:136 #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures" msgstr "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败" -#: swift/obj/updater.py:187 +#: swift/obj/updater.py:179 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "执行同步等待文件 文件名不可知%s" -#: swift/obj/updater.py:217 +#: swift/obj/updater.py:209 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "错误 Pickle问题 隔离%s" -#: swift/obj/updater.py:282 +#: swift/obj/updater.py:274 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" -#: swift/proxy/server.py:380 +#: swift/proxy/server.py:405 msgid "ERROR Unhandled exception in request" msgstr "错误 未处理的异常发出请求" -#: swift/proxy/server.py:435 +#: swift/proxy/server.py:460 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" -#: swift/proxy/server.py:452 swift/proxy/server.py:470 +#: swift/proxy/server.py:477 swift/proxy/server.py:495 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" -#: swift/proxy/server.py:540 +#: swift/proxy/server.py:571 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" -#: swift/proxy/controllers/account.py:63 +#: swift/proxy/controllers/account.py:64 msgid "Account" msgstr "账号" -#: swift/proxy/controllers/base.py:698 swift/proxy/controllers/base.py:731 -#: swift/proxy/controllers/obj.py:192 swift/proxy/controllers/obj.py:319 -#: swift/proxy/controllers/obj.py:366 swift/proxy/controllers/obj.py:382 -#: swift/proxy/controllers/obj.py:593 +#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814 +#: swift/proxy/controllers/obj.py:364 
swift/proxy/controllers/obj.py:411 +#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643 +#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591 +#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908 +#: swift/proxy/controllers/obj.py:2093 msgid "Object" msgstr "对象" -#: swift/proxy/controllers/base.py:699 +#: swift/proxy/controllers/base.py:753 msgid "Trying to read during GET (retrying)" msgstr "执行GET时尝试读取(重新尝试)" -#: swift/proxy/controllers/base.py:732 +#: swift/proxy/controllers/base.py:815 msgid "Trying to read during GET" msgstr "执行GET时尝试读取" -#: swift/proxy/controllers/base.py:736 +#: swift/proxy/controllers/base.py:819 #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代理处读取%ss" -#: swift/proxy/controllers/base.py:741 +#: swift/proxy/controllers/base.py:824 msgid "Client disconnected on read" msgstr "客户读取时中断" -#: swift/proxy/controllers/base.py:743 +#: swift/proxy/controllers/base.py:826 msgid "Trying to send to client" msgstr "尝试发送到客户端" -#: swift/proxy/controllers/base.py:780 swift/proxy/controllers/base.py:1049 +#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "尝试执行%(method)s %(path)s" -#: swift/proxy/controllers/base.py:817 swift/proxy/controllers/base.py:1037 -#: swift/proxy/controllers/obj.py:357 swift/proxy/controllers/obj.py:402 +#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129 +#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450 +#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138 msgid "ERROR Insufficient Storage" msgstr "错误 存储空间不足" -#: swift/proxy/controllers/base.py:820 +#: swift/proxy/controllers/base.py:905 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器" -#: swift/proxy/controllers/base.py:1040 +#: swift/proxy/controllers/base.py:1132 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: swift/proxy/controllers/base.py:1152 +#: swift/proxy/controllers/base.py:1260 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" -#: swift/proxy/controllers/container.py:97 swift/proxy/controllers/obj.py:118 +#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161 msgid "Container" msgstr "容器" -#: swift/proxy/controllers/obj.py:320 +#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592 #, python-format msgid "Trying to write to %s" msgstr "尝试执行书写%s" -#: swift/proxy/controllers/obj.py:361 +#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" -#: swift/proxy/controllers/obj.py:367 +#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909 #, python-format msgid "Expect: 100-continue on %s" msgstr "已知:100-continue on %s" -#: swift/proxy/controllers/obj.py:383 +#: swift/proxy/controllers/obj.py:428 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "尝试执行获取最后的PUT状态%s" -#: swift/proxy/controllers/obj.py:406 +#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s" -#: swift/proxy/controllers/obj.py:663 +#: 
swift/proxy/controllers/obj.py:716 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "对象PUT返还 412,%(statuses)r " -#: swift/proxy/controllers/obj.py:672 +#: swift/proxy/controllers/obj.py:725 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:755 +#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "错误 客户读取超时(%ss)" -#: swift/proxy/controllers/obj.py:762 +#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055 msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" -#: swift/proxy/controllers/obj.py:767 +#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060 msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未发送足够" -#: swift/proxy/controllers/obj.py:813 +#: swift/proxy/controllers/obj.py:869 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "对象服务器返还%s不匹配etags" -#: swift/proxy/controllers/obj.py:817 +#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218 msgid "Object PUT" msgstr "对象上传" +#: swift/proxy/controllers/obj.py:2035 +#, python-format +msgid "Not enough object servers ack'ed (got %d)" +msgstr "" + +#: swift/proxy/controllers/obj.py:2094 +#, python-format +msgid "Trying to get %s status of PUT to %s" +msgstr "" + From 215cd551df8be066edafd2a1e16d0bd143ec214b Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 21 Apr 2015 17:38:04 -0700 Subject: [PATCH 15/98] Bulk upload: treat user xattrs as object metadata Currently, if you PUT a single object, then you can also associate metadata with it by putting it in the request headers, prefixed with "X-Object-Meta". However, if you're bulk-uploading objects, then you have no way to assign any metadata. The tar file format* allows for arbitrary UTF-8 key/value pairs to be associated with each file in an archive (as well as with the archive itself, but we don't care about that here). If a file has extended attributes, then tar will store those as key/value pairs. This commit makes bulk upload read those extended attributes, if present, and convert those to Swift object metadata. Attributes starting with "user.meta" are converted to object metadata, and "user.mime_type"** is converted to Content-Type. For example, if you have a file "setup.py": $ setfattr -n user.mime_type -v "application/python-setup" setup.py $ setfattr -n user.meta.lunch -v "burger and fries" setup.py $ setfattr -n user.meta.dinner -v "baked ziti" setup.py $ setfattr -n user.stuff -v "whee" setup.py This will get translated to headers: Content-Type: application/python-setup X-Object-Meta-Lunch: burger and fries X-Object-Meta-Dinner: baked ziti Swift will handle xattrs stored by both GNU and BSD tar***. Only xattrs user.mime_type and user.meta.* are processed; others are ignored. This brings bulk upload much closer to feature-parity with non-bulk upload. * The POSIX 1003.1-2001 (pax) format, at least. There are a few different, mutually-incompatible tar formats out there, because of course there are. This is the default format on GNU tar 1.27.1 or later. 
** http://standards.freedesktop.org/shared-mime-info-spec/latest/ar01s02.html#idm140622087713936 *** Even with pax-format tarballs, different encoders store xattrs slightly differently; for example, GNU tar stores the xattr "user.rubberducky" as pax header "SCHILY.xattr.user.rubberducky", while BSD tar (which uses libarchive) stores it as "LIBARCHIVE.xattr.user.rubberducky". One might wonder if this is some programmer's attempt at job security. Change-Id: I5e3ce87d31054f5239e86d47c45adbde2bb93640 --- swift/common/middleware/bulk.py | 27 ++++++ test/unit/common/middleware/test_bulk.py | 103 ++++++++++++++++++++++- 2 files changed, 129 insertions(+), 1 deletion(-) diff --git a/swift/common/middleware/bulk.py b/swift/common/middleware/bulk.py index 7dc69b6ff1..888ff2356a 100644 --- a/swift/common/middleware/bulk.py +++ b/swift/common/middleware/bulk.py @@ -75,6 +75,23 @@ def get_response_body(data_format, data_dict, error_list): return output +def pax_key_to_swift_header(pax_key): + if (pax_key == u"SCHILY.xattr.user.mime_type" or + pax_key == u"LIBARCHIVE.xattr.user.mime_type"): + return "Content-Type" + elif pax_key.startswith(u"SCHILY.xattr.user.meta."): + useful_part = pax_key[len(u"SCHILY.xattr.user.meta."):] + return "X-Object-Meta-" + useful_part.encode("utf-8") + elif pax_key.startswith(u"LIBARCHIVE.xattr.user.meta."): + useful_part = pax_key[len(u"LIBARCHIVE.xattr.user.meta."):] + return "X-Object-Meta-" + useful_part.encode("utf-8") + else: + # You can get things like atime/mtime/ctime or filesystem ACLs in + # pax headers; those aren't really user metadata. The same goes for + # other, non-user metadata. + return None + + class Bulk(object): """ Middleware that will do many operations on a single request. @@ -464,6 +481,16 @@ class Bulk(object): new_env['HTTP_USER_AGENT'] = \ '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT') create_obj_req = Request.blank(destination, new_env) + + for pax_key, pax_value in tar_info.pax_headers.items(): + header_name = pax_key_to_swift_header(pax_key) + if header_name: + # Both pax_key and pax_value are unicode + # strings; the key is already UTF-8 encoded, but + # we still have to encode the value. 
+ create_obj_req.headers[header_name] = \ + pax_value.encode("utf-8") + resp = create_obj_req.get_response(self.app) containers_accessed.add(container) if resp.is_success: diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index 0f0b83a7d4..2bd0b78158 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,9 +26,11 @@ from tempfile import mkdtemp from StringIO import StringIO from eventlet import sleep from mock import patch, call +from test.unit.common.middleware.helpers import FakeSwift from swift.common import utils, constraints from swift.common.middleware import bulk -from swift.common.swob import Request, Response, HTTPException +from swift.common.swob import Request, Response, HTTPException, \ + HTTPNoContent, HTTPCreated, HeaderKeyDict from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED @@ -126,6 +129,104 @@ def build_tar_tree(tar, start_path, tree_obj, base_path=''): tar.addfile(tar_info) +class TestUntarMetadata(unittest.TestCase): + def setUp(self): + self.app = FakeSwift() + self.bulk = bulk.filter_factory({})(self.app) + self.testdir = mkdtemp(suffix='tmp_test_bulk') + + def tearDown(self): + rmtree(self.testdir, ignore_errors=1) + + def test_extract_metadata(self): + self.app.register('HEAD', '/v1/a/c?extract-archive=tar', + HTTPNoContent, {}, None) + self.app.register('PUT', '/v1/a/c/obj1?extract-archive=tar', + HTTPCreated, {}, None) + self.app.register('PUT', '/v1/a/c/obj2?extract-archive=tar', + HTTPCreated, {}, None) + + # It's a real pain to instantiate TarInfo objects directly; they + # really want to come from a file on disk or a tarball. So, we write + # out some files and add pax headers to them as they get placed into + # the tarball. + with open(os.path.join(self.testdir, "obj1"), "w") as fh1: + fh1.write("obj1 contents\n") + with open(os.path.join(self.testdir, "obj2"), "w") as fh2: + fh2.write("obj2 contents\n") + + tar_ball = StringIO() + tar_file = tarfile.TarFile.open(fileobj=tar_ball, mode="w", + format=tarfile.PAX_FORMAT) + + # With GNU tar 1.27.1 or later (possibly 1.27 as well), a file with + # extended attribute user.thingy = dingy gets put into the tarfile + # with pax_headers containing key/value pair + # (SCHILY.xattr.user.thingy, dingy), both unicode strings (py2: type + # unicode, not type str). + # + # With BSD tar (libarchive), you get key/value pair + # (LIBARCHIVE.xattr.user.thingy, dingy), which strikes me as + # gratuitous incompatibility. + # + # Still, we'll support uploads with both. Just heap more code on the + # problem until you can forget it's under there. 
+ with open(os.path.join(self.testdir, "obj1")) as fh1: + tar_info1 = tar_file.gettarinfo(fileobj=fh1, + arcname="obj1") + tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \ + u'application/food-diary' + tar_info1.pax_headers[u'SCHILY.xattr.user.meta.lunch'] = \ + u'sopa de albóndigas' + tar_info1.pax_headers[ + u'SCHILY.xattr.user.meta.afternoon-snack'] = \ + u'gigantic bucket of coffee' + tar_file.addfile(tar_info1, fh1) + + with open(os.path.join(self.testdir, "obj2")) as fh2: + tar_info2 = tar_file.gettarinfo(fileobj=fh2, + arcname="obj2") + tar_info2.pax_headers[ + u'LIBARCHIVE.xattr.user.meta.muppet'] = u'bert' + tar_info2.pax_headers[ + u'LIBARCHIVE.xattr.user.meta.cat'] = u'fluffy' + tar_info2.pax_headers[ + u'LIBARCHIVE.xattr.user.notmeta'] = u'skipped' + tar_file.addfile(tar_info2, fh2) + + tar_ball.seek(0) + + req = Request.blank('/v1/a/c?extract-archive=tar') + req.environ['REQUEST_METHOD'] = 'PUT' + req.environ['wsgi.input'] = tar_ball + req.headers['transfer-encoding'] = 'chunked' + req.headers['accept'] = 'application/json;q=1.0' + + resp = req.get_response(self.bulk) + self.assertEqual(resp.status_int, 200) + + # sanity check to make sure the upload worked + upload_status = utils.json.loads(resp.body) + self.assertEqual(upload_status['Number Files Created'], 2) + + put1_headers = HeaderKeyDict(self.app.calls_with_headers[1][2]) + self.assertEqual( + put1_headers.get('Content-Type'), + 'application/food-diary') + self.assertEqual( + put1_headers.get('X-Object-Meta-Lunch'), + 'sopa de alb\xc3\xb3ndigas') + self.assertEqual( + put1_headers.get('X-Object-Meta-Afternoon-Snack'), + 'gigantic bucket of coffee') + + put2_headers = HeaderKeyDict(self.app.calls_with_headers[2][2]) + self.assertEqual(put2_headers.get('X-Object-Meta-Muppet'), 'bert') + self.assertEqual(put2_headers.get('X-Object-Meta-Cat'), 'fluffy') + self.assertEqual(put2_headers.get('Content-Type'), None) + self.assertEqual(put2_headers.get('X-Object-Meta-Blah'), None) + + class TestUntar(unittest.TestCase): def setUp(self): From 43ace3c62893364b6e3c130df56438995627598d Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Mon, 20 Apr 2015 12:17:56 -0700 Subject: [PATCH 16/98] Make RingBuilders deep-copy-able We used to be able to deep-copy RingBuilder objects, but the addition of debug logging (8d3b3b2) broke that since you can't deep-copy a Python logger. This commit fixes that. Swift doesn't really deep-copy RingBuilders anywhere, but third-party code might. 
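To illustrate for third-party code, here is a minimal sketch of the
pattern this change adopts - hypothetical names, not Swift's actual
classes: route __deepcopy__ through a plain-data round trip so the
un-copyable logger is never handed to deepcopy.

    import copy
    import logging

    class Builder(object):
        def __init__(self, devs=None):
            # loggers hold thread locks, which is why a naive
            # deepcopy of this object can blow up
            self.logger = logging.getLogger('builder')
            self.devs = devs or [{'id': 0, 'weight': 1.0}]

        def to_dict(self):
            # only plain, copyable data - no logger
            return {'devs': self.devs}

        @classmethod
        def from_dict(cls, data):
            return cls(devs=data['devs'])

        def __deepcopy__(self, memo):
            # pass memo along so enclosing containers don't
            # re-copy anything they've already seen
            return type(self).from_dict(copy.deepcopy(self.to_dict(), memo))

    b = Builder()
    b2 = copy.deepcopy(b)
    assert b2.devs == b.devs and b2.devs is not b.devs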
Change-Id: If8bdadd93d9980db3d8a093f32d76ca604de9301 --- swift/cli/ringbuilder.py | 3 +-- swift/common/ring/builder.py | 12 ++++++++++++ test/unit/common/ring/test_builder.py | 21 +++++++++++++++++++++ 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index eac586e267..f5c8c14792 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -1073,8 +1073,7 @@ swift-ring-builder write_builder [min_part_hours] '_last_part_gather_start': 0, '_remove_devs': [], } - builder = RingBuilder(1, 1, 1) - builder.copy_from(builder_dict) + builder = RingBuilder.from_dict(builder_dict) for parts in builder._replica2part2dev: for dev_id in parts: builder.devs[dev_id]['parts'] += 1 diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 6672fdbecc..c0a37f8a54 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -21,6 +21,7 @@ import logging import math import random import cPickle as pickle +from copy import deepcopy from array import array from collections import defaultdict @@ -125,6 +126,12 @@ class RingBuilder(object): 'ring, or all devices have been ' 'deleted') + @classmethod + def from_dict(cls, builder_data): + b = cls(1, 1, 1) # Dummy values + b.copy_from(builder_data) + return b + def copy_from(self, builder): """ Reinitializes this RingBuilder instance from data obtained from the @@ -173,6 +180,11 @@ class RingBuilder(object): for dev in self._iter_devs(): dev.setdefault("region", 1) + def __deepcopy__(self, memo): + the_copy = type(self).from_dict(deepcopy(self.to_dict(), memo)) + memo[id(self)] = the_copy + return the_copy + def to_dict(self): """ Returns a dict that can be used later with copy_from to diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index e2dc80824c..a05823368c 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import copy
 import errno
 import mock
 import operator
@@ -84,6 +85,26 @@ class TestRingBuilder(unittest.TestCase):
         ring.RingBuilder(8, 3, 0)  # passes by not crashing
         self.assertRaises(ValueError, ring.RingBuilder, 8, 3, -1)

+    def test_deepcopy(self):
+        rb = ring.RingBuilder(8, 3, 1)
+        rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
+                    'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'})
+        rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
+                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
+        rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 1,
+                    'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'})
+        rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 1,
+                    'ip': '127.0.0.1', 'port': 10004, 'device': 'sda1'})
+        rb.rebalance()
+        rb_copy = copy.deepcopy(rb)
+
+        self.assertEqual(rb.to_dict(), rb_copy.to_dict())
+        self.assertTrue(rb.devs is not rb_copy.devs)
+        self.assertTrue(rb._replica2part2dev is not rb_copy._replica2part2dev)
+        self.assertTrue(rb._last_part_moves is not rb_copy._last_part_moves)
+        self.assertTrue(rb._remove_devs is not rb_copy._remove_devs)
+        self.assertTrue(rb._dispersion_graph is not rb_copy._dispersion_graph)
+
     def test_get_ring(self):
         rb = ring.RingBuilder(8, 3, 1)
         rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,

From 03380380efb5d5c0b2a3acfaa64b486ee2cb6e64 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Thu, 23 Apr 2015 19:39:16 -0700
Subject: [PATCH 17/98] Simplify ring.builder.RingBuilder.__deepcopy__

Only container classes (lists, sets, dicts, graphs, collections, etc.)
need to track objects they deepcopy in the memo dict - particularly when
they may contain other containers!

As they recreate a new container with the same items as themselves,
they'll reference the memo for each item they contain before making a
deepcopy of it, and place a reference to the copied item into the memo
after they do.

Trying to help out some other container class in this endeavor by
attempting to add ourselves to the memo dict in some useful manner on
their behalf, however, is not helpful.

All we need to do to be a good __deepcopy__ implementation is make sure
we pass on memo to our calls of deepcopy so that other container classes
can avoid making additional deepcopies of our containers if they already
have a memoized copy (which would be odd since unique instances of
RingBuilders aren't expected to share state, but hey - python doesn't
have private attributes so you never know!)

Change-Id: Ifac444dffbf79d650b2d858f6282e05d8ea741a0
---
 swift/common/ring/builder.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index c0a37f8a54..e51ef71c9f 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -181,9 +181,7 @@ class RingBuilder(object):
             dev.setdefault("region", 1)

     def __deepcopy__(self, memo):
-        the_copy = type(self).from_dict(deepcopy(self.to_dict(), memo))
-        memo[id(self)] = the_copy
-        return the_copy
+        return type(self).from_dict(deepcopy(self.to_dict(), memo))

     def to_dict(self):
         """

From 8cdf0fdebe9eb782322fccfc11253dc959cf321d Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Mon, 27 Apr 2015 13:29:50 -0700
Subject: [PATCH 18/98] Fix account replication during pre-storage-policy upgrade

Old account schemas don't send the storage_policy_index key for
container rows during replication, and if the receiving end is already
running an upgraded server it is surprised with a KeyError.
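To make the failure concrete, here is a rough sketch with made-up row
data (not the real broker internals) of what the upgraded receiver
trips over, and the one-line remedy used below:

    # row as sent by an old, pre-storage-policy account schema
    rec = {'name': 'test_name', 'put_timestamp': '1421904000.00000',
           'delete_timestamp': '0', 'object_count': 1,
           'bytes_used': 2, 'deleted': 0}

    # a new-schema merge_items effectively builds its record with
    # rec['storage_policy_index'], which raises KeyError here:
    try:
        rec['storage_policy_index']
    except KeyError:
        pass  # this is the crash the patch prevents

    # the fix: default in the legacy value before building the record
    rec.setdefault('storage_policy_index', 0)
    assert rec['storage_policy_index'] == 0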
Normally this would work itself out if the old schema received any
updates from the container layer, or a new container was created, or a
row sync was required from another account database - but if the
account databases have rows out of sync and there's no activity in the
account otherwise, there's nothing to force the old schemas to be
upgraded.

Rather than force the old schema that already has a complete set of
container rows to migrate even in the absence of activity, we can just
fill in a default legacy value for the storage policy index and allow
the accounts to get back in sync and migrate the next time a container
update occurs.

FWIW, I was never able to get a cluster upgrade to get stuck in this
state without some sort of account failure that forced them to get
their rows out of sync (in my case I just unlinked a pending file and
then made sure to force all my account databases to commit pending
files before upgrading - leading to an upgraded cluster that absolutely
needed account-replication to solve a row mismatch for inactive
accounts with old schemas).

Closes-Bug #1424108
Change-Id: Iaf4ef834eb24f0e11a52cc22b93a864574fabf83
---
 swift/account/backend.py          |  1 +
 test/unit/account/test_backend.py | 41 ++++++++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/swift/account/backend.py b/swift/account/backend.py
index 3ff42518d2..ec28394626 100644
--- a/swift/account/backend.py
+++ b/swift/account/backend.py
@@ -460,6 +460,7 @@ class AccountBroker(DatabaseBroker):
             max_rowid = -1
             curs = conn.cursor()
             for rec in item_list:
+                rec.setdefault('storage_policy_index', 0)  # legacy
                 record = [rec['name'], rec['put_timestamp'],
                           rec['delete_timestamp'], rec['object_count'],
                           rec['bytes_used'], rec['deleted'],
diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py
index d231fea741..d262689e87 100644
--- a/test/unit/account/test_backend.py
+++ b/test/unit/account/test_backend.py
@@ -32,7 +32,7 @@ import random

 from swift.account.backend import AccountBroker
 from swift.common.utils import Timestamp
-from test.unit import patch_policies, with_tempdir
+from test.unit import patch_policies, with_tempdir, make_timestamp_iter
 from swift.common.db import DatabaseConnectionError
 from swift.common.storage_policy import StoragePolicy, POLICIES

@@ -1120,6 +1120,45 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
                 conn.execute('SELECT * FROM policy_stat')
                 conn.execute('SELECT storage_policy_index FROM container')

+    @with_tempdir
+    def test_pre_storage_policy_replication(self, tempdir):
+        ts = make_timestamp_iter()
+
+        # make two account database "replicas"
+        old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'),
+                                   account='a')
+        old_broker.initialize(ts.next().internal)
+        new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'),
+                                   account='a')
+        new_broker.initialize(ts.next().internal)
+
+        # manually insert an existing row to avoid migration for old database
+        with old_broker.get() as conn:
+            conn.execute('''
+                INSERT INTO container (name, put_timestamp,
+                    delete_timestamp, object_count, bytes_used,
+                    deleted)
+                VALUES (?, ?, ?, ?, ?, ?)
+            ''', ('test_name', ts.next().internal, 0, 1, 2, 0))
+            conn.commit()
+
+        # get replication info and rows from old database
+        info = old_broker.get_info()
+        rows = old_broker.get_items_since(0, 10)
+
+        # "send" replication rows to new database
+        new_broker.merge_items(rows, info['id'])
+
+        # make sure "test_name" container is in new database
+        self.assertEqual(new_broker.get_info()['container_count'], 1)
+        for c in new_broker.list_containers_iter(1, None, None, None, None):
+            self.assertEqual(c, ('test_name', 1, 2, 0))
+
+        # full migration successful
+        with new_broker.get() as conn:
+            conn.execute('SELECT * FROM policy_stat')
+            conn.execute('SELECT storage_policy_index FROM container')
+

     def pre_track_containers_create_policy_stat(self, conn):
         """

From dbb9d4b7938f24ee588d260c9b51ca65d5095749 Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Mon, 27 Apr 2015 08:52:18 +0200
Subject: [PATCH 19/98] Add missing docstring in direct_client

Added a missing docstring to the direct_delete_container method. Also
checked other docstrings in the same file and fixed a wrong docstring
element in direct_delete_object. Added a ":raises:" docstring entry to
all methods that raise an exception.

Change-Id: If463a0f9ddff3fe2d13f6d97fcfa955e91d0f01f
---
 swift/common/direct_client.py | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py
index 35ca24a64c..c95bc44128 100644
--- a/swift/common/direct_client.py
+++ b/swift/common/direct_client.py
@@ -153,6 +153,7 @@ def direct_head_container(node, part, account, container, conn_timeout=5,
     :param conn_timeout: timeout in seconds for establishing the connection
     :param response_timeout: timeout in seconds for getting the response
     :returns: a dict containing the response's headers in a HeaderKeyDict
+    :raises ClientException: HTTP HEAD request failed
     """
     path = '/%s/%s' % (account, container)
     with Timeout(conn_timeout):
@@ -200,6 +201,18 @@ def direct_get_container(node, part, account, container, marker=None,

 def direct_delete_container(node, part, account, container, conn_timeout=5,
                             response_timeout=15, headers=None):
+    """
+    Delete container directly from the container server.
+
+    :param node: node dictionary from the ring
+    :param part: partition the container is on
+    :param account: account name
+    :param container: container name
+    :param conn_timeout: timeout in seconds for establishing the connection
+    :param response_timeout: timeout in seconds for getting the response
+    :param headers: dict to be passed into HTTPConnection headers
+    :raises ClientException: HTTP DELETE request failed
+    """
     if headers is None:
         headers = {}

@@ -274,6 +287,7 @@ def direct_head_object(node, part, account, container, obj, conn_timeout=5,
     :param response_timeout: timeout in seconds for getting the response
     :param headers: dict to be passed into HTTPConnection headers
     :returns: a dict containing the response's headers in a HeaderKeyDict
+    :raises ClientException: HTTP HEAD request failed
     """
     if headers is None:
         headers = {}
@@ -312,6 +326,7 @@ def direct_get_object(node, part, account, container, obj, conn_timeout=5,
     :param headers: dict to be passed into HTTPConnection headers
     :returns: a tuple of (response headers, the object's contents) The
               response headers will be a HeaderKeyDict.
+    :raises ClientException: HTTP GET request failed
     """
     if headers is None:
         headers = {}
@@ -363,6 +378,7 @@ def direct_put_object(node, part, account, container, name, contents,
     :param response_timeout: timeout in seconds for getting the response
     :param chunk_size: if defined, chunk size of data to send.
     :returns: etag from the server response
+    :raises ClientException: HTTP PUT request failed
     """

     path = '/%s/%s/%s' % (account, container, name)
@@ -462,7 +478,7 @@ def direct_delete_object(node, part, account, container, obj,
     :param obj: object name
     :param conn_timeout: timeout in seconds for establishing the connection
     :param response_timeout: timeout in seconds for getting the response
-    :returns: response from server
+    :raises ClientException: HTTP DELETE request failed
     """
     if headers is None:
         headers = {}
@@ -493,7 +509,8 @@ def retry(func, *args, **kwargs):
     :param kwargs: keyward arguments to send to func (if retries or
                    error_log are sent, they will be deleted from kwargs
                    before sending on to func)
-    :returns: restult of func
+    :returns: result of func
+    :raises ClientException: all retries failed
     """
     retries = 5
     if 'retries' in kwargs:

From bfbc94c3cb34eb9ff288fb817dee667cc870d9eb Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Tue, 28 Apr 2015 10:45:50 +0100
Subject: [PATCH 20/98] Fix intermittent container replicator test failure

Intermittent failure of this test could be due to insufficient time
elapsing either between the local and remote dbs being created or
during the debug_timing calls. This patch forces greater timestamp
separation and forces debug_timing to always log timings.

Also add a message to the failing assertion so that if this does fail
again we get some clue as to why.

Closes-Bug: 1369663
Change-Id: I4b69b2e759d586a14abd0931a68dbdf256d57c32
---
 test/unit/container/test_replicator.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py
index 399bb8bb19..49fea253cc 100644
--- a/test/unit/container/test_replicator.py
+++ b/test/unit/container/test_replicator.py
@@ -30,7 +30,7 @@ from swift.common.utils import Timestamp
 from swift.common.storage_policy import POLICIES

 from test.unit.common import test_db_replicator
-from test.unit import patch_policies
+from test.unit import patch_policies, make_timestamp_iter
 from contextlib import contextmanager


@@ -99,24 +99,26 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
         self.assertEqual(1, daemon.stats['no_change'])

     def test_sync_remote_with_timings(self):
+        ts_iter = make_timestamp_iter()
         # setup a local container
         broker = self._get_broker('a', 'c', node_index=0)
-        put_timestamp = time.time()
-        broker.initialize(put_timestamp, POLICIES.default.idx)
+        put_timestamp = ts_iter.next()
+        broker.initialize(put_timestamp.internal, POLICIES.default.idx)
         broker.update_metadata(
-            {'x-container-meta-test': ('foo', put_timestamp)})
+            {'x-container-meta-test': ('foo', put_timestamp.internal)})
         # setup remote container
         remote_broker = self._get_broker('a', 'c', node_index=1)
-        remote_broker.initialize(time.time(), POLICIES.default.idx)
-        timestamp = time.time()
+        remote_broker.initialize(ts_iter.next().internal, POLICIES.default.idx)
+        timestamp = ts_iter.next()
         for db in (broker, remote_broker):
-            db.put_object('/a/c/o', timestamp, 0, 'content-type', 'etag',
-                          storage_policy_index=db.storage_policy_index)
+            db.put_object(
+                '/a/c/o', timestamp.internal, 0, 'content-type', 'etag',
+                storage_policy_index=db.storage_policy_index)
         # replicate
         daemon = replicator.ContainerReplicator({})
         part, node = self._get_broker_part_node(remote_broker)
         info = broker.get_replication_info()
-        with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', 0):
+        with mock.patch.object(db_replicator, 'DEBUG_TIMINGS_THRESHOLD', -1):
             success = daemon._repl_to_node(node, broker, part, info)
         # nothing to do
         self.assertTrue(success)
@@ -124,7 +126,10 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync):
         expected_timings = ('info', 'update_metadata', 'merge_timestamps',
                             'get_sync', 'merge_syncs')
         debug_lines = self.rpc.logger.logger.get_lines_for_level('debug')
-        self.assertEqual(len(expected_timings), len(debug_lines))
+        self.assertEqual(len(expected_timings), len(debug_lines),
+                         'Expected %s debug lines but only got %s: %s' %
+                         (len(expected_timings), len(debug_lines),
+                          debug_lines))
         for metric in expected_timings:
             expected = 'replicator-rpc-sync time for %s:' % metric
             self.assert_(any(expected in line for line in debug_lines),

From 03536dbb55c219b94389e635babec2df2e2759fa Mon Sep 17 00:00:00 2001
From: Prashanth Pai
Date: Wed, 29 Apr 2015 12:11:59 +0530
Subject: [PATCH 21/98] Fix incorrect passing of file object to fsync()

swift.common.utils.fsync() requires a file descriptor as an argument,
but a file object was being passed.

Change-Id: I316b58f6bc37de0945eff551e4e50565653664f5
Signed-off-by: Prashanth Pai
---
 swift/obj/diskfile.py          | 4 ++--
 test/unit/obj/test_diskfile.py | 3 +--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 39eff67bde..3920315551 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -1786,8 +1786,8 @@ class ECDiskFileWriter(DiskFileWriter):
     def _finalize_durable(self, durable_file_path):
         exc = msg = None
         try:
-            with open(durable_file_path, 'w') as _fd:
-                fsync(_fd)
+            with open(durable_file_path, 'w') as _fp:
+                fsync(_fp.fileno())
             try:
                 self.manager.hash_cleanup_listdir(self._datadir)
             except OSError:
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 2ccf3b1364..a84cafc8b4 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -2650,8 +2650,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
             }[policy.policy_type]
             self.assertEqual(expected, mock_fsync.call_count)
             if policy.policy_type == EC_POLICY:
-                durable_file = '%s.durable' % timestamp.internal
-                self.assertTrue(durable_file in str(mock_fsync.call_args[0]))
+                self.assertTrue(isinstance(mock_fsync.call_args[0][0], int))

     def test_commit_ignores_hash_cleanup_listdir_error(self):
         for policy in POLICIES:

From 94215049fd37f810ddf4e6b8122cce02aea3e6e3 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 4 May 2015 15:08:51 -0700
Subject: [PATCH 22/98] Bump up a timeout in a test

Got a slow crappy VM like I do? You might see this fail occasionally.
Bump up the timeout a little to help it out.
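For context, a rough sketch of the waitall() semantics the test leans
on - this assumes run_test simply sleeps for the given duration and
returns it, which is what the asserted result implies:

    import eventlet
    from swift.common.utils import GreenAsyncPile

    def run_test(sleep_duration):
        eventlet.sleep(sleep_duration)
        return sleep_duration

    pile = GreenAsyncPile(3)
    pile.spawn(run_test, 0.1)
    pile.spawn(run_test, 1.0)
    # waitall(timeout) only returns the results that finish before the
    # timeout expires. With waitall(0.2) the 0.1s task has just 0.1s of
    # headroom, which a loaded VM can easily eat; 0.5s keeps the intent
    # (the 1.0s task still times out) with a lot more slack.
    print(pile.waitall(0.5))  # [0.1]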
Change-Id: I8c0e5b99012830ea3525fa55b0811268db3da2a2 --- test/unit/common/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 22aa3db5e1..48610c1a7b 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -4482,7 +4482,7 @@ class TestGreenAsyncPile(unittest.TestCase): pile = utils.GreenAsyncPile(3) pile.spawn(run_test, 0.1) pile.spawn(run_test, 1.0) - self.assertEqual(pile.waitall(0.2), [0.1]) + self.assertEqual(pile.waitall(0.5), [0.1]) self.assertEqual(completed[0], 1) def test_waitall_timeout_completes(self): From c77c79b2c9b9ca7790c29577341cf36c5e9012cf Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 5 May 2015 06:08:09 +0000 Subject: [PATCH 23/98] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I9b0156c7fc315182d80604bf353586455bbc34d1 --- swift/locale/zh_CN/LC_MESSAGES/swift.po | 523 +++--------------------- 1 file changed, 61 insertions(+), 462 deletions(-) diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 36f2767712..1352c93f83 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -8,283 +8,226 @@ msgid "" msgstr "" "Project-Id-Version: Swift\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-16 06:06+0000\n" +"POT-Creation-Date: 2015-05-05 06:08+0000\n" "PO-Revision-Date: 2015-04-15 12:48+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) " -"(http://www.transifex.com/projects/p/swift/language/zh_CN/)\n" +"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/" +"language/zh_CN/)\n" "Plural-Forms: nplurals=1; plural=0\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: swift/account/auditor.py:59 #, python-format msgid "" -"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed" -" audit" +"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " +"audit" msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败" -#: swift/account/auditor.py:82 msgid "Begin account audit pass." 
msgstr "开始账号审计通过" -#: swift/account/auditor.py:88 swift/container/auditor.py:86 msgid "ERROR auditing" msgstr "错误 审计" -#: swift/account/auditor.py:93 #, python-format msgid "Account audit pass completed: %.02fs" msgstr "账号审计完成:%.02fs" -#: swift/account/auditor.py:99 msgid "Begin account audit \"once\" mode" msgstr "开始账号审计\"once\"模式" -#: swift/account/auditor.py:104 #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "账号审计\"once\"模式完成: %.02fs" -#: swift/account/auditor.py:123 #, python-format msgid "" -"The total %(key)s for the container (%(total)s) does not match the sum of" -" %(key)s across policies (%(sum)s)" +"The total %(key)s for the container (%(total)s) does not match the sum of " +"%(key)s across policies (%(sum)s)" msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)" -#: swift/account/auditor.py:149 #, python-format msgid "Audit Failed for %s: %s" msgstr "审计失败%s: %s" -#: swift/account/auditor.py:153 #, python-format msgid "ERROR Could not get account info %s" msgstr "错误:无法获取账号信息%s" -#: swift/account/reaper.py:134 swift/common/utils.py:2127 -#: swift/obj/diskfile.py:476 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "挂载失败 跳过%s" -#: swift/account/reaper.py:138 msgid "Exception in top-level account reaper loop" msgstr "异常出现在top-level账号reaper环" -#: swift/account/reaper.py:141 #, python-format msgid "Devices pass completed: %.02fs" msgstr "设备通过完成: %.02fs" -#: swift/account/reaper.py:238 #, python-format msgid "Beginning pass on account %s" msgstr "账号%s开始通过" -#: swift/account/reaper.py:255 #, python-format msgid "Exception with containers for account %s" msgstr "账号%s内容器出现异常" -#: swift/account/reaper.py:262 #, python-format msgid "Exception with account %s" msgstr "账号%s出现异常" -#: swift/account/reaper.py:263 #, python-format msgid "Incomplete pass on account %s" msgstr "账号%s未完成通过" -#: swift/account/reaper.py:265 #, python-format msgid ", %s containers deleted" msgstr ",删除容器%s" -#: swift/account/reaper.py:267 #, python-format msgid ", %s objects deleted" msgstr ",删除对象%s" -#: swift/account/reaper.py:269 #, python-format msgid ", %s containers remaining" msgstr ",剩余容器%s" -#: swift/account/reaper.py:272 #, python-format msgid ", %s objects remaining" msgstr ",剩余对象%s" -#: swift/account/reaper.py:274 #, python-format msgid ", %s containers possibly remaining" msgstr ",可能剩余容器%s" -#: swift/account/reaper.py:277 #, python-format msgid ", %s objects possibly remaining" msgstr ",可能剩余对象%s" -#: swift/account/reaper.py:280 msgid ", return codes: " msgstr ",返回代码:" -#: swift/account/reaper.py:284 #, python-format msgid ", elapsed: %.02fs" msgstr ",耗时:%.02fs" -#: swift/account/reaper.py:290 #, python-format msgid "Account %s has not been reaped since %s" msgstr "账号%s自%s起未被reaped" -#: swift/account/reaper.py:349 swift/account/reaper.py:397 -#: swift/account/reaper.py:464 swift/container/updater.py:306 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" -#: swift/account/reaper.py:369 #, python-format -msgid "Exception with objects for container %(container)s for account %(account)s" +msgid "" +"Exception with objects for container %(container)s for account %(account)s" msgstr "账号%(account)s容器%(container)s的对象出现异常" -#: swift/account/server.py:275 swift/container/server.py:582 -#: swift/obj/server.py:910 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "%(method)s %(path)s出现错误__call__ error" -#: swift/common/bufferedhttp.py:157 #, 
python-format msgid "Error encoding to UTF-8: %s" msgstr "UTF-8编码错误:%s" -#: swift/common/container_sync_realms.py:59 -#: swift/common/container_sync_realms.py:68 #, python-format msgid "Could not load %r: %s" msgstr "无法下载%r: %s" -#: swift/common/container_sync_realms.py:81 #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "%r中mtime_check_interval出现错误:%s" -#: swift/common/db.py:347 #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "隔离%s和%s 因为%s数据库" -#: swift/common/db.py:402 msgid "Broker error trying to rollback locked connection" msgstr "服务器错误并尝试去回滚已经锁住的链接" -#: swift/common/db.py:605 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "不可用的等待输入%(file)s: %(entry)s" -#: swift/common/db_replicator.py:143 #, python-format msgid "ERROR reading HTTP response from %s" msgstr "读取HTTP错误 响应来源%s" -#: swift/common/db_replicator.py:193 #, python-format -msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" +msgid "" +"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs" -#: swift/common/db_replicator.py:199 #, python-format msgid "Removed %(remove)d dbs" msgstr "删除%(remove)d dbs" -#: swift/common/db_replicator.py:200 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s成功,%(failure)s失败" -#: swift/common/db_replicator.py:231 #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "错误 rsync失败 %(code)s: %(args)s" -#: swift/common/db_replicator.py:294 #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "失败响应错误%(status)s来自%(host)s" -#: swift/common/db_replicator.py:453 swift/common/db_replicator.py:678 #, python-format msgid "Quarantining DB %s" msgstr "隔离DB%s" -#: swift/common/db_replicator.py:456 #, python-format msgid "ERROR reading db %s" msgstr "错误 读取db %s" -#: swift/common/db_replicator.py:487 #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "错误 远程驱动器无法挂载 %s" -#: swift/common/db_replicator.py:489 #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "错误 同步 %(file)s 和 节点%(node)s" -#: swift/common/db_replicator.py:517 #, python-format msgid "ERROR while trying to clean up %s" msgstr "清理时出现错误%s" -#: swift/common/db_replicator.py:543 msgid "ERROR Failed to get my own IPs?" msgstr "错误 无法获得我方IPs?" -#: swift/common/db_replicator.py:553 #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "因无法挂载跳过%(device)s" -#: swift/common/db_replicator.py:562 msgid "Beginning replication run" msgstr "开始运行复制" -#: swift/common/db_replicator.py:567 msgid "Replication run OVER" msgstr "复制运行结束" -#: swift/common/db_replicator.py:580 msgid "ERROR trying to replicate" msgstr "尝试复制时发生错误" -#: swift/common/internal_client.py:193 #, python-format msgid "Unexpected response: %s" msgstr "意外响应:%s" -#: swift/common/manager.py:63 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:无法修改文件描述限制。是否按非root运行?" -#: swift/common/manager.py:70 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存极限,是否按非root运行?" -#: swift/common/manager.py:77 msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:无法修改最大运行极限,是否按非root运行?" 
-#: swift/common/manager.py:195 msgid "" "\n" "user quit" @@ -292,239 +235,164 @@ msgstr "" "\n" "用户退出" -#: swift/common/manager.py:232 swift/common/manager.py:547 #, python-format msgid "No %s running" msgstr "无%s账号运行" -#: swift/common/manager.py:245 #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s)显示已停止" -#: swift/common/manager.py:255 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "等待%s秒直到%s停止;放弃" -#: swift/common/manager.py:439 -#, python-format -msgid "Unable to locate config number %s for %s" -msgstr "" - -#: swift/common/manager.py:442 -#, python-format -msgid "Unable to locate config for %s" -msgstr "" - -#: swift/common/manager.py:445 msgid "Found configs:" msgstr "找到配置" -#: swift/common/manager.py:489 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "发出信号%s pid: %s 信号: %s" -#: swift/common/manager.py:496 #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" -#: swift/common/manager.py:499 #, python-format msgid "No permission to signal PID %d" msgstr "无权限发送信号PID%d" -#: swift/common/manager.py:544 #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d无法运行(%s)" -#: swift/common/manager.py:551 swift/common/manager.py:644 -#: swift/common/manager.py:647 #, python-format msgid "%s running (%s - %s)" msgstr "%s运行(%s - %s)" -#: swift/common/manager.py:650 #, python-format msgid "%s already started..." msgstr "%s已启动..." -#: swift/common/manager.py:659 #, python-format msgid "Running %s once" msgstr "运行%s一次" -#: swift/common/manager.py:661 #, python-format msgid "Starting %s" msgstr "启动%s" -#: swift/common/manager.py:668 #, python-format msgid "%s does not exist" msgstr "%s不存在" -#: swift/common/memcached.py:191 #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "%(action)s超时 高性能内存对象缓存: %(server)s" -#: swift/common/memcached.py:194 #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "%(action)s错误 高性能内存对象缓存: %(server)s" -#: swift/common/memcached.py:219 #, python-format msgid "Error limiting server %s" msgstr "服务器出现错误%s " -#: swift/common/request_helpers.py:102 -#, python-format -msgid "No policy with index %s" -msgstr "" - -#: swift/common/request_helpers.py:395 -msgid "ERROR: An error occurred while retrieving segments" -msgstr "" - -#: swift/common/utils.py:388 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "无法查询到%s 保留为no-op" -#: swift/common/utils.py:578 -msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." +msgid "" +"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" -#: swift/common/utils.py:662 -#, python-format -msgid "Unable to perform fsync() on directory %s: %s" -msgstr "" - -#: swift/common/utils.py:1074 -#, python-format -msgid "%s: Connection reset by peer" -msgstr "" - -#: swift/common/utils.py:1076 swift/common/utils.py:1079 -#, python-format -msgid "%s: %s" -msgstr "" - -#: swift/common/utils.py:1314 msgid "Connection refused" msgstr "连接被拒绝" -#: swift/common/utils.py:1316 msgid "Host unreachable" msgstr "无法连接到主机" -#: swift/common/utils.py:1318 msgid "Connection timeout" msgstr "连接超时" -#: swift/common/utils.py:1620 msgid "UNCAUGHT EXCEPTION" msgstr "未捕获的异常" -#: swift/common/utils.py:1675 msgid "Error: missing config path argument" msgstr "错误:设置路径信息丢失" -#: swift/common/utils.py:1680 #, python-format msgid "Error: unable to locate %s" msgstr "错误:无法查询到 %s" -#: swift/common/utils.py:1988 #, python-format msgid "Unable to read config from %s" msgstr "无法从%s读取设置" -#: swift/common/utils.py:1994 #, python-format msgid "Unable to find %s config section in %s" msgstr "无法在%s中查找到%s设置部分" -#: swift/common/utils.py:2353 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "无效的X-Container-Sync-To格式%r" -#: swift/common/utils.py:2358 #, python-format msgid "No realm key for %r" msgstr "%r权限key不存在" -#: swift/common/utils.py:2362 #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r的集群节点不存在" -#: swift/common/utils.py:2371 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." -msgstr "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" +msgstr "" +"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" -#: swift/common/utils.py:2375 msgid "Path required in X-Container-Sync-To" msgstr "在X-Container-Sync-To中路径是必须的" -#: swift/common/utils.py:2378 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许" -#: swift/common/utils.py:2383 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To中无效主机%r" -#: swift/common/utils.py:2575 msgid "Exception dumping recon cache" msgstr "执行dump recon的时候出现异常" -#: swift/common/wsgi.py:197 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "尝试过%s秒后无法捆绑%s:%s" -#: swift/common/wsgi.py:207 msgid "" -"WARNING: SSL should only be enabled for testing purposes. Use external " -"SSL termination for a production deployment." +"WARNING: SSL should only be enabled for testing purposes. Use external SSL " +"termination for a production deployment." 
msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端" -#: swift/common/middleware/catch_errors.py:43 msgid "Error: An error occurred" msgstr "错误:一个错误发生了" -#: swift/common/middleware/cname_lookup.py:144 #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "集合%(given_domain)s到%(found_domain)s" -#: swift/common/middleware/cname_lookup.py:156 #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s" -#: swift/common/middleware/ratelimit.py:248 #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "返回497因为黑名单:%s" -#: swift/common/middleware/ratelimit.py:263 #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" -#: swift/common/middleware/ratelimit.py:271 #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " @@ -533,538 +401,348 @@ msgstr "" "返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n" "\"Sleep) %(e)s" -#: swift/common/middleware/ratelimit.py:293 msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制流量 " -#: swift/common/middleware/recon.py:80 msgid "Error reading recon cache file" msgstr "读取recon cache file时出现错误" -#: swift/common/middleware/recon.py:82 msgid "Error parsing recon cache file" msgstr "解析recon cache file时出现错误" -#: swift/common/middleware/recon.py:84 msgid "Error retrieving recon data" msgstr "检索recon data时出现错误" -#: swift/common/middleware/recon.py:158 msgid "Error listing devices" msgstr "设备列表时出现错误" -#: swift/common/middleware/recon.py:254 msgid "Error reading ringfile" msgstr "读取ringfile时出现错误" -#: swift/common/middleware/recon.py:268 msgid "Error reading swift.conf" msgstr "读取swift.conf时出现错误" -#: swift/common/middleware/xprofile.py:243 #, python-format msgid "Error on render profiling results: %s" msgstr "给予分析结果时发生错误:%s" -#: swift/common/middleware/x_profile/exceptions.py:25 #, python-format msgid "Profiling Error: %s" msgstr "分析代码时出现错误:%s" -#: swift/common/middleware/x_profile/html_viewer.py:306 #, python-format msgid "method %s is not allowed." msgstr "方法%s不被允许" -#: swift/common/middleware/x_profile/html_viewer.py:317 #, python-format msgid "Can not load profile data from %s." msgstr "无法从%s下载分析数据" -#: swift/common/middleware/x_profile/html_viewer.py:369 -#: swift/common/middleware/x_profile/html_viewer.py:399 msgid "no log file found" msgstr "日志文件丢失" -#: swift/common/middleware/x_profile/html_viewer.py:392 #, python-format msgid "Data download error: %s" msgstr "数据下载错误:%s" -#: swift/common/middleware/x_profile/html_viewer.py:397 msgid "python-matplotlib not installed." msgstr "python-matplotlib未安装" -#: swift/common/middleware/x_profile/html_viewer.py:433 #, python-format msgid "plotting results failed due to %s" msgstr "绘制结果图标时失败因为%s" -#: swift/common/middleware/x_profile/html_viewer.py:444 msgid "The file type are forbidden to access!" msgstr "该文件类型被禁止访问!" -#: swift/common/middleware/x_profile/html_viewer.py:465 #, python-format msgid "Can not access the file %s." msgstr "无法访问文件%s" -#: swift/common/middleware/x_profile/profile_model.py:128 msgid "odfpy not installed." msgstr "odfpy未安装" -#: swift/container/auditor.py:58 #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" -#: swift/container/auditor.py:80 msgid "Begin container audit pass." 
msgstr "开始通过容器审计" -#: swift/container/auditor.py:91 #, python-format msgid "Container audit pass completed: %.02fs" msgstr "容器审计通过完成: %.02fs" -#: swift/container/auditor.py:97 msgid "Begin container audit \"once\" mode" msgstr "开始容器审计\"once\" 模式" -#: swift/container/auditor.py:102 #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "容器审计\"once\"模式完成:%.02fs" -#: swift/container/auditor.py:123 #, python-format msgid "ERROR Could not get container info %s" msgstr "错误:无法获取容器%s信息" -#: swift/container/server.py:180 #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\"" -#: swift/container/server.py:221 #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" -msgstr "出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 %(status)s %(reason)s" +msgstr "" +"出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 " +"%(status)s %(reason)s" -#: swift/container/server.py:230 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" -#: swift/container/sync.py:217 -msgid "" -"Configuration option internal_client_conf_path not defined. Using default" -" configuration, See internal-client.conf-sample for options" -msgstr "" - -#: swift/container/sync.py:230 -#, python-format -msgid "Unable to load internal client from config: %r (%s)" -msgstr "" - -#: swift/container/sync.py:264 msgid "Begin container sync \"once\" mode" msgstr "开始容器同步\"once\"模式" -#: swift/container/sync.py:276 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "容器同步\"once\"模式完成:%.02fs" -#: swift/container/sync.py:284 #, python-format msgid "" -"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " -"%(skip)s skipped, %(fail)s failed" +"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " +"skipped, %(fail)s failed" msgstr "" "自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n" "\"%(skip)s 跳过, %(fail)s 失败" -#: swift/container/sync.py:337 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" -#: swift/container/sync.py:393 #, python-format msgid "ERROR Syncing %s" msgstr "同步时发生错误%s" -#: swift/container/sync.py:476 -#, python-format -msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" -msgstr "" - -#: swift/container/sync.py:510 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未授权%(sync_from)r => %(sync_to)r" -#: swift/container/sync.py:516 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" -#: swift/container/sync.py:523 swift/container/sync.py:530 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步错误 %(db_file)s %(row)s" -#: swift/container/updater.py:77 #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "%s未挂载" -#: swift/container/updater.py:91 swift/obj/reconstructor.py:788 -#: swift/obj/replicator.py:487 swift/obj/replicator.py:575 #, python-format msgid "%s is not mounted" msgstr "%s未挂载" -#: swift/container/updater.py:110 #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "执行下载压缩时发生错误%s" -#: swift/container/updater.py:120 msgid "Begin container 
update sweep" msgstr "开始容器更新扫除" -#: swift/container/updater.py:154 #, python-format msgid "" -"Container update sweep of %(path)s completed: %(elapsed).02fs, " -"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" +"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, " -"%(no_change)s 无更改" +"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, " +"%(fail)s 失败, %(no_change)s 无更改" -#: swift/container/updater.py:168 #, python-format msgid "Container update sweep completed: %.02fs" msgstr "容器更新扫除完成:%.02fs" -#: swift/container/updater.py:180 msgid "Begin container update single threaded sweep" msgstr "开始容器更新单线程扫除" -#: swift/container/updater.py:188 #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" -"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, %(no_change)s " -"无更改" +"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, " +"%(no_change)s 无更改" -#: swift/container/updater.py:243 #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "更新报告发至%(container)s %(dbfile)s" -#: swift/container/updater.py:252 #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s更新报告失败" -#: swift/container/updater.py:294 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):" -#: swift/obj/auditor.py:78 #, python-format msgid " - parallel, %s" msgstr "-平行,%s" -#: swift/obj/auditor.py:80 #, python-format msgid " - %s" msgstr "- %s" -#: swift/obj/auditor.py:81 #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)" -#: swift/obj/auditor.py:100 #, python-format msgid "" -"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d " -"passed, %(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f ," -" bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: " -"%(audit).2f, Rate: %(audit_rate).2f" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" msgstr "" -"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d 隔离, " -"%(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: %(total).2f," -" 审计时间: %(audit).2f, 速率: %(audit_rate).2f" +"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d " +"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: " +"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" -#: swift/obj/auditor.py:134 #, python-format msgid "" -"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. " -"Total quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " -"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, " -"Rate: %(audit_rate).2f" +"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. 
Total " +"quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " +"%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" msgstr "" -"对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: %(quars)d, " -"错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: %(brate).2f, 审计时间: " -"%(audit).2f, 速率: %(audit_rate).2f" +"对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: " +"%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: " +"%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" -#: swift/obj/auditor.py:149 #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" -#: swift/obj/auditor.py:177 #, python-format msgid "ERROR Trying to audit %s" msgstr "错误 尝试开始审计%s" -#: swift/obj/auditor.py:213 #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" -#: swift/obj/auditor.py:263 #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "错误:无法执行审计:%s" -#: swift/obj/auditor.py:334 swift/obj/auditor.py:355 #, python-format msgid "ERROR auditing: %s" msgstr "审计错误:%s" -#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录" -#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373 msgid "Error hashing suffix" msgstr "执行Hashing后缀时发生错误" -#: swift/obj/diskfile.py:486 swift/obj/updater.py:162 #, python-format -msgid "Directory %r does not map to a valid policy (%s)" -msgstr "" - -#: swift/obj/diskfile.py:737 -#, python-format -msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" +msgid "" +"Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" -#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795 #, python-format msgid "Problem cleaning up %s" msgstr "问题清除%s" -#: swift/obj/diskfile.py:1253 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" -#: swift/obj/diskfile.py:1543 #, python-format msgid "" -"Client path %(client)s does not match path stored in object metadata " -"%(meta)s" +"Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" -#: swift/obj/diskfile.py:1797 -#, python-format -msgid "Problem fsyncing durable state file: %s" -msgstr "" - -#: swift/obj/diskfile.py:1802 -#, python-format -msgid "No space left on device for %s" -msgstr "" - -#: swift/obj/diskfile.py:1806 -#, python-format -msgid "Problem writing durable state file: %s" -msgstr "" - -#: swift/obj/expirer.py:79 #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "%ds通过完成; %d对象过期" -#: swift/obj/expirer.py:86 #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "%ds目前通过;%d对象过期" -#: swift/obj/expirer.py:170 #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "开始通过;%s可能容器;%s可能对象" -#: swift/obj/expirer.py:196 #, python-format msgid "Exception while deleting container %s %s" msgstr "执行删除容器时出现异常 %s %s" -#: swift/obj/expirer.py:201 swift/obj/expirer.py:218 msgid "Unhandled exception" msgstr "未处理的异常" -#: swift/obj/expirer.py:268 #, python-format msgid "Exception while deleting object %s %s %s" msgstr "执行删除对象时发生异常%s %s %s" -#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472 -#, python-format -msgid 
"Invalid response %(resp)s from %(full_path)s" -msgstr "" - -#: swift/obj/reconstructor.py:195 -#, python-format -msgid "Trying to GET %(full_path)s" -msgstr "" - -#: swift/obj/reconstructor.py:301 -#, python-format -msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" -msgstr "" - -#: swift/obj/reconstructor.py:324 #, python-format msgid "" -"%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed" -" in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" -msgstr "" - -#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419 -#, python-format -msgid "" -"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " -"synced" +"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" -#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" -#: swift/obj/reconstructor.py:352 -#, python-format -msgid "Nothing reconstructed for %s seconds." -msgstr "" - -#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463 msgid "Lockup detected.. killing live coros." msgstr "检测到lockup。终止正在执行的coros" -#: swift/obj/reconstructor.py:442 -#, python-format -msgid "Trying to sync suffixes with %s" -msgstr "" - -#: swift/obj/reconstructor.py:467 -#, python-format -msgid "%s responded as unmounted" -msgstr "" - -#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295 #, python-format msgid "Removing partition: %s" msgstr "移除分区:%s" -#: swift/obj/reconstructor.py:865 -msgid "Ring change detected. Aborting current reconstruction pass." -msgstr "" - -#: swift/obj/reconstructor.py:884 -msgid "Exception in top-levelreconstruction loop" -msgstr "" - -#: swift/obj/reconstructor.py:894 -msgid "Running object reconstructor in script mode." -msgstr "" - -#: swift/obj/reconstructor.py:903 -#, python-format -msgid "Object reconstruction complete (once). (%.02f minutes)" -msgstr "" - -#: swift/obj/reconstructor.py:910 -msgid "Starting object reconstructor in daemon mode." -msgstr "" - -#: swift/obj/reconstructor.py:914 -msgid "Starting object reconstruction pass." -msgstr "" - -#: swift/obj/reconstructor.py:919 -#, python-format -msgid "Object reconstruction complete. 
(%.02f minutes)" -msgstr "" - -#: swift/obj/replicator.py:139 #, python-format msgid "Killing long-running rsync: %s" msgstr "终止long-running同步: %s" -#: swift/obj/replicator.py:153 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" -#: swift/obj/replicator.py:160 swift/obj/replicator.py:164 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" -#: swift/obj/replicator.py:281 -#, python-format -msgid "Removing %s objects" -msgstr "" - -#: swift/obj/replicator.py:289 msgid "Error syncing handoff partition" msgstr "执行同步切换分区时发生错误" -#: swift/obj/replicator.py:351 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s的回应为未挂载" -#: swift/obj/replicator.py:356 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)s来自%(ip)s" -#: swift/obj/replicator.py:391 #, python-format msgid "Error syncing with node: %s" msgstr "执行同步时节点%s发生错误" -#: swift/obj/replicator.py:395 msgid "Error syncing partition" msgstr "执行同步分区时发生错误" -#: swift/obj/replicator.py:408 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " @@ -1073,256 +751,177 @@ msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n" "\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" -#: swift/obj/replicator.py:434 #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s秒无复制" -#: swift/obj/replicator.py:578 msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改变被检测到。退出现有的复制通过" -#: swift/obj/replicator.py:599 msgid "Exception in top-level replication loop" msgstr "top-level复制圈出现异常" -#: swift/obj/replicator.py:608 msgid "Running object replicator in script mode." msgstr "在加密模式下执行对象复制" -#: swift/obj/replicator.py:626 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象复制完成(一次)。(%.02f minutes)" -#: swift/obj/replicator.py:633 msgid "Starting object replicator in daemon mode." msgstr "在守护模式下开始对象复制" -#: swift/obj/replicator.py:637 msgid "Starting object replication pass." msgstr "开始通过对象复制" -#: swift/obj/replicator.py:642 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "对象复制完成。(%.02f minutes)" -#: swift/obj/server.py:231 #, python-format msgid "" -"ERROR Container update failed (saving for async update later): %(status)d" -" response from %(ip)s:%(port)s/%(dev)s" -msgstr "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/%(dev)s" +"ERROR Container update failed (saving for async update later): %(status)d " +"response from %(ip)s:%(port)s/%(dev)s" +msgstr "" +"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/" +"%(dev)s" -#: swift/obj/server.py:238 #, python-format msgid "" -"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " -"async update later)" +"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " +"update later)" msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" -#: swift/obj/server.py:273 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\"" -#: swift/obj/updater.py:63 -#, python-format -msgid "ERROR: Unable to access %(path)s: %(error)s" -msgstr "" - -#: swift/obj/updater.py:78 msgid "Begin object update sweep" msgstr "开始对象更新扫除" -#: swift/obj/updater.py:104 #, python-format msgid "" -"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" -" successes, %(fail)s failures" -msgstr "%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" +"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures" +msgstr "" +"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" -#: swift/obj/updater.py:113 #, python-format msgid "Object update sweep completed: %.02fs" msgstr "对象更新扫除完成:%.02fs" -#: swift/obj/updater.py:122 msgid "Begin object update single threaded sweep" msgstr "开始对象更新单线程扫除" -#: swift/obj/updater.py:136 #, python-format msgid "" -"Object update single threaded sweep completed: %(elapsed).02fs, " -"%(success)s successes, %(fail)s failures" -msgstr "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败" +"Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures" +msgstr "" +"对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败" -#: swift/obj/updater.py:179 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "执行同步等待文件 文件名不可知%s" -#: swift/obj/updater.py:209 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "错误 Pickle问题 隔离%s" -#: swift/obj/updater.py:274 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" -#: swift/proxy/server.py:405 msgid "ERROR Unhandled exception in request" msgstr "错误 未处理的异常发出请求" -#: swift/proxy/server.py:460 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" -#: swift/proxy/server.py:477 swift/proxy/server.py:495 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" -#: swift/proxy/server.py:571 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" -#: swift/proxy/controllers/account.py:64 msgid "Account" msgstr "账号" -#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814 -#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411 -#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643 -#: 
swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591 -#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908 -#: swift/proxy/controllers/obj.py:2093 msgid "Object" msgstr "对象" -#: swift/proxy/controllers/base.py:753 msgid "Trying to read during GET (retrying)" msgstr "执行GET时尝试读取(重新尝试)" -#: swift/proxy/controllers/base.py:815 msgid "Trying to read during GET" msgstr "执行GET时尝试读取" -#: swift/proxy/controllers/base.py:819 #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代理处读取%ss" -#: swift/proxy/controllers/base.py:824 msgid "Client disconnected on read" msgstr "客户读取时中断" -#: swift/proxy/controllers/base.py:826 msgid "Trying to send to client" msgstr "尝试发送到客户端" -#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "尝试执行%(method)s %(path)s" -#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129 -#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450 -#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138 msgid "ERROR Insufficient Storage" msgstr "错误 存储空间不足" -#: swift/proxy/controllers/base.py:905 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器" -#: swift/proxy/controllers/base.py:1132 -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "" - -#: swift/proxy/controllers/base.py:1260 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" -#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161 msgid "Container" msgstr "容器" -#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592 #, python-format msgid "Trying to write to %s" msgstr "尝试执行书写%s" -#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903 -#, python-format -msgid "ERROR %(status)d Expect: 100-continue From Object Server" -msgstr "" - -#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909 #, python-format msgid "Expect: 100-continue on %s" msgstr "已知:100-continue on %s" -#: swift/proxy/controllers/obj.py:428 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "尝试执行获取最后的PUT状态%s" -#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s" -#: swift/proxy/controllers/obj.py:716 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "对象PUT返还 412,%(statuses)r " -#: swift/proxy/controllers/obj.py:725 -#, python-format -msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" -msgstr "" - -#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "错误 客户读取超时(%ss)" -#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055 msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" -#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060 msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未发送足够" -#: swift/proxy/controllers/obj.py:869 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "对象服务器返还%s不匹配etags" -#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218 msgid "Object PUT" msgstr "对象上传" - -#: 
swift/proxy/controllers/obj.py:2035 -#, python-format -msgid "Not enough object servers ack'ed (got %d)" -msgstr "" - -#: swift/proxy/controllers/obj.py:2094 -#, python-format -msgid "Trying to get %s status of PUT to %s" -msgstr "" - From 55dd705a863c4500330cbd2b8c2fec46d618dc71 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 6 May 2015 19:53:09 +0200 Subject: [PATCH 24/98] Add missing statsd metrics section for object-reconstructor Change-Id: Id3f98e5f637ff537a387262b40f21c05876fca91 --- doc/source/admin_guide.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 5b7a02850a..50eb9bd5e6 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -896,6 +896,31 @@ Metric Name Description including ones resulting in an error. ======================== ==================================================== +Metrics for `object-reconstructor`: + +====================================================== ====================================================== +Metric Name Description +------------------------------------------------------ ------------------------------------------------------ +`object-reconstructor.partition.delete.count.<device>` A count of partitions on <device> which were + reconstructed and synced to another node because they + didn't belong on this node. This metric is tracked + per-device to allow for "quiescence detection" for + object reconstruction activity on each device. +`object-reconstructor.partition.delete.timing` Timing data for partitions reconstructed and synced to + another node because they didn't belong on this node. + This metric is not tracked per device. +`object-reconstructor.partition.update.count.<device>` A count of partitions on <device> which were + reconstructed and synced to another node, but also + belong on this node. As with delete.count, this metric + is tracked per-device. +`object-reconstructor.partition.update.timing` Timing data for partitions reconstructed which also + belong on this node. This metric is not tracked + per-device. +`object-reconstructor.suffix.hashes` Count of suffix directories whose hash (of filenames) + was recalculated. +`object-reconstructor.suffix.syncs` Count of suffix directories reconstructed with ssync. +====================================================== ====================================================== + Metrics for `object-replicator`: =================================================== ==================================================== From 1faad248f833735585aa8f6135babceb46fbb6f8 Mon Sep 17 00:00:00 2001 From: Emmanuel Cazenave Date: Tue, 5 May 2015 12:31:22 +0200 Subject: [PATCH 25/98] X-Auth-Token should be a bytestring. Change-Id: I2aa941d74883e17e9548b0144a4a2e2db33aba95 Closes-Bug: 1451773 --- test/functional/swift_test_client.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index f68dc035f0..4d77bcced0 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -181,7 +181,11 @@ class Connection(object): self.storage_url = str('/%s/%s' % (x[3], x[4])) self.account_name = str(x[4]) self.auth_user = auth_user - self.storage_token = storage_token + # With v2 keystone, storage_token is unicode. + # We want it to be string otherwise this would cause + # troubles when doing query with already encoded + # non ascii characters in its headers. 
+ self.storage_token = str(storage_token) self.user_acl = '%s:%s' % (self.account, self.username) self.http_connect() From 0b20a18e5216b991855c461e41e9ef32e17fadb4 Mon Sep 17 00:00:00 2001 From: Pete Zaitcev Date: Thu, 7 May 2015 13:16:15 -0600 Subject: [PATCH 26/98] Spell "rebalance" right in swift-ring-builder.1 See Red Hat bug #1218269. Change-Id: I814eb4b3c0821f5a8df5feea2bda3a964aace536 --- doc/manpages/swift-ring-builder.1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manpages/swift-ring-builder.1 b/doc/manpages/swift-ring-builder.1 index 46516d5c4e..6bff7e8e67 100644 --- a/doc/manpages/swift-ring-builder.1 +++ b/doc/manpages/swift-ring-builder.1 @@ -142,7 +142,7 @@ could take a while to run. .RE -.IP "\fBrebalence\fR" +.IP "\fBrebalance\fR" .RS 5 Attempts to rebalance the ring by reassigning partitions that haven't been recently reassigned. .RE From 664a632c01f8c5c80826b223b9ade774bfe2ed9a Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Fri, 8 May 2015 08:41:39 +0200 Subject: [PATCH 27/98] Update my mailmap entry Change-Id: I5d21a55d0fa4cab6eaa6ff426819aa1dc997de2f --- .mailmap | 3 ++- AUTHORS | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index c45be7c95f..172c761c06 100644 --- a/.mailmap +++ b/.mailmap @@ -51,7 +51,8 @@ Tom Fifield Tom Fifield Sascha Peilicke Sascha Peilicke Zhenguo Niu Peter Portante -Christian Schwede +Christian Schwede +Christian Schwede Constantine Peresypkin Madhuri Kumari madhuri Morgan Fainberg diff --git a/AUTHORS b/AUTHORS index fa2cee7458..ce680dae15 100644 --- a/AUTHORS +++ b/AUTHORS @@ -172,7 +172,7 @@ Brent Roskos (broskos@internap.com) Shilla Saebi (shilla.saebi@gmail.com) Cristian A Sanchez (cristian.a.sanchez@intel.com) Sarvesh Ranjan (saranjan@cisco.com) -Christian Schwede (christian.schwede@enovance.com) +Christian Schwede (cschwede@redhat.com) Mark Seger (Mark.Seger@hp.com) Andrew Clay Shafer (acs@parvuscaptus.com) Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp) From 90b84d3a699811a99c97ebbe4f71a14d2f76a0e5 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 8 May 2015 11:45:12 -0700 Subject: [PATCH 28/98] Properly re-raise exceptions in proxy_logging Previously, this could encounter TypeErrors, presumably because sys.exc_clear() was called somewhere in the block of code between catching the exception and re-raising. Related-Bug: 1181146 Change-Id: Iadeea3f61e70bf83dc0eb063fdb27edd16f3ca32 --- swift/common/middleware/proxy_logging.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/swift/common/middleware/proxy_logging.py b/swift/common/middleware/proxy_logging.py index 66487502d5..968af2dd71 100644 --- a/swift/common/middleware/proxy_logging.py +++ b/swift/common/middleware/proxy_logging.py @@ -71,6 +71,7 @@ if this is a middleware subrequest or not. A log processor calculating bandwidth usage will want to only sum up logs with no swift.source. 
""" +import sys import time from urllib import quote, unquote @@ -296,12 +297,13 @@ class ProxyLoggingMiddleware(object): try: iterable = self.app(env, my_start_response) except Exception: + exc_type, exc_value, exc_traceback = sys.exc_info() req = Request(env) status_int = status_int_for_logging(start_status=500) self.log_request( req, status_int, input_proxy.bytes_received, 0, start_time, time.time()) - raise + raise exc_type, exc_value, exc_traceback else: return iter_response(iterable) From 29f4393d88426fd6c34f2cfe43a8c434bfad8d47 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 8 May 2015 15:55:14 -0700 Subject: [PATCH 29/98] Remove workaround for old eventlet version Swift now requires eventlet >= 0.16.1, so we can get rid of this workaround for a bug in eventlet 0.9.16. Change-Id: I4a1200b9bd9266896a704a840fda0d1b720bc86d --- swift/common/memcached.py | 35 ++---------------- test/unit/common/test_memcached.py | 58 ------------------------------ 2 files changed, 2 insertions(+), 91 deletions(-) diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 8a2cfa19ca..2e1ed4c08d 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -50,11 +50,10 @@ import time from bisect import bisect from swift import gettext_ as _ from hashlib import md5 -from distutils.version import StrictVersion from eventlet.green import socket from eventlet.pools import Pool -from eventlet import Timeout, __version__ as eventlet_version +from eventlet import Timeout from swift.common.utils import json @@ -107,14 +106,6 @@ class MemcacheConnPool(Pool): Pool.__init__(self, max_size=size) self.server = server self._connect_timeout = connect_timeout - self._parent_class_getter = super(MemcacheConnPool, self).get - try: - # call the patched .get() if eventlet is older than 0.9.17 - if StrictVersion(eventlet_version) < StrictVersion('0.9.17'): - self._parent_class_getter = self._upstream_fixed_get - except ValueError: - # "invalid" version number or otherwise error parsing version - pass def create(self): if ':' in self.server: @@ -129,34 +120,12 @@ class MemcacheConnPool(Pool): return (sock.makefile(), sock) def get(self): - fp, sock = self._parent_class_getter() + fp, sock = super(MemcacheConnPool, self).get() if fp is None: # An error happened previously, so we need a new connection fp, sock = self.create() return fp, sock - # The following method is from eventlet post 0.9.16. This version - # properly keeps track of pool size accounting, and therefore doesn't - # let the pool grow without bound. This patched version is the result - # of commit f5e5b2bda7b442f0262ee1084deefcc5a1cc0694 in eventlet and is - # documented at https://bitbucket.org/eventlet/eventlet/issue/91 - def _upstream_fixed_get(self): - """Return an item from the pool, when one is available. This may - cause the calling greenthread to block. 
- """ - if self.free_items: - return self.free_items.popleft() - self.current_size += 1 - if self.current_size <= self.max_size: - try: - created = self.create() - except: # noqa - self.current_size -= 1 - raise - return created - self.current_size -= 1 # did not create - return self.channel.get() - class MemcacheRing(object): """ diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index f3df46c404..cd251f15d0 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -410,64 +410,6 @@ class TestMemcached(unittest.TestCase): connections.get_nowait() self.assertTrue(connections.empty()) - # Ensure we exercise the backported-for-pre-eventlet-version-0.9.17 get() - # code, even if the executing eventlet's version is already newer. - @patch.object(memcached, 'eventlet_version', '0.9.16') - def test_connection_pooling_pre_0_9_17(self): - with patch('swift.common.memcached.socket') as mock_module: - connected = [] - count = [0] - - def _slow_yielding_connector(addr): - count[0] += 1 - if count[0] % 3 == 0: - raise ValueError('whoops!') - sleep(0.1) - connected.append(addr) - - mock_module.socket.return_value.connect.side_effect = \ - _slow_yielding_connector - - # If POOL_SIZE is not small enough relative to USER_COUNT, the - # "free_items" business in the eventlet.pools.Pool will cause - # spurious failures below. I found these values to work well on a - # VM running in VirtualBox on a late 2013 Retina MacbookPro: - POOL_SIZE = 5 - USER_COUNT = 50 - - pool = memcached.MemcacheConnPool('1.2.3.4:11211', size=POOL_SIZE, - connect_timeout=10) - self.assertEqual(POOL_SIZE, pool.max_size) - - def _user(): - got = None - while not got: - try: - got = pool.get() - except: # noqa - pass - pool.put(got) - - # make a bunch of requests "at the same time" - p = GreenPool() - for i in range(USER_COUNT): - p.spawn(_user) - p.waitall() - - # If the except block after the "created = self.create()" call - # doesn't correctly decrement self.current_size, this test will - # fail by having some number less than POOL_SIZE connections (in my - # testing, anyway). - self.assertEqual(POOL_SIZE, len(connected)) - - # Subsequent requests should get and use the existing - # connections, not creating any more. - for i in range(USER_COUNT): - p.spawn(_user) - p.waitall() - - self.assertEqual(POOL_SIZE, len(connected)) - def test_connection_pool_timeout(self): orig_conn_pool = memcached.MemcacheConnPool try: From 518262ab6ecd8faa2b915df118ffc70a30112a18 Mon Sep 17 00:00:00 2001 From: paul luse Date: Tue, 12 May 2015 15:21:13 -0700 Subject: [PATCH 30/98] Remove 1 line of dead code from EC reconstructor Assuming nobody intentionally left this in here for some reason... 
Change-Id: I4bf43bb3828e062c0342557243076ed62d6790f4 --- swift/obj/reconstructor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 4385e42cc9..2dd743fa9a 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -193,7 +193,6 @@ class ObjectReconstructor(Daemon): :returns: response """ resp = None - headers['X-Backend-Node-Index'] = node['index'] try: with ConnectionTimeout(self.conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], From 98b725fec639c5501c645ce4e4dc9d12c686f91d Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Fri, 1 May 2015 13:02:29 +0100 Subject: [PATCH 31/98] Cleanup and extend end to end ssync tests Extends the existing end to end ssync tests with a test using the replication policy. Also includes some cleanup and improvements to the test framework, e.g. rather than faking the connection between sender and receiver, use a real connection and wrap it to capture traffic for verification. Change-Id: Id71d2eb3fb8fa15c016ef151aacf95f97196a902 --- test/unit/obj/test_ssync_sender.py | 340 ++++++++++++++++++++--------- 1 file changed, 235 insertions(+), 105 deletions(-) diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index 42bd610eb6..fa38b658b2 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -29,9 +29,8 @@ from swift.common import exceptions, utils from swift.common.storage_policy import POLICIES from swift.common.exceptions import DiskFileNotExist, DiskFileError, \ DiskFileDeleted -from swift.common.swob import Request -from swift.common.utils import Timestamp, FileLikeIter -from swift.obj import ssync_sender, diskfile, server, ssync_receiver +from swift.common.utils import Timestamp +from swift.obj import ssync_sender, diskfile, server from swift.obj.reconstructor import RebuildingECDiskFileStream from test.unit import debug_logger, patch_policies @@ -1245,67 +1244,52 @@ class TestSender(BaseTestSender): self.assertTrue(self.sender.connection.closed) -@patch_policies(with_ec_default=True) -class TestSsync(BaseTestSender): +class TestBaseSsync(BaseTestSender): """ - Test interactions between sender and receiver. The basis for each test is - actual diskfile state on either side - the connection between sender and - receiver is faked. Assertions are made about the final state of the sender - and receiver diskfiles. + Provides a framework to test end to end interactions between sender and + receiver. The basis for each test is actual diskfile state on either side. + The connection between sender and receiver is wrapped to capture ssync + traffic for subsequent verification of the protocol. Assertions are made + about the final state of the sender and receiver diskfiles. """ - def make_fake_ssync_connect(self, sender, rx_obj_controller, device, - partition, policy): - trace = [] + def make_connect_wrapper(self, sender): + """ + Make a wrapper function for the ssync_sender.Sender.connect() method + that will in turn wrap the HTTPConnection.send() and the + Sender.readline() so that ssync protocol messages can be captured. 
+ """ + orig_connect = sender.connect + trace = dict(messages=[]) def add_trace(type, msg): # record a protocol event for later analysis if msg.strip(): - trace.append((type, msg.strip())) + trace['messages'].append((type, msg.strip())) - def start_response(status, headers, exc_info=None): - assert(status == '200 OK') + def make_send_wrapper(send): + def wrapped_send(msg): + _msg = msg.split('\r\n', 1)[1] + _msg = _msg.rsplit('\r\n', 1)[0] + add_trace('tx', _msg) + send(msg) + return wrapped_send - class FakeConnection: - def __init__(self, trace): - self.trace = trace - self.queue = [] - self.src = FileLikeIter(self.queue) + def make_readline_wrapper(readline): + def wrapped_readline(): + data = readline() + add_trace('rx', data) + bytes_read = trace.setdefault('readline_bytes', 0) + trace['readline_bytes'] = bytes_read + len(data) + return data + return wrapped_readline - def send(self, msg): - msg = msg.split('\r\n', 1)[1] - msg = msg.rsplit('\r\n', 1)[0] - add_trace('tx', msg) - self.queue.append(msg) - - def close(self): - pass - - def wrap_gen(gen): - # Strip response head and tail - while True: - try: - msg = gen.next() - if msg: - add_trace('rx', msg) - msg = '%x\r\n%s\r\n' % (len(msg), msg) - yield msg - except StopIteration: - break - - def fake_connect(): - sender.connection = FakeConnection(trace) - headers = {'Transfer-Encoding': 'chunked', - 'X-Backend-Storage-Policy-Index': str(int(policy))} - env = {'REQUEST_METHOD': 'SSYNC'} - path = '/%s/%s' % (device, partition) - req = Request.blank(path, environ=env, headers=headers) - req.environ['wsgi.input'] = sender.connection.src - resp = rx_obj_controller(req.environ, start_response) - wrapped_gen = wrap_gen(resp) - sender.response = FileLikeIter(wrapped_gen) - sender.response.fp = sender.response - return fake_connect + def wrapped_connect(): + orig_connect() + sender.connection.send = make_send_wrapper( + sender.connection.send) + sender.readline = make_readline_wrapper(sender.readline) + return wrapped_connect, trace def setUp(self): self.device = 'dev' @@ -1325,19 +1309,24 @@ class TestSsync(BaseTestSender): 'replication_one_per_device': 'false', 'log_requests': 'false'} self.rx_controller = server.ObjectController(conf) - self.orig_ensure_flush = ssync_receiver.Receiver._ensure_flush - ssync_receiver.Receiver._ensure_flush = lambda *args: '' self.ts_iter = (Timestamp(t) for t in itertools.count(int(time.time()))) + self.rx_ip = '127.0.0.1' + sock = eventlet.listen((self.rx_ip, 0)) + self.rx_server = eventlet.spawn( + eventlet.wsgi.server, sock, self.rx_controller, utils.NullLogger()) + self.rx_port = sock.getsockname()[1] + self.rx_node = {'replication_ip': self.rx_ip, + 'replication_port': self.rx_port, + 'device': self.device} def tearDown(self): - if self.orig_ensure_flush: - ssync_receiver.Receiver._ensure_flush = self.orig_ensure_flush + self.rx_server.kill() shutil.rmtree(self.tmpdir, ignore_errors=True) def _create_ondisk_files(self, df_mgr, obj_name, policy, timestamp, frag_indexes=None): - frag_indexes = [] if frag_indexes is None else frag_indexes + frag_indexes = [None] if frag_indexes is None else frag_indexes metadata = {'Content-Type': 'plain/text'} diskfiles = [] for frag_index in frag_indexes: @@ -1372,22 +1361,28 @@ class TestSsync(BaseTestSender): df.open() return df - def _verify_diskfile_sync(self, tx_df, rx_df, frag_index): + def _verify_diskfile_sync(self, tx_df, rx_df, frag_index, same_etag=False): # verify that diskfiles' metadata match # sanity check, they are not the same ondisk files! 
self.assertNotEqual(tx_df._datadir, rx_df._datadir) rx_metadata = dict(rx_df.get_metadata()) for k, v in tx_df.get_metadata().iteritems(): - self.assertEqual(v, rx_metadata.pop(k)) + if k == 'X-Object-Sysmeta-Ec-Frag-Index': + # if tx_df had a frag_index then rx_df should also have one + self.assertTrue(k in rx_metadata) + self.assertEqual(frag_index, int(rx_metadata.pop(k))) + elif k == 'ETag' and not same_etag: + self.assertNotEqual(v, rx_metadata.pop(k, None)) + continue + else: + self.assertEqual(v, rx_metadata.pop(k), k) # ugh, ssync duplicates ETag with Etag so have to clear it out here if 'Etag' in rx_metadata: rx_metadata.pop('Etag') self.assertFalse(rx_metadata) - if frag_index: - rx_metadata = rx_df.get_metadata() - fi_key = 'X-Object-Sysmeta-Ec-Frag-Index' - self.assertTrue(fi_key in rx_metadata) - self.assertEqual(frag_index, int(rx_metadata[fi_key])) + expected_body = '%s___%s' % (tx_df._name, frag_index) + actual_body = ''.join([chunk for chunk in rx_df.reader()]) + self.assertEqual(expected_body, actual_body) def _analyze_trace(self, trace): """ @@ -1445,7 +1440,7 @@ class TestSsync(BaseTestSender): phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates') results = dict((k, []) for k in phases) handler = unexpected - lines = list(trace) + lines = list(trace.get('messages', [])) lines.reverse() while lines: line = lines.pop() @@ -1471,27 +1466,35 @@ class TestSsync(BaseTestSender): 'Message outside of a phase: %s' % results.get(None)) return results - def _verify_ondisk_files(self, tx_objs, policy, rx_node_index): - # verify tx and rx files that should be in sync + def _verify_ondisk_files(self, tx_objs, policy, tx_frag_index=None, + rx_frag_index=None): + """ + Verify tx and rx files that should be in sync. + :param tx_objs: sender diskfiles + :param policy: storage policy instance + :param tx_frag_index: the fragment index of tx diskfiles that should + have been used as a source for sync'ing + :param rx_frag_index: the fragment index of expected rx diskfiles + """ for o_name, diskfiles in tx_objs.iteritems(): for tx_df in diskfiles: - frag_index = tx_df._frag_index - if frag_index == rx_node_index: - # this frag_index should have been sync'd, + if tx_frag_index is None or tx_df._frag_index == tx_frag_index: + # this diskfile should have been sync'd, # check rx file is ok - rx_df = self._open_rx_diskfile(o_name, policy, frag_index) - self._verify_diskfile_sync(tx_df, rx_df, frag_index) - expected_body = '/a/c/%s___%s' % (o_name, rx_node_index) - actual_body = ''.join([chunk for chunk in rx_df.reader()]) - self.assertEqual(expected_body, actual_body) + rx_df = self._open_rx_diskfile( + o_name, policy, rx_frag_index) + # for EC revert job or replication etags should match + match_etag = (tx_frag_index == rx_frag_index) + self._verify_diskfile_sync( + tx_df, rx_df, rx_frag_index, match_etag) else: - # this frag_index should not have been sync'd, + # this diskfile should not have been sync'd, # check no rx file, - self.assertRaises(DiskFileNotExist, - self._open_rx_diskfile, - o_name, policy, frag_index=frag_index) + self.assertRaises(DiskFileNotExist, self._open_rx_diskfile, + o_name, policy, + frag_index=tx_df._frag_index) # check tx file still intact - ssync does not do any cleanup! 
- self._open_tx_diskfile(o_name, policy, frag_index) + tx_df.open() def _verify_tombstones(self, tx_objs, policy): # verify tx and rx tombstones that should be in sync @@ -1509,13 +1512,17 @@ class TestSsync(BaseTestSender): rx_delete_time = exc.timestamp self.assertEqual(tx_delete_time, rx_delete_time) + +@patch_policies(with_ec_default=True) +class TestSsyncEC(TestBaseSsync): def test_handoff_fragment_revert(self): # test that a sync_revert type job does send the correct frag archives - # to the receiver, and that those frag archives are then removed from - # local node. + # to the receiver policy = POLICIES.default rx_node_index = 0 tx_node_index = 1 + # for a revert job we iterate over frag index that belongs on + # remote node frag_index = rx_node_index # create sender side diskfiles... @@ -1557,20 +1564,18 @@ class TestSsync(BaseTestSender): job = {'device': self.device, 'partition': self.partition, 'policy': policy, - 'frag_index': frag_index, - 'purge': True} - node = {'index': rx_node_index} - self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes) - # fake connection from tx to rx... - self.sender.connect = self.make_fake_ssync_connect( - self.sender, self.rx_controller, self.device, self.partition, - policy) + 'frag_index': frag_index} + node = dict(self.rx_node) + node.update({'index': rx_node_index}) + sender = ssync_sender.Sender(self.daemon, node, job, suffixes) + # wrap connection from tx to rx to capture ssync messages... + sender.connect, trace = self.make_connect_wrapper(sender) # run the sync protocol... - self.sender() + sender() # verify protocol - results = self._analyze_trace(self.sender.connection.trace) + results = self._analyze_trace(trace) # sender has handoff frags for o1, o3 and o4 and ts for o5 self.assertEqual(4, len(results['tx_missing'])) # receiver is missing frags for o1, o3 and ts for o5 @@ -1591,7 +1596,8 @@ class TestSsync(BaseTestSender): self.assertEqual(['/a/c/o1', '/a/c/o3', '/a/c/o5'], sorted(sync_paths)) # verify on disk files... - self._verify_ondisk_files(tx_objs, policy, rx_node_index) + self._verify_ondisk_files( + tx_objs, policy, frag_index, rx_node_index) self._verify_tombstones(tx_tombstones, policy) def test_fragment_sync(self): @@ -1656,19 +1662,17 @@ class TestSsync(BaseTestSender): 'policy': policy, 'frag_index': frag_index, 'sync_diskfile_builder': fake_reconstruct_fa} - node = {'index': rx_node_index} - self.sender = ssync_sender.Sender(self.daemon, node, job, suffixes) - - # fake connection from tx to rx... - self.sender.connect = self.make_fake_ssync_connect( - self.sender, self.rx_controller, self.device, self.partition, - policy) + node = dict(self.rx_node) + node.update({'index': rx_node_index}) + sender = ssync_sender.Sender(self.daemon, node, job, suffixes) + # wrap connection from tx to rx to capture ssync messages... + sender.connect, trace = self.make_connect_wrapper(sender) # run the sync protocol... - self.sender() + sender() # verify protocol - results = self._analyze_trace(self.sender.connection.trace) + results = self._analyze_trace(trace) # sender has primary for o1, o2 and o3, o4 and ts for o5 self.assertEqual(5, len(results['tx_missing'])) # receiver is missing o1, o2 and o3 and ts for o5 @@ -1702,9 +1706,135 @@ class TestSsync(BaseTestSender): # verify on disk files... 
self.assertEqual(sorted(expect_sync_paths), sorted(actual_sync_paths)) - self._verify_ondisk_files(tx_objs, policy, rx_node_index) + self._verify_ondisk_files( + tx_objs, policy, frag_index, rx_node_index) self._verify_tombstones(tx_tombstones, policy) +@patch_policies +class TestSsyncReplication(TestBaseSsync): + def test_sync(self): + policy = POLICIES.default + rx_node_index = 0 + + # create sender side diskfiles... + tx_objs = {} + rx_objs = {} + tx_tombstones = {} + rx_tombstones = {} + tx_df_mgr = self.daemon._diskfile_router[policy] + rx_df_mgr = self.rx_controller._diskfile_router[policy] + # o1 and o2 are on tx only + t1 = self.ts_iter.next() + tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1) + t2 = self.ts_iter.next() + tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2) + # o3 is on tx and older copy on rx + t3a = self.ts_iter.next() + rx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a) + t3b = self.ts_iter.next() + tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3b) + # o4 in sync on rx and tx + t4 = self.ts_iter.next() + tx_objs['o4'] = self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4) + rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4) + # o5 is a tombstone, missing on receiver + t5 = self.ts_iter.next() + tx_tombstones['o5'] = self._create_ondisk_files( + tx_df_mgr, 'o5', policy, t5) + tx_tombstones['o5'][0].delete(t5) + # o6 is a tombstone, in sync on tx and rx + t6 = self.ts_iter.next() + tx_tombstones['o6'] = self._create_ondisk_files( + tx_df_mgr, 'o6', policy, t6) + tx_tombstones['o6'][0].delete(t6) + rx_tombstones['o6'] = self._create_ondisk_files( + rx_df_mgr, 'o6', policy, t6) + rx_tombstones['o6'][0].delete(t6) + # o7 is a tombstone on tx, older data on rx + t7a = self.ts_iter.next() + rx_objs['o7'] = self._create_ondisk_files(rx_df_mgr, 'o7', policy, t7a) + t7b = self.ts_iter.next() + tx_tombstones['o7'] = self._create_ondisk_files( + tx_df_mgr, 'o7', policy, t7b) + tx_tombstones['o7'][0].delete(t7b) + + suffixes = set() + for diskfiles in (tx_objs.values() + tx_tombstones.values()): + for df in diskfiles: + suffixes.add(os.path.basename(os.path.dirname(df._datadir))) + + # create ssync sender instance... + job = {'device': self.device, + 'partition': self.partition, + 'policy': policy} + node = dict(self.rx_node) + node.update({'index': rx_node_index}) + sender = ssync_sender.Sender(self.daemon, node, job, suffixes) + # wrap connection from tx to rx to capture ssync messages... + sender.connect, trace = self.make_connect_wrapper(sender) + + # run the sync protocol... + success, in_sync_objs = sender() + + self.assertEqual(7, len(in_sync_objs)) + self.assertTrue(success) + + # verify protocol + results = self._analyze_trace(trace) + self.assertEqual(7, len(results['tx_missing'])) + self.assertEqual(5, len(results['rx_missing'])) + self.assertEqual(5, len(results['tx_updates'])) + self.assertFalse(results['rx_updates']) + sync_paths = [] + for subreq in results.get('tx_updates'): + if subreq.get('method') == 'PUT': + self.assertTrue( + subreq['path'] in ('/a/c/o1', '/a/c/o2', '/a/c/o3')) + expected_body = '%s___None' % subreq['path'] + self.assertEqual(expected_body, subreq['body']) + elif subreq.get('method') == 'DELETE': + self.assertTrue(subreq['path'] in ('/a/c/o5', '/a/c/o7')) + sync_paths.append(subreq.get('path')) + self.assertEqual( + ['/a/c/o1', '/a/c/o2', '/a/c/o3', '/a/c/o5', '/a/c/o7'], + sorted(sync_paths)) + + # verify on disk files... 
+ self._verify_ondisk_files(tx_objs, policy) + self._verify_tombstones(tx_tombstones, policy) + + def test_nothing_to_sync(self): + job = {'device': self.device, + 'partition': self.partition, + 'policy': POLICIES.default} + node = {'replication_ip': self.rx_ip, + 'replication_port': self.rx_port, + 'device': self.device, + 'index': 0} + sender = ssync_sender.Sender(self.daemon, node, job, ['abc']) + # wrap connection from tx to rx to capture ssync messages... + sender.connect, trace = self.make_connect_wrapper(sender) + + result, in_sync_objs = sender() + + self.assertTrue(result) + self.assertFalse(in_sync_objs) + results = self._analyze_trace(trace) + self.assertFalse(results['tx_missing']) + self.assertFalse(results['rx_missing']) + self.assertFalse(results['tx_updates']) + self.assertFalse(results['rx_updates']) + # Minimal receiver response as read by sender: + # 2 * 4098 <-- _ensure_flush() twice + # + 23 <-- :MISSING CHECK START\r\n + # + 2 <-- \r\n (minimal missing check response) + # + 21 <-- :MISSING CHECK END\r\n + # + 17 <-- :UPDATES START\r\n + # + 15 <-- :UPDATES END\r\n + # TOTAL = 8274 + self.assertEqual(8274, trace.get('readline_bytes')) + + if __name__ == '__main__': unittest.main() From 025c4c4339e7ace2f5be8cb3a3cddf6c38ceff37 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Fri, 24 Apr 2015 02:15:36 -0700 Subject: [PATCH 32/98] Remove confusable query string on post as copy The current post-as-copy routine (i.e. POST object with the post_as_copy option turned on) on the Object Controller uses the "multipart-manifest" query string, which is fed to env['copy_hook'] to decide which data (the manifest or the object pointed at by the manifest) should be copied. However, using the query string this way confuses operators looking at the logging system (or analyzing the logs), because all POST object requests carry 'multipart-manifest=get', like: POST /v1/AUTH_test/d4c816b24d38489082f5118599a67920/manifest-abcde%3Fmultipart-manifest%3Dget We cannot know whether the query string was added by hand (by the user) or not. In addition, the query isn't needed by the backend conversation between proxy-server and object-server. (It is only needed by the "copy_hook" on the proxy controller!) To remove the confusing query string and keep the logs clean, this patch introduces a new environment variable, "swift.post_as_copy", and changes the proxy controller and the copy_hook to use the new env. 
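To make the mechanism concrete, here is a minimal, hypothetical sketch (the names below are illustrative, not the actual copy_hook implementation): a flag in the WSGI environ stays internal to the proxy, while anything placed in QUERY_STRING ends up in the logged request line.

    # Hypothetical sketch, not Swift's real copy_hook: an environ flag
    # is invisible in access logs, while a QUERY_STRING marker is logged.
    def decide_copy_source(environ):
        if environ.get('swift.post_as_copy'):
            # internal POST-as-COPY: copy the raw manifest as-is
            return 'manifest'
        if 'multipart-manifest=get' in environ.get('QUERY_STRING', ''):
            # the user explicitly asked for the raw manifest
            return 'manifest'
        # otherwise an SLO manifest would be expanded to its content
        return 'expanded object'

    env = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': '',
           'swift.post_as_copy': True}
    assert decide_copy_source(env) == 'manifest'

In the internal case nothing extra appears in the access log, which is the point of the change.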
This item was originally discussed at https://review.openstack.org/#/c/177132/ Co-Authored-By: Alistair Coles Change-Id: I0cd37520eea1825a10ebd27ccdc7e9162647233e --- swift/common/middleware/slo.py | 3 +- swift/proxy/controllers/obj.py | 16 ++++---- test/functional/swift_test_client.py | 5 ++- test/functional/tests.py | 53 +++++++++++++++++++++++++ test/unit/proxy/controllers/test_obj.py | 25 +++++++++++- 5 files changed, 89 insertions(+), 13 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index d8df829981..6a6b8294b8 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -537,7 +537,8 @@ class StaticLargeObject(object): def slo_hook(source_req, source_resp, sink_req): x_slo = source_resp.headers.get('X-Static-Large-Object') if (config_true_value(x_slo) - and source_req.params.get('multipart-manifest') != 'get'): + and source_req.params.get('multipart-manifest') != 'get' + and 'swift.post_as_copy' not in source_req.environ): source_resp = SloGetContext(self).get_or_head_response( source_req, source_resp.headers.items(), source_resp.app_iter) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index a83242b5f0..5b7c00c4aa 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -268,12 +268,8 @@ class BaseObjectController(Controller): req.headers['Content-Length'] = 0 req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name, self.object_name)) - req.headers['X-Fresh-Metadata'] = 'true' + req.environ['swift.post_as_copy'] = True req.environ['swift_versioned_copy'] = True - if req.environ.get('QUERY_STRING'): - req.environ['QUERY_STRING'] += '&multipart-manifest=get' - else: - req.environ['QUERY_STRING'] = 'multipart-manifest=get' resp = self.PUT(req) # Older editions returned 202 Accepted on object POSTs, so we'll # convert any 201 Created responses to that for compatibility with @@ -577,8 +573,11 @@ class BaseObjectController(Controller): if not req.content_type_manually_set: sink_req.headers['Content-Type'] = \ source_resp.headers['Content-Type'] - if config_true_value( - sink_req.headers.get('x-fresh-metadata', 'false')): + + fresh_meta_flag = config_true_value( + sink_req.headers.get('x-fresh-metadata', 'false')) + + if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ: # post-as-copy: ignore new sysmeta, copy existing sysmeta condition = lambda k: is_sys_meta('object', k) remove_items(sink_req.headers, condition) @@ -590,7 +589,8 @@ class BaseObjectController(Controller): # copy over x-static-large-object for POSTs and manifest copies if 'X-Static-Large-Object' in source_resp.headers and \ - req.params.get('multipart-manifest') == 'get': + (req.params.get('multipart-manifest') == 'get' or + 'swift.post_as_copy' in req.environ): sink_req.headers['X-Static-Large-Object'] = \ source_resp.headers['X-Static-Large-Object'] diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 4d77bcced0..695ea202d7 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -851,7 +851,7 @@ class File(Base): finally: fobj.close() - def sync_metadata(self, metadata=None, cfg=None): + def sync_metadata(self, metadata=None, cfg=None, parms=None): if metadata is None: metadata = {} if cfg is None: @@ -868,7 +868,8 @@ class File(Base): else: headers['Content-Length'] = 0 - self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg) + self.conn.make_request('POST', self.path, hdrs=headers, + 
parms=parms, cfg=cfg) if self.conn.response.status not in (201, 202): raise ResponseError(self.conn.response, 'POST', diff --git a/test/functional/tests.py b/test/functional/tests.py index 3fbbdd784e..3f6f08b8d8 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2151,6 +2151,7 @@ class TestSloEnv(object): 'manifest-bcd-submanifest')}, seg_info['seg_e']]), parms={'multipart-manifest': 'put'}) + cls.seg_info = seg_info class TestSlo(Base): @@ -2356,6 +2357,58 @@ class TestSlo(Base): except ValueError: self.fail("COPY didn't copy the manifest (invalid json on GET)") + def _make_manifest(self): + # To avoid the bug 1453807 on fast-post, make a new manifest + # for post test. + file_item = self.env.container.file("manifest-post") + seg_info = self.env.seg_info + file_item.write( + json.dumps([seg_info['seg_a'], seg_info['seg_b'], + seg_info['seg_c'], seg_info['seg_d'], + seg_info['seg_e']]), + parms={'multipart-manifest': 'put'}) + return file_item + + def test_slo_post_the_manifest_metadata_update(self): + file_item = self._make_manifest() + # sanity check, check the object is an SLO manifest + file_item.info() + file_item.header_fields([('slo', 'x-static-large-object')]) + + # POST a user metadata (i.e. x-object-meta-post) + file_item.sync_metadata({'post': 'update'}) + + updated = self.env.container.file("manifest-post") + updated.info() + updated.header_fields([('user-meta', 'x-object-meta-post')]) # sanity + updated_contents = updated.read(parms={'multipart-manifest': 'get'}) + try: + json.loads(updated_contents) + except ValueError: + self.fail("Unexpected content on GET, expected a json body") + + def test_slo_post_the_manifest_metadata_update_with_qs(self): + # multipart-manifest query should be ignored on post + for verb in ('put', 'get', 'delete'): + file_item = self._make_manifest() + # sanity check, check the object is an SLO manifest + file_item.info() + file_item.header_fields([('slo', 'x-static-large-object')]) + # POST a user metadata (i.e. 
x-object-meta-post) + file_item.sync_metadata(metadata={'post': 'update'}, + parms={'multipart-manifest': verb}) + updated = self.env.container.file("manifest-post") + updated.info() + updated.header_fields( + [('user-meta', 'x-object-meta-post')]) # sanity + updated_contents = updated.read( + parms={'multipart-manifest': 'get'}) + try: + json.loads(updated_contents) + except ValueError: + self.fail( + "Unexpected content on GET, expected a json body") + def test_slo_get_the_manifest(self): manifest = self.env.container.file("manifest-abcde") got_body = manifest.read(parms={'multipart-manifest': 'get'}) diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index a38e753ae0..b0e614a0dd 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -598,13 +598,31 @@ class TestReplicatedObjController(BaseObjectControllerMixin, def test_POST_as_COPY_simple(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST') - head_resp = [200] * self.obj_ring.replicas + \ + get_resp = [200] * self.obj_ring.replicas + \ [404] * self.obj_ring.max_more_nodes put_resp = [201] * self.obj_ring.replicas - codes = head_resp + put_resp + codes = get_resp + put_resp with set_http_connect(*codes): resp = req.get_response(self.app) self.assertEquals(resp.status_int, 202) + self.assertEquals(req.environ['QUERY_STRING'], '') + self.assertTrue('swift.post_as_copy' in req.environ) + + def test_POST_as_COPY_static_large_object(self): + req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST') + get_resp = [200] * self.obj_ring.replicas + \ + [404] * self.obj_ring.max_more_nodes + put_resp = [201] * self.obj_ring.replicas + codes = get_resp + put_resp + slo_headers = \ + [{'X-Static-Large-Object': True}] * self.obj_ring.replicas + get_headers = slo_headers + [{}] * (len(codes) - len(slo_headers)) + headers = {'headers': get_headers} + with set_http_connect(*codes, **headers): + resp = req.get_response(self.app) + self.assertEquals(resp.status_int, 202) + self.assertEquals(req.environ['QUERY_STRING'], '') + self.assertTrue('swift.post_as_copy' in req.environ) def test_POST_delete_at(self): t = str(int(time.time() + 100)) @@ -624,6 +642,9 @@ class TestReplicatedObjController(BaseObjectControllerMixin, with set_http_connect(*codes, give_connect=capture_headers): resp = req.get_response(self.app) self.assertEquals(resp.status_int, 200) + self.assertEquals(req.environ['QUERY_STRING'], '') # sanity + self.assertTrue('swift.post_as_copy' in req.environ) + for given_headers in post_headers: self.assertEquals(given_headers.get('X-Delete-At'), t) self.assertTrue('X-Delete-At-Host' in given_headers) From aa4866eb6014d7fbb8531eb5d927d5611ebfd06d Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Wed, 13 May 2015 20:10:59 +0000 Subject: [PATCH 33/98] move replication code to ReplicatedObjectController Moving the _connect_put_node, _send_file, _transfer_data and _store_object methods to ReplicatedObjectController. Each of these methods is specific to the replication policy; the EC policy implements its own versions of them. Of these four methods, only _store_object and _connect_put_node are required to be implemented by each policy-specific Object Controller. 
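As a rough sketch of the resulting structure (simplified, hypothetical signatures; the real methods take more arguments), the base class declares the hooks and each policy controller supplies its own transfer logic:

    # Illustrative sketch only, not the actual Swift classes.
    class BaseObjectController(object):
        def _connect_put_node(self, nodes, part, path, headers):
            # policy-specific: how to open a PUT connection to one node
            raise NotImplementedError()

        def _store_object(self, req, data_source, nodes, partition,
                          outgoing_headers):
            # policy-specific: how to move the object data to the nodes
            raise NotImplementedError()

    class ReplicatedObjectController(BaseObjectController):
        def _store_object(self, req, data_source, nodes, partition,
                          outgoing_headers):
            # replication: stream identical bytes to every node, then
            # pick the best response by quorum
            pass

    class ECObjectController(BaseObjectController):
        def _store_object(self, req, data_source, nodes, partition,
                          outgoing_headers):
            # erasure coding: encode the body into fragment archives,
            # one per node
            pass

This is the classic template-method split: _transfer_data and _send_file become private helpers of the replicated implementation rather than base-class behaviour.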
Change-Id: Ifc72461b77dbfdaae9d63417f1286e8b5da3ca4e Signed-off-by: Thiago da Silva --- swift/proxy/controllers/obj.py | 380 ++++++++++++++++++--------------- 1 file changed, 210 insertions(+), 170 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index a83242b5f0..c61986e1dd 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -350,67 +350,6 @@ class BaseObjectController(Controller): return headers - def _send_file(self, conn, path): - """Method for a file PUT coro""" - while True: - chunk = conn.queue.get() - if not conn.failed: - try: - with ChunkWriteTimeout(self.app.node_timeout): - conn.send(chunk) - except (Exception, ChunkWriteTimeout): - conn.failed = True - self.app.exception_occurred( - conn.node, _('Object'), - _('Trying to write to %s') % path) - conn.queue.task_done() - - def _connect_put_node(self, nodes, part, path, headers, - logger_thread_locals): - """ - Make a connection for a replicated object. - - Connects to the first working node that it finds in node_iter - and sends over the request headers. Returns an HTTPConnection - object to handle the rest of the streaming. - """ - self.app.logger.thread_locals = logger_thread_locals - for node in nodes: - try: - start_time = time.time() - with ConnectionTimeout(self.app.conn_timeout): - conn = http_connect( - node['ip'], node['port'], node['device'], part, 'PUT', - path, headers) - self.app.set_node_timing(node, time.time() - start_time) - with Timeout(self.app.node_timeout): - resp = conn.getexpect() - if resp.status == HTTP_CONTINUE: - conn.resp = None - conn.node = node - return conn - elif is_success(resp.status) or resp.status == HTTP_CONFLICT: - conn.resp = resp - conn.node = node - return conn - elif headers['If-None-Match'] is not None and \ - resp.status == HTTP_PRECONDITION_FAILED: - conn.resp = resp - conn.node = node - return conn - elif resp.status == HTTP_INSUFFICIENT_STORAGE: - self.app.error_limit(node, _('ERROR Insufficient Storage')) - elif is_server_error(resp.status): - self.app.error_occurred( - node, - _('ERROR %(status)d Expect: 100-continue ' - 'From Object Server') % { - 'status': resp.status}) - except (Exception, Timeout): - self.app.exception_occurred( - node, _('Object'), - _('Expect: 100-continue on %s') % path) - def _await_response(self, conn, **kwargs): with Timeout(self.app.node_timeout): if conn.resp: @@ -730,6 +669,28 @@ class BaseObjectController(Controller): self._check_min_conn(req, conns, min_conns) + def _connect_put_node(self, nodes, part, path, headers, + logger_thread_locals): + """ + Make connection to storage nodes + + Connects to the first working node that it finds in nodes iter + and sends over the request headers. Returns an HTTPConnection + object to handle the rest of the streaming. + + This method must be implemented by each policy ObjectController. + + :param nodes: an iterator of the target storage nodes + :param partition: ring partition number + :param path: the object path to send to the storage node + :param headers: request headers + :param logger_thread_locals: The thread local values to be set on the + self.app.logger to retain transaction + logging information. 
+ :return: HTTPConnection object + """ + raise NotImplementedError() + def _get_put_connections(self, req, nodes, partition, outgoing_headers, policy, expect): """ @@ -760,120 +721,23 @@ class BaseObjectController(Controller): {'conns': len(conns), 'nodes': min_conns}) raise HTTPServiceUnavailable(request=req) - def _transfer_data(self, req, data_source, conns, nodes): - """ - Transfer data for a replicated object. - - This method was added in the PUT method extraction change - """ - min_conns = quorum_size(len(nodes)) - bytes_transferred = 0 - try: - with ContextPool(len(nodes)) as pool: - for conn in conns: - conn.failed = False - conn.queue = Queue(self.app.put_queue_depth) - pool.spawn(self._send_file, conn, req.path) - while True: - with ChunkReadTimeout(self.app.client_timeout): - try: - chunk = next(data_source) - except StopIteration: - if req.is_chunked: - for conn in conns: - conn.queue.put('0\r\n\r\n') - break - bytes_transferred += len(chunk) - if bytes_transferred > constraints.MAX_FILE_SIZE: - raise HTTPRequestEntityTooLarge(request=req) - for conn in list(conns): - if not conn.failed: - conn.queue.put( - '%x\r\n%s\r\n' % (len(chunk), chunk) - if req.is_chunked else chunk) - else: - conn.close() - conns.remove(conn) - self._check_min_conn( - req, conns, min_conns, - msg='Object PUT exceptions during' - ' send, %(conns)s/%(nodes)s required connections') - for conn in conns: - if conn.queue.unfinished_tasks: - conn.queue.join() - conns = [conn for conn in conns if not conn.failed] - self._check_min_conn( - req, conns, min_conns, - msg='Object PUT exceptions after last send, ' - '%(conns)s/%(nodes)s required connections') - except ChunkReadTimeout as err: - self.app.logger.warn( - _('ERROR Client read timeout (%ss)'), err.seconds) - self.app.logger.increment('client_timeouts') - raise HTTPRequestTimeout(request=req) - except HTTPException: - raise - except (Exception, Timeout): - self.app.logger.exception( - _('ERROR Exception causing client disconnect')) - raise HTTPClientDisconnect(request=req) - if req.content_length and bytes_transferred < req.content_length: - req.client_disconnect = True - self.app.logger.warn( - _('Client disconnected without sending enough data')) - self.app.logger.increment('client_disconnects') - raise HTTPClientDisconnect(request=req) - def _store_object(self, req, data_source, nodes, partition, outgoing_headers): """ - Store a replicated object. - This method is responsible for establishing connection - with storage nodes and sending object to each one of those - nodes. After sending the data, the "best" response will be - returned based on statuses from all connections + with storage nodes and sending the data to each one of those + nodes. The process of transferring data is specific to each + Storage Policy, thus each policy-specific + ObjectController is required to provide its own implementation of this method.
+ + :param req: the PUT Request + :param data_source: an iterator of the source of the data + :param nodes: an iterator of the target storage nodes + :param partition: ring partition number + :param outgoing_headers: system headers to storage nodes + :return: Response object """ - policy_index = req.headers.get('X-Backend-Storage-Policy-Index') - policy = POLICIES.get_by_index(policy_index) - if not nodes: - return HTTPNotFound() - - # RFC2616:8.2.3 disallows 100-continue without a body - if (req.content_length > 0) or req.is_chunked: - expect = True - else: - expect = False - conns = self._get_put_connections(req, nodes, partition, - outgoing_headers, policy, expect) - min_conns = quorum_size(len(nodes)) - try: - # check that a minimum number of connections were established and - # meet all the correct conditions set in the request - self._check_failure_put_connections(conns, req, nodes, min_conns) - - # transfer data - self._transfer_data(req, data_source, conns, nodes) - - # get responses - statuses, reasons, bodies, etags = self._get_put_responses( - req, conns, nodes) - except HTTPException as resp: - return resp - finally: - for conn in conns: - conn.close() - - if len(etags) > 1: - self.app.logger.error( - _('Object servers returned %s mismatched etags'), len(etags)) - return HTTPServerError(request=req) - etag = etags.pop() if len(etags) else None - resp = self.best_response(req, statuses, reasons, bodies, - _('Object PUT'), etag=etag) - resp.last_modified = math.ceil( - float(Timestamp(req.headers['X-Timestamp']))) - return resp + raise NotImplementedError() @public @cors_validation @@ -1131,6 +995,182 @@ class ReplicatedObjectController(BaseObjectController): req.swift_entity_path) return resp + def _connect_put_node(self, nodes, part, path, headers, + logger_thread_locals): + """ + Make a connection for a replicated object. + + Connects to the first working node that it finds in node_iter + and sends over the request headers. Returns an HTTPConnection + object to handle the rest of the streaming. 
+ """ + self.app.logger.thread_locals = logger_thread_locals + for node in nodes: + try: + start_time = time.time() + with ConnectionTimeout(self.app.conn_timeout): + conn = http_connect( + node['ip'], node['port'], node['device'], part, 'PUT', + path, headers) + self.app.set_node_timing(node, time.time() - start_time) + with Timeout(self.app.node_timeout): + resp = conn.getexpect() + if resp.status == HTTP_CONTINUE: + conn.resp = None + conn.node = node + return conn + elif is_success(resp.status) or resp.status == HTTP_CONFLICT: + conn.resp = resp + conn.node = node + return conn + elif headers['If-None-Match'] is not None and \ + resp.status == HTTP_PRECONDITION_FAILED: + conn.resp = resp + conn.node = node + return conn + elif resp.status == HTTP_INSUFFICIENT_STORAGE: + self.app.error_limit(node, _('ERROR Insufficient Storage')) + elif is_server_error(resp.status): + self.app.error_occurred( + node, + _('ERROR %(status)d Expect: 100-continue ' + 'From Object Server') % { + 'status': resp.status}) + except (Exception, Timeout): + self.app.exception_occurred( + node, _('Object'), + _('Expect: 100-continue on %s') % path) + + def _send_file(self, conn, path): + """Method for a file PUT coro""" + while True: + chunk = conn.queue.get() + if not conn.failed: + try: + with ChunkWriteTimeout(self.app.node_timeout): + conn.send(chunk) + except (Exception, ChunkWriteTimeout): + conn.failed = True + self.app.exception_occurred( + conn.node, _('Object'), + _('Trying to write to %s') % path) + conn.queue.task_done() + + def _transfer_data(self, req, data_source, conns, nodes): + """ + Transfer data for a replicated object. + + This method was added in the PUT method extraction change + """ + min_conns = quorum_size(len(nodes)) + bytes_transferred = 0 + try: + with ContextPool(len(nodes)) as pool: + for conn in conns: + conn.failed = False + conn.queue = Queue(self.app.put_queue_depth) + pool.spawn(self._send_file, conn, req.path) + while True: + with ChunkReadTimeout(self.app.client_timeout): + try: + chunk = next(data_source) + except StopIteration: + if req.is_chunked: + for conn in conns: + conn.queue.put('0\r\n\r\n') + break + bytes_transferred += len(chunk) + if bytes_transferred > constraints.MAX_FILE_SIZE: + raise HTTPRequestEntityTooLarge(request=req) + for conn in list(conns): + if not conn.failed: + conn.queue.put( + '%x\r\n%s\r\n' % (len(chunk), chunk) + if req.is_chunked else chunk) + else: + conn.close() + conns.remove(conn) + self._check_min_conn( + req, conns, min_conns, + msg='Object PUT exceptions during' + ' send, %(conns)s/%(nodes)s required connections') + for conn in conns: + if conn.queue.unfinished_tasks: + conn.queue.join() + conns = [conn for conn in conns if not conn.failed] + self._check_min_conn( + req, conns, min_conns, + msg='Object PUT exceptions after last send, ' + '%(conns)s/%(nodes)s required connections') + except ChunkReadTimeout as err: + self.app.logger.warn( + _('ERROR Client read timeout (%ss)'), err.seconds) + self.app.logger.increment('client_timeouts') + raise HTTPRequestTimeout(request=req) + except HTTPException: + raise + except (Exception, Timeout): + self.app.logger.exception( + _('ERROR Exception causing client disconnect')) + raise HTTPClientDisconnect(request=req) + if req.content_length and bytes_transferred < req.content_length: + req.client_disconnect = True + self.app.logger.warn( + _('Client disconnected without sending enough data')) + self.app.logger.increment('client_disconnects') + raise HTTPClientDisconnect(request=req) + + def 
_store_object(self, req, data_source, nodes, partition, + outgoing_headers): + """ + Store a replicated object. + + This method is responsible for establishing connection + with storage nodes and sending object to each one of those + nodes. After sending the data, the "best" response will be + returned based on statuses from all connections + """ + policy_index = req.headers.get('X-Backend-Storage-Policy-Index') + policy = POLICIES.get_by_index(policy_index) + if not nodes: + return HTTPNotFound() + + # RFC2616:8.2.3 disallows 100-continue without a body + if (req.content_length > 0) or req.is_chunked: + expect = True + else: + expect = False + conns = self._get_put_connections(req, nodes, partition, + outgoing_headers, policy, expect) + min_conns = quorum_size(len(nodes)) + try: + # check that a minimum number of connections were established and + # meet all the correct conditions set in the request + self._check_failure_put_connections(conns, req, nodes, min_conns) + + # transfer data + self._transfer_data(req, data_source, conns, nodes) + + # get responses + statuses, reasons, bodies, etags = self._get_put_responses( + req, conns, nodes) + except HTTPException as resp: + return resp + finally: + for conn in conns: + conn.close() + + if len(etags) > 1: + self.app.logger.error( + _('Object servers returned %s mismatched etags'), len(etags)) + return HTTPServerError(request=req) + etag = etags.pop() if len(etags) else None + resp = self.best_response(req, statuses, reasons, bodies, + _('Object PUT'), etag=etag) + resp.last_modified = math.ceil( + float(Timestamp(req.headers['X-Timestamp']))) + return resp + class ECAppIter(object): """ From ab9f63402de6d554528699a02955854ac28264c5 Mon Sep 17 00:00:00 2001 From: Takashi Kajinami Date: Mon, 24 Nov 2014 21:44:03 +0900 Subject: [PATCH 34/98] Add process name checking into swift-init Swift-init uses pid files to detect existing swift processes by pid. However, it can mistake an unrelated process for a swift process and make a wrong decision when the unrelated process is running with a pid written in a swift pid file. This patch adds process name checking to swift-init and enables it to remove invalid pid files in such situations.
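The core of the check is easy to state on its own. Below is a hypothetical standalone version of what safe_kill() does before trusting a pid taken from a pid file (the real implementation in swift/common/manager.py only performs the check for signal.SIG_DFL, as the diff shows):

import os

PROC_DIR = '/proc'


def pid_belongs_to(pid, name):
    # /proc/<pid>/cmdline holds the NUL-separated argv of the process;
    # a substring match is enough to tell swift-object-server apart
    # from an unrelated process that happens to have reused the pid.
    proc_file = '%s/%d/cmdline' % (PROC_DIR, pid)
    try:
        with open(proc_file) as fd:
            return name in fd.read()
    except IOError:
        # the process exited between the existence check and the open;
        # give the pid file the benefit of the doubt, as the patch does
        return True

Note the permissive IOError handling mirrors the patch: if /proc cannot be read, the pid file is left alone rather than removed.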
Change-Id: Ibca026bdfbdacdd92c8763e1eb15d98293c70656 Closes-Bug: #1327106 --- swift/common/exceptions.py | 4 + swift/common/manager.py | 32 ++- test/unit/common/test_manager.py | 323 ++++++++++++++++++++----------- 3 files changed, 241 insertions(+), 118 deletions(-) diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py index dab0777d6d..b1edadee39 100644 --- a/swift/common/exceptions.py +++ b/swift/common/exceptions.py @@ -256,3 +256,7 @@ class ClientException(Exception): b += ' [first 60 chars of response] %s' \ % self.http_response_content[:60] return b and '%s: %s' % (a, b) or a + + +class InvalidPidFileException(Exception): + pass diff --git a/swift/common/manager.py b/swift/common/manager.py index ba4832ee00..afed0bb8ca 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -24,9 +24,11 @@ import re from swift import gettext_ as _ from swift.common.utils import search_tree, remove_file, write_file +from swift.common.exceptions import InvalidPidFileException SWIFT_DIR = '/etc/swift' RUN_DIR = '/var/run/swift' +PROC_DIR = '/proc' # auth-server has been removed from ALL_SERVERS, start it explicitly ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor', @@ -134,6 +136,29 @@ def watch_server_pids(server_pids, interval=1, **kwargs): time.sleep(0.1) +def safe_kill(pid, sig, name): + """Send signal to process and check process name + + : param pid: process id + : param sig: signal to send + : param name: name to ensure target process + """ + + # check process name for SIG_DFL + if sig == signal.SIG_DFL: + try: + proc_file = '%s/%d/cmdline' % (PROC_DIR, pid) + if os.path.exists(proc_file): + with open(proc_file, 'r') as fd: + if name not in fd.read(): + # unknown process is using the pid + raise InvalidPidFileException() + except IOError: + pass + + os.kill(pid, sig) + + class UnknownCommandError(Exception): pass @@ -488,7 +513,12 @@ class Server(object): if sig != signal.SIG_DFL: print _('Signal %s pid: %s signal: %s') % (self.server, pid, sig) - os.kill(pid, sig) + safe_kill(pid, sig, 'swift-%s' % self.server) + except InvalidPidFileException as e: + if kwargs.get('verbose'): + print _('Removing pid file %s with wrong pid %d') \ + % (pid_file, pid) + remove_file(pid_file) except OSError as e: if e.errno == errno.ESRCH: # pid does not exist diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index 8896fc138a..5a9b3a6629 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -26,6 +26,7 @@ from threading import Thread from time import sleep, time from swift.common import manager +from swift.common.exceptions import InvalidPidFileException DUMMY_SIG = 1 @@ -63,7 +64,6 @@ def pop_stream(f): output = f.read() f.seek(0) f.truncate() - #print >> sys.stderr, output return output @@ -257,6 +257,23 @@ class TestManagerModule(unittest.TestCase): manager.time = _orig_time manager.Server = _orig_server + def test_safe_kill(self): + manager.os = MockOs([1, 2, 3, 4]) + + proc_files = ( + ('1/cmdline', 'same-procname'), + ('2/cmdline', 'another-procname'), + ('4/cmdline', 'another-procname'), + ) + files, contents = zip(*proc_files) + with temptree(files, contents) as t: + manager.PROC_DIR = t + manager.safe_kill(1, signal.SIG_DFL, 'same-procname') + self.assertRaises(InvalidPidFileException, manager.safe_kill, + 2, signal.SIG_DFL, 'same-procname') + manager.safe_kill(3, signal.SIG_DFL, 'same-procname') + manager.safe_kill(4, signal.SIGHUP, 'same-procname') + def test_exc(self): 
self.assert_(issubclass(manager.UnknownCommandError, Exception)) @@ -680,17 +697,19 @@ class TestServer(unittest.TestCase): self.assertEquals(pid_file, pid_two) def test_signal_pids(self): - pid_files = ( - ('proxy-server.pid', 1), - ('auth-server.pid', 2), - ('object-server.pid', 3), + temp_files = ( + ('var/run/proxy-server.pid', 1), + ('var/run/auth-server.pid', 2), + ('var/run/one-server.pid', 3), + ('var/run/object-server.pid', 4), + ('proc/3/cmdline', 'swift-another-server') ) - files, pids = zip(*pid_files) - with temptree(files, pids) as t: - manager.RUN_DIR = t - # mock os with both pids running + with temptree(*zip(*temp_files)) as t: + manager.RUN_DIR = os.path.join(t, 'var/run') + manager.PROC_DIR = os.path.join(t, 'proc') + # mock os with so both the first and second are running manager.os = MockOs([1, 2]) - server = manager.Server('proxy', run_dir=t) + server = manager.Server('proxy', run_dir=manager.RUN_DIR) pids = server.signal_pids(DUMMY_SIG) self.assertEquals(len(pids), 1) self.assert_(1 in pids) @@ -703,7 +722,7 @@ class TestServer(unittest.TestCase): try: with open(os.path.join(t, 'output'), 'w+') as f: sys.stdout = f - #test print details + # test print details pids = server.signal_pids(DUMMY_SIG) output = pop_stream(f) self.assert_('pid: %s' % 1 in output) @@ -711,7 +730,7 @@ class TestServer(unittest.TestCase): # test no details on signal.SIG_DFL pids = server.signal_pids(signal.SIG_DFL) self.assertEquals(pop_stream(f), '') - # reset mock os so only the other server is running + # reset mock os so only the second server is running manager.os = MockOs([2]) # test pid not running pids = server.signal_pids(signal.SIG_DFL) @@ -722,42 +741,63 @@ class TestServer(unittest.TestCase): self.join_run_dir('proxy-server.pid'))) # reset mock os with no running pids manager.os = MockOs([]) - server = manager.Server('auth', run_dir=t) - # test verbose warns on removing pid file + server = manager.Server('auth', run_dir=manager.RUN_DIR) + # test verbose warns on removing stale pid file pids = server.signal_pids(signal.SIG_DFL, verbose=True) output = pop_stream(f) self.assert_('stale pid' in output.lower()) auth_pid = self.join_run_dir('auth-server.pid') self.assert_(auth_pid in output) + # reset mock os so only the third server is running + manager.os = MockOs([3]) + server = manager.Server('one', run_dir=manager.RUN_DIR) + # test verbose warns on removing invalid pid file + pids = server.signal_pids(signal.SIG_DFL, verbose=True) + output = pop_stream(f) + old_stdout.write('output %s' % output) + self.assert_('removing pid file' in output.lower()) + one_pid = self.join_run_dir('one-server.pid') + self.assert_(one_pid in output) + # reset mock os with no running pids + manager.os = MockOs([]) # test warning with insufficient permissions - server = manager.Server('object', run_dir=t) + server = manager.Server('object', run_dir=manager.RUN_DIR) pids = server.signal_pids(manager.os.RAISE_EPERM_SIG) output = pop_stream(f) - self.assert_('no permission to signal pid 3' in + self.assert_('no permission to signal pid 4' in output.lower(), output) finally: sys.stdout = old_stdout def test_get_running_pids(self): # test only gets running pids - pid_files = ( - ('test-server1.pid', 1), - ('test-server2.pid', 2), + temp_files = ( + ('var/run/test-server1.pid', 1), + ('var/run/test-server2.pid', 2), + ('var/run/test-server3.pid', 3), + ('proc/1/cmdline', 'swift-test-server'), + ('proc/3/cmdline', 'swift-another-server') ) - with temptree(*zip(*pid_files)) as t: - manager.RUN_DIR = t - server = 
manager.Server('test-server', run_dir=t) + with temptree(*zip(*temp_files)) as t: + manager.RUN_DIR = os.path.join(t, 'var/run') + manager.PROC_DIR = os.path.join(t, 'proc') + server = manager.Server( + 'test-server', run_dir=manager.RUN_DIR) # mock os, only pid '1' is running - manager.os = MockOs([1]) + manager.os = MockOs([1, 3]) running_pids = server.get_running_pids() self.assertEquals(len(running_pids), 1) self.assert_(1 in running_pids) self.assert_(2 not in running_pids) + self.assert_(3 not in running_pids) # test persistent running pid files - self.assert_(os.path.exists(os.path.join(t, 'test-server1.pid'))) + self.assert_(os.path.exists( + os.path.join(manager.RUN_DIR, 'test-server1.pid'))) # test clean up stale pids pid_two = self.join_swift_dir('test-server2.pid') self.assertFalse(os.path.exists(pid_two)) + pid_three = self.join_swift_dir('test-server3.pid') + self.assertFalse(os.path.exists(pid_three)) # reset mock os, no pids running manager.os = MockOs([]) running_pids = server.get_running_pids() @@ -765,7 +805,7 @@ class TestServer(unittest.TestCase): # and now all pid files are cleaned out pid_one = self.join_run_dir('test-server1.pid') self.assertFalse(os.path.exists(pid_one)) - all_pids = os.listdir(t) + all_pids = os.listdir(manager.RUN_DIR) self.assertEquals(len(all_pids), 0) # test only get pids for right server @@ -883,40 +923,68 @@ class TestServer(unittest.TestCase): sys.stdout = f # test status for all running manager.os = MockOs(pids) - self.assertEquals(server.status(), 0) - output = pop_stream(f).strip().splitlines() - self.assertEquals(len(output), 4) - for line in output: - self.assert_('test-server running' in line) + proc_files = ( + ('1/cmdline', 'swift-test-server'), + ('2/cmdline', 'swift-test-server'), + ('3/cmdline', 'swift-test-server'), + ('4/cmdline', 'swift-test-server'), + ) + files, contents = zip(*proc_files) + with temptree(files, contents) as t: + manager.PROC_DIR = t + self.assertEquals(server.status(), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 4) + for line in output: + self.assert_('test-server running' in line) # test get single server by number - self.assertEquals(server.status(number=4), 0) - output = pop_stream(f).strip().splitlines() - self.assertEquals(len(output), 1) - line = output[0] - self.assert_('test-server running' in line) - conf_four = self.join_swift_dir(conf_files[3]) - self.assert_('4 - %s' % conf_four in line) + with temptree([], []) as t: + manager.PROC_DIR = t + self.assertEquals(server.status(number=4), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 1) + line = output[0] + self.assert_('test-server running' in line) + conf_four = self.join_swift_dir(conf_files[3]) + self.assert_('4 - %s' % conf_four in line) # test some servers not running manager.os = MockOs([1, 2, 3]) - self.assertEquals(server.status(), 0) - output = pop_stream(f).strip().splitlines() - self.assertEquals(len(output), 3) - for line in output: - self.assert_('test-server running' in line) + proc_files = ( + ('1/cmdline', 'swift-test-server'), + ('2/cmdline', 'swift-test-server'), + ('3/cmdline', 'swift-test-server'), + ) + files, contents = zip(*proc_files) + with temptree(files, contents) as t: + manager.PROC_DIR = t + self.assertEquals(server.status(), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 3) + for line in output: + self.assert_('test-server running' in line) # test single server not running manager.os = MockOs([1, 2]) - 
self.assertEquals(server.status(number=3), 1) - output = pop_stream(f).strip().splitlines() - self.assertEquals(len(output), 1) - line = output[0] - self.assert_('not running' in line) - conf_three = self.join_swift_dir(conf_files[2]) - self.assert_(conf_three in line) + proc_files = ( + ('1/cmdline', 'swift-test-server'), + ('2/cmdline', 'swift-test-server'), + ) + files, contents = zip(*proc_files) + with temptree(files, contents) as t: + manager.PROC_DIR = t + self.assertEquals(server.status(number=3), 1) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 1) + line = output[0] + self.assert_('not running' in line) + conf_three = self.join_swift_dir(conf_files[2]) + self.assert_(conf_three in line) # test no running pids manager.os = MockOs([]) - self.assertEquals(server.status(), 1) - output = pop_stream(f).lower() - self.assert_('no test-server running' in output) + with temptree([], []) as t: + manager.PROC_DIR = t + self.assertEquals(server.status(), 1) + output = pop_stream(f).lower() + self.assert_('no test-server running' in output) # test use provided pids pids = { 1: '1.pid', @@ -1210,7 +1278,7 @@ class TestServer(unittest.TestCase): ('proxy-server/2.pid', 2), ) - #mocks + # mocks class MockSpawn(object): def __init__(self, pids=None): @@ -1247,76 +1315,97 @@ class TestServer(unittest.TestCase): self.assertFalse(server.launch()) # start mock os running all pids manager.os = MockOs(pids) - server = manager.Server('proxy', run_dir=t) - # can't start server if it's already running - self.assertFalse(server.launch()) - output = pop_stream(f) - self.assert_('running' in output) - conf_file = self.join_swift_dir('proxy-server.conf') - self.assert_(conf_file in output) - pid_file = self.join_run_dir('proxy-server/2.pid') - self.assert_(pid_file in output) - self.assert_('already started' in output) + proc_files = ( + ('1/cmdline', 'swift-proxy-server'), + ('2/cmdline', 'swift-proxy-server'), + ) + files, contents = zip(*proc_files) + with temptree(files, contents) as proc_dir: + manager.PROC_DIR = proc_dir + server = manager.Server('proxy', run_dir=t) + # can't start server if it's already running + self.assertFalse(server.launch()) + output = pop_stream(f) + self.assert_('running' in output) + conf_file = self.join_swift_dir( + 'proxy-server.conf') + self.assert_(conf_file in output) + pid_file = self.join_run_dir('proxy-server/2.pid') + self.assert_(pid_file in output) + self.assert_('already started' in output) # no running pids manager.os = MockOs([]) - # test ignore once for non-start-once server - mock_spawn = MockSpawn([1]) - server.spawn = mock_spawn - conf_file = self.join_swift_dir('proxy-server.conf') - expected = { - 1: conf_file, - } - self.assertEquals(server.launch(once=True), expected) - self.assertEquals(mock_spawn.conf_files, [conf_file]) - expected = { - 'once': False, - } - self.assertEquals(mock_spawn.kwargs, [expected]) - output = pop_stream(f) - self.assert_('Starting' in output) - self.assert_('once' not in output) + with temptree([], []) as proc_dir: + manager.PROC_DIR = proc_dir + # test ignore once for non-start-once server + mock_spawn = MockSpawn([1]) + server.spawn = mock_spawn + conf_file = self.join_swift_dir( + 'proxy-server.conf') + expected = { + 1: conf_file, + } + self.assertEquals(server.launch(once=True), + expected) + self.assertEquals(mock_spawn.conf_files, + [conf_file]) + expected = { + 'once': False, + } + self.assertEquals(mock_spawn.kwargs, [expected]) + output = pop_stream(f) + self.assert_('Starting' in output) 
+ self.assert_('once' not in output) # test multi-server kwarg once server = manager.Server('object-replicator') - mock_spawn = MockSpawn([1, 2, 3, 4]) - server.spawn = mock_spawn - conf1 = self.join_swift_dir('object-server/1.conf') - conf2 = self.join_swift_dir('object-server/2.conf') - conf3 = self.join_swift_dir('object-server/3.conf') - conf4 = self.join_swift_dir('object-server/4.conf') - expected = { - 1: conf1, - 2: conf2, - 3: conf3, - 4: conf4, - } - self.assertEquals(server.launch(once=True), expected) - self.assertEquals(mock_spawn.conf_files, [ - conf1, conf2, conf3, conf4]) - expected = { - 'once': True, - } - self.assertEquals(len(mock_spawn.kwargs), 4) - for kwargs in mock_spawn.kwargs: - self.assertEquals(kwargs, expected) - # test number kwarg - mock_spawn = MockSpawn([4]) - server.spawn = mock_spawn - expected = { - 4: conf4, - } - self.assertEquals(server.launch(number=4), expected) - self.assertEquals(mock_spawn.conf_files, [conf4]) - expected = { - 'number': 4 - } - self.assertEquals(mock_spawn.kwargs, [expected]) + with temptree([], []) as proc_dir: + manager.PROC_DIR = proc_dir + mock_spawn = MockSpawn([1, 2, 3, 4]) + server.spawn = mock_spawn + conf1 = self.join_swift_dir('object-server/1.conf') + conf2 = self.join_swift_dir('object-server/2.conf') + conf3 = self.join_swift_dir('object-server/3.conf') + conf4 = self.join_swift_dir('object-server/4.conf') + expected = { + 1: conf1, + 2: conf2, + 3: conf3, + 4: conf4, + } + self.assertEquals(server.launch(once=True), + expected) + self.assertEquals(mock_spawn.conf_files, [ + conf1, conf2, conf3, conf4]) + expected = { + 'once': True, + } + self.assertEquals(len(mock_spawn.kwargs), 4) + for kwargs in mock_spawn.kwargs: + self.assertEquals(kwargs, expected) + # test number kwarg + mock_spawn = MockSpawn([4]) + manager.PROC_DIR = proc_dir + server.spawn = mock_spawn + expected = { + 4: conf4, + } + self.assertEquals(server.launch(number=4), + expected) + self.assertEquals(mock_spawn.conf_files, [conf4]) + expected = { + 'number': 4 + } + self.assertEquals(mock_spawn.kwargs, [expected]) # test cmd does not exist server = manager.Server('auth') - mock_spawn = MockSpawn([OSError(errno.ENOENT, 'blah')]) - server.spawn = mock_spawn - self.assertEquals(server.launch(), {}) - self.assert_('swift-auth-server does not exist' in - pop_stream(f)) + with temptree([], []) as proc_dir: + manager.PROC_DIR = proc_dir + mock_spawn = MockSpawn([OSError(errno.ENOENT, + 'blah')]) + server.spawn = mock_spawn + self.assertEquals(server.launch(), {}) + self.assert_('swift-auth-server does not exist' in + pop_stream(f)) finally: sys.stdout = old_stdout From e54781a2aa29808aff654861d8a0aafd24b6620c Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Tue, 19 May 2015 20:27:06 +0000 Subject: [PATCH 35/98] add object post and delete methods to BaseObjectController Adding post and delete methods to BaseObjectController that can be overridden by ObjectController subclasses.
These methods are similar to the PUT and GET methods that were introduced as part of the EC work. Change-Id: I197364bc3e2f2287c0afc8948863e3cdeab90383 Signed-off-by: Thiago da Silva --- swift/proxy/controllers/obj.py | 45 ++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index c61986e1dd..e84eafea13 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -313,10 +313,7 @@ class BaseObjectController(Controller): headers = self._backend_requests( req, len(nodes), container_partition, containers, delete_at_container, delete_at_part, delete_at_nodes) - - resp = self.make_requests(req, obj_ring, partition, - 'POST', req.swift_entity_path, headers) - return resp + return self._post_object(req, obj_ring, partition, headers) def _backend_requests(self, req, n_outgoing, container_partition, containers, @@ -739,6 +736,39 @@ class BaseObjectController(Controller): """ raise NotImplementedError() + def _delete_object(self, req, obj_ring, partition, headers): + """ + Send an object DELETE request to the storage nodes. Subclasses of + BaseObjectController can provide their own implementation + of this method. + + :param req: the DELETE Request + :param obj_ring: the object ring + :param partition: ring partition number + :param headers: system headers to storage nodes + :return: Response object + """ + # When deleting objects treat a 404 status as 204. + status_overrides = {404: 204} + resp = self.make_requests(req, obj_ring, + partition, 'DELETE', req.swift_entity_path, + headers, overrides=status_overrides) + return resp + + def _post_object(self, req, obj_ring, partition, headers): + """ + Send an object POST request to the storage nodes. + + :param req: the POST Request + :param obj_ring: the object ring + :param partition: ring partition number + :param headers: system headers to storage nodes + :return: Response object + """ + resp = self.make_requests(req, obj_ring, partition, + 'POST', req.swift_entity_path, headers) + return resp + @public @cors_validation @delay_denial @@ -928,12 +958,7 @@ class BaseObjectController(Controller): headers = self._backend_requests( req, len(nodes), container_partition, containers) - # When deleting objects treat a 404 status as 204. - status_overrides = {404: 204} - resp = self.make_requests(req, obj_ring, - partition, 'DELETE', req.swift_entity_path, - headers, overrides=status_overrides) - return resp + return self._delete_object(req, obj_ring, partition, headers) def _reroute(self, policy): """ From f864092455ebcd40b3568633c3524cc5c64d3309 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Wed, 20 May 2015 17:50:07 -0700 Subject: [PATCH 36/98] Add Swift Inspector to associated projects Change-Id: I5b5448674ea455119a51509ab5e7cd11a764b5a7 --- doc/source/associated_projects.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst index c0f8cf7e5d..762971ca4f 100644 --- a/doc/source/associated_projects.rst +++ b/doc/source/associated_projects.rst @@ -48,6 +48,7 @@ Monitoring & Statistics ----------------------- * `Swift Informant `_ - Swift Proxy Middleware to send events to a statsd instance. +* `Swift Inspector `_ - Swift middleware to relay information about a request back to the client.
Content Distribution Network Integration From f11d92d566757a54ff1e3800ec0bfac098347a68 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Fri, 22 May 2015 16:58:04 -0700 Subject: [PATCH 37/98] Add swift-durability-calculator line to docs This commit adds a line (a link and a short description) for swift-durability-calculator, a browser-based durability calculation tool, to the docs as an associated project. Change-Id: I4ea8015f512616dc25072080bef79b8734971ccf --- doc/source/associated_projects.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst index c0f8cf7e5d..d706334670 100644 --- a/doc/source/associated_projects.rst +++ b/doc/source/associated_projects.rst @@ -108,3 +108,4 @@ Other * `liberasurecode `_ - Low Level Erasure Code library used by PyECLib * `Swift Browser `_ - JavaScript interface for Swift * `swift-ui `_ - OpenStack Swift web browser +* `Swift Durability Calculator `_ - Data Durability Calculation Tool for Swift From af8d842076ba269fed7f4128d0c7503ab5d1a94a Mon Sep 17 00:00:00 2001 From: "Joanna H. Huang" Date: Tue, 21 Oct 2014 09:24:25 +0000 Subject: [PATCH 38/98] Replaced setting run_pause with standard interval The deprecated directive `run_pause` should be replaced with the more standard `interval`. `run_pause` should still be supported for backward compatibility. This patch updates the object replicator to use `interval` while continuing to support `run_pause`. It also updates its sample config and documentation. Co-Authored-By: Joanna H. Huang Co-Authored-By: Kamil Rykowski Change-Id: Ie2a3414a96a94efb9273ff53a80b9d90c74fff09 Closes-Bug: #1364735 --- doc/manpages/account-server.conf.5 | 2 +- doc/manpages/container-server.conf.5 | 2 +- doc/manpages/object-server.conf.5 | 4 +++- doc/source/deployment_guide.rst | 6 +++--- etc/account-server.conf-sample | 9 ++++----- etc/container-server.conf-sample | 10 +++++----- etc/object-server.conf-sample | 10 ++++++++++ swift/obj/reconstructor.py | 7 ++++--- swift/obj/replicator.py | 7 ++++--- 9 files changed, 35 insertions(+), 22 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index c98b679b44..b60baeb60d 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -185,7 +185,7 @@ This caps how long the replicator will spend trying to sync a given database per .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 8. .IP "\fBrun_pause [deprecated]\fR" -Time in seconds to wait between replication passes. The default is 10. +Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30. .IP \fBerror_suppression_interval\fR diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index 93408cf7ad..2cd1623dc1 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -191,7 +191,7 @@ This caps how long the replicator will spend trying to sync a given database per .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 8. .IP "\fBrun_pause [deprecated]\fR" -Time in seconds to wait between replication passes. The default is 10. +Time in seconds to wait between replication passes. The default is 30.
.IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30. .IP \fBnode_timeout\fR diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 14e8a58b3b..fb2297421a 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -187,7 +187,9 @@ Logging address. The default is /dev/log. Indicates that you are using a VM environment. The default is no. .IP \fBdaemonize\fR Whether or not to run replication as a daemon. The default is yes. -.IP \fBrun_pause\fR +.IP "\fBrun_pause [deprecated]\fR" +Time in seconds to wait between replication passes. The default is 30. +.IP \fBinterval\fR Time in seconds to wait between replication passes. The default is 30. .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 1. diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 0b40bb8568..552dfea314 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -465,7 +465,7 @@ log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level daemonize yes Whether or not to run replication as a daemon -run_pause 30 Time in seconds to wait between +interval 30 Time in seconds to wait between replication passes concurrency 1 Number of replication workers to spawn timeout 5 Timeout value sent to rsync --timeout @@ -614,7 +614,7 @@ log_level INFO Logging level per_diff 1000 concurrency 8 Number of replication workers to spawn -run_pause 30 Time in seconds to wait between +interval 30 Time in seconds to wait between replication passes node_timeout 10 Request timeout to external services conn_timeout 0.5 Connection timeout to external @@ -742,7 +742,7 @@ log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level per_diff 1000 concurrency 8 Number of replication workers to spawn -run_pause 30 Time in seconds to wait between +interval 30 Time in seconds to wait between replication passes node_timeout 10 Request timeout to external services conn_timeout 0.5 Connection timeout to external services diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 98c97acf6f..3631986fa2 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -94,7 +94,11 @@ use = egg:swift#recon # per_diff = 1000 # max_diffs = 100 # concurrency = 8 +# +# Time in seconds to wait between replication passes # interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 # # How long without an error before a node's error count is reset. This will # also be how long before a node is reenabled after suppression is triggered. @@ -109,11 +113,6 @@ use = egg:swift#recon # The replicator also performs reclamation # reclaim_age = 604800 # -# Time in seconds to wait between replication passes -# Note: if the parameter 'interval' is defined then it will be used in place -# of run_pause. -# run_pause = 30 -# # Allow rsync to compress data which is transmitted to destination node # during sync. However, this is applicable only when destination node is in # a different region than the local one. 
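The option fallback the daemons implement (see the swift/obj/replicator.py and swift/obj/reconstructor.py hunks at the end of this patch) boils down to one expression; a small illustration with assumed config dicts:

def resolve_interval(conf):
    # `interval` wins when set, the deprecated `run_pause` is honored
    # for old configs, and 30 seconds is the default for both.
    return int(conf.get('interval') or conf.get('run_pause') or 30)

assert resolve_interval({}) == 30
assert resolve_interval({'run_pause': '10'}) == 10
assert resolve_interval({'interval': '60', 'run_pause': '10'}) == 60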
diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index f09aa66fd7..54daee1e8e 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -103,18 +103,18 @@ use = egg:swift#recon # per_diff = 1000 # max_diffs = 100 # concurrency = 8 +# +# Time in seconds to wait between replication passes # interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# # node_timeout = 10 # conn_timeout = 0.5 # # The replicator also performs reclamation # reclaim_age = 604800 # -# Time in seconds to wait between replication passes -# Note: if the parameter 'interval' is defined then it will be used in place -# of run_pause. -# run_pause = 30 -# # Allow rsync to compress data which is transmitted to destination node # during sync. However, this is applicable only when destination node is in # a different region than the local one. diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index a3f76ceb92..4fafa7c18b 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -155,7 +155,12 @@ use = egg:swift#recon # # vm_test_mode = no # daemonize = on +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead # run_pause = 30 +# # concurrency = 1 # stats_interval = 300 # @@ -230,7 +235,12 @@ use = egg:swift#recon # log_address = /dev/log # # daemonize = on +# +# Time in seconds to wait between reconstruction passes +# interval = 30 +# run_pause is deprecated, use interval instead # run_pause = 30 +# # concurrency = 1 # stats_interval = 300 # node_timeout = 10 diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 2dd743fa9a..44920d54f7 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -126,7 +126,8 @@ class ObjectReconstructor(Daemon): self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] - self.run_pause = int(conf.get('run_pause', 30)) + self.interval = int(conf.get('interval') or + conf.get('run_pause') or 30) self.http_timeout = int(conf.get('http_timeout', 60)) self.lockup_timeout = int(conf.get('lockup_timeout', 1800)) self.recon_cache_path = conf.get('recon_cache_path', @@ -916,5 +917,5 @@ class ObjectReconstructor(Daemon): 'object_reconstruction_last': time.time()}, self.rcache, self.logger) self.logger.debug('reconstruction sleeping for %s seconds.', - self.run_pause) - sleep(self.run_pause) + self.interval) + sleep(self.interval) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 402de63af3..d23624b382 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -72,7 +72,8 @@ class ObjectReplicator(Daemon): self.next_check = time.time() + self.ring_check_interval self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7)) self.partition_times = [] - self.run_pause = int(conf.get('run_pause', 30)) + self.interval = int(conf.get('interval') or + conf.get('run_pause') or 30) self.rsync_timeout = int(conf.get('rsync_timeout', 900)) self.rsync_io_timeout = conf.get('rsync_io_timeout', '30') self.rsync_bwlimit = conf.get('rsync_bwlimit', '0') @@ -651,5 +652,5 @@ class ObjectReplicator(Daemon): 'object_replication_last': time.time()}, self.rcache, self.logger) self.logger.debug('Replication sleeping for %s seconds.', - self.run_pause) - sleep(self.run_pause) + self.interval) + sleep(self.interval) From e7c8c578d9e5b0aa7e56b02bd9c39baa99d2d6ae Mon Sep 17 00:00:00 
2001 From: Michael MATUR Date: Mon, 25 May 2015 15:13:01 +0200 Subject: [PATCH 39/98] fixup!Patch of "parse_content_disposition" method to meet RFC2183 The spec of Content-Disposition does not require a space character after comma: http://www.ietf.org/rfc/rfc2183.txt Change-Id: Iff438dc36ce78c6a79bb66ab3d889a8dae7c0e1f Closes-Bug: #1458497 --- swift/common/utils.py | 4 ++-- test/unit/common/test_utils.py | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 19dcfd3d61..11a97d126b 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -3355,8 +3355,8 @@ def parse_content_disposition(header): """ attributes = {} attrs = '' - if '; ' in header: - header, attrs = header.split('; ', 1) + if ';' in header: + header, attrs = [x.strip() for x in header.split(';', 1)] m = True while m: m = ATTRIBUTES_RE.match(attrs) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 48610c1a7b..113b712ab1 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -4629,6 +4629,12 @@ class TestParseContentDisposition(unittest.TestCase): self.assertEquals(name, 'form-data') self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'}) + def test_content_disposition_without_white_space(self): + name, attrs = utils.parse_content_disposition( + 'form-data;name="somefile";filename="test.html"') + self.assertEquals(name, 'form-data') + self.assertEquals(attrs, {'name': 'somefile', 'filename': 'test.html'}) + class TestIterMultipartMimeDocuments(unittest.TestCase): From 666bf06c26bc9e0d6256d054835386e50e67b8a2 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 6 May 2015 16:29:06 -0700 Subject: [PATCH 40/98] EC: don't 503 on marginally-successful PUT On EC PUT in an M+K scheme, we require M+1 fragment archives to durably land on disk. If we get that, then we go ahead and ask the object servers to "commit" the object by writing out .durable files. We only require 2 of those. When we got exactly M+1 fragment archives on disk, and then one connection timed out while writing .durable files, we should still be okay (provided M is at least 3). However, we'd take our M > 2 remaining successful responses and pass that off to best_response() with a quorum size of M+1, thus getting a 503 even though everything worked well enough. Now we pass 2 to best_response() to avoid that false negative. There was also a spot where we were getting the quorum size wrong. If we wrote out 3 fragment archives for a 2+1 policy, we were only requiring 2 successful backend PUTs. That's wrong; the right number is 3, which is what the policy's .quorum() method says. There was a spot where the right number wasn't getting plumbed through, but it is now. 
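A worked example with assumed numbers (a 3+1 scheme, the smallest where this matters) may make the arithmetic clearer:

m, k = 3, 1                  # data and parity fragment counts (illustrative)
num_nodes = m + k            # 4 fragment archives per PUT
put_quorum = m + 1           # 4 archives must land durably on disk
durable_quorum = 2           # only 2 .durable files are required, since
                             # the reconstructor spreads them around

# One connection times out while the .durable files are being written:
durables_written = put_quorum - 1
assert durables_written >= durable_quorum  # 3 >= 2: a 201, not a false 503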
Change-Id: Ic658a199e952558db329268f4d7b4009f47c6d03 Co-Authored-By: Clay Gerrard Closes-Bug: 1452468 --- swift/proxy/controllers/base.py | 6 +- swift/proxy/controllers/obj.py | 11 ++- test/unit/__init__.py | 108 +++++++++++++++++------- test/unit/proxy/controllers/test_obj.py | 31 ++++++- test/unit/proxy/test_server.py | 18 ++-- 5 files changed, 125 insertions(+), 49 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 6bf7ea0ef6..2fb7cd945e 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -1197,16 +1197,18 @@ class Controller(object): """ return quorum_size(n) - def have_quorum(self, statuses, node_count): + def have_quorum(self, statuses, node_count, quorum=None): """ Given a list of statuses from several requests, determine if a quorum response can already be decided. :param statuses: list of statuses returned :param node_count: number of nodes being queried (basically ring count) + :param quorum: number of statuses required for quorum :returns: True or False, depending on if quorum is established """ - quorum = self._quorum_size(node_count) + if quorum is None: + quorum = self._quorum_size(node_count) if len(statuses) >= quorum: for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST): diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index a83242b5f0..b04f9c3161 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -2171,7 +2171,7 @@ class ECObjectController(BaseObjectController): else: # intermediate response phase - set return value to true only # if there are enough 100-continue acknowledgements - if self.have_quorum(statuses, num_nodes): + if self.have_quorum(statuses, num_nodes, quorum=min_responses): quorum = True return statuses, reasons, bodies, etags, quorum @@ -2203,12 +2203,17 @@ class ECObjectController(BaseObjectController): nodes, min_conns, etag_hasher) final_phase = True need_quorum = False - min_resp = 2 + # The .durable file will propagate in a replicated fashion; if + # one exists, the reconstructor will spread it around. Thus, we + # don't require as many .durable files to be successfully + # written as we do fragment archives in order to call the PUT a + # success. + min_conns = 2 putters = [p for p in putters if not p.failed] # ignore response etags, and quorum boolean statuses, reasons, bodies, _etags, _quorum = \ self._get_put_responses(req, putters, len(nodes), - final_phase, min_resp, + final_phase, min_conns, need_quorum=need_quorum) except HTTPException as resp: return resp diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 372fb58bbf..e839e5568c 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -704,6 +704,74 @@ def mock(update): delattr(module, attr) +class FakeStatus(object): + """ + This will work with our fake_http_connect, if you hand in one of these + instead of a status int or status int tuple to the "codes" iter you can + add some eventlet sleep to the expect and response stages of the + connection. 
+ """ + + def __init__(self, status, expect_sleep=None, response_sleep=None): + """ + :param status: the response status int, or a tuple of + ([expect_status, ...], response_status) + :param expect_sleep: float, time to eventlet sleep during expect, can + be a iter of floats + :param response_sleep: float, time to eventlet sleep during response + """ + # connect exception + if isinstance(status, (Exception, eventlet.Timeout)): + raise status + if isinstance(status, tuple): + self.expect_status = list(status[:-1]) + self.status = status[-1] + self.explicit_expect_list = True + else: + self.expect_status, self.status = ([], status) + self.explicit_expect_list = False + if not self.expect_status: + # when a swift backend service returns a status before reading + # from the body (mostly an error response) eventlet.wsgi will + # respond with that status line immediately instead of 100 + # Continue, even if the client sent the Expect 100 header. + # BufferedHttp and the proxy both see these error statuses + # when they call getexpect, so our FakeConn tries to act like + # our backend services and return certain types of responses + # as expect statuses just like a real backend server would do. + if self.status in (507, 412, 409): + self.expect_status = [status] + else: + self.expect_status = [100, 100] + + # setup sleep attributes + if not isinstance(expect_sleep, (list, tuple)): + expect_sleep = [expect_sleep] * len(self.expect_status) + self.expect_sleep_list = list(expect_sleep) + while len(self.expect_sleep_list) < len(self.expect_status): + self.expect_sleep_list.append(None) + self.response_sleep = response_sleep + + def get_response_status(self): + if self.response_sleep is not None: + eventlet.sleep(self.response_sleep) + if self.expect_status and self.explicit_expect_list: + raise Exception('Test did not consume all fake ' + 'expect status: %r' % (self.expect_status,)) + if isinstance(self.status, (Exception, eventlet.Timeout)): + raise self.status + return self.status + + def get_expect_status(self): + expect_sleep = self.expect_sleep_list.pop(0) + if expect_sleep is not None: + eventlet.sleep(expect_sleep) + expect_status = self.expect_status.pop(0) + if isinstance(expect_status, (Exception, eventlet.Timeout)): + raise expect_status + return expect_status + + class SlowBody(object): """ This will work with our fake_http_connect, if you hand in these @@ -741,29 +809,9 @@ def fake_http_connect(*code_iter, **kwargs): def __init__(self, status, etag=None, body='', timestamp='1', headers=None, expect_headers=None, connection_id=None, give_send=None): - # connect exception - if isinstance(status, (Exception, eventlet.Timeout)): - raise status - if isinstance(status, tuple): - self.expect_status = list(status[:-1]) - self.status = status[-1] - self.explicit_expect_list = True - else: - self.expect_status, self.status = ([], status) - self.explicit_expect_list = False - if not self.expect_status: - # when a swift backend service returns a status before reading - # from the body (mostly an error response) eventlet.wsgi will - # respond with that status line immediately instead of 100 - # Continue, even if the client sent the Expect 100 header. - # BufferedHttp and the proxy both see these error statuses - # when they call getexpect, so our FakeConn tries to act like - # our backend services and return certain types of responses - # as expect statuses just like a real backend server would do. 
- if self.status in (507, 412, 409): - self.expect_status = [status] - else: - self.expect_status = [100, 100] + if not isinstance(status, FakeStatus): + status = FakeStatus(status) + self._status = status self.reason = 'Fake' self.host = '1.2.3.4' self.port = '1234' @@ -785,11 +833,6 @@ def fake_http_connect(*code_iter, **kwargs): eventlet.sleep() def getresponse(self): - if self.expect_status and self.explicit_expect_list: - raise Exception('Test did not consume all fake ' - 'expect status: %r' % (self.expect_status,)) - if isinstance(self.status, (Exception, eventlet.Timeout)): - raise self.status exc = kwargs.get('raise_exc') if exc: if isinstance(exc, (Exception, eventlet.Timeout)): @@ -797,16 +840,17 @@ def fake_http_connect(*code_iter, **kwargs): raise Exception('test') if kwargs.get('raise_timeout_exc'): raise eventlet.Timeout() + self.status = self._status.get_response_status() return self def getexpect(self): - expect_status = self.expect_status.pop(0) - if isinstance(self.expect_status, (Exception, eventlet.Timeout)): - raise self.expect_status + expect_status = self._status.get_expect_status() headers = dict(self.expect_headers) if expect_status == 409: headers['X-Backend-Timestamp'] = self.timestamp - return FakeConn(expect_status, headers=headers) + response = FakeConn(expect_status, headers=headers) + response.status = expect_status + return response def getheaders(self): etag = self.etag diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index a38e753ae0..04cb57d934 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -35,7 +35,7 @@ from swift.proxy.controllers.base import get_info as _real_get_info from swift.common.storage_policy import POLICIES, ECDriverError from test.unit import FakeRing, FakeMemcache, fake_http_connect, \ - debug_logger, patch_policies, SlowBody + debug_logger, patch_policies, SlowBody, FakeStatus from test.unit.proxy.test_server import node_error_count @@ -1406,6 +1406,35 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(1, len(error_lines)) self.assertTrue('retrying' in error_lines[0]) + def test_PUT_with_slow_commits(self): + # It's important that this timeout be much less than the delay in + # the slow commit responses so that the slow commits are not waited + # for. 
+ self.app.post_quorum_timeout = 0.01 + req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', + body='') + # plenty of slow commits + response_sleep = 5.0 + codes = [FakeStatus(201, response_sleep=response_sleep) + for i in range(self.replicas())] + # swap out some with regular fast responses + number_of_fast_responses_needed_to_be_quick_enough = 2 + fast_indexes = random.sample( + xrange(self.replicas()), + number_of_fast_responses_needed_to_be_quick_enough) + for i in fast_indexes: + codes[i] = 201 + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + with set_http_connect(*codes, expect_headers=expect_headers): + start = time.time() + resp = req.get_response(self.app) + response_time = time.time() - start + self.assertEquals(resp.status_int, 201) + self.assertTrue(response_time < response_sleep) + if __name__ == '__main__': unittest.main() diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 3319696eb7..3b0115bbfc 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -6217,7 +6217,8 @@ class TestECMismatchedFA(unittest.TestCase): # pyeclib has checks for unequal-length; we don't want to trip those self.assertEqual(len(obj1), len(obj2)) - # Servers obj1 and obj2 will have the first version of the object + # Server obj1 will have the first version of the object (obj2 also + # gets it, but that gets stepped on later) prosrv._error_limiting = {} with nested( mock.patch.object(obj3srv, 'PUT', bad_disk), @@ -6227,18 +6228,13 @@ class TestECMismatchedFA(unittest.TestCase): resp = put_req1.get_response(prosrv) self.assertEqual(resp.status_int, 201) - # Server obj3 (and, in real life, some handoffs) will have the - # second version of the object. + # Servers obj2 and obj3 will have the second version of the object. prosrv._error_limiting = {} with nested( mock.patch.object(obj1srv, 'PUT', bad_disk), - mock.patch.object(obj2srv, 'PUT', bad_disk), mock.patch( - 'swift.common.storage_policy.ECStoragePolicy.quorum'), - mock.patch( - 'swift.proxy.controllers.base.Controller._quorum_size', - lambda *a, **kw: 1)): - type(ec_policy).quorum = mock.PropertyMock(return_value=1) + 'swift.common.storage_policy.ECStoragePolicy.quorum')): + type(ec_policy).quorum = mock.PropertyMock(return_value=2) resp = put_req2.get_response(prosrv) self.assertEqual(resp.status_int, 201) @@ -6258,10 +6254,10 @@ class TestECMismatchedFA(unittest.TestCase): environ={"REQUEST_METHOD": "GET"}, headers={"X-Auth-Token": "t"}) prosrv._error_limiting = {} - with mock.patch.object(obj3srv, 'GET', bad_disk): + with mock.patch.object(obj1srv, 'GET', bad_disk): resp = get_req.get_response(prosrv) self.assertEqual(resp.status_int, 200) - self.assertEqual(resp.body, obj1) + self.assertEqual(resp.body, obj2) # A GET that sees 2 mismatching FAs will fail get_req = Request.blank("/v1/a/ec-crazytown/obj", From a3559edc2342e2cf92a5188336ab263ffd038554 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Fri, 17 Apr 2015 16:30:30 -0700 Subject: [PATCH 41/98] Exclude local_dev from sync partners on failure If the primary left or right hand partners are down, the next best thing is to validate the rest of the primary nodes. Here "the rest" should exclude not just the left and right hand partners, but ourselves as well. This fixes an accidental no-op when a partner node is unavailable and another node is missing data. Validation: Add probetests to cover ssync failures for the primary sync_to nodes for sync jobs.
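A simplified sketch of the node selection after this fix (a hypothetical helper; the real logic builds dest_nodes inline in the reconstructor's process_job, as the diff below shows):

def rest_of_primaries(part_nodes, local_dev, sync_to):
    # Fall back to the remaining primaries, skipping both the
    # left/right partners and the local device itself -- the old code
    # forgot the latter, so a down partner could turn the job into a
    # no-op even when another node was missing data.
    skip = set(n['id'] for n in sync_to)
    skip.add(local_dev['id'])
    return [n for n in part_nodes if n['id'] not in skip]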
Drive-by: Make additional plumbing for the check_mount and check_dir constraints into the remaining daemons. Change-Id: I4d1c047106c242bca85c94b569d98fd59bb255f4 --- swift/obj/reconstructor.py | 22 +++++---- swift/obj/ssync_receiver.py | 4 +- test/probe/common.py | 15 ++++++- test/probe/test_reconstructor_rebuild.py | 57 ++++++++++++++++++++++++ test/probe/test_reconstructor_revert.py | 17 +------ test/unit/obj/test_reconstructor.py | 42 ++++++++++------- test/unit/obj/test_ssync_receiver.py | 10 ++--- 7 files changed, 118 insertions(+), 49 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 4385e42cc9..734958ec2d 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -29,8 +29,8 @@ from eventlet.support.greenlets import GreenletExit from swift import gettext_ as _ from swift.common.utils import ( whataremyips, unlink_older_than, compute_eta, get_logger, - dump_recon_cache, ismount, mkdirs, config_true_value, list_from_csv, - get_hub, tpool_reraise, GreenAsyncPile, Timestamp, remove_file) + dump_recon_cache, mkdirs, config_true_value, list_from_csv, get_hub, + tpool_reraise, GreenAsyncPile, Timestamp, remove_file) from swift.common.swob import HeaderKeyDict from swift.common.bufferedhttp import http_connect from swift.common.daemon import Daemon @@ -569,9 +569,12 @@ class ObjectReconstructor(Daemon): job['sync_to'], # I think we could order these based on our index to better # protect against a broken chain - itertools.ifilter( - lambda n: n['id'] not in (n['id'] for n in job['sync_to']), - job['policy'].object_ring.get_part_nodes(job['partition'])), + [ + n for n in + job['policy'].object_ring.get_part_nodes(job['partition']) + if n['id'] != job['local_dev']['id'] and + n['id'] not in (m['id'] for m in job['sync_to']) + ], ) syncd_with = 0 for node in dest_nodes: @@ -777,13 +780,14 @@ class ObjectReconstructor(Daemon): if override_devices and (local_dev['device'] not in override_devices): continue - dev_path = join(self.devices_dir, local_dev['device']) - obj_path = join(dev_path, data_dir) - tmp_path = join(dev_path, get_tmp_dir(int(policy))) - if self.mount_check and not ismount(dev_path): + dev_path = self._df_router[policy].get_dev_path( + local_dev['device']) + if not dev_path: self.logger.warn(_('%s is not mounted'), local_dev['device']) continue + obj_path = join(dev_path, data_dir) + tmp_path = join(dev_path, get_tmp_dir(int(policy))) unlink_older_than(tmp_path, time.time() - self.reclaim_age) if not os.path.exists(obj_path): diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index b636a16245..aa685211ae 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -19,7 +19,6 @@ import eventlet import eventlet.wsgi import eventlet.greenio -from swift.common import constraints from swift.common import exceptions from swift.common import http from swift.common import swob @@ -176,8 +175,7 @@ class Receiver(object): self.frag_index = None utils.validate_device_partition(self.device, self.partition) self.diskfile_mgr = self.app._diskfile_router[self.policy] - if self.diskfile_mgr.mount_check and not constraints.check_mount( - self.diskfile_mgr.devices, self.device): + if not self.diskfile_mgr.get_dev_path(self.device): raise swob.HTTPInsufficientStorage(drive=self.device) self.fp = self.request.environ['wsgi.input'] for data in self._ensure_flush(): diff --git a/test/probe/common.py b/test/probe/common.py index 7d1e754014..ca1225f9fb 100644 --- a/test/probe/common.py +++ 
b/test/probe/common.py @@ -26,7 +26,7 @@ from swiftclient import get_auth, head_account from swift.obj.diskfile import get_data_dir from swift.common.ring import Ring -from swift.common.utils import readconf +from swift.common.utils import readconf, renamer from swift.common.manager import Manager from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY @@ -314,6 +314,19 @@ class ProbeTest(unittest.TestCase): self.updaters.once() self.replicators.once() + def kill_drive(self, device): + if os.path.ismount(device): + os.system('sudo umount %s' % device) + else: + renamer(device, device + "X") + + def revive_drive(self, device): + disabled_name = device + "X" + if os.path.isdir(disabled_name): + renamer(device + "X", device) + else: + os.system('sudo mount %s' % device) + class ReplProbeTest(ProbeTest): diff --git a/test/probe/test_reconstructor_rebuild.py b/test/probe/test_reconstructor_rebuild.py index 5edfcc52d1..bf568ccc68 100644 --- a/test/probe/test_reconstructor_rebuild.py +++ b/test/probe/test_reconstructor_rebuild.py @@ -19,12 +19,14 @@ import unittest import uuid import shutil import random +from collections import defaultdict from test.probe.common import ECProbeTest from swift.common import direct_client from swift.common.storage_policy import EC_POLICY from swift.common.manager import Manager +from swift.obj.reconstructor import _get_partners from swiftclient import client @@ -165,6 +167,61 @@ class TestReconstructorRebuild(ECProbeTest): self._format_node(onode), [self._format_node(n) for n in node_list])) + def test_rebuild_partner_down(self): + # create EC container + headers = {'X-Storage-Policy': self.policy.name} + client.put_container(self.url, self.token, self.container_name, + headers=headers) + + # PUT object + contents = Body() + client.put_object(self.url, self.token, + self.container_name, + self.object_name, + contents=contents) + + opart, onodes = self.object_ring.get_nodes( + self.account, self.container_name, self.object_name) + + # find a primary server that only has one of its devices in the + # primary node list + group_nodes_by_config = defaultdict(list) + for n in onodes: + group_nodes_by_config[self.config_number(n)].append(n) + for config_number, node_list in group_nodes_by_config.items(): + if len(node_list) == 1: + break + else: + self.fail('ring balancing did not use all available nodes') + primary_node = node_list[0] + + # pick one of its partners to fail randomly + partner_node = random.choice(_get_partners( + primary_node['index'], onodes)) + + # 507 the partner device + device_path = self.device_dir('object', partner_node) + self.kill_drive(device_path) + + # select another primary sync_to node to fail + failed_primary = [n for n in onodes if n['id'] not in + (primary_node['id'], partner_node['id'])][0] + # ... capture its fragment etag + failed_primary_etag = self.direct_get(failed_primary, opart) + # ...
and delete it + part_dir = self.storage_dir('object', failed_primary, part=opart) + shutil.rmtree(part_dir, True) + + # reconstruct from the primary, while one of its partners is 507'd + self.reconstructor.once(number=self.config_number(primary_node)) + + # the other failed primary will get its fragment rebuilt instead + self.assertEqual(failed_primary_etag, + self.direct_get(failed_primary, opart)) + + # just to be nice + self.revive_drive(device_path) + if __name__ == "__main__": unittest.main() diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 39739b617d..249a6b5d62 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -17,7 +17,6 @@ from hashlib import md5 import unittest import uuid -import os import random import shutil from collections import defaultdict @@ -27,7 +26,6 @@ from test.probe.common import ECProbeTest from swift.common import direct_client from swift.common.storage_policy import EC_POLICY from swift.common.manager import Manager -from swift.common.utils import renamer from swift.obj import reconstructor from swiftclient import client @@ -70,19 +68,6 @@ class TestReconstructorRevert(ECProbeTest): self.assertEqual(self.policy.policy_type, EC_POLICY) self.reconstructor = Manager(["object-reconstructor"]) - def kill_drive(self, device): - if os.path.ismount(device): - os.system('sudo umount %s' % device) - else: - renamer(device, device + "X") - - def revive_drive(self, device): - disabled_name = device + "X" - if os.path.isdir(disabled_name): - renamer(device + "X", device) - else: - os.system('sudo mount %s' % device) - def proxy_get(self): # GET object headers, body = client.get_object(self.url, self.token, @@ -277,6 +262,8 @@ else: self.fail('ring balancing did not use all available nodes') primary_node = node_list[0] + + # ...
and 507 its device primary_device = self.device_dir('object', primary_node) self.kill_drive(primary_device) diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 23e70543f7..321ea3751d 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -932,7 +932,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): def test_process_job_all_insufficient_storage(self): self.reconstructor._reset_stats() with mock_ssync_sender(): - with mocked_http_conn(*[507] * 10): + with mocked_http_conn(*[507] * 8): found_jobs = [] for part_info in self.reconstructor.collect_parts(): jobs = self.reconstructor.build_reconstruction_jobs( @@ -954,7 +954,7 @@ def test_process_job_all_client_error(self): self.reconstructor._reset_stats() with mock_ssync_sender(): - with mocked_http_conn(*[400] * 10): + with mocked_http_conn(*[400] * 8): found_jobs = [] for part_info in self.reconstructor.collect_parts(): jobs = self.reconstructor.build_reconstruction_jobs( @@ -976,7 +976,7 @@ def test_process_job_all_timeout(self): self.reconstructor._reset_stats() with mock_ssync_sender(): - with nested(mocked_http_conn(*[Timeout()] * 10)): + with nested(mocked_http_conn(*[Timeout()] * 8)): found_jobs = [] for part_info in self.reconstructor.collect_parts(): jobs = self.reconstructor.build_reconstruction_jobs( @@ -1012,6 +1012,13 @@ class TestObjectReconstructor(unittest.TestCase): 'bind_port': self.port, } self.logger = debug_logger('object-reconstructor') + self._configure_reconstructor() + self.policy.object_ring.max_more_nodes = \ + self.policy.object_ring.replicas + self.ts_iter = make_timestamp_iter() + + def _configure_reconstructor(self, **kwargs): + self.conf.update(kwargs) self.reconstructor = object_reconstructor.ObjectReconstructor( self.conf, logger=self.logger) self.reconstructor._reset_stats() @@ -1019,9 +1026,6 @@ # directly, so you end up with a /0 when you try to show the # percentage of complete jobs as ratio of the total job count self.reconstructor.job_count = 1 - self.policy.object_ring.max_more_nodes = \ - self.policy.object_ring.replicas - self.ts_iter = make_timestamp_iter() def tearDown(self): self.reconstructor.stats_line() @@ -1115,16 +1119,16 @@ class TestObjectReconstructor(unittest.TestCase): paths = [] - def fake_ismount(path): - paths.append(path) + def fake_check_mount(devices, device): + paths.append(os.path.join(devices, device)) return False with nested(mock.patch('swift.obj.reconstructor.whataremyips', return_value=[self.ip]), mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), - mock.patch('swift.obj.reconstructor.ismount', - fake_ismount)): + mock.patch('swift.obj.diskfile.check_mount', + fake_check_mount)): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual(2, len(part_infos)) # sanity, same jobs self.assertEqual(set(int(p['partition']) for p in part_infos), @@ -1134,13 +1138,16 @@ self.assertEqual(paths, []) # ...
now with mount check - self.reconstructor.mount_check = True + self._configure_reconstructor(mount_check=True) + self.assertTrue(self.reconstructor.mount_check) + for policy in POLICIES: + self.assertTrue(self.reconstructor._df_router[policy].mount_check) with nested(mock.patch('swift.obj.reconstructor.whataremyips', return_value=[self.ip]), mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), - mock.patch('swift.obj.reconstructor.ismount', - fake_ismount)): + mock.patch('swift.obj.diskfile.check_mount', + fake_check_mount)): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual([], part_infos) # sanity, no jobs @@ -1148,7 +1155,8 @@ class TestObjectReconstructor(unittest.TestCase): self.assertEqual(set(paths), set([ os.path.join(self.devices, dev) for dev in local_devs])) - def fake_ismount(path): + def fake_check_mount(devices, device): + path = os.path.join(devices, device) if path.endswith('sda'): return True else: @@ -1158,8 +1166,8 @@ class TestObjectReconstructor(unittest.TestCase): return_value=[self.ip]), mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), - mock.patch('swift.obj.reconstructor.ismount', - fake_ismount)): + mock.patch('swift.obj.diskfile.check_mount', + fake_check_mount)): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual(1, len(part_infos)) # only sda picked up (part 0) self.assertEqual(part_infos[0]['partition'], 0) @@ -1171,6 +1179,8 @@ class TestObjectReconstructor(unittest.TestCase): 'replication_ip': self.ip, 'replication_port': self.port } for dev in local_devs] + for device in local_devs: + utils.mkdirs(os.path.join(self.devices, device)) fake_unlink = mock.MagicMock() self.reconstructor.reclaim_age = 1000 now = time.time() diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 4a030c821d..9fdfe7d102 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -23,7 +23,6 @@ import unittest import eventlet import mock -from swift.common import constraints from swift.common import exceptions from swift.common import swob from swift.common import utils @@ -53,6 +52,7 @@ class TestReceiver(unittest.TestCase): 'mount_check': 'false', 'replication_one_per_device': 'false', 'log_requests': 'false'} + utils.mkdirs(os.path.join(self.testdir, 'device', 'partition')) self.controller = server.ObjectController(self.conf) self.controller.bytes_per_sync = 1 @@ -285,8 +285,8 @@ class TestReceiver(unittest.TestCase): mock.patch.object( self.controller._diskfile_router[POLICIES.legacy], 'mount_check', False), - mock.patch.object( - constraints, 'check_mount', return_value=False)) as ( + mock.patch('swift.obj.diskfile.check_mount', + return_value=False)) as ( mocked_replication_semaphore, mocked_mount_check, mocked_check_mount): @@ -305,8 +305,8 @@ class TestReceiver(unittest.TestCase): mock.patch.object( self.controller._diskfile_router[POLICIES.legacy], 'mount_check', True), - mock.patch.object( - constraints, 'check_mount', return_value=False)) as ( + mock.patch('swift.obj.diskfile.check_mount', + return_value=False)) as ( mocked_replication_semaphore, mocked_mount_check, mocked_check_mount): From a1c327022c70907ccc159d6203de26b37c3a4586 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 26 May 2015 16:43:55 -0700 Subject: [PATCH 42/98] Remove simplejson from swift-recon Since we're dropping Python 2.6 support, we can rely on stdlib's json and get rid of our dependency on simplejson. 
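As a quick illustration, decoding such a response needs nothing beyond the standard library (a sketch only; the URL is hypothetical and stands in for whatever recon endpoint is being scouted):

    import json
    import urllib2

    # stdlib json decodes the recon middleware's JSON reply just as
    # simplejson did
    body = urllib2.urlopen('http://127.0.0.1:6000/recon/async').read()
    print json.loads(body)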
All swift-recon was doing with json was decoding a JSON response (from the recon middleware) and printing it to the terminal. This still works just fine. Change-Id: I28cf25a7c2856f230d4642c62fb8bf9c4d37e9e5 --- swift/cli/recon.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/swift/cli/recon.py b/swift/cli/recon.py index 8c2042cb53..a0fcdf7835 100755 --- a/swift/cli/recon.py +++ b/swift/cli/recon.py @@ -22,12 +22,9 @@ from eventlet.green import urllib2 from swift.common.utils import SWIFT_CONF_FILE from swift.common.ring import Ring from urlparse import urlparse -try: - import simplejson as json -except ImportError: - import json from hashlib import md5 import eventlet +import json import optparse import time import sys From bb716573ab5c8455348ec013feb894421e0e1f1c Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 20 May 2015 00:39:41 -0700 Subject: [PATCH 43/98] Allow SLO PUTs to forgo per-segment integrity checks While manifests still require 'etag' and 'size_bytes' fields for each segment (to catch user errors like 'etaf' or 'size_btyes'), an explicit null for either will skip that particular integrity check and instead use whatever value is retrieved when HEADing the segment. So, if a user uploads a manifest like: [{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 1048576}, {"path": "/con/obj_seg_2", "etag": "etag2", "size_bytes": null}, {"path": "/con/obj_seg_3", "etag": null, "size_bytes": null}] then the etag will only be verified for the /con/obj_seg_2 segment, and the segment size will only be verified for the /con/obj_seg_1 segment. However, the manifest that's ultimately stored (and can be retrieved with a ?multipart-manifest=get query-string) will still look like: [{"name": "/con/obj_seg_1", "hash": "etag1", "bytes": 1048576, ...}, {"name": "/con/obj_seg_2", "hash": "etag2", "bytes": 1048576, ...}, {"name": "/con/obj_seg_3", "hash": "etag3", "bytes": 1234, ...}] This allows the middleware to continue performing integrity checks on object GET. Change-Id: I2c4e585221387dd02a8679a50398d6b614407b12 DocImpact --- swift/common/middleware/slo.py | 54 ++++++++++++----- test/functional/tests.py | 79 +++++++++++++++++++++++++ test/unit/common/middleware/test_slo.py | 55 +++++++++++++++++ 3 files changed, 172 insertions(+), 16 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index d8df829981..3c3ad7feac 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -36,8 +36,8 @@ json data format. The data to be supplied for each segment is:: path: the path to the segment (not including account) /container/object_name - etag: the etag given back when the segment was PUT - size_bytes: the size of the segment in bytes + etag: the etag given back when the segment was PUT, or null + size_bytes: the size of the segment in bytes, or null The format of the list will be:: @@ -48,15 +48,25 @@ The format of the list will be:: The number of object segments is limited to a configurable amount, default 1000. Each segment, except for the final one, must be at least 1 megabyte -(configurable). On upload, the middleware will head every segment passed in and -verify the size and etag of each. If any of the objects do not match (not +(configurable). On upload, the middleware will head every segment passed in to +verify: + + 1. the segment exists (i.e. the HEAD was successful); + 2. the segment meets minimum size requirements (if not the last segment); + 3. 
if the user provided a non-null etag, the etag matches; and + 4. if the user provided a non-null size_bytes, the size_bytes matches. + +Note that the etag and size_bytes keys are still required; this acts as a guard +against user errors such as typos. If any of the objects fail to verify (not found, size/etag mismatch, below minimum size) then the user will receive a 4xx error response. If everything does match, the user will receive a 2xx response and the SLO object is ready for downloading. Behind the scenes, on success, a json manifest generated from the user input is sent to object servers with an extra "X-Static-Large-Object: True" header -and a modified Content-Type. The parameter: swift_bytes=$total_size will be +and a modified Content-Type. The items in this manifest will include the etag +and size_bytes for each segment, regardless of whether the client specified +them for verification. The parameter: swift_bytes=$total_size will be appended to the existing Content-Type, where total_size is the sum of all the included segments' size_bytes. This extra parameter will be hidden from the user. @@ -73,9 +83,11 @@ Retrieving a Large Object A GET request to the manifest object will return the concatenation of the objects from the manifest much like DLO. If any of the segments from the -manifest are not found or their Etag/Content Length no longer match the -connection will drop. In this case a 409 Conflict will be logged in the proxy -logs and the user will receive incomplete results. +manifest are not found or their Etag/Content Length have changed since upload, +the connection will drop. In this case a 409 Conflict will be logged in the +proxy logs and the user will receive incomplete results. Note that this will be +enforced regardless of whether the user performed per-segment validation during +upload. The headers from this GET or HEAD request will return the metadata attached to the manifest object itself with some exceptions:: @@ -594,8 +606,11 @@ class StaticLargeObject(object): try: seg_size = int(seg_dict['size_bytes']) except (ValueError, TypeError): - raise HTTPBadRequest('Invalid Manifest File') - if seg_size < self.min_segment_size and \ + if seg_dict['size_bytes'] is None: + seg_size = None + else: + raise HTTPBadRequest('Invalid Manifest File') + if seg_size is not None and seg_size < self.min_segment_size and \ index < len(parsed_data) - 1: raise HTTPBadRequest( 'Each segment, except the last, must be at least ' @@ -613,11 +628,18 @@ class StaticLargeObject(object): head_seg_resp = \ Request.blank(obj_path, new_env).get_response(self) if head_seg_resp.is_success: - total_size += seg_size - if seg_size != head_seg_resp.content_length: + if head_seg_resp.content_length < self.min_segment_size and \ + index < len(parsed_data) - 1: + raise HTTPBadRequest( + 'Each segment, except the last, must be at least ' + '%d bytes.'
% self.min_segment_size) + total_size += head_seg_resp.content_length + if seg_size is not None and \ + seg_size != head_seg_resp.content_length: problem_segments.append([quote(obj_name), 'Size Mismatch']) - if seg_dict['etag'] == head_seg_resp.etag: - slo_etag.update(seg_dict['etag']) + if seg_dict['etag'] is None or \ + seg_dict['etag'] == head_seg_resp.etag: + slo_etag.update(head_seg_resp.etag) else: problem_segments.append([quote(obj_name), 'Etag Mismatch']) if head_seg_resp.last_modified: @@ -629,8 +651,8 @@ class StaticLargeObject(object): last_modified_formatted = \ last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f') seg_data = {'name': '/' + seg_dict['path'].lstrip('/'), - 'bytes': seg_size, - 'hash': seg_dict['etag'], + 'bytes': head_seg_resp.content_length, + 'hash': head_seg_resp.etag, 'content_type': head_seg_resp.content_type, 'last_modified': last_modified_formatted} if config_true_value( diff --git a/test/functional/tests.py b/test/functional/tests.py index 3fbbdd784e..df96e5c4da 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2152,6 +2152,15 @@ class TestSloEnv(object): seg_info['seg_e']]), parms={'multipart-manifest': 'put'}) + file_item = cls.container.file("manifest-db") + file_item.write( + json.dumps([ + {'path': seg_info['seg_d']['path'], 'etag': None, + 'size_bytes': None}, + {'path': seg_info['seg_b']['path'], 'etag': None, + 'size_bytes': None}, + ]), parms={'multipart-manifest': 'put'}) + class TestSlo(Base): env = TestSloEnv @@ -2259,6 +2268,52 @@ class TestSlo(Base): else: self.fail("Expected ResponseError but didn't get it") + def test_slo_unspecified_etag(self): + file_item = self.env.container.file("manifest-a-unspecified-etag") + file_item.write( + json.dumps([{ + 'size_bytes': 1024 * 1024, + 'etag': None, + 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]), + parms={'multipart-manifest': 'put'}) + self.assert_status(201) + + def test_slo_unspecified_size(self): + file_item = self.env.container.file("manifest-a-unspecified-size") + file_item.write( + json.dumps([{ + 'size_bytes': None, + 'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(), + 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]), + parms={'multipart-manifest': 'put'}) + self.assert_status(201) + + def test_slo_missing_etag(self): + file_item = self.env.container.file("manifest-a-missing-etag") + try: + file_item.write( + json.dumps([{ + 'size_bytes': 1024 * 1024, + 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]), + parms={'multipart-manifest': 'put'}) + except ResponseError as err: + self.assertEqual(400, err.status) + else: + self.fail("Expected ResponseError but didn't get it") + + def test_slo_missing_size(self): + file_item = self.env.container.file("manifest-a-missing-size") + try: + file_item.write( + json.dumps([{ + 'etag': hashlib.md5('a' * 1024 * 1024).hexdigest(), + 'path': '/%s/%s' % (self.env.container.name, 'seg_a')}]), + parms={'multipart-manifest': 'put'}) + except ResponseError as err: + self.assertEqual(400, err.status) + else: + self.fail("Expected ResponseError but didn't get it") + def test_slo_overwrite_segment_with_manifest(self): file_item = self.env.container.file("seg_b") try: @@ -2367,6 +2422,30 @@ class TestSlo(Base): except ValueError: self.fail("GET with multipart-manifest=get got invalid json") + def test_slo_get_the_manifest_with_details_from_server(self): + manifest = self.env.container.file("manifest-db") + got_body = manifest.read(parms={'multipart-manifest': 'get'}) + + self.assertEqual('application/json; 
charset=utf-8', + manifest.content_type) + try: + value = json.loads(got_body) + except ValueError: + self.fail("GET with multipart-manifest=get got invalid json") + + self.assertEqual(len(value), 2) + self.assertEqual(value[0]['bytes'], 1024 * 1024) + self.assertEqual(value[0]['hash'], + hashlib.md5('d' * 1024 * 1024).hexdigest()) + self.assertEqual(value[0]['name'], + '/%s/seg_d' % self.env.container.name.decode("utf-8")) + + self.assertEqual(value[1]['bytes'], 1024 * 1024) + self.assertEqual(value[1]['hash'], + hashlib.md5('b' * 1024 * 1024).hexdigest()) + self.assertEqual(value[1]['name'], + '/%s/seg_b' % self.env.container.name.decode("utf-8")) + def test_slo_head_the_manifest(self): manifest = self.env.container.file("manifest-abcde") got_info = manifest.info(parms={'multipart-manifest': 'get'}) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index d70a25ccc4..86a11734d3 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -441,6 +441,61 @@ class TestSloPutManifest(SloTestCase): self.assertEqual(status, '409 Conflict') self.assertEqual(self.app.call_count, 1) + def test_handle_multipart_put_skip_size_check(self): + good_data = json.dumps( + [{'path': '/checktest/a_1', 'etag': 'a', 'size_bytes': None}, + {'path': '/checktest/b_2', 'etag': 'b', 'size_bytes': None}]) + req = Request.blank( + '/v1/AUTH_test/checktest/man_3?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, body=good_data) + status, headers, body = self.call_slo(req) + self.assertEquals(self.app.call_count, 3) + + # Check that we still populated the manifest properly from our HEADs + req = Request.blank( + # this string looks weird, but it's just an artifact + # of FakeSwift + '/v1/AUTH_test/checktest/man_3?multipart-manifest=put', + environ={'REQUEST_METHOD': 'GET'}) + status, headers, body = self.call_app(req) + manifest_data = json.loads(body) + self.assertEquals(1, manifest_data[0]['bytes']) + self.assertEquals(2, manifest_data[1]['bytes']) + + def test_handle_multipart_put_skip_size_check_still_uses_min_size(self): + with patch.object(self.slo, 'min_segment_size', 50): + test_json_data = json.dumps([{'path': '/cont/small_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': None}, + {'path': '/cont/small_object', + 'etag': 'etagoftheobjectsegment', + 'size_bytes': 100}]) + req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) + with self.assertRaises(HTTPException) as cm: + self.slo.handle_multipart_put(req, fake_start_response) + self.assertEquals(cm.exception.status_int, 400) + + def test_handle_multipart_put_skip_etag_check(self): + good_data = json.dumps( + [{'path': '/checktest/a_1', 'etag': None, 'size_bytes': 1}, + {'path': '/checktest/b_2', 'etag': None, 'size_bytes': 2}]) + req = Request.blank( + '/v1/AUTH_test/checktest/man_3?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, body=good_data) + status, headers, body = self.call_slo(req) + self.assertEquals(self.app.call_count, 3) + + # Check that we still populated the manifest properly from our HEADs + req = Request.blank( + # this string looks weird, but it's just an artifact + # of FakeSwift + '/v1/AUTH_test/checktest/man_3?multipart-manifest=put', + environ={'REQUEST_METHOD': 'GET'}) + status, headers, body = self.call_app(req) + manifest_data = json.loads(body) + self.assertEquals('a', manifest_data[0]['hash']) + self.assertEquals('b', manifest_data[1]['hash']) + class TestSloDeleteManifest(SloTestCase): From 
3aa06f185ac4256a8883c565bdc90b1ffbd519ca Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 27 Apr 2015 09:17:46 +0100 Subject: [PATCH 44/98] Make SSYNC receiver return a response when initial checks fail The ssync Receiver performs some checks on request parameters in initialize_request() before starting the exchange of missing hashes and updates e.g. the destination device must be available; the policy must be valid. Currently if any of these checks fails then the receiver just closes the connection, so the Sender gets no useful response code and noise is generated in logs by httplib and wsgi Exceptions. This change moves the request parameter checks to the Receiver constructor so that the HTTPExceptions raised are actually sent as responses. (The 'connection close' exception handling still applies once the 'missing_check' and 'updates' handshakes are in progress.) Moving initialize_request() revealed the following lurking bug: * initialize_request() sets req.environ['eventlet.minimum_write_chunk_size'] = 0 * this was previously ineffective because the Response environ had already been copied from Request environ before this value was set, so the Response never used the value :/ * Now that it is effective (a good thing) it causes the empty string yielded by the receiver when there are no missing hashes in missing_checks() to be sent to the sender immediately. This makes the Sender.readline() think there has been an early disconnect and raise an Exception (a bad thing), as revealed by test/unit/obj/test_ssync_sender.py:TestSsync.test_nothing_to_sync The fix for this is to simply make the receiver skip sending the empty string if there are no missing object_hashes. Change-Id: I036a6919fead6e970505dccbb0da7bfbdf8cecc3 --- swift/obj/ssync_receiver.py | 10 ++- test/unit/obj/test_server.py | 2 +- test/unit/obj/test_ssync_receiver.py | 102 +++++++++++++++++++++++---- test/unit/obj/test_ssync_sender.py | 18 +++-- 4 files changed, 105 insertions(+), 27 deletions(-) diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index aa685211ae..b907ebf563 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -69,6 +69,7 @@ class Receiver(object): # raised during processing because otherwise the sender could send for # quite some time before realizing it was all in vain. self.disconnect = True + self.initialize_request() def __call__(self): """ @@ -88,9 +89,7 @@ class Receiver(object): try: # Double try blocks in case our main error handlers fail. try: - # initialize_request is for preamble items that can be done - # outside a replication semaphore lock. - for data in self.initialize_request(): + for data in self._ensure_flush(): yield data # If semaphore is in use, try to acquire it, non-blocking, and # return a 503 if it fails.
@@ -178,8 +177,6 @@ class Receiver(object): if not self.diskfile_mgr.get_dev_path(self.device): raise swob.HTTPInsufficientStorage(drive=self.device) self.fp = self.request.environ['wsgi.input'] - for data in self._ensure_flush(): - yield data def missing_check(self): """ @@ -249,7 +246,8 @@ class Receiver(object): if want: object_hashes.append(object_hash) yield ':MISSING_CHECK: START\r\n' - yield '\r\n'.join(object_hashes) + if object_hashes: + yield '\r\n'.join(object_hashes) yield '\r\n' yield ':MISSING_CHECK: END\r\n' for data in self._ensure_flush(): diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 52a34347ac..4c669a874c 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -4410,7 +4410,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 507) def test_SSYNC_can_be_called(self): - req = Request.blank('/sda1/p/other/suff', + req = Request.blank('/sda1/0', environ={'REQUEST_METHOD': 'SSYNC'}, headers={}) resp = req.get_response(self.object_controller) diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 9fdfe7d102..8b652ad2ec 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -23,15 +23,19 @@ import unittest import eventlet import mock +from swift.common import bufferedhttp from swift.common import exceptions from swift.common import swob -from swift.common import utils from swift.common.storage_policy import POLICIES +from swift.common import utils +from swift.common.swob import HTTPException from swift.obj import diskfile from swift.obj import server from swift.obj import ssync_receiver +from swift.obj.reconstructor import ObjectReconstructor from test import unit +from test.unit import debug_logger, patch_policies @unit.patch_policies() @@ -176,9 +180,12 @@ class TestReceiver(unittest.TestCase): ':MISSING_CHECK: END\r\n' ':UPDATES: START\r\n:UPDATES: END\r\n') self.controller.logger = mock.MagicMock() - receiver = ssync_receiver.Receiver(self.controller, req) - body_lines = [chunk.strip() for chunk in receiver() if chunk.strip()] - self.assertEqual(body_lines, [":ERROR: 503 'No policy with index 2'"]) + try: + ssync_receiver.Receiver(self.controller, req) + self.fail('Expected HTTPException to be raised.') + except HTTPException as err: + self.assertEqual('503 Service Unavailable', err.status) + self.assertEqual('No policy with index 2', err.body) @unit.patch_policies() def test_Receiver_with_frag_index_header(self): @@ -233,8 +240,8 @@ class TestReceiver(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual( self.body_lines(resp.body), - [":ERROR: 400 'Invalid path: /device'"]) - self.assertEqual(resp.status_int, 200) + ["Invalid path: /device"]) + self.assertEqual(resp.status_int, 400) self.assertFalse(mocked_replication_semaphore.acquire.called) self.assertFalse(mocked_replication_semaphore.release.called) @@ -246,8 +253,8 @@ class TestReceiver(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual( self.body_lines(resp.body), - [":ERROR: 400 'Invalid path: /device/'"]) - self.assertEqual(resp.status_int, 200) + ["Invalid path: /device/"]) + self.assertEqual(resp.status_int, 400) self.assertFalse(mocked_replication_semaphore.acquire.called) self.assertFalse(mocked_replication_semaphore.release.called) @@ -273,8 +280,8 @@ class TestReceiver(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual( self.body_lines(resp.body), - [":ERROR: 400 
'Invalid path: /device/partition/junk'"]) - self.assertEqual(resp.status_int, 200) + ["Invalid path: /device/partition/junk"]) + self.assertEqual(resp.status_int, 400) self.assertFalse(mocked_replication_semaphore.acquire.called) self.assertFalse(mocked_replication_semaphore.release.called) @@ -315,10 +322,10 @@ class TestReceiver(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual( self.body_lines(resp.body), - [":ERROR: 507 '
<html><h1>Insufficient Storage</h1><p>There " + ["<html><h1>Insufficient Storage</h1><p>There " "was not enough space to save the resource. Drive: " - "device</p></html>'"]) - self.assertEqual(resp.status_int, 200) + "device</p></html>
"]) + self.assertEqual(resp.status_int, 507) mocked_check_mount.assert_called_once_with( self.controller._diskfile_router[POLICIES.legacy].devices, 'device') @@ -1476,5 +1483,74 @@ class TestReceiver(unittest.TestCase): self.assertEqual(_requests, []) +@patch_policies(with_ec_default=True) +class TestSsyncRxServer(unittest.TestCase): + # Tests to verify behavior of SSYNC requests sent to an object + # server socket. + + def setUp(self): + self.rx_ip = '127.0.0.1' + # dirs + self.tmpdir = tempfile.mkdtemp() + self.tempdir = os.path.join(self.tmpdir, 'tmp_test_obj_server') + + self.devices = os.path.join(self.tempdir, 'srv/node') + for device in ('sda1', 'sdb1'): + os.makedirs(os.path.join(self.devices, device)) + + self.conf = { + 'devices': self.devices, + 'swift_dir': self.tempdir, + } + self.rx_logger = debug_logger('test-object-server') + rx_server = server.ObjectController(self.conf, logger=self.rx_logger) + sock = eventlet.listen((self.rx_ip, 0)) + self.rx_server = eventlet.spawn( + eventlet.wsgi.server, sock, rx_server, utils.NullLogger()) + self.rx_port = sock.getsockname()[1] + self.tx_logger = debug_logger('test-reconstructor') + self.daemon = ObjectReconstructor(self.conf, self.tx_logger) + self.daemon._diskfile_mgr = self.daemon._df_router[POLICIES[0]] + + def tearDown(self): + shutil.rmtree(self.tmpdir) + + def test_SSYNC_device_not_available(self): + with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\ + as mock_missing_check: + self.connection = bufferedhttp.BufferedHTTPConnection( + '127.0.0.1:%s' % self.rx_port) + self.connection.putrequest('SSYNC', '/sdc1/0') + self.connection.putheader('Transfer-Encoding', 'chunked') + self.connection.putheader('X-Backend-Storage-Policy-Index', + int(POLICIES[0])) + self.connection.endheaders() + resp = self.connection.getresponse() + self.assertEqual(507, resp.status) + resp.read() + resp.close() + # sanity check that the receiver did not proceed to missing_check + self.assertFalse(mock_missing_check.called) + + def test_SSYNC_invalid_policy(self): + valid_indices = sorted([int(policy) for policy in POLICIES]) + bad_index = valid_indices[-1] + 1 + with mock.patch('swift.obj.ssync_receiver.Receiver.missing_check')\ + as mock_missing_check: + self.connection = bufferedhttp.BufferedHTTPConnection( + '127.0.0.1:%s' % self.rx_port) + self.connection.putrequest('SSYNC', '/sda1/0') + self.connection.putheader('Transfer-Encoding', 'chunked') + self.connection.putheader('X-Backend-Storage-Policy-Index', + bad_index) + self.connection.endheaders() + resp = self.connection.getresponse() + self.assertEqual(503, resp.status) + resp.read() + resp.close() + # sanity check that the receiver did not proceed to missing_check + self.assertFalse(mock_missing_check.called) + + if __name__ == '__main__': unittest.main() diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index fa38b658b2..19fea0be2b 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -497,8 +497,6 @@ class TestSender(BaseTestSender): node = dict(replication_ip='1.2.3.4', replication_port=5678, device='sda1', index=0) job = dict(partition='9', policy=POLICIES.legacy) - self.sender = ssync_sender.Sender(self.daemon, node, job, None) - self.sender.suffixes = ['abc'] class FakeBufferedHTTPConnection(NullBufferedHTTPConnection): def getresponse(*args, **kwargs): @@ -506,16 +504,22 @@ class TestSender(BaseTestSender): response.status = 503 return response - with mock.patch.object( + missing_check_fn = 
'swift.obj.ssync_sender.Sender.missing_check' + with mock.patch(missing_check_fn) as mock_missing_check: + with mock.patch.object( ssync_sender.bufferedhttp, 'BufferedHTTPConnection', - FakeBufferedHTTPConnection): - success, candidates = self.sender() - self.assertFalse(success) - self.assertEquals(candidates, {}) + FakeBufferedHTTPConnection): + self.sender = ssync_sender.Sender( + self.daemon, node, job, ['abc']) + success, candidates = self.sender() + self.assertFalse(success) + self.assertEquals(candidates, {}) error_lines = self.daemon.logger.get_lines_for_level('error') for line in error_lines: self.assertTrue(line.startswith( '1.2.3.4:5678/sda1/9 Expected status 200; got 503')) + # sanity check that Sender did not proceed to missing_check exchange + self.assertFalse(mock_missing_check.called) def test_readline_newline_in_buffer(self): self.sender.response_buffer = 'Has a newline already.\r\nOkay.' From 191f2a00bd9121fddc03d8f07f15e5e34790541e Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 27 Apr 2015 16:39:23 +0100 Subject: [PATCH 45/98] Remove _ensure_flush() from SSYNC receiver The Receiver._ensure_flush() method in ssync_receiver.py has the following comment: Sends a blank line sufficient to flush buffers. This is to ensure Eventlet versions that don't support eventlet.minimum_write_chunk_size will send any previous data buffered. If https://bitbucket.org/eventlet/eventlet/pull-request/37 ever gets released in an Eventlet version, we should make this yield only for versions older than that. The reference pull request was included with eventlet 0.14 [1] and swift now requires >=0.16.1 so it is safe to remove _ensure_flush() and save > 8k bytes per SSYNC response. [1] https://bitbucket.org/eventlet/eventlet/commits/4bd654205a4217970a57a7c4802fed7ff2c8b770 Change-Id: I367e9a6e92b7ea75fe7e5795cded212657de57ed --- swift/obj/ssync_receiver.py | 27 ++++++--------------------- test/unit/obj/test_ssync_sender.py | 6 +++--- 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index b907ebf563..5f2461d62e 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -89,8 +89,9 @@ class Receiver(object): try: # Double try blocks in case our main error handlers fail. try: - for data in self._ensure_flush(): - yield data + # Need to send something to trigger wsgi to return response + # headers and kick off the ssync exchange. + yield '\r\n' # If semaphore is in use, try to acquire it, non-blocking, and # return a 503 if it fails. if self.app.replication_semaphore: @@ -142,20 +143,6 @@ class Receiver(object): except Exception: pass # We're okay with the above failing. - def _ensure_flush(self): - """ - Sends a blank line sufficient to flush buffers. - - This is to ensure Eventlet versions that don't support - eventlet.minimum_write_chunk_size will send any previous data - buffered. - - If https://bitbucket.org/eventlet/eventlet/pull-request/37 - ever gets released in an Eventlet version, we should make - this yield only for versions older than that. - """ - yield ' ' * eventlet.wsgi.MINIMUM_CHUNK_SIZE + '\r\n' - def initialize_request(self): """ Basic validation of request and mount check. @@ -163,7 +150,9 @@ class Receiver(object): This function will be called before attempting to acquire a replication semaphore lock, so contains only quick checks. """ - # The following is the setting we talk about above in _ensure_flush. 
+ # This environ override has been supported since eventlet 0.14: + # https://bitbucket.org/eventlet/eventlet/commits/ \ + # 4bd654205a4217970a57a7c4802fed7ff2c8b770 self.request.environ['eventlet.minimum_write_chunk_size'] = 0 self.device, self.partition, self.policy = \ request_helpers.get_name_and_placement(self.request, 2, 2, False) @@ -250,8 +239,6 @@ class Receiver(object): yield '\r\n'.join(object_hashes) yield '\r\n' yield ':MISSING_CHECK: END\r\n' - for data in self._ensure_flush(): - yield data def updates(self): """ @@ -385,5 +372,3 @@ class Receiver(object): (failures, successes)) yield ':UPDATES: START\r\n' yield ':UPDATES: END\r\n' - for data in self._ensure_flush(): - yield data diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index 19fea0be2b..c48a239351 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -1830,14 +1830,14 @@ class TestSsyncReplication(TestBaseSsync): self.assertFalse(results['tx_updates']) self.assertFalse(results['rx_updates']) # Minimal receiver response as read by sender: - # 2 * 4098 <-- _ensure_flush() twice + # 2 <-- initial \r\n to start ssync exchange # + 23 <-- :MISSING CHECK START\r\n # + 2 <-- \r\n (minimal missing check response) # + 21 <-- :MISSING CHECK END\r\n # + 17 <-- :UPDATES START\r\n # + 15 <-- :UPDATES END\r\n - # TOTAL = 8274 - self.assertEqual(8274, trace.get('readline_bytes')) + # TOTAL = 80 + self.assertEqual(80, trace.get('readline_bytes')) if __name__ == '__main__': From 5374ba3a80a5b895542196502eac4d9300ba53d2 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Wed, 27 May 2015 12:28:04 -0700 Subject: [PATCH 46/98] drop Python 2.6 testing support Change-Id: I78f21e5794e8ba7a095f03d279247516a241f555 --- doc/source/development_guidelines.rst | 2 +- doc/source/getting_started.rst | 3 +-- tox.ini | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 241eda6cf5..1da8457682 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -49,7 +49,7 @@ To execute the unit tests: * Optionally, run only specific tox builds: - - `tox -e pep8,py26` + - `tox -e pep8,py27` The functional tests may be executed against a :doc:`development_saio` or other running Swift cluster using the command: diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst index b3b201d461..0e3b408ad4 100644 --- a/doc/source/getting_started.rst +++ b/doc/source/getting_started.rst @@ -16,8 +16,7 @@ Swift is written in Python and has these dependencies: * The Python packages listed in `the requirements file `_ * Testing additionally requires `the test dependencies `_ -Python 2.6 should work, but it's not actively tested. There is no current -support for Python 3. +There is no current support for Python 3. 
------------- Getting Swift diff --git a/tox.ini b/tox.ini index 96e32f87a8..de72f26950 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26,py27,pep8 +envlist = py27,pep8 minversion = 1.6 skipsdist = True From 68c30b80b47c281c549d101c43d79e718e91d21d Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 28 May 2015 06:08:12 +0000 Subject: [PATCH 47/98] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I0c7b2bdb0edcc3bb3fa06903ec8457ca5e2dd688 --- swift/locale/zh_CN/LC_MESSAGES/swift.po | 1400 +++++++++++------------ 1 file changed, 700 insertions(+), 700 deletions(-) diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 1352c93f83..48f5ded42c 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: Swift\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-05 06:08+0000\n" +"POT-Creation-Date: 2015-05-28 06:08+0000\n" "PO-Revision-Date: 2015-04-15 12:48+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/" @@ -19,215 +19,6 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#, python-format -msgid "" -"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " -"audit" -msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败" - -msgid "Begin account audit pass." -msgstr "开始账号审计通过" - -msgid "ERROR auditing" -msgstr "错误 审计" - -#, python-format -msgid "Account audit pass completed: %.02fs" -msgstr "账号审计完成:%.02fs" - -msgid "Begin account audit \"once\" mode" -msgstr "开始账号审计\"once\"模式" - -#, python-format -msgid "Account audit \"once\" mode completed: %.02fs" -msgstr "账号审计\"once\"模式完成: %.02fs" - -#, python-format -msgid "" -"The total %(key)s for the container (%(total)s) does not match the sum of " -"%(key)s across policies (%(sum)s)" -msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)" - -#, python-format -msgid "Audit Failed for %s: %s" -msgstr "审计失败%s: %s" - -#, python-format -msgid "ERROR Could not get account info %s" -msgstr "错误:无法获取账号信息%s" - -#, python-format -msgid "Skipping %s as it is not mounted" -msgstr "挂载失败 跳过%s" - -msgid "Exception in top-level account reaper loop" -msgstr "异常出现在top-level账号reaper环" - -#, python-format -msgid "Devices pass completed: %.02fs" -msgstr "设备通过完成: %.02fs" - -#, python-format -msgid "Beginning pass on account %s" -msgstr "账号%s开始通过" - -#, python-format -msgid "Exception with containers for account %s" -msgstr "账号%s内容器出现异常" - -#, python-format -msgid "Exception with account %s" -msgstr "账号%s出现异常" - -#, python-format -msgid "Incomplete pass on account %s" -msgstr "账号%s未完成通过" - -#, python-format -msgid ", %s containers deleted" -msgstr ",删除容器%s" - -#, python-format -msgid ", %s objects deleted" -msgstr ",删除对象%s" - -#, python-format -msgid ", %s containers remaining" -msgstr ",剩余容器%s" - -#, python-format -msgid ", %s objects remaining" -msgstr ",剩余对象%s" - -#, python-format -msgid ", %s containers possibly remaining" -msgstr ",可能剩余容器%s" - -#, python-format -msgid ", %s objects possibly remaining" -msgstr ",可能剩余对象%s" - -msgid ", return codes: " -msgstr ",返回代码:" - -#, python-format -msgid ", elapsed: %.02fs" -msgstr ",耗时:%.02fs" - -#, python-format -msgid "Account %s has not been reaped since %s" -msgstr "账号%s自%s起未被reaped" - -#, python-format 
-msgid "Exception with %(ip)s:%(port)s/%(device)s" -msgstr "%(ip)s:%(port)s/%(device)s出现异常" - -#, python-format -msgid "" -"Exception with objects for container %(container)s for account %(account)s" -msgstr "账号%(account)s容器%(container)s的对象出现异常" - -#, python-format -msgid "ERROR __call__ error with %(method)s %(path)s " -msgstr "%(method)s %(path)s出现错误__call__ error" - -#, python-format -msgid "Error encoding to UTF-8: %s" -msgstr "UTF-8编码错误:%s" - -#, python-format -msgid "Could not load %r: %s" -msgstr "无法下载%r: %s" - -#, python-format -msgid "Error in %r with mtime_check_interval: %s" -msgstr "%r中mtime_check_interval出现错误:%s" - -#, python-format -msgid "Quarantined %s to %s due to %s database" -msgstr "隔离%s和%s 因为%s数据库" - -msgid "Broker error trying to rollback locked connection" -msgstr "服务器错误并尝试去回滚已经锁住的链接" - -#, python-format -msgid "Invalid pending entry %(file)s: %(entry)s" -msgstr "不可用的等待输入%(file)s: %(entry)s" - -#, python-format -msgid "ERROR reading HTTP response from %s" -msgstr "读取HTTP错误 响应来源%s" - -#, python-format -msgid "" -"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" -msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs" - -#, python-format -msgid "Removed %(remove)d dbs" -msgstr "删除%(remove)d dbs" - -#, python-format -msgid "%(success)s successes, %(failure)s failures" -msgstr "%(success)s成功,%(failure)s失败" - -#, python-format -msgid "ERROR rsync failed with %(code)s: %(args)s" -msgstr "错误 rsync失败 %(code)s: %(args)s" - -#, python-format -msgid "ERROR Bad response %(status)s from %(host)s" -msgstr "失败响应错误%(status)s来自%(host)s" - -#, python-format -msgid "Quarantining DB %s" -msgstr "隔离DB%s" - -#, python-format -msgid "ERROR reading db %s" -msgstr "错误 读取db %s" - -#, python-format -msgid "ERROR Remote drive not mounted %s" -msgstr "错误 远程驱动器无法挂载 %s" - -#, python-format -msgid "ERROR syncing %(file)s with node %(node)s" -msgstr "错误 同步 %(file)s 和 节点%(node)s" - -#, python-format -msgid "ERROR while trying to clean up %s" -msgstr "清理时出现错误%s" - -msgid "ERROR Failed to get my own IPs?" -msgstr "错误 无法获得我方IPs?" - -#, python-format -msgid "Skipping %(device)s as it is not mounted" -msgstr "因无法挂载跳过%(device)s" - -msgid "Beginning replication run" -msgstr "开始运行复制" - -msgid "Replication run OVER" -msgstr "复制运行结束" - -msgid "ERROR trying to replicate" -msgstr "尝试复制时发生错误" - -#, python-format -msgid "Unexpected response: %s" -msgstr "意外响应:%s" - -msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" -msgstr "警告:无法修改文件描述限制。是否按非root运行?" - -msgid "WARNING: Unable to modify memory limit. Running as non-root?" -msgstr "警告:无法修改内存极限,是否按非root运行?" - -msgid "WARNING: Unable to modify max process limit. Running as non-root?" -msgstr "警告:无法修改最大运行极限,是否按非root运行?" 
- msgid "" "\n" "user quit" @@ -236,261 +27,257 @@ msgstr "" "用户退出" #, python-format -msgid "No %s running" -msgstr "无%s账号运行" +msgid " - %s" +msgstr "- %s" #, python-format -msgid "%s (%s) appears to have stopped" -msgstr "%s (%s)显示已停止" +msgid " - parallel, %s" +msgstr "-平行,%s" #, python-format -msgid "Waited %s seconds for %s to die; giving up" -msgstr "等待%s秒直到%s停止;放弃" - -msgid "Found configs:" -msgstr "找到配置" +msgid "" +"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" +msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format -msgid "Signal %s pid: %s signal: %s" -msgstr "发出信号%s pid: %s 信号: %s" +msgid "%(ip)s/%(device)s responded as unmounted" +msgstr "%(ip)s/%(device)s的回应为未挂载" #, python-format -msgid "Removing stale pid file %s" -msgstr "移除原有pid文件%s" +msgid "%(msg)s %(ip)s:%(port)s/%(device)s" +msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format -msgid "No permission to signal PID %d" -msgstr "无权限发送信号PID%d" +msgid "" +"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " +"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" +msgstr "" +"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n" +"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" + +#, python-format +msgid "%(success)s successes, %(failure)s failures" +msgstr "%(success)s成功,%(failure)s失败" + +#, python-format +msgid "%(type)s returning 503 for %(statuses)s" +msgstr "%(type)s 返回 503 在 %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d无法运行(%s)" #, python-format -msgid "%s running (%s - %s)" -msgstr "%s运行(%s - %s)" +msgid "%s (%s) appears to have stopped" +msgstr "%s (%s)显示已停止" #, python-format msgid "%s already started..." msgstr "%s已启动..." -#, python-format -msgid "Running %s once" -msgstr "运行%s一次" - -#, python-format -msgid "Starting %s" -msgstr "启动%s" - #, python-format msgid "%s does not exist" msgstr "%s不存在" #, python-format -msgid "Timeout %(action)s to memcached: %(server)s" -msgstr "%(action)s超时 高性能内存对象缓存: %(server)s" +msgid "%s is not mounted" +msgstr "%s未挂载" #, python-format -msgid "Error %(action)s to memcached: %(server)s" -msgstr "%(action)s错误 高性能内存对象缓存: %(server)s" +msgid "%s running (%s - %s)" +msgstr "%s运行(%s - %s)" #, python-format -msgid "Error limiting server %s" -msgstr "服务器出现错误%s " +msgid ", %s containers deleted" +msgstr ",删除容器%s" #, python-format -msgid "Unable to locate %s in libc. Leaving as a no-op." -msgstr "无法查询到%s 保留为no-op" - -msgid "" -"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
-msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" - -msgid "Connection refused" -msgstr "连接被拒绝" - -msgid "Host unreachable" -msgstr "无法连接到主机" - -msgid "Connection timeout" -msgstr "连接超时" - -msgid "UNCAUGHT EXCEPTION" -msgstr "未捕获的异常" - -msgid "Error: missing config path argument" -msgstr "错误:设置路径信息丢失" +msgid ", %s containers possibly remaining" +msgstr ",可能剩余容器%s" #, python-format -msgid "Error: unable to locate %s" -msgstr "错误:无法查询到 %s" +msgid ", %s containers remaining" +msgstr ",剩余容器%s" #, python-format -msgid "Unable to read config from %s" -msgstr "无法从%s读取设置" +msgid ", %s objects deleted" +msgstr ",删除对象%s" #, python-format -msgid "Unable to find %s config section in %s" -msgstr "无法在%s中查找到%s设置部分" +msgid ", %s objects possibly remaining" +msgstr ",可能剩余对象%s" #, python-format -msgid "Invalid X-Container-Sync-To format %r" -msgstr "无效的X-Container-Sync-To格式%r" +msgid ", %s objects remaining" +msgstr ",剩余对象%s" #, python-format -msgid "No realm key for %r" -msgstr "%r权限key不存在" +msgid ", elapsed: %.02fs" +msgstr ",耗时:%.02fs" + +msgid ", return codes: " +msgstr ",返回代码:" + +msgid "Account" +msgstr "账号" #, python-format -msgid "No cluster endpoint for %r %r" -msgstr "%r %r的集群节点不存在" +msgid "Account %s has not been reaped since %s" +msgstr "账号%s自%s起未被reaped" + +#, python-format +msgid "Account audit \"once\" mode completed: %.02fs" +msgstr "账号审计\"once\"模式完成: %.02fs" + +#, python-format +msgid "Account audit pass completed: %.02fs" +msgstr "账号审计完成:%.02fs" #, python-format msgid "" -"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " -"\"https\"." -msgstr "" -"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" - -msgid "Path required in X-Container-Sync-To" -msgstr "在X-Container-Sync-To中路径是必须的" - -msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" -msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许" +"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" +msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs" #, python-format -msgid "Invalid host %r in X-Container-Sync-To" -msgstr "X-Container-Sync-To中无效主机%r" - -msgid "Exception dumping recon cache" -msgstr "执行dump recon的时候出现异常" +msgid "Audit Failed for %s: %s" +msgstr "审计失败%s: %s" #, python-format -msgid "Could not bind to %s:%s after trying for %s seconds" -msgstr "尝试过%s秒后无法捆绑%s:%s" +msgid "Bad rsync return code: %(ret)d <- %(args)s" +msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" -msgid "" -"WARNING: SSL should only be enabled for testing purposes. Use external SSL " -"termination for a production deployment." -msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端" +msgid "Begin account audit \"once\" mode" +msgstr "开始账号审计\"once\"模式" -msgid "Error: An error occurred" -msgstr "错误:一个错误发生了" +msgid "Begin account audit pass." +msgstr "开始账号审计通过" + +msgid "Begin container audit \"once\" mode" +msgstr "开始容器审计\"once\" 模式" + +msgid "Begin container audit pass." 
+msgstr "开始通过容器审计" + +msgid "Begin container sync \"once\" mode" +msgstr "开始容器同步\"once\"模式" + +msgid "Begin container update single threaded sweep" +msgstr "开始容器更新单线程扫除" + +msgid "Begin container update sweep" +msgstr "开始容器更新扫除" #, python-format -msgid "Mapped %(given_domain)s to %(found_domain)s" -msgstr "集合%(given_domain)s到%(found_domain)s" +msgid "Begin object audit \"%s\" mode (%s%s)" +msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)" + +msgid "Begin object update single threaded sweep" +msgstr "开始对象更新单线程扫除" + +msgid "Begin object update sweep" +msgstr "开始对象更新扫除" #, python-format -msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" -msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s" +msgid "Beginning pass on account %s" +msgstr "账号%s开始通过" -#, python-format -msgid "Returning 497 because of blacklisting: %s" -msgstr "返回497因为黑名单:%s" +msgid "Beginning replication run" +msgstr "开始运行复制" -#, python-format -msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" -msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" - -#, python-format -msgid "" -"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " -"Sleep) %(e)s" -msgstr "" -"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n" -"\"Sleep) %(e)s" - -msgid "Warning: Cannot ratelimit without a memcached client" -msgstr "警告:缺失缓存客户端 无法控制流量 " - -msgid "Error reading recon cache file" -msgstr "读取recon cache file时出现错误" - -msgid "Error parsing recon cache file" -msgstr "解析recon cache file时出现错误" - -msgid "Error retrieving recon data" -msgstr "检索recon data时出现错误" - -msgid "Error listing devices" -msgstr "设备列表时出现错误" - -msgid "Error reading ringfile" -msgstr "读取ringfile时出现错误" - -msgid "Error reading swift.conf" -msgstr "读取swift.conf时出现错误" - -#, python-format -msgid "Error on render profiling results: %s" -msgstr "给予分析结果时发生错误:%s" - -#, python-format -msgid "Profiling Error: %s" -msgstr "分析代码时出现错误:%s" - -#, python-format -msgid "method %s is not allowed." -msgstr "方法%s不被允许" - -#, python-format -msgid "Can not load profile data from %s." -msgstr "无法从%s下载分析数据" - -msgid "no log file found" -msgstr "日志文件丢失" - -#, python-format -msgid "Data download error: %s" -msgstr "数据下载错误:%s" - -msgid "python-matplotlib not installed." -msgstr "python-matplotlib未安装" - -#, python-format -msgid "plotting results failed due to %s" -msgstr "绘制结果图标时失败因为%s" - -msgid "The file type are forbidden to access!" -msgstr "该文件类型被禁止访问!" +msgid "Broker error trying to rollback locked connection" +msgstr "服务器错误并尝试去回滚已经锁住的链接" #, python-format msgid "Can not access the file %s." msgstr "无法访问文件%s" -msgid "odfpy not installed." -msgstr "odfpy未安装" +#, python-format +msgid "Can not load profile data from %s." +msgstr "无法从%s下载分析数据" + +#, python-format +msgid "Client did not read from proxy within %ss" +msgstr "客户尚未从代理处读取%ss" + +msgid "Client disconnected on read" +msgstr "客户读取时中断" + +msgid "Client disconnected without sending enough data" +msgstr "客户中断 尚未发送足够" #, python-format msgid "" -"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " -"audit" -msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" +"Client path %(client)s does not match path stored in object metadata %(meta)s" +msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" -msgid "Begin container audit pass." 
-msgstr "开始通过容器审计" +msgid "Connection refused" +msgstr "连接被拒绝" -#, python-format -msgid "Container audit pass completed: %.02fs" -msgstr "容器审计通过完成: %.02fs" +msgid "Connection timeout" +msgstr "连接超时" -msgid "Begin container audit \"once\" mode" -msgstr "开始容器审计\"once\" 模式" +msgid "Container" +msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "容器审计\"once\"模式完成:%.02fs" #, python-format -msgid "ERROR Could not get container info %s" -msgstr "错误:无法获取容器%s信息" +msgid "Container audit pass completed: %.02fs" +msgstr "容器审计通过完成: %.02fs" + +#, python-format +msgid "Container sync \"once\" mode completed: %.02fs" +msgstr "容器同步\"once\"模式完成:%.02fs" #, python-format msgid "" -"ERROR Account update failed: different numbers of hosts and devices in " -"request: \"%s\" vs \"%s\"" -msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\"" +"Container update single threaded sweep completed: %(elapsed).02fs, " +"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" +"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, " +"%(no_change)s 无更改" + +#, python-format +msgid "Container update sweep completed: %.02fs" +msgstr "容器更新扫除完成:%.02fs" + +#, python-format +msgid "" +"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures, %(no_change)s with no changes" +msgstr "" +"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, " +"%(fail)s 失败, %(no_change)s 无更改" + +#, python-format +msgid "Could not bind to %s:%s after trying for %s seconds" +msgstr "尝试过%s秒后无法捆绑%s:%s" + +#, python-format +msgid "Could not load %r: %s" +msgstr "无法下载%r: %s" + +#, python-format +msgid "Data download error: %s" +msgstr "数据下载错误:%s" + +#, python-format +msgid "Devices pass completed: %.02fs" +msgstr "设备通过完成: %.02fs" + +#, python-format +msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" +msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" + +#, python-format +msgid "ERROR %(status)d %(body)s From %(type)s Server" +msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器" + +#, python-format +msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" +msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s" #, python-format msgid "" @@ -502,92 +289,89 @@ msgstr "" #, python-format msgid "" -"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " -"later)" -msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" - -msgid "Begin container sync \"once\" mode" -msgstr "开始容器同步\"once\"模式" +"ERROR Account update failed: different numbers of hosts and devices in " +"request: \"%s\" vs \"%s\"" +msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\"" #, python-format -msgid "Container sync \"once\" mode completed: %.02fs" -msgstr "容器同步\"once\"模式完成:%.02fs" +msgid "ERROR Bad response %(status)s from %(host)s" +msgstr "失败响应错误%(status)s来自%(host)s" + +#, python-format +msgid "ERROR Client read timeout (%ss)" +msgstr "错误 客户读取超时(%ss)" #, python-format msgid "" -"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " -"skipped, %(fail)s failed" +"ERROR Container update failed (saving for async update later): %(status)d " +"response from %(ip)s:%(port)s/%(dev)s" msgstr "" -"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n" -"\"%(skip)s 跳过, %(fail)s 失败" - -#, python-format -msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" -msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" - -#, python-format -msgid "ERROR Syncing %s" -msgstr "同步时发生错误%s" - -#, python-format -msgid "Unauth %(sync_from)r => %(sync_to)r" 
-msgstr "未授权%(sync_from)r => %(sync_to)r" +"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/" +"%(dev)s" #, python-format msgid "" -"Not found %(sync_from)r => %(sync_to)r - object " -"%(obj_name)r" -msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" +"ERROR Container update failed: different numbers of hosts and devices in " +"request: \"%s\" vs \"%s\"" +msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\"" + +#, python-format +msgid "ERROR Could not get account info %s" +msgstr "错误:无法获取账号信息%s" + +#, python-format +msgid "ERROR Could not get container info %s" +msgstr "错误:无法获取容器%s信息" + +#, python-format +msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" +msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" + +msgid "ERROR Exception causing client disconnect" +msgstr "错误 异常导致客户端中断连接" + +msgid "ERROR Failed to get my own IPs?" +msgstr "错误 无法获得我方IPs?" + +msgid "ERROR Insufficient Storage" +msgstr "错误 存储空间不足" + +#, python-format +msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" +msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" + +#, python-format +msgid "ERROR Pickle problem, quarantining %s" +msgstr "错误 Pickle问题 隔离%s" + +#, python-format +msgid "ERROR Remote drive not mounted %s" +msgstr "错误 远程驱动器无法挂载 %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步错误 %(db_file)s %(row)s" #, python-format -msgid "ERROR: Failed to get paths to drive partitions: %s" -msgstr "%s未挂载" +msgid "ERROR Syncing %s" +msgstr "同步时发生错误%s" #, python-format -msgid "%s is not mounted" -msgstr "%s未挂载" +msgid "ERROR Trying to audit %s" +msgstr "错误 尝试开始审计%s" + +msgid "ERROR Unhandled exception in request" +msgstr "错误 未处理的异常发出请求" #, python-format -msgid "ERROR with loading suppressions from %s: " -msgstr "执行下载压缩时发生错误%s" - -msgid "Begin container update sweep" -msgstr "开始容器更新扫除" +msgid "ERROR __call__ error with %(method)s %(path)s " +msgstr "%(method)s %(path)s出现错误__call__ error" #, python-format msgid "" -"Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " -"successes, %(fail)s failures, %(no_change)s with no changes" -msgstr "" -"通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, " -"%(fail)s 失败, %(no_change)s 无更改" - -#, python-format -msgid "Container update sweep completed: %.02fs" -msgstr "容器更新扫除完成:%.02fs" - -msgid "Begin container update single threaded sweep" -msgstr "开始容器更新单线程扫除" - -#, python-format -msgid "" -"Container update single threaded sweep completed: %(elapsed).02fs, " -"%(success)s successes, %(fail)s failures, %(no_change)s with no changes" -msgstr "" -"容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, " -"%(no_change)s 无更改" - -#, python-format -msgid "Update report sent for %(container)s %(dbfile)s" -msgstr "更新报告发至%(container)s %(dbfile)s" - -#, python-format -msgid "Update report failed for %(container)s %(dbfile)s" -msgstr "%(container)s %(dbfile)s更新报告失败" +"ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " +"later)" +msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" #, python-format msgid "" @@ -596,27 +380,251 @@ msgid "" msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):" #, python-format -msgid " - parallel, %s" -msgstr "-平行,%s" +msgid "ERROR async pending file with unexpected name %s" +msgstr "执行同步等待文件 文件名不可知%s" + +msgid "ERROR auditing" +msgstr "错误 审计" #, python-format -msgid " - %s" -msgstr "- %s" - -#, python-format -msgid "Begin object audit \"%s\" mode (%s%s)" -msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)" +msgid "ERROR auditing: %s" +msgstr "审计错误:%s" #, python-format 
msgid "" -"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " -"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: " -"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " -"%(audit_rate).2f" +"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " +"update later)" +msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" + +#, python-format +msgid "ERROR reading HTTP response from %s" +msgstr "读取HTTP错误 响应来源%s" + +#, python-format +msgid "ERROR reading db %s" +msgstr "错误 读取db %s" + +#, python-format +msgid "ERROR rsync failed with %(code)s: %(args)s" +msgstr "错误 rsync失败 %(code)s: %(args)s" + +#, python-format +msgid "ERROR syncing %(file)s with node %(node)s" +msgstr "错误 同步 %(file)s 和 节点%(node)s" + +msgid "ERROR trying to replicate" +msgstr "尝试复制时发生错误" + +#, python-format +msgid "ERROR while trying to clean up %s" +msgstr "清理时出现错误%s" + +#, python-format +msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" +msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" + +#, python-format +msgid "ERROR with loading suppressions from %s: " +msgstr "执行下载压缩时发生错误%s" + +#, python-format +msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" +msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" + +#, python-format +msgid "ERROR: Failed to get paths to drive partitions: %s" +msgstr "%s未挂载" + +#, python-format +msgid "ERROR: Unable to run auditing: %s" +msgstr "错误:无法执行审计:%s" + +#, python-format +msgid "Error %(action)s to memcached: %(server)s" +msgstr "%(action)s错误 高性能内存对象缓存: %(server)s" + +#, python-format +msgid "Error encoding to UTF-8: %s" +msgstr "UTF-8编码错误:%s" + +msgid "Error hashing suffix" +msgstr "执行Hashing后缀时发生错误" + +#, python-format +msgid "Error in %r with mtime_check_interval: %s" +msgstr "%r中mtime_check_interval出现错误:%s" + +#, python-format +msgid "Error limiting server %s" +msgstr "服务器出现错误%s " + +msgid "Error listing devices" +msgstr "设备列表时出现错误" + +#, python-format +msgid "Error on render profiling results: %s" +msgstr "给予分析结果时发生错误:%s" + +msgid "Error parsing recon cache file" +msgstr "解析recon cache file时出现错误" + +msgid "Error reading recon cache file" +msgstr "读取recon cache file时出现错误" + +msgid "Error reading ringfile" +msgstr "读取ringfile时出现错误" + +msgid "Error reading swift.conf" +msgstr "读取swift.conf时出现错误" + +msgid "Error retrieving recon data" +msgstr "检索recon data时出现错误" + +msgid "Error syncing handoff partition" +msgstr "执行同步切换分区时发生错误" + +msgid "Error syncing partition" +msgstr "执行同步分区时发生错误" + +#, python-format +msgid "Error syncing with node: %s" +msgstr "执行同步时节点%s发生错误" + +msgid "Error: An error occurred" +msgstr "错误:一个错误发生了" + +msgid "Error: missing config path argument" +msgstr "错误:设置路径信息丢失" + +#, python-format +msgid "Error: unable to locate %s" +msgstr "错误:无法查询到 %s" + +msgid "Exception dumping recon cache" +msgstr "执行dump recon的时候出现异常" + +msgid "Exception in top-level account reaper loop" +msgstr "异常出现在top-level账号reaper环" + +msgid "Exception in top-level replication loop" +msgstr "top-level复制圈出现异常" + +#, python-format +msgid "Exception while deleting container %s %s" +msgstr "执行删除容器时出现异常 %s %s" + +#, python-format +msgid "Exception while deleting object %s %s %s" +msgstr "执行删除对象时发生异常%s %s %s" + +#, python-format +msgid "Exception with %(ip)s:%(port)s/%(device)s" +msgstr "%(ip)s:%(port)s/%(device)s出现异常" + +#, python-format +msgid "Exception with account %s" +msgstr "账号%s出现异常" + +#, python-format +msgid "Exception with containers for account %s" +msgstr 
"账号%s内容器出现异常" + +#, python-format +msgid "" +"Exception with objects for container %(container)s for account %(account)s" +msgstr "账号%(account)s容器%(container)s的对象出现异常" + +#, python-format +msgid "Expect: 100-continue on %s" +msgstr "已知:100-continue on %s" + +#, python-format +msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" +msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s" + +msgid "Found configs:" +msgstr "找到配置" + +msgid "Host unreachable" +msgstr "无法连接到主机" + +#, python-format +msgid "Incomplete pass on account %s" +msgstr "账号%s未完成通过" + +#, python-format +msgid "Invalid X-Container-Sync-To format %r" +msgstr "无效的X-Container-Sync-To格式%r" + +#, python-format +msgid "Invalid host %r in X-Container-Sync-To" +msgstr "X-Container-Sync-To中无效主机%r" + +#, python-format +msgid "Invalid pending entry %(file)s: %(entry)s" +msgstr "不可用的等待输入%(file)s: %(entry)s" + +#, python-format +msgid "Invalid response %(resp)s from %(ip)s" +msgstr "无效的回应%(resp)s来自%(ip)s" + +#, python-format +msgid "" +"Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " +"\"https\"." msgstr "" -"对象审计 (%(type)s). 自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d " -"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: " -"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" +"在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" + +#, python-format +msgid "Killing long-running rsync: %s" +msgstr "终止long-running同步: %s" + +msgid "Lockup detected.. killing live coros." +msgstr "检测到lockup。终止正在执行的coros" + +#, python-format +msgid "Mapped %(given_domain)s to %(found_domain)s" +msgstr "集合%(given_domain)s到%(found_domain)s" + +#, python-format +msgid "No %s running" +msgstr "无%s账号运行" + +#, python-format +msgid "No cluster endpoint for %r %r" +msgstr "%r %r的集群节点不存在" + +#, python-format +msgid "No permission to signal PID %d" +msgstr "无权限发送信号PID%d" + +#, python-format +msgid "No realm key for %r" +msgstr "%r权限key不存在" + +#, python-format +msgid "Node error limited %(ip)s:%(port)s (%(device)s)" +msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" + +#, python-format +msgid "" +"Not found %(sync_from)r => %(sync_to)r - object " +"%(obj_name)r" +msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" + +#, python-format +msgid "Nothing replicated for %s seconds." +msgstr "%s秒无复制" + +msgid "Object" +msgstr "对象" + +msgid "Object PUT" +msgstr "对象上传" + +#, python-format +msgid "Object PUT returning 412, %(statuses)r" +msgstr "对象PUT返还 412,%(statuses)r " #, python-format msgid "" @@ -629,191 +637,32 @@ msgstr "" "%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: " "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" +#, python-format +msgid "" +"Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " +"%(quars)d quarantined, %(errors)d errors files/sec: %(frate).2f , bytes/sec: " +"%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " +"%(audit_rate).2f" +msgstr "" +"对象审计 (%(type)s). 
自 %(start_time)s开始: 本地: %(passes)d 通过, %(quars)d " +"隔离, %(errors)d 错误 文件/秒: %(frate).2f , bytes/秒: %(brate).2f, 总时间: " +"%(total).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" + #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" -#, python-format -msgid "ERROR Trying to audit %s" -msgstr "错误 尝试开始审计%s" - -#, python-format -msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" -msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" - -#, python-format -msgid "ERROR: Unable to run auditing: %s" -msgstr "错误:无法执行审计:%s" - -#, python-format -msgid "ERROR auditing: %s" -msgstr "审计错误:%s" - -#, python-format -msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" -msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录" - -msgid "Error hashing suffix" -msgstr "执行Hashing后缀时发生错误" - -#, python-format -msgid "" -"Quarantined %(object_path)s to %(quar_path)s because it is not a directory" -msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" - -#, python-format -msgid "Problem cleaning up %s" -msgstr "问题清除%s" - -#, python-format -msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" -msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" - -#, python-format -msgid "" -"Client path %(client)s does not match path stored in object metadata %(meta)s" -msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" - -#, python-format -msgid "Pass completed in %ds; %d objects expired" -msgstr "%ds通过完成; %d对象过期" - -#, python-format -msgid "Pass so far %ds; %d objects expired" -msgstr "%ds目前通过;%d对象过期" - -#, python-format -msgid "Pass beginning; %s possible containers; %s possible objects" -msgstr "开始通过;%s可能容器;%s可能对象" - -#, python-format -msgid "Exception while deleting container %s %s" -msgstr "执行删除容器时出现异常 %s %s" - -msgid "Unhandled exception" -msgstr "未处理的异常" - -#, python-format -msgid "Exception while deleting object %s %s %s" -msgstr "执行删除对象时发生异常%s %s %s" - -#, python-format -msgid "" -"%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" -msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" - -#, python-format -msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" -msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" - -msgid "Lockup detected.. killing live coros." -msgstr "检测到lockup。终止正在执行的coros" - -#, python-format -msgid "Removing partition: %s" -msgstr "移除分区:%s" - -#, python-format -msgid "Killing long-running rsync: %s" -msgstr "终止long-running同步: %s" - -#, python-format -msgid "Bad rsync return code: %(ret)d <- %(args)s" -msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" - -#, python-format -msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" -msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" - -msgid "Error syncing handoff partition" -msgstr "执行同步切换分区时发生错误" - -#, python-format -msgid "%(ip)s/%(device)s responded as unmounted" -msgstr "%(ip)s/%(device)s的回应为未挂载" - -#, python-format -msgid "Invalid response %(resp)s from %(ip)s" -msgstr "无效的回应%(resp)s来自%(ip)s" - -#, python-format -msgid "Error syncing with node: %s" -msgstr "执行同步时节点%s发生错误" - -msgid "Error syncing partition" -msgstr "执行同步分区时发生错误" - -#, python-format -msgid "" -"%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " -"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" -msgstr "" -"%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n" -"\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" - -#, python-format -msgid "Nothing replicated for %s seconds." -msgstr "%s秒无复制" - -msgid "Ring change detected. 
Aborting current replication pass." -msgstr "Ring改变被检测到。退出现有的复制通过" - -msgid "Exception in top-level replication loop" -msgstr "top-level复制圈出现异常" - -msgid "Running object replicator in script mode." -msgstr "在加密模式下执行对象复制" - #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象复制完成(一次)。(%.02f minutes)" -msgid "Starting object replicator in daemon mode." -msgstr "在守护模式下开始对象复制" - -msgid "Starting object replication pass." -msgstr "开始通过对象复制" - #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "对象复制完成。(%.02f minutes)" #, python-format -msgid "" -"ERROR Container update failed (saving for async update later): %(status)d " -"response from %(ip)s:%(port)s/%(dev)s" -msgstr "" -"错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/" -"%(dev)s" - -#, python-format -msgid "" -"ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " -"update later)" -msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" - -#, python-format -msgid "" -"ERROR Container update failed: different numbers of hosts and devices in " -"request: \"%s\" vs \"%s\"" -msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\"" - -msgid "Begin object update sweep" -msgstr "开始对象更新扫除" - -#, python-format -msgid "" -"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " -"successes, %(fail)s failures" -msgstr "" -"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" - -#, python-format -msgid "Object update sweep completed: %.02fs" -msgstr "对象更新扫除完成:%.02fs" - -msgid "Begin object update single threaded sweep" -msgstr "开始对象更新单线程扫除" +msgid "Object servers returned %s mismatched etags" +msgstr "对象服务器返还%s不匹配etags" #, python-format msgid "" @@ -823,105 +672,256 @@ msgstr "" "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败" #, python-format -msgid "ERROR async pending file with unexpected name %s" -msgstr "执行同步等待文件 文件名不可知%s" +msgid "Object update sweep completed: %.02fs" +msgstr "对象更新扫除完成:%.02fs" #, python-format -msgid "ERROR Pickle problem, quarantining %s" -msgstr "错误 Pickle问题 隔离%s" +msgid "" +"Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " +"successes, %(fail)s failures" +msgstr "" +"%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" + +msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" +msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许" #, python-format -msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" -msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" - -msgid "ERROR Unhandled exception in request" -msgstr "错误 未处理的异常发出请求" +msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" +msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" #, python-format -msgid "Node error limited %(ip)s:%(port)s (%(device)s)" -msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" +msgid "Pass beginning; %s possible containers; %s possible objects" +msgstr "开始通过;%s可能容器;%s可能对象" #, python-format -msgid "%(msg)s %(ip)s:%(port)s/%(device)s" -msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +msgid "Pass completed in %ds; %d objects expired" +msgstr "%ds通过完成; %d对象过期" #, python-format -msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" -msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" +msgid "Pass so far %ds; %d objects expired" +msgstr "%ds目前通过;%d对象过期" -msgid "Account" -msgstr "账号" - -msgid "Object" -msgstr "对象" - -msgid "Trying to read during GET (retrying)" -msgstr "执行GET时尝试读取(重新尝试)" - -msgid "Trying to read during GET" -msgstr 
"执行GET时尝试读取" +msgid "Path required in X-Container-Sync-To" +msgstr "在X-Container-Sync-To中路径是必须的" #, python-format -msgid "Client did not read from proxy within %ss" -msgstr "客户尚未从代理处读取%ss" +msgid "Problem cleaning up %s" +msgstr "问题清除%s" -msgid "Client disconnected on read" -msgstr "客户读取时中断" +#, python-format +msgid "Profiling Error: %s" +msgstr "分析代码时出现错误:%s" -msgid "Trying to send to client" -msgstr "尝试发送到客户端" +#, python-format +msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" +msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录" + +#, python-format +msgid "" +"Quarantined %(object_path)s to %(quar_path)s because it is not a directory" +msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" + +#, python-format +msgid "Quarantined %s to %s due to %s database" +msgstr "隔离%s和%s 因为%s数据库" + +#, python-format +msgid "Quarantining DB %s" +msgstr "隔离DB%s" + +#, python-format +msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" +msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" + +#, python-format +msgid "Removed %(remove)d dbs" +msgstr "删除%(remove)d dbs" + +#, python-format +msgid "Removing partition: %s" +msgstr "移除分区:%s" + +#, python-format +msgid "Removing stale pid file %s" +msgstr "移除原有pid文件%s" + +msgid "Replication run OVER" +msgstr "复制运行结束" + +#, python-format +msgid "Returning 497 because of blacklisting: %s" +msgstr "返回497因为黑名单:%s" + +#, python-format +msgid "" +"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " +"Sleep) %(e)s" +msgstr "" +"返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n" +"\"Sleep) %(e)s" + +msgid "Ring change detected. Aborting current replication pass." +msgstr "Ring改变被检测到。退出现有的复制通过" + +#, python-format +msgid "Running %s once" +msgstr "运行%s一次" + +msgid "Running object replicator in script mode." +msgstr "在加密模式下执行对象复制" + +#, python-format +msgid "Signal %s pid: %s signal: %s" +msgstr "发出信号%s pid: %s 信号: %s" + +#, python-format +msgid "" +"Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " +"skipped, %(fail)s failed" +msgstr "" +"自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n" +"\"%(skip)s 跳过, %(fail)s 失败" + +#, python-format +msgid "" +"Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " +"audit" +msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败" + +#, python-format +msgid "" +"Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " +"audit" +msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" + +#, python-format +msgid "Skipping %(device)s as it is not mounted" +msgstr "因无法挂载跳过%(device)s" + +#, python-format +msgid "Skipping %s as it is not mounted" +msgstr "挂载失败 跳过%s" + +#, python-format +msgid "Starting %s" +msgstr "启动%s" + +msgid "Starting object replication pass." +msgstr "开始通过对象复制" + +msgid "Starting object replicator in daemon mode." +msgstr "在守护模式下开始对象复制" + +#, python-format +msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" +msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" + +msgid "The file type are forbidden to access!" +msgstr "该文件类型被禁止访问!" 
+ +#, python-format +msgid "" +"The total %(key)s for the container (%(total)s) does not match the sum of " +"%(key)s across policies (%(sum)s)" +msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)" + +#, python-format +msgid "Timeout %(action)s to memcached: %(server)s" +msgstr "%(action)s超时 高性能内存对象缓存: %(server)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "尝试执行%(method)s %(path)s" -msgid "ERROR Insufficient Storage" -msgstr "错误 存储空间不足" - #, python-format -msgid "ERROR %(status)d %(body)s From %(type)s Server" -msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器" +msgid "Trying to get final status of PUT to %s" +msgstr "尝试执行获取最后的PUT状态%s" -#, python-format -msgid "%(type)s returning 503 for %(statuses)s" -msgstr "%(type)s 返回 503 在 %(statuses)s" +msgid "Trying to read during GET" +msgstr "执行GET时尝试读取" -msgid "Container" -msgstr "容器" +msgid "Trying to read during GET (retrying)" +msgstr "执行GET时尝试读取(重新尝试)" + +msgid "Trying to send to client" +msgstr "尝试发送到客户端" #, python-format msgid "Trying to write to %s" msgstr "尝试执行书写%s" -#, python-format -msgid "Expect: 100-continue on %s" -msgstr "已知:100-continue on %s" +msgid "UNCAUGHT EXCEPTION" +msgstr "未捕获的异常" #, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "尝试执行获取最后的PUT状态%s" +msgid "Unable to find %s config section in %s" +msgstr "无法在%s中查找到%s设置部分" #, python-format -msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" -msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s" +msgid "Unable to locate %s in libc. Leaving as a no-op." +msgstr "无法查询到%s 保留为no-op" + +msgid "" +"Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." +msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" #, python-format -msgid "Object PUT returning 412, %(statuses)r" -msgstr "对象PUT返还 412,%(statuses)r " +msgid "Unable to read config from %s" +msgstr "无法从%s读取设置" #, python-format -msgid "ERROR Client read timeout (%ss)" -msgstr "错误 客户读取超时(%ss)" - -msgid "ERROR Exception causing client disconnect" -msgstr "错误 异常导致客户端中断连接" - -msgid "Client disconnected without sending enough data" -msgstr "客户中断 尚未发送足够" +msgid "Unauth %(sync_from)r => %(sync_to)r" +msgstr "未授权%(sync_from)r => %(sync_to)r" #, python-format -msgid "Object servers returned %s mismatched etags" -msgstr "对象服务器返还%s不匹配etags" +msgid "Unexpected response: %s" +msgstr "意外响应:%s" -msgid "Object PUT" -msgstr "对象上传" +msgid "Unhandled exception" +msgstr "未处理的异常" + +#, python-format +msgid "Update report failed for %(container)s %(dbfile)s" +msgstr "%(container)s %(dbfile)s更新报告失败" + +#, python-format +msgid "Update report sent for %(container)s %(dbfile)s" +msgstr "更新报告发至%(container)s %(dbfile)s" + +msgid "" +"WARNING: SSL should only be enabled for testing purposes. Use external SSL " +"termination for a production deployment." +msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端" + +msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" +msgstr "警告:无法修改文件描述限制。是否按非root运行?" + +msgid "WARNING: Unable to modify max process limit. Running as non-root?" +msgstr "警告:无法修改最大运行极限,是否按非root运行?" + +msgid "WARNING: Unable to modify memory limit. Running as non-root?" +msgstr "警告:无法修改内存极限,是否按非root运行?" + +#, python-format +msgid "Waited %s seconds for %s to die; giving up" +msgstr "等待%s秒直到%s停止;放弃" + +msgid "Warning: Cannot ratelimit without a memcached client" +msgstr "警告:缺失缓存客户端 无法控制流量 " + +#, python-format +msgid "method %s is not allowed." +msgstr "方法%s不被允许" + +msgid "no log file found" +msgstr "日志文件丢失" + +msgid "odfpy not installed." 
+msgstr "odfpy未安装" + +#, python-format +msgid "plotting results failed due to %s" +msgstr "绘制结果图标时失败因为%s" + +msgid "python-matplotlib not installed." +msgstr "python-matplotlib未安装" From 736cf54adf3ee85d2f473e5e5374f9833422967c Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 28 May 2015 15:30:47 -0700 Subject: [PATCH 48/98] Remove simplejson from tests Since we're dropping Python 2.6 support, we can rely on stdlib's json and get rid of our dependency on simplejson. This commit just takes simplejson out of the unit and functional tests. They still pass. Change-Id: I96f17df81fa5d265395a938b19213d2638682106 --- test/functional/test_object.py | 2 +- test/unit/common/middleware/test_list_endpoints.py | 3 ++- test/unit/common/test_direct_client.py | 3 ++- test/unit/proxy/controllers/test_info.py | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/functional/test_object.py b/test/functional/test_object.py index e74a7f632e..4a62da1a77 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -15,11 +15,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import unittest from nose import SkipTest from uuid import uuid4 -from swift.common.utils import json from test.functional import check_response, retry, requires_acls, \ requires_policies diff --git a/test/unit/common/middleware/test_list_endpoints.py b/test/unit/common/middleware/test_list_endpoints.py index 3ec0379586..2537d0ffdc 100644 --- a/test/unit/common/middleware/test_list_endpoints.py +++ b/test/unit/common/middleware/test_list_endpoints.py @@ -14,6 +14,7 @@ # limitations under the License. import array +import json import unittest from tempfile import mkdtemp from shutil import rmtree @@ -21,7 +22,7 @@ from shutil import rmtree import os import mock from swift.common import ring, utils -from swift.common.utils import json, split_path +from swift.common.utils import split_path from swift.common.swob import Request, Response from swift.common.middleware import list_endpoints from swift.common.storage_policy import StoragePolicy, POLICIES diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 6f7660cdf3..145ac83c08 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import unittest import os import urllib @@ -25,7 +26,7 @@ import mock from swift.common import direct_client from swift.common.exceptions import ClientException -from swift.common.utils import json, Timestamp +from swift.common.utils import Timestamp from swift.common.swob import HeaderKeyDict, RESPONSE_REASONS from swift.common.storage_policy import POLICIES diff --git a/test/unit/proxy/controllers/test_info.py b/test/unit/proxy/controllers/test_info.py index f33beba024..adf3329683 100644 --- a/test/unit/proxy/controllers/test_info.py +++ b/test/unit/proxy/controllers/test_info.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import json import unittest import time from mock import Mock @@ -20,7 +21,6 @@ from mock import Mock from swift.proxy.controllers import InfoController from swift.proxy.server import Application as ProxyApp from swift.common import utils -from swift.common.utils import json from swift.common.swob import Request, HTTPException From 38787d0fb5102e41b153e4629d2ef374a02965e9 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 26 May 2015 16:19:54 -0700 Subject: [PATCH 49/98] Remove simplejson from staticweb Since we're dropping Python 2.6 support, we can rely on stdlib's json and get rid of our dependency on simplejson. This lets us get rid of some redundant Unicode encoding. Before, we would take the container-listing response off the wire, JSON-deserialize it (str -> unicode), then pass each of several fields from each entry to get_valid_utf8_str(), which would encode it, (unicode -> str), decode it (str -> unicode), and then encode it again (unicode -> str) for good measure. The net effect was that each object's name would, in the proxy server, go str -> unicode -> str -> unicode -> str. By replacing simplejson with stdlib json, we get a guarantee that each container-listing entry's name, hash, content_type, and last_modified are unicodes, so we can stop worrying about them being valid UTF-8 or not. This takes an encode and decode out of the path, so we just have str -> unicode -> str. While it'd be ideal to avoid this, the first transform (str -> unicode) happens when we decode the container-listing response body (json.loads()), so there's no way out. Change-Id: I00aedf952d691a809c23025b89131ea0f02b6431 --- swift/common/middleware/staticweb.py | 18 +++++++------ test/unit/common/middleware/test_staticweb.py | 26 +------------------ 2 files changed, 11 insertions(+), 33 deletions(-) diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py index 34b102ea53..d16b5ae3e7 100644 --- a/swift/common/middleware/staticweb.py +++ b/swift/common/middleware/staticweb.py @@ -117,10 +117,11 @@ Example usage of this middleware via ``swift``: import cgi +import json import time from swift.common.utils import human_readable, split_path, config_true_value, \ - json, quote, get_valid_utf8_str, register_swift_info + quote, register_swift_info from swift.common.wsgi import make_pre_authed_env, WSGIContext from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound @@ -289,7 +290,7 @@ class _StaticWebContext(WSGIContext): ' \n' for item in listing: if 'subdir' in item: - subdir = get_valid_utf8_str(item['subdir']) + subdir = item['subdir'].encode("utf-8") if prefix: subdir = subdir[len(prefix):] body += ' \n' \ @@ -300,13 +301,14 @@ class _StaticWebContext(WSGIContext): (quote(subdir), cgi.escape(subdir)) for item in listing: if 'name' in item: - name = get_valid_utf8_str(item['name']) + name = item['name'].encode("utf-8") if prefix: name = name[len(prefix):] - content_type = get_valid_utf8_str(item['content_type']) - bytes = get_valid_utf8_str(human_readable(item['bytes'])) - last_modified = (cgi.escape(item['last_modified']). - split('.')[0].replace('T', ' ')) + content_type = item['content_type'].encode("utf-8") + bytes = human_readable(item['bytes']) + last_modified = ( + cgi.escape(item['last_modified'].encode("utf-8")). 
+                split('.')[0].replace('T', ' '))
             body += ' \n' \
                     ' %s\n' \
                     ' %s\n' \
@@ -315,7 +317,7 @@ class _StaticWebContext(WSGIContext):
                 (' '.join('type-' + cgi.escape(t.lower(), quote=True)
                           for t in content_type.split('/')),
                  quote(name), cgi.escape(name),
-                 bytes, get_valid_utf8_str(last_modified))
+                 bytes, last_modified)
         body += ' \n' \
                 ' \n' \
                 '\n'
diff --git a/test/unit/common/middleware/test_staticweb.py b/test/unit/common/middleware/test_staticweb.py
index a5c61b1c31..fe361fde7a 100644
--- a/test/unit/common/middleware/test_staticweb.py
+++ b/test/unit/common/middleware/test_staticweb.py
@@ -13,15 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-try:
-    import simplejson as json
-except ImportError:
-    import json
-import json as stdlib_json
+import json
 import unittest

-import mock
-
 from swift.common.swob import Request, Response
 from swift.common.middleware import staticweb

@@ -699,24 +693,6 @@ class TestStaticWeb(unittest.TestCase):
         self.assert_('listing.css' not in resp.body)
         self.assert_('
Date: Mon, 1 Jun 2015 06:50:33 +0000
Subject: [PATCH 50/98] Make swift-recon compatible for servers without
 storage policies

Swift recon introduced a new key for storage policies, and the CLI
expected this key in the server response. However, if one updates the
CLI but not yet the server, an exception will be raised, because there
is no default value and no check if the key is included in the
response.

This change checks if the policies key is included in the response and
updates one test to ensure backward compatibility.

Closes-Bug: 1453599
Change-Id: I7c7a90f9933bec2ab45595df9dc600a6cba65666
---
 swift/cli/recon.py          |  9 ++++-----
 test/unit/cli/test_recon.py | 13 ++++++++-----
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/swift/cli/recon.py b/swift/cli/recon.py
index a0fcdf7835..f57f75c22a 100755
--- a/swift/cli/recon.py
+++ b/swift/cli/recon.py
@@ -770,11 +770,10 @@ class SwiftRecon(object):
             objq[url] = response['objects']
             conq[url] = response['containers']
             acctq[url] = response['accounts']
-            if response['policies']:
-                for key in response['policies']:
-                    pkey = "objects_%s" % key
-                    stats.setdefault(pkey, {})
-                    stats[pkey][url] = response['policies'][key]['objects']
+            for key in response.get('policies', {}):
+                pkey = "objects_%s" % key
+                stats.setdefault(pkey, {})
+                stats[pkey][url] = response['policies'][key]['objects']
         stats.update({"objects": objq, "containers": conq, "accounts": acctq})
         for item in stats:
             if len(stats[item]) > 0:
diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py
index 0b6ffd7a33..dd53ae9d54 100644
--- a/test/unit/cli/test_recon.py
+++ b/test/unit/cli/test_recon.py
@@ -246,7 +246,8 @@ class TestRecon(unittest.TestCase):

     def test_quarantine_check(self):
         hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020),
-                 ('127.0.0.1', 6030), ('127.0.0.1', 6040)]
+                 ('127.0.0.1', 6030), ('127.0.0.1', 6040),
+                 ('127.0.0.1', 6050)]
         # sample json response from http://:/recon/quarantined
         responses = {6010: {'accounts': 0, 'containers': 0, 'objects': 1,
                             'policies': {'0': {'objects': 0},
                                          '1': {'objects': 1}}},
                      6020: {'accounts': 1, 'containers': 1, 'objects': 3,
                             'policies': {'0': {'objects': 1},
                                          '1': {'objects': 2}}},
                      6030: {'accounts': 2, 'containers': 2, 'objects': 5,
                             'policies': {'0': {'objects': 2},
                                          '1': {'objects': 3}}},
                      6040: {'accounts': 3, 'containers': 3, 'objects': 7,
                             'policies': {'0': {'objects': 3},
                                          '1': {'objects': 4}}},
+                     # A server without storage policies enabled
+                     6050: {'accounts': 0, 'containers': 0, 'objects': 4}}
         #
         expected = {'objects_0': (0, 3, 1.5, 6, 0.0, 0, 4),
                     'objects_1': (1, 4, 2.5, 10, 0.0, 0, 4),
                     'objects': (1,
7, 4.0, 16, 0.0, 0, 4),
-                    'accounts': (0, 3, 1.5, 6, 0.0, 0, 4),
-                    'containers': (0, 3, 1.5, 6, 0.0, 0, 4)}
+                    'objects': (1, 7, 4.0, 20, 0.0, 0, 5),
+                    'accounts': (0, 3, 1.2, 6, 0.0, 0, 5),
+                    'containers': (0, 3, 1.2, 6, 0.0, 0, 5)}

         def mock_scout_quarantine(app, host):
             url = 'http://%s:%s/recon/quarantined' % host

From 8b4af92dac813d707d124b3be37df586260fe74a Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Mon, 1 Jun 2015 17:57:25 +0100
Subject: [PATCH 51/98] Tighten up overall metadata size tests

Overall metadata size constraint is enforced in the container and
account backends as well as in the proxy controllers. Whereas the
proxy controller can check that constraints are not exceeded by a
single PUT or POST request, the backend checks that constraints are
not exceeded by the aggregate of all PUTs and POSTs.

The current functional tests only exercise the proxy controller
checks, since they test for a 400 when sending excessive metadata in a
single POST. This patch adds a test for a 400 when a single metadata
item in a POST causes the backend aggregate constraints check to go
over limit.

The extra coverage of the new assertions can be seen by modifying
swift/common/db.DatabaseBroker.validate_metadata() to always return
None immediately - only the new assertions fail when functests are
run.

Change-Id: I1489e29686013cbd3d70283d8756b548aea3c2e1
---
 test/functional/test_account.py   | 8 ++++++++
 test/functional/test_container.py | 8 ++++++++
 2 files changed, 16 insertions(+)

diff --git a/test/functional/test_account.py b/test/functional/test_account.py
index 30a8e74184..d0d18c0529 100755
--- a/test/functional/test_account.py
+++ b/test/functional/test_account.py
@@ -827,11 +827,19 @@ class TestAccount(unittest.TestCase):
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 204)
+        # this POST includes metadata size that is over limit
         headers['X-Account-Meta-k'] = \
             'v' * (self.max_meta_overall_size - size)
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 400)
+        # this last POST would be ok by itself but takes the aggregate
+        # backend metadata size over limit
+        headers = {'X-Account-Meta-k':
+                   'v' * (self.max_meta_overall_size - size)}
+        resp = retry(post, headers)
+        resp.read()
+        self.assertEqual(resp.status, 400)

 class TestAccountInNonDefaultDomain(unittest.TestCase):
diff --git a/test/functional/test_container.py b/test/functional/test_container.py
index d7896a42e7..5de866b97e 100755
--- a/test/functional/test_container.py
+++ b/test/functional/test_container.py
@@ -449,11 +449,19 @@ class TestContainer(unittest.TestCase):
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 204)
+        # this POST includes metadata size that is over limit
         headers['X-Container-Meta-k'] = \
             'v' * (self.max_meta_overall_size - size)
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 400)
+        # this last POST would be ok by itself but takes the aggregate
+        # backend metadata size over limit
+        headers = {'X-Container-Meta-k':
+                   'v' * (self.max_meta_overall_size - size)}
+        resp = retry(post, headers)
+        resp.read()
+        self.assertEqual(resp.status, 400)

     def test_public_container(self):
         if tf.skip:

From 1bef06eec8f5f780914ac701d63f9c498b29119b Mon Sep 17 00:00:00 2001
From: Michael Barton
Date: Sun, 31 May 2015 23:10:15 +0000
Subject: [PATCH 52/98] Don't quarantine on read_metadata ENOENT

An operation that removes an existing .ts or .meta out from under
another concurrent operation at the right point can cause the whole
object to be needlessly
quarantined.

Closes-Bug: #1451520
Change-Id: I37d660199e54411d0610889f9ee230b13747244b
---
 swift/obj/diskfile.py        |  8 ++++++-
 test/unit/obj/test_server.py | 45 ++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+), 1 deletion(-)

diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 3920315551..6a1b37c870 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -117,13 +117,17 @@ def read_metadata(fd):
             metadata += xattr.getxattr(fd, '%s%s' % (METADATA_KEY, (key or '')))
             key += 1
-    except IOError as e:
+    except (IOError, OSError) as e:
         for err in 'ENOTSUP', 'EOPNOTSUPP':
             if hasattr(errno, err) and e.errno == getattr(errno, err):
                 msg = "Filesystem at %s does not support xattr" % \
                     _get_filename(fd)
                 logging.exception(msg)
                 raise DiskFileXattrNotSupported(e)
+        if e.errno == errno.ENOENT:
+            raise DiskFileNotExist()
+        # TODO: we might want to re-raise errors that don't denote a missing
+        # xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
     return pickle.loads(metadata)
@@ -1590,6 +1594,8 @@ class DiskFile(object):
         # file if we have one
         try:
             return read_metadata(source)
+        except (DiskFileXattrNotSupported, DiskFileNotExist):
+            raise
         except Exception as err:
             raise self._quarantine(
                 quarantine_filename,
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 4c669a874c..fe9ac5794f 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -4860,6 +4860,51 @@ class TestObjectController(unittest.TestCase):
             self.assertEquals(resp.status_int, 503)
         self.assertFalse(os.path.isdir(object_dir))

+    def test_race_doesnt_quarantine(self):
+        existing_timestamp = normalize_timestamp(time())
+        delete_timestamp = normalize_timestamp(time() + 1)
+        put_timestamp = normalize_timestamp(time() + 2)
+
+        # make a .ts
+        req = Request.blank(
+            '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
+            headers={'X-Timestamp': existing_timestamp})
+        req.get_response(self.object_controller)
+
+        # force a PUT between the listdir and read_metadata of a DELETE
+        put_once = [False]
+        orig_listdir = os.listdir
+
+        def mock_listdir(path):
+            listing = orig_listdir(path)
+            if not put_once[0]:
+                put_once[0] = True
+                req = Request.blank(
+                    '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
+                    headers={'X-Timestamp': put_timestamp,
+                             'Content-Length': '9',
+                             'Content-Type': 'application/octet-stream'})
+                req.body = 'some data'
+                resp = req.get_response(self.object_controller)
+                self.assertEquals(resp.status_int, 201)
+            return listing
+
+        with mock.patch('os.listdir', mock_listdir):
+            req = Request.blank(
+                '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'},
+                headers={'X-Timestamp': delete_timestamp})
+            resp = req.get_response(self.object_controller)
+            self.assertEquals(resp.status_int, 404)
+
+        qdir = os.path.join(self.testdir, 'sda1', 'quarantined')
+        self.assertFalse(os.path.exists(qdir))
+
+        req = Request.blank('/sda1/p/a/c/o',
+                            environ={'REQUEST_METHOD': 'HEAD'})
+        resp = req.get_response(self.object_controller)
+        self.assertEquals(resp.status_int, 200)
+        self.assertEquals(resp.headers['X-Timestamp'], put_timestamp)

 @patch_policies(test_policies)
 class TestObjectServer(unittest.TestCase):

From 2759a6f159d4a90e4ad4c8e9e479085d195f8d14 Mon Sep 17 00:00:00 2001
From: Pradeep Kumar Singh
Date: Wed, 13 May 2015 21:18:02 +0530
Subject: [PATCH 53/98] Handle Disk IO error Exception in swift account
 auditor

The Swift account auditor fails to quarantine a corrupt db when a disk
I/O error occurs. This patch fixes that by handling the disk I/O error
exception.
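
For illustration only, the kind of message-based classification this
patch extends can be sketched as below. This is a simplified stand-in
for the hint-selection logic in swift.common.db, not the exact code;
classify_exception is a hypothetical name used only here:

    import sqlite3

    def classify_exception(exc_value):
        # Map known sqlite3 failure messages to the hint used when
        # quarantining a broken database. None means "unrecognized",
        # in which case the real code re-raises the exception instead
        # of quarantining.
        msg = str(exc_value)
        if 'malformed' in msg:
            return 'malformed'
        elif 'file is encrypted or is not a database' in msg:
            return 'corrupted'
        elif 'disk I/O error' in msg:
            return 'disk error while accessing'
        return None

    # classify_exception(sqlite3.OperationalError('disk I/O error'))
    # -> 'disk error while accessing'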
Closes-Bug: 1169189
Change-Id: I031ee2a5775e4a88d4fb00d972d553936147c42e
---
 swift/common/db.py          |  2 ++
 test/unit/common/test_db.py | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/swift/common/db.py b/swift/common/db.py
index c857bf1054..80a855ac1c 100644
--- a/swift/common/db.py
+++ b/swift/common/db.py
@@ -328,6 +328,8 @@ class DatabaseBroker(object):
             exc_hint = 'malformed'
         elif 'file is encrypted or is not a database' in str(exc_value):
             exc_hint = 'corrupted'
+        elif 'disk I/O error' in str(exc_value):
+            exc_hint = 'disk error while accessing'
         else:
             raise exc_type, exc_value, exc_traceback
         prefix_path = os.path.dirname(self.db_dir)
diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py
index 6472f36b06..cf19730947 100644
--- a/test/unit/common/test_db.py
+++ b/test/unit/common/test_db.py
@@ -16,6 +16,7 @@
 """Tests for swift.common.db"""

 import os
+import sys
 import unittest
 from tempfile import mkdtemp
 from shutil import rmtree, copy
@@ -1200,6 +1201,29 @@ class TestDatabaseBroker(unittest.TestCase):
             message = str(e)
         self.assertEqual(message, '400 Bad Request')

+    def test_possibly_quarantine_disk_error(self):
+        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
+        mkdirs(dbpath)
+        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
+        broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
+        broker.db_type = 'test'
+
+        def stub():
+            raise sqlite3.OperationalError('disk I/O error')
+
+        try:
+            stub()
+        except Exception:
+            try:
+                broker.possibly_quarantine(*sys.exc_info())
+            except Exception as exc:
+                self.assertEquals(
+                    str(exc),
+                    'Quarantined %s to %s due to disk error '
+                    'while accessing database' %
+                    (dbpath, qpath))
+            else:
+                self.fail('Expected an exception to be raised')

 if __name__ == '__main__':
     unittest.main()

From f1f4bb30cd8ad930ddb7a232b2744b48e35a0480 Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Tue, 2 Jun 2015 19:51:39 +0000
Subject: [PATCH 54/98] Fix testing issues

When functional tests are run in tox and an exception is raised while
connecting to Swift (for example: Swift not running, or the
python-keystoneclient package used by python-swiftclient missing),
zero tests are executed, but tox still returns success.

An exception is raised during tests, caused by a missing
python-keystoneclient in python-swiftclient. Instead of adding
python-keystoneclient as a dependency in python-swiftclient the
package is added to the test-requirements.txt in Swift itself. Note
that adding python-keystoneclient to the test-requirements in
python-swiftclient is not sufficient (it's already in there).

The exception in setup_package() is caught by the
openstack.nose_plugin, thus disabling this plugin for now as well.

Also fixing two test errors seen on the gate regarding the tempurl
middleware.

There was also an update to tox: environment variables are no longer
passed with versions >= 2.0
(http://tox.readthedocs.org/en/latest/changelog.html). Swift test
environment variables have been added to the passenv to re-enable the
former behavior, as well as environment variables required to pass
proxy settings. This also led to skipped tempauth tests; together with
the missing python-keystoneclient, no tests were executed.
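
The two tempurl test fixes below guard account-owner key assertions
with the requires_acls decorator. As a rough sketch of what such a
guard does (simplified; the real helper lives in test.functional and
probes the capabilities of the cluster under test, so the flag below
is only a stand-in):

    import functools
    from nose import SkipTest

    ACLS_ENABLED = False  # stand-in for a real cluster capability probe

    def requires_acls(f):
        # Skip ACL-dependent tests when the cluster under test has no
        # usable ACL-capable auth system.
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            if not ACLS_ENABLED:
                raise SkipTest('ACLs not enabled in this cluster')
            return f(*args, **kwargs)
        return wrapper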
Related-Bug: 1461440 Related-Bug: 1455102 Co-Authored-By: Alistair Coles Change-Id: Ideea071017d04912c60ed0bc76532adbb446c31d --- doc/source/development_guidelines.rst | 15 +++++++++++++-- test-requirements.txt | 2 +- test/functional/tests.py | 4 +++- tox.ini | 7 +------ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 1da8457682..29d9f35a41 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -44,13 +44,24 @@ To execute the unit tests: If you installed using: `cd ~/swift; sudo python setup.py develop`, you may need to do: `cd ~/swift; sudo chown -R swift:swift swift.egg-info` prior to running tox. - If you ever encounter DistributionNotFound, try to use `tox --recreate` - or removing .tox directory to force tox to recreate the dependency list * Optionally, run only specific tox builds: - `tox -e pep8,py27` +.. note:: + As of tox version 2.0.0, most environment variables are not automatically + passed to the test environment. Swift's tox.ini overrides this default + behavior so that variable names matching SWIFT_* and *_proxy will be passed, + but you may need to run tox --recreate for this to take effect after + upgrading from tox<2.0.0. + + Conversely, if you do not want those environment variables to be passed to + the test environment then you will need to unset them before calling tox. + + Also, if you ever encounter DistributionNotFound, try to use `tox --recreate` + or remove the .tox directory to force tox to recreate the dependency list. + The functional tests may be executed against a :doc:`development_saio` or other running Swift cluster using the command: diff --git a/test-requirements.txt b/test-requirements.txt index 8c617baacb..b3f7eed5be 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,9 +7,9 @@ hacking>=0.8.0,<0.9 coverage nose nosexcover -openstack.nose_plugin nosehtmloutput oslosphinx sphinx>=1.1.2,<1.2 mock>=1.0 python-swiftclient +python-keystoneclient>=1.3.0 diff --git a/test/functional/tests.py b/test/functional/tests.py index 63d6aae91e..aa3d440c2b 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -31,7 +31,7 @@ from nose import SkipTest from swift.common.http import is_success, is_client_error from test.functional import normalized_urls, load_constraint, cluster_info -from test.functional import check_response, retry +from test.functional import check_response, retry, requires_acls import test.functional as tf from test.functional.swift_test_client import Account, Connection, File, \ ResponseError @@ -3136,6 +3136,7 @@ class TestContainerTempurl(Base): parms=parms) self.assert_status([401]) + @requires_acls def test_tempurl_keys_visible_to_account_owner(self): if not tf.cluster_info.get('tempauth'): raise SkipTest('TEMP AUTH SPECIFIC TEST') @@ -3143,6 +3144,7 @@ class TestContainerTempurl(Base): self.assertEqual(metadata.get('tempurl_key'), self.env.tempurl_key) self.assertEqual(metadata.get('tempurl_key2'), self.env.tempurl_key2) + @requires_acls def test_tempurl_keys_hidden_from_acl_readonly(self): if not tf.cluster_info.get('tempauth'): raise SkipTest('TEMP AUTH SPECIFIC TEST') diff --git a/tox.ini b/tox.ini index de72f26950..8b7061a026 100644 --- a/tox.ini +++ b/tox.ini @@ -7,18 +7,13 @@ skipsdist = True usedevelop = True install_command = pip install --allow-external netifaces --allow-insecure netifaces -U {opts} {packages} setenv = VIRTUAL_ENV={envdir} - NOSE_WITH_OPENSTACK=1 
- NOSE_OPENSTACK_COLOR=1 - NOSE_OPENSTACK_RED=0.05 - NOSE_OPENSTACK_YELLOW=0.025 - NOSE_OPENSTACK_SHOW_ELAPSED=1 - NOSE_OPENSTACK_STDOUT=1 NOSE_WITH_COVERAGE=1 NOSE_COVER_BRANCHES=1 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = nosetests {posargs:test/unit} +passenv = SWIFT_* *_proxy [testenv:cover] setenv = VIRTUAL_ENV={envdir} From 4f2ed8bcd0468f3b69d5fded274d8d6b02ac3d10 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 20 Mar 2015 09:56:30 -0700 Subject: [PATCH 55/98] EC: support multiple ranges for GET requests This commit lets clients receive multipart/byteranges responses (see RFC 7233, Appendix A) for erasure-coded objects. Clients can already do this for replicated objects, so this brings EC closer to feature parity (ha!). GetOrHeadHandler got a base class extracted from it that treats an HTTP response as a sequence of byte-range responses. This way, it can continue to yield whole fragments, not just N-byte pieces of the raw HTTP response, since an N-byte piece of a multipart/byteranges response is pretty much useless. There are a couple of bonus fixes in here, too. For starters, download resuming now works on multipart/byteranges responses. Before, it only worked on 200 responses or 206 responses for a single byte range. Also, BufferedHTTPResponse grew a readline() method. Also, the MIME response for replicated objects got tightened up a little. Before, it had some leading and trailing CRLFs which, while allowed by RFC 7233, provide no benefit. Now, both replicated and EC multipart/byteranges avoid extraneous bytes. This let me re-use the Content-Length calculation in swob instead of having to either hack around it or add extraneous whitespace to match. Change-Id: I16fc65e0ec4e356706d327bdb02a3741e36330a0 --- swift/common/bufferedhttp.py | 43 ++ swift/common/exceptions.py | 4 + swift/common/swob.py | 45 +- swift/common/utils.py | 178 ++++++- swift/proxy/controllers/base.py | 404 +++++++++++----- swift/proxy/controllers/obj.py | 569 +++++++++++++++++------ test/unit/common/test_swob.py | 5 +- test/unit/common/test_utils.py | 164 ++++++- test/unit/proxy/controllers/test_base.py | 18 +- test/unit/proxy/controllers/test_obj.py | 15 +- test/unit/proxy/test_server.py | 540 ++++++++++++++++++++- 11 files changed, 1688 insertions(+), 297 deletions(-) diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index 2b3ec1609d..c7acccc27c 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -62,6 +62,7 @@ class BufferedHTTPResponse(HTTPResponse): self.chunk_left = _UNKNOWN # bytes left to read in current chunk self.length = _UNKNOWN # number of bytes left in response self.will_close = _UNKNOWN # conn will close at end of response + self._readline_buffer = '' def expect_response(self): if self.fp: @@ -79,6 +80,48 @@ class BufferedHTTPResponse(HTTPResponse): self.msg = HTTPMessage(self.fp, 0) self.msg.fp = None + def read(self, amt=None): + if not self._readline_buffer: + return HTTPResponse.read(self, amt) + + if amt is None: + # Unbounded read: send anything we have buffered plus whatever + # is left. 
+ buffered = self._readline_buffer + self._readline_buffer = '' + return buffered + HTTPResponse.read(self, amt) + elif amt <= len(self._readline_buffer): + # Bounded read that we can satisfy entirely from our buffer + res = self._readline_buffer[:amt] + self._readline_buffer = self._readline_buffer[amt:] + return res + else: + # Bounded read that wants more bytes than we have + smaller_amt = amt - len(self._readline_buffer) + buf = self._readline_buffer + self._readline_buffer = '' + return buf + HTTPResponse.read(self, smaller_amt) + + def readline(self, size=1024): + # You'd think Python's httplib would provide this, but it doesn't. + # It does, however, provide a comment in the HTTPResponse class: + # + # # XXX It would be nice to have readline and __iter__ for this, + # # too. + # + # Yes, it certainly would. + while ('\n' not in self._readline_buffer + and len(self._readline_buffer) < size): + read_size = size - len(self._readline_buffer) + chunk = HTTPResponse.read(self, read_size) + if not chunk: + break + self._readline_buffer += chunk + + line, newline, rest = self._readline_buffer.partition('\n') + self._readline_buffer = rest + return line + newline + def nuke_from_orbit(self): """ Terminate the socket with extreme prejudice. diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py index b1edadee39..abc09bf36b 100644 --- a/swift/common/exceptions.py +++ b/swift/common/exceptions.py @@ -57,6 +57,10 @@ class SuffixSyncError(SwiftException): pass +class RangeAlreadyComplete(SwiftException): + pass + + class DiskFileError(SwiftException): pass diff --git a/swift/common/swob.py b/swift/common/swob.py index c2e3afb4e8..39f0c0e3cb 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -1089,13 +1089,14 @@ def content_range_header(start, stop, size): def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen): for start, stop in ranges: - yield ''.join(['\r\n--', boundary, '\r\n', + yield ''.join(['--', boundary, '\r\n', 'Content-Type: ', content_type, '\r\n']) yield content_range_header(start, stop, size) + '\r\n\r\n' sub_iter = sub_iter_gen(start, stop) for chunk in sub_iter: yield chunk - yield '\r\n--' + boundary + '--\r\n' + yield '\r\n' + yield '--' + boundary + '--' class Response(object): @@ -1177,21 +1178,37 @@ class Response(object): self.content_type = ''.join(['multipart/byteranges;', 'boundary=', self.boundary]) - # This section calculate the total size of the targeted response - # The value 12 is the length of total bytes of hyphen, new line - # form feed for each section header. The value 8 is the length of - # total bytes of hyphen, new line, form feed characters for the - # closing boundary which appears only once - section_header_fixed_len = 12 + (len(self.boundary) + - len('Content-Type: ') + - len(content_type) + - len('Content-Range: bytes ')) + # This section calculates the total size of the response. 
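+            # Each satisfiable range becomes one MIME part, framed as + # --boundary\r\n, then Content-Type: <type>\r\n, then + # Content-Range: bytes <start>-<end>/<size>\r\n, then a blank + # \r\n line, then the range's bytes and a trailing \r\n; a bare + # --boundary-- closes the message, so there are no leading or + # trailing CRLFs left to account for.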
+ section_header_fixed_len = ( + # --boundary\r\n + len(self.boundary) + 4 + # Content-Type: <type>\r\n + + len('Content-Type: ') + len(content_type) + 2 + # Content-Range: <value>\r\n; <value> accounted for later + + len('Content-Range: ') + 2 + # \r\n at end of headers + + 2) + body_size = 0 for start, end in ranges: body_size += section_header_fixed_len - body_size += len(str(start) + '-' + str(end - 1) + '/' + - str(content_size)) + (end - start) - body_size += 8 + len(self.boundary) + + # length of the value of Content-Range, not including the \r\n + # since that's already accounted for + cr = content_range_header_value(start, end, content_size) + body_size += len(cr) + + # the actual bytes (note: this range is half-open, i.e. begins + # with byte <start> and ends with byte <end - 1>, so there's no + # fencepost error here) + body_size += (end - start) + + # \r\n prior to --boundary + body_size += 2 + + # --boundary-- terminates the message + body_size += len(self.boundary) + 4 + self.content_length = body_size self.content_range = None return content_size, content_type diff --git a/swift/common/utils.py b/swift/common/utils.py index 11a97d126b..856065a680 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -25,6 +25,7 @@ import operator import os import pwd import re +import rfc822 import sys import threading as stdlib_threading import time @@ -3181,7 +3182,7 @@ def parse_content_type(content_type): ('text/plain', [('charset, 'UTF-8'), ('level', '1')]) :param content_type: content_type to parse - :returns: a typle containing (content type, list of k, v parameter tuples) + :returns: a tuple containing (content type, list of k, v parameter tuples) """ parm_list = [] if ';' in content_type: @@ -3313,7 +3314,9 @@ class _MultipartMimeFileLikeObject(object): def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): """ Given a multi-part-mime-encoded input file object and boundary, - yield file-like objects for each part. + yield file-like objects for each part. Note that this does not + split each part into headers and body; the caller is responsible + for doing that if necessary. :param wsgi_input: The file-like object to read from. :param boundary: The mime boundary to separate new file-like @@ -3324,6 +3327,9 @@ def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): boundary = '--' + boundary blen = len(boundary) + 2 # \r\n got = wsgi_input.readline(blen) + while got == '\r\n': + got = wsgi_input.readline(blen) + if got.strip() != boundary: raise swift.common.exceptions.MimeInvalid( 'invalid starting boundary: wanted %r, got %r', (boundary, got)) @@ -3338,6 +3344,174 @@ def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): input_buffer = it.input_buffer +def mime_to_document_iters(input_file, boundary, read_chunk_size=4096): + """ + Takes a file-like object containing a multipart MIME document and + returns an iterator of (headers, body-file) tuples. + + :param input_file: file-like object with the MIME doc in it + :param boundary: MIME boundary, sans dashes + (e.g.
"divider", not "--divider") + :param read_chunk_size: size of strings read via input_file.read() + """ + doc_files = iter_multipart_mime_documents(input_file, boundary, + read_chunk_size) + for i, doc_file in enumerate(doc_files): + # this consumes the headers and leaves just the body in doc_file + headers = rfc822.Message(doc_file, 0) + yield (headers, doc_file) + + +def document_iters_to_multipart_byteranges(ranges_iter, boundary): + """ + Takes an iterator of range iters and yields a multipart/byteranges MIME + document suitable for sending as the body of a multi-range 206 response. + + See document_iters_to_http_response_body for parameter descriptions. + """ + + divider = "--" + boundary + "\r\n" + terminator = "--" + boundary + "--" + + for range_spec in ranges_iter: + start_byte = range_spec["start_byte"] + end_byte = range_spec["end_byte"] + entity_length = range_spec.get("entity_length", "*") + content_type = range_spec["content_type"] + part_iter = range_spec["part_iter"] + + part_header = ''.join(( + divider, + "Content-Type: ", str(content_type), "\r\n", + "Content-Range: ", "bytes %d-%d/%s\r\n" % ( + start_byte, end_byte, entity_length), + "\r\n" + )) + yield part_header + + for chunk in part_iter: + yield chunk + yield "\r\n" + yield terminator + + +def document_iters_to_http_response_body(ranges_iter, boundary, multipart, + logger): + """ + Takes an iterator of range iters and turns it into an appropriate + HTTP response body, whether that's multipart/byteranges or not. + + This is almost, but not quite, the inverse of + http_response_to_document_iters(). This function only yields chunks of + the body, not any headers. + + :param ranges_iter: an iterator of dictionaries, one per range. + Each dictionary must contain at least the following key: + "part_iter": iterator yielding the bytes in the range + + Additionally, if multipart is True, then the following other keys + are required: + + "start_byte": index of the first byte in the range + "end_byte": index of the last byte in the range + "content_type": value for the range's Content-Type header + + Finally, there is one optional key that is used in the + multipart/byteranges case: + + "entity_length": length of the requested entity (not necessarily + equal to the response length). If omitted, "*" will be used. + + Each part_iter will be exhausted prior to calling next(ranges_iter). + + :param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not + "--boundary"). + :param multipart: True if the response should be multipart/byteranges, + False otherwise. This should be True if and only if you have 2 or + more ranges. + :param logger: a logger + """ + if multipart: + return document_iters_to_multipart_byteranges(ranges_iter, boundary) + else: + try: + response_body_iter = next(ranges_iter)['part_iter'] + except StopIteration: + return '' + + # We need to make sure ranges_iter does not get garbage-collected + # before response_body_iter is exhausted. The reason is that + # ranges_iter has a finally block that calls close_swift_conn, and + # so if that finally block fires before we read response_body_iter, + # there's nothing there. 
+ def string_along(useful_iter, useless_iter_iter, logger): + for x in useful_iter: + yield x + + try: + next(useless_iter_iter) + except StopIteration: + pass + else: + logger.warn("More than one part in a single-part response?") + + return string_along(response_body_iter, ranges_iter, logger) + + +def multipart_byteranges_to_document_iters(input_file, boundary, + read_chunk_size=4096): + """ + Takes a file-like object containing a multipart/byteranges MIME document + (see RFC 7233, Appendix A) and returns an iterator of (first-byte, + last-byte, length, document-headers, body-file) 5-tuples. + + :param input_file: file-like object with the MIME doc in it + :param boundary: MIME boundary, sans dashes + (e.g. "divider", not "--divider") + :param read_chunk_size: size of strings read via input_file.read() + """ + for headers, body in mime_to_document_iters(input_file, boundary, + read_chunk_size): + first_byte, last_byte, length = parse_content_range( + headers.getheader('content-range')) + yield (first_byte, last_byte, length, headers.items(), body) + + +def http_response_to_document_iters(response, read_chunk_size=4096): + """ + Takes a successful object-GET HTTP response and turns it into an + iterator of (first-byte, last-byte, length, headers, body-file) + 5-tuples. + + The response must either be a 200 or a 206; if you feed in a 204 or + something similar, this probably won't work. + + :param response: HTTP response, like from bufferedhttp.http_connect(), + not a swob.Response. + """ + if response.status == 200: + # Single "range" that's the whole object + content_length = int(response.getheader('Content-Length')) + return iter([(0, content_length - 1, content_length, + response.getheaders(), response)]) + + content_type, params_list = parse_content_type( + response.getheader('Content-Type')) + if content_type != 'multipart/byteranges': + # Single range; no MIME framing, just the bytes. The start and end + # byte indices are in the Content-Range header. + start, end, length = parse_content_range( + response.getheader('Content-Range')) + return iter([(start, end, length, response.getheaders(), response)]) + else: + # Multiple ranges; the response body is a multipart/byteranges MIME + # document, and we have to parse it using the MIME boundary + # extracted from the Content-Type header. + params = dict(params_list) + return multipart_byteranges_to_document_iters( + response, params['boundary'], read_chunk_size) + + #: Regular expression to match form attributes. 
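To see how these helpers compose, here is a minimal sketch (Python 2, to match this series); the FakeResponse stand-in is hypothetical, along the lines of the one the unit tests below define, and is not part of Swift's API:

    from StringIO import StringIO

    from swift.common.utils import http_response_to_document_iters

    class FakeResponse(object):
        # Hypothetical stand-in for a bufferedhttp response: just
        # enough of the httplib interface for the helper to consume.
        def __init__(self, status, headers, body):
            self.status = status
            self._headers = dict((k.lower(), v) for k, v in headers.items())
            self._body = StringIO(body)

        def getheader(self, name):
            return str(self._headers.get(name.lower(), ''))

        def getheaders(self):
            return self._headers.items()

        def read(self, amt=None):
            return self._body.read(amt)

        def readline(self, size=None):
            return self._body.readline(size)

    resp = FakeResponse(200, {'Content-Length': '10',
                              'Content-Type': 'text/plain'}, 'sandwiches')
    parts = http_response_to_document_iters(resp)
    first_byte, last_byte, length, headers, body = next(parts)
    print first_byte, last_byte, length, body.read()
    # prints: 0 9 10 sandwiches

A 206 exercises the other branches: with a Content-Range header the function yields a single tuple for that range, and with a multipart/byteranges body it parses the MIME parts using the boundary from the Content-Type header.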
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)') diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 2fb7cd945e..953a85af58 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -28,7 +28,6 @@ import os import time import functools import inspect -import logging import operator from sys import exc_info from swift import gettext_ as _ @@ -40,10 +39,11 @@ from eventlet.timeout import Timeout from swift.common.wsgi import make_pre_authed_env from swift.common.utils import Timestamp, config_true_value, \ public, split_path, list_from_csv, GreenthreadSafeIterator, \ - GreenAsyncPile, quorum_size, parse_content_range + GreenAsyncPile, quorum_size, parse_content_type, \ + http_response_to_document_iters, document_iters_to_http_response_body from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \ - ConnectionTimeout + ConnectionTimeout, RangeAlreadyComplete from swift.common.http import is_informational, is_success, is_redirection, \ is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \ HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \ @@ -613,10 +613,9 @@ def bytes_to_skip(record_size, range_start): return (record_size - (range_start % record_size)) % record_size -class GetOrHeadHandler(object): - +class ResumingGetter(object): def __init__(self, app, req, server_type, node_iter, partition, path, - backend_headers, client_chunk_size=None): + backend_headers, client_chunk_size=None, newest=None): self.app = app self.node_iter = node_iter self.server_type = server_type @@ -632,7 +631,10 @@ class GetOrHeadHandler(object): self.req_method = req.method self.req_path = req.path self.req_query_string = req.query_string - self.newest = config_true_value(req.headers.get('x-newest', 'f')) + if newest is None: + self.newest = config_true_value(req.headers.get('x-newest', 'f')) + else: + self.newest = newest # populated when finding source self.statuses = [] @@ -640,6 +642,9 @@ class GetOrHeadHandler(object): self.bodies = [] self.source_headers = [] + # populated from response headers + self.start_byte = self.end_byte = self.length = None + def fast_forward(self, num_bytes): """ Will skip num_bytes into the current ranges. @@ -648,57 +653,89 @@ class GetOrHeadHandler(object): this request. This will change the Range header so that the next req will start where it left off. 
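For example, resuming 'bytes=0-49' after 10 bytes have been sent rewrites the header to 'bytes=10-49', while a suffix request such as 'bytes=-50' becomes 'bytes=-40'.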
- :raises NotImplementedError: if this is a multirange request :raises ValueError: if invalid range header :raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes - > end of range + > end of range + 1 + :raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1 """ if 'Range' in self.backend_headers: req_range = Range(self.backend_headers['Range']) - if len(req_range.ranges) > 1: - raise NotImplementedError() - begin, end = req_range.ranges.pop() + begin, end = req_range.ranges[0] if begin is None: # this is a -50 range req (last 50 bytes of file) end -= num_bytes else: begin += num_bytes - if end and begin > end: + if end and begin == end + 1: + # we sent out exactly the first range's worth of bytes, so + # we're done with it + raise RangeAlreadyComplete() + elif end and begin > end: raise HTTPRequestedRangeNotSatisfiable() - req_range.ranges = [(begin, end)] + elif end and begin: + req_range.ranges = [(begin, end)] + req_range.ranges[1:] + elif end: + req_range.ranges = [(None, end)] + req_range.ranges[1:] + else: + req_range.ranges = [(begin, None)] + req_range.ranges[1:] + self.backend_headers['Range'] = str(req_range) else: self.backend_headers['Range'] = 'bytes=%d-' % num_bytes - def learn_size_from_content_range(self, start, end): + def pop_range(self): + """ + Remove the first byterange from our Range header. + + This is used after a byterange has been completely sent to the + client; this way, should we need to resume the download from another + object server, we do not re-fetch byteranges that the client already + has. + + If we have no Range header, this is a no-op. + """ + if 'Range' in self.backend_headers: + req_range = Range(self.backend_headers['Range']) + begin, end = req_range.ranges.pop(0) + if len(req_range.ranges) > 0: + self.backend_headers['Range'] = str(req_range) + else: + self.backend_headers.pop('Range') + + def learn_size_from_content_range(self, start, end, length): """ If client_chunk_size is set, makes sure we yield things starting on chunk boundaries based on the Content-Range header in the response. - Sets our first Range header to the value learned from the - Content-Range header in the response; if we were given a + Sets our Range header's first byterange to the value learned from + the Content-Range header in the response; if we were given a fully-specified range (e.g. "bytes=123-456"), this is a no-op. If we were given a half-specified range (e.g. "bytes=123-" or "bytes=-456"), then this changes the Range header to a semantically-equivalent one *and* it lets us resume on a proper boundary instead of just in the middle of a piece somewhere. - - If the original request is for more than one range, this does not - affect our backend Range header, since we don't support resuming one - of those anyway. 
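(With the rewrite below, only the first byterange is replaced and any byteranges after it are preserved, which is part of what lets multi-range downloads resume.)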
""" + if length == 0: + return + if self.client_chunk_size: self.skip_bytes = bytes_to_skip(self.client_chunk_size, start) if 'Range' in self.backend_headers: - req_range = Range(self.backend_headers['Range']) + try: + req_range = Range(self.backend_headers['Range']) + new_ranges = [(start, end)] + req_range.ranges[1:] + except ValueError: + new_ranges = [(start, end)] + else: + new_ranges = [(start, end)] - if len(req_range.ranges) > 1: - return - - self.backend_headers['Range'] = "bytes=%d-%d" % (start, end) + self.backend_headers['Range'] = ( + "bytes=" + (",".join("%s-%s" % (s if s is not None else '', + e if e is not None else '') + for s, e in new_ranges))) def is_good_source(self, src): """ @@ -712,106 +749,183 @@ class GetOrHeadHandler(object): return True return is_success(src.status) or is_redirection(src.status) - def _make_app_iter(self, req, node, source): - """ - Returns an iterator over the contents of the source (via its read - func). There is also quite a bit of cleanup to ensure garbage - collection works and the underlying socket of the source is closed. + def response_parts_iter(self, req): + source, node = self._get_source_and_node() + it = None + if source: + it = self._get_response_parts_iter(req, node, source) + return it + + def _get_response_parts_iter(self, req, node, source): + # Someday we can replace this [mess] with python 3's "nonlocal" + source = [source] + node = [node] - :param req: incoming request object - :param source: The httplib.Response object this iterator should read - from. - :param node: The node the source is reading from, for logging purposes. - """ try: - nchunks = 0 client_chunk_size = self.client_chunk_size - bytes_consumed_from_backend = 0 node_timeout = self.app.node_timeout if self.server_type == 'Object': node_timeout = self.app.recoverable_node_timeout - buf = '' - while True: - try: - with ChunkReadTimeout(node_timeout): - chunk = source.read(self.app.object_chunk_size) - nchunks += 1 - buf += chunk - except ChunkReadTimeout: - exc_type, exc_value, exc_traceback = exc_info() - if self.newest or self.server_type != 'Object': - raise exc_type, exc_value, exc_traceback + + # This is safe; it sets up a generator but does not call next() + # on it, so no IO is performed. + parts_iter = [ + http_response_to_document_iters( + source[0], read_chunk_size=self.app.object_chunk_size)] + + def get_next_doc_part(): + while True: try: - self.fast_forward(bytes_consumed_from_backend) - except (NotImplementedError, HTTPException, ValueError): - raise exc_type, exc_value, exc_traceback - buf = '' - new_source, new_node = self._get_source_and_node() - if new_source: - self.app.exception_occurred( - node, _('Object'), - _('Trying to read during GET (retrying)'), - level=logging.ERROR, exc_info=( - exc_type, exc_value, exc_traceback)) - # Close-out the connection as best as possible. - if getattr(source, 'swift_conn', None): - close_swift_conn(source) - source = new_source - node = new_node - continue - else: - raise exc_type, exc_value, exc_traceback + # This call to next() performs IO when we have a + # multipart/byteranges response; it reads the MIME + # boundary and part headers. + # + # If we don't have a multipart/byteranges response, + # but just a 200 or a single-range 206, then this + # performs no IO, and either just returns source or + # raises StopIteration. 
+ with ChunkReadTimeout(node_timeout): + # if StopIteration is raised, it escapes and is + # handled elsewhere + start_byte, end_byte, length, headers, part = next( + parts_iter[0]) + return (start_byte, end_byte, length, headers, part) + except ChunkReadTimeout: + new_source, new_node = self._get_source_and_node() + if new_source: + self.app.exception_occurred( + node[0], _('Object'), + _('Trying to read during GET (retrying)')) + # Close-out the connection as best as possible. + if getattr(source[0], 'swift_conn', None): + close_swift_conn(source[0]) + source[0] = new_source + node[0] = new_node + # This is safe; it sets up a generator but does + # not call next() on it, so no IO is performed. + parts_iter[0] = http_response_to_document_iters( + new_source, + read_chunk_size=self.app.object_chunk_size) + else: + raise StopIteration() - if buf and self.skip_bytes: - if self.skip_bytes < len(buf): - buf = buf[self.skip_bytes:] - bytes_consumed_from_backend += self.skip_bytes - self.skip_bytes = 0 - else: - self.skip_bytes -= len(buf) - bytes_consumed_from_backend += len(buf) + def iter_bytes_from_response_part(part_file): + nchunks = 0 + buf = '' + bytes_used_from_backend = 0 + while True: + try: + with ChunkReadTimeout(node_timeout): + chunk = part_file.read(self.app.object_chunk_size) + nchunks += 1 + buf += chunk + except ChunkReadTimeout: + exc_type, exc_value, exc_traceback = exc_info() + if self.newest or self.server_type != 'Object': + raise exc_type, exc_value, exc_traceback + try: + self.fast_forward(bytes_used_from_backend) + except (HTTPException, ValueError): + raise exc_type, exc_value, exc_traceback + except RangeAlreadyComplete: + break buf = '' + new_source, new_node = self._get_source_and_node() + if new_source: + self.app.exception_occurred( + node[0], _('Object'), + _('Trying to read during GET (retrying)')) + # Close-out the connection as best as possible. + if getattr(source[0], 'swift_conn', None): + close_swift_conn(source[0]) + source[0] = new_source + node[0] = new_node + # This is safe; it just sets up a generator but + # does not call next() on it, so no IO is + # performed. + parts_iter[0] = http_response_to_document_iters( + new_source, + read_chunk_size=self.app.object_chunk_size) - if not chunk: - if buf: - with ChunkWriteTimeout(self.app.client_timeout): - bytes_consumed_from_backend += len(buf) - yield buf - buf = '' - break + try: + _junk, _junk, _junk, _junk, part_file = \ + get_next_doc_part() + except StopIteration: + # Tried to find a new node from which to + # finish the GET, but failed. There's + # nothing more to do here. 
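+                                # Bailing out here ends the body + # iterator early; the client sees a + # short read rather than an error.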
+ return + else: + raise exc_type, exc_value, exc_traceback + else: + if buf and self.skip_bytes: + if self.skip_bytes < len(buf): + buf = buf[self.skip_bytes:] + bytes_used_from_backend += self.skip_bytes + self.skip_bytes = 0 + else: + self.skip_bytes -= len(buf) + bytes_used_from_backend += len(buf) + buf = '' - if client_chunk_size is not None: - while len(buf) >= client_chunk_size: - client_chunk = buf[:client_chunk_size] - buf = buf[client_chunk_size:] - with ChunkWriteTimeout(self.app.client_timeout): - yield client_chunk - bytes_consumed_from_backend += len(client_chunk) - else: - with ChunkWriteTimeout(self.app.client_timeout): - yield buf - bytes_consumed_from_backend += len(buf) - buf = '' + if not chunk: + if buf: + with ChunkWriteTimeout( + self.app.client_timeout): + bytes_used_from_backend += len(buf) + yield buf + buf = '' + break - # This is for fairness; if the network is outpacing the CPU, - # we'll always be able to read and write data without - # encountering an EWOULDBLOCK, and so eventlet will not switch - # greenthreads on its own. We do it manually so that clients - # don't starve. - # - # The number 5 here was chosen by making stuff up. It's not - # every single chunk, but it's not too big either, so it seemed - # like it would probably be an okay choice. - # - # Note that we may trampoline to other greenthreads more often - # than once every 5 chunks, depending on how blocking our - # network IO is; the explicit sleep here simply provides a - # lower bound on the rate of trampolining. - if nchunks % 5 == 0: - sleep() + if client_chunk_size is not None: + while len(buf) >= client_chunk_size: + client_chunk = buf[:client_chunk_size] + buf = buf[client_chunk_size:] + with ChunkWriteTimeout( + self.app.client_timeout): + yield client_chunk + bytes_used_from_backend += len(client_chunk) + else: + with ChunkWriteTimeout(self.app.client_timeout): + yield buf + bytes_used_from_backend += len(buf) + buf = '' + + # This is for fairness; if the network is outpacing + # the CPU, we'll always be able to read and write + # data without encountering an EWOULDBLOCK, and so + # eventlet will not switch greenthreads on its own. + # We do it manually so that clients don't starve. + # + # The number 5 here was chosen by making stuff up. + # It's not every single chunk, but it's not too big + # either, so it seemed like it would probably be an + # okay choice. + # + # Note that we may trampoline to other greenthreads + # more often than once every 5 chunks, depending on + # how blocking our network IO is; the explicit sleep + # here simply provides a lower bound on the rate of + # trampolining. + if nchunks % 5 == 0: + sleep() + + try: + while True: + start_byte, end_byte, length, headers, part = \ + get_next_doc_part() + self.learn_size_from_content_range( + start_byte, end_byte, length) + part_iter = iter_bytes_from_response_part(part) + yield {'start_byte': start_byte, 'end_byte': end_byte, + 'entity_length': length, 'headers': headers, + 'part_iter': part_iter} + self.pop_range() + except StopIteration: + return except ChunkReadTimeout: - self.app.exception_occurred(node, _('Object'), + self.app.exception_occurred(node[0], _('Object'), _('Trying to read during GET')) raise except ChunkWriteTimeout: @@ -827,8 +941,22 @@ class GetOrHeadHandler(object): raise finally: # Close-out the connection as best as possible. 
- if getattr(source, 'swift_conn', None): - close_swift_conn(source) + if getattr(source[0], 'swift_conn', None): + close_swift_conn(source[0]) + + @property + def last_status(self): + if self.statuses: + return self.statuses[-1] + else: + return None + + @property + def last_headers(self): + if self.source_headers: + return self.source_headers[-1] + else: + return None def _get_source_and_node(self): self.statuses = [] @@ -869,7 +997,7 @@ class GetOrHeadHandler(object): self.statuses.append(HTTP_NOT_FOUND) self.reasons.append('') self.bodies.append('') - self.source_headers.append('') + self.source_headers.append([]) close_swift_conn(possible_source) else: if self.used_source_etag: @@ -883,13 +1011,13 @@ class GetOrHeadHandler(object): self.statuses.append(HTTP_NOT_FOUND) self.reasons.append('') self.bodies.append('') - self.source_headers.append('') + self.source_headers.append([]) continue self.statuses.append(possible_source.status) self.reasons.append(possible_source.reason) self.bodies.append('') - self.source_headers.append('') + self.source_headers.append(possible_source.getheaders()) sources.append((possible_source, node)) if not self.newest: # one good source is enough break @@ -923,6 +1051,44 @@ class GetOrHeadHandler(object): return source, node return None, None + +class GetOrHeadHandler(ResumingGetter): + def _make_app_iter(self, req, node, source): + """ + Returns an iterator over the contents of the source (via its read + func). There is also quite a bit of cleanup to ensure garbage + collection works and the underlying socket of the source is closed. + + :param req: incoming request object + :param source: The httplib.Response object this iterator should read + from. + :param node: The node the source is reading from, for logging purposes. + """ + + ct = source.getheader('Content-Type') + if ct: + content_type, content_type_attrs = parse_content_type(ct) + is_multipart = content_type == 'multipart/byteranges' + else: + is_multipart = False + + boundary = "dontcare" + if is_multipart: + # we need some MIME boundary; fortunately, the object server has + # furnished one for us, so we'll just re-use it + boundary = dict(content_type_attrs)["boundary"] + + parts_iter = self._get_response_parts_iter(req, node, source) + + def add_content_type(response_part): + response_part["content_type"] = \ + HeaderKeyDict(response_part["headers"]).get("Content-Type") + return response_part + + return document_iters_to_http_response_body( + (add_content_type(pi) for pi in parts_iter), + boundary, is_multipart, self.app.logger) + def get_working_response(self, req): source, node = self._get_source_and_node() res = None @@ -932,10 +1098,6 @@ class GetOrHeadHandler(object): update_headers(res, source.getheaders()) if req.method == 'GET' and \ source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT): - cr = res.headers.get('Content-Range') - if cr: - start, end, total = parse_content_range(cr) - self.learn_size_from_content_range(start, end) res.app_iter = self._make_app_iter(req, node, source) # See NOTE: swift_conn at top of file about this. 
res.swift_conn = source.swift_conn diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index e2ae7a325a..10e83bcad7 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -43,7 +43,8 @@ from swift.common.utils import ( clean_content_type, config_true_value, ContextPool, csv_append, GreenAsyncPile, GreenthreadSafeIterator, json, Timestamp, normalize_delete_at_timestamp, public, get_expirer_container, - quorum_size) + document_iters_to_http_response_body, parse_content_range, + quorum_size, reiterate) from swift.common.bufferedhttp import http_connect from swift.common.constraints import check_metadata, check_object_creation, \ check_copy_from_header, check_destination_header, \ @@ -62,11 +63,12 @@ from swift.common.http import ( from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, ECDriverError, PolicyError) from swift.proxy.controllers.base import Controller, delay_denial, \ - cors_validation + cors_validation, ResumingGetter from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \ HTTPServerError, HTTPServiceUnavailable, Request, HeaderKeyDict, \ - HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException + HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \ + HTTPRequestedRangeNotSatisfiable, Range from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \ remove_items, copy_header_subset, close_if_possible @@ -1137,119 +1139,350 @@ class ECAppIter(object): WSGI iterable that decodes EC fragment archives (or portions thereof) into the original object (or portions thereof). - :param path: path for the request + :param path: object's path, sans v1 (e.g. /a/c/o) :param policy: storage policy for this object - :param internal_app_iters: list of the WSGI iterables from object server - GET responses for fragment archives. For an M+K erasure code, the - caller must supply M such iterables. + :param internal_parts_iters: list of the response-document-parts + iterators for the backend GET responses. For an M+K erasure code, + the caller must supply M such iterables. :param range_specs: list of dictionaries describing the ranges requested by the client. Each dictionary contains the start and end of the client's requested byte range as well as the start and end of the EC segments containing that byte range. + :param fa_length: length of the fragment archive, in bytes, if the + response is a 200. If it's a 206, then this is ignored. + :param obj_length: length of the object, in bytes. Learned from the headers in the GET response from the object server. 
:param logger: a logger """ - def __init__(self, path, policy, internal_app_iters, range_specs, - obj_length, logger): + def __init__(self, path, policy, internal_parts_iters, range_specs, + fa_length, obj_length, logger): self.path = path self.policy = policy - self.internal_app_iters = internal_app_iters + self.internal_parts_iters = internal_parts_iters self.range_specs = range_specs - self.obj_length = obj_length + self.fa_length = fa_length + self.obj_length = obj_length if obj_length is not None else 0 self.boundary = '' self.logger = logger + self.mime_boundary = None + self.learned_content_type = None + self.stashed_iter = None + def close(self): - for it in self.internal_app_iters: + for it in self.internal_parts_iters: close_if_possible(it) - def __iter__(self): - segments_iter = self.decode_segments_from_fragments() + def kickoff(self, req, resp): + """ + Start pulling data from the backends so that we can learn things like + the real Content-Type that might only be in the multipart/byteranges + response body. Update our response accordingly. - if len(self.range_specs) == 0: - # plain GET; just yield up segments - for seg in segments_iter: - yield seg - return + Also, this is the first point at which we can learn the MIME + boundary that our response has in the headers. We grab that so we + can also use it in the body. - if len(self.range_specs) > 1: - raise NotImplementedError("multi-range GETs not done yet") + :returns: None + :raises: HTTPException on error + """ + self.mime_boundary = resp.boundary - for range_spec in self.range_specs: - client_start = range_spec['client_start'] - client_end = range_spec['client_end'] - segment_start = range_spec['segment_start'] - segment_end = range_spec['segment_end'] + self.stashed_iter = reiterate(self._real_iter(req, resp.headers)) + + if self.learned_content_type is not None: + resp.content_type = self.learned_content_type + resp.content_length = self.obj_length + + def _next_range(self): + # Each FA part should have approximately the same headers. We really + # only care about Content-Range and Content-Type, and that'll be the + # same for all the different FAs. + frag_iters = [] + headers = None + for parts_iter in self.internal_parts_iters: + part_info = next(parts_iter) + frag_iters.append(part_info['part_iter']) + headers = part_info['headers'] + headers = HeaderKeyDict(headers) + return headers, frag_iters + + def _actual_range(self, req_start, req_end, entity_length): + try: + rng = Range("bytes=%s-%s" % ( + req_start if req_start is not None else '', + req_end if req_end is not None else '')) + except ValueError: + return (None, None) + + rfl = rng.ranges_for_length(entity_length) + if not rfl: + return (None, None) + else: + # ranges_for_length() adds 1 to the last byte's position + # because webob once made a mistake + return (rfl[0][0], rfl[0][1] - 1) + + def _fill_out_range_specs_from_obj_length(self, range_specs): + # Add a few fields to each range spec: + # + # * resp_client_start, resp_client_end: the actual bytes that will + # be delivered to the client for the requested range. This may + # differ from the requested bytes if, say, the requested range + # overlaps the end of the object. + # + # * resp_segment_start, resp_segment_end: the actual offsets of the + # segments that will be decoded for the requested range. These + # differ from resp_client_start/end in that these are aligned + # to segment boundaries, while resp_client_start/end are not + # necessarily so. 
+ # + # * satisfiable: a boolean indicating whether the range is + # satisfiable or not (i.e. the requested range overlaps the + # object in at least one byte). + # + # This is kept separate from _fill_out_range_specs_from_fa_length() + # because this computation can be done with just the response + # headers from the object servers (in particular + # X-Object-Sysmeta-Ec-Content-Length), while the computation in + # _fill_out_range_specs_from_fa_length() requires the beginnings of + # the response bodies. + for spec in range_specs: + cstart, cend = self._actual_range( + spec['req_client_start'], + spec['req_client_end'], + self.obj_length) + spec['resp_client_start'] = cstart + spec['resp_client_end'] = cend + spec['satisfiable'] = (cstart is not None and cend is not None) + + sstart, send = self._actual_range( + spec['req_segment_start'], + spec['req_segment_end'], + self.obj_length) seg_size = self.policy.ec_segment_size - is_suffix = client_start is None + if spec['req_segment_start'] is None and sstart % seg_size != 0: + # Segment start may, in the case of a suffix request, need + # to be rounded up (not down!) to the nearest segment boundary. + # This reflects the trimming of leading garbage (partial + # fragments) from the retrieved fragments. + sstart += seg_size - (sstart % seg_size) - if is_suffix: - # Suffix byte ranges (i.e. requests for the last N bytes of - # an object) are likely to end up not on a segment boundary. - client_range_len = client_end - client_start = max(self.obj_length - client_range_len, 0) - client_end = self.obj_length - 1 + spec['resp_segment_start'] = sstart + spec['resp_segment_end'] = send - # may be mid-segment; if it is, then everything up to the - # first segment boundary is garbage, and is discarded before - # ever getting into this function. - unaligned_segment_start = max(self.obj_length - segment_end, 0) - alignment_offset = ( - (seg_size - (unaligned_segment_start % seg_size)) - % seg_size) - segment_start = unaligned_segment_start + alignment_offset - segment_end = self.obj_length - 1 - else: - # It's entirely possible that the client asked for a range that - # includes some bytes we have and some we don't; for example, a - # range of bytes 1000-20000000 on a 1500-byte object. - segment_end = (min(segment_end, self.obj_length - 1) - if segment_end is not None - else self.obj_length - 1) - client_end = (min(client_end, self.obj_length - 1) - if client_end is not None - else self.obj_length - 1) + def _fill_out_range_specs_from_fa_length(self, fa_length, range_specs): + # Add two fields to each range spec: + # + # * resp_fragment_start, resp_fragment_end: the start and end of + # the fragments that compose this byterange. These values are + # aligned to fragment boundaries. + # + # This way, ECAppIter has the knowledge it needs to correlate + # response byteranges with requested ones for when some byteranges + # are omitted from the response entirely and also to put the right + # Content-Range headers in a multipart/byteranges response. + for spec in range_specs: + fstart, fend = self._actual_range( + spec['req_fragment_start'], + spec['req_fragment_end'], + fa_length) + spec['resp_fragment_start'] = fstart + spec['resp_fragment_end'] = fend - num_segments = int( - math.ceil(float(segment_end + 1 - segment_start) - / self.policy.ec_segment_size)) - # We get full segments here, but the client may have requested a - # byte range that begins or ends in the middle of a segment. 
- # Thus, we have some amount of overrun (extra decoded bytes) - # that we trim off so the client gets exactly what they - # requested. - start_overrun = client_start - segment_start - end_overrun = segment_end - client_end + def __iter__(self): + if self.stashed_iter is not None: + return iter(self.stashed_iter) + else: + raise ValueError("Failed to call kickoff() before __iter__()") - for i, next_seg in enumerate(segments_iter): - # We may have a start_overrun of more than one segment in - # the case of suffix-byte-range requests. However, we never - # have an end_overrun of more than one segment. - if start_overrun > 0: - seglen = len(next_seg) - if seglen <= start_overrun: - start_overrun -= seglen - continue - else: - next_seg = next_seg[start_overrun:] - start_overrun = 0 + def _real_iter(self, req, resp_headers): + if not self.range_specs: + client_asked_for_range = False + range_specs = [{ + 'req_client_start': 0, + 'req_client_end': (None if self.obj_length is None + else self.obj_length - 1), + 'resp_client_start': 0, + 'resp_client_end': (None if self.obj_length is None + else self.obj_length - 1), + 'req_segment_start': 0, + 'req_segment_end': (None if self.obj_length is None + else self.obj_length - 1), + 'resp_segment_start': 0, + 'resp_segment_end': (None if self.obj_length is None + else self.obj_length - 1), + 'req_fragment_start': 0, + 'req_fragment_end': self.fa_length - 1, + 'resp_fragment_start': 0, + 'resp_fragment_end': self.fa_length - 1, + 'satisfiable': self.obj_length > 0, + }] + else: + client_asked_for_range = True + range_specs = self.range_specs - if i == (num_segments - 1) and end_overrun: - next_seg = next_seg[:-end_overrun] + self._fill_out_range_specs_from_obj_length(range_specs) - yield next_seg + multipart = (len([rs for rs in range_specs if rs['satisfiable']]) > 1) + # Multipart responses are not required to be in the same order as + # the Range header; the parts may be in any order the server wants. + # Further, if multiple ranges are requested and only some are + # satisfiable, then only the satisfiable ones appear in the response + # at all. Thus, we cannot simply iterate over range_specs in order; + # we must use the Content-Range header from each part to figure out + # what we've been given. + # + # We do, however, make the assumption that all the object-server + # responses have their ranges in the same order. Otherwise, a + # streaming decode would be impossible. - def decode_segments_from_fragments(self): + def convert_ranges_iter(): + seen_first_headers = False + ranges_for_resp = {} + + while True: + # this'll raise StopIteration and exit the loop + next_range = self._next_range() + + headers, frag_iters = next_range + content_type = headers['Content-Type'] + + content_range = headers.get('Content-Range') + if content_range is not None: + fa_start, fa_end, fa_length = parse_content_range( + content_range) + elif self.fa_length <= 0: + fa_start = None + fa_end = None + fa_length = 0 + else: + fa_start = 0 + fa_end = self.fa_length - 1 + fa_length = self.fa_length + + if not seen_first_headers: + # This is the earliest we can possibly do this. On a + # 200 or 206-single-byterange response, we can learn + # the FA's length from the HTTP response headers. + # However, on a 206-multiple-byteranges response, we + # don't learn it until the first part of the + # response body, in the headers of the first MIME + # part. 
+ # + # Similarly, the content type of a + # 206-multiple-byteranges response is + # "multipart/byteranges", not the object's actual + # content type. + self._fill_out_range_specs_from_fa_length( + fa_length, range_specs) + + satisfiable = False + for range_spec in range_specs: + satisfiable |= range_spec['satisfiable'] + key = (range_spec['resp_fragment_start'], + range_spec['resp_fragment_end']) + ranges_for_resp.setdefault(key, []).append(range_spec) + + # The client may have asked for an unsatisfiable set of + # ranges, but when converted to fragments, the object + # servers see it as satisfiable. For example, imagine a + # request for bytes 800-900 of a 750-byte object with a + # 1024-byte segment size. The object servers will see a + # request for bytes 0-${fragsize-1}, and that's + # satisfiable, so they return 206. It's not until we + # learn the object size that we can check for this + # condition. + # + # Note that some unsatisfiable ranges *will* be caught + # by the object servers, like bytes 1800-1900 of a + # 100-byte object with 1024-byte segments. That's not + # what we're dealing with here, though. + if client_asked_for_range and not satisfiable: + raise HTTPRequestedRangeNotSatisfiable( + request=req, headers=resp_headers) + self.learned_content_type = content_type + seen_first_headers = True + + range_spec = ranges_for_resp[(fa_start, fa_end)].pop(0) + seg_iter = self._decode_segments_from_fragments(frag_iters) + if not range_spec['satisfiable']: + # This'll be small; just a single small segment. Discard + # it. + for x in seg_iter: + pass + continue + + byterange_iter = self._iter_one_range(range_spec, seg_iter) + + converted = { + "start_byte": range_spec["resp_client_start"], + "end_byte": range_spec["resp_client_end"], + "content_type": content_type, + "part_iter": byterange_iter} + + if self.obj_length is not None: + converted["entity_length"] = self.obj_length + yield converted + + return document_iters_to_http_response_body( + convert_ranges_iter(), self.mime_boundary, multipart, self.logger) + + def _iter_one_range(self, range_spec, segment_iter): + client_start = range_spec['resp_client_start'] + client_end = range_spec['resp_client_end'] + segment_start = range_spec['resp_segment_start'] + segment_end = range_spec['resp_segment_end'] + + # It's entirely possible that the client asked for a range that + # includes some bytes we have and some we don't; for example, a + # range of bytes 1000-20000000 on a 1500-byte object. + segment_end = (min(segment_end, self.obj_length - 1) + if segment_end is not None + else self.obj_length - 1) + client_end = (min(client_end, self.obj_length - 1) + if client_end is not None + else self.obj_length - 1) + num_segments = int( + math.ceil(float(segment_end + 1 - segment_start) + / self.policy.ec_segment_size)) + # We get full segments here, but the client may have requested a + # byte range that begins or ends in the middle of a segment. + # Thus, we have some amount of overrun (extra decoded bytes) + # that we trim off so the client gets exactly what they + # requested. + start_overrun = client_start - segment_start + end_overrun = segment_end - client_end + + for i, next_seg in enumerate(segment_iter): + # We may have a start_overrun of more than one segment in + # the case of suffix-byte-range requests. However, we never + # have an end_overrun of more than one segment. 
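+            # (For example, with 4096-byte segments, a request for bytes + # 5000-5999 decodes the segment spanning 4096-8191, giving a + # start_overrun of 904 and an end_overrun of 2192.)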
+ if start_overrun > 0: + seglen = len(next_seg) + if seglen <= start_overrun: + start_overrun -= seglen + continue + else: + next_seg = next_seg[start_overrun:] + start_overrun = 0 + + if i == (num_segments - 1) and end_overrun: + next_seg = next_seg[:-end_overrun] + + yield next_seg + + def _decode_segments_from_fragments(self, fragment_iters): # Decodes the fragments from the object servers and yields one # segment at a time. - queues = [Queue(1) for _junk in range(len(self.internal_app_iters))] + queues = [Queue(1) for _junk in range(len(fragment_iters))] def put_fragments_in_queue(frag_iter, queue): try: @@ -1262,7 +1495,8 @@ class ECAppIter(object): pass except ChunkReadTimeout: # unable to resume in GetOrHeadHandler - pass + self.logger.exception("Timeout fetching fragments for %r" % + self.path) except: # noqa self.logger.exception("Exception fetching fragments for %r" % self.path) @@ -1270,14 +1504,13 @@ class ECAppIter(object): queue.resize(2) # ensure there's room queue.put(None) - with ContextPool(len(self.internal_app_iters)) as pool: - for app_iter, queue in zip( - self.internal_app_iters, queues): - pool.spawn(put_fragments_in_queue, app_iter, queue) + with ContextPool(len(fragment_iters)) as pool: + for frag_iter, queue in zip(fragment_iters, queues): + pool.spawn(put_fragments_in_queue, frag_iter, queue) while True: fragments = [] - for qi, queue in enumerate(queues): + for queue in queues: fragment = queue.get() queue.task_done() fragments.append(fragment) @@ -1302,8 +1535,8 @@ class ECAppIter(object): def app_iter_range(self, start, end): return self - def app_iter_ranges(self, content_type, boundary, content_size): - self.boundary = boundary + def app_iter_ranges(self, ranges, content_type, boundary, content_size): + return self def client_range_to_segment_range(client_start, client_end, segment_size): @@ -1750,6 +1983,71 @@ def trailing_metadata(policy, client_obj_hasher, @ObjectControllerRouter.register(EC_POLICY) class ECObjectController(BaseObjectController): + def _fragment_GET_request(self, req, node_iter, partition, policy): + """ + Makes a GET request for a fragment. + """ + backend_headers = self.generate_request_headers( + req, additional=req.headers) + + getter = ResumingGetter(self.app, req, 'Object', node_iter, + partition, req.swift_entity_path, + backend_headers, + client_chunk_size=policy.fragment_size, + newest=False) + return (getter, getter.response_parts_iter(req)) + + def _convert_range(self, req, policy): + """ + Take the requested range(s) from the client and convert it to range(s) + to be sent to the object servers. + + This includes widening requested ranges to full segments, then + converting those ranges to fragments so that we retrieve the minimum + number of fragments from the object server. + + Mutates the request passed in. + + Returns a list of range specs (dictionaries with the different byte + indices in them). + """ + # Since segments and fragments have different sizes, we need + # to modify the Range header sent to the object servers to + # make sure we get the right fragments out of the fragment + # archives. + segment_size = policy.ec_segment_size + fragment_size = policy.fragment_size + + range_specs = [] + new_ranges = [] + for client_start, client_end in req.range.ranges: + # TODO: coalesce ranges that overlap segments. For + # example, "bytes=0-10,20-30,40-50" with a 64 KiB + # segment size will result in a a Range header in the + # object request of "bytes=0-65535,0-65535,0-65535", + # which is wasteful. 
We should be smarter and only + # request that first segment once. + segment_start, segment_end = client_range_to_segment_range( + client_start, client_end, segment_size) + + fragment_start, fragment_end = \ + segment_range_to_fragment_range( + segment_start, segment_end, + segment_size, fragment_size) + + new_ranges.append((fragment_start, fragment_end)) + range_specs.append({'req_client_start': client_start, + 'req_client_end': client_end, + 'req_segment_start': segment_start, + 'req_segment_end': segment_end, + 'req_fragment_start': fragment_start, + 'req_fragment_end': fragment_end}) + + req.range = "bytes=" + ",".join( + "%s-%s" % (s if s is not None else "", + e if e is not None else "") + for s, e in new_ranges) + return range_specs def _get_or_head_response(self, req, node_iter, partition, policy): req.headers.setdefault("X-Backend-Etag-Is-At", @@ -1767,63 +2065,35 @@ class ECObjectController(BaseObjectController): range_specs = [] if req.range: orig_range = req.range - # Since segments and fragments have different sizes, we need - # to modify the Range header sent to the object servers to - # make sure we get the right fragments out of the fragment - # archives. - segment_size = policy.ec_segment_size - fragment_size = policy.fragment_size - - range_specs = [] - new_ranges = [] - for client_start, client_end in req.range.ranges: - - segment_start, segment_end = client_range_to_segment_range( - client_start, client_end, segment_size) - - fragment_start, fragment_end = \ - segment_range_to_fragment_range( - segment_start, segment_end, - segment_size, fragment_size) - - new_ranges.append((fragment_start, fragment_end)) - range_specs.append({'client_start': client_start, - 'client_end': client_end, - 'segment_start': segment_start, - 'segment_end': segment_end}) - - req.range = "bytes=" + ",".join( - "%s-%s" % (s if s is not None else "", - e if e is not None else "") - for s, e in new_ranges) + range_specs = self._convert_range(req, policy) node_iter = GreenthreadSafeIterator(node_iter) num_gets = policy.ec_ndata with ContextPool(num_gets) as pool: pile = GreenAsyncPile(pool) for _junk in range(num_gets): - pile.spawn(self.GETorHEAD_base, - req, 'Object', node_iter, partition, - req.swift_entity_path, - client_chunk_size=policy.fragment_size) + pile.spawn(self._fragment_GET_request, + req, node_iter, partition, + policy) - responses = list(pile) - good_responses = [] - bad_responses = [] - for response in responses: - if is_success(response.status_int): - good_responses.append(response) + gets = list(pile) + good_gets = [] + bad_gets = [] + for get, parts_iter in gets: + if is_success(get.last_status): + good_gets.append((get, parts_iter)) else: - bad_responses.append(response) + bad_gets.append((get, parts_iter)) req.range = orig_range - if len(good_responses) == num_gets: + if len(good_gets) == num_gets: # If these aren't all for the same object, then error out so # at least the client doesn't get garbage. We can do a lot # better here with more work, but this'll work for now. 
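# (X-Object-Sysmeta-Ec-Etag carries the etag of the complete object # on each fragment archive's GET response, which is what makes this # cross-fragment consistency check possible.)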
found_obj_etags = set( - resp.headers['X-Object-Sysmeta-Ec-Etag'] - for resp in good_responses) + HeaderKeyDict( + getter.last_headers)['X-Object-Sysmeta-Ec-Etag'] + for getter, _junk in good_gets) if len(found_obj_etags) > 1: self.app.logger.debug( "Returning 503 for %s; found too many etags (%s)", @@ -1833,30 +2103,41 @@ class ECObjectController(BaseObjectController): # we found enough pieces to decode the object, so now let's # decode the object - resp_headers = HeaderKeyDict(good_responses[0].headers.items()) + resp_headers = HeaderKeyDict( + good_gets[0][0].source_headers[-1]) resp_headers.pop('Content-Range', None) eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length') obj_length = int(eccl) if eccl is not None else None + # This is only true if we didn't get a 206 response, but + # that's the only time this is used anyway. + fa_length = int(resp_headers['Content-Length']) + + app_iter = ECAppIter( + req.swift_entity_path, + policy, + [iterator for getter, iterator in good_gets], + range_specs, fa_length, obj_length, + self.app.logger) resp = Response( request=req, headers=resp_headers, conditional_response=True, - app_iter=ECAppIter( - req.swift_entity_path, - policy, - [r.app_iter for r in good_responses], - range_specs, - obj_length, - logger=self.app.logger)) + app_iter=app_iter) + app_iter.kickoff(req, resp) else: + statuses = [] + reasons = [] + bodies = [] + headers = [] + for getter, body_parts_iter in bad_gets: + statuses.extend(getter.statuses) + reasons.extend(getter.reasons) + bodies.extend(getter.bodies) + headers.extend(getter.source_headers) resp = self.best_response( - req, - [r.status_int for r in bad_responses], - [r.status.split(' ', 1)[1] for r in bad_responses], - [r.body for r in bad_responses], - 'Object', - headers=[r.headers for r in bad_responses]) + req, statuses, reasons, bodies, 'Object', + headers=headers) self._fix_response_headers(resp) return resp diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 7015abb8eb..5dcd230849 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -1208,8 +1208,7 @@ class TestResponse(unittest.TestCase): ('0123456789112345678' '92123456789'))) - self.assert_(re.match(('\r\n' - '--[a-f0-9]{32}\r\n' + self.assert_(re.match(('--[a-f0-9]{32}\r\n' 'Content-Type: text/plain\r\n' 'Content-Range: bytes ' '0-9/100\r\n\r\n0123456789\r\n' @@ -1221,7 +1220,7 @@ class TestResponse(unittest.TestCase): 'Content-Type: text/plain\r\n' 'Content-Range: bytes ' '20-29/100\r\n\r\n2123456789\r\n' - '--[a-f0-9]{32}--\r\n'), content)) + '--[a-f0-9]{32}--'), content)) def test_multi_response_iter(self): def test_app(environ, start_response): diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 113b712ab1..33298a6fcb 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -60,7 +60,7 @@ from swift.common.exceptions import (Timeout, MessageTimeout, MimeInvalid, ThreadPoolDead) from swift.common import utils from swift.common.container_sync_realms import ContainerSyncRealms -from swift.common.swob import Request, Response +from swift.common.swob import Request, Response, HeaderKeyDict from test.unit import FakeLogger @@ -4722,6 +4722,18 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): exc = err self.assertTrue(exc is not None) + def test_leading_crlfs(self): + it = utils.iter_multipart_mime_documents( + StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n' + '--unique\r\nhijkl\r\n--unique--'), + 'unique') + fp = it.next() + 
self.assertEquals(fp.read(65536), 'abcdefg') + self.assertEquals(fp.read(), '') + fp = it.next() + self.assertEquals(fp.read(), 'hijkl') + self.assertRaises(StopIteration, it.next) + def test_broken_mid_stream(self): # We go ahead and accept whatever is sent instead of rejecting the # whole request, in case the partial form is still useful. @@ -4777,6 +4789,156 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): self.assertTrue(exc is not None) +class FakeResponse(object): + def __init__(self, status, headers, body): + self.status = status + self.headers = HeaderKeyDict(headers) + self.body = StringIO(body) + + def getheader(self, header_name): + return str(self.headers.get(header_name, '')) + + def getheaders(self): + return self.headers.items() + + def read(self, length=None): + return self.body.read(length) + + def readline(self, length=None): + return self.body.readline(length) + + +class TestHTTPResponseToDocumentIters(unittest.TestCase): + def test_200(self): + fr = FakeResponse( + 200, + {'Content-Length': '10', 'Content-Type': 'application/lunch'}, + 'sandwiches') + + doc_iters = utils.http_response_to_document_iters(fr) + first_byte, last_byte, length, headers, body = next(doc_iters) + self.assertEqual(first_byte, 0) + self.assertEqual(last_byte, 9) + self.assertEqual(length, 10) + header_dict = HeaderKeyDict(headers) + self.assertEqual(header_dict.get('Content-Length'), '10') + self.assertEqual(header_dict.get('Content-Type'), 'application/lunch') + self.assertEqual(body.read(), 'sandwiches') + + self.assertRaises(StopIteration, next, doc_iters) + + def test_206_single_range(self): + fr = FakeResponse( + 206, + {'Content-Length': '8', 'Content-Type': 'application/lunch', + 'Content-Range': 'bytes 1-8/10'}, + 'andwiche') + + doc_iters = utils.http_response_to_document_iters(fr) + first_byte, last_byte, length, headers, body = next(doc_iters) + self.assertEqual(first_byte, 1) + self.assertEqual(last_byte, 8) + self.assertEqual(length, 10) + header_dict = HeaderKeyDict(headers) + self.assertEqual(header_dict.get('Content-Length'), '8') + self.assertEqual(header_dict.get('Content-Type'), 'application/lunch') + self.assertEqual(body.read(), 'andwiche') + + self.assertRaises(StopIteration, next, doc_iters) + + def test_206_multiple_ranges(self): + fr = FakeResponse( + 206, + {'Content-Type': 'multipart/byteranges; boundary=asdfasdfasdf'}, + ("--asdfasdfasdf\r\n" + "Content-Type: application/lunch\r\n" + "Content-Range: bytes 0-3/10\r\n" + "\r\n" + "sand\r\n" + "--asdfasdfasdf\r\n" + "Content-Type: application/lunch\r\n" + "Content-Range: bytes 6-9/10\r\n" + "\r\n" + "ches\r\n" + "--asdfasdfasdf--")) + + doc_iters = utils.http_response_to_document_iters(fr) + + first_byte, last_byte, length, headers, body = next(doc_iters) + self.assertEqual(first_byte, 0) + self.assertEqual(last_byte, 3) + self.assertEqual(length, 10) + header_dict = HeaderKeyDict(headers) + self.assertEqual(header_dict.get('Content-Type'), 'application/lunch') + self.assertEqual(body.read(), 'sand') + + first_byte, last_byte, length, headers, body = next(doc_iters) + self.assertEqual(first_byte, 6) + self.assertEqual(last_byte, 9) + self.assertEqual(length, 10) + header_dict = HeaderKeyDict(headers) + self.assertEqual(header_dict.get('Content-Type'), 'application/lunch') + self.assertEqual(body.read(), 'ches') + + self.assertRaises(StopIteration, next, doc_iters) + + +class TestDocumentItersToHTTPResponseBody(unittest.TestCase): + def test_no_parts(self): + body = utils.document_iters_to_http_response_body( + 
iter([]), 'dontcare', + multipart=False, logger=FakeLogger()) + self.assertEqual(body, '') + + def test_single_part(self): + body = "time flies like an arrow; fruit flies like a banana" + doc_iters = [{'part_iter': iter(StringIO(body).read, '')}] + + resp_body = ''.join( + utils.document_iters_to_http_response_body( + iter(doc_iters), 'dontcare', + multipart=False, logger=FakeLogger())) + self.assertEqual(resp_body, body) + + def test_multiple_parts(self): + part1 = "two peanuts were walking down a railroad track" + part2 = "and one was a salted. ... peanut." + + doc_iters = [{ + 'start_byte': 88, + 'end_byte': 133, + 'content_type': 'application/peanut', + 'entity_length': 1024, + 'part_iter': iter(StringIO(part1).read, ''), + }, { + 'start_byte': 500, + 'end_byte': 532, + 'content_type': 'application/salted', + 'entity_length': 1024, + 'part_iter': iter(StringIO(part2).read, ''), + }] + + resp_body = ''.join( + utils.document_iters_to_http_response_body( + iter(doc_iters), 'boundaryboundary', + multipart=True, logger=FakeLogger())) + self.assertEqual(resp_body, ( + "--boundaryboundary\r\n" + + # This is a little too strict; we don't actually care that the + # headers are in this order, but the test is much more legible + # this way. + "Content-Type: application/peanut\r\n" + + "Content-Range: bytes 88-133/1024\r\n" + + "\r\n" + + part1 + "\r\n" + + "--boundaryboundary\r\n" + "Content-Type: application/salted\r\n" + + "Content-Range: bytes 500-532/1024\r\n" + + "\r\n" + + part2 + "\r\n" + + "--boundaryboundary--")) + + class TestPairs(unittest.TestCase): def test_pairs(self): items = [10, 20, 30, 40, 50, 60] diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 0ebd96eabd..d3fafd8b91 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -658,6 +658,7 @@ class TestFuncs(unittest.TestCase): class TestSource(object): def __init__(self, chunks): self.chunks = list(chunks) + self.status = 200 def read(self, _read_size): if self.chunks: @@ -665,6 +666,13 @@ class TestFuncs(unittest.TestCase): else: return '' + def getheader(self, header): + if header.lower() == "content-length": + return str(sum(len(c) for c in self.chunks)) + + def getheaders(self): + return [('content-length', self.getheader('content-length'))] + source = TestSource(( 'abcd', '1234', 'abc', 'd1', '234abcd1234abcd1', '2')) req = Request.blank('/v1/a/c/o') @@ -682,6 +690,7 @@ class TestFuncs(unittest.TestCase): class TestSource(object): def __init__(self, chunks): self.chunks = list(chunks) + self.status = 200 def read(self, _read_size): if self.chunks: @@ -693,6 +702,14 @@ class TestFuncs(unittest.TestCase): else: return '' + def getheader(self, header): + if header.lower() == "content-length": + return str(sum(len(c) for c in self.chunks + if c is not None)) + + def getheaders(self): + return [('content-length', self.getheader('content-length'))] + node = {'ip': '1.2.3.4', 'port': 6000, 'device': 'sda'} source1 = TestSource(['abcd', '1234', 'abc', None]) @@ -707,7 +724,6 @@ class TestFuncs(unittest.TestCase): lambda: (source2, node)): client_chunks = list(app_iter) self.assertEqual(client_chunks, ['abcd1234', 'efgh5678']) - self.assertEqual(handler.backend_headers['Range'], 'bytes=8-') def test_bytes_to_skip(self): # if you start at the beginning, skip nothing diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index c28a3625aa..d16f66c34b 100755 --- 
a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -939,8 +939,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): def test_GET_simple_x_newest(self): req = swift.common.swob.Request.blank('/v1/a/c/o', headers={'X-Newest': 'true'}) - codes = [200] * self.replicas() - codes += [404] * self.obj_ring.max_more_nodes + codes = [200] * self.policy.ec_ndata with set_http_connect(*codes): resp = req.get_response(self.app) self.assertEquals(resp.status_int, 200) @@ -976,7 +975,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): node_fragments = zip(*fragment_payloads) self.assertEqual(len(node_fragments), self.replicas()) # sanity - responses = [(200, ''.join(node_fragments[i]), {}) + headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))} + responses = [(200, ''.join(node_fragments[i]), headers) for i in range(POLICIES.default.ec_ndata)] status_codes, body_iter, headers = zip(*responses) with set_http_connect(*status_codes, body_iter=body_iter, @@ -1260,8 +1260,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): 'X-Copy-From': 'c2/o'}) # c2 get - codes = [200] * self.replicas() - codes += [404] * self.obj_ring.max_more_nodes + codes = [404, 200] * self.policy.ec_ndata headers = { 'X-Object-Sysmeta-Ec-Content-Length': 0, } @@ -1318,9 +1317,11 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1) ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2) - headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1} + headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1, + 'X-Object-Sysmeta-Ec-Content-Length': '333'} # here we're going to *lie* and say the etag here matches - headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1} + headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1, + 'X-Object-Sysmeta-Ec-Content-Length': '333'} responses1 = [(200, body, headers1) for body in ec_archive_bodies1] diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 3b0115bbfc..5f85c55e4a 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -14,10 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import email.parser import logging import math import os import pickle +import rfc822 import sys import unittest from contextlib import closing, contextmanager, nested @@ -40,7 +42,8 @@ import random import mock from eventlet import sleep, spawn, wsgi, listen, Timeout -from swift.common.utils import hash_path, json, storage_directory, public +from swift.common.utils import hash_path, json, storage_directory, \ + parse_content_type, iter_multipart_mime_documents, public from test.unit import ( connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing, @@ -1378,6 +1381,331 @@ class TestObjectController(unittest.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.body, obj) + @unpatch_policies + def test_GET_ranges(self): + prolis = _test_sockets[0] + prosrv = _test_servers[0] + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + obj = (''.join( + ('beans lots of beans lots of beans lots of beans yeah %04d ' % i) + for i in range(100))) + + path = '/v1/a/c/o.beans' + fd.write('PUT %s HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + 'Content-Length: %s\r\n' + 'Content-Type: application/octet-stream\r\n' + '\r\n%s' % (path, str(len(obj)), obj)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + # one byte range + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Content-Type': 'application/octet-stream', + 'Range': 'bytes=10-200'}) + res = req.get_response(prosrv) + self.assertEqual(res.status_int, 206) + self.assertEqual(res.body, obj[10:201]) + + # multiple byte ranges + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Content-Type': 'application/octet-stream', + 'Range': 'bytes=10-200,1000-1099,4123-4523'}) + res = req.get_response(prosrv) + self.assertEqual(res.status_int, 206) + ct, params = parse_content_type(res.headers['Content-Type']) + self.assertEqual(ct, 'multipart/byteranges') + + boundary = dict(params).get('boundary') + self.assertTrue(boundary is not None) + + got_mime_docs = [] + for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body), + boundary): + headers = HeaderKeyDict(rfc822.Message(mime_doc_fh, 0).items()) + body = mime_doc_fh.read() + got_mime_docs.append((headers, body)) + self.assertEqual(len(got_mime_docs), 3) + + first_range_headers = got_mime_docs[0][0] + first_range_body = got_mime_docs[0][1] + self.assertEqual(first_range_headers['Content-Range'], + 'bytes 10-200/5800') + self.assertEqual(first_range_body, obj[10:201]) + + second_range_headers = got_mime_docs[1][0] + second_range_body = got_mime_docs[1][1] + self.assertEqual(second_range_headers['Content-Range'], + 'bytes 1000-1099/5800') + self.assertEqual(second_range_body, obj[1000:1100]) + + second_range_headers = got_mime_docs[2][0] + second_range_body = got_mime_docs[2][1] + self.assertEqual(second_range_headers['Content-Range'], + 'bytes 4123-4523/5800') + self.assertEqual(second_range_body, obj[4123:4524]) + + @unpatch_policies + def test_GET_ranges_resuming(self): + prolis = _test_sockets[0] + prosrv = _test_servers[0] + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + obj = (''.join( + ('Smurf! The smurfing smurf is completely smurfed. 
%03d ' % i) + for i in range(1000))) + + path = '/v1/a/c/o.smurfs' + fd.write('PUT %s HTTP/1.1\r\n' + 'Host: localhost\r\n' + 'Connection: close\r\n' + 'X-Storage-Token: t\r\n' + 'Content-Length: %s\r\n' + 'Content-Type: application/smurftet-stream\r\n' + '\r\n%s' % (path, str(len(obj)), obj)) + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + self.assertEqual(headers[:len(exp)], exp) + + kaboomed = [0] + bytes_before_timeout = [None] + + class FileLikeKaboom(object): + def __init__(self, inner_file_like): + self.inner_file_like = inner_file_like + + # close(), etc. + def __getattr__(self, attr): + return getattr(self.inner_file_like, attr) + + def readline(self, *a, **kw): + if bytes_before_timeout[0] <= 0: + kaboomed[0] += 1 + raise ChunkReadTimeout(None) + result = self.inner_file_like.readline(*a, **kw) + if len(result) > bytes_before_timeout[0]: + result = result[:bytes_before_timeout[0]] + bytes_before_timeout[0] -= len(result) + return result + + def read(self, length=None): + result = self.inner_file_like.read(length) + if bytes_before_timeout[0] <= 0: + kaboomed[0] += 1 + raise ChunkReadTimeout(None) + if len(result) > bytes_before_timeout[0]: + result = result[:bytes_before_timeout[0]] + bytes_before_timeout[0] -= len(result) + return result + + orig_hrtdi = proxy_base.http_response_to_document_iters + + # Use this to mock out http_response_to_document_iters. On the first + # call, the result will be sabotaged to blow up with + # ChunkReadTimeout after some number of bytes are read. On + # subsequent calls, no sabotage will be added. + + def sabotaged_hrtdi(*a, **kw): + resp_parts = orig_hrtdi(*a, **kw) + for sb, eb, l, h, range_file in resp_parts: + if bytes_before_timeout[0] <= 0: + # simulate being unable to read MIME part of + # multipart/byteranges response + kaboomed[0] += 1 + raise ChunkReadTimeout(None) + boomer = FileLikeKaboom(range_file) + yield sb, eb, l, h, boomer + + sabotaged = [False] + + def single_sabotage_hrtdi(*a, **kw): + if not sabotaged[0]: + sabotaged[0] = True + return sabotaged_hrtdi(*a, **kw) + else: + return orig_hrtdi(*a, **kw) + + # We want sort of an end-to-end test of object resuming, so what we + # do is mock out stuff so the proxy thinks it only read a certain + # number of bytes before it got a timeout. + bytes_before_timeout[0] = 300 + with mock.patch.object(proxy_base, 'http_response_to_document_iters', + single_sabotage_hrtdi): + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Content-Type': 'application/octet-stream', + 'Range': 'bytes=0-500'}) + res = req.get_response(prosrv) + body = res.body # read the whole thing + self.assertEqual(kaboomed[0], 1) # sanity check + self.assertEqual(res.status_int, 206) + self.assertEqual(len(body), 501) + self.assertEqual(body, obj[:501]) + + # Sanity-check for multi-range resume: make sure we actually break + # in the middle of the second byterange. This test is partially + # about what happens when all the object servers break at once, and + # partially about validating all these mocks we do. After all, the + # point of resuming is that the client can't tell anything went + # wrong, so we need a test where we can't resume and something + # *does* go wrong so we can observe it. 
+ bytes_before_timeout[0] = 700 + kaboomed[0] = 0 + sabotaged[0] = False + prosrv._error_limiting = {} # clear out errors + with mock.patch.object(proxy_base, 'http_response_to_document_iters', + sabotaged_hrtdi): # perma-broken + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Range': 'bytes=0-500,1000-1500,2000-2500'}) + res = req.get_response(prosrv) + body = '' + try: + for chunk in res.app_iter: + body += chunk + except ChunkReadTimeout: + pass + + self.assertEqual(res.status_int, 206) + self.assertTrue(kaboomed[0] > 0) # sanity check + + ct, params = parse_content_type(res.headers['Content-Type']) + self.assertEqual(ct, 'multipart/byteranges') # sanity check + boundary = dict(params).get('boundary') + self.assertTrue(boundary is not None) # sanity check + got_byteranges = [] + for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), + boundary): + rfc822.Message(mime_doc_fh, 0) + body = mime_doc_fh.read() + got_byteranges.append(body) + + self.assertEqual(len(got_byteranges), 2) + self.assertEqual(len(got_byteranges[0]), 501) + self.assertEqual(len(got_byteranges[1]), 199) # partial + + # Multi-range resume, resuming in the middle of the first byterange + bytes_before_timeout[0] = 300 + kaboomed[0] = 0 + sabotaged[0] = False + prosrv._error_limiting = {} # clear out errors + with mock.patch.object(proxy_base, 'http_response_to_document_iters', + single_sabotage_hrtdi): + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Range': 'bytes=0-500,1000-1500,2000-2500'}) + res = req.get_response(prosrv) + body = ''.join(res.app_iter) + + self.assertEqual(res.status_int, 206) + self.assertEqual(kaboomed[0], 1) # sanity check + + ct, params = parse_content_type(res.headers['Content-Type']) + self.assertEqual(ct, 'multipart/byteranges') # sanity check + boundary = dict(params).get('boundary') + self.assertTrue(boundary is not None) # sanity check + got_byteranges = [] + for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), + boundary): + rfc822.Message(mime_doc_fh, 0) + body = mime_doc_fh.read() + got_byteranges.append(body) + + self.assertEqual(len(got_byteranges), 3) + self.assertEqual(len(got_byteranges[0]), 501) + self.assertEqual(got_byteranges[0], obj[:501]) + self.assertEqual(len(got_byteranges[1]), 501) + self.assertEqual(got_byteranges[1], obj[1000:1501]) + self.assertEqual(len(got_byteranges[2]), 501) + self.assertEqual(got_byteranges[2], obj[2000:2501]) + + # Multi-range resume, first GET dies in the middle of the second set + # of MIME headers + bytes_before_timeout[0] = 501 + kaboomed[0] = 0 + sabotaged[0] = False + prosrv._error_limiting = {} # clear out errors + with mock.patch.object(proxy_base, 'http_response_to_document_iters', + single_sabotage_hrtdi): + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Range': 'bytes=0-500,1000-1500,2000-2500'}) + res = req.get_response(prosrv) + body = ''.join(res.app_iter) + + self.assertEqual(res.status_int, 206) + self.assertTrue(kaboomed[0] >= 1) # sanity check + + ct, params = parse_content_type(res.headers['Content-Type']) + self.assertEqual(ct, 'multipart/byteranges') # sanity check + boundary = dict(params).get('boundary') + self.assertTrue(boundary is not None) # sanity check + got_byteranges = [] + for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), + boundary): + rfc822.Message(mime_doc_fh, 0) + body = mime_doc_fh.read() + got_byteranges.append(body) + + self.assertEqual(len(got_byteranges), 3) + 
self.assertEqual(len(got_byteranges[0]), 501) + self.assertEqual(got_byteranges[0], obj[:501]) + self.assertEqual(len(got_byteranges[1]), 501) + self.assertEqual(got_byteranges[1], obj[1000:1501]) + self.assertEqual(len(got_byteranges[2]), 501) + self.assertEqual(got_byteranges[2], obj[2000:2501]) + + # Multi-range resume, first GET dies in the middle of the second + # byterange + bytes_before_timeout[0] = 750 + kaboomed[0] = 0 + sabotaged[0] = False + prosrv._error_limiting = {} # clear out errors + with mock.patch.object(proxy_base, 'http_response_to_document_iters', + single_sabotage_hrtdi): + req = Request.blank( + path, + environ={'REQUEST_METHOD': 'GET'}, + headers={'Range': 'bytes=0-500,1000-1500,2000-2500'}) + res = req.get_response(prosrv) + body = ''.join(res.app_iter) + + self.assertEqual(res.status_int, 206) + self.assertTrue(kaboomed[0] >= 1) # sanity check + + ct, params = parse_content_type(res.headers['Content-Type']) + self.assertEqual(ct, 'multipart/byteranges') # sanity check + boundary = dict(params).get('boundary') + self.assertTrue(boundary is not None) # sanity check + got_byteranges = [] + for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), + boundary): + rfc822.Message(mime_doc_fh, 0) + body = mime_doc_fh.read() + got_byteranges.append(body) + + self.assertEqual(len(got_byteranges), 3) + self.assertEqual(len(got_byteranges[0]), 501) + self.assertEqual(got_byteranges[0], obj[:501]) + self.assertEqual(len(got_byteranges[1]), 501) + self.assertEqual(got_byteranges[1], obj[1000:1501]) + self.assertEqual(len(got_byteranges[2]), 501) + self.assertEqual(got_byteranges[2], obj[2000:2501]) + @unpatch_policies def test_PUT_ec(self): policy = POLICIES[3] @@ -1872,6 +2200,12 @@ class TestObjectController(unittest.TestCase): yield next(inner_iter) raise Exception("doom ba doom") + def explodey_doc_parts_iter(inner_iter_iter): + for item in inner_iter_iter: + item = item.copy() # paranoia about mutable data + item['part_iter'] = explodey_iter(item['part_iter']) + yield item + real_ec_app_iter = swift.proxy.controllers.obj.ECAppIter def explodey_ec_app_iter(path, policy, iterators, *a, **kw): @@ -1882,7 +2216,7 @@ class TestObjectController(unittest.TestCase): # the client when things go wrong. return real_ec_app_iter( path, policy, - [explodey_iter(i) for i in iterators], + [explodey_doc_parts_iter(i) for i in iterators], *a, **kw) with mock.patch("swift.proxy.controllers.obj.ECAppIter", @@ -6325,7 +6659,7 @@ class TestObjectECRangedGET(unittest.TestCase): 'Connection: close\r\n' 'Content-Length: %d\r\n' 'X-Storage-Token: t\r\n' - 'Content-Type: application/octet-stream\r\n' + 'Content-Type: donuts\r\n' '\r\n%s' % (obj_name, len(obj), obj)) fd.flush() headers = readuntil2crlfs(fd) @@ -6359,8 +6693,44 @@ class TestObjectECRangedGET(unittest.TestCase): break gotten_obj += buf + # if we get this wrong, clients will either get truncated data or + # they'll hang waiting for bytes that aren't coming, so it warrants + # being asserted for every test case + if 'Content-Length' in headers: + self.assertEqual(int(headers['Content-Length']), len(gotten_obj)) + + # likewise, if we say MIME and don't send MIME or vice versa, + # clients will be horribly confused + if headers.get('Content-Type', '').startswith('multipart/byteranges'): + self.assertEqual(gotten_obj[:2], "--") + else: + # In general, this isn't true, as you can start an object with + # "--". However, in this test, we don't start any objects with + # "--", or even include "--" in their contents anywhere. 
+            self.assertNotEqual(gotten_obj[:2], "--")
+
         return (status_code, headers, gotten_obj)
 
+    def _parse_multipart(self, content_type, body):
+        parser = email.parser.FeedParser()
+        parser.feed("Content-Type: %s\r\n\r\n" % content_type)
+        parser.feed(body)
+        root_message = parser.close()
+        self.assertTrue(root_message.is_multipart())
+        byteranges = root_message.get_payload()
+        self.assertFalse(root_message.defects)
+        for i, message in enumerate(byteranges):
+            self.assertFalse(message.defects, "Part %d had defects" % i)
+            self.assertFalse(message.is_multipart(),
+                             "Nested multipart at %d" % i)
+        return byteranges
+
+    def test_bogus(self):
+        status, headers, gotten_obj = self._get_obj("tacos=3-5")
+        self.assertEqual(status, 200)
+        self.assertEqual(len(gotten_obj), len(self.obj))
+        self.assertEqual(gotten_obj, self.obj)
+
     def test_unaligned(self):
         # One segment's worth of data, but straddling two segment boundaries
         # (so it has data from three segments)
@@ -6372,7 +6742,7 @@ class TestObjectECRangedGET(unittest.TestCase):
         self.assertEqual(gotten_obj, self.obj[3783:7879])
 
     def test_aligned_left(self):
-        # Firts byte is aligned to a segment boundary, last byte is not
+        # First byte is aligned to a segment boundary, last byte is not
         status, headers, gotten_obj = self._get_obj("bytes=0-5500")
         self.assertEqual(status, 206)
         self.assertEqual(headers['Content-Length'], "5501")
@@ -6540,6 +6910,168 @@ class TestObjectECRangedGET(unittest.TestCase):
         self.assertEqual(len(gotten_obj), len(self.tiny_obj))
         self.assertEqual(gotten_obj, self.tiny_obj)
 
+    def test_multiple_ranges(self):
+        status, headers, gotten_obj = self._get_obj(
+            "bytes=0-100,4490-5010", self.obj_name)
+        self.assertEqual(status, 206)
+        self.assertEqual(headers["Content-Length"], str(len(gotten_obj)))
+
+        content_type, content_type_params = parse_content_type(
+            headers['Content-Type'])
+        content_type_params = dict(content_type_params)
+
+        self.assertEqual(content_type, 'multipart/byteranges')
+        boundary = content_type_params.get('boundary')
+        self.assertTrue(boundary is not None)
+
+        got_byteranges = self._parse_multipart(headers['Content-Type'],
+                                               gotten_obj)
+        self.assertEqual(len(got_byteranges), 2)
+        first_byterange, second_byterange = got_byteranges
+
+        self.assertEqual(first_byterange['Content-Range'],
+                         'bytes 0-100/14513')
+        self.assertEqual(first_byterange.get_payload(), self.obj[:101])
+
+        self.assertEqual(second_byterange['Content-Range'],
+                         'bytes 4490-5010/14513')
+        self.assertEqual(second_byterange.get_payload(), self.obj[4490:5011])
+
+    def test_multiple_ranges_overlapping_in_segment(self):
+        status, headers, gotten_obj = self._get_obj(
+            "bytes=0-9,20-29,40-49,60-69,80-89")
+        self.assertEqual(status, 206)
+        got_byteranges = self._parse_multipart(headers['Content-Type'],
+                                               gotten_obj)
+        self.assertEqual(len(got_byteranges), 5)
+
+    def test_multiple_ranges_off_end(self):
+        status, headers, gotten_obj = self._get_obj(
+            "bytes=0-10,14500-14513")  # there is no byte 14513, only 0-14512
+        self.assertEqual(status, 206)
+        got_byteranges = self._parse_multipart(headers['Content-Type'],
+                                               gotten_obj)
+        self.assertEqual(len(got_byteranges), 2)
+        self.assertEqual(got_byteranges[0]['Content-Range'],
+                         "bytes 0-10/14513")
+        self.assertEqual(got_byteranges[1]['Content-Range'],
+                         "bytes 14500-14512/14513")
+
+    def test_multiple_ranges_suffix_off_end(self):
+        status, headers, gotten_obj = self._get_obj(
+            "bytes=0-10,-13")
+        self.assertEqual(status, 206)
+        got_byteranges = self._parse_multipart(headers['Content-Type'],
+                                               gotten_obj)
+
self.assertEqual(len(got_byteranges), 2) + self.assertEqual(got_byteranges[0]['Content-Range'], + "bytes 0-10/14513") + self.assertEqual(got_byteranges[1]['Content-Range'], + "bytes 14500-14512/14513") + + def test_multiple_ranges_one_barely_unsatisfiable(self): + # The thing about 14515-14520 is that it comes from the last segment + # in the object. When we turn this range into a fragment range, + # it'll be for the last fragment, so the object servers see + # something satisfiable. + # + # Basically, we'll get 3 byteranges from the object server, but we + # have to filter out the unsatisfiable one on our own. + status, headers, gotten_obj = self._get_obj( + "bytes=0-10,14515-14520,40-50") + self.assertEqual(status, 206) + got_byteranges = self._parse_multipart(headers['Content-Type'], + gotten_obj) + self.assertEqual(len(got_byteranges), 2) + self.assertEqual(got_byteranges[0]['Content-Range'], + "bytes 0-10/14513") + self.assertEqual(got_byteranges[0].get_payload(), self.obj[0:11]) + self.assertEqual(got_byteranges[1]['Content-Range'], + "bytes 40-50/14513") + self.assertEqual(got_byteranges[1].get_payload(), self.obj[40:51]) + + def test_multiple_ranges_some_unsatisfiable(self): + status, headers, gotten_obj = self._get_obj( + "bytes=0-100,4090-5010,999999-9999999", self.obj_name) + self.assertEqual(status, 206) + + content_type, content_type_params = parse_content_type( + headers['Content-Type']) + content_type_params = dict(content_type_params) + + self.assertEqual(content_type, 'multipart/byteranges') + boundary = content_type_params.get('boundary') + self.assertTrue(boundary is not None) + + got_byteranges = self._parse_multipart(headers['Content-Type'], + gotten_obj) + self.assertEqual(len(got_byteranges), 2) + first_byterange, second_byterange = got_byteranges + + self.assertEqual(first_byterange['Content-Range'], + 'bytes 0-100/14513') + self.assertEqual(first_byterange.get_payload(), self.obj[:101]) + + self.assertEqual(second_byterange['Content-Range'], + 'bytes 4090-5010/14513') + self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011]) + + def test_two_ranges_one_unsatisfiable(self): + status, headers, gotten_obj = self._get_obj( + "bytes=0-100,999999-9999999", self.obj_name) + self.assertEqual(status, 206) + + content_type, content_type_params = parse_content_type( + headers['Content-Type']) + + # According to RFC 7233, this could be either a multipart/byteranges + # response with one part or it could be a single-part response (just + # the bytes, no MIME). We're locking it down here: single-part + # response. That's what replicated objects do, and we don't want any + # client-visible differences between EC objects and replicated ones. + self.assertEqual(content_type, 'donuts') + self.assertEqual(gotten_obj, self.obj[:101]) + + def test_two_ranges_one_unsatisfiable_same_segment(self): + # Like test_two_ranges_one_unsatisfiable(), but where both ranges + # fall within the same EC segment. 
+ status, headers, gotten_obj = self._get_obj( + "bytes=14500-14510,14520-14530") + + self.assertEqual(status, 206) + + content_type, content_type_params = parse_content_type( + headers['Content-Type']) + + self.assertEqual(content_type, 'donuts') + self.assertEqual(gotten_obj, self.obj[14500:14511]) + + def test_multiple_ranges_some_unsatisfiable_out_of_order(self): + status, headers, gotten_obj = self._get_obj( + "bytes=0-100,99999998-99999999,4090-5010", self.obj_name) + self.assertEqual(status, 206) + + content_type, content_type_params = parse_content_type( + headers['Content-Type']) + content_type_params = dict(content_type_params) + + self.assertEqual(content_type, 'multipart/byteranges') + boundary = content_type_params.get('boundary') + self.assertTrue(boundary is not None) + + got_byteranges = self._parse_multipart(headers['Content-Type'], + gotten_obj) + self.assertEqual(len(got_byteranges), 2) + first_byterange, second_byterange = got_byteranges + + self.assertEqual(first_byterange['Content-Range'], + 'bytes 0-100/14513') + self.assertEqual(first_byterange.get_payload(), self.obj[:101]) + + self.assertEqual(second_byterange['Content-Range'], + 'bytes 4090-5010/14513') + self.assertEqual(second_byterange.get_payload(), self.obj[4090:5011]) + @patch_policies([ StoragePolicy(0, 'zero', True, object_ring=FakeRing(base_port=3000)), From af734b3fb6fdbc11b8a0b775256e7642eb485b8d Mon Sep 17 00:00:00 2001 From: Mark Seger Date: Wed, 3 Jun 2015 08:16:06 -0400 Subject: [PATCH 56/98] Change usage help and Attention messages to warnings Change-Id: I1396aaffe36e739606f15f7fef37b11bd83f1fc1 --- bin/swift-get-nodes | 2 +- doc/manpages/swift-get-nodes.1 | 2 +- swift/cli/info.py | 12 +++++++----- test/unit/cli/test_info.py | 2 +- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/bin/swift-get-nodes b/bin/swift-get-nodes index 435cf0bda6..b8d068bc37 100755 --- a/bin/swift-get-nodes +++ b/bin/swift-get-nodes @@ -26,7 +26,7 @@ if __name__ == '__main__': usage = ''' Shows the nodes responsible for the item specified. - Usage: %prog [-a] [] [] + Usage: %prog [-a] [ []] Or: %prog [-a] -p partition Or: %prog [-a] -P policy_name Note: account, container, object can also be a single arg separated by / diff --git a/doc/manpages/swift-get-nodes.1 b/doc/manpages/swift-get-nodes.1 index e268ec2690..608a18488d 100644 --- a/doc/manpages/swift-get-nodes.1 +++ b/doc/manpages/swift-get-nodes.1 @@ -25,7 +25,7 @@ .SH SYNOPSIS .LP .B swift-get-nodes -\ [] [] +\ [ []] .SH DESCRIPTION .PP diff --git a/swift/cli/info.py b/swift/cli/info.py index 2f140afee8..ab6d32e490 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -382,7 +382,7 @@ def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', if (policy_index is not None and policy_index_for_name is not None and policy_index != policy_index_for_name): - print 'Attention: Ring does not match policy!' + print 'Warning: Ring does not match policy!' print 'Double check your policy name!' if not ring and policy_index_for_name: ring = POLICIES.get_object_ring(policy_index_for_name, @@ -472,9 +472,9 @@ def print_item_locations(ring, ring_name=None, account=None, container=None, policy = POLICIES.get_by_name(policy_name) if policy: if ring_name != policy.ring_name: - print 'Attention! mismatch between ring and policy detected!' + print 'Warning: mismatch between ring and policy name!' else: - print 'Attention! 
Policy %s is not valid' % policy_name + print 'Warning: Policy %s is not valid' % policy_name policy_index = None if ring is None and (obj or part): @@ -518,14 +518,16 @@ def print_item_locations(ring, ring_name=None, account=None, container=None, ring = Ring(swift_dir, ring_name='container') else: if ring_name != 'container': - print 'Attention! mismatch between ring and item detected!' + print 'Warning: account/container specified ' + \ + 'but ring not named "container"' if account and not container and not obj: loc = 'accounts' if not any([ring, ring_name]): ring = Ring(swift_dir, ring_name='account') else: if ring_name != 'account': - print 'Attention! mismatch between ring and item detected!' + print 'Warning: account specified ' + \ + 'but ring not named "account"' print '\nAccount \t%s' % account print 'Container\t%s' % container diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py index 4e702abd5f..c9452ae5ac 100644 --- a/test/unit/cli/test_info.py +++ b/test/unit/cli/test_info.py @@ -411,7 +411,7 @@ class TestPrintObjFullMeta(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile, policy_name='two', swift_dir=self.testdir) - ring_alert_msg = 'Attention: Ring does not match policy' + ring_alert_msg = 'Warning: Ring does not match policy!' self.assertTrue(ring_alert_msg in out.getvalue()) def test_valid_etag(self): From 93de6c73e587a672663fa0997f439b73b336769c Mon Sep 17 00:00:00 2001 From: Hisashi Osanai Date: Tue, 2 Jun 2015 17:00:42 +0900 Subject: [PATCH 57/98] Add metadata size tests on the border This patch adds a test for a 204 when a single metadata item in a POST causes the backend aggregate constraints check to be on the border. Background: Overall metadata size constraint is enforced in the container and account backends as well as in the proxy controllers. Whereas the proxy controller can check that constraints are not exceeded by a single PUT or POST request, the backend checks that constraints are not exceeded by the aggregate of all PUTs and POSTs. The change [1] added a test for a 400 when a single metadata item in a POST causes the backend aggregate constraints check to go over limit. 
[1] I1489e29686013cbd3d70283d8756b548aea3c2e1 Change-Id: Iac86ea71240ddde177e625c279c21aef67659d10 --- test/functional/test_account.py | 11 +++++++++-- test/functional/test_container.py | 11 +++++++++-- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/test/functional/test_account.py b/test/functional/test_account.py index d0d18c0529..0bd7b886c3 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -829,14 +829,21 @@ class TestAccount(unittest.TestCase): self.assertEqual(resp.status, 204) # this POST includes metadata size that is over limit headers['X-Account-Meta-k'] = \ - 'v' * (self.max_meta_overall_size - size) + 'x' * (self.max_meta_overall_size - size) resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 400) + # this POST would be ok and the aggregate backend metadata + # size is on the border + headers = {'X-Account-Meta-k': + 'y' * (self.max_meta_overall_size - size - 1)} + resp = retry(post, headers) + resp.read() + self.assertEqual(resp.status, 204) # this last POST would be ok by itself but takes the aggregate # backend metadata size over limit headers = {'X-Account-Meta-k': - 'v' * (self.max_meta_overall_size - size)} + 'z' * (self.max_meta_overall_size - size)} resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 400) diff --git a/test/functional/test_container.py b/test/functional/test_container.py index 5de866b97e..de72526de6 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -451,14 +451,21 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) # this POST includes metadata size that is over limit headers['X-Container-Meta-k'] = \ - 'v' * (self.max_meta_overall_size - size) + 'x' * (self.max_meta_overall_size - size) resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 400) + # this POST would be ok and the aggregate backend metadata + # size is on the border + headers = {'X-Container-Meta-k': + 'y' * (self.max_meta_overall_size - size - 1)} + resp = retry(post, headers) + resp.read() + self.assertEqual(resp.status, 204) # this last POST would be ok by itself but takes the aggregate # backend metadata size over limit headers = {'X-Container-Meta-k': - 'v' * (self.max_meta_overall_size - size)} + 'z' * (self.max_meta_overall_size - size)} resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 400) From c953e8cc9049e94cd01239d5e4a30a56f26a97d3 Mon Sep 17 00:00:00 2001 From: Minwoo Bae Date: Tue, 28 Apr 2015 13:18:32 -0500 Subject: [PATCH 58/98] The hash_cleanup_listdir function should only be called when necessary. Introduced a conditional that will specify to _finalize_put() whether the put() method is being called within the context of ECDiskFileWriter. Included some unit tests that will test the number of calls to hash_cleanup_listdir() during put(), commit(), and delete() operations. 
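
For illustration, the intended call pattern reduces to something like the
following sketch (hypothetical class and method names, not the actual
diskfile.py code):

    # Replicated writers may clean up old on-disk files as soon as put()
    # renames the new file into place; EC writers must wait, because an
    # EC .data file is not durable until commit() writes the .durable
    # marker alongside it.

    class ReplicatedWriterSketch(object):
        def _finalize_put(self, metadata, cleanup):
            if cleanup:
                print('cleaning up old files')

        def put(self, metadata):
            self._finalize_put(metadata, cleanup=True)

        def commit(self, timestamp):
            pass  # data was already durable at put() time

    class ECWriterSketch(ReplicatedWriterSketch):
        def put(self, metadata):
            # old files must survive until the new fragment is durable
            self._finalize_put(metadata, cleanup=False)

        def commit(self, timestamp):
            print('wrote .durable; now cleaning up old files')

In this sketch, ReplicatedWriterSketch().put({}) cleans up immediately,
while ECWriterSketch() defers the cleanup to commit().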
Change-Id: I287b2648591fdbf686898dc9047beda80a64da0c
---
 swift/obj/diskfile.py          | 19 +++++++-----
 test/unit/obj/test_diskfile.py | 57 ++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 7 deletions(-)

diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 39eff67bde..ed96be7829 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -910,7 +910,7 @@ class DiskFileWriter(object):
 
         return self._upload_size
 
-    def _finalize_put(self, metadata, target_path):
+    def _finalize_put(self, metadata, target_path, cleanup):
         # Write the metadata before calling fsync() so that both data and
         # metadata are flushed to disk.
         write_metadata(self._fd, metadata)
@@ -930,10 +930,11 @@ class DiskFileWriter(object):
         # unnecessary os.unlink() of tempfile later. As renamer() has
         # succeeded, the tempfile would no longer exist at its original path.
         self._put_succeeded = True
-        try:
-            self.manager.hash_cleanup_listdir(self._datadir)
-        except OSError:
-            logging.exception(_('Problem cleaning up %s'), self._datadir)
+        if cleanup:
+            try:
+                self.manager.hash_cleanup_listdir(self._datadir)
+            except OSError:
+                logging.exception(_('Problem cleaning up %s'), self._datadir)
 
     def put(self, metadata):
         """
@@ -950,9 +951,10 @@ class DiskFileWriter(object):
         timestamp = Timestamp(metadata['X-Timestamp']).internal
         metadata['name'] = self._name
         target_path = join(self._datadir, timestamp + self._extension)
+        cleanup = True
 
         self._threadpool.force_run_in_thread(
-            self._finalize_put, metadata, target_path)
+            self._finalize_put, metadata, target_path, cleanup)
 
     def commit(self, timestamp):
         """
@@ -1832,6 +1834,7 @@ class ECDiskFileWriter(DiskFileWriter):
         """
         timestamp = Timestamp(metadata['X-Timestamp'])
         fi = None
+        cleanup = True
         if self._extension == '.data':
             # generally we treat the fragment index provided in metadata as
             # canon, but if it's unavailable (e.g. tests) it's reasonable to
@@ -1839,13 +1842,15 @@ class ECDiskFileWriter(DiskFileWriter):
             # sure that the fragment index is included in object sysmeta.
             fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
                                      self._diskfile._frag_index)
+            # defer cleanup until commit() writes .durable
+            cleanup = False
         filename = self.manager.make_on_disk_filename(
             timestamp, self._extension, frag_index=fi)
         metadata['name'] = self._name
         target_path = join(self._datadir, filename)
 
         self._threadpool.force_run_in_thread(
-            self._finalize_put, metadata, target_path)
+            self._finalize_put, metadata, target_path, cleanup)
 
 
 class ECDiskFile(DiskFile):
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py
index 2ccf3b1364..ed8c03a2fb 100644
--- a/test/unit/obj/test_diskfile.py
+++ b/test/unit/obj/test_diskfile.py
@@ -2685,6 +2685,63 @@ class DiskFileMixin(BaseDiskFileTestMixin):
             'Unexpected dir listing %s' % dl)
         self.assertEqual(sorted(expected), sorted(dl))
 
+    def test_number_calls_to_hash_cleanup_listdir_during_create(self):
+        # Check how many calls are made to hash_cleanup_listdir, and when,
+        # during put(), commit() sequence
+        for policy in POLICIES:
+            expected = {
+                EC_POLICY: (0, 1),
+                REPL_POLICY: (1, 0),
+            }[policy.policy_type]
+            df = self._simple_get_diskfile(account='a', container='c',
+                                           obj='o_hcl_error', policy=policy)
+            timestamp = Timestamp(time())
+            with df.create() as writer:
+                metadata = {
+                    'ETag': 'bogus_etag',
+                    'X-Timestamp': timestamp.internal,
+                    'Content-Length': '0',
+                }
+                with mock.patch(self._manager_mock(
+                        'hash_cleanup_listdir', df)) as mock_hcl:
+                    writer.put(metadata)
+                    self.assertEqual(expected[0], mock_hcl.call_count)
+                with mock.patch(self._manager_mock(
+                        'hash_cleanup_listdir', df)) as mock_hcl:
+                    writer.commit(timestamp)
+                    self.assertEqual(expected[1], mock_hcl.call_count)
+
+    def test_number_calls_to_hash_cleanup_listdir_during_delete(self):
+        # Check how many calls are made to hash_cleanup_listdir, and when,
+        # for delete() and necessary prerequisite steps
+        for policy in POLICIES:
+            expected = {
+                EC_POLICY: (0, 1, 1),
+                REPL_POLICY: (1, 0, 1),
+            }[policy.policy_type]
+            df = self._simple_get_diskfile(account='a', container='c',
+                                           obj='o_hcl_error', policy=policy)
+            timestamp = Timestamp(time())
+            with df.create() as writer:
+                metadata = {
+                    'ETag': 'bogus_etag',
+                    'X-Timestamp': timestamp.internal,
+                    'Content-Length': '0',
+                }
+                with mock.patch(self._manager_mock(
+                        'hash_cleanup_listdir', df)) as mock_hcl:
+                    writer.put(metadata)
+                    self.assertEqual(expected[0], mock_hcl.call_count)
+                with mock.patch(self._manager_mock(
+                        'hash_cleanup_listdir', df)) as mock_hcl:
+                    writer.commit(timestamp)
+                    self.assertEqual(expected[1], mock_hcl.call_count)
+                with mock.patch(self._manager_mock(
+                        'hash_cleanup_listdir', df)) as mock_hcl:
+                    timestamp = Timestamp(time())
+                    df.delete(timestamp)
+                    self.assertEqual(expected[2], mock_hcl.call_count)
+
     def test_delete(self):
         for policy in POLICIES:
             if policy.policy_type == EC_POLICY:

From 11e5c4adf0e02c8fdab0d8584fc943d96e1165cd Mon Sep 17 00:00:00 2001
From: Koert van der Veer
Date: Tue, 16 Dec 2014 11:15:19 +0100
Subject: [PATCH 59/98] Allow default reseller prefix in domain_remap
 middleware

Previously, the reseller prefix needed to be provided in the host name
even when the domain was unique to that reseller.

With the default_reseller_prefix, any domain which matches in this
middleware will be passed on with a reseller prefix, whether or not one
was provided.
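
Roughly, the remapping decision added here boils down to the following
standalone sketch (remap_account is a made-up helper for illustration,
not part of the middleware):

    def remap_account(account, reseller_prefixes, default_prefix=None):
        """Return the account with its reseller prefix, or None to bail."""
        lowered = [p.lower() for p in reseller_prefixes]
        found = account.split('_', 1)[0].lower()
        if found in lowered:
            # normalize the prefix to the configured capitalization
            real = reseller_prefixes[lowered.index(found)]
            return real + account[len(real):]
        elif default_prefix:
            # new behavior: fall back to the default reseller prefix
            return "%s_%s" % (default_prefix, account)
        return None  # no match and no default: ignore the request

    # remap_account('uuid', ['AUTH'], default_prefix='FOO') -> 'FOO_uuid'
    # remap_account('auth_uuid', ['AUTH'])                  -> 'AUTH_uuid'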
Change-Id: I5aa5ce78ad1ee2e3660cce4c3e07306f8999f02a Implements: blueprint domainremap-reseller-domains --- doc/manpages/proxy-server.conf.5 | 10 +++++-- etc/proxy-server.conf-sample | 9 ++++++ swift/common/middleware/domain_remap.py | 28 ++++++++++++------- .../common/middleware/test_domain_remap.py | 16 +++++++++++ 4 files changed, 50 insertions(+), 13 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 17197453ac..7ce480b0ca 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -296,9 +296,13 @@ Browsers can convert a host header to lowercase, so check that reseller prefix on the account is the correct case. This is done by comparing the items in the reseller_prefixes config option to the found prefix. If they match except for case, the item from reseller_prefixes will be used -instead of the found reseller prefix. The reseller_prefixes list is exclusive. -If defined, any request with an account prefix not in that list will be ignored -by this middleware. Defaults to 'AUTH'. +instead of the found reseller prefix. When none match, the default reseller +prefix is used. When no default reseller prefix is configured, any request with +an account prefix not in that list will be ignored by this middleware. +Defaults to 'AUTH'. +.IP \fBdefault_reseller_prefix\fR +The default reseller prefix. This is used when none of the configured +reseller_prefixes match. When not set, no reseller prefix is added. .RE diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 37fc7d4564..55b6137ae0 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -460,7 +460,16 @@ use = egg:swift#domain_remap # # storage_domain = example.com # path_root = v1 + +# Browsers can convert a host header to lowercase, so check that reseller +# prefix on the account is the correct case. This is done by comparing the +# items in the reseller_prefixes config option to the found prefix. If they +# match except for case, the item from reseller_prefixes will be used +# instead of the found reseller prefix. When none match, the default reseller +# prefix is used. When no default reseller prefix is configured, any request +# with an account prefix not in that list will be ignored by this middleware. # reseller_prefixes = AUTH +# default_reseller_prefix = [filter:catch_errors] use = egg:swift#catch_errors diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index cabd32aca7..052f7728df 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -30,9 +30,10 @@ Browsers can convert a host header to lowercase, so check that reseller prefix on the account is the correct case. This is done by comparing the items in the reseller_prefixes config option to the found prefix. If they match except for case, the item from reseller_prefixes will be used -instead of the found reseller prefix. The reseller_prefixes list is -exclusive. If defined, any request with an account prefix not in that list -will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'. +instead of the found reseller prefix. When none match, the default reseller +prefix is used. When no default reseller prefix is configured, any request with +an account prefix not in that list will be ignored by this middleware. +reseller_prefixes defaults to 'AUTH'. 
 Note that this middleware requires that container names and account names
 (except as described above) must be DNS-compatible. This means that the
@@ -74,6 +75,7 @@ class DomainRemapMiddleware(object):
                                   if x.strip()]
         self.reseller_prefixes_lower = [x.lower()
                                         for x in self.reseller_prefixes]
+        self.default_reseller_prefix = conf.get('default_reseller_prefix')
 
     def __call__(self, env, start_response):
         if not self.storage_domain:
@@ -102,15 +104,21 @@ class DomainRemapMiddleware(object):
             if '_' not in account and '-' in account:
                 account = account.replace('-', '_', 1)
             account_reseller_prefix = account.split('_', 1)[0].lower()
-            if account_reseller_prefix not in self.reseller_prefixes_lower:
+
+            if account_reseller_prefix in self.reseller_prefixes_lower:
+                prefix_index = self.reseller_prefixes_lower.index(
+                    account_reseller_prefix)
+                real_prefix = self.reseller_prefixes[prefix_index]
+                if not account.startswith(real_prefix):
+                    account_suffix = account[len(real_prefix):]
+                    account = real_prefix + account_suffix
+            elif self.default_reseller_prefix:
+                # account prefix is not in config list. Add default one.
+                account = "%s_%s" % (self.default_reseller_prefix, account)
+            else:
                 # account prefix is not in config list. bail.
                 return self.app(env, start_response)
-            prefix_index = self.reseller_prefixes_lower.index(
-                account_reseller_prefix)
-            real_prefix = self.reseller_prefixes[prefix_index]
-            if not account.startswith(real_prefix):
-                account_suffix = account[len(real_prefix):]
-                account = real_prefix + account_suffix
+
             path = env['PATH_INFO'].strip('/')
             new_path_parts = ['', self.path_root, account]
             if container:
diff --git a/test/unit/common/middleware/test_domain_remap.py b/test/unit/common/middleware/test_domain_remap.py
index dd86da633f..b14dfbcb2e 100644
--- a/test/unit/common/middleware/test_domain_remap.py
+++ b/test/unit/common/middleware/test_domain_remap.py
@@ -138,6 +138,22 @@ class TestDomainRemap(unittest.TestCase):
         resp = self.app(req.environ, start_response)
         self.assertEquals(resp, '/v1/uuid/c/test')
 
+    def test_domain_remap_add_prefix(self):
+        conf = {'default_reseller_prefix': 'FOO'}
+        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
+        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
+                            headers={'Host': 'uuid.example.com'})
+        resp = self.app(req.environ, start_response)
+        self.assertEquals(resp, '/v1/FOO_uuid/test')
+
+    def test_domain_remap_add_prefix_already_there(self):
+        conf = {'default_reseller_prefix': 'AUTH'}
+        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
+        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
+                            headers={'Host': 'auth-uuid.example.com'})
+        resp = self.app(req.environ, start_response)
+        self.assertEquals(resp, '/v1/AUTH_uuid/test')
+
 
 if __name__ == '__main__':
     unittest.main()

From 037a0c5dbe01a57b741d34988e21f4f763b6204b Mon Sep 17 00:00:00 2001
From: janonymous
Date: Sat, 6 Jun 2015 17:42:41 +0530
Subject: [PATCH 60/98] Objects are sorted and concatenated by Swift in
 lexicographic order

Modified the example values to zero-padded decimal names to be more
precise, and added a small description of how objects are sorted and
concatenated by Swift, as a short phrase is sufficient.
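
A quick illustration of why the zero-padded names below matter (plain
Python, not taken from the patch):

    # String sort order is not numeric order: unpadded segment names put
    # segment 10 immediately after segment 1.
    unpadded = ['%d' % i for i in range(1, 12)]
    padded = ['%08d' % i for i in range(1, 12)]

    print(sorted(unpadded)[:4])  # ['1', '10', '11', '2'] -- wrong order
    print(sorted(padded)[:3])    # ['00000001', '00000002', '00000003']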
Change-Id: I914ded4e5726e50bb93b05759c3bfb76edda53ab
backport: none
Closes-Bug: #1383893
---
 doc/source/overview_large_objects.rst | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/doc/source/overview_large_objects.rst b/doc/source/overview_large_objects.rst
index 1d5f1913a4..ab9e9d094b 100644
--- a/doc/source/overview_large_objects.rst
+++ b/doc/source/overview_large_objects.rst
@@ -70,7 +70,8 @@ is just a zero-byte (not enforced) file with an extra
 ``X-Object-Manifest`` header.
 
 All the object segments need to be in the same container, have a common object
-name prefix, and their names sort in the order they should be concatenated.
+name prefix, and sort in the order in which they should be concatenated.
+Object names are sorted lexicographically as UTF-8 byte strings.
 They don't have to be in the same container as the manifest file will be, which
 is useful to keep container listings clean as explained above with ``swift``.
 
@@ -101,11 +102,11 @@ Here's an example using ``curl`` with tiny 1-byte segments::
 
     # First, upload the segments
    curl -X PUT -H 'X-Auth-Token: <token>' \
-        http://<storage_url>/container/myobject/1 --data-binary '1'
+        http://<storage_url>/container/myobject/00000001 --data-binary '1'
    curl -X PUT -H 'X-Auth-Token: <token>' \
-        http://<storage_url>/container/myobject/2 --data-binary '2'
+        http://<storage_url>/container/myobject/00000002 --data-binary '2'
    curl -X PUT -H 'X-Auth-Token: <token>' \
-        http://<storage_url>/container/myobject/3 --data-binary '3'
+        http://<storage_url>/container/myobject/00000003 --data-binary '3'
 
     # Next, create the manifest file
    curl -X PUT -H 'X-Auth-Token: <token>' \

From 5ad369c1cb0f480b164a5b5e7eb194081835c266 Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Tue, 9 Jun 2015 00:22:12 +0200
Subject: [PATCH 61/98] Add six requirement

The six module is needed to add Python 3 support to Swift.

Change-Id: Ie637ed3458c7ff56c26834bca73203ed55604d74
---
 requirements.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/requirements.txt b/requirements.txt
index 9f81b844ae..35aab42f4b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,5 +8,6 @@ greenlet>=0.3.1
 netifaces>=0.5,!=0.10.0,!=0.10.1
 pastedeploy>=1.3.3
 simplejson>=2.0.9
+six>=1.9.0
 xattr>=0.4
 PyECLib>=1.0.7

From 09e7477a391e7425af62a6516f43c69bc98b7080 Mon Sep 17 00:00:00 2001
From: janonymous
Date: Mon, 15 Jun 2015 22:10:45 +0530
Subject: [PATCH 62/98] Replace it.next() with next(it) for py3 compat

The Python 2 next() method of iterators was renamed to __next__() on
Python 3. Use the built-in next() function instead, which works on both
Python 2 and Python 3.
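
A minimal before/after of the idiom being swapped in, for reference (not
code from the patch itself):

    it = iter(['a', 'b'])

    # Python 2 only: iterators expose a .next() method.
    #     it.next()
    # Portable: the next() builtin calls .next() on Python 2 and
    # .__next__() on Python 3.
    assert next(it) == 'a'
    assert next(it) == 'b'
    assert next(iter([]), 'fallback') == 'fallback'  # optional default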
Change-Id: Ic948bc574b58f1d28c5c58e3985906dee17fa51d --- swift/common/db_replicator.py | 4 +- swift/common/middleware/bulk.py | 2 +- swift/common/middleware/formpost.py | 2 +- swift/common/middleware/proxy_logging.py | 6 +- swift/common/request_helpers.py | 2 +- swift/common/utils.py | 12 +- swift/common/wsgi.py | 2 +- test/probe/brain.py | 2 +- test/probe/test_empty_device_handoff.py | 2 +- test/probe/test_object_handoff.py | 2 +- test/probe/test_reconstructor_durable.py | 2 +- test/probe/test_reconstructor_rebuild.py | 2 +- test/probe/test_reconstructor_revert.py | 2 +- test/unit/__init__.py | 14 +- test/unit/account/test_backend.py | 32 ++-- test/unit/account/test_server.py | 18 +- test/unit/account/test_utils.py | 8 +- test/unit/common/middleware/test_formpost.py | 2 +- .../common/middleware/test_keystoneauth.py | 2 +- .../common/middleware/test_proxy_logging.py | 4 +- test/unit/common/middleware/test_recon.py | 2 +- test/unit/common/middleware/test_tempauth.py | 4 +- test/unit/common/middleware/test_tempurl.py | 2 +- test/unit/common/ring/test_ring.py | 10 +- test/unit/common/test_db.py | 16 +- test/unit/common/test_db_replicator.py | 10 +- test/unit/common/test_manager.py | 12 +- test/unit/common/test_swob.py | 4 +- test/unit/common/test_utils.py | 56 +++--- test/unit/common/test_wsgi.py | 4 +- test/unit/container/test_backend.py | 44 ++--- test/unit/container/test_reconciler.py | 72 +++---- test/unit/container/test_replicator.py | 180 +++++++++--------- test/unit/container/test_server.py | 58 +++--- test/unit/obj/test_replicator.py | 6 +- test/unit/obj/test_server.py | 14 +- test/unit/obj/test_ssync_sender.py | 44 ++--- test/unit/obj/test_updater.py | 10 +- test/unit/proxy/controllers/test_base.py | 2 +- test/unit/proxy/controllers/test_obj.py | 22 +-- test/unit/proxy/test_server.py | 4 +- test/unit/proxy/test_sysmeta.py | 2 +- 42 files changed, 350 insertions(+), 350 deletions(-) diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 7f06edf851..aa91faaf33 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -105,7 +105,7 @@ def roundrobin_datadirs(datadirs): while its: for it in its: try: - yield it.next() + yield next(it) except StopIteration: its.remove(it) @@ -525,7 +525,7 @@ class Replicator(Daemon): success = self._repl_to_node(node, broker, partition, info, different_region) except DriveNotMounted: - repl_nodes.append(more_nodes.next()) + repl_nodes.append(next(more_nodes)) self.logger.error(_('ERROR Remote drive not mounted %s'), node) except (Exception, Timeout): self.logger.exception(_('ERROR syncing %(file)s with node' diff --git a/swift/common/middleware/bulk.py b/swift/common/middleware/bulk.py index 888ff2356a..d363068385 100644 --- a/swift/common/middleware/bulk.py +++ b/swift/common/middleware/bulk.py @@ -420,7 +420,7 @@ class Bulk(object): separator = '\r\n\r\n' last_yield = time() yield ' ' - tar_info = tar.next() + tar_info = next(tar) if tar_info is None or \ len(failed_files) >= self.max_failed_extractions: break diff --git a/swift/common/middleware/formpost.py b/swift/common/middleware/formpost.py index 56a6d20f3f..24b79a13ae 100644 --- a/swift/common/middleware/formpost.py +++ b/swift/common/middleware/formpost.py @@ -394,7 +394,7 @@ class FormPost(object): i = iter(self.app(subenv, _start_response)) try: - i.next() + next(i) except StopIteration: pass return substatus[0], subheaders[0], '' diff --git a/swift/common/middleware/proxy_logging.py b/swift/common/middleware/proxy_logging.py index 
968af2dd71..5389ffbd70 100644 --- a/swift/common/middleware/proxy_logging.py +++ b/swift/common/middleware/proxy_logging.py @@ -248,9 +248,9 @@ class ProxyLoggingMiddleware(object): def iter_response(iterable): iterator = iter(iterable) try: - chunk = iterator.next() + chunk = next(iterator) while not chunk: - chunk = iterator.next() + chunk = next(iterator) except StopIteration: chunk = '' for h, v in start_response_args[0][1]: @@ -281,7 +281,7 @@ class ProxyLoggingMiddleware(object): while chunk: bytes_sent += len(chunk) yield chunk - chunk = iterator.next() + chunk = next(iterator) except GeneratorExit: # generator was closed before we finished client_disconnect = True raise diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index 14b9fd8849..c9da1cb754 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -420,7 +420,7 @@ class SegmentedIterable(object): self.validated_first_segment = True try: - self.peeked_chunk = self.app_iter.next() + self.peeked_chunk = next(self.app_iter) except StopIteration: pass diff --git a/swift/common/utils.py b/swift/common/utils.py index 856065a680..d470fb9970 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -459,7 +459,7 @@ class FileLikeIter(object): def next(self): """ - x.next() -> the next value, or raise StopIteration + next(x) -> the next value, or raise StopIteration """ if self.closed: raise ValueError('I/O operation on closed file') @@ -468,7 +468,7 @@ class FileLikeIter(object): self.buf = None return rv else: - return self.iterator.next() + return next(self.iterator) def read(self, size=-1): """ @@ -489,7 +489,7 @@ class FileLikeIter(object): self.buf = None else: try: - chunk = self.iterator.next() + chunk = next(self.iterator) except StopIteration: return '' if len(chunk) > size: @@ -1027,7 +1027,7 @@ class RateLimitedIterator(object): else: self.running_time = ratelimit_sleep(self.running_time, self.elements_per_second) - return self.iterator.next() + return next(self.iterator) class GreenthreadSafeIterator(object): @@ -1050,7 +1050,7 @@ class GreenthreadSafeIterator(object): def next(self): with self.semaphore: - return self.unsafe_iter.next() + return next(self.unsafe_iter) class NullLogger(object): @@ -2274,7 +2274,7 @@ class GreenAsyncPile(object): try: with GreenAsyncPileWaitallTimeout(timeout): while True: - results.append(self.next()) + results.append(next(self)) except (GreenAsyncPileWaitallTimeout, StopIteration): pass return results diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 35df2077f2..b87fde4a02 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -613,7 +613,7 @@ class WSGIContext(object): return resp resp = iter(resp) try: - first_chunk = resp.next() + first_chunk = next(resp) except StopIteration: return iter([]) else: # We got a first_chunk diff --git a/test/probe/brain.py b/test/probe/brain.py index 9ca931aac1..791d974b56 100644 --- a/test/probe/brain.py +++ b/test/probe/brain.py @@ -142,7 +142,7 @@ class BrainSplitter(object): """ put container with next storage policy """ - policy = self.policies.next() + policy = next(self.policies) if policy_index is not None: policy = POLICIES.get_by_index(int(policy_index)) if not policy: diff --git a/test/probe/test_empty_device_handoff.py b/test/probe/test_empty_device_handoff.py index e0e450a4b4..e4b2033e0f 100755 --- a/test/probe/test_empty_device_handoff.py +++ b/test/probe/test_empty_device_handoff.py @@ -89,7 +89,7 @@ class TestEmptyDevice(ReplProbeTest): # let's directly 
verify it. # Directly to handoff server assert we can get container/obj - another_onode = self.object_ring.get_more_nodes(opart).next() + another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index f513eef2ec..c3e3990839 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -67,7 +67,7 @@ class TestObjectHandoff(ReplProbeTest): # We've indirectly verified the handoff node has the container/object, # but let's directly verify it. - another_onode = self.object_ring.get_more_nodes(opart).next() + another_onode = next(self.object_ring.get_more_nodes(opart)) odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] diff --git a/test/probe/test_reconstructor_durable.py b/test/probe/test_reconstructor_durable.py index eeef00e62c..cbb94163e9 100644 --- a/test/probe/test_reconstructor_durable.py +++ b/test/probe/test_reconstructor_durable.py @@ -53,7 +53,7 @@ class Body(object): return self.chunk def __next__(self): - return self.next() + return next(self) class TestReconstructorPropDurable(ECProbeTest): diff --git a/test/probe/test_reconstructor_rebuild.py b/test/probe/test_reconstructor_rebuild.py index bf568ccc68..844c5394a8 100644 --- a/test/probe/test_reconstructor_rebuild.py +++ b/test/probe/test_reconstructor_rebuild.py @@ -54,7 +54,7 @@ class Body(object): return self.chunk def __next__(self): - return self.next() + return next(self) class TestReconstructorRebuild(ECProbeTest): diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 249a6b5d62..135d1ce421 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -54,7 +54,7 @@ class Body(object): return self.chunk def __next__(self): - return self.next() + return next(self) class TestReconstructorRevert(ECProbeTest): diff --git a/test/unit/__init__.py b/test/unit/__init__.py index e839e5568c..a4d1cd35ca 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -878,7 +878,7 @@ def fake_http_connect(*code_iter, **kwargs): # when timestamp is None, HeaderKeyDict raises KeyError headers.pop('x-timestamp', None) try: - if container_ts_iter.next() is False: + if next(container_ts_iter) is False: headers['x-container-timestamp'] = '1' except StopIteration: pass @@ -955,24 +955,24 @@ def fake_http_connect(*code_iter, **kwargs): kwargs['give_content_type'](args[6]['Content-Type']) else: kwargs['give_content_type']('') - i, status = conn_id_and_code_iter.next() + i, status = next(conn_id_and_code_iter) if 'give_connect' in kwargs: give_conn_fn = kwargs['give_connect'] argspec = inspect.getargspec(give_conn_fn) if argspec.keywords or 'connection_id' in argspec.args: ckwargs['connection_id'] = i give_conn_fn(*args, **ckwargs) - etag = etag_iter.next() - headers = headers_iter.next() - expect_headers = expect_headers_iter.next() - timestamp = timestamps_iter.next() + etag = next(etag_iter) + headers = next(headers_iter) + expect_headers = next(expect_headers_iter) + timestamp = next(timestamps_iter) if status <= 0: raise HTTPException() if body_iter is None: body = static_body or '' else: - body = body_iter.next() + body = next(body_iter) return FakeConn(status, etag, body=body, 
timestamp=timestamp, headers=headers, expect_headers=expect_headers, connection_id=i, give_send=kwargs.get('give_send')) diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py index d262689e87..5e174d892f 100644 --- a/test/unit/account/test_backend.py +++ b/test/unit/account/test_backend.py @@ -180,7 +180,7 @@ class TestAccountBroker(unittest.TestCase): def test_delete_db_status(self): ts = (Timestamp(t).internal for t in itertools.count(int(time()))) - start = ts.next() + start = next(ts) broker = AccountBroker(':memory:', account='a') broker.initialize(start) info = broker.get_info() @@ -194,7 +194,7 @@ class TestAccountBroker(unittest.TestCase): Timestamp(start).internal) # delete it - delete_timestamp = ts.next() + delete_timestamp = next(ts) broker.delete_db(delete_timestamp) info = broker.get_info() self.assertEqual(info['put_timestamp'], Timestamp(start).internal) @@ -643,7 +643,7 @@ class TestAccountBroker(unittest.TestCase): def test_get_policy_stats(self): ts = (Timestamp(t).internal for t in itertools.count(int(time()))) broker = AccountBroker(':memory:', account='a') - broker.initialize(ts.next()) + broker.initialize(next(ts)) # check empty policy_stats self.assertTrue(broker.empty()) policy_stats = broker.get_policy_stats() @@ -652,7 +652,7 @@ class TestAccountBroker(unittest.TestCase): # add some empty containers for policy in POLICIES: container_name = 'c-%s' % policy.name - put_timestamp = ts.next() + put_timestamp = next(ts) broker.put_container(container_name, put_timestamp, 0, 0, 0, @@ -667,7 +667,7 @@ class TestAccountBroker(unittest.TestCase): # update the containers object & byte count for policy in POLICIES: container_name = 'c-%s' % policy.name - put_timestamp = ts.next() + put_timestamp = next(ts) count = policy.idx * 100 # good as any integer broker.put_container(container_name, put_timestamp, 0, @@ -693,7 +693,7 @@ class TestAccountBroker(unittest.TestCase): # now delete the containers one by one for policy in POLICIES: container_name = 'c-%s' % policy.name - delete_timestamp = ts.next() + delete_timestamp = next(ts) broker.put_container(container_name, 0, delete_timestamp, 0, 0, @@ -711,14 +711,14 @@ class TestAccountBroker(unittest.TestCase): def test_policy_stats_tracking(self): ts = (Timestamp(t).internal for t in itertools.count(int(time()))) broker = AccountBroker(':memory:', account='a') - broker.initialize(ts.next()) + broker.initialize(next(ts)) # policy 0 - broker.put_container('con1', ts.next(), 0, 12, 2798641, 0) - broker.put_container('con1', ts.next(), 0, 13, 8156441, 0) + broker.put_container('con1', next(ts), 0, 12, 2798641, 0) + broker.put_container('con1', next(ts), 0, 13, 8156441, 0) # policy 1 - broker.put_container('con2', ts.next(), 0, 7, 5751991, 1) - broker.put_container('con2', ts.next(), 0, 8, 6085379, 1) + broker.put_container('con2', next(ts), 0, 7, 5751991, 1) + broker.put_container('con2', next(ts), 0, 8, 6085379, 1) stats = broker.get_policy_stats() self.assertEqual(len(stats), 2) @@ -1064,12 +1064,12 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): ts = (Timestamp(t).internal for t in itertools.count(int(time()))) broker = AccountBroker(db_path, account='a') - broker.initialize(ts.next()) + broker.initialize(next(ts)) self.assertTrue(broker.empty()) # add a container (to pending file) - broker.put_container('c', ts.next(), 0, 0, 0, + broker.put_container('c', next(ts), 0, 0, 0, POLICIES.default.idx) real_get = broker.get @@ -1127,10 +1127,10 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): 
# make and two account database "replicas" old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'), account='a') - old_broker.initialize(ts.next().internal) + old_broker.initialize(next(ts).internal) new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'), account='a') - new_broker.initialize(ts.next().internal) + new_broker.initialize(next(ts).internal) # manually insert an existing row to avoid migration for old database with old_broker.get() as conn: @@ -1139,7 +1139,7 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker): delete_timestamp, object_count, bytes_used, deleted) VALUES (?, ?, ?, ?, ?, ?) - ''', ('test_name', ts.next().internal, 0, 1, 2, 0)) + ''', ('test_name', next(ts).internal, 0, 1, 2, 0)) conn.commit() # get replication info and rows form old database diff --git a/test/unit/account/test_server.py b/test/unit/account/test_server.py index 1be5575995..ef920d030f 100644 --- a/test/unit/account/test_server.py +++ b/test/unit/account/test_server.py @@ -1728,13 +1728,13 @@ class TestAccountController(unittest.TestCase): ts = itertools.count() # create the account req = Request.blank('/sda1/p/a', method='PUT', headers={ - 'X-Timestamp': normalize_timestamp(ts.next())}) + 'X-Timestamp': normalize_timestamp(next(ts))}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity # add a container req = Request.blank('/sda1/p/a/c1', method='PUT', headers={ - 'X-Put-Timestamp': normalize_timestamp(ts.next()), + 'X-Put-Timestamp': normalize_timestamp(next(ts)), 'X-Delete-Timestamp': '0', 'X-Object-Count': '2', 'X-Bytes-Used': '4', @@ -1763,7 +1763,7 @@ class TestAccountController(unittest.TestCase): ts = itertools.count() # create the account req = Request.blank('/sda1/p/a', method='PUT', headers={ - 'X-Timestamp': normalize_timestamp(ts.next())}) + 'X-Timestamp': normalize_timestamp(next(ts))}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity @@ -1771,7 +1771,7 @@ class TestAccountController(unittest.TestCase): non_default_policies = [p for p in POLICIES if not p.is_default] policy = random.choice(non_default_policies) req = Request.blank('/sda1/p/a/c1', method='PUT', headers={ - 'X-Put-Timestamp': normalize_timestamp(ts.next()), + 'X-Put-Timestamp': normalize_timestamp(next(ts)), 'X-Delete-Timestamp': '0', 'X-Object-Count': '2', 'X-Bytes-Used': '4', @@ -1801,7 +1801,7 @@ class TestAccountController(unittest.TestCase): ts = itertools.count() # create the account req = Request.blank('/sda1/p/a', method='PUT', headers={ - 'X-Timestamp': normalize_timestamp(ts.next())}) + 'X-Timestamp': normalize_timestamp(next(ts))}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity @@ -1816,7 +1816,7 @@ class TestAccountController(unittest.TestCase): ts = itertools.count() # create the account req = Request.blank('/sda1/p/a', method='PUT', headers={ - 'X-Timestamp': normalize_timestamp(ts.next())}) + 'X-Timestamp': normalize_timestamp(next(ts))}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity @@ -1831,7 +1831,7 @@ class TestAccountController(unittest.TestCase): # add a container policy = random.choice(POLICIES) req = Request.blank('/sda1/p/a/c1', method='PUT', headers={ - 'X-Put-Timestamp': normalize_timestamp(ts.next()), + 'X-Put-Timestamp': normalize_timestamp(next(ts)), 'X-Delete-Timestamp': '0', 'X-Object-Count': '2', 'X-Bytes-Used': '4', @@ -1853,7 +1853,7 @@ class TestAccountController(unittest.TestCase): ts = 
itertools.count() # create the account req = Request.blank('/sda1/p/a', method='PUT', headers={ - 'X-Timestamp': normalize_timestamp(ts.next())}) + 'X-Timestamp': normalize_timestamp(next(ts))}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity @@ -1863,7 +1863,7 @@ class TestAccountController(unittest.TestCase): container_path = '/sda1/p/a/c_%s' % policy.name req = Request.blank( container_path, method='PUT', headers={ - 'X-Put-Timestamp': normalize_timestamp(ts.next()), + 'X-Put-Timestamp': normalize_timestamp(next(ts)), 'X-Delete-Timestamp': '0', 'X-Object-Count': count, 'X-Bytes-Used': count, diff --git a/test/unit/account/test_utils.py b/test/unit/account/test_utils.py index ea90decfc9..35467d0dab 100644 --- a/test/unit/account/test_utils.py +++ b/test/unit/account/test_utils.py @@ -98,8 +98,8 @@ class TestAccountUtils(unittest.TestCase): total_objects = 0 total_bytes = 0 for policy in POLICIES: - delete_timestamp = ts.next() - put_timestamp = ts.next() + delete_timestamp = next(ts) + put_timestamp = next(ts) object_count = int(policy) bytes_used = int(policy) * 10 broker.put_container('c-%s' % policy.name, put_timestamp, @@ -145,8 +145,8 @@ class TestAccountUtils(unittest.TestCase): total_objects = 0 total_bytes = 0 for policy in POLICIES: - delete_timestamp = ts.next() - put_timestamp = ts.next() + delete_timestamp = next(ts) + put_timestamp = next(ts) object_count = int(policy) bytes_used = int(policy) * 10 broker.put_container('c-%s' % policy.name, put_timestamp, diff --git a/test/unit/common/middleware/test_formpost.py b/test/unit/common/middleware/test_formpost.py index c71eb7cc83..abc11edb85 100644 --- a/test/unit/common/middleware/test_formpost.py +++ b/test/unit/common/middleware/test_formpost.py @@ -59,7 +59,7 @@ class FakeApp(object): resp = env['swift.authorize'](self.requests[-1]) if resp: return resp(env, start_response) - status, headers, body = self.status_headers_body_iter.next() + status, headers, body = next(self.status_headers_body_iter) return Response(status=status, headers=headers, body=body)(env, start_response) except EOFError: diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index 078c275a7e..b9d216ae61 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -89,7 +89,7 @@ class FakeApp(object): context = {'method': self.request.method, 'headers': self.request.headers} self.call_contexts.append(context) - status, headers, body = self.status_headers_body_iter.next() + status, headers, body = next(self.status_headers_body_iter) return Response(status=status, headers=headers, body=body)(env, start_response) diff --git a/test/unit/common/middleware/test_proxy_logging.py b/test/unit/common/middleware/test_proxy_logging.py index 6f1fc9f5fa..f4a5049a49 100644 --- a/test/unit/common/middleware/test_proxy_logging.py +++ b/test/unit/common/middleware/test_proxy_logging.py @@ -551,7 +551,7 @@ class TestProxyLogging(unittest.TestCase): def test_no_content_length_no_transfer_encoding_with_list_body(self): app = proxy_logging.ProxyLoggingMiddleware( FakeAppNoContentLengthNoTransferEncoding( - # test the "while not chunk: chunk = iterator.next()" + # test the "while not chunk: chunk = next(iterator)" body=['', '', 'line1\n', 'line2\n'], ), {}) app.access_logger = FakeLogger() @@ -569,7 +569,7 @@ class TestProxyLogging(unittest.TestCase): def test_no_content_length_no_transfer_encoding_with_empty_strings(self): 
app = proxy_logging.ProxyLoggingMiddleware( FakeAppNoContentLengthNoTransferEncoding( - # test the "while not chunk: chunk = iterator.next()" + # test the "while not chunk: chunk = next(iterator)" body=['', '', ''], ), {}) app.access_logger = FakeLogger() diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index a46c4ae6c4..05a11ce859 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -76,7 +76,7 @@ class OpenAndReadTester(object): def read(self, *args, **kwargs): self.read_calls.append((args, kwargs)) try: - return self.output_iter.next() + return next(self.output_iter) except StopIteration: return '' diff --git a/test/unit/common/middleware/test_tempauth.py b/test/unit/common/middleware/test_tempauth.py index e8af310c82..5845bb1559 100644 --- a/test/unit/common/middleware/test_tempauth.py +++ b/test/unit/common/middleware/test_tempauth.py @@ -78,7 +78,7 @@ class FakeApp(object): resp = env['swift.authorize'](self.request) if resp: return resp(env, start_response) - status, headers, body = self.status_headers_body_iter.next() + status, headers, body = next(self.status_headers_body_iter) return Response(status=status, headers=headers, body=body)(env, start_response) @@ -95,7 +95,7 @@ class FakeConn(object): self.calls += 1 self.request_path = path self.status, self.headers, self.body = \ - self.status_headers_body_iter.next() + next(self.status_headers_body_iter) self.status, self.reason = self.status.split(' ', 1) self.status = int(self.status) diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index e66556334b..4b235022bc 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ b/test/unit/common/middleware/test_tempurl.py @@ -57,7 +57,7 @@ class FakeApp(object): resp = env['swift.authorize'](self.request) if resp: return resp(env, start_response) - status, headers, body = self.status_headers_body_iter.next() + status, headers, body = next(self.status_headers_body_iter) return Response(status=status, headers=headers, body=body)(env, start_response) diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index b97b60eeee..5904ea9a7b 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -234,7 +234,7 @@ class TestRing(TestRingBase): self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift).save(self.testgz) sleep(0.1) - self.ring.get_more_nodes(part).next() + next(self.ring.get_more_nodes(part)) self.assertEquals(len(self.ring.devs), 8) self.assertNotEquals(self.ring._mtime, orig_mtime) @@ -503,7 +503,7 @@ class TestRing(TestRingBase): # The first handoff nodes for each partition in the ring devs = [] for part in xrange(r.partition_count): - devs.append(r.get_more_nodes(part).next()['id']) + devs.append(next(r.get_more_nodes(part))['id']) self.assertEquals(devs, exp_first_handoffs) # Add a new device we can handoff to. 
@@ -539,7 +539,7 @@ class TestRing(TestRingBase): devs = [] for part in xrange(r.partition_count): - devs.append(r.get_more_nodes(part).next()['id']) + devs.append(next(r.get_more_nodes(part))['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], @@ -588,7 +588,7 @@ class TestRing(TestRingBase): devs = [] for part in xrange(r.partition_count): - devs.append(r.get_more_nodes(part).next()['id']) + devs.append(next(r.get_more_nodes(part))['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], @@ -669,7 +669,7 @@ class TestRing(TestRingBase): devs = [] for part in xrange(r.partition_count): - devs.append(r.get_more_nodes(part).next()['id']) + devs.append(next(r.get_more_nodes(part))['id']) for part in xrange(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index cf19730947..e1d18d400a 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -69,9 +69,9 @@ class TestDictFactory(unittest.TestCase): conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)') conn.commit() curs = conn.execute('SELECT one, two FROM test') - self.assertEquals(dict_factory(curs, curs.next()), + self.assertEquals(dict_factory(curs, next(curs)), {'one': 'abc', 'two': 123}) - self.assertEquals(dict_factory(curs, curs.next()), + self.assertEquals(dict_factory(curs, next(curs)), {'one': 'def', 'two': 456}) @@ -97,12 +97,12 @@ class TestChexor(unittest.TestCase): itertools.count(int(time.time()))) objects = [ - ('frank', ts.next()), - ('bob', ts.next()), - ('tom', ts.next()), - ('frank', ts.next()), - ('tom', ts.next()), - ('bob', ts.next()), + ('frank', next(ts)), + ('bob', next(ts)), + ('tom', next(ts)), + ('frank', next(ts)), + ('tom', next(ts)), + ('bob', next(ts)), ] hash_ = '0' random.shuffle(objects) diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 5244cacd35..5f5c6893fa 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -135,11 +135,11 @@ class FakeProcess(object): class Failure(object): def communicate(innerself): - next = self.codes.next() - if isinstance(next, int): - innerself.returncode = next - return next - raise next + next_item = next(self.codes) + if isinstance(next_item, int): + innerself.returncode = next_item + return next_item + raise next_item return Failure() diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index 5a9b3a6629..a42db5bf9b 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -156,7 +156,7 @@ class TestManagerModule(unittest.TestCase): def waitpid(self, pid, options): try: - rv = self.pid_map[pid].next() + rv = next(self.pid_map[pid]) except StopIteration: raise OSError(errno.ECHILD, os.strerror(errno.ECHILD)) except KeyError: @@ -176,7 +176,7 @@ class TestManagerModule(unittest.TestCase): def time(self): try: - self.tock += self.ticks.next() + self.tock += next(self.ticks) except StopIteration: self.tock += 1 return self.tock @@ -191,7 +191,7 @@ class TestManagerModule(unittest.TestCase): def get_running_pids(self): try: - rv = self.heartbeat.next() + rv = next(self.heartbeat) return rv except StopIteration: return {} @@ -602,7 +602,7 @@ class TestServer(unittest.TestCase): server = manager.Server('proxy', run_dir=t) # test get one file iter = server.iter_pid_files() - pid_file, pid = iter.next() + 
pid_file, pid = next(iter) self.assertEquals(pid_file, self.join_run_dir('proxy-server.pid')) self.assertEquals(pid, 1) # ... and only one file @@ -1021,7 +1021,7 @@ class TestServer(unittest.TestCase): self.pids = (p for p in pids) def Popen(self, args, **kwargs): - return MockProc(self.pids.next(), args, **kwargs) + return MockProc(next(self.pids), args, **kwargs) class MockProc(object): @@ -1295,7 +1295,7 @@ class TestServer(unittest.TestCase): def __call__(self, conf_file, **kwargs): self.conf_files.append(conf_file) self.kwargs.append(kwargs) - rv = self.pids.next() + rv = next(self.pids) if isinstance(rv, Exception): raise rv else: diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 5dcd230849..9f3271c1f3 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -1060,8 +1060,8 @@ class TestResponse(unittest.TestCase): req.method = 'GET' status, headers, app_iter = req.call_application(test_app) iterator = iter(app_iter) - self.assertEqual('igloo', iterator.next()) - self.assertEqual('shindig', iterator.next()) + self.assertEqual('igloo', next(iterator)) + self.assertEqual('shindig', next(iterator)) app_iter.close() self.assertRaises(StopIteration, iterator.next) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 33298a6fcb..3072883b83 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -3298,7 +3298,7 @@ class TestFileLikeIter(unittest.TestCase): iter_file = utils.FileLikeIter(in_iter) while True: try: - chunk = iter_file.next() + chunk = next(iter_file) except StopIteration: break chunks.append(chunk) @@ -3388,7 +3388,7 @@ class TestFileLikeIter(unittest.TestCase): def test_close(self): iter_file = utils.FileLikeIter('abcdef') - self.assertEquals(iter_file.next(), 'a') + self.assertEquals(next(iter_file), 'a') iter_file.close() self.assertTrue(iter_file.closed) self.assertRaises(ValueError, iter_file.next) @@ -3719,7 +3719,7 @@ class TestRateLimitedIterator(unittest.TestCase): started_at = time.time() try: while time.time() - started_at < 0.1: - got.append(limited_iterator.next()) + got.append(next(limited_iterator)) except StopIteration: pass return got @@ -3738,7 +3738,7 @@ class TestRateLimitedIterator(unittest.TestCase): started_at = time.time() try: while time.time() - started_at < 0.1: - got.append(limited_iterator.next()) + got.append(next(limited_iterator)) except StopIteration: pass return got @@ -4642,7 +4642,7 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique') exc = None try: - it.next() + next(it) except MimeInvalid as err: exc = err self.assertTrue('invalid starting boundary' in str(exc)) @@ -4651,11 +4651,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): def test_empty(self): it = utils.iter_multipart_mime_documents(StringIO('--unique'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), '') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4663,11 +4663,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): def test_basic(self): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'abcdefg') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4676,13 +4676,13 @@ class 
TestIterMultipartMimeDocuments(unittest.TestCase): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'abcdefg') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'hijkl') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4691,17 +4691,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(2), 'ab') self.assertEquals(fp.read(2), 'cd') self.assertEquals(fp.read(2), 'ef') self.assertEquals(fp.read(2), 'g') self.assertEquals(fp.read(2), '') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'hijkl') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4710,14 +4710,14 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(65536), 'abcdefg') self.assertEquals(fp.read(), '') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'hijkl') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4727,10 +4727,10 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n' '--unique\r\nhijkl\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(65536), 'abcdefg') self.assertEquals(fp.read(), '') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'hijkl') self.assertRaises(StopIteration, it.next) @@ -4739,11 +4739,11 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): # whole request, in case the partial form is still useful. 
it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nabc'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.read(), 'abc') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4752,17 +4752,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): it = utils.iter_multipart_mime_documents( StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n' 'jkl\r\n\r\n--unique--'), 'unique') - fp = it.next() + fp = next(it) self.assertEquals(fp.readline(), 'ab\r\n') self.assertEquals(fp.readline(), 'cd\ref\ng') self.assertEquals(fp.readline(), '') - fp = it.next() + fp = next(it) self.assertEquals(fp.readline(), 'hi\r\n') self.assertEquals(fp.readline(), '\r\n') self.assertEquals(fp.readline(), 'jkl\r\n') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) @@ -4773,17 +4773,17 @@ class TestIterMultipartMimeDocuments(unittest.TestCase): '\r\njkl\r\n\r\n--unique--'), 'unique', read_chunk_size=2) - fp = it.next() + fp = next(it) self.assertEquals(fp.readline(), 'ab\r\n') self.assertEquals(fp.readline(), 'cd\ref\ng') self.assertEquals(fp.readline(), '') - fp = it.next() + fp = next(it) self.assertEquals(fp.readline(), 'hi\r\n') self.assertEquals(fp.readline(), '\r\n') self.assertEquals(fp.readline(), 'jkl\r\n') exc = None try: - it.next() + next(it) except StopIteration as err: exc = err self.assertTrue(exc is not None) diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 279eb8624b..1fbd012dbe 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -786,8 +786,8 @@ class TestWSGIContext(unittest.TestCase): self.assertEquals(wc._response_status, '200 OK') iterator = iter(iterable) - self.assertEqual('aaaaa', iterator.next()) - self.assertEqual('bbbbb', iterator.next()) + self.assertEqual('aaaaa', next(iterator)) + self.assertEqual('bbbbb', next(iterator)) iterable.close() self.assertRaises(StopIteration, iterator.next) diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py index 76a42d6e57..23cadd1f42 100644 --- a/test/unit/container/test_backend.py +++ b/test/unit/container/test_backend.py @@ -57,7 +57,7 @@ class TestContainerBroker(unittest.TestCase): for policy in POLICIES: broker = ContainerBroker(':memory:', account='a', container='policy_%s' % policy.name) - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) with broker.get() as conn: try: conn.execute('''SELECT storage_policy_index @@ -168,7 +168,7 @@ class TestContainerBroker(unittest.TestCase): broker = ContainerBroker(':memory:', account='test_account', container='test_container') # create it - broker.initialize(ts.next(), POLICIES.default.idx) + broker.initialize(next(ts), POLICIES.default.idx) info, is_deleted = broker.get_info_is_deleted() self.assertEqual(is_deleted, broker.is_deleted()) self.assertEqual(is_deleted, False) # sanity @@ -185,7 +185,7 @@ class TestContainerBroker(unittest.TestCase): Timestamp(start).internal) # delete it - delete_timestamp = ts.next() + delete_timestamp = next(ts) broker.delete_db(delete_timestamp) info, is_deleted = broker.get_info_is_deleted() self.assertEqual(is_deleted, True) # sanity @@ -197,7 +197,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(info['status_changed_at'], delete_timestamp) # bring back to life - broker.put_object('obj', ts.next(), 0, 'text/plain', 'etag', + broker.put_object('obj', next(ts), 0, 
'text/plain', 'etag', storage_policy_index=broker.storage_policy_index) info, is_deleted = broker.get_info_is_deleted() self.assertEqual(is_deleted, False) # sanity @@ -437,14 +437,14 @@ class TestContainerBroker(unittest.TestCase): itertools.count(int(time()))) broker = ContainerBroker(':memory:', account='a', container='c') - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # migration tests may not honor policy on initialize if isinstance(self, ContainerBrokerMigrationMixin): real_storage_policy_index = \ broker.get_info()['storage_policy_index'] policy = filter(lambda p: p.idx == real_storage_policy_index, POLICIES)[0] - broker.put_object('correct_o', ts.next(), 123, 'text/plain', + broker.put_object('correct_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=policy.idx) info = broker.get_info() @@ -452,7 +452,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(123, info['bytes_used']) other_policy = random.choice([p for p in POLICIES if p is not policy]) - broker.put_object('wrong_o', ts.next(), 123, 'text/plain', + broker.put_object('wrong_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=other_policy.idx) self.assertEqual(1, info['object_count']) @@ -465,19 +465,19 @@ class TestContainerBroker(unittest.TestCase): itertools.count(int(time()))) broker = ContainerBroker(':memory:', account='a', container='c') - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # migration tests may not honor policy on initialize if isinstance(self, ContainerBrokerMigrationMixin): real_storage_policy_index = \ broker.get_info()['storage_policy_index'] policy = filter(lambda p: p.idx == real_storage_policy_index, POLICIES)[0] - broker.put_object('correct_o', ts.next(), 123, 'text/plain', + broker.put_object('correct_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=policy.idx) self.assertFalse(broker.has_multiple_policies()) other_policy = [p for p in POLICIES if p is not policy][0] - broker.put_object('wrong_o', ts.next(), 123, 'text/plain', + broker.put_object('wrong_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=other_policy.idx) self.assert_(broker.has_multiple_policies()) @@ -489,7 +489,7 @@ class TestContainerBroker(unittest.TestCase): itertools.count(int(time()))) broker = ContainerBroker(':memory:', account='a', container='c') - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # migration tests may not honor policy on initialize if isinstance(self, ContainerBrokerMigrationMixin): real_storage_policy_index = \ @@ -501,7 +501,7 @@ class TestContainerBroker(unittest.TestCase): self.assertEqual(policy_stats, expected) # add an object - broker.put_object('correct_o', ts.next(), 123, 'text/plain', + broker.put_object('correct_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=policy.idx) policy_stats = broker.get_policy_stats() @@ -511,7 +511,7 @@ class TestContainerBroker(unittest.TestCase): # add a misplaced object other_policy = random.choice([p for p in POLICIES if p is not policy]) - broker.put_object('wrong_o', ts.next(), 123, 'text/plain', + broker.put_object('wrong_o', next(ts), 123, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=other_policy.idx) policy_stats = broker.get_policy_stats() @@ -526,7 +526,7 @@ class TestContainerBroker(unittest.TestCase): 
itertools.count(int(time()))) broker = ContainerBroker(':memory:', account='a', container='c') - broker.initialize(ts.next(), POLICIES.default.idx) + broker.initialize(next(ts), POLICIES.default.idx) stats = defaultdict(dict) iters = 100 @@ -534,7 +534,7 @@ class TestContainerBroker(unittest.TestCase): policy_index = random.randint(0, iters * 0.1) name = 'object-%s' % random.randint(0, iters * 0.1) size = random.randint(0, iters) - broker.put_object(name, ts.next(), size, 'text/plain', + broker.put_object(name, next(ts), size, 'text/plain', '5af83e3196bf99f440f31f2e1a6c9afe', storage_policy_index=policy_index) # track the size of the latest timestamp put for each object @@ -1343,7 +1343,7 @@ class TestContainerBroker(unittest.TestCase): itertools.count(int(time()))) broker = ContainerBroker(':memory:', account='test_account', container='test_container') - timestamp = ts.next() + timestamp = next(ts) broker.initialize(timestamp, 0) info = broker.get_info() @@ -1359,7 +1359,7 @@ class TestContainerBroker(unittest.TestCase): expected = {0: {'object_count': 0, 'bytes_used': 0}} self.assertEqual(expected, broker.get_policy_stats()) - timestamp = ts.next() + timestamp = next(ts) broker.set_storage_policy_index(111, timestamp) self.assertEqual(broker.storage_policy_index, 111) info = broker.get_info() @@ -1370,7 +1370,7 @@ class TestContainerBroker(unittest.TestCase): expected[111] = {'object_count': 0, 'bytes_used': 0} self.assertEqual(expected, broker.get_policy_stats()) - timestamp = ts.next() + timestamp = next(ts) broker.set_storage_policy_index(222, timestamp) self.assertEqual(broker.storage_policy_index, 222) info = broker.get_info() @@ -1381,7 +1381,7 @@ class TestContainerBroker(unittest.TestCase): expected[222] = {'object_count': 0, 'bytes_used': 0} self.assertEqual(expected, broker.get_policy_stats()) - old_timestamp, timestamp = timestamp, ts.next() + old_timestamp, timestamp = timestamp, next(ts) broker.set_storage_policy_index(222, timestamp) # it's idempotent info = broker.get_info() self.assertEqual(222, info['storage_policy_index']) @@ -1419,13 +1419,13 @@ class TestContainerBroker(unittest.TestCase): # first init an acct DB without the policy_stat table present broker = ContainerBroker(db_path, account='a', container='c') - broker.initialize(ts.next(), 1) + broker.initialize(next(ts), 1) # manually make some pending entries lacking storage_policy_index with open(broker.pending_file, 'a+b') as fp: for i in range(10): name, timestamp, size, content_type, etag, deleted = ( - 'o%s' % i, ts.next(), 0, 'c', 'e', 0) + 'o%s' % i, next(ts), 0, 'c', 'e', 0) fp.write(':') fp.write(pickle.dumps( (name, timestamp, size, content_type, etag, deleted), @@ -1442,7 +1442,7 @@ class TestContainerBroker(unittest.TestCase): else: size = 2 storage_policy_index = 1 - broker.put_object(name, ts.next(), size, 'c', 'e', 0, + broker.put_object(name, next(ts), size, 'c', 'e', 0, storage_policy_index=storage_policy_index) broker._commit_puts_stale_ok() diff --git a/test/unit/container/test_reconciler.py b/test/unit/container/test_reconciler.py index ca01dafc20..9466fbcb94 100644 --- a/test/unit/container/test_reconciler.py +++ b/test/unit/container/test_reconciler.py @@ -237,15 +237,15 @@ class TestReconcilerUtils(unittest.TestCase): mock_path = 'swift.container.reconciler.direct_head_container' stub_resp_headers = [ container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), container_resp_headers( - 
status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=1, ), container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), ] @@ -268,11 +268,11 @@ class TestReconcilerUtils(unittest.TestCase): mock_path = 'swift.container.reconciler.direct_head_container' stub_resp_headers = [ container_resp_headers( - status_change_at=ts.next(), + status_change_at=next(ts), storage_policy_index=2, ), container_resp_headers( - status_changed_at=ts.next(), + status_changed_at=next(ts), storage_policy_index=1, ), # old timestamp, but 500 should be ignored... @@ -297,11 +297,11 @@ class TestReconcilerUtils(unittest.TestCase): mock_path = 'swift.container.reconciler.direct_head_container' stub_resp_headers = [ container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=1, ), container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)), @@ -318,7 +318,7 @@ class TestReconcilerUtils(unittest.TestCase): mock_path = 'swift.container.reconciler.direct_head_container' stub_resp_headers = [ container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)), @@ -326,7 +326,7 @@ class TestReconcilerUtils(unittest.TestCase): 'Container Server blew up', http_status=500, http_reason='Server Error', http_headers=container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=1, ), ), @@ -376,9 +376,9 @@ class TestReconcilerUtils(unittest.TestCase): 'Container Not Found', http_status=404, http_reason='Not Found', http_headers=container_resp_headers( - put_timestamp=ts.next(), - delete_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + delete_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=0, ), ), @@ -386,9 +386,9 @@ class TestReconcilerUtils(unittest.TestCase): 'Container Not Found', http_status=404, http_reason='Not Found', http_headers=container_resp_headers( - put_timestamp=ts.next(), - delete_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + delete_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=1, ), ), @@ -396,9 +396,9 @@ class TestReconcilerUtils(unittest.TestCase): 'Container Not Found', http_status=404, http_reason='Not Found', http_headers=container_resp_headers( - put_timestamp=ts.next(), - delete_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + delete_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=2, ), ), @@ -417,8 +417,8 @@ class TestReconcilerUtils(unittest.TestCase): # old put, no recreate container_resp_headers( delete_timestamp=0, - put_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=0, ), # recently deleted @@ -426,17 +426,17 @@ class TestReconcilerUtils(unittest.TestCase): 'Container Not Found', http_status=404, http_reason='Not Found', http_headers=container_resp_headers( - put_timestamp=ts.next(), - delete_timestamp=ts.next(), - 
status_changed_at=ts.next(), + put_timestamp=next(ts), + delete_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=1, ), ), # recently recreated container_resp_headers( - delete_timestamp=ts.next(), - put_timestamp=ts.next(), - status_changed_at=ts.next(), + delete_timestamp=next(ts), + put_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=2, ), ] @@ -454,22 +454,22 @@ class TestReconcilerUtils(unittest.TestCase): # oldest put container_resp_headers( delete_timestamp=0, - put_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=0, ), # old recreate container_resp_headers( - delete_timestamp=ts.next(), - put_timestamp=ts.next(), - status_changed_at=ts.next(), + delete_timestamp=next(ts), + put_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=1, ), # recently put container_resp_headers( delete_timestamp=0, - put_timestamp=ts.next(), - status_changed_at=ts.next(), + put_timestamp=next(ts), + status_changed_at=next(ts), storage_policy_index=2, ), ] @@ -486,15 +486,15 @@ class TestReconcilerUtils(unittest.TestCase): mock_path = 'swift.container.reconciler.direct_head_container' stub_resp_headers = [ container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=1, ), container_resp_headers( - status_changed_at=Timestamp(ts.next()).internal, + status_changed_at=Timestamp(next(ts)).internal, storage_policy_index=0, ), ] diff --git a/test/unit/container/test_replicator.py b/test/unit/container/test_replicator.py index 49fea253cc..86a8880737 100644 --- a/test/unit/container/test_replicator.py +++ b/test/unit/container/test_replicator.py @@ -102,14 +102,14 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): ts_iter = make_timestamp_iter() # setup a local container broker = self._get_broker('a', 'c', node_index=0) - put_timestamp = ts_iter.next() + put_timestamp = next(ts_iter) broker.initialize(put_timestamp.internal, POLICIES.default.idx) broker.update_metadata( {'x-container-meta-test': ('foo', put_timestamp.internal)}) # setup remote container remote_broker = self._get_broker('a', 'c', node_index=1) - remote_broker.initialize(ts_iter.next().internal, POLICIES.default.idx) - timestamp = ts_iter.next() + remote_broker.initialize(next(ts_iter).internal, POLICIES.default.idx) + timestamp = next(ts_iter) for db in (broker, remote_broker): db.put_object( '/a/c/o', timestamp.internal, 0, 'content-type', 'etag', @@ -277,7 +277,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): def put_more_objects(op, *args): if op != 'merge_items': return - path = '/a/c/o_missing_%s' % missing_counter.next() + path = '/a/c/o_missing_%s' % next(missing_counter) broker.put_object(path, time.time(), 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) test_db_replicator.FakeReplConnection = \ @@ -415,11 +415,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): itertools.count(int(time.time()))) # setup a local container broker = self._get_broker('a', 'c', node_index=0) - put_timestamp = ts.next() + put_timestamp = next(ts) broker.initialize(put_timestamp, POLICIES.default.idx) # setup remote container remote_broker = self._get_broker('a', 'c', node_index=1) - remote_put_timestamp = 
ts.next() + remote_put_timestamp = next(ts) remote_broker.initialize(remote_put_timestamp, POLICIES.default.idx) # replicate, expect call to merge_timestamps on remote and local daemon = replicator.ContainerReplicator({}) @@ -460,11 +460,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): # create "local" broker local_broker = self._get_broker('a', 'c', node_index=0) - local_broker.initialize(ts.next(), policy.idx) + local_broker.initialize(next(ts), policy.idx) # create "remote" broker remote_broker = self._get_broker('a', 'c', node_index=1) - remote_broker.initialize(ts.next(), policy.idx) + remote_broker.initialize(next(ts), policy.idx) db_path = local_broker.db_file self.assertTrue(os.path.exists(db_path)) # sanity check @@ -515,7 +515,7 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): 'both_rows': (broker, remote_broker), } dbs = variations[scenario_name] - obj_ts = ts.next() + obj_ts = next(ts) for db in dbs: db.put_object('/a/c/o', obj_ts, 0, 'content-type', 'etag', storage_policy_index=db.storage_policy_index) @@ -547,19 +547,19 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_create_policy_over_newer_remote_delete(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # delete "remote" broker - remote_broker.delete_db(ts.next()) + remote_broker.delete_db(next(ts)) def test_sync_local_create_policy_over_older_remote_delete(self): # remote_row & both_rows cases are covered by @@ -568,11 +568,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): 'no_row', 'local_row'): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # delete older "remote" broker - remote_broker.delete_db(ts.next()) + remote_broker.delete_db(next(ts)) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) def test_sync_local_half_delete_policy_over_newer_remote_create(self): # no_row & remote_row cases are covered by @@ -580,35 +580,35 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios('local_row', 'both_rows'): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # half delete older "local" broker - broker.delete_db(ts.next()) + broker.delete_db(next(ts)) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_recreate_policy_over_newer_remote_create(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - 
broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # older recreate "local" broker - broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + broker.delete_db(next(ts)) + recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_local_recreate_policy_over_older_remote_create(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # recreate "local" broker - broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + broker.delete_db(next(ts)) + recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) @@ -616,29 +616,29 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # recreate "local" broker - broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + broker.delete_db(next(ts)) + recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) # older delete "remote" broker - remote_broker.delete_db(ts.next()) + remote_broker.delete_db(next(ts)) def test_sync_local_recreate_policy_over_older_remote_delete(self): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # older delete "remote" broker - remote_broker.delete_db(ts.next()) + remote_broker.delete_db(next(ts)) # recreate "local" broker - broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + broker.delete_db(next(ts)) + recreate_timestamp = next(ts) broker.update_put_timestamp(recreate_timestamp) broker.update_status_changed_at(recreate_timestamp) @@ -646,17 +646,17 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(): ts, policy, remote_policy, broker, remote_broker = setup # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # older recreate "remote" broker - remote_broker.delete_db(ts.next()) - remote_recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) # recreate "local" broker - broker.delete_db(ts.next()) - local_recreate_timestamp = ts.next() + 
broker.delete_db(next(ts)) + local_recreate_timestamp = next(ts) broker.update_put_timestamp(local_recreate_timestamp) broker.update_status_changed_at(local_recreate_timestamp) @@ -664,19 +664,19 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) def test_sync_remote_create_policy_over_newer_local_delete(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # delete "local" broker - broker.delete_db(ts.next()) + broker.delete_db(next(ts)) def test_sync_remote_create_policy_over_older_local_delete(self): # local_row & both_rows cases are covered by @@ -685,11 +685,11 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): 'no_row', 'remote_row', remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # delete older "local" broker - broker.delete_db(ts.next()) + broker.delete_db(next(ts)) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) def test_sync_remote_half_delete_policy_over_newer_local_create(self): # no_row & both_rows cases are covered by @@ -698,35 +698,35 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # half delete older "remote" broker - remote_broker.delete_db(ts.next()) + remote_broker.delete_db(next(ts)) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) def test_sync_remote_recreate_policy_over_newer_local_create(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # older recreate "remote" broker - remote_broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + recreate_timestamp = next(ts) remote_broker.update_put_timestamp(recreate_timestamp) remote_broker.update_status_changed_at(recreate_timestamp) # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) def test_sync_remote_recreate_policy_over_older_local_create(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + 
remote_broker.initialize(next(ts), remote_policy.idx) # recreate "remote" broker - remote_broker.delete_db(ts.next()) - recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + recreate_timestamp = next(ts) remote_broker.update_put_timestamp(recreate_timestamp) remote_broker.update_status_changed_at(recreate_timestamp) @@ -734,29 +734,29 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # recreate "remote" broker - remote_broker.delete_db(ts.next()) - remote_recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) # older delete "local" broker - broker.delete_db(ts.next()) + broker.delete_db(next(ts)) def test_sync_remote_recreate_policy_over_older_local_delete(self): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # older delete "local" broker - broker.delete_db(ts.next()) + broker.delete_db(next(ts)) # recreate "remote" broker - remote_broker.delete_db(ts.next()) - remote_recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) @@ -764,17 +764,17 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): for setup in self._replication_scenarios(remote_wins=True): ts, policy, remote_policy, broker, remote_broker = setup # create older "local" broker - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # older recreate "local" broker - broker.delete_db(ts.next()) - local_recreate_timestamp = ts.next() + broker.delete_db(next(ts)) + local_recreate_timestamp = next(ts) broker.update_put_timestamp(local_recreate_timestamp) broker.update_status_changed_at(local_recreate_timestamp) # recreate "remote" broker - remote_broker.delete_db(ts.next()) - remote_recreate_timestamp = ts.next() + remote_broker.delete_db(next(ts)) + remote_recreate_timestamp = next(ts) remote_broker.update_put_timestamp(remote_recreate_timestamp) remote_broker.update_status_changed_at(remote_recreate_timestamp) @@ -784,16 +784,16 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): # create "local" broker policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) # create "remote" broker remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) - remote_broker.initialize(ts.next(), remote_policy.idx) + 
remote_broker.initialize(next(ts), remote_policy.idx) # add misplaced row to remote_broker remote_broker.put_object( - '/a/c/o', ts.next(), 0, 'content-type', + '/a/c/o', next(ts), 0, 'content-type', 'etag', storage_policy_index=remote_broker.storage_policy_index) # since this row matches policy index or remote, it shows up in count self.assertEqual(remote_broker.get_info()['object_count'], 1) @@ -831,14 +831,14 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): itertools.count(int(time.time()))) policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # add a misplaced row to *local* broker - obj_put_timestamp = ts.next() + obj_put_timestamp = next(ts) broker.put_object( 'o', obj_put_timestamp, 0, 'content-type', 'etag', storage_policy_index=remote_policy.idx) @@ -891,16 +891,16 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): itertools.count(int(time.time()))) policy = random.choice(list(POLICIES)) broker = self._get_broker('a', 'c', node_index=0) - broker.initialize(ts.next(), policy.idx) + broker.initialize(next(ts), policy.idx) remote_policy = random.choice([p for p in POLICIES if p is not policy]) remote_broker = self._get_broker('a', 'c', node_index=1) - remote_broker.initialize(ts.next(), remote_policy.idx) + remote_broker.initialize(next(ts), remote_policy.idx) # add some rows to brokers for db in (broker, remote_broker): for p in (policy, remote_policy): - db.put_object('o-%s' % p.name, ts.next(), 0, 'content-type', + db.put_object('o-%s' % p.name, next(ts), 0, 'content-type', 'etag', storage_policy_index=p.idx) db._commit_puts() @@ -980,8 +980,8 @@ class TestReplicatorSync(test_db_replicator.TestReplicatorSync): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) broker = self._get_broker('a', 'c', node_index=0) - broker.initialize(ts.next(), 0) - broker.put_object('foo', ts.next(), 0, 'text/plain', 'xyz', deleted=0, + broker.initialize(next(ts), 0) + broker.put_object('foo', next(ts), 0, 'text/plain', 'xyz', deleted=0, storage_policy_index=0) info = broker.get_replication_info() self.assertEqual(1, info['max_row']) diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 166c542883..e76e9b3625 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -175,7 +175,7 @@ class TestContainerController(unittest.TestCase): start = int(time.time()) ts = (Timestamp(t).internal for t in itertools.count(start)) req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'x-timestamp': ts.next()}) + 'x-timestamp': next(ts)}) req.get_response(self.controller) req = Request.blank('/sda1/p/a/c', method='HEAD') response = req.get_response(self.controller) @@ -184,7 +184,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(response.headers['x-container-object-count'], '0') obj_put_request = Request.blank( '/sda1/p/a/c/o', method='PUT', headers={ - 'x-timestamp': ts.next(), + 'x-timestamp': next(ts), 'x-size': 42, 'x-content-type': 'text/plain', 'x-etag': 'x', @@ -240,8 +240,8 @@ class TestContainerController(unittest.TestCase): ts = (Timestamp(t).internal for t in itertools.count(int(time.time()))) 
request_method_times = { - 'PUT': ts.next(), - 'DELETE': ts.next(), + 'PUT': next(ts), + 'DELETE': next(ts), } # setup a deleted container for method in ('PUT', 'DELETE'): @@ -425,7 +425,7 @@ class TestContainerController(unittest.TestCase): policy = random.choice(list(POLICIES)) # Set metadata header req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 201) @@ -439,7 +439,7 @@ class TestContainerController(unittest.TestCase): # now try to update w/o changing the policy for method in ('POST', 'PUT'): req = Request.blank('/sda1/p/a/c', method=method, headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx }) resp = req.get_response(self.controller) @@ -456,7 +456,7 @@ class TestContainerController(unittest.TestCase): policy = random.choice(list(POLICIES)) # Set metadata header req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 201) @@ -471,7 +471,7 @@ class TestContainerController(unittest.TestCase): for other_policy in other_policies: # now try to change it and make sure we get a conflict req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': other_policy.idx }) resp = req.get_response(self.controller) @@ -492,7 +492,7 @@ class TestContainerController(unittest.TestCase): ts = (Timestamp(t).internal for t in itertools.count(time.time())) policy = random.choice(list(POLICIES)) req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 201) @@ -507,7 +507,7 @@ class TestContainerController(unittest.TestCase): for other_policy in other_policies: # now try to change it and make sure we get a conflict req = Request.blank('/sda1/p/a/c', method='POST', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': other_policy.idx }) resp = req.get_response(self.controller) @@ -528,7 +528,7 @@ class TestContainerController(unittest.TestCase): itertools.count(int(time.time()))) # create a container with the default storage policy req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), }) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity check @@ -542,7 +542,7 @@ class TestContainerController(unittest.TestCase): # put again without specifying the storage policy req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), }) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 202) # sanity check @@ -563,7 +563,7 @@ class TestContainerController(unittest.TestCase): itertools.count(int(time.time()))) # create a container with the default storage policy req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Default': int(proxy_default), }) resp = req.get_response(self.controller) @@ -578,7 +578,7 @@ class 
TestContainerController(unittest.TestCase): # put again without proxy specifying the different default req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Default': int(POLICIES.default), }) resp = req.get_response(self.controller) @@ -596,7 +596,7 @@ class TestContainerController(unittest.TestCase): non_default_policy = [p for p in POLICIES if not p.is_default][0] # create a container with the non-default storage policy req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': non_default_policy.idx, }) resp = req.get_response(self.controller) @@ -611,7 +611,7 @@ class TestContainerController(unittest.TestCase): # put again without specifying the storage policy req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), }) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 202) # sanity check @@ -1279,7 +1279,7 @@ class TestContainerController(unittest.TestCase): policy = random.choice(list(POLICIES)) req = Request.blank( '/sda1/p/a/c', method='PUT', - headers={'X-Timestamp': ts.next(), + headers={'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': policy.idx}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity check @@ -1289,14 +1289,14 @@ class TestContainerController(unittest.TestCase): for other_policy in other_policies: # first delete the existing container req = Request.blank('/sda1/p/a/c', method='DELETE', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) # sanity check # at this point, the DB should still exist but be in a deleted # state, so changing the policy index is perfectly acceptable req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': other_policy.idx}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity check @@ -1313,7 +1313,7 @@ class TestContainerController(unittest.TestCase): non_default_policy = random.choice([p for p in POLICIES if not p.is_default]) req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'X-Backend-Storage-Policy-Index': non_default_policy.idx, }) resp = req.get_response(self.controller) @@ -1321,7 +1321,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank( '/sda1/p/a/c', method='DELETE', - headers={'X-Timestamp': ts.next()}) + headers={'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 204) # sanity check @@ -1329,7 +1329,7 @@ class TestContainerController(unittest.TestCase): # so changing the policy index is perfectly acceptable req = Request.blank( '/sda1/p/a/c', method='PUT', - headers={'X-Timestamp': ts.next()}) + headers={'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # sanity check @@ -1354,20 +1354,20 @@ class TestContainerController(unittest.TestCase): ts = (Timestamp(t).internal for t in itertools.count(3)) req = Request.blank('/sda1/p/a/c', method='DELETE', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 409) req = 
Request.blank('/sda1/p/a/c/o', method='DELETE', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) self._update_object_put_headers(req) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', method='DELETE', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 204) req = Request.blank('/sda1/p/a/c', method='GET', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEquals(resp.status_int, 404) @@ -1376,7 +1376,7 @@ class TestContainerController(unittest.TestCase): itertools.count(int(time.time()))) # create container req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'X-Timestamp': ts.next()}) + 'X-Timestamp': next(ts)}) resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 201) # check status @@ -1386,7 +1386,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(int(resp.headers['X-Backend-Storage-Policy-Index']), int(POLICIES.default)) # create object - obj_timestamp = ts.next() + obj_timestamp = next(ts) req = Request.blank( '/sda1/p/a/c/o', method='PUT', headers={ 'X-Timestamp': obj_timestamp, 'X-Size': 1, @@ -1432,7 +1432,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(obj['hash'], 'y') self.assertEqual(obj['content_type'], 'text/html') # now overwrite with a newer time - delete_timestamp = ts.next() + delete_timestamp = next(ts) req = Request.blank( '/sda1/p/a/c/o', method='DELETE', headers={ 'X-Timestamp': delete_timestamp}) diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index 1a1c4443cb..a0844ebb8f 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -81,10 +81,10 @@ class MockProcess(object): class Stream(object): def read(self): - return MockProcess.ret_log.next() + return next(MockProcess.ret_log) def __init__(self, *args, **kwargs): - targs = MockProcess.check_args.next() + targs = next(MockProcess.check_args) for targ in targs: # Allow more than 2 candidate targs # (e.g. 
a case that either node is fine when nodes shuffled) @@ -103,7 +103,7 @@ class MockProcess(object): self.stdout = self.Stream() def wait(self): - return self.ret_code.next() + return next(self.ret_code) @contextmanager diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index fe9ac5794f..9b29dcfff4 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -2342,7 +2342,7 @@ class TestObjectController(unittest.TestCase): def capture_updates(ip, port, method, path, headers, *args, **kwargs): container_updates.append((ip, port, method, path, headers)) # create a new object - create_timestamp = ts.next() + create_timestamp = next(ts) req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test1', headers={'X-Timestamp': create_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -2419,7 +2419,7 @@ class TestObjectController(unittest.TestCase): offset_timestamp) self.assertEqual(resp.body, 'test2') # now overwrite with a newer time - overwrite_timestamp = ts.next() + overwrite_timestamp = next(ts) req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test3', headers={'X-Timestamp': overwrite_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -2489,7 +2489,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.headers['X-Timestamp'], None) self.assertEqual(resp.headers['X-Backend-Timestamp'], offset_delete) # and one more delete with a newer timestamp - delete_timestamp = ts.next() + delete_timestamp = next(ts) req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={'X-Timestamp': delete_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -3131,9 +3131,9 @@ class TestObjectController(unittest.TestCase): def capture_updates(ip, port, method, path, headers, *args, **kwargs): container_updates.append((ip, port, method, path, headers)) - put_timestamp = ts.next().internal + put_timestamp = next(ts).internal delete_at_timestamp = utils.normalize_delete_at_timestamp( - ts.next().normal) + next(ts).normal) delete_at_container = ( int(delete_at_timestamp) / self.object_controller.expiring_objects_container_divisor * @@ -4831,7 +4831,7 @@ class TestObjectController(unittest.TestCase): self.assertFalse(os.path.isdir(object_dir)) for method in methods: headers = { - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'Content-Type': 'application/x-test', 'X-Backend-Storage-Policy-Index': index} if POLICIES[index].policy_type == EC_POLICY: @@ -4851,7 +4851,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': method}, headers={ - 'X-Timestamp': ts.next(), + 'X-Timestamp': next(ts), 'Content-Type': 'application/x-test', 'X-Backend-Storage-Policy-Index': index}) req.body = 'VERIFY' diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index c48a239351..32a6dae02a 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -1440,7 +1440,7 @@ class TestBaseSsync(BaseTestSender): (('tx', ':UPDATES: END'), unexpected), (('rx', ':UPDATES: START'), rx_updates), (('rx', ':UPDATES: END'), unexpected)]) - expect_handshake = handshakes.next() + expect_handshake = next(handshakes) phases = ('tx_missing', 'rx_missing', 'tx_updates', 'rx_updates') results = dict((k, []) for k in phases) handler = unexpected @@ -1451,7 +1451,7 @@ class TestBaseSsync(BaseTestSender): if line == expect_handshake[0]: handler = expect_handshake[1] try: - expect_handshake = handshakes.next() + expect_handshake = next(handshakes) except StopIteration: # 
should be the last line self.assertFalse( @@ -1461,7 +1461,7 @@ class TestBaseSsync(BaseTestSender): try: # check all handshakes occurred - missed = handshakes.next() + missed = next(handshakes) self.fail('Handshake %s not found' % str(missed[0])) except StopIteration: pass @@ -1536,25 +1536,25 @@ class TestSsyncEC(TestBaseSsync): tx_df_mgr = self.daemon._diskfile_router[policy] rx_df_mgr = self.rx_controller._diskfile_router[policy] # o1 has primary and handoff fragment archives - t1 = self.ts_iter.next() + t1 = next(self.ts_iter) tx_objs['o1'] = self._create_ondisk_files( tx_df_mgr, 'o1', policy, t1, (rx_node_index, tx_node_index)) # o2 only has primary - t2 = self.ts_iter.next() + t2 = next(self.ts_iter) tx_objs['o2'] = self._create_ondisk_files( tx_df_mgr, 'o2', policy, t2, (tx_node_index,)) # o3 only has handoff - t3 = self.ts_iter.next() + t3 = next(self.ts_iter) tx_objs['o3'] = self._create_ondisk_files( tx_df_mgr, 'o3', policy, t3, (rx_node_index,)) # o4 primary and handoff fragment archives on tx, handoff in sync on rx - t4 = self.ts_iter.next() + t4 = next(self.ts_iter) tx_objs['o4'] = self._create_ondisk_files( tx_df_mgr, 'o4', policy, t4, (tx_node_index, rx_node_index,)) rx_objs['o4'] = self._create_ondisk_files( rx_df_mgr, 'o4', policy, t4, (rx_node_index,)) # o5 is a tombstone, missing on receiver - t5 = self.ts_iter.next() + t5 = next(self.ts_iter) tx_tombstones['o5'] = self._create_ondisk_files( tx_df_mgr, 'o5', policy, t5, (tx_node_index,)) tx_tombstones['o5'][0].delete(t5) @@ -1621,25 +1621,25 @@ class TestSsyncEC(TestBaseSsync): tx_df_mgr = self.daemon._diskfile_router[policy] rx_df_mgr = self.rx_controller._diskfile_router[policy] # o1 only has primary - t1 = self.ts_iter.next() + t1 = next(self.ts_iter) tx_objs['o1'] = self._create_ondisk_files( tx_df_mgr, 'o1', policy, t1, (tx_node_index,)) # o2 only has primary - t2 = self.ts_iter.next() + t2 = next(self.ts_iter) tx_objs['o2'] = self._create_ondisk_files( tx_df_mgr, 'o2', policy, t2, (tx_node_index,)) # o3 only has primary - t3 = self.ts_iter.next() + t3 = next(self.ts_iter) tx_objs['o3'] = self._create_ondisk_files( tx_df_mgr, 'o3', policy, t3, (tx_node_index,)) # o4 primary fragment archives on tx, handoff in sync on rx - t4 = self.ts_iter.next() + t4 = next(self.ts_iter) tx_objs['o4'] = self._create_ondisk_files( tx_df_mgr, 'o4', policy, t4, (tx_node_index,)) rx_objs['o4'] = self._create_ondisk_files( rx_df_mgr, 'o4', policy, t4, (rx_node_index,)) # o5 is a tombstone, missing on receiver - t5 = self.ts_iter.next() + t5 = next(self.ts_iter) tx_tombstones['o5'] = self._create_ondisk_files( tx_df_mgr, 'o5', policy, t5, (tx_node_index,)) tx_tombstones['o5'][0].delete(t5) @@ -1729,26 +1729,26 @@ class TestSsyncReplication(TestBaseSsync): tx_df_mgr = self.daemon._diskfile_router[policy] rx_df_mgr = self.rx_controller._diskfile_router[policy] # o1 and o2 are on tx only - t1 = self.ts_iter.next() + t1 = next(self.ts_iter) tx_objs['o1'] = self._create_ondisk_files(tx_df_mgr, 'o1', policy, t1) - t2 = self.ts_iter.next() + t2 = next(self.ts_iter) tx_objs['o2'] = self._create_ondisk_files(tx_df_mgr, 'o2', policy, t2) # o3 is on tx and older copy on rx - t3a = self.ts_iter.next() + t3a = next(self.ts_iter) rx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3a) - t3b = self.ts_iter.next() + t3b = next(self.ts_iter) tx_objs['o3'] = self._create_ondisk_files(tx_df_mgr, 'o3', policy, t3b) # o4 in sync on rx and tx - t4 = self.ts_iter.next() + t4 = next(self.ts_iter) tx_objs['o4'] = 
self._create_ondisk_files(tx_df_mgr, 'o4', policy, t4) rx_objs['o4'] = self._create_ondisk_files(rx_df_mgr, 'o4', policy, t4) # o5 is a tombstone, missing on receiver - t5 = self.ts_iter.next() + t5 = next(self.ts_iter) tx_tombstones['o5'] = self._create_ondisk_files( tx_df_mgr, 'o5', policy, t5) tx_tombstones['o5'][0].delete(t5) # o6 is a tombstone, in sync on tx and rx - t6 = self.ts_iter.next() + t6 = next(self.ts_iter) tx_tombstones['o6'] = self._create_ondisk_files( tx_df_mgr, 'o6', policy, t6) tx_tombstones['o6'][0].delete(t6) @@ -1756,9 +1756,9 @@ class TestSsyncReplication(TestBaseSsync): rx_df_mgr, 'o6', policy, t6) rx_tombstones['o6'][0].delete(t6) # o7 is a tombstone on tx, older data on rx - t7a = self.ts_iter.next() + t7a = next(self.ts_iter) rx_objs['o7'] = self._create_ondisk_files(rx_df_mgr, 'o7', policy, t7a) - t7b = self.ts_iter.next() + t7b = next(self.ts_iter) tx_tombstones['o7'] = self._create_ondisk_files( tx_df_mgr, 'o7', policy, t7b) tx_tombstones['o7'][0].delete(t7b) diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 2ca3965453..5f4a407b40 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -336,7 +336,7 @@ class TestObjectUpdater(unittest.TestCase): with Timeout(3): sock, addr = bindsock.accept() events.append( - spawn(accepter, sock, codes.next())) + spawn(accepter, sock, next(codes))) for event in events: err = event.wait() if err: @@ -407,12 +407,12 @@ class TestObjectUpdater(unittest.TestCase): 'x-size': 0, 'x-content-type': 'text/plain', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', - 'x-timestamp': ts.next(), + 'x-timestamp': next(ts), }) data = {'op': op, 'account': account, 'container': container, 'obj': obj, 'headers': headers_out} dfmanager.pickle_async_update(self.sda1, account, container, obj, - data, ts.next(), policy) + data, next(ts), policy) request_log = [] @@ -455,13 +455,13 @@ class TestObjectUpdater(unittest.TestCase): 'x-size': 0, 'x-content-type': 'text/plain', 'x-etag': 'd41d8cd98f00b204e9800998ecf8427e', - 'x-timestamp': ts.next(), + 'x-timestamp': next(ts), 'X-Backend-Storage-Policy-Index': int(policy), }) data = {'op': op, 'account': account, 'container': container, 'obj': obj, 'headers': headers_out} dfmanager.pickle_async_update(self.sda1, account, container, obj, - data, ts.next(), policy) + data, next(ts), policy) request_log = [] diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index d3fafd8b91..30c213e0b4 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -95,7 +95,7 @@ class DynamicResponseFactory(object): def _get_response(self, type_): self.stats[type_] += 1 class_ = self.response_type[type_] - return class_(self.statuses.next()) + return class_(next(self.statuses)) def get_response(self, environ): (version, account, container, obj) = split_path( diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index d16f66c34b..751c388272 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -127,7 +127,7 @@ class BaseObjectControllerMixin(object): itertools.count(int(time.time()))) def ts(self): - return self._ts_iter.next() + return next(self._ts_iter) def replicas(self, policy=None): policy = policy or POLICIES.default @@ -464,9 +464,9 @@ class BaseObjectControllerMixin(object): for policy_index in test_indexes: req = swob.Request.blank( '/v1/a/c/o', method='DELETE', headers={ - 
'X-Timestamp': ts.next().internal}) + 'X-Timestamp': next(ts).internal}) codes = [409] * self.obj_ring.replicas - ts_iter = itertools.repeat(ts.next().internal) + ts_iter = itertools.repeat(next(ts).internal) with set_http_connect(*codes, timestamps=ts_iter): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 409) @@ -736,8 +736,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req = swob.Request.blank( '/v1/a/c/o', method='PUT', headers={ 'Content-Length': 0, - 'X-Timestamp': ts.next().internal}) - ts_iter = itertools.repeat(ts.next().internal) + 'X-Timestamp': next(ts).internal}) + ts_iter = itertools.repeat(next(ts).internal) codes = [409] * self.obj_ring.replicas with set_http_connect(*codes, timestamps=ts_iter): resp = req.get_response(self.app) @@ -747,11 +747,11 @@ class TestReplicatedObjController(BaseObjectControllerMixin, ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) test_indexes = [None] + [int(p) for p in POLICIES] for policy_index in test_indexes: - orig_timestamp = ts.next().internal + orig_timestamp = next(ts).internal req = swob.Request.blank( '/v1/a/c/o', method='PUT', headers={ 'Content-Length': 0, - 'X-Timestamp': ts.next().internal}) + 'X-Timestamp': next(ts).internal}) ts_iter = itertools.repeat(orig_timestamp) codes = [201] * self.obj_ring.replicas with set_http_connect(*codes, timestamps=ts_iter): @@ -763,8 +763,8 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req = swob.Request.blank( '/v1/a/c/o', method='PUT', headers={ 'Content-Length': 0, - 'X-Timestamp': ts.next().internal}) - ts_iter = iter([ts.next().internal, None, None]) + 'X-Timestamp': next(ts).internal}) + ts_iter = iter([next(ts).internal, None, None]) codes = [409] + [201] * (self.obj_ring.replicas - 1) with set_http_connect(*codes, timestamps=ts_iter): resp = req.get_response(self.app) @@ -774,7 +774,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) test_indexes = [None] + [int(p) for p in POLICIES] for policy_index in test_indexes: - put_timestamp = ts.next().internal + put_timestamp = next(ts).internal req = swob.Request.blank( '/v1/a/c/o', method='PUT', headers={ 'Content-Length': 0, @@ -794,7 +794,7 @@ class TestReplicatedObjController(BaseObjectControllerMixin, ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) test_indexes = [None] + [int(p) for p in POLICIES] for policy_index in test_indexes: - put_timestamp = ts.next().internal + put_timestamp = next(ts).internal req = swob.Request.blank( '/v1/a/c/o', method='PUT', headers={ 'Content-Length': 0, diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 5f85c55e4a..46370590eb 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -2879,7 +2879,7 @@ class TestObjectController(unittest.TestCase): set_http_connect(201, 201, 201, 201, 201, give_content_type=lambda content_type: self.assertEquals(content_type, - expected.next())) + next(expected))) # We need into include a transfer-encoding to get past # constraints.check_object_creation() req = Request.blank('/v1/a/c/%s' % filename, {}, @@ -4283,7 +4283,7 @@ class TestObjectController(unittest.TestCase): unused_status_list = [] while True: try: - unused_status_list.append(new_connect.code_iter.next()) + unused_status_list.append(next(new_connect.code_iter)) except StopIteration: break if unused_status_list: diff --git a/test/unit/proxy/test_sysmeta.py 
b/test/unit/proxy/test_sysmeta.py
index 6b5727a461..cc86960987 100644
--- a/test/unit/proxy/test_sysmeta.py
+++ b/test/unit/proxy/test_sysmeta.py
@@ -39,7 +39,7 @@ class FakeServerConnection(WSGIContext):

     def read(self, amt=None):
         try:
-            result = self.resp_iter.next()
+            result = next(self.resp_iter)
             return result
         except StopIteration:
             return ''

From af72881d1dcc46486d8d652b92a0ec9aa9ca2bfa Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Mon, 15 Jun 2015 13:36:36 -0700
Subject: [PATCH 63/98] Use just IP, not port, when determining partition
 placement

In the ring builder, we place partitions with maximum possible
dispersion across tiers, where a "tier" is region, then zone, then
IP/port, then device. Now, instead of IP/port, just use IP.

The port wasn't really getting us anything; two different object servers
on two different ports on one machine aren't separate failure domains.
However, if someone has only a few machines and is using one object
server on its own port per disk, then the ring builder would end up with
every disk in its own IP/port tier, resulting in bad (with respect to
durability) partition placement.

For example: assume 1 region, 1 zone, 4 machines, 48 total disks (12 per
machine), and one object server (and hence one port) per disk. With the
old behavior, partition replicas will all go in the one region, then the
one zone, then pick one of 48 IP/port pairs, then pick the one disk
therein. This gives the same result as randomly picking 3 disks (without
replacement) to store data on; it completely ignores machine boundaries.

With the new behavior, the replica placer will pick the one region, then
the one zone, then one of 4 IPs, then one of 12 disks therein. This
gives the optimal placement with respect to durability.

The same applies to Ring.get_more_nodes().

Co-Authored-By: Kota Tsuyuzaki
Change-Id: Ibbd740c51296b7e360845b5309d276d7383a3742
---
 swift/common/ring/ring.py             |  34 ++++-----
 swift/common/ring/utils.py            | 104 +++++++++++++-------------
 test/unit/common/ring/test_builder.py |  68 +++++++++++++----
 test/unit/common/ring/test_ring.py    |   3 +-
 test/unit/common/ring/test_utils.py   |  72 ++++++++----------
 5 files changed, 157 insertions(+), 124 deletions(-)

diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py
index 62e19951d3..d4feaa8e23 100644
--- a/swift/common/ring/ring.py
+++ b/swift/common/ring/ring.py
@@ -179,18 +179,17 @@ class Ring(object):
         # doing it on every call to get_more_nodes().
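        # To make the tier change concrete (cf. the tiers_for_dev() test
        # updates later in this patch): a device such as
        #     {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
        #      'port': 6000, 'id': 0}
        # now yields the tiers
        #     ((1,), (1, 1), (1, 1, '192.168.1.1'), (1, 1, '192.168.1.1', 0))
        # where the third tier used to be (1, 1, '192.168.1.1:6000').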
regions = set() zones = set() - ip_ports = set() + ips = set() self._num_devs = 0 for dev in self._devs: if dev: regions.add(dev['region']) zones.add((dev['region'], dev['zone'])) - ip_ports.add((dev['region'], dev['zone'], - dev['ip'], dev['port'])) + ips.add((dev['region'], dev['zone'], dev['ip'])) self._num_devs += 1 self._num_regions = len(regions) self._num_zones = len(zones) - self._num_ip_ports = len(ip_ports) + self._num_ips = len(ips) def _rebuild_tier_data(self): self.tier2devs = defaultdict(list) @@ -329,8 +328,8 @@ class Ring(object): used = set(d['id'] for d in primary_nodes) same_regions = set(d['region'] for d in primary_nodes) same_zones = set((d['region'], d['zone']) for d in primary_nodes) - same_ip_ports = set((d['region'], d['zone'], d['ip'], d['port']) - for d in primary_nodes) + same_ips = set( + (d['region'], d['zone'], d['ip']) for d in primary_nodes) parts = len(self._replica2part2dev_id[0]) start = struct.unpack_from( @@ -356,9 +355,9 @@ class Ring(object): used.add(dev_id) same_regions.add(region) zone = dev['zone'] - ip_port = (region, zone, dev['ip'], dev['port']) + ip = (region, zone, dev['ip']) same_zones.add((region, zone)) - same_ip_ports.add(ip_port) + same_ips.add(ip) if len(same_regions) == self._num_regions: hit_all_regions = True break @@ -380,17 +379,17 @@ class Ring(object): yield dev used.add(dev_id) same_zones.add(zone) - ip_port = zone + (dev['ip'], dev['port']) - same_ip_ports.add(ip_port) + ip = zone + (dev['ip'],) + same_ips.add(ip) if len(same_zones) == self._num_zones: hit_all_zones = True break - hit_all_ip_ports = len(same_ip_ports) == self._num_ip_ports + hit_all_ips = len(same_ips) == self._num_ips for handoff_part in chain(xrange(start, parts, inc), xrange(inc - ((parts - start) % inc), start, inc)): - if hit_all_ip_ports: + if hit_all_ips: # We've exhausted the pool of unused backends, so stop # looking. 
break @@ -398,14 +397,13 @@ class Ring(object): if handoff_part < len(part2dev_id): dev_id = part2dev_id[handoff_part] dev = self._devs[dev_id] - ip_port = (dev['region'], dev['zone'], - dev['ip'], dev['port']) - if dev_id not in used and ip_port not in same_ip_ports: + ip = (dev['region'], dev['zone'], dev['ip']) + if dev_id not in used and ip not in same_ips: yield dev used.add(dev_id) - same_ip_ports.add(ip_port) - if len(same_ip_ports) == self._num_ip_ports: - hit_all_ip_ports = True + same_ips.add(ip) + if len(same_ips) == self._num_ips: + hit_all_ips = True break hit_all_devs = len(used) == self._num_devs diff --git a/swift/common/ring/utils.py b/swift/common/ring/utils.py index b00ef825d4..4fcee2eb24 100644 --- a/swift/common/ring/utils.py +++ b/swift/common/ring/utils.py @@ -29,7 +29,7 @@ def tiers_for_dev(dev): """ t1 = dev['region'] t2 = dev['zone'] - t3 = "{ip}:{port}".format(ip=dev.get('ip'), port=dev.get('port')) + t3 = dev['ip'] t4 = dev['id'] return ((t1,), @@ -48,40 +48,40 @@ def build_tier_tree(devices): Example: - region 1 -+---- zone 1 -+---- 192.168.101.1:6000 -+---- device id 0 - | | | - | | +---- device id 1 - | | | - | | +---- device id 2 + region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0 + | | | + | | +---- device id 1 + | | | + | | +---- device id 2 | | - | +---- 192.168.101.2:6000 -+---- device id 3 - | | - | +---- device id 4 - | | - | +---- device id 5 + | +---- 192.168.101.2 -+---- device id 3 + | | + | +---- device id 4 + | | + | +---- device id 5 | - +---- zone 2 -+---- 192.168.102.1:6000 -+---- device id 6 - | | - | +---- device id 7 - | | - | +---- device id 8 + +---- zone 2 -+---- 192.168.102.1 -+---- device id 6 + | | + | +---- device id 7 + | | + | +---- device id 8 | - +---- 192.168.102.2:6000 -+---- device id 9 - | - +---- device id 10 + +---- 192.168.102.2 -+---- device id 9 + | + +---- device id 10 - region 2 -+---- zone 1 -+---- 192.168.201.1:6000 -+---- device id 12 - | | - | +---- device id 13 - | | - | +---- device id 14 + region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12 + | | + | +---- device id 13 + | | + | +---- device id 14 | - +---- 192.168.201.2:6000 -+---- device id 15 - | - +---- device id 16 - | - +---- device id 17 + +---- 192.168.201.2 -+---- device id 15 + | + +---- device id 16 + | + +---- device id 17 The tier tree would look like: { @@ -90,30 +90,30 @@ def build_tier_tree(devices): (1,): [(1, 1), (1, 2)], (2,): [(2, 1)], - (1, 1): [(1, 1, 192.168.101.1:6000), - (1, 1, 192.168.101.2:6000)], - (1, 2): [(1, 2, 192.168.102.1:6000), - (1, 2, 192.168.102.2:6000)], - (2, 1): [(2, 1, 192.168.201.1:6000), - (2, 1, 192.168.201.2:6000)], + (1, 1): [(1, 1, 192.168.101.1), + (1, 1, 192.168.101.2)], + (1, 2): [(1, 2, 192.168.102.1), + (1, 2, 192.168.102.2)], + (2, 1): [(2, 1, 192.168.201.1), + (2, 1, 192.168.201.2)], - (1, 1, 192.168.101.1:6000): [(1, 1, 192.168.101.1:6000, 0), - (1, 1, 192.168.101.1:6000, 1), - (1, 1, 192.168.101.1:6000, 2)], - (1, 1, 192.168.101.2:6000): [(1, 1, 192.168.101.2:6000, 3), - (1, 1, 192.168.101.2:6000, 4), - (1, 1, 192.168.101.2:6000, 5)], - (1, 2, 192.168.102.1:6000): [(1, 2, 192.168.102.1:6000, 6), - (1, 2, 192.168.102.1:6000, 7), - (1, 2, 192.168.102.1:6000, 8)], - (1, 2, 192.168.102.2:6000): [(1, 2, 192.168.102.2:6000, 9), - (1, 2, 192.168.102.2:6000, 10)], - (2, 1, 192.168.201.1:6000): [(2, 1, 192.168.201.1:6000, 12), - (2, 1, 192.168.201.1:6000, 13), - (2, 1, 192.168.201.1:6000, 14)], - (2, 1, 192.168.201.2:6000): [(2, 1, 192.168.201.2:6000, 15), - (2, 1, 192.168.201.2:6000, 16), 
- (2, 1, 192.168.201.2:6000, 17)], + (1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0), + (1, 1, 192.168.101.1, 1), + (1, 1, 192.168.101.1, 2)], + (1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3), + (1, 1, 192.168.101.2, 4), + (1, 1, 192.168.101.2, 5)], + (1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6), + (1, 2, 192.168.102.1, 7), + (1, 2, 192.168.102.1, 8)], + (1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9), + (1, 2, 192.168.102.2, 10)], + (2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12), + (2, 1, 192.168.201.1, 13), + (2, 1, 192.168.201.1, 14)], + (2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15), + (2, 1, 192.168.201.2, 16), + (2, 1, 192.168.201.2, 17)], } :devices: device dicts from which to generate the tree diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index a05823368c..f1840b8210 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -1178,6 +1178,46 @@ class TestRingBuilder(unittest.TestCase): 9: 64, }) + def test_server_per_port(self): + # 3 servers, 3 disks each, with each disk on its own port + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.1', 'port': 10000, 'device': 'sdx'}) + rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.1', 'port': 10001, 'device': 'sdy'}) + + rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.2', 'port': 10000, 'device': 'sdx'}) + rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.2', 'port': 10001, 'device': 'sdy'}) + + rb.add_dev({'id': 6, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.3', 'port': 10000, 'device': 'sdx'}) + rb.add_dev({'id': 7, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.3', 'port': 10001, 'device': 'sdy'}) + + rb.rebalance(seed=1) + + rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.1', 'port': 10002, 'device': 'sdz'}) + rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.2', 'port': 10002, 'device': 'sdz'}) + rb.add_dev({'id': 8, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '10.0.0.3', 'port': 10002, 'device': 'sdz'}) + + rb.pretend_min_part_hours_passed() + rb.rebalance(seed=1) + + poorly_dispersed = [] + for part in range(rb.parts): + on_nodes = set() + for replica in range(rb.replicas): + dev_id = rb._replica2part2dev[replica][part] + on_nodes.add(rb.devs[dev_id]['ip']) + if len(on_nodes) < rb.replicas: + poorly_dispersed.append(part) + self.assertEqual(poorly_dispersed, []) + def test_load(self): rb = ring.RingBuilder(8, 3, 1) devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1, @@ -1503,9 +1543,9 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000'): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000', 0): [0, 128, 128, 0], - (0, 0, '127.0.0.1:10000', 1): [0, 128, 128, 0], + (0, 0, '127.0.0.1'): [0, 0, 0, 256], + (0, 0, '127.0.0.1', 0): [0, 128, 128, 0], + (0, 0, '127.0.0.1', 1): [0, 128, 128, 0], }) def test_dispersion_with_zero_weight_devices_with_parts(self): @@ -1522,10 +1562,10 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000'): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000', 0): [0, 256, 0, 0], - (0, 0, '127.0.0.1:10000', 1): [0, 
256, 0, 0], - (0, 0, '127.0.0.1:10000', 2): [0, 256, 0, 0], + (0, 0, '127.0.0.1'): [0, 0, 0, 256], + (0, 0, '127.0.0.1', 0): [0, 256, 0, 0], + (0, 0, '127.0.0.1', 1): [0, 256, 0, 0], + (0, 0, '127.0.0.1', 2): [0, 256, 0, 0], }) # now mark a device 2 for decom rb.set_dev_weight(2, 0.0) @@ -1536,10 +1576,10 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000'): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000', 0): [0, 256, 0, 0], - (0, 0, '127.0.0.1:10000', 1): [0, 256, 0, 0], - (0, 0, '127.0.0.1:10000', 2): [0, 256, 0, 0], + (0, 0, '127.0.0.1'): [0, 0, 0, 256], + (0, 0, '127.0.0.1', 0): [0, 256, 0, 0], + (0, 0, '127.0.0.1', 1): [0, 256, 0, 0], + (0, 0, '127.0.0.1', 2): [0, 256, 0, 0], }) rb.pretend_min_part_hours_passed() rb.rebalance(seed=3) @@ -1547,9 +1587,9 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb._dispersion_graph, { (0,): [0, 0, 0, 256], (0, 0): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000'): [0, 0, 0, 256], - (0, 0, '127.0.0.1:10000', 0): [0, 128, 128, 0], - (0, 0, '127.0.0.1:10000', 1): [0, 128, 128, 0], + (0, 0, '127.0.0.1'): [0, 0, 0, 256], + (0, 0, '127.0.0.1', 0): [0, 128, 128, 0], + (0, 0, '127.0.0.1', 1): [0, 128, 128, 0], }) diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index b97b60eeee..376eac674c 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -480,7 +480,8 @@ class TestRing(TestRingBase): for device in xrange(1, 4): rb.add_dev({'id': next_dev_id, 'ip': '1.2.%d.%d' % (zone, server), - 'port': 1234, 'zone': zone, 'region': 0, + 'port': 1234 + device, + 'zone': zone, 'region': 0, 'weight': 1.0}) next_dev_id += 1 rb.rebalance(seed=1) diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index bf59a33be1..8eaca09756 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -70,8 +70,8 @@ class TestUtils(unittest.TestCase): tiers_for_dev(self.test_dev), ((1,), (1, 1), - (1, 1, '192.168.1.1:6000'), - (1, 1, '192.168.1.1:6000', 0))) + (1, 1, '192.168.1.1'), + (1, 1, '192.168.1.1', 0))) def test_build_tier_tree(self): ret = build_tier_tree(self.test_devs) @@ -79,27 +79,27 @@ class TestUtils(unittest.TestCase): self.assertEqual(ret[()], set([(1,)])) self.assertEqual(ret[(1,)], set([(1, 1), (1, 2)])) self.assertEqual(ret[(1, 1)], - set([(1, 1, '192.168.1.2:6000'), - (1, 1, '192.168.1.1:6000')])) + set([(1, 1, '192.168.1.2'), + (1, 1, '192.168.1.1')])) self.assertEqual(ret[(1, 2)], - set([(1, 2, '192.168.2.2:6000'), - (1, 2, '192.168.2.1:6000')])) - self.assertEqual(ret[(1, 1, '192.168.1.1:6000')], - set([(1, 1, '192.168.1.1:6000', 0), - (1, 1, '192.168.1.1:6000', 1), - (1, 1, '192.168.1.1:6000', 2)])) - self.assertEqual(ret[(1, 1, '192.168.1.2:6000')], - set([(1, 1, '192.168.1.2:6000', 3), - (1, 1, '192.168.1.2:6000', 4), - (1, 1, '192.168.1.2:6000', 5)])) - self.assertEqual(ret[(1, 2, '192.168.2.1:6000')], - set([(1, 2, '192.168.2.1:6000', 6), - (1, 2, '192.168.2.1:6000', 7), - (1, 2, '192.168.2.1:6000', 8)])) - self.assertEqual(ret[(1, 2, '192.168.2.2:6000')], - set([(1, 2, '192.168.2.2:6000', 9), - (1, 2, '192.168.2.2:6000', 10), - (1, 2, '192.168.2.2:6000', 11)])) + set([(1, 2, '192.168.2.2'), + (1, 2, '192.168.2.1')])) + self.assertEqual(ret[(1, 1, '192.168.1.1')], + set([(1, 1, '192.168.1.1', 0), + (1, 1, '192.168.1.1', 1), + (1, 1, '192.168.1.1', 2)])) + self.assertEqual(ret[(1, 1, '192.168.1.2')], + set([(1, 
1, '192.168.1.2', 3), + (1, 1, '192.168.1.2', 4), + (1, 1, '192.168.1.2', 5)])) + self.assertEqual(ret[(1, 2, '192.168.2.1')], + set([(1, 2, '192.168.2.1', 6), + (1, 2, '192.168.2.1', 7), + (1, 2, '192.168.2.1', 8)])) + self.assertEqual(ret[(1, 2, '192.168.2.2')], + set([(1, 2, '192.168.2.2', 9), + (1, 2, '192.168.2.2', 10), + (1, 2, '192.168.2.2', 11)])) def test_is_valid_ip(self): self.assertTrue(is_valid_ip("127.0.0.1")) @@ -623,11 +623,11 @@ class TestUtils(unittest.TestCase): def test_dispersion_report(self): rb = ring.RingBuilder(8, 3, 0) rb.add_dev({'id': 0, 'region': 1, 'zone': 0, 'weight': 100, - 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda1'}) + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1'}) rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 200, 'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'}) rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 200, - 'ip': '127.0.0.1', 'port': 10002, 'device': 'sda1'}) + 'ip': '127.0.0.2', 'port': 10002, 'device': 'sda1'}) rb.rebalance(seed=10) self.assertEqual(rb.dispersion, 39.84375) @@ -635,16 +635,6 @@ class TestUtils(unittest.TestCase): self.assertEqual(report['worst_tier'], 'r1z1') self.assertEqual(report['max_dispersion'], 39.84375) - # Each node should store 256 partitions to avoid multiple replicas - # 2/5 of total weight * 768 ~= 307 -> 51 partitions on each node in - # zone 1 are stored at least twice on the nodes - expected = [ - ['r1z1', 2, '0', '154', '102'], - ['r1z1-127.0.0.1:10001', 1, '205', '51', '0'], - ['r1z1-127.0.0.1:10001/sda1', 1, '205', '51', '0'], - ['r1z1-127.0.0.1:10002', 1, '205', '51', '0'], - ['r1z1-127.0.0.1:10002/sda1', 1, '205', '51', '0']] - def build_tier_report(max_replicas, placed_parts, dispersion, replicas): return { @@ -653,16 +643,20 @@ class TestUtils(unittest.TestCase): 'dispersion': dispersion, 'replicas': replicas, } + + # Each node should store 256 partitions to avoid multiple replicas + # 2/5 of total weight * 768 ~= 307 -> 51 partitions on each node in + # zone 1 are stored at least twice on the nodes expected = [ ['r1z1', build_tier_report( 2, 256, 39.84375, [0, 0, 154, 102])], - ['r1z1-127.0.0.1:10001', build_tier_report( + ['r1z1-127.0.0.1', build_tier_report( 1, 256, 19.921875, [0, 205, 51, 0])], - ['r1z1-127.0.0.1:10001/sda1', build_tier_report( + ['r1z1-127.0.0.1/sda1', build_tier_report( 1, 256, 19.921875, [0, 205, 51, 0])], - ['r1z1-127.0.0.1:10002', build_tier_report( + ['r1z1-127.0.0.2', build_tier_report( 1, 256, 19.921875, [0, 205, 51, 0])], - ['r1z1-127.0.0.1:10002/sda1', build_tier_report( + ['r1z1-127.0.0.2/sda1', build_tier_report( 1, 256, 19.921875, [0, 205, 51, 0])], ] report = dispersion_report(rb, 'r1z1.*', verbose=True) @@ -678,7 +672,7 @@ class TestUtils(unittest.TestCase): report = dispersion_report(rb) self.assertEqual(rb.dispersion, 40.234375) - self.assertEqual(report['worst_tier'], 'r1z0-127.0.0.1:10003') + self.assertEqual(report['worst_tier'], 'r1z0-127.0.0.1') self.assertEqual(report['max_dispersion'], 30.078125) From df134df901a13c2261a8205826ea1aa8d75dc283 Mon Sep 17 00:00:00 2001 From: Darrell Bishop Date: Thu, 14 May 2015 22:14:15 -0700 Subject: [PATCH 64/98] Allow 1+ object-servers-per-disk deployment Enabled by a new > 0 integer config value, "servers_per_port" in the [DEFAULT] config section for object-server and/or replication server configs. The setting's integer value determines how many different object-server workers handle requests for any single unique local port in the ring. 
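As a concrete sketch (the IP and the value 3 are illustrative only, not
recommendations from this patch), enabling the feature in
/etc/swift/object-server.conf looks like:

    [DEFAULT]
    bind_ip = 1.1.0.1
    # Run 3 object-server worker processes for each local disk port found
    # in the rings; the "workers" setting is ignored when this is set.
    servers_per_port = 3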
In this mode, the parent swift-object-server process continues to run as
the original user (i.e. root if low-port binding is required), binds to
all ports as defined in the ring, and forks off the specified number of
workers per listen socket. The per-port child servers drop privileges
and behave pretty much how object-server workers always have, except
that because the ring has unique ports per disk, each object-server will
only be handling requests for a single disk.

The parent process detects dead servers and restarts them (with the
correct listen socket), starts missing servers when an updated ring file
is found with a device on the server with a new port, and kills
extraneous servers when their port is found to no longer be in the ring.
The ring files are stat'ed at most every "ring_check_interval" seconds,
as configured in the object-server config (same default of 15s).

Immediately stopping all swift-object-server worker processes still
works by sending the parent a SIGTERM. Likewise, a SIGHUP to the parent
process still causes the parent process to close all listen sockets and
exit, allowing existing children to finish serving their existing
requests.

The drop_privileges helper function now has an optional param to
suppress the setsid() call, which otherwise screws up the child workers'
process management.

The class method RingData.load() can be told to only load the ring
metadata (i.e. everything except replica2part2dev_id) with the optional
kwarg, metadata_only=True. This is used to keep the parent and all
forked off workers from unnecessarily having full copies of all storage
policy rings in memory.

A new helper class, swift.common.storage_policy.BindPortsCache, provides
a method to return a set of all device ports in all rings for the server
on which it is instantiated (identified by its set of IP addresses). The
BindPortsCache instance will track mtimes of ring files, so they are not
opened more frequently than necessary.

This patch includes enhancements to the probe tests and
object-replicator/object-reconstructor config plumbing to allow the
probe tests to work correctly both in the "normal" config (same IP but
unique ports for each SAIO "server") and a server-per-port setup where
each SAIO "server" must have a unique IP address and unique port per
disk within each "server". The main probe tests only work with 4 servers
and 4 disks, but you can see the difference in the rings for the EC
probe tests where there are 2 disks per server for a total of 8 disks.

Specifically, swift.common.ring.utils.is_local_device() will ignore the
ports when the "my_port" argument is None. Then, object-replicator and
object-reconstructor both set self.bind_port to None if servers_per_port
is enabled. Bonus improvement for IPv6 addresses in is_local_device().

This PR for vagrant-swift-all-in-one will aid in testing this patch:
https://github.com/swiftstack/vagrant-swift-all-in-one/pull/16/

Also allow SAIO to answer is_local_device() better; common SAIO setups
have multiple "servers" all on the same host with different ports for
the different "servers" (which happen to match the IPs specified in the
rings for the devices on each of those "servers"). However, you can
configure the SAIO to have different localhost IP addresses (e.g.
127.0.0.1, 127.0.0.2, etc.) in the ring and in the servers' config
files' bind_ip setting. This new whataremyips() implementation combined
with a little plumbing allows is_local_device() to accurately answer,
even on an SAIO.
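To make the new locality check concrete, a small sketch (the IPs and
ports are made up; the behavior follows the is_local_device() change
below):

    from swift.common.ring.utils import is_local_device

    my_ips = ['1.1.0.1']
    # Normal mode: both the IP and the port must match.
    is_local_device(my_ips, 6000, '1.1.0.1', 6000)  # True
    is_local_device(my_ips, 6000, '1.1.0.1', 6001)  # False
    # servers_per_port mode: my_port is None, so the port is ignored.
    is_local_device(my_ips, None, '1.1.0.1', 6001)  # True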
In the default case (an unspecified bind_ip defaults to '0.0.0.0') as
well as an explicit "bind to everything" like '0.0.0.0' or '::',
whataremyips() behaves as it always has, returning all IP addresses for
the server.

Also updated probe tests to handle each "server" in the SAIO having a
unique IP address.

For some (noisy) benchmarks that show servers_per_port=X is at least as
good as the same number of "normal" workers:
https://gist.github.com/dbishop/c214f89ca708a6b1624a#file-summary-md

Benchmarks showing the benefits of I/O isolation with a small number of
slow disks:
https://gist.github.com/dbishop/fd0ab067babdecfb07ca#file-results-md

If you were wondering what the overhead of threads_per_disk looks like:
https://gist.github.com/dbishop/1d14755fedc86a161718#file-tabular_results-md

DocImpact

Change-Id: I2239a4000b41a7e7cc53465ce794af49d44796c6
---
 doc/source/deployment_guide.rst         | 123 +++++-
 etc/object-server.conf-sample           |   9 +-
 swift/account/reaper.py                 |   2 +-
 swift/common/db_replicator.py           |   3 +-
 swift/common/ring/ring.py               |  35 +-
 swift/common/ring/utils.py              |  20 +-
 swift/common/storage_policy.py          |  62 ++-
 swift/common/utils.py                   |  31 +-
 swift/common/wsgi.py                    | 492 ++++++++++++++++++++++--
 swift/container/sync.py                 |   3 +-
 swift/obj/reconstructor.py              |   8 +-
 swift/obj/replicator.py                 |   7 +-
 test/probe/common.py                    | 171 +++++---
 test/probe/test_account_failures.py     |   8 +-
 test/probe/test_container_failures.py   |  25 +-
 test/probe/test_empty_device_handoff.py |  36 +-
 test/probe/test_object_async_update.py  |   8 +-
 test/probe/test_object_handoff.py       |  18 +-
 test/probe/test_reconstructor_revert.py |   4 +-
 test/unit/__init__.py                   |  14 +-
 test/unit/common/ring/test_ring.py      |  15 +
 test/unit/common/ring/test_utils.py     |  35 +-
 test/unit/common/test_db_replicator.py  |  12 +-
 test/unit/common/test_storage_policy.py | 140 ++++++-
 test/unit/common/test_utils.py          |  27 ++
 test/unit/common/test_wsgi.py           | 436 ++++++++++++++++++++-
 test/unit/container/test_sync.py        |   6 +-
 test/unit/obj/test_reconstructor.py     | 123 +++++-
 test/unit/obj/test_replicator.py        | 350 +++++++++++++++++--
 29 files changed, 1979 insertions(+), 244 deletions(-)

diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst
index 552dfea314..a137d786b0 100644
--- a/doc/source/deployment_guide.rst
+++ b/doc/source/deployment_guide.rst
@@ -139,6 +139,72 @@ swift-ring-builder with no options will display help text with available
 commands and options. More information on how the ring works internally
 can be found in the :doc:`Ring Overview `.
 
+.. _server-per-port-configuration:
+
+-------------------------------
+Running object-servers Per Disk
+-------------------------------
+
+The lack of true asynchronous file I/O on Linux leaves the object-server
+workers vulnerable to misbehaving disks. Because any object-server worker can
+service a request for any disk, and a slow I/O request blocks the eventlet hub,
+a single slow disk can impair an entire storage node. This also prevents
+object servers from fully utilizing all their disks during heavy load.
+
+The :ref:`threads_per_disk ` option was one way to
+address this, but came with severe performance overhead which was worse
+than the benefit of I/O isolation. Any clusters using threads_per_disk should
+switch to using `servers_per_port`.
+
+Another way to get full I/O isolation is to give each disk on a storage node a
+different port in the storage policy rings. Then set the
+:ref:`servers_per_port `
+option in the object-server config.
NOTE: while the purpose of this config +setting is to run one or more object-server worker processes per *disk*, the +implementation just runs object-servers per unique port of local devices in the +rings. The deployer must combine this option with appropriately-configured +rings to benefit from this feature. + +Here's an example (abbreviated) old-style ring (2 node cluster with 2 disks +each):: + + Devices: id region zone ip address port replication ip replication port name + 0 1 1 1.1.0.1 6000 1.1.0.1 6000 d1 + 1 1 1 1.1.0.1 6000 1.1.0.1 6000 d2 + 2 1 2 1.1.0.2 6000 1.1.0.2 6000 d3 + 3 1 2 1.1.0.2 6000 1.1.0.2 6000 d4 + +And here's the same ring set up for `servers_per_port`:: + + Devices: id region zone ip address port replication ip replication port name + 0 1 1 1.1.0.1 6000 1.1.0.1 6000 d1 + 1 1 1 1.1.0.1 6001 1.1.0.1 6001 d2 + 2 1 2 1.1.0.2 6000 1.1.0.2 6000 d3 + 3 1 2 1.1.0.2 6001 1.1.0.2 6001 d4 + +When migrating from normal to `servers_per_port`, perform these steps in order: + + #. Upgrade Swift code to a version capable of doing `servers_per_port`. + + #. Enable `servers_per_port` with a > 0 value + + #. Restart `swift-object-server` processes with a SIGHUP. At this point, you + will have the `servers_per_port` number of `swift-object-server` processes + serving all requests for all disks on each node. This preserves + availability, but you should perform the next step as quickly as possible. + + #. Push out new rings that actually have different ports per disk on each + server. One of the ports in the new ring should be the same as the port + used in the old ring ("6000" in the example above). This will cover + existing proxy-server processes who haven't loaded the new ring yet. They + can still talk to any storage node regardless of whether or not that + storage node has loaded the ring and started object-server processes on the + new ports. + +If you do not run a separate object-server for replication, then this setting +must be available to the object-replicator and object-reconstructor (i.e. +appear in the [DEFAULT] config section). + .. _general-service-configuration: ----------------------------- @@ -149,14 +215,14 @@ Most Swift services fall into two categories. Swift's wsgi servers and background daemons. For more information specific to the configuration of Swift's wsgi servers -with paste deploy see :ref:`general-server-configuration` +with paste deploy see :ref:`general-server-configuration`. Configuration for servers and daemons can be expressed together in the same file for each type of server, or separately. If a required section for the service trying to start is missing there will be an error. The sections not used by the service are ignored. -Consider the example of an object storage node. By convention configuration +Consider the example of an object storage node. By convention, configuration for the object-server, object-updater, object-replicator, and object-auditor exist in a single file ``/etc/swift/object-server.conf``:: @@ -323,7 +389,7 @@ max_header_size 8192 max_header_size is the max number of bytes in tokens including more than 7 catalog entries. See also include_service_catalog in proxy-server.conf-sample (documented in - overview_auth.rst) + overview_auth.rst). =================== ========== ============================================= --------------------------- @@ -335,6 +401,8 @@ etc/object-server.conf-sample in the source code repository. The following configuration options are available: +.. 
_object-server-default-options:
+
 [DEFAULT]
 
 =================== ========== =============================================
@@ -353,12 +421,30 @@ workers             auto       Override the number of pre-forked workers
                                should be an integer, zero means no fork. If
                                unset, it will try to default to the number
                                of effective cpu cores and fallback to one.
-                               Increasing the number of workers may reduce
-                               the possibility of slow file system
-                               operations in one request from negatively
-                               impacting other requests, but may not be as
-                               efficient as tuning :ref:`threads_per_disk
-                               `
+                               Increasing the number of workers helps keep
+                               slow filesystem operations in one request
+                               from negatively impacting other requests,
+                               but only the :ref:`servers_per_port
+                               `
+                               option provides complete I/O isolation with
+                               no measurable overhead.
+servers_per_port    0          If each disk in each storage policy ring has
+                               unique port numbers for its "ip" value, you
+                               can use this setting to have each
+                               object-server worker only service requests
+                               for the single disk matching the port in the
+                               ring. The value of this setting determines
+                               how many worker processes run for each port
+                               (disk) in the ring. If you have 24 disks
+                               per server, and this setting is 4, then
+                               each storage node will have 1 + (24 * 4) =
+                               97 total object-server processes running.
+                               This gives complete I/O isolation, drastically
+                               reducing the impact of slow disks on storage
+                               node performance. The object-replicator and
+                               object-reconstructor need to see this setting
+                               too, so it must be in the [DEFAULT] section.
+                               See :ref:`server-per-port-configuration`.
 max_clients         1024       Maximum number of clients one worker can
                                process simultaneously (it will actually
                                accept(2) N + 1). Setting this to one (1)
@@ -421,13 +507,12 @@ keep_cache_private        false       Allow non-public objects to stay
 threads_per_disk          0           Size of the per-disk thread pool
                                       used for performing disk I/O. The
                                       default of 0 means to not use a
-                                      per-disk thread pool. It is
-                                      recommended to keep this value
-                                      small, as large values can result
-                                      in high read latencies due to
-                                      large queue depths. A good
-                                      starting point is 4 threads per
-                                      disk.
+                                      per-disk thread pool.
+                                      This option is no longer
+                                      recommended and the
+                                      :ref:`servers_per_port
+                                      `
+                                      should be used instead.
 replication_concurrency   4           Set to restrict the number of
                                       concurrent incoming REPLICATION
                                       requests; set to 0 for unlimited
@@ -562,7 +647,7 @@ workers                 auto       Override the number of pre-forked workers
                                    the possibility of slow file system
                                    operations in one request from negatively
                                    impacting other requests. See
-                                   :ref:`general-service-tuning`
+                                   :ref:`general-service-tuning`.
max_clients 1024 Maximum number of clients one worker can process simultaneously (it will diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index 4fafa7c18b..b36ec29aa6 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -12,9 +12,16 @@ bind_port = 6000 # expiring_objects_account_name = expiring_objects # # Use an integer to override the number of pre-forked processes that will -# accept connections. +# accept connections. NOTE: if servers_per_port is set, this setting is +# ignored. # workers = auto # +# Make object-server run this many worker processes per unique port of +# "local" ring devices across all storage policies. This can help provide +# the isolation of threads_per_disk without the severe overhead. The default +# value of 0 disables this feature. +# servers_per_port = 0 +# # Maximum concurrent requests per worker # max_clients = 1024 # diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 9eaee561ec..c121bf0ea5 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -69,7 +69,7 @@ class AccountReaper(Daemon): self.object_ring = None self.node_timeout = int(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) - self.myips = whataremyips() + self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0')) self.concurrency = int(conf.get('concurrency', 25)) self.container_concurrency = self.object_concurrency = \ sqrt(self.concurrency) diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index aa91faaf33..151a070c07 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -154,6 +154,7 @@ class Replicator(Daemon): self.logger = logger or get_logger(conf, log_route='replicator') self.root = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) + self.bind_ip = conf.get('bind_ip', '0.0.0.0') self.port = int(conf.get('bind_port', self.default_port)) concurrency = int(conf.get('concurrency', 8)) self.cpool = GreenPool(size=concurrency) @@ -580,7 +581,7 @@ class Replicator(Daemon): """Run a replication pass once.""" self._zero_stats() dirs = [] - ips = whataremyips() + ips = whataremyips(self.bind_ip) if not ips: self.logger.error(_('ERROR Failed to get my own IPs?')) return diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index d4feaa8e23..461ccae640 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -44,10 +44,29 @@ class RingData(object): dev.setdefault("region", 1) @classmethod - def deserialize_v1(cls, gz_file): + def deserialize_v1(cls, gz_file, metadata_only=False): + """ + Deserialize a v1 ring file into a dictionary with `devs`, `part_shift`, + and `replica2part2dev_id` keys. + + If the optional kwarg `metadata_only` is True, then the + `replica2part2dev_id` is not loaded and that key in the returned + dictionary just has the value `[]`. + + :param file gz_file: An opened file-like object which has already + consumed the 6 bytes of magic and version. 
+ :param bool metadata_only: If True, only load `devs` and `part_shift` + :returns: A dict containing `devs`, `part_shift`, and + `replica2part2dev_id` + """ + json_len, = struct.unpack('!I', gz_file.read(4)) ring_dict = json.loads(gz_file.read(json_len)) ring_dict['replica2part2dev_id'] = [] + + if metadata_only: + return ring_dict + partition_count = 1 << (32 - ring_dict['part_shift']) for x in xrange(ring_dict['replica_count']): ring_dict['replica2part2dev_id'].append( @@ -55,11 +74,12 @@ class RingData(object): return ring_dict @classmethod - def load(cls, filename): + def load(cls, filename, metadata_only=False): """ Load ring data from a file. :param filename: Path to a file serialized by the save() method. + :param bool metadata_only: If True, only load `devs` and `part_shift`. :returns: A RingData instance containing the loaded data. """ gz_file = GzipFile(filename, 'rb') @@ -70,15 +90,18 @@ class RingData(object): # See if the file is in the new format magic = gz_file.read(4) if magic == 'R1NG': - version, = struct.unpack('!H', gz_file.read(2)) - if version == 1: - ring_data = cls.deserialize_v1(gz_file) + format_version, = struct.unpack('!H', gz_file.read(2)) + if format_version == 1: + ring_data = cls.deserialize_v1( + gz_file, metadata_only=metadata_only) else: - raise Exception('Unknown ring format version %d' % version) + raise Exception('Unknown ring format version %d' % + format_version) else: # Assume old-style pickled ring gz_file.seek(0) ring_data = pickle.load(gz_file) + if not hasattr(ring_data, 'devs'): ring_data = RingData(ring_data['replica2part2dev_id'], ring_data['devs'], ring_data['part_shift']) diff --git a/swift/common/ring/utils.py b/swift/common/ring/utils.py index 4fcee2eb24..7d7856ebfc 100644 --- a/swift/common/ring/utils.py +++ b/swift/common/ring/utils.py @@ -235,9 +235,14 @@ def is_local_device(my_ips, my_port, dev_ip, dev_port): Return True if the provided dev_ip and dev_port are among the IP addresses specified in my_ips and my_port respectively. + To support accurate locality determination in the server-per-port + deployment, when my_port is None, only IP addresses are used for + determining locality (dev_port is ignored). + If dev_ip is a hostname then it is first translated to an IP address before checking it against my_ips. """ + candidate_ips = [] if not is_valid_ip(dev_ip) and is_valid_hostname(dev_ip): try: # get the ip for this host; use getaddrinfo so that @@ -248,12 +253,19 @@ def is_local_device(my_ips, my_port, dev_ip, dev_port): dev_ip = addr[4][0] # get the ip-address if family == socket.AF_INET6: dev_ip = expand_ipv6(dev_ip) - if dev_ip in my_ips and dev_port == my_port: - return True - return False + candidate_ips.append(dev_ip) except socket.gaierror: return False - return dev_ip in my_ips and dev_port == my_port + else: + if is_valid_ipv6(dev_ip): + dev_ip = expand_ipv6(dev_ip) + candidate_ips = [dev_ip] + + for dev_ip in candidate_ips: + if dev_ip in my_ips and (my_port is None or dev_port == my_port): + return True + + return False def parse_search_value(search_value): diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index e45ab018c5..fcda344b56 100644 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -12,11 +12,13 @@ # limitations under the License. 
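For illustration, a minimal sketch (not part of the patch) of how the relaxed locality check above behaves; the addresses and ports below are hypothetical:

    from swift.common.ring.utils import is_local_device

    my_ips = ['1.2.3.4']
    # Classic deployment: both the IP and the port must match.
    is_local_device(my_ips, 6000, '1.2.3.4', 6000)   # True
    is_local_device(my_ips, 6000, '1.2.3.4', 6010)   # False
    # servers_per_port deployment: callers pass my_port=None, so only
    # IP addresses decide locality and dev_port is ignored.
    is_local_device(my_ips, None, '1.2.3.4', 6010)   # True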
from ConfigParser import ConfigParser -import textwrap +import os import string +import textwrap -from swift.common.utils import config_true_value, SWIFT_CONF_FILE -from swift.common.ring import Ring +from swift.common.utils import ( + config_true_value, SWIFT_CONF_FILE, whataremyips) +from swift.common.ring import Ring, RingData from swift.common.utils import quorum_size from swift.common.exceptions import RingValidationError from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES @@ -30,6 +32,53 @@ EC_POLICY = 'erasure_coding' DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576 +class BindPortsCache(object): + def __init__(self, swift_dir, bind_ip): + self.swift_dir = swift_dir + self.mtimes_by_ring_path = {} + self.portsets_by_ring_path = {} + self.my_ips = set(whataremyips(bind_ip)) + + def all_bind_ports_for_node(self): + """ + Given an iterable of IP addresses identifying a storage backend server, + return a set of all bind ports defined in all rings for this storage + backend server. + + The caller is responsible for not calling this method (which performs + at least a stat on all ring files) too frequently. + """ + # NOTE: we don't worry about disappearing rings here because you can't + # ever delete a storage policy. + + for policy in POLICIES: + # NOTE: we must NOT use policy.load_ring to load the ring. Users + # of this utility function will not need the actual ring data, just + # the bind ports. + # + # This is duplicated with Ring.__init__ just a bit... + serialized_path = os.path.join(self.swift_dir, + policy.ring_name + '.ring.gz') + try: + new_mtime = os.path.getmtime(serialized_path) + except OSError: + continue + old_mtime = self.mtimes_by_ring_path.get(serialized_path) + if not old_mtime or old_mtime != new_mtime: + self.portsets_by_ring_path[serialized_path] = set( + dev['port'] + for dev in RingData.load(serialized_path, + metadata_only=True).devs + if dev and dev['ip'] in self.my_ips) + self.mtimes_by_ring_path[serialized_path] = new_mtime + # No "break" here so that the above line will update the + # mtimes_by_ring_path entry for any ring that changes, not just + # the first one we notice. + + # Return the requested set of ports from our (now-freshened) cache + return reduce(set.union, self.portsets_by_ring_path.values(), set()) + + class PolicyError(ValueError): def __init__(self, msg, index=None): @@ -291,7 +340,7 @@ class ECStoragePolicy(BaseStoragePolicy): if ec_type not in VALID_EC_TYPES: raise PolicyError('Wrong ec_type %s for policy %s, should be one' ' of "%s"' % (ec_type, self.name, - ', '.join(VALID_EC_TYPES))) + ', '.join(VALID_EC_TYPES))) self._ec_type = ec_type # Define _ec_ndata as the number of EC data fragments @@ -427,8 +476,9 @@ class ECStoragePolicy(BaseStoragePolicy): if nodes_configured != (self.ec_ndata + self.ec_nparity): raise RingValidationError( 'EC ring for policy %s needs to be configured with ' - 'exactly %d nodes. Got %d.' % (self.name, - self.ec_ndata + self.ec_nparity, nodes_configured)) + 'exactly %d nodes. Got %d.' % ( + self.name, self.ec_ndata + self.ec_nparity, + nodes_configured)) @property def quorum(self): diff --git a/swift/common/utils.py b/swift/common/utils.py index d470fb9970..63919af1ec 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1589,7 +1589,7 @@ def get_hub(): return None -def drop_privileges(user): +def drop_privileges(user, call_setsid=True): """ Sets the userid/groupid of the current process, get session leader, etc. 
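As a usage sketch (assuming rings exist under /etc/swift; the bind_ip is hypothetical): a caller refreshes its set of ports to bind by asking the cache, and RingData.load(..., metadata_only=True) keeps each refresh down to a stat() per ring plus, at most, a cheap device-list read:

    from swift.common.storage_policy import BindPortsCache

    cache = BindPortsCache('/etc/swift', bind_ip='1.2.3.4')
    # Ports of ring devices whose 'ip' matches one of this node's IPs:
    bind_ports = cache.all_bind_ports_for_node()  # e.g. set([6006, 6008])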
@@ -1602,10 +1602,11 @@ def drop_privileges(user): os.setgid(user[3]) os.setuid(user[2]) os.environ['HOME'] = user[5] - try: - os.setsid() - except OSError: - pass + if call_setsid: + try: + os.setsid() + except OSError: + pass os.chdir('/') # in case you need to rmdir on where you started the daemon os.umask(0o22) # ensure files are created with the correct privileges @@ -1706,12 +1707,28 @@ def expand_ipv6(address): return socket.inet_ntop(socket.AF_INET6, packed_ip) -def whataremyips(): +def whataremyips(bind_ip=None): """ - Get the machine's ip addresses + Get "our" IP addresses ("us" being the set of services configured by + one *.conf file). If our REST listens on a specific address, return it. + Otherwise, if listening on '0.0.0.0' or '::', return all addresses, + including the loopback. + :param str bind_ip: Optional bind_ip from a config file; may be IP address + or hostname. :returns: list of Strings of ip addresses """ + if bind_ip: + # See if bind_ip is '0.0.0.0'/'::' + try: + _, _, _, _, sockaddr = socket.getaddrinfo( + bind_ip, None, 0, socket.SOCK_STREAM, 0, + socket.AI_NUMERICHOST)[0] + if sockaddr[0] not in ('0.0.0.0', '::'): + return [bind_ip] + except socket.gaierror: + pass + addresses = [] for interface in netifaces.interfaces(): try: diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index b87fde4a02..d7a6102d62 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -29,12 +29,13 @@ from textwrap import dedent import eventlet import eventlet.debug -from eventlet import greenio, GreenPool, sleep, wsgi, listen +from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi -from eventlet.green import socket, ssl +from eventlet.green import socket, ssl, os as green_os from urllib import unquote from swift.common import utils, constraints +from swift.common.storage_policy import BindPortsCache from swift.common.swob import Request from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ @@ -437,10 +438,414 @@ def run_server(conf, logger, sock, global_conf=None): pool.waitall() -#TODO(clayg): pull more pieces of this to test more +class WorkersStrategy(object): + """ + WSGI server management strategy object for a single bind port and listen + socket shared by a configured number of forked-off workers. + + Used in :py:func:`run_wsgi`. + + :param dict conf: Server configuration dictionary. + :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` + object. + """ + + def __init__(self, conf, logger): + self.conf = conf + self.logger = logger + self.sock = None + self.children = [] + self.worker_count = config_auto_int_value(conf.get('workers'), + CPU_COUNT) + + def loop_timeout(self): + """ + :returns: None; to block in :py:func:`green.os.wait` + """ + + return None + + def bind_ports(self): + """ + Bind the one listen socket for this strategy and drop privileges + (since the parent process will never need to bind again). + """ + + try: + self.sock = get_socket(self.conf) + except ConfigFilePortError: + msg = 'bind_port wasn\'t properly set in the config file. ' \ + 'It must be explicitly set to a valid port number.' + return msg + drop_privileges(self.conf.get('user', 'swift')) + + def no_fork_sock(self): + """ + Return a server listen socket if the server should run in the + foreground (no fork). + """ + + # Useful for profiling [no forks].
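A behaviour sketch of the extended whataremyips() helper above (the specific address is hypothetical; return values abbreviated):

    from swift.common.utils import whataremyips

    whataremyips('1.2.3.4')  # ['1.2.3.4'] -- a specific bind_ip wins
    whataremyips('0.0.0.0')  # all interface addresses, incl. loopback
    whataremyips()           # legacy behaviour: all interface addresses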
+ if self.worker_count == 0: + return self.sock + + def new_worker_socks(self): + """ + Yield a sequence of (socket, opaque_data) tuples for each server which + should be forked-off and started. + + The opaque_data item for each socket will be passed into the + :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods + where it will be ignored. + """ + + while len(self.children) < self.worker_count: + yield self.sock, None + + def post_fork_hook(self): + """ + Perform any initialization in a forked-off child process prior to + starting the wsgi server. + """ + + pass + + def log_sock_exit(self, sock, _unused): + """ + Log a server's exit. + + :param socket sock: The listen socket for the worker just started. + :param _unused: The socket's opaque_data yielded by + :py:meth:`new_worker_socks`. + """ + + self.logger.notice('Child %d exiting normally' % os.getpid()) + + def register_worker_start(self, sock, _unused, pid): + """ + Called when a new worker is started. + + :param socket sock: The listen socket for the worker just started. + :param _unused: The socket's opaque_data yielded by new_worker_socks(). + :param int pid: The new worker process' PID + """ + + self.logger.notice('Started child %s' % pid) + self.children.append(pid) + + def register_worker_exit(self, pid): + """ + Called when a worker has exited. + + :param int pid: The PID of the worker that exited. + """ + + self.logger.error('Removing dead child %s' % pid) + self.children.remove(pid) + + def shutdown_sockets(self): + """ + Shutdown any listen sockets. + """ + + greenio.shutdown_safe(self.sock) + self.sock.close() + + +class PortPidState(object): + """ + A helper class for :py:class:`ServersPerPortStrategy` to track listen + sockets and PIDs for each port. + + :param int servers_per_port: The configured number of servers per port. + :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` + """ + + def __init__(self, servers_per_port, logger): + self.servers_per_port = servers_per_port + self.logger = logger + self.sock_data_by_port = {} + + def sock_for_port(self, port): + """ + :param int port: The port whose socket is desired. + :returns: The bound listen socket for the given port. + """ + + return self.sock_data_by_port[port]['sock'] + + def port_for_sock(self, sock): + """ + :param socket sock: A tracked bound listen socket + :returns: The port the socket is bound to. + """ + + for port, sock_data in self.sock_data_by_port.iteritems(): + if sock_data['sock'] == sock: + return port + + def _pid_to_port_and_index(self, pid): + for port, sock_data in self.sock_data_by_port.iteritems(): + for server_idx, a_pid in enumerate(sock_data['pids']): + if pid == a_pid: + return port, server_idx + + def port_index_pairs(self): + """ + :returns: A set of (port, server_idx) tuples for currently-tracked + ports, sockets, and PIDs. + """ + + current_port_index_pairs = set() + for port, pid_state in self.sock_data_by_port.iteritems(): + current_port_index_pairs |= set( + (port, i) + for i, pid in enumerate(pid_state['pids']) + if pid is not None) + return current_port_index_pairs + + def track_port(self, port, sock): + """ + Start tracking servers for the given port and listen socket. + + :param int port: The port to start tracking + :param socket sock: The bound listen socket for the port. + """ + + self.sock_data_by_port[port] = { + 'sock': sock, + 'pids': [None] * self.servers_per_port, + } + + def not_tracking(self, port): + """ + Return True if the specified port is not being tracked.
+ + :param int port: A port to check. + """ + + return port not in self.sock_data_by_port + + def all_socks(self): + """ + Yield all current listen sockets. + """ + + for orphan_data in self.sock_data_by_port.itervalues(): + yield orphan_data['sock'] + + def forget_port(self, port): + """ + Idempotently forget a port, closing the listen socket at most once. + """ + + orphan_data = self.sock_data_by_port.pop(port, None) + if orphan_data: + greenio.shutdown_safe(orphan_data['sock']) + orphan_data['sock'].close() + self.logger.notice('Closing unnecessary sock for port %d', port) + + def add_pid(self, port, index, pid): + self.sock_data_by_port[port]['pids'][index] = pid + + def forget_pid(self, pid): + """ + Idempotently forget a PID. It's okay if the PID is no longer in our + data structure (it could have been removed by the "orphan port" removal + in :py:meth:`new_worker_socks`). + + :param int pid: The PID which exited. + """ + + port_server_idx = self._pid_to_port_and_index(pid) + if port_server_idx is None: + # This method can lose a race with the "orphan port" removal, when + # a ring reload no longer contains a port. So it's okay if we were + # unable to find a (port, server_idx) pair. + return + dead_port, server_idx = port_server_idx + self.logger.error('Removing dead child %d (PID: %s) for port %s', + server_idx, pid, dead_port) + self.sock_data_by_port[dead_port]['pids'][server_idx] = None + + +class ServersPerPortStrategy(object): + """ + WSGI server management strategy object for an object-server with one listen + port per unique local port in the storage policy rings. The + `servers_per_port` integer config setting determines how many workers are + run per port. + + Used in :py:func:`run_wsgi`. + + :param dict conf: Server configuration dictionary. + :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` + object. + :param int servers_per_port: The number of workers to run per port. + """ + + def __init__(self, conf, logger, servers_per_port): + self.conf = conf + self.logger = logger + self.servers_per_port = servers_per_port + self.swift_dir = conf.get('swift_dir', '/etc/swift') + self.ring_check_interval = int(conf.get('ring_check_interval', 15)) + self.port_pid_state = PortPidState(servers_per_port, logger) + + bind_ip = conf.get('bind_ip', '0.0.0.0') + self.cache = BindPortsCache(self.swift_dir, bind_ip) + + def _reload_bind_ports(self): + self.bind_ports = self.cache.all_bind_ports_for_node() + + def _bind_port(self, port): + new_conf = self.conf.copy() + new_conf['bind_port'] = port + sock = get_socket(new_conf) + self.port_pid_state.track_port(port, sock) + + def loop_timeout(self): + """ + :returns: The time to wait for a child to exit before checking for + reloaded rings (new ports). + """ + + return self.ring_check_interval + + def bind_ports(self): + """ + Bind one listen socket per unique local storage policy ring port. Then + do all the work of drop_privileges except the actual dropping of + privileges (each forked-off worker will do that post-fork in + :py:meth:`post_fork_hook`). + """ + + self._reload_bind_ports() + for port in self.bind_ports: + self._bind_port(port) + + # The workers strategy drops privileges here, which we obviously cannot + # do if we want to support binding to low ports. But we do want some + # of the actions that drop_privileges did. 
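To make the PortPidState bookkeeping above concrete, a small sketch (the port, PIDs, and stand-in socket object are hypothetical; the real strategy stores bound listen sockets here):

    import logging
    from swift.common.wsgi import PortPidState

    state = PortPidState(servers_per_port=2,
                         logger=logging.getLogger('demo'))
    state.track_port(6200, object())  # stand-in for a listen socket
    state.add_pid(6200, 0, 1234)
    state.add_pid(6200, 1, 1235)
    state.port_index_pairs()          # set([(6200, 0), (6200, 1)])
    state.forget_pid(1234)            # slot 0 goes back to None
    state.port_index_pairs()          # set([(6200, 1)])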
+ try: + os.setsid() + except OSError: + pass + # In case you need to rmdir where you started the daemon: + os.chdir('/') + # Ensure files are created with the correct privileges: + os.umask(0o22) + + def no_fork_sock(self): + """ + This strategy does not support running in the foreground. + """ + + pass + + def new_worker_socks(self): + """ + Yield a sequence of (socket, server_idx) tuples for each server which + should be forked-off and started. + + Any sockets for "orphaned" ports no longer in any ring will be closed + (causing their associated workers to gracefully exit) after all new + sockets have been yielded. + + The server_idx item for each socket will be passed into the + :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods. + """ + + self._reload_bind_ports() + desired_port_index_pairs = set( + (p, i) for p in self.bind_ports + for i in range(self.servers_per_port)) + + current_port_index_pairs = self.port_pid_state.port_index_pairs() + + if desired_port_index_pairs != current_port_index_pairs: + # Orphan ports are ports which had object-server processes running, + # but which no longer appear in the ring. We'll kill them after we + # start missing workers. + orphan_port_index_pairs = current_port_index_pairs - \ + desired_port_index_pairs + + # Fork off worker(s) for every port that's supposed to have + # worker(s) but doesn't + missing_port_index_pairs = desired_port_index_pairs - \ + current_port_index_pairs + for port, server_idx in sorted(missing_port_index_pairs): + if self.port_pid_state.not_tracking(port): + try: + self._bind_port(port) + except Exception as e: + self.logger.critical('Unable to bind to port %d: %s', + port, e) + continue + yield self.port_pid_state.sock_for_port(port), server_idx + + for orphan_pair in orphan_port_index_pairs: + # For any port in orphan_port_index_pairs, it is guaranteed + # that there should be no listen socket for that port, so we + # can close and forget them. + self.port_pid_state.forget_port(orphan_pair[0]) + + def post_fork_hook(self): + """ + Called in each child process, prior to starting the actual wsgi server, + to drop privileges. + """ + + drop_privileges(self.conf.get('user', 'swift'), call_setsid=False) + + def log_sock_exit(self, sock, server_idx): + """ + Log a server's exit. + """ + + port = self.port_pid_state.port_for_sock(sock) + self.logger.notice('Child %d (PID %d, port %d) exiting normally', + server_idx, os.getpid(), port) + + def register_worker_start(self, sock, server_idx, pid): + """ + Called when a new worker is started. + + :param socket sock: The listen socket for the worker just started. + :param server_idx: The socket's server_idx as yielded by + :py:meth:`new_worker_socks`. + :param int pid: The new worker process' PID + """ + port = self.port_pid_state.port_for_sock(sock) + self.logger.notice('Started child %d (PID %d) for port %d', + server_idx, pid, port) + self.port_pid_state.add_pid(port, server_idx, pid) + + def register_worker_exit(self, pid): + """ + Called when a worker has exited. + + :param int pid: The PID of the worker that exited. + """ + + self.port_pid_state.forget_pid(pid) + + def shutdown_sockets(self): + """ + Shutdown any listen sockets. + """ + + for sock in self.port_pid_state.all_socks(): + greenio.shutdown_safe(sock) + sock.close() + + def run_wsgi(conf_path, app_section, *args, **kwargs): """ - Runs the server using the specified number of workers. + Runs the server according to some strategy.
The default strategy runs a + specified number of workers in a pre-fork model. The object-server (only) + may use a servers-per-port strategy if its config has a servers_per_port + setting with a value greater than zero. :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from @@ -454,17 +859,22 @@ def run_wsgi(conf_path, app_section, *args, **kwargs): print(e) return 1 - # bind to address and port - try: - sock = get_socket(conf) - except ConfigFilePortError: - msg = 'bind_port wasn\'t properly set in the config file. ' \ - 'It must be explicitly set to a valid port number.' - logger.error(msg) - print(msg) + servers_per_port = int(conf.get('servers_per_port', '0') or 0) + + # NOTE: for now servers_per_port is object-server-only; future work could + # be done to test and allow it to be used for account and container + # servers, but that has not been done yet. + if servers_per_port and app_section == 'object-server': + strategy = ServersPerPortStrategy( + conf, logger, servers_per_port=servers_per_port) + else: + strategy = WorkersStrategy(conf, logger) + + error_msg = strategy.bind_ports() + if error_msg: + logger.error(error_msg) + print(error_msg) return 1 - # remaining tasks should not require elevated privileges - drop_privileges(conf.get('user', 'swift')) # Ensure the configuration and application can be loaded before proceeding. global_conf = {'log_name': log_name} @@ -479,11 +889,9 @@ def run_wsgi(conf_path, app_section, *args, **kwargs): # redirect errors to logger and close stdio capture_stdio(logger) - worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT) - - # Useful for profiling [no forks]. - if worker_count == 0: - run_server(conf, logger, sock, global_conf=global_conf) + no_fork_sock = strategy.no_fork_sock() + if no_fork_sock: + run_server(conf, logger, no_fork_sock, global_conf=global_conf) return 0 def kill_children(*args): @@ -502,32 +910,42 @@ def run_wsgi(conf_path, app_section, *args, **kwargs): running = [True] signal.signal(signal.SIGTERM, kill_children) signal.signal(signal.SIGHUP, hup) - children = [] + while running[0]: - while len(children) < worker_count: + for sock, sock_info in strategy.new_worker_socks(): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) + strategy.post_fork_hook() run_server(conf, logger, sock) - logger.notice('Child %d exiting normally' % os.getpid()) + strategy.log_sock_exit(sock, sock_info) return 0 else: - logger.notice('Started child %s' % pid) - children.append(pid) - try: - pid, status = os.wait() - if os.WIFEXITED(status) or os.WIFSIGNALED(status): - logger.error('Removing dead child %s' % pid) - children.remove(pid) - except OSError as err: - if err.errno not in (errno.EINTR, errno.ECHILD): - raise - except KeyboardInterrupt: - logger.notice('User quit') - break - greenio.shutdown_safe(sock) - sock.close() + strategy.register_worker_start(sock, sock_info, pid) + + # The strategy may need to pay attention to something in addition to + # child process exits (like new ports showing up in a ring). + # + # NOTE: a timeout value of None will just instantiate the Timeout + # object and not actually schedule it, which is equivalent to no + # timeout for the green_os.wait().
+ loop_timeout = strategy.loop_timeout() + + with Timeout(loop_timeout, exception=False): + try: + pid, status = green_os.wait() + if os.WIFEXITED(status) or os.WIFSIGNALED(status): + strategy.register_worker_exit(pid) + except OSError as err: + if err.errno not in (errno.EINTR, errno.ECHILD): + raise + except KeyboardInterrupt: + logger.notice('User quit') + running[0] = False + break + + strategy.shutdown_sockets() logger.notice('Exited') return 0 diff --git a/swift/container/sync.py b/swift/container/sync.py index a409de4ac7..c6161883c4 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -204,7 +204,8 @@ class ContainerSync(Daemon): #: swift.common.ring.Ring for locating containers. self.container_ring = container_ring or Ring(self.swift_dir, ring_name='container') - self._myips = whataremyips() + bind_ip = conf.get('bind_ip', '0.0.0.0') + self._myips = whataremyips(bind_ip) self._myport = int(conf.get('bind_port', 6001)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index f9aa5f15d8..8f84b06c7a 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -119,7 +119,10 @@ class ObjectReconstructor(Daemon): self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.swift_dir = conf.get('swift_dir', '/etc/swift') - self.port = int(conf.get('bind_port', 6000)) + self.bind_ip = conf.get('bind_ip', '0.0.0.0') + self.servers_per_port = int(conf.get('servers_per_port', '0') or 0) + self.port = None if self.servers_per_port else \ + int(conf.get('bind_port', 6000)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.ring_check_interval = int(conf.get('ring_check_interval', 15)) @@ -764,7 +767,7 @@ class ObjectReconstructor(Daemon): """ override_devices = override_devices or [] override_partitions = override_partitions or [] - ips = whataremyips() + ips = whataremyips(self.bind_ip) for policy in POLICIES: if policy.policy_type != EC_POLICY: continue @@ -776,6 +779,7 @@ class ObjectReconstructor(Daemon): ips, self.port, dev['replication_ip'], dev['replication_port']), policy.object_ring.devs) + for local_dev in local_devices: if override_devices and (local_dev['device'] not in override_devices): diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index d23624b382..de2ec8d85f 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -65,7 +65,10 @@ class ObjectReplicator(Daemon): self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no')) self.swift_dir = conf.get('swift_dir', '/etc/swift') - self.port = int(conf.get('bind_port', 6000)) + self.bind_ip = conf.get('bind_ip', '0.0.0.0') + self.servers_per_port = int(conf.get('servers_per_port', '0') or 0) + self.port = None if self.servers_per_port else \ + int(conf.get('bind_port', 6000)) self.concurrency = int(conf.get('concurrency', 1)) self.stats_interval = int(conf.get('stats_interval', '300')) self.ring_check_interval = int(conf.get('ring_check_interval', 15)) @@ -539,7 +542,7 @@ class ObjectReplicator(Daemon): policies will be returned """ jobs = [] - ips = whataremyips() + ips = whataremyips(self.bind_ip) for policy in POLICIES: if policy.policy_type == REPL_POLICY: if (override_policies is not None and diff --git a/test/probe/common.py b/test/probe/common.py index 
ca1225f9fb..467598caec 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -39,8 +39,8 @@ for p in POLICIES: POLICIES_BY_TYPE[p.policy_type].append(p) -def get_server_number(port, port2server): - server_number = port2server[port] +def get_server_number(ipport, ipport2server): + server_number = ipport2server[ipport] server, number = server_number[:-1], server_number[-1:] try: number = int(number) @@ -50,19 +50,19 @@ def get_server_number(port, port2server): return server, number -def start_server(port, port2server, pids, check=True): - server, number = get_server_number(port, port2server) +def start_server(ipport, ipport2server, pids, check=True): + server, number = get_server_number(ipport, ipport2server) err = Manager([server]).start(number=number, wait=False) if err: raise Exception('unable to start %s' % ( server if not number else '%s%s' % (server, number))) if check: - return check_server(port, port2server, pids) + return check_server(ipport, ipport2server, pids) return None -def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT): - server = port2server[port] +def check_server(ipport, ipport2server, pids, timeout=CHECK_SERVER_TIMEOUT): + server = ipport2server[ipport] if server[:-1] in ('account', 'container', 'object'): if int(server[-1]) > 4: return None @@ -74,7 +74,7 @@ def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT): try_until = time() + timeout while True: try: - conn = HTTPConnection('127.0.0.1', port) + conn = HTTPConnection(*ipport) conn.request('GET', path) resp = conn.getresponse() # 404 because it's a nonsense path (and mount_check is false) @@ -87,14 +87,14 @@ def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT): if time() > try_until: print err print 'Giving up on %s:%s after %s seconds.' 
% ( - server, port, timeout) + server, ipport, timeout) raise err sleep(0.1) else: try_until = time() + timeout while True: try: - url, token = get_auth('http://127.0.0.1:8080/auth/v1.0', + url, token = get_auth('http://%s:%d/auth/v1.0' % ipport, 'test:tester', 'testing') account = url.split('/')[-1] head_account(url, token) @@ -108,8 +108,8 @@ def check_server(port, port2server, pids, timeout=CHECK_SERVER_TIMEOUT): return None -def kill_server(port, port2server, pids): - server, number = get_server_number(port, port2server) +def kill_server(ipport, ipport2server, pids): + server, number = get_server_number(ipport, ipport2server) err = Manager([server]).kill(number=number) if err: raise Exception('unable to kill %s' % (server if not number else @@ -117,47 +117,77 @@ def kill_server(port, port2server, pids): try_until = time() + 30 while True: try: - conn = HTTPConnection('127.0.0.1', port) + conn = HTTPConnection(*ipport) conn.request('GET', '/') conn.getresponse() except Exception as err: break if time() > try_until: raise Exception( - 'Still answering on port %s after 30 seconds' % port) + 'Still answering on %s:%s after 30 seconds' % ipport) sleep(0.1) -def kill_nonprimary_server(primary_nodes, port2server, pids): - primary_ports = [n['port'] for n in primary_nodes] - for port, server in port2server.iteritems(): - if port in primary_ports: +def kill_nonprimary_server(primary_nodes, ipport2server, pids): + primary_ipports = [(n['ip'], n['port']) for n in primary_nodes] + for ipport, server in ipport2server.iteritems(): + if ipport in primary_ipports: server_type = server[:-1] break else: raise Exception('Cannot figure out server type for %r' % primary_nodes) - for port, server in list(port2server.iteritems()): - if server[:-1] == server_type and port not in primary_ports: - kill_server(port, port2server, pids) - return port + for ipport, server in list(ipport2server.iteritems()): + if server[:-1] == server_type and ipport not in primary_ipports: + kill_server(ipport, ipport2server, pids) + return ipport -def build_port_to_conf(server): - # map server to config by port - port_to_config = {} - for server_ in Manager([server]): - for config_path in server_.conf_files(): - conf = readconf(config_path, - section_name='%s-replicator' % server_.type) - port_to_config[int(conf['bind_port'])] = conf - return port_to_config +def add_ring_devs_to_ipport2server(ring, server_type, ipport2server, + servers_per_port=0): + # We'll number the servers by order of unique occurrence of: + # IP, if servers_per_port > 0 OR there's > 1 IP in the ring + # ipport, otherwise + unique_ip_count = len(set(dev['ip'] for dev in ring.devs if dev)) + things_to_number = {} + number = 0 + for dev in filter(None, ring.devs): + ip = dev['ip'] + ipport = (ip, dev['port']) + unique_by = ip if servers_per_port or unique_ip_count > 1 else ipport + if unique_by not in things_to_number: + number += 1 + things_to_number[unique_by] = number + ipport2server[ipport] = '%s%d' % (server_type, + things_to_number[unique_by]) + + +def store_config_paths(name, configs): + for server_name in (name, '%s-replicator' % name): + for server in Manager([server_name]): + for i, conf in enumerate(server.conf_files(), 1): + configs[server.server][i] = conf def get_ring(ring_name, required_replicas, required_devices, - server=None, force_validate=None): + server=None, force_validate=None, ipport2server=None, + config_paths=None): if not server: server = ring_name ring = Ring('/etc/swift', ring_name=ring_name) + if ipport2server is None: + ipport2server =
{} # used internally, even if not passed in + if config_paths is None: + config_paths = defaultdict(dict) + store_config_paths(server, config_paths) + + repl_name = '%s-replicator' % server + repl_configs = {i: readconf(c, section_name=repl_name) + for i, c in config_paths[repl_name].iteritems()} + servers_per_port = any(int(c.get('servers_per_port', '0')) + for c in repl_configs.values()) + + add_ring_devs_to_ipport2server(ring, server, ipport2server, + servers_per_port=servers_per_port) if not VALIDATE_RSYNC and not force_validate: return ring # easy sanity checks @@ -167,10 +197,11 @@ def get_ring(ring_name, required_replicas, required_devices, if len(ring.devs) != required_devices: raise SkipTest('%s has %s devices instead of %s' % ( ring.serialized_path, len(ring.devs), required_devices)) - port_to_config = build_port_to_conf(server) for dev in ring.devs: # verify server is exposing mounted device - conf = port_to_config[dev['port']] + ipport = (dev['ip'], dev['port']) + _, server_number = get_server_number(ipport, ipport2server) + conf = repl_configs[server_number] for device in os.listdir(conf['devices']): if device == dev['device']: dev_path = os.path.join(conf['devices'], device) @@ -185,7 +216,7 @@ def get_ring(ring_name, required_replicas, required_devices, "unable to find ring device %s under %s's devices (%s)" % ( dev['device'], server, conf['devices'])) # verify server is exposing rsync device - if port_to_config[dev['port']].get('vm_test_mode', False): + if conf.get('vm_test_mode', False): rsync_export = '%s%s' % (server, dev['replication_port']) else: rsync_export = server @@ -235,46 +266,45 @@ class ProbeTest(unittest.TestCase): Manager(['all']).stop() self.pids = {} try: + self.ipport2server = {} + self.configs = defaultdict(dict) self.account_ring = get_ring( 'account', self.acct_cont_required_replicas, - self.acct_cont_required_devices) + self.acct_cont_required_devices, + ipport2server=self.ipport2server, + config_paths=self.configs) self.container_ring = get_ring( 'container', self.acct_cont_required_replicas, - self.acct_cont_required_devices) + self.acct_cont_required_devices, + ipport2server=self.ipport2server, + config_paths=self.configs) self.policy = get_policy(**self.policy_requirements) self.object_ring = get_ring( self.policy.ring_name, self.obj_required_replicas, self.obj_required_devices, - server='object') + server='object', + ipport2server=self.ipport2server, + config_paths=self.configs) + + self.servers_per_port = any( + int(readconf(c, section_name='object-replicator').get( + 'servers_per_port', '0')) + for c in self.configs['object-replicator'].values()) + Manager(['main']).start(wait=False) - self.port2server = {} - for server, port in [('account', 6002), ('container', 6001), - ('object', 6000)]: - for number in xrange(1, 9): - self.port2server[port + (number * 10)] = \ - '%s%d' % (server, number) - for port in self.port2server: - check_server(port, self.port2server, self.pids) - self.port2server[8080] = 'proxy' - self.url, self.token, self.account = \ - check_server(8080, self.port2server, self.pids) - self.configs = defaultdict(dict) - for name in ('account', 'container', 'object'): - for server_name in (name, '%s-replicator' % name): - for server in Manager([server_name]): - for i, conf in enumerate(server.conf_files(), 1): - self.configs[server.server][i] = conf + for ipport in self.ipport2server: + check_server(ipport, self.ipport2server, self.pids) + proxy_ipport = ('127.0.0.1', 8080) + self.ipport2server[proxy_ipport] = 'proxy' + self.url, 
self.token, self.account = check_server( + proxy_ipport, self.ipport2server, self.pids) self.replicators = Manager( ['account-replicator', 'container-replicator', 'object-replicator']) self.updaters = Manager(['container-updater', 'object-updater']) - self.server_port_to_conf = {} - # get some configs backend daemon configs loaded up - for server in ('account', 'container', 'object'): - self.server_port_to_conf[server] = build_port_to_conf(server) except BaseException: try: raise @@ -288,7 +318,11 @@ class ProbeTest(unittest.TestCase): Manager(['all']).kill() def device_dir(self, server, node): - conf = self.server_port_to_conf[server][node['port']] + server_type, config_number = get_server_number( + (node['ip'], node['port']), self.ipport2server) + repl_server = '%s-replicator' % server_type + conf = readconf(self.configs[repl_server][config_number], + section_name=repl_server) return os.path.join(conf['devices'], node['device']) def storage_dir(self, server, node, part=None, policy=None): @@ -301,9 +335,24 @@ class ProbeTest(unittest.TestCase): def config_number(self, node): _server_type, config_number = get_server_number( - node['port'], self.port2server) + (node['ip'], node['port']), self.ipport2server) return config_number + def is_local_to(self, node1, node2): + """ + Return True if both ring devices are "local" to each other (on the same + "server"). + """ + if self.servers_per_port: + return node1['ip'] == node2['ip'] + + # Without a disambiguating IP, for SAIOs, we have to assume ports + # uniquely identify "servers". SAIOs should be configured to *either* + # have unique IPs per node (e.g. 127.0.0.1, 127.0.0.2, etc.) OR unique + # ports per server (i.e. sdb1 & sdb5 would have same port numbers in + # the 8-disk EC ring). + return node1['port'] == node2['port'] + def get_to_final_state(self): # these .stop()s are probably not strictly necessary, # but may prevent race conditions diff --git a/test/probe/test_account_failures.py b/test/probe/test_account_failures.py index e1fd2cb93f..783d3da9b8 100755 --- a/test/probe/test_account_failures.py +++ b/test/probe/test_account_failures.py @@ -97,8 +97,9 @@ class TestAccountFailures(ReplProbeTest): self.assert_(found2) apart, anodes = self.account_ring.get_nodes(self.account) - kill_nonprimary_server(anodes, self.port2server, self.pids) - kill_server(anodes[0]['port'], self.port2server, self.pids) + kill_nonprimary_server(anodes, self.ipport2server, self.pids) + kill_server((anodes[0]['ip'], anodes[0]['port']), + self.ipport2server, self.pids) # Kill account servers excepting two of the primaries # Delete container1 @@ -146,7 +147,8 @@ class TestAccountFailures(ReplProbeTest): self.assert_(found2) # Restart other primary account server - start_server(anodes[0]['port'], self.port2server, self.pids) + start_server((anodes[0]['ip'], anodes[0]['port']), + self.ipport2server, self.pids) # Assert that server doesn't know about container1's deletion or the # new container2/object2 yet diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py index fe6aa49dfa..5eddad1464 100755 --- a/test/probe/test_container_failures.py +++ b/test/probe/test_container_failures.py @@ -49,14 +49,16 @@ class TestContainerFailures(ReplProbeTest): client.put_container(self.url, self.token, container1) # Kill container1 servers excepting two of the primaries - kill_nonprimary_server(cnodes, self.port2server, self.pids) - kill_server(cnodes[0]['port'], self.port2server, self.pids) + kill_nonprimary_server(cnodes, self.ipport2server,
self.pids) + kill_server((cnodes[0]['ip'], cnodes[0]['port']), + self.ipport2server, self.pids) # Delete container1 client.delete_container(self.url, self.token, container1) # Restart other container1 primary server - start_server(cnodes[0]['port'], self.port2server, self.pids) + start_server((cnodes[0]['ip'], cnodes[0]['port']), + self.ipport2server, self.pids) # Create container1/object1 (allowed because at least server thinks the # container exists) @@ -87,18 +89,23 @@ class TestContainerFailures(ReplProbeTest): client.put_container(self.url, self.token, container1) # Kill container1 servers excepting one of the primaries - cnp_port = kill_nonprimary_server(cnodes, self.port2server, self.pids) - kill_server(cnodes[0]['port'], self.port2server, self.pids) - kill_server(cnodes[1]['port'], self.port2server, self.pids) + cnp_ipport = kill_nonprimary_server(cnodes, self.ipport2server, + self.pids) + kill_server((cnodes[0]['ip'], cnodes[0]['port']), + self.ipport2server, self.pids) + kill_server((cnodes[1]['ip'], cnodes[1]['port']), + self.ipport2server, self.pids) # Delete container1 directly to the one primary still up direct_client.direct_delete_container(cnodes[2], cpart, self.account, container1) # Restart other container1 servers - start_server(cnodes[0]['port'], self.port2server, self.pids) - start_server(cnodes[1]['port'], self.port2server, self.pids) - start_server(cnp_port, self.port2server, self.pids) + start_server((cnodes[0]['ip'], cnodes[0]['port']), + self.ipport2server, self.pids) + start_server((cnodes[1]['ip'], cnodes[1]['port']), + self.ipport2server, self.pids) + start_server(cnp_ipport, self.ipport2server, self.pids) # Get to a final state self.get_to_final_state() diff --git a/test/probe/test_empty_device_handoff.py b/test/probe/test_empty_device_handoff.py index e4b2033e0f..f68ee6692b 100755 --- a/test/probe/test_empty_device_handoff.py +++ b/test/probe/test_empty_device_handoff.py @@ -26,7 +26,8 @@ from swiftclient import client from swift.common import direct_client from swift.obj.diskfile import get_data_dir from swift.common.exceptions import ClientException -from test.probe.common import kill_server, ReplProbeTest, start_server +from test.probe.common import ( + kill_server, ReplProbeTest, start_server, get_server_number) from swift.common.utils import readconf from swift.common.manager import Manager @@ -35,7 +36,8 @@ class TestEmptyDevice(ReplProbeTest): def _get_objects_dir(self, onode): device = onode['device'] - node_id = (onode['port'] - 6000) / 10 + _, node_id = get_server_number((onode['ip'], onode['port']), + self.ipport2server) obj_server_conf = readconf(self.configs['object-server'][node_id]) devices = obj_server_conf['app:object-server']['devices'] obj_dir = '%s/%s' % (devices, device) @@ -56,7 +58,8 @@ class TestEmptyDevice(ReplProbeTest): onode = onodes[0] # Kill one container/obj primary server - kill_server(onode['port'], self.port2server, self.pids) + kill_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Delete the default data directory for objects on the primary server obj_dir = '%s/%s' % (self._get_objects_dir(onode), @@ -74,7 +77,8 @@ class TestEmptyDevice(ReplProbeTest): # Kill other two container/obj primary servers # to ensure GET handoff works for node in onodes[1:]: - kill_server(node['port'], self.port2server, self.pids) + kill_server((node['ip'], node['port']), + self.ipport2server, self.pids) # Indirectly through proxy assert we can get container/obj odata = client.get_object(self.url, self.token, container, 
obj)[-1] @@ -83,7 +87,8 @@ class TestEmptyDevice(ReplProbeTest): 'returned: %s' % repr(odata)) # Restart those other two container/obj primary servers for node in onodes[1:]: - start_server(node['port'], self.port2server, self.pids) + start_server((node['ip'], node['port']), + self.ipport2server, self.pids) self.assertFalse(os.path.exists(obj_dir)) # We've indirectly verified the handoff node has the object, but # let's directly verify it. @@ -122,7 +127,8 @@ class TestEmptyDevice(ReplProbeTest): missing) # Bring the first container/obj primary server back up - start_server(onode['port'], self.port2server, self.pids) + start_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Assert that it doesn't have container/obj yet self.assertFalse(os.path.exists(obj_dir)) @@ -136,21 +142,17 @@ class TestEmptyDevice(ReplProbeTest): else: self.fail("Expected ClientException but didn't get it") - try: - port_num = onode['replication_port'] - except KeyError: - port_num = onode['port'] - try: - another_port_num = another_onode['replication_port'] - except KeyError: - another_port_num = another_onode['port'] - # Run object replication for first container/obj primary server - num = (port_num - 6000) / 10 + _, num = get_server_number( + (onode['ip'], onode.get('replication_port', onode['port'])), + self.ipport2server) Manager(['object-replicator']).once(number=num) # Run object replication for handoff node - another_num = (another_port_num - 6000) / 10 + _, another_num = get_server_number( + (another_onode['ip'], + another_onode.get('replication_port', another_onode['port'])), + self.ipport2server) Manager(['object-replicator']).once(number=another_num) # Assert the first container/obj primary server now has container/obj diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py index 05d05b3adf..8657314fc7 100755 --- a/test/probe/test_object_async_update.py +++ b/test/probe/test_object_async_update.py @@ -41,15 +41,17 @@ class TestObjectAsyncUpdate(ReplProbeTest): # Kill container servers excepting two of the primaries cpart, cnodes = self.container_ring.get_nodes(self.account, container) cnode = cnodes[0] - kill_nonprimary_server(cnodes, self.port2server, self.pids) - kill_server(cnode['port'], self.port2server, self.pids) + kill_nonprimary_server(cnodes, self.ipport2server, self.pids) + kill_server((cnode['ip'], cnode['port']), + self.ipport2server, self.pids) # Create container/obj obj = 'object-%s' % uuid4() client.put_object(self.url, self.token, container, obj, '') # Restart other primary server - start_server(cnode['port'], self.port2server, self.pids) + start_server((cnode['ip'], cnode['port']), + self.ipport2server, self.pids) # Assert it does not know about container/obj self.assert_(not direct_client.direct_get_container( diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index c3e3990839..37fb7626b5 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -41,7 +41,8 @@ class TestObjectHandoff(ReplProbeTest): opart, onodes = self.object_ring.get_nodes( self.account, container, obj) onode = onodes[0] - kill_server(onode['port'], self.port2server, self.pids) + kill_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Create container/obj (goes to two primary servers and one handoff) client.put_object(self.url, self.token, container, obj, 'VERIFY') @@ -53,7 +54,8 @@ class TestObjectHandoff(ReplProbeTest): # Kill other two container/obj primary servers # to 
ensure GET handoff works for node in onodes[1:]: - kill_server(node['port'], self.port2server, self.pids) + kill_server((node['ip'], node['port']), + self.ipport2server, self.pids) # Indirectly through proxy assert we can get container/obj odata = client.get_object(self.url, self.token, container, obj)[-1] @@ -63,7 +65,8 @@ class TestObjectHandoff(ReplProbeTest): # Restart those other two container/obj primary servers for node in onodes[1:]: - start_server(node['port'], self.port2server, self.pids) + start_server((node['ip'], node['port']), + self.ipport2server, self.pids) # We've indirectly verified the handoff node has the container/object, # but let's directly verify it. @@ -90,7 +93,8 @@ class TestObjectHandoff(ReplProbeTest): (cnode['ip'], cnode['port'])) # Bring the first container/obj primary server back up - start_server(onode['port'], self.port2server, self.pids) + start_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Assert that it doesn't have container/obj yet try: @@ -138,7 +142,8 @@ class TestObjectHandoff(ReplProbeTest): # Kill the first container/obj primary server again (we have two # primaries and the handoff up now) - kill_server(onode['port'], self.port2server, self.pids) + kill_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Delete container/obj try: @@ -175,7 +180,8 @@ class TestObjectHandoff(ReplProbeTest): (cnode['ip'], cnode['port'])) # Restart the first container/obj primary server again - start_server(onode['port'], self.port2server, self.pids) + start_server((onode['ip'], onode['port']), + self.ipport2server, self.pids) # Assert it still has container/obj direct_client.direct_get_object( diff --git a/test/probe/test_reconstructor_revert.py b/test/probe/test_reconstructor_revert.py index 135d1ce421..1daf7a3725 100755 --- a/test/probe/test_reconstructor_revert.py +++ b/test/probe/test_reconstructor_revert.py @@ -294,7 +294,7 @@ class TestReconstructorRevert(ECProbeTest): # the same server handoff_fragment_etag = None for node in onodes: - if node['port'] == hnode['port']: + if self.is_local_to(node, hnode): # we'll keep track of the etag of this fragment we're removing # in case we need it later (queue forshadowing music)... try: @@ -327,7 +327,7 @@ class TestReconstructorRevert(ECProbeTest): raise # partner already had it's fragment removed if (handoff_fragment_etag is not None and - hnode['port'] == partner['port']): + self.is_local_to(hnode, partner)): # oh, well that makes sense then... rebuilt_fragment_etag = handoff_fragment_etag else: diff --git a/test/unit/__init__.py b/test/unit/__init__.py index a4d1cd35ca..0929293b54 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -30,7 +30,7 @@ import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree -from swift.common.utils import Timestamp +from swift.common.utils import Timestamp, NOTICE from test import get_config from swift.common import swob, utils from swift.common.ring import Ring, RingData @@ -478,8 +478,18 @@ class FakeLogger(logging.Logger, object): logging.INFO: 'info', logging.DEBUG: 'debug', logging.CRITICAL: 'critical', + NOTICE: 'notice', } + def notice(self, msg, *args, **kwargs): + """ + Convenience function for syslog priority LOG_NOTICE. The python + logging lvl is set to 25, just above info. SysLogHandler is + monkey patched to map this log lvl to the LOG_NOTICE syslog + priority. 
+ """ + self.log(NOTICE, msg, *args, **kwargs) + def _log(self, level, msg, *args, **kwargs): store_name = self.store_in[level] cargs = [msg] @@ -495,7 +505,7 @@ class FakeLogger(logging.Logger, object): def _clear(self): self.log_dict = defaultdict(list) self.lines_dict = {'critical': [], 'error': [], 'info': [], - 'warning': [], 'debug': []} + 'warning': [], 'debug': [], 'notice': []} def get_lines_for_level(self, level): if level not in self.lines_dict: diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index 77b57a9a85..5ee1af0ca6 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -77,6 +77,15 @@ class TestRingData(unittest.TestCase): for p in xrange(pickle.HIGHEST_PROTOCOL): with closing(GzipFile(ring_fname, 'wb')) as f: pickle.dump(rd, f, protocol=p) + meta_only = ring.RingData.load(ring_fname, metadata_only=True) + self.assertEqual([ + {'id': 0, 'zone': 0, 'region': 1, 'ip': '10.1.1.0', + 'port': 7000}, + {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', + 'port': 7000}, + ], meta_only.devs) + # Pickled rings can't load only metadata, so you get it all + self.assert_ring_data_equal(rd, meta_only) ring_data = ring.RingData.load(ring_fname) self.assert_ring_data_equal(rd, ring_data) @@ -86,6 +95,12 @@ class TestRingData(unittest.TestCase): [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])], [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30) rd.save(ring_fname) + meta_only = ring.RingData.load(ring_fname, metadata_only=True) + self.assertEqual([ + {'id': 0, 'zone': 0, 'region': 1}, + {'id': 1, 'zone': 1, 'region': 1}, + ], meta_only.devs) + self.assertEqual([], meta_only._replica2part2dev_id) rd2 = ring.RingData.load(ring_fname) self.assert_ring_data_equal(rd, rd2) diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index 8eaca09756..4d078e6c00 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -185,22 +185,41 @@ class TestUtils(unittest.TestCase): self.assertFalse(is_valid_hostname("$blah#")) def test_is_local_device(self): - my_ips = ["127.0.0.1", - "0000:0000:0000:0000:0000:0000:0000:0001"] + # localhost shows up in whataremyips() output as "::1" for IPv6 + my_ips = ["127.0.0.1", "::1"] my_port = 6000 self.assertTrue(is_local_device(my_ips, my_port, - "localhost", - my_port)) + "127.0.0.1", my_port)) + self.assertTrue(is_local_device(my_ips, my_port, + "::1", my_port)) + self.assertTrue(is_local_device( + my_ips, my_port, + "0000:0000:0000:0000:0000:0000:0000:0001", my_port)) + self.assertTrue(is_local_device(my_ips, my_port, + "localhost", my_port)) self.assertFalse(is_local_device(my_ips, my_port, - "localhost", - my_port + 1)) + "localhost", my_port + 1)) self.assertFalse(is_local_device(my_ips, my_port, - "127.0.0.2", - my_port)) + "127.0.0.2", my_port)) # for those that don't have a local port self.assertTrue(is_local_device(my_ips, None, my_ips[0], None)) + # When servers_per_port is active, the "my_port" passed in is None + # which means "don't include port in the determination of locality + # because it's not reliable in this deployment scenario" + self.assertTrue(is_local_device(my_ips, None, + "127.0.0.1", 6666)) + self.assertTrue(is_local_device(my_ips, None, + "::1", 6666)) + self.assertTrue(is_local_device( + my_ips, None, + "0000:0000:0000:0000:0000:0000:0000:0001", 6666)) + self.assertTrue(is_local_device(my_ips, None, + "localhost", 6666)) + self.assertFalse(is_local_device(my_ips, None, + "127.0.0.2", 
my_port)) + def test_validate_and_normalize_ip(self): ipv4 = "10.0.0.1" self.assertEqual(ipv4, validate_and_normalize_ip(ipv4)) diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 5f5c6893fa..8cc556127e 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -477,7 +477,7 @@ class TestDBReplicator(unittest.TestCase): def test_run_once_no_ips(self): replicator = TestReplicator({}, logger=unit.FakeLogger()) self._patch(patch.object, db_replicator, 'whataremyips', - lambda *args: []) + lambda *a, **kw: []) replicator.run_once() @@ -487,7 +487,9 @@ class TestDBReplicator(unittest.TestCase): def test_run_once_node_is_not_mounted(self): db_replicator.ring = FakeRingWithSingleNode() - conf = {'mount_check': 'true', 'bind_port': 6000} + # If a bind_ip is specified, it's plumbed into whataremyips() and + # returned by itself. + conf = {'mount_check': 'true', 'bind_ip': '1.1.1.1', 'bind_port': 6000} replicator = TestReplicator(conf, logger=unit.FakeLogger()) self.assertEqual(replicator.mount_check, True) self.assertEqual(replicator.port, 6000) @@ -498,8 +500,6 @@ class TestDBReplicator(unittest.TestCase): replicator.ring.devs[0]['device'])) return False - self._patch(patch.object, db_replicator, 'whataremyips', - lambda *args: ['1.1.1.1']) self._patch(patch.object, db_replicator, 'ismount', mock_ismount) replicator.run_once() @@ -528,7 +528,7 @@ class TestDBReplicator(unittest.TestCase): self.assertEquals(1, node_id) self._patch(patch.object, db_replicator, 'whataremyips', - lambda *args: ['1.1.1.1']) + lambda *a, **kw: ['1.1.1.1']) self._patch(patch.object, db_replicator, 'ismount', lambda *args: True) self._patch(patch.object, db_replicator, 'unlink_older_than', mock_unlink_older_than) @@ -1390,7 +1390,7 @@ class TestReplicatorSync(unittest.TestCase): return True daemon._rsync_file = _rsync_file with mock.patch('swift.common.db_replicator.whataremyips', - new=lambda: [node['replication_ip']]): + new=lambda *a, **kw: [node['replication_ip']]): daemon.run_once() return daemon diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py index 6406dc1923..6e3f217db0 100644 --- a/test/unit/common/test_storage_policy.py +++ b/test/unit/common/test_storage_policy.py @@ -15,14 +15,17 @@ import unittest import StringIO from ConfigParser import ConfigParser +import os import mock +from functools import partial from tempfile import NamedTemporaryFile -from test.unit import patch_policies, FakeRing +from test.unit import patch_policies, FakeRing, temptree from swift.common.storage_policy import ( StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies, reload_storage_policies, get_policy_string, split_policy_string, BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY, - VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE) + VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache) +from swift.common.ring import RingData from swift.common.exceptions import RingValidationError @@ -740,6 +743,139 @@ class TestStoragePolicies(unittest.TestCase): self.assertRaises(PolicyError, policies.get_object_ring, 99, '/path/not/used') + def test_bind_ports_cache(self): + test_policies = [StoragePolicy(0, 'aay', True), + StoragePolicy(1, 'bee', False), + StoragePolicy(2, 'cee', False)] + + my_ips = ['1.2.3.4', '2.3.4.5'] + other_ips = ['3.4.5.6', '4.5.6.7'] + bind_ip = my_ips[1] + devs_by_ring_name1 = { + 'object': [ # 'aay' + {'id': 0, 'zone': 0, 'region': 1, 'ip': 
my_ips[0], + 'port': 6006}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], + 'port': 6007}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], + 'port': 6008}, + None, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6009}], + 'object-1': [ # 'bee' + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], + 'port': 6006}, # dupe + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], + 'port': 6010}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], + 'port': 6011}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6012}], + 'object-2': [ # 'cee' + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], + 'port': 6010}, # on our IP and a not-us IP + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], + 'port': 6013}, + None, + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], + 'port': 6014}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6015}], + } + devs_by_ring_name2 = { + 'object': [ # 'aay' + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], + 'port': 6016}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6019}], + 'object-1': [ # 'bee' + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], + 'port': 6016}, # dupe + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6022}], + 'object-2': [ # 'cee' + {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], + 'port': 6020}, + {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], + 'port': 6025}], + } + ring_files = [ring_name + '.ring.gz' + for ring_name in sorted(devs_by_ring_name1)] + + def _fake_load(gz_path, stub_objs, metadata_only=False): + return RingData( + devs=stub_objs[os.path.basename(gz_path)[:-8]], + replica2part2dev_id=[], + part_shift=24) + + with mock.patch( + 'swift.common.storage_policy.RingData.load' + ) as mock_ld, \ + patch_policies(test_policies), \ + mock.patch('swift.common.storage_policy.whataremyips') \ + as mock_whataremyips, \ + temptree(ring_files) as tempdir: + mock_whataremyips.return_value = my_ips + + cache = BindPortsCache(tempdir, bind_ip) + + self.assertEqual([ + mock.call(bind_ip), + ], mock_whataremyips.mock_calls) + mock_whataremyips.reset_mock() + + mock_ld.side_effect = partial(_fake_load, + stub_objs=devs_by_ring_name1) + self.assertEqual(set([ + 6006, 6008, 6011, 6010, 6014, + ]), cache.all_bind_ports_for_node()) + self.assertEqual([ + mock.call(os.path.join(tempdir, ring_files[0]), + metadata_only=True), + mock.call(os.path.join(tempdir, ring_files[1]), + metadata_only=True), + mock.call(os.path.join(tempdir, ring_files[2]), + metadata_only=True), + ], mock_ld.mock_calls) + mock_ld.reset_mock() + + mock_ld.side_effect = partial(_fake_load, + stub_objs=devs_by_ring_name2) + self.assertEqual(set([ + 6006, 6008, 6011, 6010, 6014, + ]), cache.all_bind_ports_for_node()) + self.assertEqual([], mock_ld.mock_calls) + + # but when all the file mtimes are made different, it'll + # reload + for gz_file in [os.path.join(tempdir, n) + for n in ring_files]: + os.utime(gz_file, (88, 88)) + + self.assertEqual(set([ + 6016, 6020, + ]), cache.all_bind_ports_for_node()) + self.assertEqual([ + mock.call(os.path.join(tempdir, ring_files[0]), + metadata_only=True), + mock.call(os.path.join(tempdir, ring_files[1]), + metadata_only=True), + mock.call(os.path.join(tempdir, ring_files[2]), + metadata_only=True), + ], mock_ld.mock_calls) + mock_ld.reset_mock() + + # Don't do something stupid like crash if a ring file is missing. 
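# The assertions above pin down BindPortsCache's reload policy: ring data
# is re-read only when a ring file's mtime changes, and previously cached
# ports survive a vanished ring file.  A rough, self-contained model of
# that policy follows -- hypothetical names, not Swift's actual
# implementation:

import os

class MtimeGatedPortsCache(object):
    def __init__(self, paths, compute):
        self.paths = paths          # ring files to watch
        self.compute = compute      # callable(path) -> set of bind ports
        self.mtimes = {}            # path -> mtime at last load
        self.values = {}            # path -> cached set of ports

    def all_bind_ports(self):
        for path in self.paths:
            try:
                mtime = os.path.getmtime(path)
            except OSError:
                continue            # missing ring file: keep stale cache
            if self.mtimes.get(path) != mtime:
                # mtime changed (or first sighting): recompute this file
                self.values[path] = self.compute(path)
                self.mtimes[path] = mtime
        ports = set()
        for cached in self.values.values():
            ports.update(cached)
        return ports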
+ os.unlink(os.path.join(tempdir, 'object-2.ring.gz')) + + self.assertEqual(set([ + 6016, 6020, + ]), cache.all_bind_ports_for_node()) + self.assertEqual([], mock_ld.mock_calls) + + # whataremyips() is only called in the constructor + self.assertEqual([], mock_whataremyips.mock_calls) + def test_singleton_passthrough(self): test_policies = [StoragePolicy(0, 'aay', True), StoragePolicy(1, 'bee', False), diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 3072883b83..a668e0ff52 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -1488,6 +1488,18 @@ class TestUtils(unittest.TestCase): self.assert_(len(myips) > 1) self.assert_('127.0.0.1' in myips) + def test_whataremyips_bind_to_all(self): + for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000', + '::0', '::0000', '::', + # Wacky parse-error input produces all IPs + 'I am a bear'): + myips = utils.whataremyips(any_addr) + self.assert_(len(myips) > 1) + self.assert_('127.0.0.1' in myips) + + def test_whataremyips_bind_ip_specific(self): + self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4')) + def test_whataremyips_error(self): def my_interfaces(): return ['eth0'] @@ -1725,6 +1737,21 @@ log_name = %(yarr)s''' for func in required_func_calls: self.assert_(utils.os.called_funcs[func]) + def test_drop_privileges_no_call_setsid(self): + user = getuser() + # over-ride os with mock + required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir', + 'umask') + bad_func_calls = ('setsid',) + utils.os = MockOs(called_funcs=required_func_calls, + raise_funcs=bad_func_calls) + # exercise the code + utils.drop_privileges(user, call_setsid=False) + for func in required_func_calls: + self.assert_(utils.os.called_funcs[func]) + for func in bad_func_calls: + self.assert_(func not in utils.os.called_funcs) + @reset_logger_state def test_capture_stdio(self): # stubs diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 1fbd012dbe..a4da9effef 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -42,7 +42,8 @@ from swift.common.swob import Request from swift.common import wsgi, utils from swift.common.storage_policy import POLICIES -from test.unit import temptree, with_tempdir, write_fake_ring, patch_policies +from test.unit import ( + temptree, with_tempdir, write_fake_ring, patch_policies, FakeLogger) from paste.deploy import loadwsgi @@ -688,6 +689,65 @@ class TestWSGI(unittest.TestCase): self.assertEqual(calls['_loadapp'], 1) self.assertEqual(rc, 0) + @mock.patch('swift.common.wsgi.run_server') + @mock.patch('swift.common.wsgi.WorkersStrategy') + @mock.patch('swift.common.wsgi.ServersPerPortStrategy') + def test_run_server_strategy_plumbing(self, mock_per_port, mock_workers, + mock_run_server): + # Make sure the right strategy gets used in a number of different + # config cases. 
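# Boiled down, the dispatch this test exercises: the per-port strategy is
# chosen only for the object server, and only when servers_per_port is
# set; everything else keeps the classic worker pool.  A sketch of that
# selection -- assumed shape, not the literal run_wsgi code:

def choose_strategy(conf, logger, server_type):
    # Imported here so the sketch stays self-contained; both classes are
    # the ones introduced by this patch series.
    from swift.common.wsgi import ServersPerPortStrategy, WorkersStrategy
    servers_per_port = int(conf.get('servers_per_port') or 0)
    if server_type == 'object-server' and servers_per_port:
        return ServersPerPortStrategy(
            conf, logger, servers_per_port=servers_per_port)
    return WorkersStrategy(conf, logger)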
+ mock_per_port().bind_ports.return_value = 'stop early' + mock_workers().bind_ports.return_value = 'stop early' + logger = FakeLogger() + stub__initrp = [ + {'__file__': 'test', 'workers': 2}, # conf + logger, + 'log_name', + ] + with mock.patch.object(wsgi, '_initrp', return_value=stub__initrp): + for server_type in ('account-server', 'container-server', + 'object-server'): + mock_per_port.reset_mock() + mock_workers.reset_mock() + logger._clear() + self.assertEqual(1, wsgi.run_wsgi('conf_file', server_type)) + self.assertEqual([ + 'stop early', + ], logger.get_lines_for_level('error')) + self.assertEqual([], mock_per_port.mock_calls) + self.assertEqual([ + mock.call(stub__initrp[0], logger), + mock.call().bind_ports(), + ], mock_workers.mock_calls) + + stub__initrp[0]['servers_per_port'] = 3 + for server_type in ('account-server', 'container-server'): + mock_per_port.reset_mock() + mock_workers.reset_mock() + logger._clear() + self.assertEqual(1, wsgi.run_wsgi('conf_file', server_type)) + self.assertEqual([ + 'stop early', + ], logger.get_lines_for_level('error')) + self.assertEqual([], mock_per_port.mock_calls) + self.assertEqual([ + mock.call(stub__initrp[0], logger), + mock.call().bind_ports(), + ], mock_workers.mock_calls) + + mock_per_port.reset_mock() + mock_workers.reset_mock() + logger._clear() + self.assertEqual(1, wsgi.run_wsgi('conf_file', 'object-server')) + self.assertEqual([ + 'stop early', + ], logger.get_lines_for_level('error')) + self.assertEqual([ + mock.call(stub__initrp[0], logger, servers_per_port=3), + mock.call().bind_ports(), + ], mock_per_port.mock_calls) + self.assertEqual([], mock_workers.mock_calls) + def test_run_server_failure1(self): calls = defaultdict(lambda: 0) @@ -751,6 +811,380 @@ class TestWSGI(unittest.TestCase): self.assertEquals(r.environ['PATH_INFO'], '/override') +class TestServersPerPortStrategy(unittest.TestCase): + def setUp(self): + self.logger = FakeLogger() + self.conf = { + 'workers': 100, # ignored + 'user': 'bob', + 'swift_dir': '/jim/cricket', + 'ring_check_interval': '76', + 'bind_ip': '2.3.4.5', + } + self.servers_per_port = 3 + self.s1, self.s2 = mock.MagicMock(), mock.MagicMock() + patcher = mock.patch('swift.common.wsgi.get_socket', + side_effect=[self.s1, self.s2]) + self.mock_get_socket = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.drop_privileges') + self.mock_drop_privileges = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.BindPortsCache') + self.mock_cache_class = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.os.setsid') + self.mock_setsid = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.os.chdir') + self.mock_chdir = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.os.umask') + self.mock_umask = patcher.start() + self.addCleanup(patcher.stop) + + self.all_bind_ports_for_node = \ + self.mock_cache_class().all_bind_ports_for_node + self.ports = (6006, 6007) + self.all_bind_ports_for_node.return_value = set(self.ports) + + self.strategy = wsgi.ServersPerPortStrategy(self.conf, self.logger, + self.servers_per_port) + + def test_loop_timeout(self): + # This strategy should loop every ring_check_interval seconds, even if + # no workers exit. 
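# For context, a schematic of the parent loop that loop_timeout() feeds
# into; the names below are invented, and the real loop in
# swift.common.wsgi differs in detail:

def manager_loop(strategy, fork_worker, wait_for_child_exit):
    while True:
        # Spawn a worker for every (sock, info) pair the strategy still
        # wants covered.
        for sock, sock_info in strategy.new_worker_socks():
            pid = fork_worker(sock)
            strategy.register_worker_start(sock, sock_info, pid)
        # Wait for a child to exit, but never longer than loop_timeout():
        # ServersPerPortStrategy returns ring_check_interval so ring
        # changes are noticed even if no worker dies, while
        # WorkersStrategy returns None and simply blocks.
        pid = wait_for_child_exit(timeout=strategy.loop_timeout())
        if pid:
            strategy.register_worker_exit(pid)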
+ self.assertEqual(76, self.strategy.loop_timeout()) + + # Check the default + del self.conf['ring_check_interval'] + self.strategy = wsgi.ServersPerPortStrategy(self.conf, self.logger, + self.servers_per_port) + + self.assertEqual(15, self.strategy.loop_timeout()) + + def test_bind_ports(self): + self.strategy.bind_ports() + + self.assertEqual(set((6006, 6007)), self.strategy.bind_ports) + self.assertEqual([ + mock.call({'workers': 100, # ignored + 'user': 'bob', + 'swift_dir': '/jim/cricket', + 'ring_check_interval': '76', + 'bind_ip': '2.3.4.5', + 'bind_port': 6006}), + mock.call({'workers': 100, # ignored + 'user': 'bob', + 'swift_dir': '/jim/cricket', + 'ring_check_interval': '76', + 'bind_ip': '2.3.4.5', + 'bind_port': 6007}), + ], self.mock_get_socket.mock_calls) + self.assertEqual( + 6006, self.strategy.port_pid_state.port_for_sock(self.s1)) + self.assertEqual( + 6007, self.strategy.port_pid_state.port_for_sock(self.s2)) + self.assertEqual([mock.call()], self.mock_setsid.mock_calls) + self.assertEqual([mock.call('/')], self.mock_chdir.mock_calls) + self.assertEqual([mock.call(0o22)], self.mock_umask.mock_calls) + + def test_bind_ports_ignores_setsid_errors(self): + self.mock_setsid.side_effect = OSError() + self.strategy.bind_ports() + + self.assertEqual(set((6006, 6007)), self.strategy.bind_ports) + self.assertEqual([ + mock.call({'workers': 100, # ignored + 'user': 'bob', + 'swift_dir': '/jim/cricket', + 'ring_check_interval': '76', + 'bind_ip': '2.3.4.5', + 'bind_port': 6006}), + mock.call({'workers': 100, # ignored + 'user': 'bob', + 'swift_dir': '/jim/cricket', + 'ring_check_interval': '76', + 'bind_ip': '2.3.4.5', + 'bind_port': 6007}), + ], self.mock_get_socket.mock_calls) + self.assertEqual( + 6006, self.strategy.port_pid_state.port_for_sock(self.s1)) + self.assertEqual( + 6007, self.strategy.port_pid_state.port_for_sock(self.s2)) + self.assertEqual([mock.call()], self.mock_setsid.mock_calls) + self.assertEqual([mock.call('/')], self.mock_chdir.mock_calls) + self.assertEqual([mock.call(0o22)], self.mock_umask.mock_calls) + + def test_no_fork_sock(self): + self.assertEqual(None, self.strategy.no_fork_sock()) + + def test_new_worker_socks(self): + self.strategy.bind_ports() + self.all_bind_ports_for_node.reset_mock() + + pid = 88 + got_si = [] + for s, i in self.strategy.new_worker_socks(): + got_si.append((s, i)) + self.strategy.register_worker_start(s, i, pid) + pid += 1 + + self.assertEqual([ + (self.s1, 0), (self.s1, 1), (self.s1, 2), + (self.s2, 0), (self.s2, 1), (self.s2, 2), + ], got_si) + self.assertEqual([ + 'Started child %d (PID %d) for port %d' % (0, 88, 6006), + 'Started child %d (PID %d) for port %d' % (1, 89, 6006), + 'Started child %d (PID %d) for port %d' % (2, 90, 6006), + 'Started child %d (PID %d) for port %d' % (0, 91, 6007), + 'Started child %d (PID %d) for port %d' % (1, 92, 6007), + 'Started child %d (PID %d) for port %d' % (2, 93, 6007), + ], self.logger.get_lines_for_level('notice')) + self.logger._clear() + + # Steady-state... + self.assertEqual([], list(self.strategy.new_worker_socks())) + self.all_bind_ports_for_node.reset_mock() + + # Get rid of servers for ports which disappear from the ring + self.ports = (6007,) + self.all_bind_ports_for_node.return_value = set(self.ports) + self.s1.reset_mock() + self.s2.reset_mock() + + with mock.patch('swift.common.wsgi.greenio') as mock_greenio: + self.assertEqual([], list(self.strategy.new_worker_socks())) + + self.assertEqual([ + mock.call(), # ring_check_interval has passed... 
+ ], self.all_bind_ports_for_node.mock_calls) + self.assertEqual([ + mock.call.shutdown_safe(self.s1), + ], mock_greenio.mock_calls) + self.assertEqual([ + mock.call.close(), + ], self.s1.mock_calls) + self.assertEqual([], self.s2.mock_calls) # not closed + self.assertEqual([ + 'Closing unnecessary sock for port %d' % 6006, + ], self.logger.get_lines_for_level('notice')) + self.logger._clear() + + # Create new socket & workers for new ports that appear in ring + self.ports = (6007, 6009) + self.all_bind_ports_for_node.return_value = set(self.ports) + self.s1.reset_mock() + self.s2.reset_mock() + s3 = mock.MagicMock() + self.mock_get_socket.side_effect = Exception('ack') + + # But first make sure we handle failure to bind to the requested port! + got_si = [] + for s, i in self.strategy.new_worker_socks(): + got_si.append((s, i)) + self.strategy.register_worker_start(s, i, pid) + pid += 1 + + self.assertEqual([], got_si) + self.assertEqual([ + 'Unable to bind to port %d: %s' % (6009, Exception('ack')), + 'Unable to bind to port %d: %s' % (6009, Exception('ack')), + 'Unable to bind to port %d: %s' % (6009, Exception('ack')), + ], self.logger.get_lines_for_level('critical')) + self.logger._clear() + + # Will keep trying, so let it succeed again + self.mock_get_socket.side_effect = [s3] + + got_si = [] + for s, i in self.strategy.new_worker_socks(): + got_si.append((s, i)) + self.strategy.register_worker_start(s, i, pid) + pid += 1 + + self.assertEqual([ + (s3, 0), (s3, 1), (s3, 2), + ], got_si) + self.assertEqual([ + 'Started child %d (PID %d) for port %d' % (0, 94, 6009), + 'Started child %d (PID %d) for port %d' % (1, 95, 6009), + 'Started child %d (PID %d) for port %d' % (2, 96, 6009), + ], self.logger.get_lines_for_level('notice')) + self.logger._clear() + + # Steady-state... + self.assertEqual([], list(self.strategy.new_worker_socks())) + self.all_bind_ports_for_node.reset_mock() + + # Restart a guy who died on us + self.strategy.register_worker_exit(95) # server_idx == 1 + + got_si = [] + for s, i in self.strategy.new_worker_socks(): + got_si.append((s, i)) + self.strategy.register_worker_start(s, i, pid) + pid += 1 + + self.assertEqual([ + (s3, 1), + ], got_si) + self.assertEqual([ + 'Started child %d (PID %d) for port %d' % (1, 97, 6009), + ], self.logger.get_lines_for_level('notice')) + self.logger._clear() + + # Check log_sock_exit + self.strategy.log_sock_exit(self.s2, 2) + self.assertEqual([ + 'Child %d (PID %d, port %d) exiting normally' % ( + 2, os.getpid(), 6007), + ], self.logger.get_lines_for_level('notice')) + + # It's ok to register_worker_exit for a PID that's already had its + # socket closed due to orphaning. + # This is one of the workers for port 6006 that already got reaped. 
+ self.assertEqual(None, self.strategy.register_worker_exit(89)) + + def test_post_fork_hook(self): + self.strategy.post_fork_hook() + + self.assertEqual([ + mock.call('bob', call_setsid=False), + ], self.mock_drop_privileges.mock_calls) + + def test_shutdown_sockets(self): + self.strategy.bind_ports() + + with mock.patch('swift.common.wsgi.greenio') as mock_greenio: + self.strategy.shutdown_sockets() + + self.assertEqual([ + mock.call.shutdown_safe(self.s1), + mock.call.shutdown_safe(self.s2), + ], mock_greenio.mock_calls) + self.assertEqual([ + mock.call.close(), + ], self.s1.mock_calls) + self.assertEqual([ + mock.call.close(), + ], self.s2.mock_calls) + + +class TestWorkersStrategy(unittest.TestCase): + def setUp(self): + self.logger = FakeLogger() + self.conf = { + 'workers': 2, + 'user': 'bob', + } + self.strategy = wsgi.WorkersStrategy(self.conf, self.logger) + patcher = mock.patch('swift.common.wsgi.get_socket', + return_value='abc') + self.mock_get_socket = patcher.start() + self.addCleanup(patcher.stop) + patcher = mock.patch('swift.common.wsgi.drop_privileges') + self.mock_drop_privileges = patcher.start() + self.addCleanup(patcher.stop) + + def test_loop_timeout(self): + # This strategy should block in the green.os.wait() until a worker + # process exits. + self.assertEqual(None, self.strategy.loop_timeout()) + + def test_binding(self): + self.assertEqual(None, self.strategy.bind_ports()) + + self.assertEqual('abc', self.strategy.sock) + self.assertEqual([ + mock.call(self.conf), + ], self.mock_get_socket.mock_calls) + self.assertEqual([ + mock.call('bob'), + ], self.mock_drop_privileges.mock_calls) + + self.mock_get_socket.side_effect = wsgi.ConfigFilePortError() + + self.assertEqual( + 'bind_port wasn\'t properly set in the config file. 
' + 'It must be explicitly set to a valid port number.', + self.strategy.bind_ports()) + + def test_no_fork_sock(self): + self.strategy.bind_ports() + self.assertEqual(None, self.strategy.no_fork_sock()) + + self.conf['workers'] = 0 + self.strategy = wsgi.WorkersStrategy(self.conf, self.logger) + self.strategy.bind_ports() + + self.assertEqual('abc', self.strategy.no_fork_sock()) + + def test_new_worker_socks(self): + self.strategy.bind_ports() + pid = 88 + sock_count = 0 + for s, i in self.strategy.new_worker_socks(): + self.assertEqual('abc', s) + self.assertEqual(None, i) # unused for this strategy + self.strategy.register_worker_start(s, 'unused', pid) + pid += 1 + sock_count += 1 + + self.assertEqual([ + 'Started child %s' % 88, + 'Started child %s' % 89, + ], self.logger.get_lines_for_level('notice')) + + self.assertEqual(2, sock_count) + self.assertEqual([], list(self.strategy.new_worker_socks())) + + sock_count = 0 + self.strategy.register_worker_exit(88) + + self.assertEqual([ + 'Removing dead child %s' % 88, + ], self.logger.get_lines_for_level('error')) + + for s, i in self.strategy.new_worker_socks(): + self.assertEqual('abc', s) + self.assertEqual(None, i) # unused for this strategy + self.strategy.register_worker_start(s, 'unused', pid) + pid += 1 + sock_count += 1 + + self.assertEqual(1, sock_count) + self.assertEqual([ + 'Started child %s' % 88, + 'Started child %s' % 89, + 'Started child %s' % 90, + ], self.logger.get_lines_for_level('notice')) + + def test_post_fork_hook(self): + # Just don't crash or do something stupid + self.assertEqual(None, self.strategy.post_fork_hook()) + + def test_shutdown_sockets(self): + self.mock_get_socket.return_value = mock.MagicMock() + self.strategy.bind_ports() + with mock.patch('swift.common.wsgi.greenio') as mock_greenio: + self.strategy.shutdown_sockets() + self.assertEqual([ + mock.call.shutdown_safe(self.mock_get_socket.return_value), + ], mock_greenio.mock_calls) + self.assertEqual([ + mock.call.close(), + ], self.mock_get_socket.return_value.mock_calls) + + def test_log_sock_exit(self): + self.strategy.log_sock_exit('blahblah', 'blahblah') + my_pid = os.getpid() + self.assertEqual([ + 'Child %d exiting normally' % my_pid, + ], self.logger.get_lines_for_level('notice')) + + class TestWSGIContext(unittest.TestCase): def test_app_call(self): diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index 8c6d895323..bdf59f9f3e 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -289,7 +289,11 @@ class TestContainerSync(unittest.TestCase): # those. 
cring = FakeRing() with mock.patch('swift.container.sync.InternalClient'): - cs = sync.ContainerSync({}, container_ring=cring) + cs = sync.ContainerSync({ + 'bind_ip': '10.0.0.0', + }, container_ring=cring) + # Plumbing test for bind_ip and whataremyips() + self.assertEqual(['10.0.0.0'], cs._myips) orig_ContainerBroker = sync.ContainerBroker try: sync.ContainerBroker = lambda p: FakeContainerBroker( diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 321ea3751d..a52e64bd1a 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -73,16 +73,10 @@ def make_ec_archive_bodies(policy, test_body): fragment_payloads.append(fragments) # join up the fragment payloads per node - ec_archive_bodies = [''.join(fragments) - for fragments in zip(*fragment_payloads)] + ec_archive_bodies = [''.join(frags) for frags in zip(*fragment_payloads)] return ec_archive_bodies -def _ips(): - return ['127.0.0.1'] -object_reconstructor.whataremyips = _ips - - def _create_test_rings(path): testgz = os.path.join(path, 'object.ring.gz') intended_replica2part2dev_id = [ @@ -582,7 +576,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase): except AssertionError as e: extra_info = \ '\n\n... for %r in part num %s job %r' % ( - k, part_num, job_key) + k, part_num, job_key) raise AssertionError(str(e) + extra_info) else: self.fail( @@ -1001,6 +995,7 @@ class TestObjectReconstructor(unittest.TestCase): def setUp(self): self.policy = POLICIES.default + self.policy.object_ring._rtime = time.time() + 3600 self.testdir = tempfile.mkdtemp() self.devices = os.path.join(self.testdir, 'devices') self.local_dev = self.policy.object_ring.devs[0] @@ -1009,6 +1004,7 @@ class TestObjectReconstructor(unittest.TestCase): self.conf = { 'devices': self.devices, 'mount_check': False, + 'bind_ip': self.ip, 'bind_port': self.port, } self.logger = debug_logger('object-reconstructor') @@ -1042,9 +1038,7 @@ class TestObjectReconstructor(unittest.TestCase): utils.mkdirs(os.path.join( self.devices, self.local_dev['device'], datadir, str(part))) - with mock.patch('swift.obj.reconstructor.whataremyips', - return_value=[self.ip]): - part_infos = list(self.reconstructor.collect_parts()) + part_infos = list(self.reconstructor.collect_parts()) found_parts = sorted(int(p['partition']) for p in part_infos) self.assertEqual(found_parts, sorted(stub_parts)) for part_info in part_infos: @@ -1056,10 +1050,112 @@ class TestObjectReconstructor(unittest.TestCase): diskfile.get_data_dir(self.policy), str(part_info['partition']))) + def test_collect_parts_skips_non_local_devs_servers_per_port(self): + self._configure_reconstructor(devices=self.devices, mount_check=False, + bind_ip=self.ip, bind_port=self.port, + servers_per_port=2) + + device_parts = { + 'sda': (374,), + 'sdb': (179, 807), # w/one-serv-per-port, same IP alone is local + 'sdc': (363, 468, 843), + 'sdd': (912,), # "not local" via different IP + } + for policy in POLICIES: + datadir = diskfile.get_data_dir(policy) + for dev, parts in device_parts.items(): + for part in parts: + utils.mkdirs(os.path.join( + self.devices, dev, + datadir, str(part))) + + # we're only going to add sda and sdc into the ring + local_devs = ('sda', 'sdb', 'sdc') + stub_ring_devs = [{ + 'device': dev, + 'replication_ip': self.ip, + 'replication_port': self.port + 1 if dev == 'sdb' else self.port, + } for dev in local_devs] + stub_ring_devs.append({ + 'device': 'sdd', + 'replication_ip': '127.0.0.88', # not local via IP + 'replication_port': 
self.port, + }) + self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips + with nested(mock.patch('swift.obj.reconstructor.whataremyips', + return_value=[self.ip]), + mock.patch.object(self.policy.object_ring, '_devs', + new=stub_ring_devs)): + part_infos = list(self.reconstructor.collect_parts()) + found_parts = sorted(int(p['partition']) for p in part_infos) + expected_parts = sorted(itertools.chain( + *(device_parts[d] for d in local_devs))) + self.assertEqual(found_parts, expected_parts) + for part_info in part_infos: + self.assertEqual(part_info['policy'], self.policy) + self.assertTrue(part_info['local_dev'] in stub_ring_devs) + dev = part_info['local_dev'] + self.assertEqual(part_info['part_path'], + os.path.join(self.devices, + dev['device'], + diskfile.get_data_dir(self.policy), + str(part_info['partition']))) + + def test_collect_parts_multi_device_skips_non_non_local_devs(self): + device_parts = { + 'sda': (374,), + 'sdb': (179, 807), # "not local" via different port + 'sdc': (363, 468, 843), + 'sdd': (912,), # "not local" via different IP + } + for policy in POLICIES: + datadir = diskfile.get_data_dir(policy) + for dev, parts in device_parts.items(): + for part in parts: + utils.mkdirs(os.path.join( + self.devices, dev, + datadir, str(part))) + + # we're only going to add sda and sdc into the ring + local_devs = ('sda', 'sdc') + stub_ring_devs = [{ + 'device': dev, + 'replication_ip': self.ip, + 'replication_port': self.port, + } for dev in local_devs] + stub_ring_devs.append({ + 'device': 'sdb', + 'replication_ip': self.ip, + 'replication_port': self.port + 1, # not local via port + }) + stub_ring_devs.append({ + 'device': 'sdd', + 'replication_ip': '127.0.0.88', # not local via IP + 'replication_port': self.port, + }) + self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips + with nested(mock.patch('swift.obj.reconstructor.whataremyips', + return_value=[self.ip]), + mock.patch.object(self.policy.object_ring, '_devs', + new=stub_ring_devs)): + part_infos = list(self.reconstructor.collect_parts()) + found_parts = sorted(int(p['partition']) for p in part_infos) + expected_parts = sorted(itertools.chain( + *(device_parts[d] for d in local_devs))) + self.assertEqual(found_parts, expected_parts) + for part_info in part_infos: + self.assertEqual(part_info['policy'], self.policy) + self.assertTrue(part_info['local_dev'] in stub_ring_devs) + dev = part_info['local_dev'] + self.assertEqual(part_info['part_path'], + os.path.join(self.devices, + dev['device'], + diskfile.get_data_dir(self.policy), + str(part_info['partition']))) + def test_collect_parts_multi_device_skips_non_ring_devices(self): device_parts = { 'sda': (374,), - 'sdb': (179, 807), 'sdc': (363, 468, 843), } for policy in POLICIES: @@ -1075,8 +1171,9 @@ class TestObjectReconstructor(unittest.TestCase): stub_ring_devs = [{ 'device': dev, 'replication_ip': self.ip, - 'replication_port': self.port + 'replication_port': self.port, } for dev in local_devs] + self.reconstructor.bind_ip = '0.0.0.0' # use whataremyips with nested(mock.patch('swift.obj.reconstructor.whataremyips', return_value=[self.ip]), mock.patch.object(self.policy.object_ring, '_devs', diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index a0844ebb8f..08eb88b9aa 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -36,9 +36,8 @@ from swift.obj import diskfile, replicator as object_replicator from swift.common.storage_policy import StoragePolicy, POLICIES -def _ips(): +def _ips(*args, 
**kwargs): return ['127.0.0.0'] -object_replicator.whataremyips = _ips def mock_http_connect(status): @@ -171,34 +170,46 @@ class TestObjectReplicator(unittest.TestCase): rmtree(self.testdir, ignore_errors=1) os.mkdir(self.testdir) os.mkdir(self.devices) - os.mkdir(os.path.join(self.devices, 'sda')) - self.objects = os.path.join(self.devices, 'sda', - diskfile.get_data_dir(POLICIES[0])) - self.objects_1 = os.path.join(self.devices, 'sda', - diskfile.get_data_dir(POLICIES[1])) - os.mkdir(self.objects) - os.mkdir(self.objects_1) - self.parts = {} - self.parts_1 = {} - for part in ['0', '1', '2', '3']: - self.parts[part] = os.path.join(self.objects, part) - os.mkdir(self.parts[part]) - self.parts_1[part] = os.path.join(self.objects_1, part) - os.mkdir(self.parts_1[part]) + + self.objects, self.objects_1, self.parts, self.parts_1 = \ + self._write_disk_data('sda') _create_test_rings(self.testdir) + self.logger = debug_logger('test-replicator') self.conf = dict( + bind_ip=_ips()[0], bind_port=6000, swift_dir=self.testdir, devices=self.devices, mount_check='false', timeout='300', stats_interval='1', sync_method='rsync') - self.replicator = object_replicator.ObjectReplicator(self.conf) - self.logger = self.replicator.logger = debug_logger('test-replicator') - self.df_mgr = diskfile.DiskFileManager(self.conf, - self.replicator.logger) + self._create_replicator() def tearDown(self): rmtree(self.testdir, ignore_errors=1) + def _write_disk_data(self, disk_name): + os.mkdir(os.path.join(self.devices, disk_name)) + objects = os.path.join(self.devices, disk_name, + diskfile.get_data_dir(POLICIES[0])) + objects_1 = os.path.join(self.devices, disk_name, + diskfile.get_data_dir(POLICIES[1])) + os.mkdir(objects) + os.mkdir(objects_1) + parts = {} + parts_1 = {} + for part in ['0', '1', '2', '3']: + parts[part] = os.path.join(objects, part) + os.mkdir(parts[part]) + parts_1[part] = os.path.join(objects_1, part) + os.mkdir(parts_1[part]) + + return objects, objects_1, parts, parts_1 + + def _create_replicator(self): + self.replicator = object_replicator.ObjectReplicator(self.conf) + self.replicator.logger = self.logger + self.df_mgr = diskfile.DiskFileManager(self.conf, self.logger) + def test_run_once(self): conf = dict(swift_dir=self.testdir, devices=self.devices, + bind_ip=_ips()[0], mount_check='false', timeout='300', stats_interval='1') replicator = object_replicator.ObjectReplicator(conf) was_connector = object_replicator.http_connect @@ -260,7 +271,9 @@ class TestObjectReplicator(unittest.TestCase): process_arg_checker.append( (0, '', ['rsync', whole_path_from, rsync_mods])) with _mock_process(process_arg_checker): - replicator.run_once() + with mock.patch('swift.obj.replicator.whataremyips', + side_effect=_ips): + replicator.run_once() self.assertFalse(process_errors) object_replicator.http_connect = was_connector @@ -321,17 +334,306 @@ class TestObjectReplicator(unittest.TestCase): [node['id'] for node in jobs_by_pol_part['12']['nodes']], [2, 3]) self.assertEquals( [node['id'] for node in jobs_by_pol_part['13']['nodes']], [3, 1]) - for part in ['00', '01', '02', '03', ]: + for part in ['00', '01', '02', '03']: for node in jobs_by_pol_part[part]['nodes']: self.assertEquals(node['device'], 'sda') self.assertEquals(jobs_by_pol_part[part]['path'], os.path.join(self.objects, part[1:])) - for part in ['10', '11', '12', '13', ]: + for part in ['10', '11', '12', '13']: for node in jobs_by_pol_part[part]['nodes']: self.assertEquals(node['device'], 'sda') self.assertEquals(jobs_by_pol_part[part]['path'], 
os.path.join(self.objects_1, part[1:])) + @mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l) + def test_collect_jobs_multi_disk(self, mock_shuffle): + devs = [ + # Two disks on same IP/port + {'id': 0, 'device': 'sda', 'zone': 0, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + {'id': 1, 'device': 'sdb', 'zone': 1, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + # Two disks on same server, different ports + {'id': 2, 'device': 'sdc', 'zone': 2, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + {'id': 3, 'device': 'sdd', 'zone': 4, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + ] + objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') + objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') + objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd') + _create_test_rings(self.testdir, devs) + + jobs = self.replicator.collect_jobs() + + self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) + + jobs_to_delete = [j for j in jobs if j['delete']] + self.assertEquals(len(jobs_to_delete), 4) + self.assertEqual([ + '1', '2', # policy 0; 1 not on sda, 2 not on sdb + '1', '2', # policy 1; 1 not on sda, 2 not on sdb + ], [j['partition'] for j in jobs_to_delete]) + + jobs_by_pol_part_dev = {} + for job in jobs: + # There should be no jobs with a device not in just sda & sdb + self.assertTrue(job['device'] in ('sda', 'sdb')) + jobs_by_pol_part_dev[ + str(int(job['policy'])) + job['partition'] + job['device'] + ] = job + + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['00sda']['nodes']], + [1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['00sdb']['nodes']], + [0, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['01sda']['nodes']], + [1, 2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['01sdb']['nodes']], + [2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['02sda']['nodes']], + [2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['02sdb']['nodes']], + [2, 3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['03sda']['nodes']], + [3, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['03sdb']['nodes']], + [3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['10sda']['nodes']], + [1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['10sdb']['nodes']], + [0, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['11sda']['nodes']], + [1, 2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['11sdb']['nodes']], + [2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['12sda']['nodes']], + [2, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['12sdb']['nodes']], + [2, 3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['13sda']['nodes']], + [3, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['13sdb']['nodes']], + [3, 0]) + for part in ['00', '01', '02', '03']: + self.assertEquals(jobs_by_pol_part_dev[part + 'sda']['path'], + os.path.join(self.objects, part[1:])) + self.assertEquals(jobs_by_pol_part_dev[part + 'sdb']['path'], + os.path.join(objects_sdb, part[1:])) + for part in 
['10', '11', '12', '13']: + self.assertEquals(jobs_by_pol_part_dev[part + 'sda']['path'], + os.path.join(self.objects_1, part[1:])) + self.assertEquals(jobs_by_pol_part_dev[part + 'sdb']['path'], + os.path.join(objects_1_sdb, part[1:])) + + @mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l) + def test_collect_jobs_multi_disk_diff_ports_normal(self, mock_shuffle): + # Normally (servers_per_port=0), replication_ip AND replication_port + # are used to determine local ring device entries. Here we show that + # with bind_ip='127.0.0.1', bind_port=6000, only "sdc" is local. + devs = [ + # Two disks on same IP/port + {'id': 0, 'device': 'sda', 'zone': 0, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + {'id': 1, 'device': 'sdb', 'zone': 1, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + # Two disks on same server, different ports + {'id': 2, 'device': 'sdc', 'zone': 2, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + {'id': 3, 'device': 'sdd', 'zone': 4, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + ] + objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') + objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') + objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd') + _create_test_rings(self.testdir, devs) + + self.conf['bind_ip'] = '127.0.0.1' + self._create_replicator() + + jobs = self.replicator.collect_jobs() + + self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) + + jobs_to_delete = [j for j in jobs if j['delete']] + self.assertEquals(len(jobs_to_delete), 2) + self.assertEqual([ + '3', # policy 0; 3 not on sdc + '3', # policy 1; 3 not on sdc + ], [j['partition'] for j in jobs_to_delete]) + + jobs_by_pol_part_dev = {} + for job in jobs: + # There should be no jobs with a device not sdc + self.assertEqual(job['device'], 'sdc') + jobs_by_pol_part_dev[ + str(int(job['policy'])) + job['partition'] + job['device'] + ] = job + + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['00sdc']['nodes']], + [0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['01sdc']['nodes']], + [1, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['02sdc']['nodes']], + [3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['03sdc']['nodes']], + [3, 0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['10sdc']['nodes']], + [0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['11sdc']['nodes']], + [1, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['12sdc']['nodes']], + [3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['13sdc']['nodes']], + [3, 0, 1]) + for part in ['00', '01', '02', '03']: + self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_sdc, part[1:])) + for part in ['10', '11', '12', '13']: + self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_1_sdc, part[1:])) + + @mock.patch('swift.obj.replicator.random.shuffle', side_effect=lambda l: l) + def test_collect_jobs_multi_disk_servers_per_port(self, mock_shuffle): + # Normally (servers_per_port=0), replication_ip AND replication_port + # are used to determine local ring device entries. 
Here we show that + # with servers_per_port > 0 and bind_ip='127.0.0.1', bind_port=6000, + # then both "sdc" and "sdd" are local. + devs = [ + # Two disks on same IP/port + {'id': 0, 'device': 'sda', 'zone': 0, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + {'id': 1, 'device': 'sdb', 'zone': 1, + 'region': 1, 'ip': '1.1.1.1', 'port': 1111, + 'replication_ip': '127.0.0.0', 'replication_port': 6000}, + # Two disks on same server, different ports + {'id': 2, 'device': 'sdc', 'zone': 2, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6000}, + {'id': 3, 'device': 'sdd', 'zone': 4, + 'region': 2, 'ip': '1.1.1.2', 'port': 1112, + 'replication_ip': '127.0.0.1', 'replication_port': 6001}, + ] + objects_sdb, objects_1_sdb, _, _ = self._write_disk_data('sdb') + objects_sdc, objects_1_sdc, _, _ = self._write_disk_data('sdc') + objects_sdd, objects_1_sdd, _, _ = self._write_disk_data('sdd') + _create_test_rings(self.testdir, devs) + + self.conf['bind_ip'] = '127.0.0.1' + self.conf['servers_per_port'] = 1 # diff port ok + self._create_replicator() + + jobs = self.replicator.collect_jobs() + + self.assertEqual([mock.call(jobs)], mock_shuffle.mock_calls) + + jobs_to_delete = [j for j in jobs if j['delete']] + self.assertEquals(len(jobs_to_delete), 4) + self.assertEqual([ + '3', '0', # policy 0; 3 not on sdc, 0 not on sdd + '3', '0', # policy 1; 3 not on sdc, 0 not on sdd + ], [j['partition'] for j in jobs_to_delete]) + + jobs_by_pol_part_dev = {} + for job in jobs: + # There should be no jobs with a device not in just sdc & sdd + self.assertTrue(job['device'] in ('sdc', 'sdd')) + jobs_by_pol_part_dev[ + str(int(job['policy'])) + job['partition'] + job['device'] + ] = job + + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['00sdc']['nodes']], + [0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['00sdd']['nodes']], + [0, 1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['01sdc']['nodes']], + [1, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['01sdd']['nodes']], + [1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['02sdc']['nodes']], + [3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['02sdd']['nodes']], + [2, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['03sdc']['nodes']], + [3, 0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['03sdd']['nodes']], + [0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['10sdc']['nodes']], + [0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['10sdd']['nodes']], + [0, 1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['11sdc']['nodes']], + [1, 3]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['11sdd']['nodes']], + [1, 2]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['12sdc']['nodes']], + [3, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['12sdd']['nodes']], + [2, 0]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['13sdc']['nodes']], + [3, 0, 1]) + self.assertEquals([node['id'] + for node in jobs_by_pol_part_dev['13sdd']['nodes']], + [0, 1]) + for part in ['00', '01', '02', '03']: + self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'], + os.path.join(objects_sdc, part[1:])) + self.assertEquals(jobs_by_pol_part_dev[part + 
'sdd']['path'],
+                              os.path.join(objects_sdd, part[1:]))
+        for part in ['10', '11', '12', '13']:
+            self.assertEquals(jobs_by_pol_part_dev[part + 'sdc']['path'],
+                              os.path.join(objects_1_sdc, part[1:]))
+            self.assertEquals(jobs_by_pol_part_dev[part + 'sdd']['path'],
+                              os.path.join(objects_1_sdd, part[1:]))
+
     def test_collect_jobs_handoffs_first(self):
         self.replicator.handoffs_first = True
         jobs = self.replicator.collect_jobs()
@@ -929,6 +1231,7 @@ class TestObjectReplicator(unittest.TestCase):

     def test_run_once_recover_from_failure(self):
         conf = dict(swift_dir=self.testdir, devices=self.devices,
+                    bind_ip=_ips()[0],
                     mount_check='false', timeout='300', stats_interval='1')
         replicator = object_replicator.ObjectReplicator(conf)
         was_connector = object_replicator.http_connect
@@ -975,6 +1278,7 @@ class TestObjectReplicator(unittest.TestCase):

     def test_run_once_recover_from_timeout(self):
         conf = dict(swift_dir=self.testdir, devices=self.devices,
+                    bind_ip=_ips()[0],
                     mount_check='false', timeout='300', stats_interval='1')
         replicator = object_replicator.ObjectReplicator(conf)
         was_connector = object_replicator.http_connect

From 12d8a53fffea6e4bed8ba3d502ce625f5c6710b9 Mon Sep 17 00:00:00 2001
From: Samuel Merritt
Date: Thu, 18 Jun 2015 12:58:03 -0700
Subject: [PATCH 65/98] Get better at closing WSGI iterables.

PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]"

There's a bunch of places where we weren't doing that; some of them
matter more than others. Calling .close() can prevent a connection leak
in some cases. In others, it just provides a certain pedantic smugness.
Either way, we should do what WSGI requires.

Noteworthy goofs include:

* If a client is downloading a large object and disconnects halfway
  through, a proxy -> obj connection may be leaked. In this case, the
  WSGI iterable is a SegmentedIterable, which lacked a close() method.
  Thus, when the WSGI server noticed the client disconnect, it had no
  way of telling the SegmentedIterable about it, and so the underlying
  iterable for the segment's data didn't get closed.

  Here, it seems likely (though unproven) that the object server would
  time out and kill the connection, or that a ChunkWriteTimeout would
  fire down in the proxy server, so the leaked connection would
  eventually go away. However, a flurry of client disconnects could
  leave a big pile of useless connections.

* If a conditional request receives a 304 or 412, the underlying
  app_iter is not closed. This mostly affects conditional requests for
  large objects.

The leaked connections were noticed by this patch's co-author, who made
the changes to SegmentedIterable. Those changes helped, but did not
completely fix, the issue. The rest of the patch is an attempt to plug
the rest of the holes.
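As a self-contained illustration of the rule this commit enforces, here
is the close-if-you-can idiom in miniature. The two helpers match the
ones this patch adds to swift.common.utils; read_body() is an invented
example caller, not code from the patch:

    from contextlib import contextmanager

    def close_if_possible(maybe_closable):
        # Close anything that has a close() method; ignore everything else.
        close_method = getattr(maybe_closable, 'close', None)
        if callable(close_method):
            return close_method()

    @contextmanager
    def closing_if_possible(maybe_closable):
        # contextlib.closing(), but tolerant of objects without close();
        # the finally clause guarantees close() even if iteration raises.
        try:
            yield maybe_closable
        finally:
            close_if_possible(maybe_closable)

    def read_body(app_iter):
        # Drain a WSGI app_iter and guarantee close() gets called.
        with closing_if_possible(app_iter):
            return ''.join(app_iter)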
Co-Authored-By: Romain LE DISEZ Change-Id: I168e147aae7c1728e7e3fdabb7fba6f2d747d937 Closes-Bug: #1466549 --- swift/common/middleware/dlo.py | 8 ++++-- swift/common/middleware/slo.py | 10 ++++--- swift/common/request_helpers.py | 35 +++++++++---------------- swift/common/swob.py | 9 ++++++- swift/common/utils.py | 22 ++++++++++++++++ swift/proxy/controllers/obj.py | 4 +-- test/unit/common/middleware/helpers.py | 32 +++++++++++++++++++++- test/unit/common/middleware/test_dlo.py | 10 +++++-- test/unit/common/middleware/test_slo.py | 13 ++++++--- 9 files changed, 105 insertions(+), 38 deletions(-) diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py index d2761acb67..9330ccb8cb 100644 --- a/swift/common/middleware/dlo.py +++ b/swift/common/middleware/dlo.py @@ -22,7 +22,8 @@ from swift.common.http import is_success from swift.common.swob import Request, Response, \ HTTPRequestedRangeNotSatisfiable, HTTPBadRequest, HTTPConflict from swift.common.utils import get_logger, json, \ - RateLimitedIterator, read_conf_dir, quote + RateLimitedIterator, read_conf_dir, quote, close_if_possible, \ + closing_if_possible from swift.common.request_helpers import SegmentedIterable from swift.common.wsgi import WSGIContext, make_subrequest from urllib import unquote @@ -48,7 +49,8 @@ class GetContext(WSGIContext): con_resp = con_req.get_response(self.dlo.app) if not is_success(con_resp.status_int): return con_resp, None - return None, json.loads(''.join(con_resp.app_iter)) + with closing_if_possible(con_resp.app_iter): + return None, json.loads(''.join(con_resp.app_iter)) def _segment_listing_iterator(self, req, version, account, container, prefix, segments, first_byte=None, @@ -107,6 +109,7 @@ class GetContext(WSGIContext): # we've already started sending the response body to the # client, so all we can do is raise an exception to make the # WSGI server close the connection early + close_if_possible(error_response.app_iter) raise ListingIterError( "Got status %d listing container /%s/%s" % (error_response.status_int, account, container)) @@ -233,6 +236,7 @@ class GetContext(WSGIContext): # make sure this response is for a dynamic large object manifest for header, value in self._response_headers: if (header.lower() == 'x-object-manifest'): + close_if_possible(resp_iter) response = self.get_or_head_response(req, value) return response(req.environ, start_response) else: diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 241210d6ae..4fce4f9d32 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -159,9 +159,9 @@ from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \ Response from swift.common.utils import json, get_logger, config_true_value, \ get_valid_utf8_str, override_bytes_from_content_type, split_path, \ - register_swift_info, RateLimitedIterator, quote -from swift.common.request_helpers import SegmentedIterable, \ - closing_if_possible, close_if_possible + register_swift_info, RateLimitedIterator, quote, close_if_possible, \ + closing_if_possible +from swift.common.request_helpers import SegmentedIterable from swift.common.constraints import check_utf8, MAX_BUFFERED_SLO_SEGMENTS from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED, is_success from swift.common.wsgi import WSGIContext, make_subrequest @@ -239,6 +239,7 @@ class SloGetContext(WSGIContext): sub_resp = sub_req.get_response(self.slo.app) if not is_success(sub_resp.status_int): + close_if_possible(sub_resp.app_iter) raise 
ListingIterError(
                'ERROR: while fetching %s, GET of submanifest %s '
                'failed with status %d' % (req.path, sub_req.path,
@@ -412,7 +413,8 @@ class SloGetContext(WSGIContext):
         return response(req.environ, start_response)

     def get_or_head_response(self, req, resp_headers, resp_iter):
-        resp_body = ''.join(resp_iter)
+        with closing_if_possible(resp_iter):
+            resp_body = ''.join(resp_iter)
         try:
             segments = json.loads(resp_body)
         except ValueError:
diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py
index c9da1cb754..c7d551c307 100644
--- a/swift/common/request_helpers.py
+++ b/swift/common/request_helpers.py
@@ -23,7 +23,6 @@ from swob in here without creating circular imports.
 import hashlib
 import itertools
 import time
-from contextlib import contextmanager
 from urllib import unquote
 from swift import gettext_ as _
 from swift.common.storage_policy import POLICIES
@@ -32,7 +31,8 @@ from swift.common.exceptions import ListingIterError, SegmentError
 from swift.common.http import is_success
 from swift.common.swob import (HTTPBadRequest, HTTPNotAcceptable,
                                HTTPServiceUnavailable)
-from swift.common.utils import split_path, validate_device_partition
+from swift.common.utils import split_path, validate_device_partition, \
+    close_if_possible
 from swift.common.wsgi import make_subrequest
@@ -249,26 +249,6 @@ def copy_header_subset(from_r, to_r, condition):
             to_r.headers[k] = v
 
-def close_if_possible(maybe_closable):
-    close_method = getattr(maybe_closable, 'close', None)
-    if callable(close_method):
-        return close_method()
-
-
-@contextmanager
-def closing_if_possible(maybe_closable):
-    """
-    Like contextlib.closing(), but doesn't crash if the object lacks a close()
-    method.
-
-    PEP 333 (WSGI) says: "If the iterable returned by the application has a
-    close() method, the server or gateway must call that method upon
-    completion of the current request[.]" This function makes that easier.
-    """
-    yield maybe_closable
-    close_if_possible(maybe_closable)
-
-
 class SegmentedIterable(object):
     """
     Iterable that returns the object contents for a large object.
@@ -304,6 +284,7 @@ class SegmentedIterable(object):
         self.peeked_chunk = None
         self.app_iter = self._internal_iter()
         self.validated_first_segment = False
+        self.current_resp = None

     def _internal_iter(self):
         start_time = time.time()
@@ -360,6 +341,8 @@ class SegmentedIterable(object):
                      'r_size': seg_resp.content_length,
                      's_etag': seg_etag,
                      's_size': seg_size})
+            else:
+                self.current_resp = seg_resp

             seg_hash = hashlib.md5()
             for chunk in seg_resp.app_iter:
@@ -431,3 +414,11 @@ class SegmentedIterable(object):
             return itertools.chain([pc], self.app_iter)
         else:
             return self.app_iter
+
+    def close(self):
+        """
+        Called when the client disconnects. Ensure that the connection to the
+        backend server is closed.
+ """ + if self.current_resp: + close_if_possible(self.current_resp.app_iter) diff --git a/swift/common/swob.py b/swift/common/swob.py index 39f0c0e3cb..b35be6849f 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -49,7 +49,8 @@ import random import functools import inspect -from swift.common.utils import reiterate, split_path, Timestamp, pairs +from swift.common.utils import reiterate, split_path, Timestamp, pairs, \ + close_if_possible from swift.common.exceptions import InvalidTimestamp @@ -1220,12 +1221,14 @@ class Response(object): etag in self.request.if_none_match: self.status = 304 self.content_length = 0 + close_if_possible(app_iter) return [''] if etag and self.request.if_match and \ etag not in self.request.if_match: self.status = 412 self.content_length = 0 + close_if_possible(app_iter) return [''] if self.status_int == 404 and self.request.if_match \ @@ -1236,18 +1239,21 @@ class Response(object): # Failed) response. [RFC 2616 section 14.24] self.status = 412 self.content_length = 0 + close_if_possible(app_iter) return [''] if self.last_modified and self.request.if_modified_since \ and self.last_modified <= self.request.if_modified_since: self.status = 304 self.content_length = 0 + close_if_possible(app_iter) return [''] if self.last_modified and self.request.if_unmodified_since \ and self.last_modified > self.request.if_unmodified_since: self.status = 412 self.content_length = 0 + close_if_possible(app_iter) return [''] if self.request and self.request.method == 'HEAD': @@ -1261,6 +1267,7 @@ class Response(object): if ranges == []: self.status = 416 self.content_length = 0 + close_if_possible(app_iter) return [''] elif ranges: range_size = len(ranges) diff --git a/swift/common/utils.py b/swift/common/utils.py index d470fb9970..47aff54e9a 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -3144,6 +3144,28 @@ def ismount_raw(path): return False +def close_if_possible(maybe_closable): + close_method = getattr(maybe_closable, 'close', None) + if callable(close_method): + return close_method() + + +@contextmanager +def closing_if_possible(maybe_closable): + """ + Like contextlib.closing(), but doesn't crash if the object lacks a close() + method. + + PEP 333 (WSGI) says: "If the iterable returned by the application has a + close() method, the server or gateway must call that method upon + completion of the current request[.]" This function makes that easier. 
+ """ + try: + yield maybe_closable + finally: + close_if_possible(maybe_closable) + + _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' _rfc_extension_pattern = re.compile( r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token + diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 10e83bcad7..609f21b5d9 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -44,7 +44,7 @@ from swift.common.utils import ( GreenAsyncPile, GreenthreadSafeIterator, json, Timestamp, normalize_delete_at_timestamp, public, get_expirer_container, document_iters_to_http_response_body, parse_content_range, - quorum_size, reiterate) + quorum_size, reiterate, close_if_possible) from swift.common.bufferedhttp import http_connect from swift.common.constraints import check_metadata, check_object_creation, \ check_copy_from_header, check_destination_header, \ @@ -70,7 +70,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \ HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \ HTTPRequestedRangeNotSatisfiable, Range from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \ - remove_items, copy_header_subset, close_if_possible + remove_items, copy_header_subset def copy_headers_into(from_r, to_r): diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 68a4bfee3d..7c1b45571e 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -15,6 +15,7 @@ # This stuff can't live in test/unit/__init__.py due to its swob dependency. +from collections import defaultdict from copy import deepcopy from hashlib import md5 from swift.common import swob @@ -23,6 +24,20 @@ from swift.common.utils import split_path from test.unit import FakeLogger, FakeRing +class LeakTrackingIter(object): + def __init__(self, inner_iter, fake_swift, path): + self.inner_iter = inner_iter + self.fake_swift = fake_swift + self.path = path + + def __iter__(self): + for x in self.inner_iter: + yield x + + def close(self): + self.fake_swift.mark_closed(self.path) + + class FakeSwift(object): """ A good-enough fake Swift proxy server to use in testing middleware. 
@@ -30,6 +45,7 @@ class FakeSwift(object): def __init__(self): self._calls = [] + self._unclosed_req_paths = defaultdict(int) self.req_method_paths = [] self.swift_sources = [] self.uploaded = {} @@ -105,7 +121,21 @@ class FakeSwift(object): req = swob.Request(env) resp = resp_class(req=req, headers=headers, body=body, conditional_response=True) - return resp(env, start_response) + wsgi_iter = resp(env, start_response) + self.mark_opened(path) + return LeakTrackingIter(wsgi_iter, self, path) + + def mark_opened(self, path): + self._unclosed_req_paths[path] += 1 + + def mark_closed(self, path): + self._unclosed_req_paths[path] -= 1 + + @property + def unclosed_requests(self): + return {path: count + for path, count in self._unclosed_req_paths.items() + if count > 0} @property def calls(self): diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py index 16237eb1d1..119e4aba55 100644 --- a/test/unit/common/middleware/test_dlo.py +++ b/test/unit/common/middleware/test_dlo.py @@ -26,6 +26,7 @@ import unittest from swift.common import exceptions, swob from swift.common.middleware import dlo +from swift.common.utils import closing_if_possible from test.unit.common.middleware.helpers import FakeSwift @@ -54,8 +55,10 @@ class DloTestCase(unittest.TestCase): body = '' caught_exc = None try: - for chunk in body_iter: - body += chunk + # appease the close-checker + with closing_if_possible(body_iter): + for chunk in body_iter: + body += chunk except Exception as exc: if expect_exception: caught_exc = exc @@ -279,6 +282,9 @@ class TestDloHeadManifest(DloTestCase): class TestDloGetManifest(DloTestCase): + def tearDown(self): + self.assertEqual(self.app.unclosed_requests, {}) + def test_get_manifest(self): expected_etag = '"%s"' % md5hex( md5hex("aaaaa") + md5hex("bbbbb") + md5hex("ccccc") + diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 86a11734d3..d5129da4aa 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -24,7 +24,7 @@ from swift.common import swob, utils from swift.common.exceptions import ListingIterError, SegmentError from swift.common.middleware import slo from swift.common.swob import Request, Response, HTTPException -from swift.common.utils import quote, json +from swift.common.utils import quote, json, closing_if_possible from test.unit.common.middleware.helpers import FakeSwift @@ -74,8 +74,10 @@ class SloTestCase(unittest.TestCase): body = '' caught_exc = None try: - for chunk in body_iter: - body += chunk + # appease the close-checker + with closing_if_possible(body_iter): + for chunk in body_iter: + body += chunk except Exception as exc: if expect_exception: caught_exc = exc @@ -232,7 +234,7 @@ class TestSloPutManifest(SloTestCase): '/?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data) self.assertEquals( - self.slo.handle_multipart_put(req, fake_start_response), + list(self.slo.handle_multipart_put(req, fake_start_response)), ['passed']) def test_handle_multipart_put_success(self): @@ -949,6 +951,9 @@ class TestSloGetManifest(SloTestCase): 'X-Object-Meta-Fish': 'Bass'}, "[not {json (at ++++all") + def tearDown(self): + self.assertEqual(self.app.unclosed_requests, {}) + def test_get_manifest_passthrough(self): req = Request.blank( '/v1/AUTH_test/gettest/manifest-bc?multipart-manifest=get', From ac8a76958510646c369df9ffc2bf22005e43d153 Mon Sep 17 00:00:00 2001 From: paul luse Date: Sat, 13 Jun 2015 11:03:56 -0700 
Subject: [PATCH 66/98] EC Ssync: Update parms to include node and frag indices Previously we sent the ssync backend frag index based on the node index. We need to be more specific for ssync to handle both sync and revert cases, so now we send the frag index based on the job contents (as determined by the EC reconstructor) and send the node index in a new header. The receiver can now validate the incoming pair and reject (400) when a primary node is being asked to accept fragments that don't belong to it. Additionally, by having the frag index the receiver can reject (409) an attempt to accept a fragment when it is a handoff and already has one that needs to be reverted. Fixes-bug: #1452619 Change-Id: I8287b274bbbd00903c1975fe49375590af697be4 --- swift/obj/server.py | 6 ++- swift/obj/ssync_receiver.py | 16 +++++-- swift/obj/ssync_sender.py | 11 ++++- test/unit/obj/test_server.py | 55 +++++++++++++++++++++++ test/unit/obj/test_ssync_receiver.py | 65 +++++++++++++++++++++++++++- test/unit/obj/test_ssync_sender.py | 1 + 6 files changed, 147 insertions(+), 7 deletions(-) diff --git a/swift/obj/server.py b/swift/obj/server.py index 658f207a8d..97424cf787 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -498,10 +498,14 @@ class ObjectController(BaseStorageServer): except ValueError as e: return HTTPBadRequest(body=str(e), request=request, content_type='text/plain') + # SSYNC will include Frag-Index header for subrequests to primary + # nodes; handoff nodes should 409 subrequests that would overwrite an + # existing data fragment until they have offloaded the existing fragment + frag_index = request.headers.get('X-Backend-Ssync-Frag-Index') try: disk_file = self.get_diskfile( device, partition, account, container, obj, - policy=policy) + policy=policy, frag_index=frag_index) except DiskFileDeviceUnavailable: return HTTPInsufficientStorage(drive=device, request=request) try: diff --git a/swift/obj/ssync_receiver.py b/swift/obj/ssync_receiver.py index 5f2461d62e..6aeb4c401f 100644 --- a/swift/obj/ssync_receiver.py +++ b/swift/obj/ssync_receiver.py @@ -156,11 +156,18 @@ class Receiver(object): self.request.environ['eventlet.minimum_write_chunk_size'] = 0 self.device, self.partition, self.policy = \ request_helpers.get_name_and_placement(self.request, 2, 2, False) - if 'X-Backend-Ssync-Frag-Index' in self.request.headers: + self.frag_index = self.node_index = None + if self.request.headers.get('X-Backend-Ssync-Frag-Index'): self.frag_index = int( self.request.headers['X-Backend-Ssync-Frag-Index']) - else: - self.frag_index = None + if self.request.headers.get('X-Backend-Ssync-Node-Index'): + self.node_index = int( + self.request.headers['X-Backend-Ssync-Node-Index']) + if self.node_index != self.frag_index: + # a primary node should only receive its own fragments + raise swob.HTTPBadRequest( + 'Frag-Index (%s) != Node-Index (%s)' % ( + self.frag_index, self.node_index)) utils.validate_device_partition(self.device, self.partition) self.diskfile_mgr = self.app._diskfile_router[self.policy] if not self.diskfile_mgr.get_dev_path(self.device): @@ -344,6 +351,9 @@ class Receiver(object): raise Exception('Invalid subrequest method %s' % method) subreq.headers['X-Backend-Storage-Policy-Index'] = int(self.policy) subreq.headers['X-Backend-Replication'] = 'True' + if self.node_index is not None: + # primary node should not 409 if it has a non-primary fragment + subreq.headers['X-Backend-Ssync-Frag-Index'] = self.node_index if replication_headers: subreq.headers['X-Backend-Replication-Headers'] = \ '
'.join(replication_headers) diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py index 8e9202c004..50662da84c 100644 --- a/swift/obj/ssync_sender.py +++ b/swift/obj/ssync_sender.py @@ -129,8 +129,15 @@ class Sender(object): self.connection.putheader('Transfer-Encoding', 'chunked') self.connection.putheader('X-Backend-Storage-Policy-Index', int(self.job['policy'])) - self.connection.putheader('X-Backend-Ssync-Frag-Index', - self.node['index']) + # a sync job must use the node's index for the frag_index of the + # rebuilt fragments instead of the frag_index from the job which + # will be rebuilding them + self.connection.putheader( + 'X-Backend-Ssync-Frag-Index', self.node.get( + 'index', self.job.get('frag_index'))) + # a revert job to a handoff will not have a node index + self.connection.putheader('X-Backend-Ssync-Node-Index', self.node.get('index')) self.connection.endheaders() with exceptions.MessageTimeout( self.daemon.node_timeout, 'connect receive'): diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index fe9ac5794f..6e1f16e34f 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1266,6 +1266,61 @@ class TestObjectController(unittest.TestCase): finally: object_server.http_connect = old_http_connect + def test_PUT_ssync_multi_frag(self): + def put_with_index(expected_rsp, frag_index, node_index=None): + timestamp = utils.Timestamp(int(time())).internal + data_file_tail = '#%d.data' % frag_index + headers = {'X-Timestamp': timestamp, + 'Content-Length': '6', + 'Content-Type': 'application/octet-stream', + 'X-Backend-Ssync-Frag-Index': node_index, + 'X-Object-Sysmeta-Ec-Frag-Index': frag_index, + 'X-Backend-Storage-Policy-Index': int(policy)} + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers=headers) + req.body = 'VERIFY' + resp = req.get_response(self.object_controller) + + self.assertEquals( + resp.status_int, expected_rsp, + 'got %s != %s for frag_index=%s node_index=%s' % ( + resp.status_int, expected_rsp, + frag_index, node_index)) + if expected_rsp == 409: + return + obj_dir = os.path.join( + self.testdir, 'sda1', + storage_directory(diskfile.get_data_dir(int(policy)), + 'p', hash_path('a', 'c', 'o'))) + data_file = os.path.join(obj_dir, timestamp) + data_file_tail + self.assertTrue(os.path.isfile(data_file), + 'Expected file %r not found in %r for policy %r' + % (data_file, os.listdir(obj_dir), int(policy))) + + for policy in POLICIES: + if policy.policy_type == EC_POLICY: + # upload with an ec-frag-index + put_with_index(201, 3) + # same timestamp will conflict with a different ec-frag-index + put_with_index(409, 2) + # but with the ssync-frag-index (primary node) it will just + # save both! + put_with_index(201, 2, 2) + # but even with the ssync-frag-index we can still get a + # timestamp collision if the file already exists + put_with_index(409, 3, 3) + + # FWIW, ssync will never send inconsistent indexes - but if + # something else did, from the object server perspective ... + + # ... the ssync-frag-index is canonical on the + # read/pre-existence check + put_with_index(409, 7, 2) + # ... 
but the ec-frag-index is canonical when it comes to the + # on-disk file + put_with_index(201, 7, 6) + def test_PUT_durable_files(self): for policy in POLICIES: timestamp = utils.Timestamp(int(time())).internal diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 8b652ad2ec..4a8ee4541b 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -188,7 +188,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual('No policy with index 2', err.body) @unit.patch_policies() - def test_Receiver_with_frag_index_header(self): + def test_Receiver_with_only_frag_index_header(self): # update router post policy patch self.controller._diskfile_router = diskfile.DiskFileRouter( self.conf, self.controller.logger) @@ -208,6 +208,69 @@ class TestReceiver(unittest.TestCase): ':UPDATES: START', ':UPDATES: END']) self.assertEqual(rcvr.policy, POLICIES[1]) self.assertEqual(rcvr.frag_index, 7) + self.assertEqual(rcvr.node_index, None) + + @unit.patch_policies() + def test_Receiver_with_only_node_index_header(self): + # update router post policy patch + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'}, + body=':MISSING_CHECK: START\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + with self.assertRaises(HTTPException) as e: + ssync_receiver.Receiver(self.controller, req) + self.assertEqual(e.exception.status_int, 400) + # if a node index is included - it *must* be + # the same value as the frag index + self.assertEqual(e.exception.body, + 'Frag-Index (None) != Node-Index (7)') + + @unit.patch_policies() + def test_Receiver_with_matched_indexes(self): + # update router post policy patch + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'}, + body=':MISSING_CHECK: START\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + rcvr = ssync_receiver.Receiver(self.controller, req) + body_lines = [chunk.strip() for chunk in rcvr() if chunk.strip()] + self.assertEqual( + body_lines, + [':MISSING_CHECK: START', ':MISSING_CHECK: END', + ':UPDATES: START', ':UPDATES: END']) + self.assertEqual(rcvr.policy, POLICIES[1]) + self.assertEqual(rcvr.frag_index, 7) + self.assertEqual(rcvr.node_index, 7) + + @unit.patch_policies() + def test_Receiver_with_mismatched_indexes(self): + # update router post policy patch + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + req = swob.Request.blank( + '/sda1/1', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '6', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '1'}, + body=':MISSING_CHECK: START\r\n' + ':MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n:UPDATES: END\r\n') + self.assertRaises(HTTPException, ssync_receiver.Receiver, + self.controller, req) def test_SSYNC_replication_lock_fail(self): def _mock(path): diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index c48a239351..ce4171c6c9 100644 --- a/test/unit/obj/test_ssync_sender.py +++ 
b/test/unit/obj/test_ssync_sender.py @@ -261,6 +261,7 @@ class TestSender(BaseTestSender): mock.call('Transfer-Encoding', 'chunked'), mock.call('X-Backend-Storage-Policy-Index', 1), mock.call('X-Backend-Ssync-Frag-Index', 0), + mock.call('X-Backend-Ssync-Node-Index', 0), ], 'endheaders': [mock.call()], } From 4a5b851207c98a380a32e27a183dd98022be9d16 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Mon, 22 Jun 2015 14:48:41 +0200 Subject: [PATCH 67/98] Add note about updatedb to the docs Change-Id: Ia1aa0bb1f93ee487e2f7ddf76a7a08efa8f3ba41 --- doc/source/deployment_guide.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index a137d786b0..b26f3ceff1 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1234,6 +1234,16 @@ the system. If your filesystem does not support `fallocate()` or `posix_fallocate()`, be sure to set the `disable_fallocate = true` config parameter in account, container, and object server configs. +Most current Linux distributions ship with a default installation of updatedb. +This tool runs periodically and updates the file name database that is used by +the GNU locate tool. However, indexing Swift object and container database +files is most likely not required, and the periodic update can noticeably +degrade performance. To exclude these files from the index, add the path +where Swift stores its data to the PRUNEPATHS setting in `/etc/updatedb.conf`:: + + PRUNEPATHS="... /tmp ... /var/spool ... /srv/node" + + --------------------- General System Tuning --------------------- From e5c962a28ce99f01c6cd0b68b7434ddcd0e55f62 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Mon, 25 May 2015 18:28:02 +0200 Subject: [PATCH 68/98] Replace xrange() with six.moves.range() Patch generated by the xrange operation of the sixer tool: https://pypi.python.org/pypi/sixer Manual changes: * Fix indentation for pep8 checks * Fix TestGreenthreadSafeIterator.test_access_is_serialized of test.unit.common.test_utils: replace range(1, 11) with list(range(1, 11)) * Fix UnsafeXrange docstring, revert change Change-Id: Icb7e26135c5e57b5302b8bfe066b33cafe69fe4d --- swift/account/backend.py | 2 +- swift/common/internal_client.py | 3 +- swift/common/memcached.py | 3 +- swift/common/middleware/cname_lookup.py | 4 ++- swift/common/middleware/slo.py | 4 ++- swift/common/ring/builder.py | 25 ++++++++-------- swift/common/ring/ring.py | 28 +++++++++--------- swift/common/utils.py | 3 +- swift/container/backend.py | 3 +- test/functional/swift_test_client.py | 2 +- test/functional/test_account.py | 5 ++-- test/functional/test_container.py | 10 ++++--- test/functional/test_object.py | 13 +++++---- test/functional/tests.py | 8 +++--- test/probe/test_object_failures.py | 2 +- test/unit/__init__.py | 5 ++-- test/unit/account/test_backend.py | 8 +++--- test/unit/account/test_server.py | 6 ++-- test/unit/common/middleware/test_bulk.py | 2 +- test/unit/common/middleware/test_slo.py | 14 +++++---- test/unit/common/ring/test_builder.py | 36 +++++++++++++----------- test/unit/common/ring/test_ring.py | 26 +++++++++-------- test/unit/common/test_constraints.py | 3 +- test/unit/common/test_db.py | 5 ++-- test/unit/common/test_internal_client.py | 5 ++-- test/unit/common/test_swob.py | 4 +-- test/unit/common/test_utils.py | 15 +++++----- test/unit/common/test_wsgi.py | 2 +- test/unit/container/test_backend.py | 16 +++++------ test/unit/container/test_sync.py | 2 +-
test/unit/container/test_updater.py | 4 +-- test/unit/obj/test_expirer.py | 2 +- test/unit/obj/test_updater.py | 3 +- test/unit/proxy/controllers/test_obj.py | 3 +- test/unit/proxy/test_server.py | 35 ++++++++++++----------- 35 files changed, 170 insertions(+), 141 deletions(-) diff --git a/swift/account/backend.py b/swift/account/backend.py index ec28394626..0500c4f6de 100644 --- a/swift/account/backend.py +++ b/swift/account/backend.py @@ -478,7 +478,7 @@ class AccountBroker(DatabaseBroker): row = curs_row.fetchone() if row: row = list(row) - for i in xrange(5): + for i in range(5): if record[i] is None and row[i] is not None: record[i] = row[i] if row[1] > record[1]: # Keep newest put_timestamp diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index cc1f022e1e..37406ed485 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -16,6 +16,7 @@ from eventlet import sleep, Timeout from eventlet.green import httplib, socket, urllib2 import json +from six.moves import range import struct from sys import exc_info import zlib @@ -171,7 +172,7 @@ class InternalClient(object): headers = dict(headers) headers['user-agent'] = self.user_agent resp = exc_type = exc_value = exc_traceback = None - for attempt in xrange(self.request_tries): + for attempt in range(self.request_tries): req = Request.blank( path, environ={'REQUEST_METHOD': method}, headers=headers) if body_file is not None: diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 2e1ed4c08d..0a2971ac8d 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -54,6 +54,7 @@ from hashlib import md5 from eventlet.green import socket from eventlet.pools import Pool from eventlet import Timeout +from six.moves import range from swift.common.utils import json @@ -140,7 +141,7 @@ class MemcacheRing(object): self._errors = dict(((serv, []) for serv in servers)) self._error_limited = dict(((serv, 0) for serv in servers)) for server in sorted(servers): - for i in xrange(NODE_WEIGHT): + for i in range(NODE_WEIGHT): self._ring[md5hash('%s-%s' % (server, i))] = server self._tries = tries if tries <= len(servers) else len(servers) self._sorted = sorted(self._ring) diff --git a/swift/common/middleware/cname_lookup.py b/swift/common/middleware/cname_lookup.py index 50a85566b5..9f26b9aa4b 100644 --- a/swift/common/middleware/cname_lookup.py +++ b/swift/common/middleware/cname_lookup.py @@ -27,6 +27,8 @@ maximum lookup depth. If a match is found, the environment's Host header is rewritten and the request is passed further down the WSGI chain. """ +from six.moves import range + import socket from swift import gettext_ as _ @@ -122,7 +124,7 @@ class CNAMELookupMiddleware(object): if self.memcache is None: self.memcache = cache_from_env(env) error = True - for tries in xrange(self.lookup_depth): + for tries in range(self.lookup_depth): found_domain = None if self.memcache: memcache_key = ''.join(['cname-', a_domain]) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 241210d6ae..81cb8474dd 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -146,6 +146,8 @@ the manifest and the segments it's referring to) in the container and account metadata which can be used for stats purposes. 
""" +from six.moves import range + from cStringIO import StringIO from datetime import datetime import mimetypes @@ -205,7 +207,7 @@ class SloPutContext(WSGIContext): def handle_slo_put(self, req, start_response): app_resp = self._app_call(req.environ) - for i in xrange(len(self._response_headers)): + for i in range(len(self._response_headers)): if self._response_headers[i][0].lower() == 'etag': self._response_headers[i] = ('Etag', self.slo_etag) break diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index e51ef71c9f..d161bb5561 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -25,6 +25,7 @@ from copy import deepcopy from array import array from collections import defaultdict +from six.moves import range from time import time from swift.common import exceptions @@ -534,7 +535,7 @@ class RingBuilder(object): if stats: # dev_usage[dev_id] will equal the number of partitions assigned to # that device. - dev_usage = array('I', (0 for _junk in xrange(dev_len))) + dev_usage = array('I', (0 for _junk in range(dev_len))) for part2dev in self._replica2part2dev: for dev_id in part2dev: dev_usage[dev_id] += 1 @@ -607,7 +608,7 @@ class RingBuilder(object): 255 hours ago. This can be used to force a full rebalance on the next call to rebalance. """ - for part in xrange(self.parts): + for part in range(self.parts): self._last_part_moves[part] = 0xff def get_part_devices(self, part): @@ -713,12 +714,12 @@ class RingBuilder(object): if len(part2dev) < desired_length: # Not long enough: needs to be extended and the # newly-added pieces assigned to devices. - for part in xrange(len(part2dev), desired_length): + for part in range(len(part2dev), desired_length): to_assign[part].append(replica) part2dev.append(0) elif len(part2dev) > desired_length: # Too long: truncate this mapping. - for part in xrange(desired_length, len(part2dev)): + for part in range(desired_length, len(part2dev)): dev_losing_part = self.devs[part2dev[part]] dev_losing_part['parts'] -= 1 removed_replicas += 1 @@ -726,10 +727,10 @@ class RingBuilder(object): else: # Mapping not present at all: make one up and assign # all of it. - for part in xrange(desired_length): + for part in range(desired_length): to_assign[part].append(replica) self._replica2part2dev.append( - array('H', (0 for _junk in xrange(desired_length)))) + array('H', (0 for _junk in range(desired_length)))) return (to_assign.items(), removed_replicas) @@ -738,7 +739,7 @@ class RingBuilder(object): Initial partition assignment is the same as rebalancing an existing ring, but with some initial setup beforehand. """ - self._last_part_moves = array('B', (0 for _junk in xrange(self.parts))) + self._last_part_moves = array('B', (0 for _junk in range(self.parts))) self._last_part_moves_epoch = int(time()) self._set_parts_wanted() @@ -751,7 +752,7 @@ class RingBuilder(object): more recently than min_part_hours. """ elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600 - for part in xrange(self.parts): + for part in range(self.parts): # The "min(self._last_part_moves[part] + elapsed_hours, 0xff)" # which was here showed up in profiling, so it got inlined. last_plus_elapsed = self._last_part_moves[part] + elapsed_hours @@ -830,7 +831,7 @@ class RingBuilder(object): max_allowed_replicas = self._build_max_replicas_by_tier() wanted_parts_for_tier = self._get_available_parts() moved_parts = 0 - for part in xrange(self.parts): + for part in range(self.parts): # Only move one replica at a time if possible. 
if part in removed_dev_parts: continue @@ -922,8 +923,8 @@ class RingBuilder(object): # pattern (but scaled down) on sequential runs. this_start = int(float(start) * len(part2dev) / self.parts) - for part in itertools.chain(xrange(this_start, len(part2dev)), - xrange(0, this_start)): + for part in itertools.chain(range(this_start, len(part2dev)), + range(0, this_start)): if self._last_part_moves[part] < self.min_part_hours: continue if part in removed_dev_parts or part in spread_out_parts: @@ -1270,7 +1271,7 @@ class RingBuilder(object): Generator yielding every (partition, replica) pair in the ring. """ for replica, part2dev in enumerate(self._replica2part2dev): - for part in xrange(len(part2dev)): + for part in range(len(part2dev)): yield (part, replica) @classmethod diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index 461ccae640..861eccbf84 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -27,6 +27,8 @@ from hashlib import md5 from itertools import chain from tempfile import NamedTemporaryFile +from six.moves import range + from swift.common.utils import hash_path, validate_configuration, json from swift.common.ring.utils import tiers_for_dev @@ -68,7 +70,7 @@ class RingData(object): return ring_dict partition_count = 1 << (32 - ring_dict['part_shift']) - for x in xrange(ring_dict['replica_count']): + for x in range(ring_dict['replica_count']): ring_dict['replica2part2dev_id'].append( array.array('H', gz_file.read(2 * partition_count))) return ring_dict @@ -361,9 +363,9 @@ class Ring(object): # Multiple loops for execution speed; the checks and bookkeeping get # simpler as you go along hit_all_regions = len(same_regions) == self._num_regions - for handoff_part in chain(xrange(start, parts, inc), - xrange(inc - ((parts - start) % inc), - start, inc)): + for handoff_part in chain(range(start, parts, inc), + range(inc - ((parts - start) % inc), + start, inc)): if hit_all_regions: # At this point, there are no regions left untouched, so we # can stop looking. @@ -386,9 +388,9 @@ class Ring(object): break hit_all_zones = len(same_zones) == self._num_zones - for handoff_part in chain(xrange(start, parts, inc), - xrange(inc - ((parts - start) % inc), - start, inc)): + for handoff_part in chain(range(start, parts, inc), + range(inc - ((parts - start) % inc), + start, inc)): if hit_all_zones: # Much like we stopped looking for fresh regions before, we # can now stop looking for fresh zones; there are no more. @@ -409,9 +411,9 @@ class Ring(object): break hit_all_ips = len(same_ips) == self._num_ips - for handoff_part in chain(xrange(start, parts, inc), - xrange(inc - ((parts - start) % inc), - start, inc)): + for handoff_part in chain(range(start, parts, inc), + range(inc - ((parts - start) % inc), + start, inc)): if hit_all_ips: # We've exhausted the pool of unused backends, so stop # looking. @@ -430,9 +432,9 @@ class Ring(object): break hit_all_devs = len(used) == self._num_devs - for handoff_part in chain(xrange(start, parts, inc), - xrange(inc - ((parts - start) % inc), - start, inc)): + for handoff_part in chain(range(start, parts, inc), + range(inc - ((parts - start) % inc), + start, inc)): if hit_all_devs: # We've used every device we have, so let's stop looking for # unused devices now. 
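[Editorial aside, not part of the patch series: every hunk in this change applies the same mechanical substitution, and the one manual fix called out in the commit message follows from a real Python 3 behavior change. A minimal sketch, assuming only that the six library is installed:

    from six.moves import range  # xrange on Python 2, the builtin range on Python 3

    # Lazy iteration behaves identically on both interpreters.
    assert sum(x for x in range(5)) == 10

    # On Python 3, range() is not a list, so equality checks against lists
    # must materialize it first -- the reason for the manual
    # list(range(1, 11)) fix in test_access_is_serialized.
    assert list(range(1, 11)) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
]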
diff --git a/swift/common/utils.py b/swift/common/utils.py index 63919af1ec..26ae5f68cd 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -64,6 +64,7 @@ import netifaces import codecs utf8_decoder = codecs.getdecoder('utf-8') utf8_encoder = codecs.getencoder('utf-8') +from six.moves import range from swift import gettext_ as _ import swift.common.exceptions @@ -2953,7 +2954,7 @@ class ThreadPool(object): _raw_rpipe, self.wpipe = os.pipe() self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb', bufsize=0) - for _junk in xrange(nthreads): + for _junk in range(nthreads): thr = stdlib_threading.Thread( target=self._worker, args=(self._run_queue, self._result_queue)) diff --git a/swift/container/backend.py b/swift/container/backend.py index de42f4bde8..7aad12dd2f 100644 --- a/swift/container/backend.py +++ b/swift/container/backend.py @@ -21,6 +21,7 @@ from uuid import uuid4 import time import cPickle as pickle +from six.moves import range import sqlite3 from swift.common.utils import Timestamp @@ -698,7 +699,7 @@ class ContainerBroker(DatabaseBroker): # Get created_at times for objects in item_list that already exist. # We must chunk it up to avoid sqlite's limit of 999 args. created_at = {} - for offset in xrange(0, len(item_list), SQLITE_ARG_LIMIT): + for offset in range(0, len(item_list), SQLITE_ARG_LIMIT): chunk = [rec['name'] for rec in item_list[offset:offset + SQLITE_ARG_LIMIT]] created_at.update( diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 695ea202d7..036f31012f 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -68,7 +68,7 @@ class ResponseError(Exception): def listing_empty(method): - for i in xrange(6): + for i in range(6): if len(method()) == 0: return True diff --git a/test/functional/test_account.py b/test/functional/test_account.py index 0bd7b886c3..b6c5abedc6 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -21,6 +21,7 @@ from uuid import uuid4 from nose import SkipTest from string import letters +from six.moves import range from swift.common.middleware.acl import format_acl from test.functional import check_response, retry, requires_acls, \ @@ -790,13 +791,13 @@ class TestAccount(unittest.TestCase): resp = retry(post, headers) headers = {} - for x in xrange(self.max_meta_count): + for x in range(self.max_meta_count): headers['X-Account-Meta-%d' % x] = 'v' resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 204) headers = {} - for x in xrange(self.max_meta_count + 1): + for x in range(self.max_meta_count + 1): headers['X-Account-Meta-%d' % x] = 'v' resp = retry(post, headers) resp.read() diff --git a/test/functional/test_container.py b/test/functional/test_container.py index de72526de6..dfbec4eed2 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -24,6 +24,8 @@ from test.functional import check_response, retry, requires_acls, \ load_constraint, requires_policies import test.functional as tf +from six.moves import range + class TestContainer(unittest.TestCase): @@ -319,7 +321,7 @@ class TestContainer(unittest.TestCase): name = uuid4().hex headers = {} - for x in xrange(self.max_meta_count): + for x in range(self.max_meta_count): headers['X-Container-Meta-%d' % x] = 'v' resp = retry(put, name, headers) resp.read() @@ -329,7 +331,7 @@ class TestContainer(unittest.TestCase): self.assertEqual(resp.status, 204) name = uuid4().hex headers = {} - for x in xrange(self.max_meta_count + 1): + 
for x in range(self.max_meta_count + 1): headers['X-Container-Meta-%d' % x] = 'v' resp = retry(put, name, headers) resp.read() @@ -412,13 +414,13 @@ class TestContainer(unittest.TestCase): return check_response(conn) headers = {} - for x in xrange(self.max_meta_count): + for x in range(self.max_meta_count): headers['X-Container-Meta-%d' % x] = 'v' resp = retry(post, headers) resp.read() self.assertEqual(resp.status, 204) headers = {} - for x in xrange(self.max_meta_count + 1): + for x in range(self.max_meta_count + 1): headers['X-Container-Meta-%d' % x] = 'v' resp = retry(post, headers) resp.read() diff --git a/test/functional/test_object.py b/test/functional/test_object.py index 4a62da1a77..beb52047fd 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -20,6 +20,7 @@ import unittest from nose import SkipTest from uuid import uuid4 +from six.moves import range from test.functional import check_response, retry, requires_acls, \ requires_policies @@ -746,7 +747,7 @@ class TestObject(unittest.TestCase): parsed.path, self.container, str(objnum)), segments1[objnum], {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments1)): + for objnum in range(len(segments1)): resp = retry(put, objnum) resp.read() self.assertEqual(resp.status, 201) @@ -809,7 +810,7 @@ class TestObject(unittest.TestCase): parsed.path, self.container, str(objnum)), segments2[objnum], {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments2)): + for objnum in range(len(segments2)): resp = retry(put, objnum) resp.read() self.assertEqual(resp.status, 201) @@ -891,7 +892,7 @@ class TestObject(unittest.TestCase): parsed.path, acontainer, str(objnum)), segments3[objnum], {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments3)): + for objnum in range(len(segments3)): resp = retry(put, objnum) resp.read() self.assertEqual(resp.status, 201) @@ -966,7 +967,7 @@ class TestObject(unittest.TestCase): parsed.path, acontainer, str(objnum)), '', {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments3)): + for objnum in range(len(segments3)): resp = retry(delete, objnum) resp.read() self.assertEqual(resp.status, 204) @@ -977,7 +978,7 @@ class TestObject(unittest.TestCase): parsed.path, self.container, str(objnum)), '', {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments2)): + for objnum in range(len(segments2)): resp = retry(delete, objnum) resp.read() self.assertEqual(resp.status, 204) @@ -988,7 +989,7 @@ class TestObject(unittest.TestCase): parsed.path, self.container, str(objnum)), '', {'X-Auth-Token': token}) return check_response(conn) - for objnum in xrange(len(segments1)): + for objnum in range(len(segments1)): resp = retry(delete, objnum) resp.read() self.assertEqual(resp.status, 204) diff --git a/test/functional/tests.py b/test/functional/tests.py index aa3d440c2b..f6fe875374 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -55,7 +55,7 @@ class Utils(object): u'\u3705\u1803\u0902\uF112\uD210\uB30E\u940C\u850B'\ u'\u5608\u3706\u1804\u0903\u03A9\u2603' return ''.join([random.choice(utf8_chars) - for x in xrange(length)]).encode('utf-8') + for x in range(length)]).encode('utf-8') create_name = create_ascii_name @@ -393,14 +393,14 @@ class TestContainer(Base): cont = self.env.account.container(Utils.create_name()) self.assert_(cont.create()) - files = sorted([Utils.create_name() for x in xrange(10)]) + files = 
sorted([Utils.create_name() for x in range(10)]) for f in files: file_item = cont.file(f) self.assert_(file_item.write_random()) - for i in xrange(len(files)): + for i in range(len(files)): f = files[i] - for j in xrange(1, len(files) - i): + for j in range(1, len(files) - i): self.assert_(cont.files(parms={'limit': j, 'marker': f}) == files[i + 1: i + j + 1]) self.assert_(cont.files(parms={'marker': f}) == files[i + 1:]) diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py index 469683a10e..eed5aca6a3 100755 --- a/test/probe/test_object_failures.py +++ b/test/probe/test_object_failures.py @@ -36,7 +36,7 @@ def get_data_file_path(obj_dir): files = [] # We might need to try a few times if a request hasn't yet settled. For # instance, a PUT can return success when just 2 of 3 nodes has completed. - for attempt in xrange(RETRIES + 1): + for attempt in range(RETRIES + 1): try: files = sorted(listdir(obj_dir), reverse=True) break diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 0929293b54..a5d77062fe 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -19,6 +19,7 @@ import os import copy import logging import errno +from six.moves import range import sys from contextlib import contextmanager, closing from collections import defaultdict, Iterable @@ -227,8 +228,8 @@ class FakeRing(Ring): def get_more_nodes(self, part): # replicas^2 is the true cap - for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes, - self.replicas * self.replicas)): + for x in range(self.replicas, min(self.replicas + self.max_more_nodes, + self.replicas * self.replicas)): yield {'ip': '10.0.0.%s' % x, 'replication_ip': '10.0.0.%s' % x, 'port': self._base_port + x, diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py index 5e174d892f..6598046487 100644 --- a/test/unit/account/test_backend.py +++ b/test/unit/account/test_backend.py @@ -385,17 +385,17 @@ class TestAccountBroker(unittest.TestCase): # Test AccountBroker.list_containers_iter broker = AccountBroker(':memory:', account='a') broker.initialize(Timestamp('1').internal) - for cont1 in xrange(4): - for cont2 in xrange(125): + for cont1 in range(4): + for cont2 in range(125): broker.put_container('%d-%04d' % (cont1, cont2), Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) - for cont in xrange(125): + for cont in range(125): broker.put_container('2-0051-%04d' % cont, Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) - for cont in xrange(125): + for cont in range(125): broker.put_container('3-%04d-0049' % cont, Timestamp(time()).internal, 0, 0, 0, POLICIES.default.idx) diff --git a/test/unit/account/test_server.py b/test/unit/account/test_server.py index ef920d030f..d3fbb90f41 100644 --- a/test/unit/account/test_server.py +++ b/test/unit/account/test_server.py @@ -947,7 +947,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) req.get_response(self.controller) - for c in xrange(5): + for c in range(5): req = Request.blank( '/sda1/p/a/c%d' % c, environ={'REQUEST_METHOD': 'PUT'}, @@ -972,7 +972,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) req.get_response(self.controller) - for c in xrange(5): + for c in range(5): req = Request.blank( '/sda1/p/a/c%d' % c, environ={'REQUEST_METHOD': 'PUT'}, @@ -1002,7 +1002,7 @@ class TestAccountController(unittest.TestCase): 
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) req.get_response(self.controller) - for c in xrange(5): + for c in range(5): req = Request.blank( '/sda1/p/a/c%d' % c, environ={'REQUEST_METHOD': 'PUT'}, diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index 2bd0b78158..614b4155ca 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -713,7 +713,7 @@ class TestDelete(unittest.TestCase): with patch.object(self.bulk, 'max_deletes_per_request', 9): with patch.object(self.bulk, 'max_path_length', 1): - req_body = '\n'.join([str(i) for i in xrange(10)]) + req_body = '\n'.join([str(i) for i in range(10)]) req = Request.blank('/delete_works/AUTH_Acc', body=req_body) self.assertRaises( HTTPException, self.bulk.get_objs_to_delete, req) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 86a11734d3..8f539388ad 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from six.moves import range + import hashlib import time import unittest @@ -1374,7 +1376,7 @@ class TestSloGetManifest(SloTestCase): def test_recursion_limit(self): # man1 points to obj1 and man2, man2 points to obj2 and man3... - for i in xrange(20): + for i in range(20): self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i, swob.HTTPOk, {'Content-Type': 'text/plain', 'Etag': md5hex('body%02d' % i)}, @@ -1391,7 +1393,7 @@ class TestSloGetManifest(SloTestCase): 'Etag': 'man%d' % i}, manifest_json) - for i in xrange(19, 0, -1): + for i in range(19, 0, -1): manifest_data = [ {'name': '/gettest/obj%d' % i, 'hash': md5hex('body%02d' % i), @@ -1429,7 +1431,7 @@ class TestSloGetManifest(SloTestCase): def test_sub_slo_recursion(self): # man1 points to man2 and obj1, man2 points to man3 and obj2... - for i in xrange(11): + for i in range(11): self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i, swob.HTTPOk, {'Content-Type': 'text/plain', 'Content-Length': '6', @@ -1452,7 +1454,7 @@ class TestSloGetManifest(SloTestCase): 'Etag': md5hex('body%2d' % i)}, None) - for i in xrange(9, 0, -1): + for i in range(9, 0, -1): manifest_data = [ {'name': '/gettest/man%d' % (i + 1), 'hash': 'man%d' % (i + 1), @@ -1486,7 +1488,7 @@ class TestSloGetManifest(SloTestCase): def test_sub_slo_recursion_limit(self): # man1 points to man2 and obj1, man2 points to man3 and obj2... 
- for i in xrange(12): + for i in range(12): self.app.register('GET', '/v1/AUTH_test/gettest/obj%d' % i, swob.HTTPOk, {'Content-Type': 'text/plain', @@ -1509,7 +1511,7 @@ class TestSloGetManifest(SloTestCase): 'Etag': md5hex('body%2d' % i)}, None) - for i in xrange(11, 0, -1): + for i in range(11, 0, -1): manifest_data = [ {'name': '/gettest/man%d' % (i + 1), 'hash': 'man%d' % (i + 1), diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index f1840b8210..769937b6e6 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -26,6 +26,8 @@ from math import ceil from tempfile import mkdtemp from shutil import rmtree +from six.moves import range + from swift.common import exceptions from swift.common import ring from swift.common.ring.builder import MAX_BALANCE @@ -373,9 +375,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['region'][dev['region']] += 1 counts['zone'][dev['zone']] += 1 @@ -415,9 +417,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 @@ -452,9 +454,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 @@ -489,9 +491,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): counts = defaultdict(lambda: defaultdict(int)) - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 @@ -528,15 +530,15 @@ class TestRingBuilder(unittest.TestCase): rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 1, 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) - for _ in xrange(5): + for _ in range(5): rb.pretend_min_part_hours_passed() rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): counts = dict(zone=defaultdict(int), dev_id=defaultdict(int)) - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): dev = rb.devs[rb._replica2part2dev[replica][part]] counts['zone'][dev['zone']] += 1 counts['dev_id'][dev['id']] += 1 @@ -561,9 +563,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): devs = set() - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): devs.add(rb._replica2part2dev[replica][part]) if len(devs) != 3: @@ -587,9 +589,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): devs = set() - for replica in xrange(rb.replicas): + for replica in 
range(rb.replicas): devs.add(rb._replica2part2dev[replica][part]) if len(devs) != 2: @@ -616,9 +618,9 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance() rb.validate() - for part in xrange(rb.parts): + for part in range(rb.parts): zones = set() - for replica in xrange(rb.replicas): + for replica in range(rb.replicas): zones.add(rb.devs[rb._replica2part2dev[replica][part]]['zone']) if len(zones) != 3: diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index 5ee1af0ca6..0e0cfe567c 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -25,6 +25,8 @@ from tempfile import mkdtemp from shutil import rmtree from time import sleep, time +from six.moves import range + from swift.common import ring, utils @@ -74,7 +76,7 @@ class TestRingData(unittest.TestCase): {'id': 1, 'zone': 1, 'ip': '10.1.1.1', 'port': 7000}], 30) ring_fname = os.path.join(self.testdir, 'foo.ring.gz') - for p in xrange(pickle.HIGHEST_PROTOCOL): + for p in range(pickle.HIGHEST_PROTOCOL): with closing(GzipFile(ring_fname, 'wb')) as f: pickle.dump(rd, f, protocol=p) meta_only = ring.RingData.load(ring_fname, metadata_only=True) @@ -490,9 +492,9 @@ class TestRing(TestRingBase): 19, 55] rb = ring.RingBuilder(8, 3, 1) next_dev_id = 0 - for zone in xrange(1, 10): - for server in xrange(1, 5): - for device in xrange(1, 4): + for zone in range(1, 10): + for server in range(1, 5): + for device in range(1, 4): rb.add_dev({'id': next_dev_id, 'ip': '1.2.%d.%d' % (zone, server), 'port': 1234 + device, @@ -518,7 +520,7 @@ class TestRing(TestRingBase): # The first handoff nodes for each partition in the ring devs = [] - for part in xrange(r.partition_count): + for part in range(r.partition_count): devs.append(next(r.get_more_nodes(part))['id']) self.assertEquals(devs, exp_first_handoffs) @@ -554,9 +556,9 @@ class TestRing(TestRingBase): self.assertEquals(seen_zones, set(range(1, 10))) devs = [] - for part in xrange(r.partition_count): + for part in range(r.partition_count): devs.append(next(r.get_more_nodes(part))['id']) - for part in xrange(r.partition_count): + for part in range(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( @@ -603,9 +605,9 @@ class TestRing(TestRingBase): self.assertEquals(seen_zones, set(range(1, 10))) devs = [] - for part in xrange(r.partition_count): + for part in range(r.partition_count): devs.append(next(r.get_more_nodes(part))['id']) - for part in xrange(r.partition_count): + for part in range(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( @@ -684,9 +686,9 @@ class TestRing(TestRingBase): self.assertEquals(seen_zones, set(range(1, 10))) devs = [] - for part in xrange(r.partition_count): + for part in range(r.partition_count): devs.append(next(r.get_more_nodes(part))['id']) - for part in xrange(r.partition_count): + for part in range(r.partition_count): self.assertEquals( devs[part], exp_first_handoffs[part], 'handoff for partitition %d is now device id %d' % ( @@ -727,7 +729,7 @@ class TestRing(TestRingBase): # Test distribution across regions rb.set_replicas(3) - for region in xrange(1, 5): + for region in range(1, 5): rb.add_dev({'id': next_dev_id, 'ip': '1.%d.1.%d' % (region, server), 'port': 1234, # 108.0 is the weight of all devices created prior to diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py index 61231d3f02..0cca36d8e6 
100644 --- a/test/unit/common/test_constraints.py +++ b/test/unit/common/test_constraints.py @@ -18,6 +18,7 @@ import mock import tempfile import time +from six.moves import range from test import safe_repr from test.unit import MockTrue @@ -87,7 +88,7 @@ class TestConstraints(unittest.TestCase): def test_check_metadata_count(self): headers = {} - for x in xrange(constraints.MAX_META_COUNT): + for x in range(constraints.MAX_META_COUNT): headers['X-Object-Meta-%d' % x] = 'v' self.assertEquals(constraints.check_metadata(Request.blank( '/', headers=headers), 'object'), None) diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index e1d18d400a..6e0606e79d 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -31,6 +31,7 @@ import random from mock import patch, MagicMock from eventlet.timeout import Timeout +from six.moves import range import swift.common.db from swift.common.constraints import \ @@ -1136,7 +1137,7 @@ class TestDatabaseBroker(unittest.TestCase): def test_metadata_with_max_count(self): metadata = {} - for c in xrange(MAX_META_COUNT): + for c in range(MAX_META_COUNT): key = 'X-Account-Meta-F{0}'.format(c) metadata[key] = ('B', normalize_timestamp(1)) key = 'X-Account-Meta-Foo'.format(c) @@ -1148,7 +1149,7 @@ class TestDatabaseBroker(unittest.TestCase): def test_metadata_raises_exception_over_max_count(self): metadata = {} - for c in xrange(MAX_META_COUNT + 1): + for c in range(MAX_META_COUNT + 1): key = 'X-Account-Meta-F{0}'.format(c) metadata[key] = ('B', normalize_timestamp(1)) message = '' diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index b7d6806880..4b9c56d8ad 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -22,6 +22,7 @@ import zlib from textwrap import dedent import os +from six.moves import range from test.unit import FakeLogger import eventlet from eventlet.green import urllib2 @@ -38,7 +39,7 @@ def not_sleep(seconds): def unicode_string(start, length): - return u''.join([unichr(x) for x in xrange(start, start + length)]) + return u''.join([unichr(x) for x in range(start, start + length)]) def path_parts(): @@ -575,7 +576,7 @@ class TestInternalClient(unittest.TestCase): exp_items = [] responses = [] - for i in xrange(3): + for i in range(3): data = [ {'name': 'item%02d' % (2 * i)}, {'name': 'item%02d' % (2 * i + 1)}] diff --git a/test/unit/common/test_swob.py b/test/unit/common/test_swob.py index 9f3271c1f3..65764e38d9 100644 --- a/test/unit/common/test_swob.py +++ b/test/unit/common/test_swob.py @@ -1234,12 +1234,12 @@ class TestResponse(unittest.TestCase): def app_iter_ranges(self, ranges, content_type, boundary, size): app_iter_ranges_args.append((ranges, content_type, boundary, size)) - for i in xrange(3): + for i in range(3): yield str(i) + 'fun' yield boundary def __iter__(self): - for i in xrange(3): + for i in range(3): yield str(i) + 'fun' req = swift.common.swob.Request.blank( diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index a668e0ff52..be98ed8cfb 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -28,6 +28,7 @@ import os import mock import random import re +from six.moves import range import socket import stat import sys @@ -3741,7 +3742,7 @@ class TestRateLimitedIterator(unittest.TestCase): def test_rate_limiting(self): def testfunc(): - limited_iterator = utils.RateLimitedIterator(xrange(9999), 100) + limited_iterator = 
utils.RateLimitedIterator(range(9999), 100) got = [] started_at = time.time() try: @@ -3760,7 +3761,7 @@ class TestRateLimitedIterator(unittest.TestCase): def testfunc(): limited_iterator = utils.RateLimitedIterator( - xrange(9999), 100, limit_after=5) + range(9999), 100, limit_after=5) got = [] started_at = time.time() try: @@ -3790,7 +3791,7 @@ class TestGreenthreadSafeIterator(unittest.TestCase): iterable = UnsafeXrange(10) pile = eventlet.GreenPile(2) - for _ in xrange(2): + for _ in range(2): pile.spawn(self.increment, iterable) sorted([resp for resp in pile]) @@ -3801,10 +3802,10 @@ class TestGreenthreadSafeIterator(unittest.TestCase): pile = eventlet.GreenPile(2) unsafe_iterable = UnsafeXrange(10) iterable = utils.GreenthreadSafeIterator(unsafe_iterable) - for _ in xrange(2): + for _ in range(2): pile.spawn(self.increment, iterable) response = sorted(sum([resp for resp in pile], [])) - self.assertEquals(range(1, 11), response) + self.assertEquals(list(range(1, 11)), response) self.assertTrue( not unsafe_iterable.concurrent_call, 'concurrent call occurred') @@ -4472,7 +4473,7 @@ class TestGreenAsyncPile(unittest.TestCase): return tests_ran[0] tests_ran = [0] pile = utils.GreenAsyncPile(3) - for x in xrange(3): + for x in range(3): pile.spawn(run_test) self.assertEqual(sorted(x for x in pile), [1, 2, 3]) @@ -4485,7 +4486,7 @@ class TestGreenAsyncPile(unittest.TestCase): for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)): events = [eventlet.event.Event(), eventlet.event.Event(), eventlet.event.Event()] - for x in xrange(3): + for x in range(3): pile.spawn(run_test, x) for x in order: events[x].send() diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index a4da9effef..5189a6b7cb 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -1317,7 +1317,7 @@ class TestPipelineModification(unittest.TestCase): # This is rather brittle; it'll break if a middleware stores its app # anywhere other than an attribute named "app", but it works for now. 
pipe = [] - for _ in xrange(1000): + for _ in range(1000): pipe.append(app.__class__.__module__) if not hasattr(app, 'app'): break diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py index 23cadd1f42..7955b2ac7a 100644 --- a/test/unit/container/test_backend.py +++ b/test/unit/container/test_backend.py @@ -738,17 +738,17 @@ class TestContainerBroker(unittest.TestCase): # Test ContainerBroker.list_objects_iter broker = ContainerBroker(':memory:', account='a', container='c') broker.initialize(Timestamp('1').internal, 0) - for obj1 in xrange(4): - for obj2 in xrange(125): + for obj1 in range(4): + for obj2 in range(125): broker.put_object('%d/%04d' % (obj1, obj2), Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') - for obj in xrange(125): + for obj in range(125): broker.put_object('2/0051/%04d' % obj, Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') - for obj in xrange(125): + for obj in range(125): broker.put_object('3/%04d/0049' % obj, Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') @@ -857,17 +857,17 @@ class TestContainerBroker(unittest.TestCase): # delimiter that is not a slash broker = ContainerBroker(':memory:', account='a', container='c') broker.initialize(Timestamp('1').internal, 0) - for obj1 in xrange(4): - for obj2 in xrange(125): + for obj1 in range(4): + for obj2 in range(125): broker.put_object('%d:%04d' % (obj1, obj2), Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') - for obj in xrange(125): + for obj in range(125): broker.put_object('2:0051:%04d' % obj, Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') - for obj in xrange(125): + for obj in range(125): broker.put_object('3:%04d:0049' % obj, Timestamp(time()).internal, 0, 'text/plain', 'd41d8cd98f00b204e9800998ecf8427e') diff --git a/test/unit/container/test_sync.py b/test/unit/container/test_sync.py index bdf59f9f3e..9251e6c378 100644 --- a/test/unit/container/test_sync.py +++ b/test/unit/container/test_sync.py @@ -37,7 +37,7 @@ class FakeRing(object): def __init__(self): self.devs = [{'ip': '10.0.0.%s' % x, 'port': 1000 + x, 'device': 'sda'} - for x in xrange(3)] + for x in range(3)] def get_nodes(self, account, container=None, obj=None): return 1, list(self.devs) diff --git a/test/unit/container/test_updater.py b/test/unit/container/test_updater.py index 4e9e1ffc9d..0b3d33c56c 100644 --- a/test/unit/container/test_updater.py +++ b/test/unit/container/test_updater.py @@ -139,7 +139,7 @@ class TestContainerUpdater(unittest.TestCase): def spawn_accepts(): events = [] - for _junk in xrange(2): + for _junk in range(2): sock, addr = bindsock.accept() events.append(spawn(accept, sock, addr, 201)) return events @@ -234,7 +234,7 @@ class TestContainerUpdater(unittest.TestCase): def spawn_accepts(): events = [] - for _junk in xrange(2): + for _junk in range(2): with Timeout(3): sock, addr = bindsock.accept() events.append(spawn(accept, sock, addr)) diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py index ca815d358c..a65c8fba42 100644 --- a/test/unit/obj/test_expirer.py +++ b/test/unit/obj/test_expirer.py @@ -172,7 +172,7 @@ class TestObjectExpirer(TestCase): x.swift = InternalClient(containers) deleted_objects = {} - for i in xrange(3): + for i in range(3): x.process = i x.run_once() self.assertNotEqual(deleted_objects, x.deleted_objects) diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 
5f4a407b40..10b7d809a4 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -28,6 +28,7 @@ from time import time from distutils.dir_util import mkpath from eventlet import spawn, Timeout, listen +from six.moves import range from swift.obj import updater as object_updater from swift.obj.diskfile import (ASYNCDIR_BASE, get_async_dir, DiskFileManager, @@ -332,7 +333,7 @@ class TestObjectUpdater(unittest.TestCase): codes = iter(return_codes) try: events = [] - for x in xrange(len(return_codes)): + for x in range(len(return_codes)): with Timeout(3): sock, addr = bindsock.accept() events.append( diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 751c388272..eeeae1218b 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -26,6 +26,7 @@ from hashlib import md5 import mock from eventlet import Timeout +from six.moves import range import swift from swift.common import utils, swob @@ -1442,7 +1443,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): # swap out some with regular fast responses number_of_fast_responses_needed_to_be_quick_enough = 2 fast_indexes = random.sample( - xrange(self.replicas()), + range(self.replicas()), number_of_fast_responses_needed_to_be_quick_enough) for i in fast_indexes: codes[i] = 201 diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 46370590eb..5b82f15353 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -42,6 +42,7 @@ import random import mock from eventlet import sleep, spawn, wsgi, listen, Timeout +from six.moves import range from swift.common.utils import hash_path, json, storage_directory, \ parse_content_type, iter_multipart_mime_documents, public @@ -3416,7 +3417,7 @@ class TestObjectController(unittest.TestCase): with save_globals(): limit = constraints.MAX_META_COUNT headers = dict( - (('X-Object-Meta-' + str(i), 'a') for i in xrange(limit + 1))) + (('X-Object-Meta-' + str(i), 'a') for i in range(limit + 1))) headers.update({'Content-Type': 'foo/bar'}) set_http_connect(202, 202, 202) req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3431,7 +3432,7 @@ class TestObjectController(unittest.TestCase): count = limit / 256 # enough to cause the limit to be reached headers = dict( (('X-Object-Meta-' + str(i), 'a' * 256) - for i in xrange(count + 1))) + for i in range(count + 1))) headers.update({'Content-Type': 'foo/bar'}) set_http_connect(202, 202, 202) req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'}, @@ -3860,7 +3861,7 @@ class TestObjectController(unittest.TestCase): def test_iter_nodes_with_custom_node_iter(self): object_ring = self.app.get_object_ring(None) node_list = [dict(id=n, ip='1.2.3.4', port=n, device='D') - for n in xrange(10)] + for n in range(10)] with nested( mock.patch.object(self.app, 'sort_nodes', lambda n: n), mock.patch.object(self.app, 'request_node_count', @@ -3945,7 +3946,7 @@ class TestObjectController(unittest.TestCase): node_error_count(controller.app, object_ring.devs[0]), 2) self.assert_(node_last_error(controller.app, object_ring.devs[0]) is not None) - for _junk in xrange(self.app.error_suppression_limit): + for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 200, 503, 503, 503), 503) self.assertEquals( @@ -3982,7 +3983,7 @@ class TestObjectController(unittest.TestCase): node_error_count(controller.app, object_ring.devs[0]), 2) 
self.assert_(node_last_error(controller.app, object_ring.devs[0]) is not None) - for _junk in xrange(self.app.error_suppression_limit): + for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 200, 503, 503, 503), 503) self.assertEquals( @@ -4226,7 +4227,7 @@ class TestObjectController(unittest.TestCase): set_http_connect(201, 201, 201) headers = {'Content-Length': '0'} - for x in xrange(constraints.MAX_META_COUNT): + for x in range(constraints.MAX_META_COUNT): headers['X-Object-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=headers) @@ -4235,7 +4236,7 @@ class TestObjectController(unittest.TestCase): self.assertEquals(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {'Content-Length': '0'} - for x in xrange(constraints.MAX_META_COUNT + 1): + for x in range(constraints.MAX_META_COUNT + 1): headers['X-Object-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers=headers) @@ -5359,7 +5360,7 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 201' self.assertEquals(headers[:len(exp)], exp) # Create the object versions - for segment in xrange(1, versions_to_create): + for segment in range(1, versions_to_create): sleep(.01) # guarantee that the timestamp changes sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -5445,7 +5446,7 @@ class TestObjectController(unittest.TestCase): body = fd.read() self.assertEquals(body, '%05d' % segment) # Delete the object versions - for segment in xrange(versions_to_create - 1, 0, -1): + for segment in range(versions_to_create - 1, 0, -1): sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write('DELETE /v1/a/%s/%s HTTP/1.1\r\nHost: localhost\r' @@ -5514,7 +5515,7 @@ class TestObjectController(unittest.TestCase): self.assertEquals(headers[:len(exp)], exp) # make sure dlo manifest files don't get versioned - for _junk in xrange(1, versions_to_create): + for _junk in range(1, versions_to_create): sleep(.01) # guarantee that the timestamp changes sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -6071,7 +6072,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(headers[:len(exp)], exp) # Remember Request instance count, make sure the GC is run for # pythons without reference counting. 
- for i in xrange(4): + for i in range(4): sleep(0) # let eventlet do its thing gc.collect() else: @@ -6094,7 +6095,7 @@ class TestObjectController(unittest.TestCase): sock.close() # Make sure the GC is run again for pythons without reference # counting - for i in xrange(4): + for i in range(4): sleep(0) # let eventlet do its thing gc.collect() else: @@ -7593,7 +7594,7 @@ class TestContainerController(unittest.TestCase): self.assert_( node_last_error(controller.app, container_ring.devs[0]) is not None) - for _junk in xrange(self.app.error_suppression_limit): + for _junk in range(self.app.error_suppression_limit): self.assert_status_map(controller.HEAD, (200, 503, 503, 503), 503) self.assertEquals( @@ -7749,7 +7750,7 @@ class TestContainerController(unittest.TestCase): set_http_connect(201, 201, 201) headers = {} - for x in xrange(constraints.MAX_META_COUNT): + for x in range(constraints.MAX_META_COUNT): headers['X-Container-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers=headers) @@ -7758,7 +7759,7 @@ class TestContainerController(unittest.TestCase): self.assertEquals(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {} - for x in xrange(constraints.MAX_META_COUNT + 1): + for x in range(constraints.MAX_META_COUNT + 1): headers['X-Container-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers=headers) @@ -8704,7 +8705,7 @@ class TestAccountController(unittest.TestCase): set_http_connect(201, 201, 201) headers = {} - for x in xrange(constraints.MAX_META_COUNT): + for x in range(constraints.MAX_META_COUNT): headers['X-Account-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers=headers) @@ -8713,7 +8714,7 @@ class TestAccountController(unittest.TestCase): self.assertEquals(resp.status_int, 201) set_http_connect(201, 201, 201) headers = {} - for x in xrange(constraints.MAX_META_COUNT + 1): + for x in range(constraints.MAX_META_COUNT + 1): headers['X-Account-Meta-%d' % x] = 'v' req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': method}, headers=headers) From e6165a7879d796efd3992260ef23a7f95ceeab32 Mon Sep 17 00:00:00 2001 From: paul luse Date: Mon, 25 May 2015 14:41:42 -0700 Subject: [PATCH 69/98] Add policy support to dispersion tools Doesn't work for anything other than policy 0. 
Updated to allow the user to specify a policy name on the command line (as with object-info), which then makes populate/report work with 3x, 2x, or EC-style policies. Change-Id: Ib7c298f0f6d666b1ecca25315b88539f45cf9f95 Closes-Bug: 1458688 --- bin/swift-dispersion-populate | 32 ++++++++++++++++++++------- bin/swift-dispersion-report | 41 +++++++++++++++++++++++++---------- doc/source/admin_guide.rst | 6 +++++ 3 files changed, 60 insertions(+), 19 deletions(-) diff --git a/bin/swift-dispersion-populate b/bin/swift-dispersion-populate index 93ca6ee2cc..dd736aabd2 100755 --- a/bin/swift-dispersion-populate +++ b/bin/swift-dispersion-populate @@ -31,16 +31,17 @@ except ImportError: from swift.common.internal_client import SimpleClient from swift.common.ring import Ring from swift.common.utils import compute_eta, get_time_units, config_true_value +from swift.common.storage_policy import POLICIES insecure = False -def put_container(connpool, container, report): +def put_container(connpool, container, report, headers): global retries_done try: with connpool.item() as conn: - conn.put_container(container) + conn.put_container(container, headers=headers) retries_done += conn.attempts - 1 if report: report(True) @@ -105,6 +106,9 @@ Usage: %%prog [options] [conf_file] help='No overlap of partitions if running populate \ more than once. Will increase coverage by amount shown \ in dispersion.conf file') + parser.add_option('-P', '--policy-name', dest='policy_name', + help="Specify storage policy name") + options, args = parser.parse_args() if args: @@ -114,6 +118,15 @@ Usage: %%prog [options] [conf_file] if not c.read(conffile): exit('Unable to read config file: %s' % conffile) conf = dict(c.items('dispersion')) + + if options.policy_name is None: + policy = POLICIES.default + else: + policy = POLICIES.get_by_name(options.policy_name) + if policy is None: + exit('Unable to find policy: %s' % options.policy_name) + print 'Using storage policy: %s ' % policy.name + swift_dir = conf.get('swift_dir', '/etc/swift') dispersion_coverage = float(conf.get('dispersion_coverage', 1)) retries = int(conf.get('retries', 5)) @@ -141,6 +154,8 @@ Usage: %%prog [options] [conf_file] insecure=insecure) account = url.rsplit('/', 1)[1] connpool = Pool(max_size=concurrency) + headers = {} + headers['X-Storage-Policy'] = policy.name connpool.create = lambda: SimpleClient( url=url, token=token, retries=retries) @@ -152,7 +167,7 @@ Usage: %%prog [options] [conf_file] if options.no_overlap: with connpool.item() as conn: containers = [cont['name'] for cont in conn.get_account( - prefix='dispersion_', full_listing=True)[1]] + prefix='dispersion_%d' % policy.idx, full_listing=True)[1]] containers_listed = len(containers) if containers_listed > 0: for container in containers: @@ -170,11 +185,12 @@ Usage: %%prog [options] [conf_file] next_report += 2 suffix = 0 while need_to_queue >= 1 and parts_left: - container = 'dispersion_%d' % suffix + container = 'dispersion_%d_%d' % (policy.idx, suffix) part = container_ring.get_part(account, container) if part in parts_left: if suffix >= options.container_suffix_start: - coropool.spawn(put_container, connpool, container, report) + coropool.spawn(put_container, connpool, container, report, + headers) sleep() else: report(True) @@ -195,9 +211,9 @@ Usage: %%prog [options] [conf_file] stdout.flush() if object_populate: - container = 'dispersion_objects' - put_container(connpool, container, None) - object_ring = Ring(swift_dir, ring_name='object') + container = 'dispersion_objects_%d' % policy.idx +
put_container(connpool, container, None, headers) + object_ring = Ring(swift_dir, ring_name=policy.ring_name) parts_left = dict((x, x) for x in xrange(object_ring.partition_count)) if options.no_overlap: diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report index 34f239c876..2a1b0c1d48 100755 --- a/bin/swift-dispersion-report +++ b/bin/swift-dispersion-report @@ -36,6 +36,7 @@ from swift.common.internal_client import SimpleClient from swift.common.ring import Ring from swift.common.exceptions import ClientException from swift.common.utils import compute_eta, get_time_units, config_true_value +from swift.common.storage_policy import POLICIES unmounted = [] @@ -73,10 +74,10 @@ def get_error_log(prefix): def container_dispersion_report(coropool, connpool, account, container_ring, - retries, output_missing_partitions): + retries, output_missing_partitions, policy): with connpool.item() as conn: containers = [c['name'] for c in conn.get_account( - prefix='dispersion_', full_listing=True)[1]] + prefix='dispersion_%d' % policy.idx, full_listing=True)[1]] containers_listed = len(containers) if not containers_listed: print >>stderr, 'No containers to query. Has ' \ @@ -169,8 +170,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring, def object_dispersion_report(coropool, connpool, account, object_ring, - retries, output_missing_partitions): - container = 'dispersion_objects' + retries, output_missing_partitions, policy): + container = 'dispersion_objects_%d' % policy.idx with connpool.item() as conn: try: objects = [o['name'] for o in conn.get_container( @@ -196,6 +197,11 @@ def object_dispersion_report(coropool, connpool, account, object_ring, begun = time() next_report = [time() + 2] + headers = None + if policy is not None: + headers = {} + headers['X-Backend-Storage-Policy-Index'] = int(policy) + def direct(obj, part, nodes): found_count = 0 for node in nodes: @@ -203,7 +209,8 @@ def object_dispersion_report(coropool, connpool, account, object_ring, try: attempts, _junk = direct_client.retry( direct_client.direct_head_object, node, part, account, - container, obj, error_log=error_log, retries=retries) + container, obj, error_log=error_log, retries=retries, + headers=headers) retries_done[0] += attempts - 1 found_count += 1 except ClientException as err: @@ -290,9 +297,9 @@ def missing_string(partition_count, missing_copies, copy_count): verb_string = 'were' partition_string = 'partitions' - copy_string = 'copy' - if missing_copies > 1: - copy_string = 'copies' + copy_string = 'copies' + if missing_copies == 1: + copy_string = 'copy' return '%sThere %s %d %s missing %s %s.' % ( exclamations, verb_string, partition_count, partition_string, @@ -323,6 +330,9 @@ Usage: %%prog [options] [conf_file] parser.add_option('--insecure', action='store_true', default=False, help='Allow accessing insecure keystone server. 
' 'The keystone\'s certificate will not be verified.') + parser.add_option('-P', '--policy-name', dest='policy_name', + help="Specify storage policy name") + options, args = parser.parse_args() if args: @@ -332,6 +342,15 @@ Usage: %%prog [options] [conf_file] if not c.read(conffile): exit('Unable to read config file: %s' % conffile) conf = dict(c.items('dispersion')) + + if options.policy_name is None: + policy = POLICIES.default + else: + policy = POLICIES.get_by_name(options.policy_name) + if policy is None: + exit('Unable to find policy: %s' % options.policy_name) + print 'Using storage policy: %s ' % policy.name + swift_dir = conf.get('swift_dir', '/etc/swift') retries = int(conf.get('retries', 5)) concurrency = int(conf.get('concurrency', 25)) @@ -364,16 +383,16 @@ Usage: %%prog [options] [conf_file] url=url, token=token, retries=retries) container_ring = Ring(swift_dir, ring_name='container') - object_ring = Ring(swift_dir, ring_name='object') + object_ring = Ring(swift_dir, ring_name=policy.ring_name) output = {} if container_report: output['container'] = container_dispersion_report( coropool, connpool, account, container_ring, retries, - options.partitions) + options.partitions, policy) if object_report: output['object'] = object_dispersion_report( coropool, connpool, account, object_ring, retries, - options.partitions) + options.partitions, policy) if json_output: print json.dumps(output) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 50eb9bd5e6..f27c20741e 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -339,6 +339,12 @@ allows it to be more easily consumed by third party utilities:: $ swift-dispersion-report -j {"object": {"retries:": 0, "missing_two": 0, "copies_found": 7863, "missing_one": 0, "copies_expected": 7863, "pct_found": 100.0, "overlapping": 0, "missing_all": 0}, "container": {"retries:": 0, "missing_two": 0, "copies_found": 12534, "missing_one": 0, "copies_expected": 12534, "pct_found": 100.0, "overlapping": 15, "missing_all": 0}} +Note that you may select which storage policy to use by setting the option +'--policy-name silver' or '-P silver' (silver is the example policy name here). +If no policy is specified, the default will be used per the swift.conf file. +When you specify a policy the containers created also include the policy index, +thus even when running a container_only report, you will need to specify the +policy not using the default. ----------------------------------- Geographically Distributed Clusters From e70b66586e13718e7ac0078c2f2cd1135c61a1f1 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Wed, 24 Jun 2015 09:36:37 +0200 Subject: [PATCH 70/98] Replace dict.iteritems() with dict.items() The iteritems() of Python 2 dictionaries has been renamed to items() on Python 3. 
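For illustration only (a minimal sketch, not part of the patch), the portable spelling is:

    # Python 2: d.items() builds a list; d.iteritems() returns an iterator.
    # Python 3: iteritems() is gone and items() returns a lightweight view.
    d = {'x-object-meta-color': 'blue', 'content-type': 'text/plain'}
    for key, value in d.items():  # valid on both Python 2 and Python 3
        print('%s: %s' % (key, value))

Where the Python 2 list copy genuinely must be avoided, six.iteritems(d) remains available, but this series opts for the plain items() call.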
According to a discussion on the openstack-dev mailing list, the overhead of creating a temporary list using dict.items() on Python 2 is very low because most dictionaries are small: http://lists.openstack.org/pipermail/openstack-dev/2015-June/066391.html Patch generated by the following command: sed -i 's,iteritems,items,g' \ $(find swift -name "*.py") \ $(find test -name "*.py") Change-Id: I6070bb6c684be76e8e77222a7d280ec6edd43496 --- swift/account/server.py | 4 ++-- swift/account/utils.py | 2 +- swift/cli/info.py | 6 +++--- swift/cli/ringbuilder.py | 4 ++-- swift/common/bufferedhttp.py | 2 +- swift/common/constraints.py | 2 +- swift/common/db.py | 8 ++++---- swift/common/direct_client.py | 2 +- swift/common/internal_client.py | 4 ++-- swift/common/memcached.py | 2 +- swift/common/middleware/tempurl.py | 2 +- swift/common/ring/builder.py | 2 +- swift/common/swob.py | 4 ++-- swift/common/utils.py | 2 +- swift/common/wsgi.py | 6 +++--- swift/container/server.py | 8 ++++---- swift/container/sync.py | 2 +- swift/container/updater.py | 2 +- swift/obj/diskfile.py | 2 +- swift/obj/reconstructor.py | 2 +- swift/obj/replicator.py | 2 +- swift/obj/server.py | 12 ++++++------ swift/obj/ssync_sender.py | 2 +- swift/proxy/controllers/base.py | 6 +++--- test/functional/swift_test_client.py | 2 +- test/probe/common.py | 6 +++--- test/unit/common/middleware/test_bulk.py | 4 ++-- test/unit/common/middleware/test_proxy_logging.py | 6 +++--- test/unit/common/middleware/test_tempurl.py | 10 +++++----- test/unit/common/ring/test_builder.py | 8 ++++---- test/unit/container/test_server.py | 2 +- test/unit/obj/test_server.py | 4 ++-- test/unit/obj/test_ssync_sender.py | 8 ++++---- test/unit/obj/test_updater.py | 2 +- test/unit/proxy/controllers/test_base.py | 6 +++--- test/unit/proxy/test_server.py | 4 ++-- test/unit/proxy/test_sysmeta.py | 4 ++-- test/unit/test_locale/test_locale.py | 2 +- 38 files changed, 79 insertions(+), 79 deletions(-) diff --git a/swift/account/server.py b/swift/account/server.py index 10093dc8b9..c48b191795 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -153,7 +153,7 @@ class AccountController(BaseStorageServer): return HTTPConflict(request=req) metadata = {} metadata.update((key, (value, timestamp.internal)) - for key, value in req.headers.iteritems() + for key, value in req.headers.items() if is_sys_or_user_meta('account', key)) if metadata: broker.update_metadata(metadata, validate_metadata=True) @@ -246,7 +246,7 @@ class AccountController(BaseStorageServer): return self._deleted_response(broker, req, HTTPNotFound) metadata = {} metadata.update((key, (value, req_timestamp.internal)) - for key, value in req.headers.iteritems() + for key, value in req.headers.items() if is_sys_or_user_meta('account', key)) if metadata: broker.update_metadata(metadata, validate_metadata=True) diff --git a/swift/account/utils.py b/swift/account/utils.py index 6cc8700961..d9b2b77396 100644 --- a/swift/account/utils.py +++ b/swift/account/utils.py @@ -64,7 +64,7 @@ def get_response_headers(broker): resp_headers[header_name] = value resp_headers.update((key, value) for key, (value, timestamp) in - broker.metadata.iteritems() if value != '') + broker.metadata.items() if value != '') return resp_headers diff --git a/swift/cli/info.py b/swift/cli/info.py index ab6d32e490..cc23b58650 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -212,13 +212,13 @@ def print_db_info_metadata(db_type, info, metadata): raise ValueError('Info is incomplete: %s' % e) meta_prefix = 'x_' + db_type + 
'_' - for key, value in info.iteritems(): + for key, value in info.items(): if key.lower().startswith(meta_prefix): title = key.replace('_', '-').title() print ' %s: %s' % (title, value) user_metadata = {} sys_metadata = {} - for key, (value, timestamp) in metadata.iteritems(): + for key, (value, timestamp) in metadata.items(): if is_user_meta(db_type, key): user_metadata[strip_user_meta_prefix(db_type, key)] = value elif is_sys_meta(db_type, key): @@ -284,7 +284,7 @@ def print_obj_metadata(metadata): else: print 'Timestamp: Not found in metadata' - for key, value in metadata.iteritems(): + for key, value in metadata.items(): if is_user_meta('Object', key): user_metadata[key] = value elif is_sys_meta('Object', key): diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index f5c8c14792..a886ff28ba 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -97,7 +97,7 @@ def _find_parts(devs): # Sort by number of found replicas to keep the output format sorted_partition_count = sorted( - partition_count.iteritems(), key=itemgetter(1), reverse=True) + partition_count.items(), key=itemgetter(1), reverse=True) return sorted_partition_count @@ -1189,7 +1189,7 @@ def main(arguments=None): globals() print Commands.default.__doc__.strip() print - cmds = [c for c, f in Commands.__dict__.iteritems() + cmds = [c for c, f in Commands.__dict__.items() if f.__doc__ and c[0] != '_' and c != 'default'] cmds.sort() for cmd in cmds: diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index c7acccc27c..d5a7a922d4 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -235,7 +235,7 @@ def http_connect_raw(ipaddr, port, method, path, headers=None, conn.path = path conn.putrequest(method, path, skip_host=(headers and 'Host' in headers)) if headers: - for header, value in headers.iteritems(): + for header, value in headers.items(): conn.putheader(header, str(value)) conn.endheaders() return conn diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 4cee56ab3c..f5fe855288 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -120,7 +120,7 @@ def check_metadata(req, target_type): prefix = 'x-%s-meta-' % target_type.lower() meta_count = 0 meta_size = 0 - for key, value in req.headers.iteritems(): + for key, value in req.headers.items(): if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE: return HTTPBadRequest(body='Header value too long: %s' % key[:MAX_META_NAME_LENGTH], diff --git a/swift/common/db.py b/swift/common/db.py index 80a855ac1c..308a1bb83e 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -734,7 +734,7 @@ class DatabaseBroker(object): """ meta_count = 0 meta_size = 0 - for key, (value, timestamp) in metadata.iteritems(): + for key, (value, timestamp) in metadata.items(): key = key.lower() if value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): @@ -762,7 +762,7 @@ class DatabaseBroker(object): """ old_metadata = self.metadata if set(metadata_updates).issubset(set(old_metadata)): - for key, (value, timestamp) in metadata_updates.iteritems(): + for key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: @@ -780,7 +780,7 @@ class DatabaseBroker(object): ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type) md = {} - for key, value_timestamp in metadata_updates.iteritems(): + for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if 
key not in md or timestamp > md[key][1]: md[key] = value_timestamp @@ -844,7 +844,7 @@ class DatabaseBroker(object): if md: md = json.loads(md) keys_to_delete = [] - for key, (value, value_timestamp) in md.iteritems(): + for key, (value, value_timestamp) in md.items(): if value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 3933c93b17..9d36757649 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -390,7 +390,7 @@ def direct_put_object(node, part, account, container, name, contents, if content_length is not None: headers['Content-Length'] = str(content_length) else: - for n, v in headers.iteritems(): + for n, v in headers.items(): if n.lower() == 'content-length': content_length = int(v) if content_type is not None: diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index cc1f022e1e..5d44b22cd1 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -223,7 +223,7 @@ class InternalClient(object): resp = self.make_request('HEAD', path, headers, acceptable_statuses) metadata_prefix = metadata_prefix.lower() metadata = {} - for k, v in resp.headers.iteritems(): + for k, v in resp.headers.items(): if k.lower().startswith(metadata_prefix): metadata[k[len(metadata_prefix):].lower()] = v return metadata @@ -307,7 +307,7 @@ class InternalClient(object): """ headers = {} - for k, v in metadata.iteritems(): + for k, v in metadata.items(): if k.lower().startswith(metadata_prefix): headers[k] = v else: diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 2e1ed4c08d..ae10bf7330 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -426,7 +426,7 @@ class MemcacheRing(object): server_key = md5hash(server_key) timeout = sanitize_timeout(time or timeout) msg = '' - for key, value in mapping.iteritems(): + for key, value in mapping.items(): key = md5hash(key) flags = 0 if serialize and self._allow_pickle: diff --git a/swift/common/middleware/tempurl.py b/swift/common/middleware/tempurl.py index 3dd1448583..cf3afe3de4 100644 --- a/swift/common/middleware/tempurl.py +++ b/swift/common/middleware/tempurl.py @@ -161,7 +161,7 @@ def get_tempurl_keys_from_metadata(meta): meta = get_account_info(...)['meta'] keys = get_tempurl_keys_from_metadata(meta) """ - return [get_valid_utf8_str(value) for key, value in meta.iteritems() + return [get_valid_utf8_str(value) for key, value in meta.items() if key.lower() in ('temp-url-key', 'temp-url-key-2')] diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index e51ef71c9f..f274ccd9b9 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -941,7 +941,7 @@ class RingBuilder(object): reassign_parts.update(spread_out_parts) reassign_parts.update(removed_dev_parts) - reassign_parts_list = list(reassign_parts.iteritems()) + reassign_parts_list = list(reassign_parts.items()) # We shuffle the partitions to reassign so we get a more even # distribution later. 
There has been discussion of trying to distribute # partitions more "regularly" because that would actually reduce risk diff --git a/swift/common/swob.py b/swift/common/swob.py index b35be6849f..36f871415e 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -875,7 +875,7 @@ class Request(object): elif 'wsgi.input' not in env: env['wsgi.input'] = WsgiStringIO('') req = Request(env) - for key, val in headers.iteritems(): + for key, val in headers.items(): req.headers[key] = val for key, val in kwargs.items(): prop = getattr(Request, key, None) @@ -1141,7 +1141,7 @@ class Response(object): self.headers.update(headers) if self.status_int == 401 and 'www-authenticate' not in self.headers: self.headers.update({'www-authenticate': self.www_authenticate()}) - for key, value in kw.iteritems(): + for key, value in kw.items(): setattr(self, key, value) # When specifying both 'content_type' and 'charset' in the kwargs, # charset needs to be applied *after* content_type, otherwise charset diff --git a/swift/common/utils.py b/swift/common/utils.py index e2e94b2eed..f55de656ab 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -225,7 +225,7 @@ def register_swift_info(name='swift', admin=False, **kwargs): if "." in name: raise ValueError('Cannot use "." in a swift_info key: %s' % name) dict_to_use[name] = {} - for key, val in kwargs.iteritems(): + for key, val in kwargs.items(): if "." in key: raise ValueError('Cannot use "." in a swift_info key: %s' % key) dict_to_use[name][key] = val diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index d7a6102d62..6e89d4fc3d 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -580,12 +580,12 @@ class PortPidState(object): :returns: The port the socket is bound to. """ - for port, sock_data in self.sock_data_by_port.iteritems(): + for port, sock_data in self.sock_data_by_port.items(): if sock_data['sock'] == sock: return port def _pid_to_port_and_index(self, pid): - for port, sock_data in self.sock_data_by_port.iteritems(): + for port, sock_data in self.sock_data_by_port.items(): for server_idx, a_pid in enumerate(sock_data['pids']): if pid == a_pid: return port, server_idx @@ -597,7 +597,7 @@ class PortPidState(object): """ current_port_index_pairs = set() - for port, pid_state in self.sock_data_by_port.iteritems(): + for port, pid_state in self.sock_data_by_port.items(): current_port_index_pairs |= set( (port, i) for i, pid in enumerate(pid_state['pids']) diff --git a/swift/container/server.py b/swift/container/server.py index 3cc960b2a8..12e078f7ce 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -367,7 +367,7 @@ class ContainerController(BaseStorageServer): metadata = {} metadata.update( (key, (value, req_timestamp.internal)) - for key, value in req.headers.iteritems() + for key, value in req.headers.items() if key.lower() in self.save_headers or is_sys_or_user_meta('container', key)) if 'X-Container-Sync-To' in metadata: @@ -406,7 +406,7 @@ class ContainerController(BaseStorageServer): return HTTPNotFound(request=req, headers=headers) headers.update( (key, value) - for key, (value, timestamp) in broker.metadata.iteritems() + for key, (value, timestamp) in broker.metadata.items() if value != '' and (key.lower() in self.save_headers or is_sys_or_user_meta('container', key))) headers['Content-Type'] = out_content_type @@ -473,7 +473,7 @@ class ContainerController(BaseStorageServer): def create_listing(self, req, out_content_type, info, resp_headers, metadata, container_list, container): - for 
key, (value, timestamp) in metadata.iteritems(): + for key, (value, timestamp) in metadata.items(): if value and (key.lower() in self.save_headers or is_sys_or_user_meta('container', key)): resp_headers[key] = value @@ -547,7 +547,7 @@ class ContainerController(BaseStorageServer): metadata = {} metadata.update( (key, (value, req_timestamp.internal)) - for key, value in req.headers.iteritems() + for key, value in req.headers.items() if key.lower() in self.save_headers or is_sys_or_user_meta('container', key)) if metadata: diff --git a/swift/container/sync.py b/swift/container/sync.py index c6161883c4..089c9a7481 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -322,7 +322,7 @@ class ContainerSync(Daemon): user_key = None sync_point1 = info['x_container_sync_point1'] sync_point2 = info['x_container_sync_point2'] - for key, (value, timestamp) in broker.metadata.iteritems(): + for key, (value, timestamp) in broker.metadata.items(): if key.lower() == 'x-container-sync-to': sync_to = value elif key.lower() == 'x-container-sync-key': diff --git a/swift/container/updater.py b/swift/container/updater.py index b3116aa412..8e8aa42cc0 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -121,7 +121,7 @@ class ContainerUpdater(Daemon): begin = time.time() now = time.time() expired_suppressions = \ - [a for a, u in self.account_suppressions.iteritems() + [a for a, u in self.account_suppressions.items() if u < now] for account in expired_suppressions: del self.account_suppressions[account] diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 252df42a51..0a0e4e8df2 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -1620,7 +1620,7 @@ class DiskFile(object): if meta_file: self._metadata = self._failsafe_read_metadata(meta_file, meta_file) sys_metadata = dict( - [(key, val) for key, val in datafile_metadata.iteritems() + [(key, val) for key, val in datafile_metadata.items() if key.lower() in DATAFILE_SYSTEM_META or is_sys_meta('object', key)]) self._metadata.update(sys_metadata) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 8f84b06c7a..7cd96c2615 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -419,7 +419,7 @@ class ObjectReconstructor(Daemon): :returns: a list of strings, the suffix dirs to sync """ suffixes = [] - for suffix, sub_dict_local in local_suff.iteritems(): + for suffix, sub_dict_local in local_suff.items(): sub_dict_remote = remote_suff.get(suffix, {}) if (sub_dict_local.get(None) != sub_dict_remote.get(None) or sub_dict_local.get(local_index) != diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index de2ec8d85f..5156ad7d8b 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -273,7 +273,7 @@ class ObjectReplicator(Daemon): synced_remote_regions[node['region']] = \ candidates.keys() responses.append(success) - for region, cand_objs in synced_remote_regions.iteritems(): + for region, cand_objs in synced_remote_regions.items(): if delete_objs is None: delete_objs = cand_objs else: diff --git a/swift/obj/server.py b/swift/obj/server.py index 97424cf787..85c85544e4 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -411,7 +411,7 @@ class ObjectController(BaseStorageServer): raise HTTPBadRequest("invalid JSON for footer doc") def _check_container_override(self, update_headers, metadata): - for key, val in metadata.iteritems(): + for key, val in metadata.items(): override_prefix = 'x-backend-container-update-override-' if 
key.lower().startswith(override_prefix): override = key.lower().replace(override_prefix, 'x-') @@ -446,7 +446,7 @@ class ObjectController(BaseStorageServer): request=request, headers={'X-Backend-Timestamp': orig_timestamp.internal}) metadata = {'X-Timestamp': req_timestamp.internal} - metadata.update(val for val in request.headers.iteritems() + metadata.update(val for val in request.headers.items() if is_user_meta('object', val[0])) for header_key in self.allowed_headers: if header_key in request.headers: @@ -614,9 +614,9 @@ class ObjectController(BaseStorageServer): 'ETag': etag, 'Content-Length': str(upload_size), } - metadata.update(val for val in request.headers.iteritems() + metadata.update(val for val in request.headers.items() if is_sys_or_user_meta('object', val[0])) - metadata.update(val for val in footer_meta.iteritems() + metadata.update(val for val in footer_meta.items() if is_sys_or_user_meta('object', val[0])) headers_to_copy = ( request.headers.get( @@ -712,7 +712,7 @@ class ObjectController(BaseStorageServer): conditional_etag=conditional_etag) response.headers['Content-Type'] = metadata.get( 'Content-Type', 'application/octet-stream') - for key, value in metadata.iteritems(): + for key, value in metadata.items(): if is_sys_or_user_meta('object', key) or \ key.lower() in self.allowed_headers: response.headers[key] = value @@ -767,7 +767,7 @@ class ObjectController(BaseStorageServer): conditional_etag=conditional_etag) response.headers['Content-Type'] = metadata.get( 'Content-Type', 'application/octet-stream') - for key, value in metadata.iteritems(): + for key, value in metadata.items(): if is_sys_or_user_meta('object', key) or \ key.lower() in self.allowed_headers: response.headers[key] = value diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py index 50662da84c..d39dbc2668 100644 --- a/swift/obj/ssync_sender.py +++ b/swift/obj/ssync_sender.py @@ -332,7 +332,7 @@ class Sender(object): """ msg = ['PUT ' + url_path, 'Content-Length: ' + str(df.content_length)] # Sorted to make it easier to test. 
- for key, value in sorted(df.get_metadata().iteritems()): + for key, value in sorted(df.get_metadata().items()): if key not in ('name', 'Content-Length'): msg.append('%s: %s' % (key, value)) msg = '\r\n'.join(msg) + '\r\n\r\n' diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 953a85af58..bb3e4898c4 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -122,7 +122,7 @@ def _prep_headers_to_info(headers, server_type): meta = {} sysmeta = {} other = {} - for key, val in dict(headers).iteritems(): + for key, val in dict(headers).items(): lkey = key.lower() if is_user_meta(server_type, lkey): meta[strip_user_meta_prefix(server_type, lkey)] = val @@ -1166,7 +1166,7 @@ class Controller(object): k.lower() in self._x_remove_headers()) dst_headers.update((k.lower(), v) - for k, v in src_headers.iteritems() + for k, v in src_headers.items() if k.lower() in self.pass_through_headers or is_sys_or_user_meta(st, k)) @@ -1488,7 +1488,7 @@ class Controller(object): # transfer any x-account-sysmeta headers from original request # to the autocreate PUT headers.update((k, v) - for k, v in req.headers.iteritems() + for k, v in req.headers.items() if is_sys_meta('account', k)) resp = self.make_requests(Request.blank('/v1' + path), self.app.account_ring, partition, 'PUT', diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 695ea202d7..2ac95df0d6 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -334,7 +334,7 @@ class Connection(object): port=self.storage_port) #self.connection.set_debuglevel(3) self.connection.putrequest('PUT', path) - for key, value in headers.iteritems(): + for key, value in headers.items(): self.connection.putheader(key, value) self.connection.endheaders() diff --git a/test/probe/common.py b/test/probe/common.py index 467598caec..093194aed9 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -130,13 +130,13 @@ def kill_server(ipport, ipport2server, pids): def kill_nonprimary_server(primary_nodes, ipport2server, pids): primary_ipports = [(n['ip'], n['port']) for n in primary_nodes] - for ipport, server in ipport2server.iteritems(): + for ipport, server in ipport2server.items(): if ipport in primary_ipports: server_type = server[:-1] break else: raise Exception('Cannot figure out server type for %r' % primary_nodes) - for ipport, server in list(ipport2server.iteritems()): + for ipport, server in list(ipport2server.items()): if server[:-1] == server_type and ipport not in primary_ipports: kill_server(ipport, ipport2server, pids) return ipport @@ -182,7 +182,7 @@ def get_ring(ring_name, required_replicas, required_devices, repl_name = '%s-replicator' % server repl_configs = {i: readconf(c, section_name=repl_name) - for i, c in config_paths[repl_name].iteritems()} + for i, c in config_paths[repl_name].items()} servers_per_port = any(int(c.get('servers_per_port', '0')) for c in repl_configs.values()) diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index 2bd0b78158..4cdc754ea2 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -98,7 +98,7 @@ def build_dir_tree(start_path, tree_obj): for obj in tree_obj: build_dir_tree(start_path, obj) if isinstance(tree_obj, dict): - for dir_name, obj in tree_obj.iteritems(): + for dir_name, obj in tree_obj.items(): dir_path = os.path.join(start_path, dir_name) os.mkdir(dir_path) 
build_dir_tree(dir_path, obj) @@ -115,7 +115,7 @@ def build_tar_tree(tar, start_path, tree_obj, base_path=''): for obj in tree_obj: build_tar_tree(tar, start_path, obj, base_path=base_path) if isinstance(tree_obj, dict): - for dir_name, obj in tree_obj.iteritems(): + for dir_name, obj in tree_obj.items(): dir_path = os.path.join(start_path, dir_name) tar_info = tarfile.TarInfo(dir_path[len(base_path):]) tar_info.type = tarfile.DIRTYPE diff --git a/test/unit/common/middleware/test_proxy_logging.py b/test/unit/common/middleware/test_proxy_logging.py index f4a5049a49..3b2f973a96 100644 --- a/test/unit/common/middleware/test_proxy_logging.py +++ b/test/unit/common/middleware/test_proxy_logging.py @@ -187,7 +187,7 @@ class TestProxyLogging(unittest.TestCase): '/v1/a/c/o/p/p2': 'object', } with mock.patch("time.time", stub_time): - for path, exp_type in path_types.iteritems(): + for path, exp_type in path_types.items(): # GET app = proxy_logging.ProxyLoggingMiddleware( FakeApp(body='7654321', response_str='321 Fubar'), {}) @@ -257,7 +257,7 @@ class TestProxyLogging(unittest.TestCase): 'DELETE': 'DELETE', 'OPTIONS': 'OPTIONS', } - for method, exp_method in method_map.iteritems(): + for method, exp_method in method_map.items(): app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {}) app.access_logger = FakeLogger() req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method}) @@ -281,7 +281,7 @@ class TestProxyLogging(unittest.TestCase): # this conf var supports optional leading access_ for conf_key in ['access_log_statsd_valid_http_methods', 'log_statsd_valid_http_methods']: - for method, exp_method in method_map.iteritems(): + for method, exp_method in method_map.items(): app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), { conf_key: 'SPECIAL, GET,PUT ', # crazy spaces ok }) diff --git a/test/unit/common/middleware/test_tempurl.py b/test/unit/common/middleware/test_tempurl.py index 4b235022bc..3f4dca2139 100644 --- a/test/unit/common/middleware/test_tempurl.py +++ b/test/unit/common/middleware/test_tempurl.py @@ -994,7 +994,7 @@ class TestTempURL(unittest.TestCase): hdrs = HeaderKeyDict(tempurl.TempURL( None, {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah} - )._clean_outgoing_headers(hdrs.iteritems())) + )._clean_outgoing_headers(hdrs.items())) self.assertTrue('test-header' in hdrs) orh = 'test-header' @@ -1003,7 +1003,7 @@ class TestTempURL(unittest.TestCase): hdrs = HeaderKeyDict(tempurl.TempURL( None, {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah} - )._clean_outgoing_headers(hdrs.iteritems())) + )._clean_outgoing_headers(hdrs.items())) self.assertTrue('test-header' not in hdrs) orh = 'test-header-*' @@ -1013,7 +1013,7 @@ class TestTempURL(unittest.TestCase): hdrs = HeaderKeyDict(tempurl.TempURL( None, {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah} - )._clean_outgoing_headers(hdrs.iteritems())) + )._clean_outgoing_headers(hdrs.items())) self.assertTrue('test-header-one' not in hdrs) self.assertTrue('test-header-two' not in hdrs) @@ -1024,7 +1024,7 @@ class TestTempURL(unittest.TestCase): hdrs = HeaderKeyDict(tempurl.TempURL( None, {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah} - )._clean_outgoing_headers(hdrs.iteritems())) + )._clean_outgoing_headers(hdrs.items())) self.assertTrue('test-header-one' not in hdrs) self.assertTrue('test-header-two' in hdrs) @@ -1038,7 +1038,7 @@ class TestTempURL(unittest.TestCase): hdrs = HeaderKeyDict(tempurl.TempURL( None, {'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah} 
- )._clean_outgoing_headers(hdrs.iteritems())) + )._clean_outgoing_headers(hdrs.items())) self.assertTrue('test-header-one' not in hdrs) self.assertTrue('test-header-two' in hdrs) self.assertTrue('test-other-header' not in hdrs) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index f1840b8210..9d8e4f140a 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -425,7 +425,7 @@ class TestRingBuilder(unittest.TestCase): raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) - for dev_id, replica_count in counts['dev_id'].iteritems(): + for dev_id, replica_count in counts['dev_id'].items(): if replica_count > 1: raise AssertionError( "Partition %d is on device %d more than once (%r)" % @@ -462,7 +462,7 @@ class TestRingBuilder(unittest.TestCase): raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) - for dev_id, replica_count in counts['dev_id'].iteritems(): + for dev_id, replica_count in counts['dev_id'].items(): if replica_count != 1: raise AssertionError( "Partition %d is on device %d %d times, not 1 (%r)" % @@ -497,12 +497,12 @@ class TestRingBuilder(unittest.TestCase): counts['dev_id'][dev['id']] += 1 self.assertEquals(8, sum(counts['zone'].values())) - for zone, replica_count in counts['zone'].iteritems(): + for zone, replica_count in counts['zone'].items(): if replica_count not in (2, 3): raise AssertionError( "Partition %d not evenly distributed (got %r)" % (part, counts['zone'])) - for dev_id, replica_count in counts['dev_id'].iteritems(): + for dev_id, replica_count in counts['dev_id'].items(): if replica_count not in (1, 2): raise AssertionError( "Partition %d is on device %d %d times, " diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index e76e9b3625..c62ccc6464 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -2382,7 +2382,7 @@ class TestContainerController(unittest.TestCase): 'headers': headers, 'query_string': query_string} http_connect_args.append( - dict((k, v) for k, v in captured_args.iteritems() + dict((k, v) for k, v in captured_args.items() if v is not None)) req = Request.blank( diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index d0a6865a4c..45b434271c 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -2998,7 +2998,7 @@ class TestObjectController(unittest.TestCase): 'headers': headers, 'query_string': query_string} http_connect_args.append( - dict((k, v) for k, v in captured_args.iteritems() + dict((k, v) for k, v in captured_args.items() if v is not None)) return SuccessfulFakeConn() @@ -3116,7 +3116,7 @@ class TestObjectController(unittest.TestCase): 'headers': headers, 'query_string': query_string} http_connect_args.append( - dict((k, v) for k, v in captured_args.iteritems() + dict((k, v) for k, v in captured_args.items() if v is not None)) return SuccessfulFakeConn() diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index b6f75ba659..ebfeaf9def 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -1371,7 +1371,7 @@ class TestBaseSsync(BaseTestSender): # sanity check, they are not the same ondisk files! 
self.assertNotEqual(tx_df._datadir, rx_df._datadir) rx_metadata = dict(rx_df.get_metadata()) - for k, v in tx_df.get_metadata().iteritems(): + for k, v in tx_df.get_metadata().items(): if k == 'X-Object-Sysmeta-Ec-Frag-Index': # if tx_df had a frag_index then rx_df should also have one self.assertTrue(k in rx_metadata) @@ -1481,7 +1481,7 @@ class TestBaseSsync(BaseTestSender): have been used as a source for sync'ing :param rx_frag_index: the fragment index of expected rx diskfiles """ - for o_name, diskfiles in tx_objs.iteritems(): + for o_name, diskfiles in tx_objs.items(): for tx_df in diskfiles: if tx_frag_index is None or tx_df._frag_index == tx_frag_index: # this diskfile should have been sync'd, @@ -1503,7 +1503,7 @@ class TestBaseSsync(BaseTestSender): def _verify_tombstones(self, tx_objs, policy): # verify tx and rx tombstones that should be in sync - for o_name, diskfiles in tx_objs.iteritems(): + for o_name, diskfiles in tx_objs.items(): for tx_df_ in diskfiles: try: self._open_tx_diskfile(o_name, policy) @@ -1703,7 +1703,7 @@ class TestSsyncEC(TestBaseSsync): failed_path = reconstruct_fa_calls[1][3]['name'] expect_sync_paths.remove(failed_path) failed_obj = None - for obj, diskfiles in tx_objs.iteritems(): + for obj, diskfiles in tx_objs.items(): if diskfiles[0]._name == failed_path: failed_obj = obj # sanity check diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py index 5f4a407b40..22482dd77b 100644 --- a/test/unit/obj/test_updater.py +++ b/test/unit/obj/test_updater.py @@ -157,7 +157,7 @@ class TestObjectUpdater(unittest.TestCase): } expected = set() - for o, timestamps in objects.iteritems(): + for o, timestamps in objects.items(): ohash = hash_path('account', 'container', o) for t in timestamps: o_path = os.path.join(prefix_dir, ohash + '-' + diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 30c213e0b4..d07a6496ed 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -633,7 +633,7 @@ class TestFuncs(unittest.TestCase): expected_headers = {'x-base-meta-owner': '', 'x-base-meta-size': '151M', 'connection': 'close'} - for k, v in expected_headers.iteritems(): + for k, v in expected_headers.items(): self.assertTrue(k in dst_headers) self.assertEqual(v, dst_headers[k]) self.assertFalse('new-owner' in dst_headers) @@ -647,10 +647,10 @@ class TestFuncs(unittest.TestCase): hdrs.update(bad_hdrs) req = Request.blank('/v1/a/c/o', headers=hdrs) dst_headers = base.generate_request_headers(req, transfer=True) - for k, v in good_hdrs.iteritems(): + for k, v in good_hdrs.items(): self.assertTrue(k.lower() in dst_headers) self.assertEqual(v, dst_headers[k.lower()]) - for k, v in bad_hdrs.iteritems(): + for k, v in bad_hdrs.items(): self.assertFalse(k.lower() in dst_headers) def test_client_chunk_size(self): diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 46370590eb..78d3a9d128 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -7682,7 +7682,7 @@ class TestContainerController(unittest.TestCase): find_header = \ find_header.lower().replace('-remove', '', 1) find_value = '' - for k, v in headers.iteritems(): + for k, v in headers.items(): if k.lower() == find_header.lower() and \ v == find_value: break @@ -8636,7 +8636,7 @@ class TestAccountController(unittest.TestCase): find_header = \ find_header.lower().replace('-remove', '', 1) find_value = '' - for k, v in headers.iteritems(): + for k, v in 
headers.items(): if k.lower() == find_header.lower() and \ v == find_value: break diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index cc86960987..98e9a76e9c 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -113,7 +113,7 @@ class TestObjectSysmeta(unittest.TestCase): % (expected, resp.status)) def _assertInHeaders(self, resp, expected): - for key, val in expected.iteritems(): + for key, val in expected.items(): self.assertTrue(key in resp.headers, 'Header %s missing from %s' % (key, resp.headers)) self.assertEqual(val, resp.headers[key], @@ -121,7 +121,7 @@ class TestObjectSysmeta(unittest.TestCase): % (key, val, key, resp.headers[key])) def _assertNotInHeaders(self, resp, unexpected): - for key, val in unexpected.iteritems(): + for key, val in unexpected.items(): self.assertFalse(key in resp.headers, 'Header %s not expected in %s' % (key, resp.headers)) diff --git a/test/unit/test_locale/test_locale.py b/test/unit/test_locale/test_locale.py index 177248317a..a0804ed0eb 100644 --- a/test/unit/test_locale/test_locale.py +++ b/test/unit/test_locale/test_locale.py @@ -57,7 +57,7 @@ class TestTranslations(unittest.TestCase): threading._DummyThread._Thread__stop = lambda x: 42 def tearDown(self): - for var, val in self.orig_env.iteritems(): + for var, val in self.orig_env.items(): if val is not None: os.environ[var] = val else: From 514daea6317edc02587f7abfaae8770e1ef9cc4b Mon Sep 17 00:00:00 2001 From: janonymous Date: Fri, 12 Jun 2015 23:40:51 +0530 Subject: [PATCH 71/98] Fixed Formatting Error in Swift -Form Post middleware section. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed Formatting error in ``action=”SWIFT_URL``” => action=”``SWIFT_URL``” Change-Id: Id461129ef7f5b6412f94d36920c942a4181c0eb7 Closes-Bug: #1464740 --- doc/source/api/form_post_middleware.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/api/form_post_middleware.rst b/doc/source/api/form_post_middleware.rst index 24af0b6be4..3a0e04505b 100644 --- a/doc/source/api/form_post_middleware.rst +++ b/doc/source/api/form_post_middleware.rst @@ -51,7 +51,7 @@ The format of the form **POST** request is: ]]> -**``action="SWIFT_URL``"** +**action="SWIFT_URL"** Set to full URL where the objects are to be uploaded. The names of uploaded files are appended to the specified *``SWIFT_URL``*. So, you From c2c1366b0794bcdbbbaef7fedaef2cab19cd8db8 Mon Sep 17 00:00:00 2001 From: janonymous Date: Wed, 24 Jun 2015 21:07:16 +0530 Subject: [PATCH 72/98] Fixed Errors in Swift-Form Post middleware section rst. Inline markup cannot be nested in RST. Fixing markups by using a single option for the whole line and making some minor error corrections. Change-Id: I42bfd7dfe5c93a69436ecc5d154f2e61ca83fa82 --- doc/source/api/form_post_middleware.rst | 34 ++++++++++++------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/source/api/form_post_middleware.rst b/doc/source/api/form_post_middleware.rst index 3a0e04505b..7e72ead68e 100644 --- a/doc/source/api/form_post_middleware.rst +++ b/doc/source/api/form_post_middleware.rst @@ -5,7 +5,7 @@ Form POST middleware ==================== To discover whether your Object Storage system supports this feature, -check with your service provider or send a **GET** request using the ``/info`` +check with your service provider or send a **GET** request using the :file:`/info` path. 
You can upload objects directly to the Object Storage system from a @@ -35,7 +35,7 @@ The format of the form **POST** request is: .. code:: - <![CDATA[ + @@ -48,13 +48,13 @@ The format of the form **POST** request is:
- ]]> + ]]> **action="SWIFT_URL"** Set to full URL where the objects are to be uploaded. The names of -uploaded files are appended to the specified *``SWIFT_URL``*. So, you +uploaded files are appended to the specified *SWIFT_URL*. So, you can upload directly to the root of a container with a URL like: .. code:: @@ -79,39 +79,39 @@ Must be ``POST``. Must be ``multipart/form-data``. -**name="redirect" value="*``REDIRECT_URL``*\ "** +**name="redirect" value="REDIRECT_URL"** -Redirects the browser to the *``REDIRECT_URL``* after the upload +Redirects the browser to the *REDIRECT_URL* after the upload completes. The URL has status and message query parameters added to it, which specify the HTTP status code for the upload and an optional error -message. The 2\ *``nn``* status code indicates success. +message. The 2\ *nn* status code indicates success. -The *``REDIRECT_URL``* can be an empty string. If so, the ``Location`` +The *REDIRECT_URL* can be an empty string. If so, the ``Location`` response header is not set. -**name="max\_file\_size" value="*``BYTES``*\ "** +**name="max\_file\_size" value="BYTES"** Required. Indicates the size, in bytes, of the maximum single file upload. -**name="max\_file\_count" value= "*``COUNT``*\ "** +**name="max\_file\_count" value= "COUNT"** Required. Indicates the maximum number of files that can be uploaded with the form. -**name="expires" value="*``UNIX_TIMESTAMP``*\ "** +**name="expires" value="UNIX_TIMESTAMP"** The UNIX timestamp that specifies the time before which the form must be submitted before it becomes no longer valid. -**name="signature" value="*``HMAC``*\ "** +**name="signature" value="HMAC"** The HMAC-SHA1 signature of the form. -**type="file" name="*``FILE_NAME``*\ "** +**type="file" name="FILE_NAME"** File name of the file to be uploaded. You can include from one to the ``max_file_count`` value of files. @@ -127,7 +127,7 @@ follow the file attributes are ignored. Optionally, if you want the uploaded files to be temporary you can set x-delete-at or x-delete-after attributes by adding one of these as a form input: -..code:: +.. code:: @@ -144,7 +144,7 @@ Form **POST** middleware uses an HMAC-SHA1 cryptographic signature. This signature includes these elements from the form: - The path. Starting with ``/v1/`` onwards and including a container - name and, optionally, an object prefix. In `Example 1.15, “HMAC-SHA1 + name and, optionally, an object prefix. In `Example 1.15`, “HMAC-SHA1 signature for form POST” the path is ``/v1/my_account/container/object_prefix``. Do not URL-encode the @@ -152,11 +152,11 @@ signature includes these elements from the form: - A redirect URL. If there is no redirect URL, use the empty string. -- Maximum file size. In `Example 1.15, “HMAC-SHA1 signature for form +- Maximum file size. In `Example 1.15`, “HMAC-SHA1 signature for form POST” the ``max_file_size`` is ``104857600`` bytes. -- The maximum number of objects to upload. In `Example 1.15, “HMAC-SHA1 +- The maximum number of objects to upload. In `Example 1.15`, “HMAC-SHA1 signature for form POST” ``max_file_count`` is ``10``. From 81ceee056de604d81a921280f453731eadf7be09 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Mon, 22 Jun 2015 23:24:19 -0700 Subject: [PATCH 73/98] Add one more test for ssync_receiver To prevent 409 conflict on a primary node during ssyncing, ssync-receiver should add x-ssync-backend-frag-index generated from x-ssync-backend-node-index of the SSYNC replication request header. 
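As a minimal sketch of the intended behaviour (hypothetical variable names, not the actual receiver code), the receiver effectively mirrors the node index into the fragment index header of each PUT subrequest:

    # Hypothetical sketch: copy the SSYNC node index into the frag index
    # header so the object server stores the EC fragment under the right
    # index instead of answering 409 Conflict.
    node_index = ssync_request.headers.get('X-Backend-Ssync-Node-Index')
    if node_index is not None:
        put_subrequest.headers['X-Backend-Ssync-Frag-Index'] = node_index

The test added below exercises exactly this path: it sends both index headers as '7' and asserts that the resulting subrequest carries X-Backend-Ssync-Frag-Index: 7.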
The change was made in previous work[1], but we need more tests for it. This patch adds one more assertion checking that the x-ssync-backend-frag-index is set correctly in the ssync subrequest. *BONUS* Fix some weird mocks and add some sanity checks. 1: https://review.openstack.org/#/c/191521/ Change-Id: I27aff713c69017c0bc4c60b4833184e1285595d7 --- test/unit/obj/test_ssync_receiver.py | 76 +++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 4a8ee4541b..a6eddf0bf3 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -1064,7 +1064,7 @@ class TestReceiver(unittest.TestCase): @server.public def _DELETE(request): if request.path == '/device/partition/a/c/works': - return swob.HTTPOk() + return swob.HTTPNoContent() else: return swob.HTTPInternalServerError() @@ -1179,7 +1179,7 @@ def _PUT(request): _PUT_request[0] = request request.read_body = request.environ['wsgi.input'].read() - return swob.HTTPOk() + return swob.HTTPCreated() with mock.patch.object(self.controller, 'PUT', _PUT): self.controller.logger = mock.MagicMock() @@ -1204,6 +1204,7 @@ self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_PUT_request), 1) # sanity req = _PUT_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.content_length, 1) @@ -1231,7 +1232,7 @@ def _PUT(request): _PUT_request[0] = request request.read_body = request.environ['wsgi.input'].read() - return swob.HTTPOk() + return swob.HTTPCreated() with mock.patch.object(self.controller, 'PUT', _PUT): self.controller.logger = mock.MagicMock() @@ -1257,6 +1258,7 @@ self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_PUT_request), 1) # sanity req = _PUT_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.content_length, 1) @@ -1274,13 +1276,71 @@ 'content-encoding specialty-header')}) self.assertEqual(req.read_body, '1') + def test_UPDATES_PUT_with_storage_policy_and_node_index(self): + # update router post policy patch + self.controller._diskfile_router = diskfile.DiskFileRouter( + self.conf, self.controller.logger) + + _PUT_request = [None] + + @server.public + def _PUT(request): + _PUT_request[0] = request + request.read_body = request.environ['wsgi.input'].read() + return swob.HTTPCreated() + + with mock.patch.object(self.controller, 'PUT', _PUT): + self.controller.logger = mock.MagicMock() + req = swob.Request.blank( + '/device/partition', + environ={'REQUEST_METHOD': 'SSYNC', + 'HTTP_X_BACKEND_SSYNC_NODE_INDEX': '7', + 'HTTP_X_BACKEND_SSYNC_FRAG_INDEX': '7', + 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX': '0'}, + body=':MISSING_CHECK: START\r\n:MISSING_CHECK: END\r\n' + ':UPDATES: START\r\n' + 'PUT /a/c/o\r\n' + 'Content-Length: 1\r\n' + 'X-Timestamp: 1364456113.12344\r\n' + 'X-Object-Meta-Test1: one\r\n' + 'Content-Encoding: gzip\r\n' + 'Specialty-Header: value\r\n' + '\r\n' + '1') + resp = req.get_response(self.controller) + self.assertEqual( + self.body_lines(resp.body), + [':MISSING_CHECK: START', ':MISSING_CHECK: END',
':UPDATES: START', ':UPDATES: END']) + self.assertEqual(resp.status_int, 200) + self.assertFalse(self.controller.logger.exception.called) + self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_PUT_request), 1) # sanity + req = _PUT_request[0] + self.assertEqual(req.path, '/device/partition/a/c/o') + self.assertEqual(req.content_length, 1) + self.assertEqual(req.headers, { + 'Content-Length': '1', + 'X-Timestamp': '1364456113.12344', + 'X-Object-Meta-Test1': 'one', + 'Content-Encoding': 'gzip', + 'Specialty-Header': 'value', + 'Host': 'localhost:80', + 'X-Backend-Storage-Policy-Index': '0', + 'X-Backend-Ssync-Frag-Index': '7', + 'X-Backend-Replication': 'True', + 'X-Backend-Replication-Headers': ( + 'content-length x-timestamp x-object-meta-test1 ' + 'content-encoding specialty-header')}) + self.assertEqual(req.read_body, '1') + def test_UPDATES_DELETE(self): _DELETE_request = [None] @server.public def _DELETE(request): _DELETE_request[0] = request - return swob.HTTPOk() + return swob.HTTPNoContent() with mock.patch.object(self.controller, 'DELETE', _DELETE): self.controller.logger = mock.MagicMock() @@ -1300,6 +1360,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_DELETE_request), 1) # sanity req = _DELETE_request[0] self.assertEqual(req.path, '/device/partition/a/c/o') self.assertEqual(req.headers, { @@ -1335,6 +1396,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.controller.logger.exception.assert_called_once_with( 'None/device/partition EXCEPTION in replication.Receiver') + self.assertEquals(len(_BONK_request), 1) # sanity self.assertEqual(_BONK_request[0], None) def test_UPDATES_multiple(self): @@ -1344,12 +1406,12 @@ class TestReceiver(unittest.TestCase): def _PUT(request): _requests.append(request) request.read_body = request.environ['wsgi.input'].read() - return swob.HTTPOk() + return swob.HTTPCreated() @server.public def _DELETE(request): _requests.append(request) - return swob.HTTPOk() + return swob.HTTPNoContent() with contextlib.nested( mock.patch.object(self.controller, 'PUT', _PUT), @@ -1395,6 +1457,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_requests), 6) # sanity req = _requests.pop(0) self.assertEqual(req.method, 'PUT') self.assertEqual(req.path, '/device/partition/a/c/o1') @@ -1519,6 +1582,7 @@ class TestReceiver(unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertFalse(self.controller.logger.exception.called) self.assertFalse(self.controller.logger.error.called) + self.assertEquals(len(_requests), 2) # sanity req = _requests.pop(0) self.assertEqual(req.path, '/device/partition/a/c/o1') self.assertEqual(req.content_length, 3) From edfca861b6fa39972df276fb1f37aa81583a605d Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Fri, 26 Jun 2015 08:54:22 +0000 Subject: [PATCH 74/98] Increase httplib._MAXHEADERS Python 2.7.9+ and 3.2.6+ limit the maximum number of headers in httplib to 100 [1,2,3]. This setting is too low for Swift. By default, the maximum number of allowed headers depends on the maximum number of allowed metadata items plus a default value of 32 for regular HTTP headers.
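For illustration, the arithmetic this patch applies, reduced to a runnable sketch (the real max_meta_count and extra_header_count are loaded from swift.conf; 90 is only assumed here as Swift's usual default):

    import httplib  # Python 2; its header parser enforces the 100 cap

    MAX_META_COUNT = 90      # assumed default for max metadata items
    EXTRA_HEADER_COUNT = 0   # operator headroom, 0 unless configured

    # room for metadata, plus 32 regular headers, plus any extra headroom
    MAX_HEADER_COUNT = MAX_META_COUNT + 32 + max(EXTRA_HEADER_COUNT, 0)
    httplib._MAXHEADERS = MAX_HEADER_COUNT  # e.g. 122 instead of 100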
If for some reason this is not enough (custom middleware for example) it can be increased with the extra_header_count constraint. [1] https://bugs.python.org/issue16037 [2] https://hg.python.org/cpython/raw-file/15c95b7d81dc/Misc/NEWS [3] https://hg.python.org/cpython/raw-file/v3.2.6/Misc/NEWS Co-Authored-By: Clay Gerrard Co-Authored-By: Matthew Oliver Co-Authored-By: Thomas Herve Change-Id: I388fd697ec88476024b0e9f1ae75ba35ff765282 --- etc/swift.conf-sample | 8 ++++++++ swift/common/bufferedhttp.py | 5 +++++ swift/common/constraints.py | 9 +++++++++ test/functional/__init__.py | 3 ++- test/functional/swift_test_client.py | 3 +++ test/unit/proxy/test_server.py | 2 +- 6 files changed, 28 insertions(+), 2 deletions(-) diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample index f8accabaec..76d1e876ae 100644 --- a/etc/swift.conf-sample +++ b/etc/swift.conf-sample @@ -129,6 +129,14 @@ default = yes #max_header_size = 8192 +# By default the maximum number of allowed headers depends on the number of max +# allowed metadata settings plus a default value of 32 for regular http +# headers. If for some reason this is not enough (custom middleware for +# example) it can be increased with the extra_header_count constraint. + +#extra_header_count = 32 + + # max_object_name_length is the max number of bytes in the utf8 encoding # of an object name diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index c7acccc27c..528ac214cf 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -27,14 +27,19 @@ BufferedHTTPResponse. """ from swift import gettext_ as _ +from swift.common import constraints from urllib import quote import logging import time import socket +import eventlet from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \ HTTPResponse, HTTPSConnection, _UNKNOWN +httplib = eventlet.import_patched('httplib') +httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT + class BufferedHTTPResponse(HTTPResponse): """HTTPResponse class that buffers reading of headers""" diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 4cee56ab3c..591168b5e9 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -36,6 +36,7 @@ ACCOUNT_LISTING_LIMIT = 10000 MAX_ACCOUNT_NAME_LENGTH = 256 MAX_CONTAINER_NAME_LENGTH = 256 VALID_API_VERSIONS = ["v1", "v1.0"] +EXTRA_HEADER_COUNT = 0 # If adding an entry to DEFAULT_CONSTRAINTS, note that # these constraints are automatically published by the @@ -54,6 +55,7 @@ DEFAULT_CONSTRAINTS = { 'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH, 'max_container_name_length': MAX_CONTAINER_NAME_LENGTH, 'valid_api_versions': VALID_API_VERSIONS, + 'extra_header_count': EXTRA_HEADER_COUNT, } SWIFT_CONSTRAINTS_LOADED = False @@ -105,6 +107,13 @@ FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json', 'xml': 'application/xml'} +# By default the maximum number of allowed headers depends on the number of max +# allowed metadata settings plus a default value of 32 for regular http +# headers. If for some reason this is not enough (custom middleware for +# example) it can be increased with the extra_header_count constraint. +MAX_HEADER_COUNT = MAX_META_COUNT + 32 + max(EXTRA_HEADER_COUNT, 0) + + def check_metadata(req, target_type): """ Check metadata sent in the request headers. 
This should only check diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 73e5006638..580de56c81 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import httplib import mock import os import sys @@ -52,7 +53,7 @@ from swift.container import server as container_server from swift.obj import server as object_server, mem_server as mem_object_server import swift.proxy.controllers.obj - +httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT DEBUG = True # In order to get the proper blocking behavior of sockets without using diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 695ea202d7..8767e62476 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -29,10 +29,13 @@ from xml.dom import minidom from swiftclient import get_auth +from swift.common import constraints from swift.common.utils import config_true_value from test import safe_repr +httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT + class AuthenticationFailed(Exception): pass diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 46370590eb..500ab6105a 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -9168,7 +9168,7 @@ class TestSwiftInfo(unittest.TestCase): constraints.VALID_API_VERSIONS) # this next test is deliberately brittle in order to alert if # other items are added to swift info - self.assertEqual(len(si), 17) + self.assertEqual(len(si), 18) self.assertTrue('policies' in si) sorted_pols = sorted(si['policies'], key=operator.itemgetter('name')) From d124ce5792d93e1efcdb083211ecef381f7b7173 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 25 Jun 2015 01:35:07 -0700 Subject: [PATCH 75/98] Fix ValueError in ssync_receiver httplib's putheader method will cast whatever you give it to a string. Where we allowed dict.get's default of None to be passed to putheader unmodified, ssync_receiver was surprised that the resulting non-empty string could not be converted to an integer. We can avoid surprising ssync_receiver in this way by sending the empty string as a better default.
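For illustration, the failure mode in miniature (a hedged sketch; only the .get() defaults mirror the patched ssync_sender code, the rest is made up):

    node = {}  # a revert job to a handoff has no 'index' key

    bad = str(node.get('index'))       # putheader() str()s it: 'None'
    # on the receiving end, int('None') raises
    # ValueError: invalid literal for int() with base 10: 'None'

    good = str(node.get('index', ''))  # sends '' instead
    # the receiver can treat the empty string as "no index supplied"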
Change-Id: Ie9df9927ff4d3dd3f334647f883b2937d0d81030 --- swift/obj/ssync_sender.py | 4 +- test/unit/obj/test_ssync_sender.py | 69 ++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py index 50662da84c..0657b0fd59 100644 --- a/swift/obj/ssync_sender.py +++ b/swift/obj/ssync_sender.py @@ -134,10 +134,10 @@ class Sender(object): # will be rebuilding them self.connection.putheader( 'X-Backend-Ssync-Frag-Index', self.node.get( - 'index', self.job.get('frag_index'))) + 'index', self.job.get('frag_index', ''))) # a revert job to a handoff will not have a node index self.connection.putheader('X-Backend-Ssync-Node-Index', - self.node.get('index')) + self.node.get('index', '')) self.connection.endheaders() with exceptions.MessageTimeout( self.daemon.node_timeout, 'connect receive'): diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index b6f75ba659..11cd06f22e 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -272,6 +272,75 @@ class TestSender(BaseTestSender): method_name, mock_method.mock_calls, expected_calls)) + def test_connect_handoff(self): + node = dict(replication_ip='1.2.3.4', replication_port=5678, + device='sda1') + job = dict(partition='9', policy=POLICIES[1], frag_index=9) + self.sender = ssync_sender.Sender(self.daemon, node, job, None) + self.sender.suffixes = ['abc'] + with mock.patch( + 'swift.obj.ssync_sender.bufferedhttp.BufferedHTTPConnection' + ) as mock_conn_class: + mock_conn = mock_conn_class.return_value + mock_resp = mock.MagicMock() + mock_resp.status = 200 + mock_conn.getresponse.return_value = mock_resp + self.sender.connect() + mock_conn_class.assert_called_once_with('1.2.3.4:5678') + expectations = { + 'putrequest': [ + mock.call('SSYNC', '/sda1/9'), + ], + 'putheader': [ + mock.call('Transfer-Encoding', 'chunked'), + mock.call('X-Backend-Storage-Policy-Index', 1), + mock.call('X-Backend-Ssync-Frag-Index', 9), + mock.call('X-Backend-Ssync-Node-Index', ''), + ], + 'endheaders': [mock.call()], + } + for method_name, expected_calls in expectations.items(): + mock_method = getattr(mock_conn, method_name) + self.assertEquals(expected_calls, mock_method.mock_calls, + 'connection method "%s" got %r not %r' % ( + method_name, mock_method.mock_calls, + expected_calls)) + + def test_connect_handoff_replicated(self): + node = dict(replication_ip='1.2.3.4', replication_port=5678, + device='sda1') + # no frag_index in rsync job + job = dict(partition='9', policy=POLICIES[1]) + self.sender = ssync_sender.Sender(self.daemon, node, job, None) + self.sender.suffixes = ['abc'] + with mock.patch( + 'swift.obj.ssync_sender.bufferedhttp.BufferedHTTPConnection' + ) as mock_conn_class: + mock_conn = mock_conn_class.return_value + mock_resp = mock.MagicMock() + mock_resp.status = 200 + mock_conn.getresponse.return_value = mock_resp + self.sender.connect() + mock_conn_class.assert_called_once_with('1.2.3.4:5678') + expectations = { + 'putrequest': [ + mock.call('SSYNC', '/sda1/9'), + ], + 'putheader': [ + mock.call('Transfer-Encoding', 'chunked'), + mock.call('X-Backend-Storage-Policy-Index', 1), + mock.call('X-Backend-Ssync-Frag-Index', ''), + mock.call('X-Backend-Ssync-Node-Index', ''), + ], + 'endheaders': [mock.call()], + } + for method_name, expected_calls in expectations.items(): + mock_method = getattr(mock_conn, method_name) + self.assertEquals(expected_calls, mock_method.mock_calls, + 'connection method "%s" got %r 
not %r' % ( + method_name, mock_method.mock_calls, + expected_calls)) + def test_call(self): def patch_sender(sender): sender.connect = mock.MagicMock() From 44b76a1b1b83ac9563010f1ddfca5fca76e567bf Mon Sep 17 00:00:00 2001 From: Minwoo Bae Date: Thu, 18 Jun 2015 14:21:06 -0500 Subject: [PATCH 76/98] EC Reconstructor: Do not reconstruct existing fragments. The EC reconstructor needs to verify that the fragment needing to be reconstructed does not reside in the collection of node responses. Otherwise, resources will be spent unnecessarily reconstructing the fragment. Moreover, this could cause a segfault on some backends. This change adds the necessary verification steps to make sure that a fragment will only be rebuilt in the case it is missing from the other fragment archives. Added some tests to provide coverage for these scenarios. Change-Id: I91f3d4af52cbc66c9f7ce00726f247b5462e66f9 Closes-Bug: #1452553 --- swift/obj/reconstructor.py | 7 ++ test/unit/obj/test_reconstructor.py | 183 ++++++++++++++++++++++------ 2 files changed, 153 insertions(+), 37 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index f9aa5f15d8..5b282de87c 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -249,6 +249,13 @@ class ObjectReconstructor(Daemon): if not resp: continue resp.headers = HeaderKeyDict(resp.getheaders()) + if str(fi_to_rebuild) == \ + resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index'): + continue + if resp.headers.get('X-Object-Sysmeta-Ec-Frag-Index') in set( + r.headers.get('X-Object-Sysmeta-Ec-Frag-Index') + for r in responses): + continue responses.append(resp) etag = sorted(responses, reverse=True, key=lambda r: Timestamp( diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 321ea3751d..60b2b4ff6b 100755 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -23,6 +23,7 @@ import time import shutil import re import random +import struct from eventlet import Timeout from contextlib import closing, nested, contextmanager @@ -126,6 +127,14 @@ def count_stats(logger, key, metric): return count +def get_header_frag_index(self, body): + metadata = self.policy.pyeclib_driver.get_metadata(body) + frag_index = struct.unpack('h', metadata[:2])[0] + return { + 'X-Object-Sysmeta-Ec-Frag-Index': frag_index, + } + + @patch_policies([StoragePolicy(0, name='zero', is_default=True), ECStoragePolicy(1, name='one', ec_type='jerasure_rs_vand', ec_ndata=2, ec_nparity=1)]) @@ -2309,9 +2318,13 @@ class TestObjectReconstructor(unittest.TestCase): broken_body = ec_archive_bodies.pop(1) - responses = list((200, body) for body in ec_archive_bodies) - headers = {'X-Object-Sysmeta-Ec-Etag': etag} - codes, body_iter = zip(*responses) + responses = list() + for body in ec_archive_bodies: + headers = get_header_frag_index(self, body) + headers.update({'X-Object-Sysmeta-Ec-Etag': etag}) + responses.append((200, body, headers)) + + codes, body_iter, headers = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, headers=headers): df = self.reconstructor.reconstruct_fa( job, node, metadata) @@ -2339,17 +2352,21 @@ class TestObjectReconstructor(unittest.TestCase): broken_body = ec_archive_bodies.pop(4) - base_responses = list((200, body) for body in ec_archive_bodies) + base_responses = list() + for body in ec_archive_bodies: + headers = get_header_frag_index(self, body) + headers.update({'X-Object-Sysmeta-Ec-Etag': etag}) + base_responses.append((200, body, headers)) + # since we're 
already missing a fragment a +2 scheme can only support # one additional failure at a time for error in (Timeout(), 404, Exception('kaboom!')): - responses = list(base_responses) + responses = base_responses error_index = random.randint(0, len(responses) - 1) - responses[error_index] = (error, '') - headers = {'X-Object-Sysmeta-Ec-Etag': etag} - codes, body_iter = zip(*responses) + responses[error_index] = (error, '', '') + codes, body_iter, headers_iter = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, - headers=headers): + headers=headers_iter): df = self.reconstructor.reconstruct_fa( job, node, dict(metadata)) fixed_body = ''.join(df.reader()) @@ -2379,16 +2396,19 @@ class TestObjectReconstructor(unittest.TestCase): # the scheme is 10+4, so this gets a parity node broken_body = ec_archive_bodies.pop(-4) - base_responses = list((200, body) for body in ec_archive_bodies) + responses = list() + for body in ec_archive_bodies: + headers = get_header_frag_index(self, body) + headers.update({'X-Object-Sysmeta-Ec-Etag': etag}) + responses.append((200, body, headers)) + for error in (Timeout(), 404, Exception('kaboom!')): - responses = list(base_responses) # grab a data node index error_index = random.randint(0, self.policy.ec_ndata - 1) - responses[error_index] = (error, '') - headers = {'X-Object-Sysmeta-Ec-Etag': etag} - codes, body_iter = zip(*responses) + responses[error_index] = (error, '', '') + codes, body_iter, headers_iter = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, - headers=headers): + headers=headers_iter): df = self.reconstructor.reconstruct_fa( job, node, dict(metadata)) fixed_body = ''.join(df.reader()) @@ -2435,23 +2455,28 @@ class TestObjectReconstructor(unittest.TestCase): ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data) broken_body = ec_archive_bodies.pop(1) - ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) # bad response - bad_response = (200, '', { + bad_headers = { 'X-Object-Sysmeta-Ec-Etag': 'some garbage', 'X-Backend-Timestamp': next(ts).internal, - }) + } # good responses - headers = { - 'X-Object-Sysmeta-Ec-Etag': etag, - 'X-Backend-Timestamp': next(ts).internal - } - responses = [(200, body, headers) - for body in ec_archive_bodies] + responses = list() + t1 = next(ts).internal + for body in ec_archive_bodies: + headers = get_header_frag_index(self, body) + headers.update({'X-Object-Sysmeta-Ec-Etag': etag, + 'X-Backend-Timestamp': t1}) + responses.append((200, body, headers)) + # mixed together - error_index = random.randint(0, len(responses) - 2) + error_index = random.randint(0, self.policy.ec_ndata) + error_headers = get_header_frag_index(self, + (responses[error_index])[1]) + error_headers.update(bad_headers) + bad_response = (200, '', bad_headers) responses[error_index] = bad_response codes, body_iter, headers = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, headers=headers): @@ -2480,18 +2505,19 @@ class TestObjectReconstructor(unittest.TestCase): ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data) broken_body = ec_archive_bodies.pop(1) - ts = (utils.Timestamp(t) for t in itertools.count(int(time.time()))) + # good responses - headers = { - 'X-Object-Sysmeta-Ec-Etag': etag, - 'X-Backend-Timestamp': next(ts).internal - } - responses = [(200, body, headers) - for body in ec_archive_bodies] - codes, body_iter, headers = zip(*responses) + responses = list() + t0 = next(ts).internal + for body in ec_archive_bodies: + headers = 
get_header_frag_index(self, body) + headers.update({'X-Object-Sysmeta-Ec-Etag': etag, + 'X-Backend-Timestamp': t0}) + responses.append((200, body, headers)) # sanity check before negative test + codes, body_iter, headers = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, headers=headers): df = self.reconstructor.reconstruct_fa( job, node, dict(metadata)) @@ -2501,17 +2527,100 @@ class TestObjectReconstructor(unittest.TestCase): md5(broken_body).hexdigest()) # one newer etag can spoil the bunch - new_response = (200, '', { - 'X-Object-Sysmeta-Ec-Etag': 'some garbage', - 'X-Backend-Timestamp': next(ts).internal, - }) new_index = random.randint(0, len(responses) - self.policy.ec_nparity) + new_headers = get_header_frag_index(self, (responses[new_index])[1]) + new_headers.update({'X-Object-Sysmeta-Ec-Etag': 'some garbage', + 'X-Backend-Timestamp': next(ts).internal}) + new_response = (200, '', new_headers) responses[new_index] = new_response codes, body_iter, headers = zip(*responses) with mocked_http_conn(*codes, body_iter=body_iter, headers=headers): self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa, job, node, dict(metadata)) + def test_reconstruct_fa_finds_itself_does_not_fail(self): + job = { + 'partition': 0, + 'policy': self.policy, + } + part_nodes = self.policy.object_ring.get_part_nodes(0) + node = part_nodes[1] + metadata = { + 'name': '/a/c/o', + 'Content-Length': 0, + 'ETag': 'etag', + } + + test_data = ('rebuild' * self.policy.ec_segment_size)[:-777] + etag = md5(test_data).hexdigest() + ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data) + + # instead of popping the broken body, we'll just leave it in the list + # of responses and take away something else. + broken_body = ec_archive_bodies[1] + ec_archive_bodies = ec_archive_bodies[:-1] + + def make_header(body): + metadata = self.policy.pyeclib_driver.get_metadata(body) + frag_index = struct.unpack('h', metadata[:2])[0] + return { + 'X-Object-Sysmeta-Ec-Frag-Index': frag_index, + 'X-Object-Sysmeta-Ec-Etag': etag, + } + + responses = [(200, body, make_header(body)) + for body in ec_archive_bodies] + codes, body_iter, headers = zip(*responses) + with mocked_http_conn(*codes, body_iter=body_iter, headers=headers): + df = self.reconstructor.reconstruct_fa( + job, node, metadata) + fixed_body = ''.join(df.reader()) + self.assertEqual(len(fixed_body), len(broken_body)) + self.assertEqual(md5(fixed_body).hexdigest(), + md5(broken_body).hexdigest()) + + def test_reconstruct_fa_finds_duplicate_does_not_fail(self): + job = { + 'partition': 0, + 'policy': self.policy, + } + part_nodes = self.policy.object_ring.get_part_nodes(0) + node = part_nodes[1] + metadata = { + 'name': '/a/c/o', + 'Content-Length': 0, + 'ETag': 'etag', + } + + test_data = ('rebuild' * self.policy.ec_segment_size)[:-777] + etag = md5(test_data).hexdigest() + ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data) + + broken_body = ec_archive_bodies.pop(1) + # add some duplicates + num_duplicates = self.policy.ec_nparity - 1 + ec_archive_bodies = (ec_archive_bodies[:num_duplicates] + + ec_archive_bodies)[:-num_duplicates] + + def make_header(body): + metadata = self.policy.pyeclib_driver.get_metadata(body) + frag_index = struct.unpack('h', metadata[:2])[0] + return { + 'X-Object-Sysmeta-Ec-Frag-Index': frag_index, + 'X-Object-Sysmeta-Ec-Etag': etag, + } + + responses = [(200, body, make_header(body)) + for body in ec_archive_bodies] + codes, body_iter, headers = zip(*responses) + with 
mocked_http_conn(*codes, body_iter=body_iter, headers=headers): + df = self.reconstructor.reconstruct_fa( + job, node, metadata) + fixed_body = ''.join(df.reader()) + self.assertEqual(len(fixed_body), len(broken_body)) + self.assertEqual(md5(fixed_body).hexdigest(), + md5(broken_body).hexdigest()) + if __name__ == '__main__': unittest.main() From c95a0efe7973a660f4abcd13f4ea6211d72c4baa Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 25 Jun 2015 01:44:10 -0700 Subject: [PATCH 77/98] Make ssync_sender a better HTTP client When a server responds with an error - if that error includes a body - the client should read the body. This cleans up some ugly eventlet/wsgi.server log output related to chunked transfer disconnect (invalid literal for int() with base 16). Change-Id: Ibd06ddee9f216fce07fa33c3a7d8306b59eb6d77 Closes-Bug: #1466138 --- swift/obj/ssync_sender.py | 1 + test/unit/obj/test_ssync_sender.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/swift/obj/ssync_sender.py b/swift/obj/ssync_sender.py index 0657b0fd59..3c099bfa2c 100644 --- a/swift/obj/ssync_sender.py +++ b/swift/obj/ssync_sender.py @@ -143,6 +143,7 @@ class Sender(object): self.daemon.node_timeout, 'connect receive'): self.response = self.connection.getresponse() if self.response.status != http.HTTP_OK: + self.response.read() raise exceptions.ReplicationException( 'Expected status %s; got %s' % (http.HTTP_OK, self.response.status)) diff --git a/test/unit/obj/test_ssync_sender.py b/test/unit/obj/test_ssync_sender.py index 11cd06f22e..c73bf6a609 100644 --- a/test/unit/obj/test_ssync_sender.py +++ b/test/unit/obj/test_ssync_sender.py @@ -80,6 +80,9 @@ class FakeResponse(object): self.fp = StringIO.StringIO( '%x\r\n%s\r\n0\r\n\r\n' % (len(chunk_body), chunk_body)) + def read(self, *args, **kwargs): + return '' + def close(self): self.close_called = True From a88bd6e6b9253b6488a2bf1a354ce29b51f37fe1 Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Sun, 28 Jun 2015 22:04:46 -0700 Subject: [PATCH 78/98] fix api overview document The content about Object Storage HTTP request constraints is meant to be a table, but the table markup is broken; this patch fixes it into correct RST table format. Change-Id: I1b4c62da3e6d91add3ee0218707c1628c8f04b33 --- doc/source/api/object_api_v1_overview.rst | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/doc/source/api/object_api_v1_overview.rst b/doc/source/api/object_api_v1_overview.rst index 8a4f9bcb66..3c1748d1fc 100644 --- a/doc/source/api/object_api_v1_overview.rst +++ b/doc/source/api/object_api_v1_overview.rst @@ -133,21 +133,16 @@ or ends. Object Storage HTTP requests have the following default constraints. Your service provider might use different default values. -==== ============= ===== +============================ ============= ===== Item Maximum value Notes -==== ============= ===== - +============================ ============= ===== Number of HTTP headers 90 - Length of HTTP headers 4096 bytes - Length per HTTP request line 8192 bytes - Length of HTTP request 5 GB - Length of container names 256 bytes Cannot contain the ``/`` character. - Length of object names 1024 bytes By default, there are no character restrictions. +============================ ============= ===== You must UTF-8-encode and then URL-encode container and object names before you call the API binding.
If you use an API binding that performs From d4d7c0e1b11af8f20f965f410901a56e1f8ee69e Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 30 Jun 2015 17:24:13 -0700 Subject: [PATCH 79/98] Fix race in test_server.TestObjectController.test_PUT_ssync_multi_frag If the test ran across a one-second boundary it would fail because, while the timestamp normalization was doing some rounding, it made no attempt to reuse the same timestamp on subsequent requests. Change-Id: Ic560032bcfacd6f0d10cfc0f4f10e5d6c2bc8dd5 --- test/unit/obj/test_server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index d0a6865a4c..410797c4dd 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1267,8 +1267,9 @@ class TestObjectController(unittest.TestCase): object_server.http_connect = old_http_connect def test_PUT_ssync_multi_frag(self): + timestamp = utils.Timestamp(time()).internal + def put_with_index(expected_rsp, frag_index, node_index=None): - timestamp = utils.Timestamp(int(time())).internal data_file_tail = '#%d.data' % frag_index headers = {'X-Timestamp': timestamp, 'Content-Length': '6', From 4d4db054d35c7e9de9ed28ac24965e8408dd9f82 Mon Sep 17 00:00:00 2001 From: Minwoo Bae Date: Mon, 18 May 2015 14:08:25 -0500 Subject: [PATCH 80/98] After the .durable has been written, fsync the directory. Added try-except statements in _finalize_durable() to fsync the directory after a successful fsync of the .durable file. Added test_commit_fsync_dir_raises_DiskFileErrors() for testing whether certain assertions hold for the change to include fsync_dir(). Some more error details have been included in the logger. Closes-Bug: #1470651 Change-Id: I4791d75ade8542678369ba0811ef39af6e955cc6 --- swift/obj/diskfile.py | 46 ++++++++++++++++++++++-------- test/unit/obj/test_diskfile.py | 52 ++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 12 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 3920315551..2e01137dc1 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -53,8 +53,8 @@ from swift import gettext_ as _ from swift.common.constraints import check_mount, check_dir from swift.common.request_helpers import is_sys_meta from swift.common.utils import mkdirs, Timestamp, \ - storage_directory, hash_path, renamer, fallocate, fsync, \ - fdatasync, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \ + storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \ + fsync_dir, drop_buffer_cache, ThreadPool, lock_path, write_pickle, \ config_true_value, listdir, split_path, ismount, remove_file, \ get_md5_socket, F_SETPIPE_SZ from swift.common.splice import splice, tee @@ -1788,23 +1788,45 @@ class ECDiskFileWriter(DiskFileWriter): try: with open(durable_file_path, 'w') as _fp: fsync(_fp.fileno()) + try: + fsync_dir(self._datadir) + except OSError as os_err: + msg = (_('%s \nProblem fsyncing dir ' + 'after writing .durable: %s') % + (os_err, self._datadir)) + exc = DiskFileError(msg) + except IOError as io_err: + if io_err.errno in (errno.ENOSPC, errno.EDQUOT): + msg = (_('%s \nNo space left on device ' + 'for updates to: %s') % + (io_err, self._datadir)) + exc = DiskFileNoSpace(msg) + else: + msg = (_('%s \nProblem fsyncing dir ' + 'after writing .durable: %s') % + (io_err, self._datadir)) + exc = DiskFileError(msg) + if exc: + self.manager.logger.exception(msg) + raise exc try: self.manager.hash_cleanup_listdir(self._datadir) -
except OSError as os_err: self.manager.logger.exception( - _('Problem cleaning up %s'), self._datadir) - except OSError: - msg = (_('Problem fsyncing durable state file: %s'), - durable_file_path) + _('%s \nProblem cleaning up %s') % + (os_err, self._datadir)) + except OSError as os_err: + msg = (_('%s \nProblem fsyncing durable state file: %s') % + (os_err, durable_file_path)) exc = DiskFileError(msg) except IOError as io_err: if io_err.errno in (errno.ENOSPC, errno.EDQUOT): - msg = (_("No space left on device for %s"), - durable_file_path) - exc = DiskFileNoSpace() + msg = (_('%s \nNo space left on device for %s') % + (io_err, durable_file_path)) + exc = DiskFileNoSpace(msg) else: - msg = (_('Problem writing durable state file: %s'), - durable_file_path) + msg = (_('%s \nProblem writing durable state file: %s') % + (io_err, durable_file_path)) exc = DiskFileError(msg) if exc: self.manager.logger.exception(msg) diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index a84cafc8b4..67e01ecec8 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -3262,6 +3262,58 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): self.assertRaises(DiskFileError, writer.commit, timestamp) + def test_commit_fsync_dir_raises_DiskFileErrors(self): + scenarios = ((errno.ENOSPC, DiskFileNoSpace), + (errno.EDQUOT, DiskFileNoSpace), + (errno.ENOTDIR, DiskFileError), + (errno.EPERM, DiskFileError)) + + # Check IOErrors from fsync_dir() is handled + for err_number, expected_exception in scenarios: + io_error = IOError() + io_error.errno = err_number + mock_open = mock.MagicMock(side_effect=io_error) + mock_io_error = mock.MagicMock(side_effect=io_error) + df = self._simple_get_diskfile(account='a', container='c', + obj='o_%s' % err_number, + policy=POLICIES.default) + timestamp = Timestamp(time()) + with df.create() as writer: + metadata = { + 'ETag': 'bogus_etag', + 'X-Timestamp': timestamp.internal, + 'Content-Length': '0', + } + writer.put(metadata) + with mock.patch('__builtin__.open', mock_open): + self.assertRaises(expected_exception, + writer.commit, + timestamp) + with mock.patch('swift.obj.diskfile.fsync_dir', mock_io_error): + self.assertRaises(expected_exception, + writer.commit, + timestamp) + dl = os.listdir(df._datadir) + self.assertEqual(2, len(dl), dl) + rmtree(df._datadir) + + # Check OSError from fsync_dir() is handled + mock_os_error = mock.MagicMock(side_effect=OSError) + df = self._simple_get_diskfile(account='a', container='c', + obj='o_fsync_dir_error') + + timestamp = Timestamp(time()) + with df.create() as writer: + metadata = { + 'ETag': 'bogus_etag', + 'X-Timestamp': timestamp.internal, + 'Content-Length': '0', + } + writer.put(metadata) + with mock.patch('swift.obj.diskfile.fsync_dir', mock_os_error): + self.assertRaises(DiskFileError, + writer.commit, timestamp) + def test_data_file_has_frag_index(self): policy = POLICIES.default for good_value in (0, '0', 2, '2', 14, '14'): From 0a6f2ab870a868e3036dbb212c0e7dd8375a39eb Mon Sep 17 00:00:00 2001 From: asettle Date: Fri, 26 Jun 2015 10:57:35 +1000 Subject: [PATCH 81/98] Correcting minor grammatical errors 1. Removing unnecessary semi colon 2. 
Removing unnecessary content Change-Id: Ie95403a4d96db2b8465e75495061fc059098c922 --- doc/source/development_saio.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index be8de9fb00..1e6bed03fb 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -8,7 +8,7 @@ Instructions for setting up a development VM This section documents setting up a virtual machine for doing Swift development. The virtual machine will emulate running a four node Swift -cluster. +cluster. To begin: * Get an Ubuntu 14.04 LTS server image or try something Fedora/CentOS. @@ -55,10 +55,9 @@ Installing dependencies python-netifaces python-pip python-dns \ python-mock - This installs necessary system dependencies; and *most* of the python - dependencies. Later in the process setuptools/distribute or pip will - install and/or upgrade some other stuff - it's getting harder to avoid. - You can also install anything else you want, like screen, ssh, vim, etc. + Note: This installs necessary system dependencies and *most* of the python + dependencies. Later in the process setuptools/distribute or pip will install + and/or upgrade packages. Next, choose either :ref:`partition-section` or :ref:`loopback-section`. From e860ec138913dc4c450725766a77e76a7101d06e Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Fri, 29 May 2015 10:11:44 +0000 Subject: [PATCH 82/98] Ignore invalid PIDs in swift-init While reviewing ab9f6340 I noticed that invalid pids in the pid files raise an exception (when the value is not a number), and that a pid of 0, which no Swift process can ever use, is accepted. In the latter case the process was recognized as running even though it was not, and thus restarting didn't work. This patch ensures better handling of any non-numeric value and also removes a pid file if it uses 0 as the pid.
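For illustration, the tolerant parse this patch introduces, reduced to a sketch (simplified from swift/common/manager.py; the pid file path is made up and assumed to exist):

    def iter_pids(pid_files):
        # yield (pid_file, pid); an unparseable pid file yields None
        for pid_file in pid_files:
            try:
                pid = int(open(pid_file).read().strip())
            except ValueError:
                pid = None
            yield pid_file, pid

    for pid_file, pid in iter_pids(['/var/run/swift/zero-server.pid']):
        if not pid:  # catches both None and 0
            print 'Removing pid file %s with invalid pid' % pid_file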
Change-Id: I09c415c7a75e4b9a0574a08f46f48fff5bb11d02 --- swift/common/manager.py | 10 +++++++++- test/unit/common/test_manager.py | 28 +++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/swift/common/manager.py b/swift/common/manager.py index afed0bb8ca..e9aa8ea138 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -497,7 +497,11 @@ class Server(object): """Generator, yields (pid_file, pids) """ for pid_file in self.pid_files(**kwargs): - yield pid_file, int(open(pid_file).read().strip()) + try: + pid = int(open(pid_file).read().strip()) + except ValueError: + pid = None + yield pid_file, pid def signal_pids(self, sig, **kwargs): """Send a signal to pids for this server @@ -509,6 +513,10 @@ class Server(object): """ pids = {} for pid_file, pid in self.iter_pid_files(**kwargs): + if not pid: # Catches None and 0 + print _('Removing pid file %s with invalid pid') % pid_file + remove_file(pid_file) + continue try: if sig != signal.SIG_DFL: print _('Signal %s pid: %s signal: %s') % (self.server, diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index 5a9b3a6629..bb9108ded3 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -609,7 +609,8 @@ class TestServer(unittest.TestCase): self.assertRaises(StopIteration, iter.next) # test invalid value in pid file server = manager.Server('auth', run_dir=t) - self.assertRaises(ValueError, server.iter_pid_files().next) + pid_file, pid = server.iter_pid_files().next() + self.assertEqual(None, pid) # test object-server doesn't steal pids from object-replicator server = manager.Server('object', run_dir=t) self.assertRaises(StopIteration, server.iter_pid_files().next) @@ -698,10 +699,12 @@ class TestServer(unittest.TestCase): def test_signal_pids(self): temp_files = ( + ('var/run/zero-server.pid', 0), ('var/run/proxy-server.pid', 1), ('var/run/auth-server.pid', 2), ('var/run/one-server.pid', 3), ('var/run/object-server.pid', 4), + ('var/run/invalid-server.pid', 'Forty-Two'), ('proc/3/cmdline', 'swift-another-server') ) with temptree(*zip(*temp_files)) as t: @@ -758,6 +761,29 @@ class TestServer(unittest.TestCase): self.assert_('removing pid file' in output.lower()) one_pid = self.join_run_dir('one-server.pid') self.assert_(one_pid in output) + + server = manager.Server('zero', run_dir=manager.RUN_DIR) + self.assertTrue(os.path.exists( + self.join_run_dir('zero-server.pid'))) # sanity + # test verbose warns on removing pid file with invalid pid + pids = server.signal_pids(signal.SIG_DFL, verbose=True) + output = pop_stream(f) + old_stdout.write('output %s' % output) + self.assert_('with invalid pid' in output.lower()) + self.assertFalse(os.path.exists( + self.join_run_dir('zero-server.pid'))) + server = manager.Server('invalid-server', + run_dir=manager.RUN_DIR) + self.assertTrue(os.path.exists( + self.join_run_dir('invalid-server.pid'))) # sanity + # test verbose warns on removing pid file with invalid pid + pids = server.signal_pids(signal.SIG_DFL, verbose=True) + output = pop_stream(f) + old_stdout.write('output %s' % output) + self.assert_('with invalid pid' in output.lower()) + self.assertFalse(os.path.exists( + self.join_run_dir('invalid-server.pid'))) + # reset mock os with no running pids manager.os = MockOs([]) # test warning with insufficient permissions From ccf0758ef1f362312a6197f86455f680ebd990d3 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 11 Jun 2015 15:40:28 -0700 Subject: [PATCH 83/98] Add ring-builder 
analyzer. This is a tool to help developers quantify changes to the ring builder. It takes a scenario (JSON file) describing the builder's basic parameters (part_power, replicas, etc.) and a number of "rounds", where each round is a set of operations to perform on the builder. For each round, the operations are applied, and then the builder is rebalanced until it reaches a steady state. The idea is that a developer observes the ring builder behaving suboptimally, writes a scenario to reproduce the behavior, modifies the ring builder to fix it, and references the scenario with the commit so that others can see that things have improved. I decided to write this after writing my fourth or fifth hacky one-off script to reproduce some bad behavior in the ring builder. Change-Id: I114242748368f142304aab90a6d99c1337bced4c --- bin/swift-ring-builder-analyzer | 22 ++ doc/source/overview_ring.rst | 6 + setup.cfg | 1 + swift/cli/ring_builder_analyzer.py | 325 ++++++++++++++++++++ swift/cli/ringbuilder.py | 90 +----- swift/common/ring/utils.py | 96 +++++- test/unit/cli/test_ring_builder_analyzer.py | 227 ++++++++++++++ test/unit/cli/test_ringbuilder.py | 8 - test/unit/common/ring/test_utils.py | 11 +- 9 files changed, 697 insertions(+), 89 deletions(-) create mode 100755 bin/swift-ring-builder-analyzer create mode 100644 swift/cli/ring_builder_analyzer.py create mode 100644 test/unit/cli/test_ring_builder_analyzer.py diff --git a/bin/swift-ring-builder-analyzer b/bin/swift-ring-builder-analyzer new file mode 100755 index 0000000000..18365777f3 --- /dev/null +++ b/bin/swift-ring-builder-analyzer @@ -0,0 +1,22 @@ +#!/usr/bin/python +# Copyright (c) 2015 Samuel Merritt +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from swift.cli.ring_builder_analyzer import main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index 118a437788..d1f43affa5 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -237,6 +237,12 @@ when the balance doesn't improve by at least 1% (indicating we probably can't get perfect balance due to wildly imbalanced zones or too many partitions recently moved). +--------------------- +Ring Builder Analyzer +--------------------- +.. automodule:: swift.cli.ring_builder_analyzer + + ------- History ------- diff --git a/setup.cfg b/setup.cfg index 4b648b1109..d983a11a41 100644 --- a/setup.cfg +++ b/setup.cfg @@ -60,6 +60,7 @@ scripts = bin/swift-recon bin/swift-recon-cron bin/swift-ring-builder + bin/swift-ring-builder-analyzer bin/swift-temp-url [entry_points] diff --git a/swift/cli/ring_builder_analyzer.py b/swift/cli/ring_builder_analyzer.py new file mode 100644 index 0000000000..26d964bb8b --- /dev/null +++ b/swift/cli/ring_builder_analyzer.py @@ -0,0 +1,325 @@ +#! /usr/bin/env python +# Copyright (c) 2015 Samuel Merritt +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This is a tool for analyzing how well the ring builder performs its job +in a particular scenario. It is intended to help developers quantify any +improvements or regressions in the ring builder; it is probably not useful +to others. + +The ring builder analyzer takes a scenario file containing some initial +parameters for a ring builder plus a certain number of rounds. In each +round, some modifications are made to the builder, e.g. add a device, remove +a device, change a device's weight. Then, the builder is repeatedly +rebalanced until it settles down. Data about that round is printed, and the +next round begins. + +Scenarios are specified in JSON. Example scenario for a gradual device +addition:: + + { + "part_power": 12, + "replicas": 3, + "overload": 0.1, + "random_seed": 203488, + + "rounds": [ + [ + ["add", "r1z2-10.20.30.40:6000/sda", 8000], + ["add", "r1z2-10.20.30.40:6000/sdb", 8000], + ["add", "r1z2-10.20.30.40:6000/sdc", 8000], + ["add", "r1z2-10.20.30.40:6000/sdd", 8000], + + ["add", "r1z2-10.20.30.41:6000/sda", 8000], + ["add", "r1z2-10.20.30.41:6000/sdb", 8000], + ["add", "r1z2-10.20.30.41:6000/sdc", 8000], + ["add", "r1z2-10.20.30.41:6000/sdd", 8000], + + ["add", "r1z2-10.20.30.43:6000/sda", 8000], + ["add", "r1z2-10.20.30.43:6000/sdb", 8000], + ["add", "r1z2-10.20.30.43:6000/sdc", 8000], + ["add", "r1z2-10.20.30.43:6000/sdd", 8000], + + ["add", "r1z2-10.20.30.44:6000/sda", 8000], + ["add", "r1z2-10.20.30.44:6000/sdb", 8000], + ["add", "r1z2-10.20.30.44:6000/sdc", 8000] + ], [ + ["add", "r1z2-10.20.30.44:6000/sdd", 1000] + ], [ + ["set_weight", 15, 2000] + ], [ + ["remove", 3], + ["set_weight", 15, 3000] + ], [ + ["set_weight", 15, 4000] + ], [ + ["set_weight", 15, 5000] + ], [ + ["set_weight", 15, 6000] + ], [ + ["set_weight", 15, 7000] + ], [ + ["set_weight", 15, 8000] + ]] + } + +""" + +import argparse +import json +import sys + +from swift.common.ring import builder +from swift.common.ring.utils import parse_add_value + + +ARG_PARSER = argparse.ArgumentParser( + description='Put the ring builder through its paces') +ARG_PARSER.add_argument( + '--check', '-c', action='store_true', + help="Just check the scenario, don't execute it.") +ARG_PARSER.add_argument( + 'scenario_path', + help="Path to the scenario file") + + +def _parse_weight(round_index, command_index, weight_str): + try: + weight = float(weight_str) + except ValueError as err: + raise ValueError( + "Invalid weight %r (round %d, command %d): %s" + % (weight_str, round_index, command_index, err)) + if weight < 0: + raise ValueError( + "Negative weight (round %d, command %d)" + % (round_index, command_index)) + return weight + + +def _parse_add_command(round_index, command_index, command): + if len(command) != 3: + raise ValueError( + "Invalid add command (round %d, command %d): expected array of " + "length 3, but got %d" + % (round_index, command_index, len(command))) + + dev_str = command[1] + weight_str = command[2] + + try: + dev = parse_add_value(dev_str) + except ValueError as err: + raise ValueError( + "Invalid device specifier '%s' in add (round %d, command %d): %s" + % (dev_str, 
round_index, command_index, err)) + + dev['weight'] = _parse_weight(round_index, command_index, weight_str) + + if dev['region'] is None: + dev['region'] = 1 + + return ['add', dev] + + +def _parse_remove_command(round_index, command_index, command): + if len(command) != 2: + raise ValueError( + "Invalid remove command (round %d, command %d): expected array of " + "length 2, but got %d" + % (round_index, command_index, len(command))) + + dev_str = command[1] + + try: + dev_id = int(dev_str) + except ValueError as err: + raise ValueError( + "Invalid device ID '%s' in remove (round %d, command %d): %s" + % (dev_str, round_index, command_index, err)) + + return ['remove', dev_id] + + +def _parse_set_weight_command(round_index, command_index, command): + if len(command) != 3: + raise ValueError( + "Invalid set_weight command (round %d, command %d): expected array " + "of length 3, but got %d" + % (round_index, command_index, len(command))) + + dev_str = command[1] + weight_str = command[2] + + try: + dev_id = int(dev_str) + except ValueError as err: + raise ValueError( + "Invalid device ID '%s' in set_weight (round %d, command %d): %s" + % (dev_str, round_index, command_index, err)) + + weight = _parse_weight(round_index, command_index, weight_str) + return ['set_weight', dev_id, weight] + + +def parse_scenario(scenario_data): + """ + Takes a serialized scenario and turns it into a data structure suitable + for feeding to run_scenario(). + + :returns: scenario + :raises: ValueError on invalid scenario + """ + + parsed_scenario = {} + + try: + raw_scenario = json.loads(scenario_data) + except ValueError as err: + raise ValueError("Invalid JSON in scenario file: %s" % err) + + if not isinstance(raw_scenario, dict): + raise ValueError("Scenario must be a JSON object, not array or string") + + if 'part_power' not in raw_scenario: + raise ValueError("part_power missing") + try: + parsed_scenario['part_power'] = int(raw_scenario['part_power']) + except ValueError as err: + raise ValueError("part_power not an integer: %s" % err) + if not 1 <= parsed_scenario['part_power'] <= 32: + raise ValueError("part_power must be between 1 and 32, but was %d" + % raw_scenario['part_power']) + + if 'replicas' not in raw_scenario: + raise ValueError("replicas missing") + try: + parsed_scenario['replicas'] = float(raw_scenario['replicas']) + except ValueError as err: + raise ValueError("replicas not a float: %s" % err) + if parsed_scenario['replicas'] < 1: + raise ValueError("replicas must be at least 1, but is %f" + % parsed_scenario['replicas']) + + if 'overload' not in raw_scenario: + raise ValueError("overload missing") + try: + parsed_scenario['overload'] = float(raw_scenario['overload']) + except ValueError as err: + raise ValueError("overload not a float: %s" % err) + if parsed_scenario['overload'] < 0: + raise ValueError("overload must be non-negative, but is %f" + % parsed_scenario['overload']) + + if 'random_seed' not in raw_scenario: + raise ValueError("random_seed missing") + try: + parsed_scenario['random_seed'] = int(raw_scenario['random_seed']) + except ValueError as err: + raise ValueError("random_seed not an integer: %s" % err) + + if 'rounds' not in raw_scenario: + raise ValueError("rounds missing") + if not isinstance(raw_scenario['rounds'], list): + raise ValueError("rounds must be an array") + + parser_for_command = {'add': _parse_add_command, + 'remove': _parse_remove_command, + 'set_weight': _parse_set_weight_command} + + parsed_scenario['rounds'] = [] + for round_index, raw_round in
enumerate(raw_scenario['rounds']): + if not isinstance(raw_round, list): + raise ValueError("round %d not an array" % round_index) + + parsed_round = [] + for command_index, command in enumerate(raw_round): + if command[0] not in parser_for_command: + raise ValueError( + "Unknown command (round %d, command %d): " + "'%s' should be one of %s" % + (round_index, command_index, command[0], + parser_for_command.keys())) + parsed_round.append( + parser_for_command[command[0]]( + round_index, command_index, command)) + parsed_scenario['rounds'].append(parsed_round) + return parsed_scenario + + +def run_scenario(scenario): + """ + Takes a parsed scenario (like from parse_scenario()) and runs it. + """ + seed = scenario['random_seed'] + + rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1) + rb.set_overload(scenario['overload']) + for round_index, commands in enumerate(scenario['rounds']): + print "Round %d" % (round_index + 1) + + for command in commands: + if command[0] == 'add': + rb.add_dev(command[1]) + elif command[0] == 'remove': + rb.remove_dev(command[1]) + elif command[0] == 'set_weight': + rb.set_dev_weight(command[1], command[2]) + else: + raise ValueError("unknown command %r" % (command[0],)) + + rebalance_number = 1 + parts_moved, old_balance = rb.rebalance(seed=seed) + rb.pretend_min_part_hours_passed() + print "\tRebalance 1: moved %d parts, balance is %.6f" % ( + parts_moved, old_balance) + + while True: + rebalance_number += 1 + parts_moved, new_balance = rb.rebalance(seed=seed) + rb.pretend_min_part_hours_passed() + print "\tRebalance %d: moved %d parts, balance is %.6f" % ( + rebalance_number, parts_moved, new_balance) + if parts_moved == 0: + break + if abs(new_balance - old_balance) < 1 and not ( + old_balance == builder.MAX_BALANCE and + new_balance == builder.MAX_BALANCE): + break + old_balance = new_balance + + +def main(argv=None): + args = ARG_PARSER.parse_args(argv) + + try: + with open(args.scenario_path) as sfh: + scenario_data = sfh.read() + except (IOError, OSError) as err: + sys.stderr.write("Error opening scenario %s: %s\n" % + (args.scenario_path, err)) + return 1 + + try: + scenario = parse_scenario(scenario_data) + except ValueError as err: + sys.stderr.write("Invalid scenario %s: %s\n" % + (args.scenario_path, err)) + return 1 + + if not args.check: + run_scenario(scenario) + return 0 diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index f5c8c14792..0c5d8b7c8f 100755 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -34,7 +34,7 @@ from swift.common.ring.utils import validate_args, \ validate_and_normalize_ip, build_dev_from_opts, \ parse_builder_ring_filename_args, parse_search_value, \ parse_search_values_from_opts, parse_change_values_from_opts, \ - dispersion_report, validate_device_name + dispersion_report, parse_add_value from swift.common.utils import lock_parent_directory MAJOR_VERSION = 1 @@ -129,37 +129,6 @@ def _parse_list_parts_values(argvish): exit(EXIT_ERROR) -def _parse_address(rest): - if rest.startswith('['): - # remove first [] for ip - rest = rest.replace('[', '', 1).replace(']', '', 1) - - pos = 0 - while (pos < len(rest) and - not (rest[pos] == 'R' or rest[pos] == '/')): - pos += 1 - address = rest[:pos] - rest = rest[pos:] - - port_start = address.rfind(':') - if port_start == -1: - raise ValueError('Invalid port in add value') - - ip = address[:port_start] - try: - port = int(address[(port_start + 1):]) - except (TypeError, ValueError): - raise ValueError( - 'Invalid port %s in add value' %
address[port_start:]) - - # if this is an ipv6 address then we want to convert it - # to all lowercase and use its fully expanded representation - # to make searches easier - ip = validate_and_normalize_ip(ip) - - return (ip, port, rest) - - def _parse_add_values(argvish): """ Parse devices to add as specified on the command line. @@ -183,62 +152,25 @@ def _parse_add_values(argvish): islice(args, 1, len(args), 2)) for devstr, weightstr in devs_and_weights: - region = 1 - rest = devstr - if devstr.startswith('r'): - i = 1 - while i < len(devstr) and devstr[i].isdigit(): - i += 1 - region = int(devstr[1:i]) - rest = devstr[i:] - else: + dev_dict = parse_add_value(devstr) + + if dev_dict['region'] is None: stderr.write('WARNING: No region specified for %s. ' 'Defaulting to region 1.\n' % devstr) + dev_dict['region'] = 1 - if not rest.startswith('z'): - raise ValueError('Invalid add value: %s' % devstr) - i = 1 - while i < len(rest) and rest[i].isdigit(): - i += 1 - zone = int(rest[1:i]) - rest = rest[i:] + if dev_dict['replication_ip'] is None: + dev_dict['replication_ip'] = dev_dict['ip'] - if not rest.startswith('-'): - raise ValueError('Invalid add value: %s' % devstr) - - ip, port, rest = _parse_address(rest[1:]) - - replication_ip = ip - replication_port = port - if rest.startswith('R'): - replication_ip, replication_port, rest = \ - _parse_address(rest[1:]) - if not rest.startswith('/'): - raise ValueError( - 'Invalid add value: %s' % devstr) - i = 1 - while i < len(rest) and rest[i] != '_': - i += 1 - device_name = rest[1:i] - if not validate_device_name(device_name): - raise ValueError('Invalid device name') - - rest = rest[i:] - - meta = '' - if rest.startswith('_'): - meta = rest[1:] + if dev_dict['replication_port'] is None: + dev_dict['replication_port'] = dev_dict['port'] weight = float(weightstr) - if weight < 0: raise ValueError('Invalid weight value: %s' % devstr) + dev_dict['weight'] = weight - parsed_devs.append({'region': region, 'zone': zone, 'ip': ip, - 'port': port, 'device': device_name, - 'replication_ip': replication_ip, - 'replication_port': replication_port, - 'weight': weight, 'meta': meta}) + parsed_devs.append(dev_dict) else: parsed_devs.append(build_dev_from_opts(opts)) diff --git a/swift/common/ring/utils.py b/swift/common/ring/utils.py index 7d7856ebfc..ad9ff3fddd 100644 --- a/swift/common/ring/utils.py +++ b/swift/common/ring/utils.py @@ -403,7 +403,7 @@ def parse_search_values_from_opts(opts): Convert optparse style options into a dictionary for searching. :param opts: optparse style options - :returns: a dictonary with search values to filter devices, + :returns: a dictionary with search values to filter devices, supported parameters are id, region, zone, ip, port, replication_ip, replication_port, device, weight, meta """ @@ -440,6 +440,100 @@ def parse_change_values_from_opts(opts): return change_values +def parse_add_value(add_value): + """ + Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary. + + If the string does not start with 'r', then the value of 'region' in + the returned dictionary will be None. Callers should check for this and + set a reasonable default. This is done so callers can emit errors or + warnings if desired. + + Similarly, 'replication_ip' and 'replication_port' will be None if not + specified. 
+ + :returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device', + 'replication_ip', 'replication_port', 'meta' + :raises: ValueError if add_value is malformed + """ + region = None + rest = add_value + if add_value.startswith('r'): + i = 1 + while i < len(add_value) and add_value[i].isdigit(): + i += 1 + region = int(add_value[1:i]) + rest = add_value[i:] + + if not rest.startswith('z'): + raise ValueError('Invalid add value: %s' % add_value) + i = 1 + while i < len(rest) and rest[i].isdigit(): + i += 1 + zone = int(rest[1:i]) + rest = rest[i:] + + if not rest.startswith('-'): + raise ValueError('Invalid add value: %s' % add_value) + + ip, port, rest = parse_address(rest[1:]) + + replication_ip = replication_port = None + if rest.startswith('R'): + replication_ip, replication_port, rest = \ + parse_address(rest[1:]) + if not rest.startswith('/'): + raise ValueError( + 'Invalid add value: %s' % add_value) + i = 1 + while i < len(rest) and rest[i] != '_': + i += 1 + device_name = rest[1:i] + if not validate_device_name(device_name): + raise ValueError('Invalid device name') + + rest = rest[i:] + + meta = '' + if rest.startswith('_'): + meta = rest[1:] + + return {'region': region, 'zone': zone, 'ip': ip, 'port': port, + 'device': device_name, 'replication_ip': replication_ip, + 'replication_port': replication_port, 'meta': meta} + + +def parse_address(rest): + if rest.startswith('['): + # remove first [] for ip + rest = rest.replace('[', '', 1).replace(']', '', 1) + + pos = 0 + while (pos < len(rest) and + not (rest[pos] == 'R' or rest[pos] == '/')): + pos += 1 + address = rest[:pos] + rest = rest[pos:] + + port_start = address.rfind(':') + if port_start == -1: + raise ValueError('Invalid port in add value') + + ip = address[:port_start] + try: + port = int(address[(port_start + 1):]) + except (TypeError, ValueError): + raise ValueError( + 'Invalid port %s in add value' % address[port_start:]) + + # if this is an ipv6 address then we want to convert it + # to all lowercase and use its fully expanded representation + # to make searches easier + ip = validate_and_normalize_ip(ip) + + return (ip, port, rest) + + def validate_args(argvish): """ Build OptionParse and validate it whether the format is new command-line diff --git a/test/unit/cli/test_ring_builder_analyzer.py b/test/unit/cli/test_ring_builder_analyzer.py new file mode 100644 index 0000000000..52ceb8e354 --- /dev/null +++ b/test/unit/cli/test_ring_builder_analyzer.py @@ -0,0 +1,227 @@ +#! /usr/bin/env python +# Copyright (c) 2015 Samuel Merritt +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import mock +import unittest +from StringIO import StringIO + +from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario + + +class TestRunScenario(unittest.TestCase): + def test_it_runs(self): + scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100], + ['add', 'z2-3.4.5.6:7/sda9', 200]], + [['set_weight', 0, 150]], + [['remove', 1]]]} + parsed = parse_scenario(json.dumps(scenario)) + + fake_stdout = StringIO() + with mock.patch('sys.stdout', fake_stdout): + run_scenario(parsed) + + # Just test that it produced some output as it ran; the fact that + # this doesn't crash and produces output that resembles something + # useful is good enough. + self.assertTrue('Rebalance' in fake_stdout.getvalue()) + + +class TestParseScenario(unittest.TestCase): + def test_good(self): + scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100], + ['add', 'z2-3.4.5.6:7/sda9', 200]], + [['set_weight', 0, 150]], + [['remove', 1]]]} + parsed = parse_scenario(json.dumps(scenario)) + + self.assertEqual(parsed['replicas'], 3) + self.assertEqual(parsed['part_power'], 8) + self.assertEqual(parsed['random_seed'], 123) + self.assertEqual(parsed['overload'], 0) + self.assertEqual(parsed['rounds'], [ + [['add', {'device': 'sda8', + 'ip': '3.4.5.6', + 'meta': '', + 'port': 7, + 'region': 1, + 'replication_ip': None, + 'replication_port': None, + 'weight': 100.0, + 'zone': 2}], + ['add', {'device': u'sda9', + 'ip': u'3.4.5.6', + 'meta': '', + 'port': 7, + 'region': 1, + 'replication_ip': None, + 'replication_port': None, + 'weight': 200.0, + 'zone': 2}]], + [['set_weight', 0, 150.0]], + [['remove', 1]]]) + + # The rest of this test class is just a catalog of the myriad ways that + # the input can be malformed. 
+ def test_invalid_json(self): + self.assertRaises(ValueError, parse_scenario, "{") + + def test_json_not_object(self): + self.assertRaises(ValueError, parse_scenario, "[]") + self.assertRaises(ValueError, parse_scenario, "\"stuff\"") + + def test_bad_replicas(self): + working_scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]} + + busted = dict(working_scenario) + del busted['replicas'] + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, replicas='blahblah') + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, replicas=-1) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_part_power(self): + working_scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]} + + busted = dict(working_scenario) + del busted['part_power'] + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, part_power='blahblah') + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, part_power=0) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, part_power=33) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_random_seed(self): + working_scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]} + + busted = dict(working_scenario) + del busted['random_seed'] + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, random_seed='blahblah') + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_overload(self): + working_scenario = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0, + 'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]} + + busted = dict(working_scenario) + del busted['overload'] + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, overload='blahblah') + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(working_scenario, overload=-0.01) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_rounds(self): + base = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0} + + self.assertRaises(ValueError, parse_scenario, json.dumps(base)) + + busted = dict(base, rounds={}) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(base, rounds=[{}]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + busted = dict(base, rounds=[[['bork']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_add(self): + base = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0} + + # no dev + busted = dict(base, rounds=[[['add']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # no weight + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # too many fields + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', 1, 2]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # can't parse + busted = dict(base, rounds=[[['add', 
'not a good value', 100]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # negative weight + busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6000/d7', -1]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_remove(self): + base = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0} + + # no dev + busted = dict(base, rounds=[[['remove']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # bad dev id + busted = dict(base, rounds=[[['remove', 'not an int']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # too many fields + busted = dict(base, rounds=[[['remove', 1, 2]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + def test_bad_set_weight(self): + base = { + 'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0} + + # no dev + busted = dict(base, rounds=[[['set_weight']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # no weight + busted = dict(base, rounds=[[['set_weight', 0]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # bad dev id + busted = dict(base, rounds=[[['set_weight', 'not an int', 90]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # negative weight + busted = dict(base, rounds=[[['set_weight', 1, -1]]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) + + # bogus weight + busted = dict(base, rounds=[[['set_weight', 1, 'bogus']]]) + self.assertRaises(ValueError, parse_scenario, json.dumps(busted)) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 246f282f38..f3df11dc1f 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -180,14 +180,6 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): err = e self.assertEquals(err.code, 2) - def test_parse_address_old_format(self): - # Test old format - argv = "127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data" - ip, port, rest = ringbuilder._parse_address(argv) - self.assertEqual(ip, '127.0.0.1') - self.assertEqual(port, 6000) - self.assertEqual(rest, 'R127.0.0.1:6000/sda1_some meta data') - def test_parse_add_values_number_of_arguments(self): # Test Number of arguments abnormal argv = ["--region", "2", "test"] diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index 4d078e6c00..efd073fde5 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -26,7 +26,8 @@ from swift.common.ring.utils import (tiers_for_dev, build_tier_tree, parse_change_values_from_opts, validate_args, parse_args, parse_builder_ring_filename_args, - build_dev_from_opts, dispersion_report) + build_dev_from_opts, dispersion_report, + parse_address) class TestUtils(unittest.TestCase): @@ -694,6 +695,14 @@ class TestUtils(unittest.TestCase): self.assertEqual(report['worst_tier'], 'r1z0-127.0.0.1') self.assertEqual(report['max_dispersion'], 30.078125) + def test_parse_address_old_format(self): + # Test old format + argv = "127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data" + ip, port, rest = parse_address(argv) + self.assertEqual(ip, '127.0.0.1') + self.assertEqual(port, 6000) + self.assertEqual(rest, 'R127.0.0.1:6000/sda1_some meta data') + if __name__ == '__main__': unittest.main() From 2328983b7535d24d08942e62095b4346f2626566 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 24 Jun 2015 17:07:32 -0700 Subject: [PATCH 84/98] Stop moving 
partitions unnecessarily when overload is on. When overload was on and in use, the ring builder was unnecessarily moving partitions. It would converge on a good solution and settle down eventually, but it moved more partitions than necessary along the way. There are three partition gatherers used in the ring builder: dev-removed, dispersion, and weight, in that order. The dev-removed gatherer will pick up all partitions on removed devices. The dispersion gatherer picks up replicas of partitions that are suboptimally dispersed. The weight gatherer picks up partitions on devices which are overweight. The weight gatherer was not overload-aware, so it would pick up partitions that did not need to move. Consider a device that would normally have 100 partitions assigned, assume we set overload to 0.1 so that this device will hold up to 110 (10 extra) for the sake of dispersion, and assume the device actually has 104 partitions assigned to it. The correct behavior is to gather nothing from this device because it has fewer than the maximum. Prior to this commit, the weight gatherer would remove 4 partitions from this device; they would subsequently be reassigned by the overload-aware partition placer (_reassign_parts()). In a ring with multiple overloaded devices, the builder would pick up some partitions from each, shuffle them, and then assign them back to those same devices. Obviously, this just created extra replication work for no benefit. Now, the weight gatherer takes overload into account, and will no longer needlessly gather partitions. That's not all, though; this change worsened the behavior of a ring with more overload than necessary. Before, the ring would balance as best it could, using the minimal amount of overload. With the weight-gatherer change, the ring builder will stop gathering partitions once a device reaches its maximum-permissible assignment including overload. For example, imagine a 3-replica, 4-zone ring with overload=0.2 and weights: z1: 100 z2: 60 z3: 60 z4: 60 Since z1 has more than 1/3 of the weight, z2, z3, and z4 must take more than their fair share for the sake of dispersion. Now, turn up the weights some: z1: 100 z2: 80 z3: 80 z4: 80 Overload is no longer needed; this ring can balance. However, z2, z3, and z4 would end up keeping more than their fair share since (a) they already had extra due to earlier conditions, and (b) the weight gatherer won't pick up partitions from them since they're not overburdened once it takes overload into account. To fix this, we compute the minimum overload factor required for optimal dispersion and then use min(user-input-overload, minimum-overload) during rebalance. This way, we don't overload devices more than the user says, but if the user sets overload higher than necessary, we'll still give the best balance possible. Change-Id: If5666ba654ee25da54f9144f3b78840273a49627 --- swift/common/ring/builder.py | 206 +++++++++++++++++++++++-- test/unit/common/ring/test_builder.py | 213 ++++++++++++++++++++++++++ 2 files changed, 402 insertions(+), 17 deletions(-) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index d161bb5561..bf0c39dcd2 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -81,6 +81,7 @@ class RingBuilder(object): self.devs_changed = False self.version = 0 self.overload = 0.0 + self._effective_overload = None # _replica2part2dev maps from replica number to partition number to # device id. 
So, for a three replica, 2**23 ring, it's an array of @@ -380,6 +381,11 @@ class RingBuilder(object): if seed is not None: random.seed(seed) + self._effective_overload = min(self.overload, + self.get_required_overload()) + self.logger.debug("Using effective overload of %f", + self._effective_overload) + self._ring = None if self._last_part_moves_epoch is None: self.logger.debug("New builder; performing initial balance") @@ -401,7 +407,8 @@ class RingBuilder(object): while True: reassign_parts = self._gather_reassign_parts() changed_parts += len(reassign_parts) - self.logger.debug("Gathered %d parts", changed_parts) + self.logger.debug("Gathered %d parts thus far (%d this pass)", + changed_parts, len(reassign_parts)) self._reassign_parts(reassign_parts) self.logger.debug("Assigned %d parts", changed_parts) while self._remove_devs: @@ -602,6 +609,151 @@ class RingBuilder(object): balance = dev_balance return balance + def get_required_overload(self): + """ + Returns the minimum overload value required to make the ring maximally + dispersed. + """ + self.logger.debug("computing required overload") + tfd, sibling_tiers = self._compute_sibling_tiers() + max_allowed_replicas = self._build_max_replicas_by_tier() + + # We're computing a bunch of different things here, but iterating + # over all the devs once is more efficient than doing it a bunch of + # times. + all_tiers = set([()]) + tier_weight = defaultdict(float) + total_weight = 0.0 + tier2children = defaultdict(set) + for dev in self._iter_devs(): + dev_weight = dev['weight'] + total_weight += dev_weight + for tier in tfd[dev['id']]: + all_tiers.add(tier) + tier_weight[tier] += dev_weight + tier2children[tier[:-1]].add(tier) + tier_weight[()] = total_weight + + max_required_overload = 0.0 + for tier in all_tiers: + if tier not in tier2children: + continue + if tier_weight[tier] <= 0: + continue + # Example 1: Consider a 3-replica cluster with 2 regions. If one + # region has more than 2/3 the total weight, then (ignoring + # overload) some partitions will reside entirely in the big + # region. + # + # Example 2: Consider a 3-replica cluster with 3 regions. If any + # region has more than 1/3 the total weight, some partitions will + # not have replicas spread across all regions. + # + # Example 3: Consider a 3-replica cluster with 4 regions. If any + # region has more than 1/3 the total weight, some partitions will + # not have replicas spread across all regions. + # + # Example 4: Consider a 3-replica cluster with 100 regions. If + # any region has more than 1/3 the total weight, some partitions + # will not have replicas spread across all regions. The fact + # that there's 100 regions doesn't matter; if one region is big + # enough, it'll get multiple replicas of some partitions. + # + # Example 5: Consider a 5-replica cluster with 2 regions. If the + # bigger region has more than 3/5 the weight, some partitions + # will have more than 3 replicas in the big region. (Optimal + # dispersion is 3 replicas in some region and 2 in the other; 4 + # and 1 is not good enough.) + # + # In general, what we do is split this tier's child tiers + # into two groups: "big" and "small". "Big" child tiers are + # ones whose weight exceeds their fraction of the replicas. + # For example, given 3 replicas and 4 zones of total weight + # 12,000, a zone with weight greater than 1/3 of 12,000 (= + # 4,000) would be considered big. "Small" child tiers are + # those which are not big. 
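+            #
+            # As a worked example (illustrative numbers): with 3
+            # replicas and four zones of weight 100, 60, 60 and 60
+            # (total 280), the threshold is 1/3 * 280 = 93.33, so only
+            # the 100-weight zone is "big"; the formula below then
+            # yields a required overload of 1/9 for the small zones.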
+            #
+            # Once we've divided the child tiers into big and small, we
+            # figure out how many replicas should wind up on the small
+            # child tiers (all together), and then compute the needed
+            # overload factor to boost their weights so they can take
+            # that many replicas.
+            child_tiers = tier2children[tier]
+            tier_replicas = max_allowed_replicas[tier]
+            big_child_count = small_child_count = 0
+            big_child_weight = small_child_weight = 0.0
+
+            max_child_replicas = math.ceil(tier_replicas / len(child_tiers))
+            bigness_threshold = (
+                max_child_replicas / tier_replicas * tier_weight[tier])
+
+            for child_tier in tier2children[tier]:
+                child_weight = tier_weight[child_tier]
+                if child_weight == 0:
+                    # If it's got 0 weight, it's not taking any
+                    # partitions at all, so it doesn't count.
+                    continue
+                if child_weight >= bigness_threshold:
+                    big_child_count += 1
+                    big_child_weight += child_weight
+                else:
+                    small_child_count += 1
+                    small_child_weight += child_weight
+
+            if big_child_count == 0 or small_child_count == 0:
+                # We only need overload if we have both big and small
+                # tiers. Usually, all small tiers means things can
+                # balance, while all big tiers means that we have
+                # exactly one child tier (e.g. a cluster with only one
+                # region).
+                continue
+
+            # We assume each big child tier takes the maximum possible
+            # number of replicas for optimal dispersion, but no more.
+            # That leaves the remainder for the small child tiers.
+            big_child_replicas = max_child_replicas * big_child_count
+            small_child_replicas = tier_replicas - big_child_replicas
+
+            if small_child_replicas == 0:
+                # If we're not putting any replicas on small child
+                # tiers, then there's no need for overload. This also
+                # avoids a division-by-zero below.
+                continue
+
+            # We want the overloaded small tiers to take up their fair
+            # share of the replicas. We can express this as follows:
+            #
+            # Let Ws be the sum of the weights of the small child tiers.
+            #
+            # Let Wb be the sum of the weights of the big child tiers.
+            #
+            # Let Rt be the number of replicas at the current tier.
+            #
+            # Let Rs be the desired number of replicas for the small
+            # child tiers.
+            #
+            # Let L be the overload, so an overloaded tier's effective
+            # weight is its weight times (L + 1).
+            #
+            # Then, we have the following:
+            #
+            # ((L + 1) * Ws) / (Wb + (L + 1) * Ws) = Rs / Rt
+            #
+            # Solving for L, we get:
+            #
+            # L = 1 / (Ws / Wb * (Rt / Rs - 1)) - 1
+            required_overload = 1.0 / (
+                (small_child_weight / big_child_weight)
+                * (tier_replicas / small_child_replicas - 1)) - 1
+
+            if required_overload > max_required_overload:
+                self.logger.debug("Required overload for %r is %f [NEW HIGH]",
+                                  tier, required_overload)
+                max_required_overload = required_overload
+            else:
+                self.logger.debug("Required overload for %r is %f",
+                                  tier, required_overload)
+        return max_required_overload
+
     def pretend_min_part_hours_passed(self):
         """
         Override min_part_hours by marking all partitions as having been moved
@@ -643,6 +795,8 @@
         used to sort the devices according to "most wanted" during
         rebalancing to best distribute partitions. A negative parts_wanted
         indicates the device is "overweight" and wishes to give partitions
         away if possible.
+
+        Note: parts_wanted does *not* consider overload.
         """
         weight_of_one_part = self.weight_of_one_part()
@@ -767,29 +921,30 @@
         Returns a dict of (tier: available parts in other tiers) for all tiers
         in the ring.

-        Devices that have too much partitions (negative parts_wanted) are
-        ignored, otherwise the sum of all parts_wanted is 0 +/- rounding
-        errors.
+ Devices that have too many partitions (negative parts_wanted plus + overload) are ignored, otherwise the sum of all returned values is 0 + +/- rounding errors. + + This takes overload into account. """ wanted_parts_for_tier = {} for dev in self._iter_devs(): - pw = (max(0, dev['parts_wanted']) + - max(int(math.ceil( - (dev['parts_wanted'] + dev['parts']) * self.overload)), - 0)) + extra_overload_parts = self._n_overload_parts(dev) + pw = max(dev['parts_wanted'] + extra_overload_parts, 0) for tier in tiers_for_dev(dev): wanted_parts_for_tier.setdefault(tier, 0) wanted_parts_for_tier[tier] += pw return wanted_parts_for_tier - def _gather_reassign_parts(self): + def _compute_sibling_tiers(self): """ - Returns a list of (partition, replicas) pairs to be reassigned by - gathering from removed devices, insufficiently-far-apart replicas, and - overweight drives. + Returns a 2-tuple; the first value is a dictionary mapping each + device's id to its tiers, and the second is a dictionary mapping + a-tier: list-of-sibling-tiers. """ # inline memoization of tiers_for_dev() results (profiling reveals it - # as a hot-spot). + # as a hot-spot). We also return it so callers don't have to + # rebuild it. tfd = {} tiers_by_len = defaultdict(set) @@ -807,6 +962,15 @@ class RingBuilder(object): for i, tier in enumerate(tiers): sibling_tiers[tier] = [t for t in (tiers[:i] + tiers[(i + 1):]) if t[:-1] == tier[:-1]] + return (tfd, sibling_tiers) + + def _gather_reassign_parts(self): + """ + Returns a list of (partition, replicas) pairs to be reassigned by + gathering from removed devices, insufficiently-far-apart replicas, and + overweight drives. + """ + tfd, sibling_tiers = self._compute_sibling_tiers() # First we gather partitions from removed devices. Since removed # devices usually indicate device failures, we have no choice but to @@ -917,6 +1081,7 @@ class RingBuilder(object): start += random.randint(0, self.parts / 2) # GRAH PEP8!!! self._last_part_gather_start = start + for replica, part2dev in enumerate(self._replica2part2dev): # If we've got a partial replica, start may be out of # range. Scale it down so that we get a similar movement @@ -930,7 +1095,8 @@ class RingBuilder(object): if part in removed_dev_parts or part in spread_out_parts: continue dev = self.devs[part2dev[part]] - if dev['parts_wanted'] < 0: + fudge = self._n_overload_parts(dev) + if dev['parts_wanted'] + fudge < 0: self._last_part_moves[part] = 0 dev['parts_wanted'] += 1 dev['parts'] -= 1 @@ -953,6 +1119,14 @@ class RingBuilder(object): random.shuffle(reassign_parts_list) return reassign_parts_list + def _n_overload_parts(self, dev): + """ + The number of extra partitions a device can take due to overload. + """ + return max(int(math.ceil( + (dev['parts_wanted'] + dev['parts']) + * self._effective_overload)), 0) + def _reassign_parts(self, reassign_parts): """ For an existing ring data set, partitions are reassigned similarly to @@ -992,9 +1166,7 @@ class RingBuilder(object): # with partitions to shed, which is any time a device is being # removed, which is a pretty frequent operation. 
wanted = max(dev['parts_wanted'], 0) - fudge = max(int(math.ceil( - (dev['parts_wanted'] + dev['parts']) * self.overload)), - 0) + fudge = self._n_overload_parts(dev) for tier in tiers: fudge_available_in_tier[tier] += (wanted + fudge) parts_available_in_tier[tier] += wanted diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 769937b6e6..03ecb5167a 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -1071,6 +1071,75 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(part_counts[1], 256) self.assertEqual(part_counts[2], 256) + def test_unoverload(self): + # Start off needing overload to balance, then add capacity until we + # don't need overload any more and see that things still balance. + # Overload doesn't prevent optimal balancing. + rb = ring.RingBuilder(8, 3, 1) + rb.set_overload(0.125) + rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + rb.add_dev({'id': 2, 'region': 0, 'region': 0, 'zone': 0, 'weight': 2, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) + rb.rebalance(seed=12345) + + # sanity check: our overload is big enough to balance things + part_counts = self._partition_counts(rb) + self.assertEqual(part_counts[0], 216) + self.assertEqual(part_counts[1], 216) + self.assertEqual(part_counts[2], 336) + + # Add some weight: balance improves + rb.set_dev_weight(0, 1.5) + rb.set_dev_weight(1, 1.5) + rb.pretend_min_part_hours_passed() + rb.rebalance(seed=12345) + + part_counts = self._partition_counts(rb) + self.assertEqual(part_counts[0], 236) + self.assertEqual(part_counts[1], 236) + self.assertEqual(part_counts[2], 296) + + # Even out the weights: balance becomes perfect + rb.set_dev_weight(0, 2) + rb.set_dev_weight(1, 2) + + rb.pretend_min_part_hours_passed() + rb.rebalance(seed=12345) + + part_counts = self._partition_counts(rb) + self.assertEqual(part_counts[0], 256) + self.assertEqual(part_counts[1], 256) + self.assertEqual(part_counts[2], 256) + + # Add some new devices: balance stays optimal + rb.add_dev({'id': 3, 'region': 0, 'region': 0, 'zone': 0, + 'weight': 2.0 / 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) + rb.add_dev({'id': 4, 'region': 0, 'region': 0, 'zone': 0, + 'weight': 2.0 / 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'}) + rb.add_dev({'id': 5, 'region': 0, 'region': 0, 'zone': 0, + 'weight': 2.0 / 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'}) + + # we're moving more than 1/3 of the replicas but fewer than 2/3, so + # we have to do this twice + rb.pretend_min_part_hours_passed() + rb.rebalance(seed=12345) + rb.pretend_min_part_hours_passed() + rb.rebalance(seed=12345) + + part_counts = self._partition_counts(rb) + self.assertEqual(part_counts[0], 192) + self.assertEqual(part_counts[1], 192) + self.assertEqual(part_counts[2], 192) + self.assertEqual(part_counts[3], 64) + self.assertEqual(part_counts[4], 64) + self.assertEqual(part_counts[5], 64) + def test_overload_keeps_balanceable_things_balanced_initially(self): rb = ring.RingBuilder(8, 3, 1) rb.add_dev({'id': 0, 'region': 0, 'region': 0, 'zone': 0, 'weight': 8, @@ -1595,5 +1664,149 @@ class TestRingBuilder(unittest.TestCase): }) +class TestGetRequiredOverload(unittest.TestCase): + def assertApproximately(self, a, b, error=1e-6): + self.assertTrue(abs(a - b) < error, + "%f 
and %f differ by more than %f" % (a, b, error)) + + def test_none_needed(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) + + # 4 equal-weight devs and 3 replicas: this can be balanced without + # resorting to overload at all + self.assertApproximately(rb.get_required_overload(), 0) + + # 3 equal-weight devs and 3 replicas: this can also be balanced + rb.remove_dev(3) + self.assertApproximately(rb.get_required_overload(), 0) + + def test_small_zone(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 4, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 4, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 4, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 4, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) + + rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 4, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'}) + rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 3, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'}) + + # Zone 2 has 7/8 of the capacity of the other two zones, so an + # overload of 1/7 will allow things to balance out. + self.assertApproximately(rb.get_required_overload(), 1.0 / 7) + + def test_big_zone(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60, + 'ip': '127.0.0.3', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60, + 'ip': '127.0.0.3', 'port': 10000, 'device': 'sdb'}) + + # Zone 1 has weight 200, while zones 2, 3, and 4 together have only + # 360. The small zones would need to go from 360 to 400 to balance + # out zone 1, for an overload of 40/360 = 1/9. 
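+        # (Equivalently, via the formula in get_required_overload():
+        # Ws = 360, Wb = 200, Rt = 3 and Rs = 2, so the required
+        # overload is 1 / (360/200 * (3/2 - 1)) - 1 = 1/0.9 - 1 = 1/9.)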
+ self.assertApproximately(rb.get_required_overload(), 1.0 / 9) + + def test_enormous_zone(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1000, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1000, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 60, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 60, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 60, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) + + # Zone 1 has weight 2000, while zones 2, 3, and 4 together have only + # 360. The small zones would need to go from 360 to 4000 to balance + # out zone 1, for an overload of 3640/360. + self.assertApproximately(rb.get_required_overload(), 3640.0 / 360) + + def test_two_big_two_small(self): + rb = ring.RingBuilder(8, 3, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 100, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 100, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 4, 'region': 0, 'zone': 2, 'weight': 45, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 5, 'region': 0, 'zone': 2, 'weight': 45, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 6, 'region': 0, 'zone': 3, 'weight': 35, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 7, 'region': 0, 'zone': 3, 'weight': 35, + 'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'}) + + # Zones 1 and 2 each have weight 200, while zones 3 and 4 together + # have only 160. The small zones would need to go from 160 to 200 to + # balance out the big zones, for an overload of 40/160 = 1/4. + self.assertApproximately(rb.get_required_overload(), 1.0 / 4) + + def test_multiple_replicas_each(self): + rb = ring.RingBuilder(8, 7, 1) + rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 100, + 'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb'}) + + rb.add_dev({'id': 2, 'region': 0, 'zone': 1, 'weight': 70, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'}) + rb.add_dev({'id': 3, 'region': 0, 'zone': 1, 'weight': 70, + 'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'}) + + # Zone 0 has more than 4/7 of the weight, so we'll need to bring + # zone 1 up to a total of 150 so it can take 3 replicas, so the + # overload should be 10/140. 
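+        # (Via the same formula: Ws = 140, Wb = 200, Rt = 7 and Rs = 3,
+        # so 1 / (140/200 * (7/3 - 1)) - 1 = 15/14 - 1 = 1/14 = 10/140.)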
+        self.assertApproximately(rb.get_required_overload(), 10.0 / 140)
+
+
 if __name__ == '__main__':
     unittest.main()

From 10f367224d5f30c3398169ffe3087f225f3343fc Mon Sep 17 00:00:00 2001
From: janonymous
Date: Fri, 3 Jul 2015 00:05:36 +0530
Subject: [PATCH 85/98] Fix Python 3 issues

* Replace long with six.integer_types
  six.integer_types: In Python 2, this is long and int, and in
  Python 3, just int. 'long' is an undefined variable in py3.

Change-Id: Ibe4c5d0d81fe883f4fe33be93a68d814a228ad28
---
 swift/common/splice.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/swift/common/splice.py b/swift/common/splice.py
index f27ba6cd55..7bc279c5dc 100644
--- a/swift/common/splice.py
+++ b/swift/common/splice.py
@@ -19,7 +19,7 @@ Bindings to the `tee` and `splice` system calls

 import os
 import operator
-
+import six
 import ctypes
 import ctypes.util

@@ -85,7 +85,7 @@ class Tee(object):
         if not self.available:
             raise EnvironmentError('tee not available')

-        if not isinstance(flags, (int, long)):
+        if not isinstance(flags, six.integer_types):
             c_flags = reduce(operator.or_, flags, 0)
         else:
             c_flags = flags
@@ -176,7 +176,7 @@ class Splice(object):
         if not self.available:
             raise EnvironmentError('splice not available')

-        if not isinstance(flags, (int, long)):
+        if not isinstance(flags, six.integer_types):
             c_flags = reduce(operator.or_, flags, 0)
         else:
             c_flags = flags

From 8eca02d3a1db1e0ed68b15ed41bc62e6c89870c7 Mon Sep 17 00:00:00 2001
From: janonymous
Date: Fri, 3 Jul 2015 00:45:58 +0530
Subject: [PATCH 86/98] Replace xrange() with six.moves.range()

xrange is no longer available in py3. Replacing xrange with
range from six.moves.

Change-Id: Ib863c316a0724bd9c4f53c2e5a8d1bcd42c4dc5a
---
 bin/swift-dispersion-populate | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/bin/swift-dispersion-populate b/bin/swift-dispersion-populate
index dd736aabd2..2484592201 100755
--- a/bin/swift-dispersion-populate
+++ b/bin/swift-dispersion-populate
@@ -20,7 +20,7 @@ from cStringIO import StringIO
 from optparse import OptionParser
 from sys import exit, stdout
 from time import time
-
+from six.moves import range
 from eventlet import GreenPool, patcher, sleep
 from eventlet.pools import Pool

@@ -162,7 +162,7 @@ Usage: %%prog [options] [conf_file]
     if container_populate:
         container_ring = Ring(swift_dir, ring_name='container')
         parts_left = dict((x, x)
-                          for x in xrange(container_ring.partition_count))
+                          for x in range(container_ring.partition_count))

     if options.no_overlap:
         with connpool.item() as conn:
@@ -214,7 +214,7 @@ Usage: %%prog [options] [conf_file]
         container = 'dispersion_objects_%d' % policy.idx
         put_container(connpool, container, None, headers)
         object_ring = Ring(swift_dir, ring_name=policy.ring_name)
-        parts_left = dict((x, x) for x in xrange(object_ring.partition_count))
+        parts_left = dict((x, x) for x in range(object_ring.partition_count))

     if options.no_overlap:
         with connpool.item() as conn:

From cd20961abd20f01a14554c14d4f61a7f0d8deb88 Mon Sep 17 00:00:00 2001
From: janonymous
Date: Sun, 5 Jul 2015 10:59:50 +0530
Subject: [PATCH 87/98] Replace dict.iteritems() with dict.items() in bin
 directory of swift.

The iteritems() of Python 2 dictionaries has been renamed to items() on
Python 3.
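As a generic illustration of the pattern (hypothetical dict, not taken
from the diff below):

    counts = {'a': 1, 'b': 2}
    # Python 2: counts.iteritems() returns an iterator; gone in Python 3.
    # counts.items() exists in both (a list on 2, a view on 3), so it is
    # the portable spelling for simple iteration.
    for key, value in counts.items():
        print('%s=%s' % (key, value))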
Change-Id: I4bdc064c90bab56cd60f2dca2a5a78426ffbb31c --- bin/swift-dispersion-report | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report index 2a1b0c1d48..b08d02717f 100755 --- a/bin/swift-dispersion-report +++ b/bin/swift-dispersion-report @@ -149,7 +149,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring, if containers_listed - distinct_partitions: print 'There were %d overlapping partitions' % ( containers_listed - distinct_partitions) - for missing_copies, num_parts in container_copies_missing.iteritems(): + for missing_copies, num_parts in container_copies_missing.items(): print missing_string(num_parts, missing_copies, container_ring.replica_count) print '%.02f%% of container copies found (%d of %d)' % ( @@ -164,7 +164,7 @@ def container_dispersion_report(coropool, connpool, account, container_ring, 'pct_found': value, 'copies_found': copies_found, 'copies_expected': copies_expected} - for missing_copies, num_parts in container_copies_missing.iteritems(): + for missing_copies, num_parts in container_copies_missing.items(): results['missing_%d' % (missing_copies)] = num_parts return results @@ -260,7 +260,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, print 'There were %d overlapping partitions' % ( objects_listed - distinct_partitions) - for missing_copies, num_parts in object_copies_missing.iteritems(): + for missing_copies, num_parts in object_copies_missing.items(): print missing_string(num_parts, missing_copies, object_ring.replica_count) @@ -277,7 +277,7 @@ def object_dispersion_report(coropool, connpool, account, object_ring, 'copies_found': copies_found, 'copies_expected': copies_expected} - for missing_copies, num_parts in object_copies_missing.iteritems(): + for missing_copies, num_parts in object_copies_missing.items(): results['missing_%d' % (missing_copies,)] = num_parts return results From bd3717df65f57da9e7f06292ba349442d17fc939 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Thu, 2 Jul 2015 09:54:30 -0700 Subject: [PATCH 88/98] update AUTHORS file Change-Id: I509ceeb522bd83c407beb40c22c2924a1d92b8ff --- .mailmap | 5 +++++ AUTHORS | 13 +++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 172c761c06..4a6368f24a 100644 --- a/.mailmap +++ b/.mailmap @@ -73,3 +73,8 @@ Eohyung Lee Harshit Chitalia Richard Hawkins Sarvesh Ranjan +Minwoo Bae Minwoo B +Jaivish Kothari +Michael Matur +Kazuhiro Miyahara +Alexandra Settle diff --git a/AUTHORS b/AUTHORS index ce680dae15..ebde31a39a 100644 --- a/AUTHORS +++ b/AUTHORS @@ -29,6 +29,7 @@ Mehdi Abaakouk (mehdi.abaakouk@enovance.com) Jesse Andrews (anotherjesse@gmail.com) Joe Arnold (joe@swiftstack.com) Ionuț Arțăriși (iartarisi@suse.cz) +Minwoo Bae (minwoob@us.ibm.com) Bob Ball (bob.ball@citrix.com) Christian Berendt (berendt@b1-systems.de) Luis de Bethencourt (luis@debethencourt.com) @@ -42,9 +43,11 @@ Pádraig Brady (pbrady@redhat.com) Lorcan Browne (lorcan.browne@hp.com) Russell Bryant (rbryant@redhat.com) Jay S. Bryant (jsbryant@us.ibm.com) +Tim Burke (tim.burke@gmail.com) Brian D. 
Burns (iosctr@gmail.com) Devin Carlen (devin.carlen@gmail.com) Thierry Carrez (thierry@openstack.org) +Emmanuel Cazenave (contact@emcaz.fr) Mahati Chamarthy (mahati.chamarthy@gmail.com) Zap Chang (zapchang@gmail.com) François Charlier (francois.charlier@enovance.com) @@ -88,6 +91,7 @@ Dan Hersam (dan.hersam@hp.com) Derek Higgins (derekh@redhat.com) Alex Holden (alex@alexjonasholden.com) Edward Hope-Morley (opentastic@gmail.com) +Joanna H. Huang (joanna.huitzu.huang@gmail.com) Kun Huang (gareth@unitedstack.com) Matthieu Huin (mhu@enovance.com) Hodong Hwang (hodong.hwang@kt.com) @@ -111,6 +115,7 @@ Nathan Kinder (nkinder@redhat.com) Eugene Kirpichov (ekirpichov@gmail.com) Leah Klearman (lklrmn@gmail.com) Martin Kletzander (mkletzan@redhat.com) +Jaivish Kothari (jaivish.kothari@nectechnologies.in) Steve Kowalik (steven@wedontsleep.org) Sergey Kraynev (skraynev@mirantis.com) Sushil Kumar (sushil.kumar2@globallogic.com) @@ -135,6 +140,7 @@ Steve Martinelli (stevemar@ca.ibm.com) Juan J. Martinez (juan@memset.com) Marcelo Martins (btorch@gmail.com) Dolph Mathews (dolph.mathews@gmail.com) +Michael Matur (michael.matur@gmail.com) Donagh McCabe (donagh.mccabe@hp.com) Andy McCrae (andy.mccrae@gmail.com) Paul McMillan (paul.mcmillan@nebula.com) @@ -142,6 +148,7 @@ Ewan Mellor (ewan.mellor@citrix.com) Samuel Merritt (sam@swiftstack.com) Stephen Milton (milton@isomedia.com) Jola Mirecka (jola.mirecka@hp.com) +Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) Daisuke Morita (morita.daisuke@lab.ntt.co.jp) Dirk Mueller (dirk@dmllr.de) Russ Nelson (russ@crynwr.com) @@ -161,6 +168,7 @@ Sascha Peilicke (saschpe@gmx.de) Constantine Peresypkin (constantine.peresypk@rackspace.com) Dieter Plaetinck (dieter@vimeo.com) Dan Prince (dprince@redhat.com) +Sarvesh Ranjan (saranjan@cisco.com) Felipe Reyes (freyes@tty.cl) Janie Richling (jrichli@us.ibm.com) Matt Riedemann (mriedem@us.ibm.com) @@ -171,9 +179,9 @@ Aaron Rosen (arosen@nicira.com) Brent Roskos (broskos@internap.com) Shilla Saebi (shilla.saebi@gmail.com) Cristian A Sanchez (cristian.a.sanchez@intel.com) -Sarvesh Ranjan (saranjan@cisco.com) Christian Schwede (cschwede@redhat.com) Mark Seger (Mark.Seger@hp.com) +Alexandra Settle (alexandra.settle@rackspace.com) Andrew Clay Shafer (acs@parvuscaptus.com) Mitsuhiro SHIGEMATSU (shigematsu.mitsuhiro@lab.ntt.co.jp) Dhriti Shikhar (dhrish20@gmail.com) @@ -181,6 +189,7 @@ Chuck Short (chuck.short@canonical.com) Michael Shuler (mshuler@gmail.com) David Moreau Simard (dmsimard@iweb.com) Scott Simpson (sasimpson@gmail.com) +Pradeep Kumar Singh (pradeep.singh@nectechnologies.in) Liu Siqi (meizu647@gmail.com) Adrian Smith (adrian_f_smith@dell.com) Jon Snitow (otherjon@swiftstack.com) @@ -188,6 +197,7 @@ TheSriram (sriram@klusterkloud.com) Jeremy Stanley (fungi@yuggoth.org) Mauro Stettler (mauro.stettler@gmail.com) Tobias Stevenson (tstevenson@vbridges.com) +Victor Stinner (vstinner@redhat.com) Pearl Yajing Tan (pearl.y.tan@seagate.com) Yuriy Taraday (yorik.sar@gmail.com) Monty Taylor (mordred@inaugust.com) @@ -223,4 +233,3 @@ Hua Zhang (zhuadl@cn.ibm.com) Jian Zhang (jian.zhang@intel.com) Ning Zhang (ning@zmanda.com) Yuan Zhou (yuan.zhou@intel.com) -Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) From 12473104f005b76ebe4b6dbe118a5ac37518eb52 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 7 Jul 2015 18:02:53 -0700 Subject: [PATCH 89/98] Cleanup error messages in ECDiskFileWriter.commit I think the error messages make better sense and it's more obvious what the error handling code was trying to bring to the 
surface for operators. If you run this test with nosetests --verbose --nocapture test_diskfile.py:TestECDiskFile.test_commit_fsync_dir_raises_DiskFileErrors ... you can review most of the potential log output Change-Id: I40c9d77f44e087ee61d9642e924b0a4039d6ca9a --- swift/obj/diskfile.py | 61 ++++++++++++---------------------- test/unit/obj/test_diskfile.py | 6 ++-- 2 files changed, 24 insertions(+), 43 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 2e01137dc1..91ad8d9370 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -1784,52 +1784,33 @@ class ECDiskFileReader(DiskFileReader): class ECDiskFileWriter(DiskFileWriter): def _finalize_durable(self, durable_file_path): - exc = msg = None + exc = None try: - with open(durable_file_path, 'w') as _fp: - fsync(_fp.fileno()) - try: - fsync_dir(self._datadir) - except OSError as os_err: - msg = (_('%s \nProblem fsyncing dir' - 'after writing .durable: %s') % - (os_err, self._datadir)) - exc = DiskFileError(msg) - except IOError as io_err: - if io_err.errno in (errno.ENOSPC, errno.EDQUOT): - msg = (_('%s \nNo space left on device' - 'for updates to: %s') % - (io_err, self._datadir)) - exc = DiskFileNoSpace(msg) - else: - msg = (_('%s \nProblem fsyncing dir' - 'after writing .durable: %s') % - (io_err, self._datadir)) - exc = DiskFileError(msg) - if exc: - self.manager.logger.exception(msg) - raise exc + try: + with open(durable_file_path, 'w') as _fp: + fsync(_fp.fileno()) + fsync_dir(self._datadir) + except (OSError, IOError) as err: + if err.errno not in (errno.ENOSPC, errno.EDQUOT): + # re-raise to catch all handler + raise + msg = (_('No space left on device for %s (%s)') % + (durable_file_path, err)) + self.manager.logger.error(msg) + exc = DiskFileNoSpace(str(err)) + else: try: self.manager.hash_cleanup_listdir(self._datadir) except OSError as os_err: self.manager.logger.exception( - _('%s \nProblem cleaning up %s') % - (os_err, self._datadir)) - except OSError as os_err: - msg = (_('%s \nProblem fsyncing durable state file: %s') % - (os_err, durable_file_path)) - exc = DiskFileError(msg) - except IOError as io_err: - if io_err.errno in (errno.ENOSPC, errno.EDQUOT): - msg = (_('%s \nNo space left on device for %s') % - (io_err, durable_file_path)) - exc = DiskFileNoSpace(msg) - else: - msg = (_('%s \nProblem writing durable state file: %s') % - (io_err, durable_file_path)) - exc = DiskFileError(msg) - if exc: + _('Problem cleaning up %s (%s)') % + (self._datadir, os_err)) + except Exception as err: + msg = (_('Problem writing durable state file %s (%s)') % + (durable_file_path, err)) self.manager.logger.exception(msg) + exc = DiskFileError(msg) + if exc: raise exc def commit(self, timestamp): diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 67e01ecec8..a5724c2e2f 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -3270,8 +3270,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): # Check IOErrors from fsync_dir() is handled for err_number, expected_exception in scenarios: - io_error = IOError() - io_error.errno = err_number + io_error = IOError(err_number, os.strerror(err_number)) mock_open = mock.MagicMock(side_effect=io_error) mock_io_error = mock.MagicMock(side_effect=io_error) df = self._simple_get_diskfile(account='a', container='c', @@ -3298,7 +3297,8 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase): rmtree(df._datadir) # Check OSError from fsync_dir() is handled - mock_os_error = 
mock.MagicMock(side_effect=OSError) + mock_os_error = mock.MagicMock( + side_effect=OSError(100, 'Some Error')) df = self._simple_get_diskfile(account='a', container='c', obj='o_fsync_dir_error') From e87a74695e2f55dbb0e01721de9d46e230e36008 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Sat, 6 Jun 2015 13:03:15 -0700 Subject: [PATCH 90/98] add domain_remap to /info Also added the value of default_reseller_prefix to /info. I did not add the reseller_prefixes value to /info because deployers may not want to expose all of the resellers that are available. Change-Id: I9ca2f002dc395913bb646390d6908dfb8f554df1 --- swift/common/middleware/domain_remap.py | 8 +++++-- .../common/middleware/test_domain_remap.py | 22 +++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index 052f7728df..ee74b4fd83 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -51,6 +51,7 @@ sync destinations. """ from swift.common.swob import Request, HTTPBadRequest +from swift.common.utils import list_from_csv, register_swift_info class DomainRemapMiddleware(object): @@ -71,8 +72,7 @@ class DomainRemapMiddleware(object): self.storage_domain = '.' + self.storage_domain self.path_root = conf.get('path_root', 'v1').strip('/') prefixes = conf.get('reseller_prefixes', 'AUTH') - self.reseller_prefixes = [x.strip() for x in prefixes.split(',') - if x.strip()] + self.reseller_prefixes = list_from_csv(prefixes) self.reseller_prefixes_lower = [x.lower() for x in self.reseller_prefixes] self.default_reseller_prefix = conf.get('default_reseller_prefix') @@ -136,6 +136,10 @@ def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) + register_swift_info( + 'domain_remap', + default_reseller_prefix=conf.get('default_reseller_prefix')) + def domain_filter(app): return DomainRemapMiddleware(app, conf) return domain_filter diff --git a/test/unit/common/middleware/test_domain_remap.py b/test/unit/common/middleware/test_domain_remap.py index b14dfbcb2e..225e947a76 100644 --- a/test/unit/common/middleware/test_domain_remap.py +++ b/test/unit/common/middleware/test_domain_remap.py @@ -17,6 +17,7 @@ import unittest from swift.common.swob import Request from swift.common.middleware import domain_remap +from swift.common import utils class FakeApp(object): @@ -155,5 +156,26 @@ class TestDomainRemap(unittest.TestCase): self.assertEquals(resp, '/v1/AUTH_uuid/test') +class TestSwiftInfo(unittest.TestCase): + def setUp(self): + utils._swift_info = {} + utils._swift_admin_info = {} + + def test_registered_defaults(self): + domain_remap.filter_factory({}) + swift_info = utils.get_swift_info() + self.assertTrue('domain_remap' in swift_info) + self.assertTrue( + swift_info['domain_remap'].get('default_reseller_prefix') is None) + + def test_registered_nondefaults(self): + domain_remap.filter_factory({'default_reseller_prefix': 'cupcake'}) + swift_info = utils.get_swift_info() + self.assertTrue('domain_remap' in swift_info) + self.assertEquals( + swift_info['domain_remap'].get('default_reseller_prefix'), + 'cupcake') + + if __name__ == '__main__': unittest.main() From 125238612f58481316db68d7087252bb7729f447 Mon Sep 17 00:00:00 2001 From: Janie Richling Date: Sat, 4 Jul 2015 17:08:32 -0500 Subject: [PATCH 91/98] Add CORS unit tests to base In earlier versions of swift when a request was made with an existing origin, but without any CORS settings in the 
container, it was possible to get an unhandled exception due to a method call on the "None" return of cors.get('allow_origin', ''). Unit tests have been added to assert that this problem cannot go undetected again. Change-Id: Ia74896dabe1cf5a307c551b15a43ab1fd789c213 Fixes: bug 1468782 --- swift/proxy/controllers/base.py | 3 ++- test/unit/proxy/controllers/test_base.py | 33 +++++++++++++++++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 953a85af58..89f3e8e4f2 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -1617,7 +1617,8 @@ class Controller(object): list_from_csv(req.headers['Access-Control-Request-Headers'])) # Populate the response with the CORS preflight headers - if cors.get('allow_origin', '').strip() == '*': + if cors.get('allow_origin') and \ + cors.get('allow_origin').strip() == '*': headers['access-control-allow-origin'] = '*' else: headers['access-control-allow-origin'] = req_origin_value diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 30c213e0b4..8df8b37bf2 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -128,7 +128,7 @@ class FakeApp(object): reason = RESPONSE_REASONS[response.status_int][0] start_response('%d %s' % (response.status_int, reason), [(k, v) for k, v in response.headers.items()]) - # It's a bit strnage, but the get_info cache stuff relies on the + # It's a bit strange, but the get_info cache stuff relies on the # app setting some keys in the environment as it makes requests # (in particular GETorHEAD_base) - so our fake does the same _set_info_cache(self, environ, response.account, @@ -436,6 +436,37 @@ class TestFuncs(unittest.TestCase): self.assertEquals(resp['length'], 5555) self.assertEquals(resp['type'], 'text/plain') + def test_options(self): + base = Controller(self.app) + base.account_name = 'a' + base.container_name = 'c' + origin = 'http://m.com' + self.app.cors_allow_origin = [origin] + req = Request.blank('/v1/a/c/o', + environ={'swift.cache': FakeCache()}, + headers={'Origin': origin, + 'Access-Control-Request-Method': 'GET'}) + + with patch('swift.proxy.controllers.base.' + 'http_connect', fake_http_connect(200)): + resp = base.OPTIONS(req) + self.assertEqual(resp.status_int, 200) + + def test_options_unauthorized(self): + base = Controller(self.app) + base.account_name = 'a' + base.container_name = 'c' + self.app.cors_allow_origin = ['http://NOT_IT'] + req = Request.blank('/v1/a/c/o', + environ={'swift.cache': FakeCache()}, + headers={'Origin': 'http://m.com', + 'Access-Control-Request-Method': 'GET'}) + + with patch('swift.proxy.controllers.base.' + 'http_connect', fake_http_connect(200)): + resp = base.OPTIONS(req) + self.assertEqual(resp.status_int, 401) + def test_headers_to_container_info_missing(self): resp = headers_to_container_info({}, 404) self.assertEquals(resp['status'], 404) From 6cafd0a4c0bb8f311fc59df580b42e801214effd Mon Sep 17 00:00:00 2001 From: Oshrit Feder Date: Wed, 8 Jul 2015 15:18:22 +0300 Subject: [PATCH 92/98] Fix Container Sync example Container-sync realm uses cluster_ as a prefix to specify clusters' names. At use, the prefix should not be included. Fixing the examples and sample conf to make it clearer that only the name of the cluster should be passed. 
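Concretely, with hypothetical realm and cluster names:

    # /etc/swift/container-sync-realms.conf
    [myrealm]
    key = myrealmkey
    cluster_sfo = https://sfo.example.com/v1/

Given that, a sync destination is written with the bare cluster name,
e.g. X-Container-Sync-To: //myrealm/sfo/AUTH_test/mycontainer, that is,
'sfo', never 'cluster_sfo'.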
Change-Id: I2e521d86faffb59e1b45d3f039987ee023c5e939 --- doc/source/overview_container_sync.rst | 16 ++++++++-------- etc/container-sync-realms.conf-sample | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index ee56daf6ca..8f03bf8174 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -35,14 +35,14 @@ and their information:: [realm1] key = realm1key key2 = realm1key2 - cluster_name1 = https://host1/v1/ - cluster_name2 = https://host2/v1/ + cluster_clustername1 = https://host1/v1/ + cluster_clustername2 = https://host2/v1/ [realm2] key = realm2key key2 = realm2key2 - cluster_name3 = https://host3/v1/ - cluster_name4 = https://host4/v1/ + cluster_clustername3 = https://host3/v1/ + cluster_clustername4 = https://host4/v1/ Each section name is the name of a sync realm. A sync realm is a set of @@ -165,12 +165,12 @@ Now, let's make our first container and tell it to synchronize to a second we'll make next:: $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing post \ - -t '//realm_name/cluster2_name/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ + -t '//realm_name/clustername2/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ -k 'secret' container1 The ``-t`` indicates the cluster to sync to, which is the realm name of the section from container-sync-realms.conf, followed by the cluster name from -that section, followed by the account and container names we want to sync to. +that section (without the cluster\_ prefix), followed by the account and container names we want to sync to. The ``-k`` specifies the secret key the two containers will share for synchronization; this is the user key, the cluster key in container-sync-realms.conf will also be used behind the scenes. @@ -178,7 +178,7 @@ container-sync-realms.conf will also be used behind the scenes. Now, we'll do something similar for the second cluster's container:: $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 post \ - -t '//realm_name/cluster1_name/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' \ + -t '//realm_name/clustername1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' \ -k 'secret' container2 That's it. 
Now we can upload a bunch of stuff to the first container and watch @@ -224,7 +224,7 @@ For instance, when we created the first container above and told it to synchronize to the second, we could have used this curl command:: $ curl -i -X POST -H 'X-Auth-Token: AUTH_tkd5359e46ff9e419fa193dbd367f3cd19' \ - -H 'X-Container-Sync-To: //realm_name/cluster2_name/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ + -H 'X-Container-Sync-To: //realm_name/clustername2/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ -H 'X-Container-Sync-Key: secret' \ 'http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' HTTP/1.1 204 No Content diff --git a/etc/container-sync-realms.conf-sample b/etc/container-sync-realms.conf-sample index 1eaddc19b3..29de0eb44d 100644 --- a/etc/container-sync-realms.conf-sample +++ b/etc/container-sync-realms.conf-sample @@ -7,14 +7,14 @@ # [realm1] # key = realm1key # key2 = realm1key2 -# cluster_name1 = https://host1/v1/ -# cluster_name2 = https://host2/v1/ +# cluster_clustername1 = https://host1/v1/ +# cluster_clustername2 = https://host2/v1/ # # [realm2] # key = realm2key # key2 = realm2key2 -# cluster_name3 = https://host3/v1/ -# cluster_name4 = https://host4/v1/ +# cluster_clustername3 = https://host3/v1/ +# cluster_clustername4 = https://host4/v1/ # Each section name is the name of a sync realm. A sync realm is a set of From 56ee39a7e13417203c5e1816d7a3184a07f85826 Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Thu, 9 Jul 2015 15:19:32 +1000 Subject: [PATCH 93/98] Ring builder code clean up follow up patch This is a simple change that cleans up a NIT from Sam's 'stop moving partitions unnecessarily when overload is on' patch. Change-Id: I9d9f1cc23e2bb625d8e158f4d3f64e10973176a1 --- swift/common/ring/builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index bf0c39dcd2..4e41f31c98 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -687,7 +687,7 @@ class RingBuilder(object): bigness_threshold = ( max_child_replicas / tier_replicas * tier_weight[tier]) - for child_tier in tier2children[tier]: + for child_tier in child_tiers: child_weight = tier_weight[child_tier] if child_weight == 0: # If it's got 0 weight, it's not taking any From 4beceab4f4be99f14025815cf7ed4510ea77f460 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 9 Jul 2015 06:14:56 +0000 Subject: [PATCH 94/98] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I9ff1dde06be45fc7d6c441a1e1c07221f839a9a1 --- swift/locale/swift.pot | 403 +++++++++++++----------- swift/locale/zh_CN/LC_MESSAGES/swift.po | 8 +- 2 files changed, 224 insertions(+), 187 deletions(-) diff --git a/swift/locale/swift.pot b/swift/locale/swift.pot index 19690cf934..f7a41bba58 100644 --- a/swift/locale/swift.pot +++ b/swift/locale/swift.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: swift 2.3.0rc1.post7\n" +"Project-Id-Version: swift 2.3.1.dev133\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-16 06:06+0000\n" +"POT-Creation-Date: 2015-07-09 06:14+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -63,8 +63,8 @@ msgstr "" msgid "ERROR Could not get account info %s" msgstr "" -#: swift/account/reaper.py:134 swift/common/utils.py:2127 -#: swift/obj/diskfile.py:476 
swift/obj/updater.py:88 swift/obj/updater.py:131 +#: swift/account/reaper.py:134 swift/common/utils.py:2146 +#: swift/obj/diskfile.py:480 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" @@ -142,8 +142,8 @@ msgstr "" msgid "Account %s has not been reaped since %s" msgstr "" -#: swift/account/reaper.py:349 swift/account/reaper.py:397 -#: swift/account/reaper.py:464 swift/container/updater.py:306 +#: swift/account/reaper.py:349 swift/account/reaper.py:399 +#: swift/account/reaper.py:469 swift/container/updater.py:306 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" @@ -154,12 +154,12 @@ msgid "Exception with objects for container %(container)s for account %(account) msgstr "" #: swift/account/server.py:275 swift/container/server.py:582 -#: swift/obj/server.py:910 +#: swift/obj/server.py:914 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "" -#: swift/common/bufferedhttp.py:157 +#: swift/common/bufferedhttp.py:205 swift/common/bufferedhttp.py:210 #, python-format msgid "Error encoding to UTF-8: %s" msgstr "" @@ -175,16 +175,16 @@ msgstr "" msgid "Error in %r with mtime_check_interval: %s" msgstr "" -#: swift/common/db.py:347 +#: swift/common/db.py:349 #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "" -#: swift/common/db.py:402 +#: swift/common/db.py:404 msgid "Broker error trying to rollback locked connection" msgstr "" -#: swift/common/db.py:605 +#: swift/common/db.py:607 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "" @@ -194,186 +194,196 @@ msgstr "" msgid "ERROR reading HTTP response from %s" msgstr "" -#: swift/common/db_replicator.py:193 +#: swift/common/db_replicator.py:196 #, python-format msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" -#: swift/common/db_replicator.py:199 +#: swift/common/db_replicator.py:202 #, python-format msgid "Removed %(remove)d dbs" msgstr "" -#: swift/common/db_replicator.py:200 +#: swift/common/db_replicator.py:203 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" -#: swift/common/db_replicator.py:231 +#: swift/common/db_replicator.py:243 #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "" -#: swift/common/db_replicator.py:294 +#: swift/common/db_replicator.py:312 #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "" -#: swift/common/db_replicator.py:453 swift/common/db_replicator.py:678 +#: swift/common/db_replicator.py:478 swift/common/db_replicator.py:721 #, python-format msgid "Quarantining DB %s" msgstr "" -#: swift/common/db_replicator.py:456 +#: swift/common/db_replicator.py:481 #, python-format msgid "ERROR reading db %s" msgstr "" -#: swift/common/db_replicator.py:487 +#: swift/common/db_replicator.py:530 #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "" -#: swift/common/db_replicator.py:489 +#: swift/common/db_replicator.py:532 #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" -#: swift/common/db_replicator.py:517 +#: swift/common/db_replicator.py:560 #, python-format msgid "ERROR while trying to clean up %s" msgstr "" -#: swift/common/db_replicator.py:543 +#: swift/common/db_replicator.py:586 msgid "ERROR Failed to get my own IPs?" 
msgstr "" -#: swift/common/db_replicator.py:553 +#: swift/common/db_replicator.py:596 #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "" -#: swift/common/db_replicator.py:562 +#: swift/common/db_replicator.py:605 msgid "Beginning replication run" msgstr "" -#: swift/common/db_replicator.py:567 +#: swift/common/db_replicator.py:610 msgid "Replication run OVER" msgstr "" -#: swift/common/db_replicator.py:580 +#: swift/common/db_replicator.py:623 msgid "ERROR trying to replicate" msgstr "" -#: swift/common/internal_client.py:193 +#: swift/common/internal_client.py:194 #, python-format msgid "Unexpected response: %s" msgstr "" -#: swift/common/manager.py:63 +#: swift/common/manager.py:65 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:70 +#: swift/common/manager.py:72 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:77 +#: swift/common/manager.py:79 msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" -#: swift/common/manager.py:195 +#: swift/common/manager.py:220 msgid "" "\n" "user quit" msgstr "" -#: swift/common/manager.py:232 swift/common/manager.py:547 +#: swift/common/manager.py:257 swift/common/manager.py:585 #, python-format msgid "No %s running" msgstr "" -#: swift/common/manager.py:245 +#: swift/common/manager.py:270 #, python-format msgid "%s (%s) appears to have stopped" msgstr "" -#: swift/common/manager.py:255 +#: swift/common/manager.py:280 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "" -#: swift/common/manager.py:439 +#: swift/common/manager.py:464 #, python-format msgid "Unable to locate config number %s for %s" msgstr "" -#: swift/common/manager.py:442 +#: swift/common/manager.py:467 #, python-format msgid "Unable to locate config for %s" msgstr "" -#: swift/common/manager.py:445 +#: swift/common/manager.py:470 msgid "Found configs:" msgstr "" -#: swift/common/manager.py:489 +#: swift/common/manager.py:517 +#, python-format +msgid "Removing pid file %s with invalid pid" +msgstr "" + +#: swift/common/manager.py:522 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "" -#: swift/common/manager.py:496 +#: swift/common/manager.py:527 +#, python-format +msgid "Removing pid file %s with wrong pid %d" +msgstr "" + +#: swift/common/manager.py:534 #, python-format msgid "Removing stale pid file %s" msgstr "" -#: swift/common/manager.py:499 +#: swift/common/manager.py:537 #, python-format msgid "No permission to signal PID %d" msgstr "" -#: swift/common/manager.py:544 +#: swift/common/manager.py:582 #, python-format msgid "%s #%d not running (%s)" msgstr "" -#: swift/common/manager.py:551 swift/common/manager.py:644 -#: swift/common/manager.py:647 +#: swift/common/manager.py:589 swift/common/manager.py:682 +#: swift/common/manager.py:685 #, python-format msgid "%s running (%s - %s)" msgstr "" -#: swift/common/manager.py:650 +#: swift/common/manager.py:688 #, python-format msgid "%s already started..." 
msgstr "" -#: swift/common/manager.py:659 +#: swift/common/manager.py:697 #, python-format msgid "Running %s once" msgstr "" -#: swift/common/manager.py:661 +#: swift/common/manager.py:699 #, python-format msgid "Starting %s" msgstr "" -#: swift/common/manager.py:668 +#: swift/common/manager.py:706 #, python-format msgid "%s does not exist" msgstr "" -#: swift/common/memcached.py:191 +#: swift/common/memcached.py:161 #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "" -#: swift/common/memcached.py:194 +#: swift/common/memcached.py:164 #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "" -#: swift/common/memcached.py:219 +#: swift/common/memcached.py:189 #, python-format msgid "Error limiting server %s" msgstr "" @@ -383,114 +393,114 @@ msgstr "" msgid "No policy with index %s" msgstr "" -#: swift/common/request_helpers.py:395 +#: swift/common/request_helpers.py:378 msgid "ERROR: An error occurred while retrieving segments" msgstr "" -#: swift/common/utils.py:388 +#: swift/common/utils.py:390 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:578 +#: swift/common/utils.py:580 msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" -#: swift/common/utils.py:662 +#: swift/common/utils.py:664 #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "" -#: swift/common/utils.py:1074 +#: swift/common/utils.py:1076 #, python-format msgid "%s: Connection reset by peer" msgstr "" -#: swift/common/utils.py:1076 swift/common/utils.py:1079 +#: swift/common/utils.py:1078 swift/common/utils.py:1081 #, python-format msgid "%s: %s" msgstr "" -#: swift/common/utils.py:1314 +#: swift/common/utils.py:1316 msgid "Connection refused" msgstr "" -#: swift/common/utils.py:1316 +#: swift/common/utils.py:1318 msgid "Host unreachable" msgstr "" -#: swift/common/utils.py:1318 +#: swift/common/utils.py:1320 msgid "Connection timeout" msgstr "" -#: swift/common/utils.py:1620 +#: swift/common/utils.py:1623 msgid "UNCAUGHT EXCEPTION" msgstr "" -#: swift/common/utils.py:1675 +#: swift/common/utils.py:1678 msgid "Error: missing config path argument" msgstr "" -#: swift/common/utils.py:1680 +#: swift/common/utils.py:1683 #, python-format msgid "Error: unable to locate %s" msgstr "" -#: swift/common/utils.py:1988 +#: swift/common/utils.py:2007 #, python-format msgid "Unable to read config from %s" msgstr "" -#: swift/common/utils.py:1994 +#: swift/common/utils.py:2013 #, python-format msgid "Unable to find %s config section in %s" msgstr "" -#: swift/common/utils.py:2353 +#: swift/common/utils.py:2372 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" -#: swift/common/utils.py:2358 +#: swift/common/utils.py:2377 #, python-format msgid "No realm key for %r" msgstr "" -#: swift/common/utils.py:2362 +#: swift/common/utils.py:2381 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" -#: swift/common/utils.py:2371 +#: swift/common/utils.py:2390 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." 
msgstr "" -#: swift/common/utils.py:2375 +#: swift/common/utils.py:2394 msgid "Path required in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2378 +#: swift/common/utils.py:2397 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2383 +#: swift/common/utils.py:2402 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" -#: swift/common/utils.py:2575 +#: swift/common/utils.py:2594 msgid "Exception dumping recon cache" msgstr "" -#: swift/common/wsgi.py:197 +#: swift/common/wsgi.py:198 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "" -#: swift/common/wsgi.py:207 +#: swift/common/wsgi.py:208 msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external " "SSL termination for a production deployment." @@ -500,12 +510,12 @@ msgstr "" msgid "Error: An error occurred" msgstr "" -#: swift/common/middleware/cname_lookup.py:144 +#: swift/common/middleware/cname_lookup.py:146 #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "" -#: swift/common/middleware/cname_lookup.py:156 +#: swift/common/middleware/cname_lookup.py:158 #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" @@ -658,61 +668,61 @@ msgid "" "later)" msgstr "" -#: swift/container/sync.py:217 +#: swift/container/sync.py:218 msgid "" "Configuration option internal_client_conf_path not defined. Using default" " configuration, See internal-client.conf-sample for options" msgstr "" -#: swift/container/sync.py:230 +#: swift/container/sync.py:231 #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" -#: swift/container/sync.py:264 +#: swift/container/sync.py:265 msgid "Begin container sync \"once\" mode" msgstr "" -#: swift/container/sync.py:276 +#: swift/container/sync.py:277 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" -#: swift/container/sync.py:284 +#: swift/container/sync.py:285 #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " "%(skip)s skipped, %(fail)s failed" msgstr "" -#: swift/container/sync.py:337 +#: swift/container/sync.py:338 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "" -#: swift/container/sync.py:393 +#: swift/container/sync.py:394 #, python-format msgid "ERROR Syncing %s" msgstr "" -#: swift/container/sync.py:476 +#: swift/container/sync.py:477 #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" -#: swift/container/sync.py:510 +#: swift/container/sync.py:511 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "" -#: swift/container/sync.py:516 +#: swift/container/sync.py:517 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" -#: swift/container/sync.py:523 swift/container/sync.py:530 +#: swift/container/sync.py:524 swift/container/sync.py:531 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "" @@ -722,8 +732,8 @@ msgstr "" msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" -#: swift/container/updater.py:91 swift/obj/reconstructor.py:788 -#: swift/obj/replicator.py:487 swift/obj/replicator.py:575 +#: swift/container/updater.py:91 swift/obj/reconstructor.py:797 +#: swift/obj/replicator.py:498 swift/obj/replicator.py:586 #, python-format msgid "%s is not mounted" msgstr "" @@ -835,55 +845,82 @@ msgstr "" msgid "ERROR 
auditing: %s" msgstr "" -#: swift/obj/diskfile.py:323 swift/obj/diskfile.py:2305 +#: swift/obj/diskfile.py:327 swift/obj/diskfile.py:2339 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:414 swift/obj/diskfile.py:2373 +#: swift/obj/diskfile.py:418 swift/obj/diskfile.py:2407 msgid "Error hashing suffix" msgstr "" -#: swift/obj/diskfile.py:486 swift/obj/updater.py:162 +#: swift/obj/diskfile.py:490 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" -#: swift/obj/diskfile.py:737 +#: swift/obj/diskfile.py:741 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" -#: swift/obj/diskfile.py:936 swift/obj/diskfile.py:1795 +#: swift/obj/diskfile.py:941 #, python-format msgid "Problem cleaning up %s" msgstr "" -#: swift/obj/diskfile.py:1253 +#: swift/obj/diskfile.py:1259 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" -#: swift/obj/diskfile.py:1543 +#: swift/obj/diskfile.py:1549 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" -#: swift/obj/diskfile.py:1797 +#: swift/obj/diskfile.py:1802 swift/obj/diskfile.py:1813 #, python-format -msgid "Problem fsyncing durable state file: %s" +msgid "" +"%s \n" +"Problem fsyncing dirafter writing .durable: %s" msgstr "" -#: swift/obj/diskfile.py:1802 +#: swift/obj/diskfile.py:1808 #, python-format -msgid "No space left on device for %s" +msgid "" +"%s \n" +"No space left on devicefor updates to: %s" msgstr "" -#: swift/obj/diskfile.py:1806 +#: swift/obj/diskfile.py:1824 #, python-format -msgid "Problem writing durable state file: %s" +msgid "" +"%s \n" +"Problem cleaning up %s" +msgstr "" + +#: swift/obj/diskfile.py:1827 +#, python-format +msgid "" +"%s \n" +"Problem fsyncing durable state file: %s" +msgstr "" + +#: swift/obj/diskfile.py:1832 +#, python-format +msgid "" +"%s \n" +"No space left on device for %s" +msgstr "" + +#: swift/obj/diskfile.py:1836 +#, python-format +msgid "" +"%s \n" +"Problem writing durable state file: %s" msgstr "" #: swift/obj/expirer.py:79 @@ -915,175 +952,175 @@ msgstr "" msgid "Exception while deleting object %s %s %s" msgstr "" -#: swift/obj/reconstructor.py:189 swift/obj/reconstructor.py:472 +#: swift/obj/reconstructor.py:208 swift/obj/reconstructor.py:478 #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "" -#: swift/obj/reconstructor.py:195 +#: swift/obj/reconstructor.py:214 #, python-format msgid "Trying to GET %(full_path)s" msgstr "" -#: swift/obj/reconstructor.py:301 +#: swift/obj/reconstructor.py:321 #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" -#: swift/obj/reconstructor.py:324 +#: swift/obj/reconstructor.py:344 #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed" " in %(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/reconstructor.py:337 swift/obj/replicator.py:419 +#: swift/obj/reconstructor.py:357 swift/obj/replicator.py:430 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr "" -#: swift/obj/reconstructor.py:344 swift/obj/replicator.py:426 +#: swift/obj/reconstructor.py:364 swift/obj/replicator.py:437 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med 
%(med).4fs" msgstr "" -#: swift/obj/reconstructor.py:352 +#: swift/obj/reconstructor.py:372 #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "" -#: swift/obj/reconstructor.py:381 swift/obj/replicator.py:463 +#: swift/obj/reconstructor.py:401 swift/obj/replicator.py:474 msgid "Lockup detected.. killing live coros." msgstr "" -#: swift/obj/reconstructor.py:442 +#: swift/obj/reconstructor.py:448 #, python-format msgid "Trying to sync suffixes with %s" msgstr "" -#: swift/obj/reconstructor.py:467 +#: swift/obj/reconstructor.py:473 #, python-format msgid "%s responded as unmounted" msgstr "" -#: swift/obj/reconstructor.py:849 swift/obj/replicator.py:295 +#: swift/obj/reconstructor.py:860 swift/obj/replicator.py:306 #, python-format msgid "Removing partition: %s" msgstr "" -#: swift/obj/reconstructor.py:865 +#: swift/obj/reconstructor.py:876 msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:884 +#: swift/obj/reconstructor.py:895 msgid "Exception in top-levelreconstruction loop" msgstr "" -#: swift/obj/reconstructor.py:894 +#: swift/obj/reconstructor.py:905 msgid "Running object reconstructor in script mode." msgstr "" -#: swift/obj/reconstructor.py:903 +#: swift/obj/reconstructor.py:914 #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/reconstructor.py:910 +#: swift/obj/reconstructor.py:921 msgid "Starting object reconstructor in daemon mode." msgstr "" -#: swift/obj/reconstructor.py:914 +#: swift/obj/reconstructor.py:925 msgid "Starting object reconstruction pass." msgstr "" -#: swift/obj/reconstructor.py:919 +#: swift/obj/reconstructor.py:930 #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:139 +#: swift/obj/replicator.py:145 #, python-format msgid "Killing long-running rsync: %s" msgstr "" -#: swift/obj/replicator.py:153 +#: swift/obj/replicator.py:159 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "" -#: swift/obj/replicator.py:160 swift/obj/replicator.py:164 +#: swift/obj/replicator.py:166 swift/obj/replicator.py:170 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" -#: swift/obj/replicator.py:281 +#: swift/obj/replicator.py:292 #, python-format msgid "Removing %s objects" msgstr "" -#: swift/obj/replicator.py:289 +#: swift/obj/replicator.py:300 msgid "Error syncing handoff partition" msgstr "" -#: swift/obj/replicator.py:351 +#: swift/obj/replicator.py:362 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" -#: swift/obj/replicator.py:356 +#: swift/obj/replicator.py:367 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" -#: swift/obj/replicator.py:391 +#: swift/obj/replicator.py:402 #, python-format msgid "Error syncing with node: %s" msgstr "" -#: swift/obj/replicator.py:395 +#: swift/obj/replicator.py:406 msgid "Error syncing partition" msgstr "" -#: swift/obj/replicator.py:408 +#: swift/obj/replicator.py:419 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" -#: swift/obj/replicator.py:434 +#: swift/obj/replicator.py:445 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" -#: swift/obj/replicator.py:578 +#: swift/obj/replicator.py:589 msgid "Ring change detected. Aborting current replication pass." 
msgstr "" -#: swift/obj/replicator.py:599 +#: swift/obj/replicator.py:610 msgid "Exception in top-level replication loop" msgstr "" -#: swift/obj/replicator.py:608 +#: swift/obj/replicator.py:619 msgid "Running object replicator in script mode." msgstr "" -#: swift/obj/replicator.py:626 +#: swift/obj/replicator.py:637 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" -#: swift/obj/replicator.py:633 +#: swift/obj/replicator.py:644 msgid "Starting object replicator in daemon mode." msgstr "" -#: swift/obj/replicator.py:637 +#: swift/obj/replicator.py:648 msgid "Starting object replication pass." msgstr "" -#: swift/obj/replicator.py:642 +#: swift/obj/replicator.py:653 #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "" @@ -1179,129 +1216,129 @@ msgstr "" msgid "Account" msgstr "" -#: swift/proxy/controllers/base.py:752 swift/proxy/controllers/base.py:814 -#: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:411 -#: swift/proxy/controllers/obj.py:427 swift/proxy/controllers/obj.py:643 -#: swift/proxy/controllers/obj.py:1130 swift/proxy/controllers/obj.py:1591 -#: swift/proxy/controllers/obj.py:1763 swift/proxy/controllers/obj.py:1908 -#: swift/proxy/controllers/obj.py:2093 +#: swift/proxy/controllers/base.py:797 swift/proxy/controllers/base.py:836 +#: swift/proxy/controllers/base.py:928 swift/proxy/controllers/obj.py:364 +#: swift/proxy/controllers/obj.py:584 swift/proxy/controllers/obj.py:996 +#: swift/proxy/controllers/obj.py:1043 swift/proxy/controllers/obj.py:1057 +#: swift/proxy/controllers/obj.py:1864 swift/proxy/controllers/obj.py:2101 +#: swift/proxy/controllers/obj.py:2229 swift/proxy/controllers/obj.py:2414 msgid "Object" msgstr "" -#: swift/proxy/controllers/base.py:753 +#: swift/proxy/controllers/base.py:798 swift/proxy/controllers/base.py:837 msgid "Trying to read during GET (retrying)" msgstr "" -#: swift/proxy/controllers/base.py:815 +#: swift/proxy/controllers/base.py:929 msgid "Trying to read during GET" msgstr "" -#: swift/proxy/controllers/base.py:819 +#: swift/proxy/controllers/base.py:933 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" -#: swift/proxy/controllers/base.py:824 +#: swift/proxy/controllers/base.py:938 msgid "Client disconnected on read" msgstr "" -#: swift/proxy/controllers/base.py:826 +#: swift/proxy/controllers/base.py:940 msgid "Trying to send to client" msgstr "" -#: swift/proxy/controllers/base.py:863 swift/proxy/controllers/base.py:1141 +#: swift/proxy/controllers/base.py:991 swift/proxy/controllers/base.py:1303 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" -#: swift/proxy/controllers/base.py:902 swift/proxy/controllers/base.py:1129 -#: swift/proxy/controllers/obj.py:402 swift/proxy/controllers/obj.py:450 -#: swift/proxy/controllers/obj.py:1900 swift/proxy/controllers/obj.py:2138 +#: swift/proxy/controllers/base.py:1030 swift/proxy/controllers/base.py:1291 +#: swift/proxy/controllers/obj.py:387 swift/proxy/controllers/obj.py:1034 +#: swift/proxy/controllers/obj.py:2221 swift/proxy/controllers/obj.py:2459 msgid "ERROR Insufficient Storage" msgstr "" -#: swift/proxy/controllers/base.py:905 +#: swift/proxy/controllers/base.py:1033 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "" -#: swift/proxy/controllers/base.py:1132 +#: swift/proxy/controllers/base.py:1294 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" -#: 
swift/proxy/controllers/base.py:1260 +#: swift/proxy/controllers/base.py:1424 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" -#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:161 +#: swift/proxy/controllers/container.py:98 swift/proxy/controllers/obj.py:163 msgid "Container" msgstr "" -#: swift/proxy/controllers/obj.py:365 swift/proxy/controllers/obj.py:1592 -#, python-format -msgid "Trying to write to %s" -msgstr "" - -#: swift/proxy/controllers/obj.py:406 swift/proxy/controllers/obj.py:1903 -#, python-format -msgid "ERROR %(status)d Expect: 100-continue From Object Server" -msgstr "" - -#: swift/proxy/controllers/obj.py:412 swift/proxy/controllers/obj.py:1909 -#, python-format -msgid "Expect: 100-continue on %s" -msgstr "" - -#: swift/proxy/controllers/obj.py:428 +#: swift/proxy/controllers/obj.py:365 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" -#: swift/proxy/controllers/obj.py:454 swift/proxy/controllers/obj.py:2143 +#: swift/proxy/controllers/obj.py:391 swift/proxy/controllers/obj.py:2464 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" -#: swift/proxy/controllers/obj.py:716 +#: swift/proxy/controllers/obj.py:657 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" -#: swift/proxy/controllers/obj.py:725 +#: swift/proxy/controllers/obj.py:666 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" -#: swift/proxy/controllers/obj.py:811 swift/proxy/controllers/obj.py:2048 +#: swift/proxy/controllers/obj.py:1038 swift/proxy/controllers/obj.py:2224 +#, python-format +msgid "ERROR %(status)d Expect: 100-continue From Object Server" +msgstr "" + +#: swift/proxy/controllers/obj.py:1044 swift/proxy/controllers/obj.py:2230 +#, python-format +msgid "Expect: 100-continue on %s" +msgstr "" + +#: swift/proxy/controllers/obj.py:1058 swift/proxy/controllers/obj.py:1865 +#, python-format +msgid "Trying to write to %s" +msgstr "" + +#: swift/proxy/controllers/obj.py:1109 swift/proxy/controllers/obj.py:2369 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" -#: swift/proxy/controllers/obj.py:818 swift/proxy/controllers/obj.py:2055 +#: swift/proxy/controllers/obj.py:1116 swift/proxy/controllers/obj.py:2376 msgid "ERROR Exception causing client disconnect" msgstr "" -#: swift/proxy/controllers/obj.py:823 swift/proxy/controllers/obj.py:2060 +#: swift/proxy/controllers/obj.py:1121 swift/proxy/controllers/obj.py:2381 msgid "Client disconnected without sending enough data" msgstr "" -#: swift/proxy/controllers/obj.py:869 +#: swift/proxy/controllers/obj.py:1167 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" -#: swift/proxy/controllers/obj.py:873 swift/proxy/controllers/obj.py:2218 +#: swift/proxy/controllers/obj.py:1171 swift/proxy/controllers/obj.py:2544 msgid "Object PUT" msgstr "" -#: swift/proxy/controllers/obj.py:2035 +#: swift/proxy/controllers/obj.py:2356 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" -#: swift/proxy/controllers/obj.py:2094 +#: swift/proxy/controllers/obj.py:2415 #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index 48f5ded42c..7f02e6febd 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -8,11 +8,11 @@ msgid "" msgstr "" "Project-Id-Version: 
Swift\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-28 06:08+0000\n" -"PO-Revision-Date: 2015-04-15 12:48+0000\n" +"POT-Creation-Date: 2015-07-09 06:14+0000\n" +"PO-Revision-Date: 2015-07-09 05:58+0000\n" "Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/swift/" -"language/zh_CN/)\n" +"Language-Team: Chinese (China) (http://www.transifex.com/p/swift/language/" +"zh_CN/)\n" "Plural-Forms: nplurals=1; plural=0\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" From ff192cfe5705324497a389aa2f22227d75dc0f8e Mon Sep 17 00:00:00 2001 From: janonymous Date: Wed, 8 Jul 2015 18:38:22 +0530 Subject: [PATCH 95/98] Replace reduce and unichr , these are no longer available in py3 * Replace reduce() with six.moves.reduce() * Replace unichr with six.unichr Change-Id: I2038e47e0a6522dd992fd2a4aeff981cf7750fe0 --- swift/common/splice.py | 4 ++-- swift/common/storage_policy.py | 4 +++- test/unit/common/test_internal_client.py | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/swift/common/splice.py b/swift/common/splice.py index 7bc279c5dc..f5d81d8d10 100644 --- a/swift/common/splice.py +++ b/swift/common/splice.py @@ -86,7 +86,7 @@ class Tee(object): raise EnvironmentError('tee not available') if not isinstance(flags, six.integer_types): - c_flags = reduce(operator.or_, flags, 0) + c_flags = six.moves.reduce(operator.or_, flags, 0) else: c_flags = flags @@ -177,7 +177,7 @@ class Splice(object): raise EnvironmentError('splice not available') if not isinstance(flags, six.integer_types): - c_flags = reduce(operator.or_, flags, 0) + c_flags = six.moves.reduce(operator.or_, flags, 0) else: c_flags = flags diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index fcda344b56..415aa55377 100644 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -15,6 +15,7 @@ from ConfigParser import ConfigParser import os import string import textwrap +import six from swift.common.utils import ( config_true_value, SWIFT_CONF_FILE, whataremyips) @@ -76,7 +77,8 @@ class BindPortsCache(object): # the first one we notice. # Return the requested set of ports from our (now-freshened) cache - return reduce(set.union, self.portsets_by_ring_path.values(), set()) + return six.moves.reduce(set.union, + self.portsets_by_ring_path.values(), set()) class PolicyError(ValueError): diff --git a/test/unit/common/test_internal_client.py b/test/unit/common/test_internal_client.py index 4b9c56d8ad..4c931af549 100644 --- a/test/unit/common/test_internal_client.py +++ b/test/unit/common/test_internal_client.py @@ -22,6 +22,7 @@ import zlib from textwrap import dedent import os +import six from six.moves import range from test.unit import FakeLogger import eventlet @@ -39,7 +40,7 @@ def not_sleep(seconds): def unicode_string(start, length): - return u''.join([unichr(x) for x in range(start, start + length)]) + return u''.join([six.unichr(x) for x in range(start, start + length)]) def path_parts(): From 1cc3eff958fdd4fb07c2b74c52df7829d3125466 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Fri, 10 Jul 2015 13:04:44 +0200 Subject: [PATCH 96/98] Fixes for mock 1.1 The new release of mock 1.1 is more strict. It helped to find bugs in tests. 
Closes-Bug: #1473369 Change-Id: Id179513c6010d827cbcbdda7692a920e29213bcb --- test/unit/common/test_db_replicator.py | 4 ++-- test/unit/common/test_wsgi.py | 8 ++++---- test/unit/obj/test_diskfile.py | 2 +- test/unit/obj/test_expirer.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 8cc556127e..6bdf6f0cdf 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -1188,10 +1188,10 @@ class TestDBReplicator(unittest.TestCase): db_file = __file__ replicator = TestReplicator({}) replicator._http_connect(node, partition, db_file) - db_replicator.ReplConnection.assert_has_calls( + db_replicator.ReplConnection.assert_has_calls([ mock.call(node, partition, os.path.basename(db_file).split('.', 1)[0], - replicator.logger)) + replicator.logger)]) class TestReplToNode(unittest.TestCase): diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 5189a6b7cb..27e39206c2 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -376,7 +376,7 @@ class TestWSGI(unittest.TestCase): _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True) _eventlet.debug.hub_exceptions.assert_called_with(False) - _wsgi.server.assert_called() + self.assertTrue(_wsgi.server.called) args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) @@ -419,7 +419,7 @@ class TestWSGI(unittest.TestCase): sock = listen(('localhost', 0)) wsgi.run_server(conf, logger, sock) - _wsgi.server.assert_called() + self.assertTrue(_wsgi.server.called) args, kwargs = _wsgi.server.call_args self.assertEquals(kwargs.get('capitalize_response_headers'), False) @@ -464,7 +464,7 @@ class TestWSGI(unittest.TestCase): _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True) _eventlet.debug.hub_exceptions.assert_called_with(False) - _wsgi.server.assert_called() + self.assertTrue(_wsgi.server.called) args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) @@ -515,7 +515,7 @@ class TestWSGI(unittest.TestCase): _eventlet.patcher.monkey_patch.assert_called_with(all=False, socket=True) _eventlet.debug.hub_exceptions.assert_called_with(True) - mock_server.assert_called() + self.assertTrue(mock_server.called) args, kwargs = mock_server.call_args server_sock, server_app, server_logger = args self.assertEquals(sock, server_sock) diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index fc9973e452..9e5703ebfe 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -3170,7 +3170,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): reader.zero_copy_send(devnull.fileno()) # Assert the end of `zero_copy_send` was reached - mock_close.assert_called() + self.assertTrue(mock_close.called) # Assert there was at least one call to `trampoline` waiting for # `write` access to the output FD mock_trampoline.assert_any_call(devnull.fileno(), write=True) diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py index a65c8fba42..393bf4d503 100644 --- a/test/unit/obj/test_expirer.py +++ b/test/unit/obj/test_expirer.py @@ -742,7 +742,7 @@ class TestObjectExpirer(TestCase): x = expirer.ObjectExpirer({}) x.swift.make_request = mock.MagicMock() x.delete_actual_object(name, timestamp) - x.swift.make_request.assert_called_once() + 
self.assertEqual(x.swift.make_request.call_count, 1) self.assertEqual(x.swift.make_request.call_args[0][1], '/v1/' + urllib.quote(name)) From 278adf5c20101a191979ce1e4d6277e5f209149e Mon Sep 17 00:00:00 2001 From: Hisashi Osanai Date: Tue, 14 Jul 2015 15:33:45 +0900 Subject: [PATCH 97/98] Make the logic of unit tests match their method names The two methods, test_authorize_succeeds_for_tenant_name_user_in_roles and test_authorize_succeeds_for_tenant_id_user_in_roles, have names that don't match what they actually test: tenant_name and tenant_id need to be switched. Change-Id: I7cb0a7d2b2111127fd5d6b55f2da6a3eadf2235d --- test/unit/common/middleware/test_keystoneauth.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index b9d216ae61..44195d30ff 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -664,18 +664,18 @@ class TestAuthorize(BaseTestAuthorize): identity = self._get_identity() user_name = identity['HTTP_X_USER_NAME'] user_id = identity['HTTP_X_USER_ID'] - tenant_id = identity['HTTP_X_TENANT_ID'] + tenant_name = identity['HTTP_X_TENANT_NAME'] for user in [user_id, user_name, '*']: - acl = '%s:%s' % (tenant_id, user) + acl = '%s:%s' % (tenant_name, user) self._check_authenticate(identity=identity, acl=acl) def test_authorize_succeeds_for_tenant_id_user_in_roles(self): identity = self._get_identity() user_name = identity['HTTP_X_USER_NAME'] user_id = identity['HTTP_X_USER_ID'] - tenant_name = identity['HTTP_X_TENANT_NAME'] + tenant_id = identity['HTTP_X_TENANT_ID'] for user in [user_id, user_name, '*']: - acl = '%s:%s' % (tenant_name, user) + acl = '%s:%s' % (tenant_id, user) self._check_authenticate(identity=identity, acl=acl) def test_authorize_succeeds_for_wildcard_tenant_user_in_roles(self): From 51f806d3e3d3a1fcbc80d2f7d7ddbe5cc4c024c9 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Tue, 14 Jul 2015 20:49:08 -0700 Subject: [PATCH 98/98] remove Python 2.6 from the classifier Change-Id: I67233e9c7b69826242546bd6bd98c24b81070579 --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index d983a11a41..a40fc535ee 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,7 +15,6 @@ classifier = Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 - Programming Language :: Python :: 2.6 Programming Language :: Python :: 2.7 [pbr]