From 8e59dfbee2a67f84a1fd1d5cc1679ec514255fdc Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 19 Apr 2017 00:28:50 +0000 Subject: [PATCH 01/43] Log remote_merges during DB replication Change-Id: I1850f09bab16401479b5a0cc521f67a32ea9c9f5 --- swift/common/db_replicator.py | 4 +- test/unit/common/test_db_replicator.py | 57 ++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 30c6a35c12..4afa7c095a 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -229,9 +229,9 @@ class Replicator(Daemon): 'replication_last': now}, self.rcache, self.logger) self.logger.info(' '.join(['%s:%s' % item for item in - self.stats.items() if item[0] in + sorted(self.stats.items()) if item[0] in ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', - 'empty', 'diff_capped')])) + 'empty', 'diff_capped', 'remote_merge')])) def _add_failure_stats(self, failure_devs_info): for node, dev in failure_devs_info: diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 50f83c2d70..20fd01fb60 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -584,12 +584,61 @@ class TestDBReplicator(unittest.TestCase): self.assertFalse( replicator._usync_db(0, FakeBroker(), fake_http, '12345', '67890')) - def test_stats(self): - # I'm not sure how to test that this logs the right thing, - # but we can at least make sure it gets covered. 
- replicator = TestReplicator({}) + @mock.patch('swift.common.db_replicator.dump_recon_cache') + @mock.patch('swift.common.db_replicator.time.time', return_value=1234.5678) + def test_stats(self, mock_time, mock_recon_cache): + logger = unit.debug_logger('test-replicator') + replicator = TestReplicator({}, logger=logger) replicator._zero_stats() + self.assertEqual(replicator.stats['start'], mock_time.return_value) replicator._report_stats() + self.assertEqual(logger.get_lines_for_level('info'), [ + 'Attempted to replicate 0 dbs in 0.00000 seconds (0.00000/s)', + 'Removed 0 dbs', + '0 successes, 0 failures', + 'diff:0 diff_capped:0 empty:0 hashmatch:0 no_change:0 ' + 'remote_merge:0 rsync:0 ts_repl:0', + ]) + self.assertEqual(1, len(mock_recon_cache.mock_calls)) + self.assertEqual(mock_recon_cache.mock_calls[0][1][0], { + 'replication_time': 0.0, + 'replication_last': mock_time.return_value, + 'replication_stats': replicator.stats, + }) + + mock_recon_cache.reset_mock() + logger.clear() + replicator.stats.update({ + 'attempted': 30, + 'success': 25, + 'remove': 9, + 'failure': 1, + + 'diff': 5, + 'diff_capped': 4, + 'empty': 7, + 'hashmatch': 8, + 'no_change': 6, + 'remote_merge': 2, + 'rsync': 3, + 'ts_repl': 10, + }) + mock_time.return_value += 246.813576 + replicator._report_stats() + self.maxDiff = None + self.assertEqual(logger.get_lines_for_level('info'), [ + 'Attempted to replicate 30 dbs in 246.81358 seconds (0.12155/s)', + 'Removed 9 dbs', + '25 successes, 1 failures', + 'diff:5 diff_capped:4 empty:7 hashmatch:8 no_change:6 ' + 'remote_merge:2 rsync:3 ts_repl:10', + ]) + self.assertEqual(1, len(mock_recon_cache.mock_calls)) + self.assertEqual(mock_recon_cache.mock_calls[0][1][0], { + 'replication_time': 246.813576, + 'replication_last': mock_time.return_value, + 'replication_stats': replicator.stats, + }) def test_replicate_object(self): db_replicator.ring = FakeRingWithNodes() From 5e673a542401a2d95249c3c03f26175214f08c79 Mon Sep 17 00:00:00 2001 From: Tim 
Burke Date: Thu, 25 May 2017 09:11:43 -0700 Subject: [PATCH 02/43] Log deprecation warning for allow_versions ...with the hope of removing support for the option in the future. We'll forever need to watch for X-Versions-Location in on-disk data, though. Change-Id: I19c4c66102aa96df393a642dbcf984f77aa3f25a Related-Change: Ie899290b3312e201979eafefb253d1a60b65b837 --- swift/container/server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/swift/container/server.py b/swift/container/server.py index 152603f6df..6bfd0a167c 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -111,6 +111,11 @@ class ContainerController(BaseStorageServer): conf.get('auto_create_account_prefix') or '.' if config_true_value(conf.get('allow_versions', 'f')): self.save_headers.append('x-versions-location') + if 'allow_versions' in conf: + self.logger.warning('Option allow_versions is deprecated. ' + 'Configure the versioned_writes middleware in ' + 'the proxy-server instead. This option will ' + 'be ignored in a future release.') swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) self.sync_store = ContainerSyncStore(self.root, From 849d204c596c9089dab606ece72c84092ad156ca Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 12 May 2017 10:43:30 -0400 Subject: [PATCH 03/43] domain_remap: be more careful about client-path mangling The root_path option for domain_remap seems to serve two purposes: - provide the first component (version) for the backend request - be an optional leading component for the client request, which should be stripped off As a result, we have mappings like: c.a.example.com/ -> /v1/AUTH_a/c/ c.a.example.com/o -> /v1/AUTH_a/c/o c.a.example.com/v1/o -> /v1/AUTH_a/c/o Currently, we don't really care about whether there was a full- or partial-match in that first component, leading to mappings like c.a.example.com/v1o -> /v1/AUTH_a/c/o If we're going to continue supporting that second function, we should only 
consider full-matches, so we'll have c.a.example.com/v1o -> /v1/AUTH_a/c/v1o Change-Id: Ibdc97bb8daf117ad46177617f170d03e481b0007 --- swift/common/middleware/domain_remap.py | 11 +++++------ test/unit/common/middleware/test_domain_remap.py | 6 ++++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index 13cfee95cf..c0e6d91bc8 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -78,7 +78,7 @@ class DomainRemapMiddleware(object): if not s.startswith('.')] self.storage_domain += [s for s in list_from_csv(storage_domain) if s.startswith('.')] - self.path_root = '/' + conf.get('path_root', 'v1').strip('/') + self.path_root = conf.get('path_root', 'v1').strip('/') + '/' prefixes = conf.get('reseller_prefixes', 'AUTH') self.reseller_prefixes = list_from_csv(prefixes) self.reseller_prefixes_lower = [x.lower() @@ -129,14 +129,13 @@ class DomainRemapMiddleware(object): # account prefix is not in config list. bail. 
return self.app(env, start_response) - requested_path = path = env['PATH_INFO'] - new_path_parts = [self.path_root, account] + requested_path = env['PATH_INFO'] + path = requested_path[1:] + new_path_parts = ['', self.path_root[:-1], account] if container: new_path_parts.append(container) - if path.startswith(self.path_root): + if (path + '/').startswith(self.path_root): path = path[len(self.path_root):] - if path.startswith('/'): - path = path[1:] new_path_parts.append(path) new_path = '/'.join(new_path_parts) env['PATH_INFO'] = new_path diff --git a/test/unit/common/middleware/test_domain_remap.py b/test/unit/common/middleware/test_domain_remap.py index 0b5e2fab74..d304711a13 100644 --- a/test/unit/common/middleware/test_domain_remap.py +++ b/test/unit/common/middleware/test_domain_remap.py @@ -131,6 +131,12 @@ class TestDomainRemap(unittest.TestCase): resp = self.app(req.environ, start_response) self.assertEqual(resp, ['/v1/AUTH_a/c/obj']) + def test_domain_remap_with_path_root_and_path_no_slash(self): + req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c/v1obj']) + def test_domain_remap_account_matching_ending_not_domain(self): req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'}, headers={'Host': 'c.aexample.com'}) From 808ff4fff74415d94d620f74501d28d38f451990 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 15 Jun 2017 16:08:43 -0700 Subject: [PATCH 04/43] Ignore all auditor_status_*.json files in reconstructor ...just like we do for the replicator. This allows third parties to define custom audit types that re-use the object_audit_location_generator API without having the reconstructor yell at them. 
Change-Id: I4372a1712a112705c1f906386b1cb55901256295 Related-Change: I2f3d0bd2f1e242db6eb263c7755f1363d1430048 Related-Change: Ib15a0987288d9ee32432c1998aefe638ca3b223b --- swift/obj/reconstructor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py index 76d4889044..3024d32198 100644 --- a/swift/obj/reconstructor.py +++ b/swift/obj/reconstructor.py @@ -949,8 +949,9 @@ class ObjectReconstructor(Daemon): self.part_count += len(partitions) for partition in partitions: part_path = join(obj_path, partition) - if partition in ('auditor_status_ALL.json', - 'auditor_status_ZBF.json'): + if (partition.startswith('auditor_status_') and + partition.endswith('.json')): + # ignore auditor status files continue if not partition.isdigit(): self.logger.warning( From 9c3c3880916dced3e04165c6a3dd79ec5ebb281b Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 7 Jun 2017 11:33:29 +0100 Subject: [PATCH 05/43] Improve domain_remap docs * Make the conditions for remapping clearer * Mention the path_root * Mention '-' -> '_' replacement in account names * Make example consistent with default options Change-Id: Ifd3f3775bb8b13367d964010f35813018b5b41b3 --- etc/proxy-server.conf-sample | 2 + swift/common/middleware/domain_remap.py | 98 ++++++++++++++++++------- 2 files changed, 75 insertions(+), 25 deletions(-) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index ce4c656caf..512d494d99 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -529,6 +529,8 @@ use = egg:swift#domain_remap # can be specified separated by a comma # storage_domain = example.com +# Specify a root path part that will be added to the start of paths if not +# already present. 
# path_root = v1 # Browsers can convert a host header to lowercase, so check that reseller diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index c0e6d91bc8..34e01da531 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -17,37 +17,85 @@ """ Domain Remap Middleware -Middleware that translates container and account parts of a domain to -path parameters that the proxy server understands. +Middleware that translates container and account parts of a domain to path +parameters that the proxy server understands. -container.account.storageurl/object gets translated to -container.account.storageurl/path_root/account/container/object +Translation is only performed when the request URL's host domain matches one of +a list of domains. This list may be configured by the option +``storage_domain``, and defaults to the single domain ``example.com``. -account.storageurl/path_root/container/object gets translated to -account.storageurl/path_root/account/container/object +If not already present, a configurable ``path_root``, which defaults to ``v1``, +will be added to the start of the translated path. -Browsers can convert a host header to lowercase, so check that reseller -prefix on the account is the correct case. This is done by comparing the -items in the reseller_prefixes config option to the found prefix. If they -match except for case, the item from reseller_prefixes will be used -instead of the found reseller prefix. When none match, the default reseller -prefix is used. When no default reseller prefix is configured, any request with -an account prefix not in that list will be ignored by this middleware. -reseller_prefixes defaults to 'AUTH'. 
+For example, with the default configuration:: + + container.AUTH-account.example.com/object + container.AUTH-account.example.com/v1/object + +would both be translated to:: + + container.AUTH-account.example.com/v1/AUTH_account/container/object + +and:: + + AUTH-account.example.com/container/object + AUTH-account.example.com/v1/container/object + +would both be translated to:: + + AUTH-account.example.com/v1/AUTH_account/container/object + +Additionally, translation is only performed when the account name in the +translated path starts with a reseller prefix matching one of a list configured +by the option ``reseller_prefixes``, or when no match is found but a +``default_reseller_prefix`` has been configured. + +The ``reseller_prefixes`` list defaults to the single prefix ``AUTH``. The +``default_reseller_prefix`` is not configured by default. + +Browsers can convert a host header to lowercase, so the middleware checks that +the reseller prefix on the account name is the correct case. This is done by +comparing the items in the ``reseller_prefixes`` config option to the found +prefix. If they match except for case, the item from ``reseller_prefixes`` will +be used instead of the found reseller prefix. The middleware will also replace +any hyphen ('-') in the account name with an underscore ('_'). + +For example, with the default configuration:: + + auth-account.example.com/container/object + AUTH-account.example.com/container/object + auth_account.example.com/container/object + AUTH_account.example.com/container/object + +would all be translated to:: + + .example.com/v1/AUTH_account/container/object + +When no match is found in ``reseller_prefixes``, the +``default_reseller_prefix`` config option is used. When no +``default_reseller_prefix`` is configured, any request with an account prefix +not in the ``reseller_prefixes`` list will be ignored by this middleware. 
+ +For example, with ``default_reseller_prefix = AUTH``:: + + account.example.com/container/object + +would be translated to:: + + account.example.com/v1/AUTH_account/container/object Note that this middleware requires that container names and account names -(except as described above) must be DNS-compatible. This means that the -account name created in the system and the containers created by users -cannot exceed 63 characters or have UTF-8 characters. These are -restrictions over and above what swift requires and are not explicitly -checked. Simply put, the this middleware will do a best-effort attempt to -derive account and container names from elements in the domain name and -put those derived values into the URL path (leaving the Host header -unchanged). +(except as described above) must be DNS-compatible. This means that the account +name created in the system and the containers created by users cannot exceed 63 +characters or have UTF-8 characters. These are restrictions over and above what +Swift requires and are not explicitly checked. Simply put, this middleware +will do a best-effort attempt to derive account and container names from +elements in the domain name and put those derived values into the URL path +(leaving the ``Host`` header unchanged). -Also note that using container sync with remapped domain names is not -advised. With container sync, you should use the true storage end points as -sync destinations. +Also note that using :doc:`overview_container_sync` with remapped domain names +is not advised. With :doc:`overview_container_sync`, you should use the true +storage end points as sync destinations. """ from swift.common.middleware import RewriteContext From 163fb4d52a85e0467c8c6b616e2cd9faa1faa41b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavel=20Kvasni=C4=8Dka?= Date: Wed, 19 Apr 2017 15:09:40 +0200 Subject: [PATCH 06/43] Always require device dir for containers For test purposes (e.g. 
saio probetests) even if mount_check is False, still require check_dir for account/container server storage when real mount points are not used. This behavior is consistent with the object-server's checks in diskfile. Co-Author: Clay Gerrard Related lp bug #1693005 Related-Change-Id: I344f9daaa038c6946be11e1cf8c4ef104a09e68b Depends-On: I52c4ecb70b1ae47e613ba243da5a4d94e5adedf2 Change-Id: I3362a6ebff423016bb367b4b6b322bb41ae08764 --- swift/account/server.py | 14 +-- swift/common/constraints.py | 30 +++++-- swift/common/middleware/recon.py | 4 +- swift/container/server.py | 14 +-- swift/obj/diskfile.py | 11 ++- test/unit/__init__.py | 57 ++++--------- test/unit/account/test_server.py | 98 ++++++++++----------- test/unit/common/test_constraints.py | 58 +++++++++---- test/unit/container/test_server.py | 122 +++++++++++++-------------- test/unit/obj/test_auditor.py | 4 +- test/unit/obj/test_diskfile.py | 26 +++--- test/unit/obj/test_reconstructor.py | 41 +++++---- test/unit/obj/test_server.py | 80 ++++++++++++++---- test/unit/obj/test_ssync_receiver.py | 23 +++-- 14 files changed, 319 insertions(+), 263 deletions(-) diff --git a/swift/account/server.py b/swift/account/server.py index 5334b97c1c..c67ac5d97d 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -29,7 +29,7 @@ from swift.common.request_helpers import get_param, get_listing_content_type, \ from swift.common.utils import get_logger, hash_path, public, \ Timestamp, storage_directory, config_true_value, \ json, timing_stats, replication, get_log_line -from swift.common.constraints import check_mount, valid_timestamp, check_utf8 +from swift.common.constraints import valid_timestamp, check_utf8, check_drive from swift.common import constraints from swift.common.db_replicator import ReplicatorRpc from swift.common.base_storage_server import BaseStorageServer @@ -87,7 +87,7 @@ class AccountController(BaseStorageServer): def DELETE(self, req): """Handle HTTP DELETE request.""" drive, part, account 
= split_and_validate_path(req, 3) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) req_timestamp = valid_timestamp(req) broker = self._get_account_broker(drive, part, account) @@ -101,7 +101,7 @@ class AccountController(BaseStorageServer): def PUT(self, req): """Handle HTTP PUT request.""" drive, part, account, container = split_and_validate_path(req, 3, 4) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) if container: # put account container if 'x-timestamp' not in req.headers: @@ -168,7 +168,7 @@ class AccountController(BaseStorageServer): """Handle HTTP HEAD request.""" drive, part, account = split_and_validate_path(req, 3) out_content_type = get_listing_content_type(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account, pending_timeout=0.1, @@ -203,7 +203,7 @@ class AccountController(BaseStorageServer): end_marker = get_param(req, 'end_marker') out_content_type = get_listing_content_type(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account, pending_timeout=0.1, @@ -224,7 +224,7 @@ class AccountController(BaseStorageServer): """ post_args = split_and_validate_path(req, 3) drive, partition, hash = post_args - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) try: args = json.load(req.environ['wsgi.input']) @@ -240,7 +240,7 @@ class 
AccountController(BaseStorageServer): """Handle HTTP POST request.""" drive, part, account = split_and_validate_path(req, 3) req_timestamp = valid_timestamp(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account) if broker.is_deleted(): diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 679926356a..e0a0851fae 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -15,6 +15,7 @@ import functools import os +from os.path import isdir # tighter scoped import for mocking import time import six @@ -234,9 +235,9 @@ def check_dir(root, drive): :param root: base path where the dir is :param drive: drive name to be checked - :returns: True if it is a valid directoy, False otherwise + :returns: full path to the device, or None if drive fails to validate """ - return os.path.isdir(os.path.join(root, drive)) + return check_drive(root, drive, False) def check_mount(root, drive): @@ -248,12 +249,31 @@ def check_mount(root, drive): :param root: base path where the devices are mounted :param drive: drive name to be checked - :returns: True if it is a valid mounted device, False otherwise + :returns: full path to the device, or None if drive fails to validate + """ + return check_drive(root, drive, True) + + +def check_drive(root, drive, mount_check): + """ + Validate the path given by root and drive is a valid existing directory. 
+ + :param root: base path where the devices are mounted + :param drive: drive name to be checked + :param mount_check: additionally require path is mounted + + :returns: full path to the device, or None if drive fails to validate """ if not (urllib.parse.quote_plus(drive) == drive): - return False + return None path = os.path.join(root, drive) - return utils.ismount(path) + if mount_check: + if utils.ismount(path): + return path + else: + if isdir(path): + return path + return None def check_float(string): diff --git a/swift/common/middleware/recon.py b/swift/common/middleware/recon.py index c9c994fe72..1ad36244b2 100644 --- a/swift/common/middleware/recon.py +++ b/swift/common/middleware/recon.py @@ -209,7 +209,7 @@ class ReconMiddleware(object): continue try: - mounted = check_mount(self.devices, entry) + mounted = bool(check_mount(self.devices, entry)) except OSError as err: mounted = str(err) mpoint = {'device': entry, 'mounted': mounted} @@ -225,7 +225,7 @@ class ReconMiddleware(object): continue try: - mounted = check_mount(self.devices, entry) + mounted = bool(check_mount(self.devices, entry)) except OSError as err: devices.append({'device': entry, 'mounted': str(err), 'size': '', 'used': '', 'avail': ''}) diff --git a/swift/container/server.py b/swift/container/server.py index c71925f1b1..0c58089c10 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -35,7 +35,7 @@ from swift.common.utils import get_logger, hash_path, public, \ Timestamp, storage_directory, validate_sync_to, \ config_true_value, timing_stats, replication, \ override_bytes_from_content_type, get_log_line -from swift.common.constraints import check_mount, valid_timestamp, check_utf8 +from swift.common.constraints import valid_timestamp, check_utf8, check_drive from swift.common import constraints from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout @@ -263,7 +263,7 @@ class ContainerController(BaseStorageServer): 
drive, part, account, container, obj = split_and_validate_path( req, 4, 5, True) req_timestamp = valid_timestamp(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) # policy index is only relevant for delete_obj (and transitively for # auto create accounts) @@ -351,7 +351,7 @@ class ContainerController(BaseStorageServer): self.realms_conf) if err: return HTTPBadRequest(err) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) requested_policy_index = self.get_and_validate_policy_index(req) broker = self._get_container_broker(drive, part, account, container) @@ -419,7 +419,7 @@ class ContainerController(BaseStorageServer): drive, part, account, container, obj = split_and_validate_path( req, 4, 5, True) out_content_type = get_listing_content_type(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, pending_timeout=0.1, @@ -483,7 +483,7 @@ class ContainerController(BaseStorageServer): body='Maximum limit is %d' % constraints.CONTAINER_LISTING_LIMIT) out_content_type = get_listing_content_type(req) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, pending_timeout=0.1, @@ -545,7 +545,7 @@ class ContainerController(BaseStorageServer): """ post_args = split_and_validate_path(req, 3) drive, partition, hash = post_args - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return 
HTTPInsufficientStorage(drive=drive, request=req) try: args = json.load(req.environ['wsgi.input']) @@ -567,7 +567,7 @@ class ContainerController(BaseStorageServer): self.realms_conf) if err: return HTTPBadRequest(err) - if self.mount_check and not check_mount(self.root, drive): + if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container) if broker.is_deleted(): diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 362b3a3f34..42d5eacc47 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -57,7 +57,7 @@ from pyeclib.ec_iface import ECDriverError, ECInvalidFragmentMetadata, \ ECBadFragmentChecksum, ECInvalidParameter from swift import gettext_ as _ -from swift.common.constraints import check_mount, check_dir +from swift.common.constraints import check_drive from swift.common.request_helpers import is_sys_meta from swift.common.utils import mkdirs, Timestamp, \ storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \ @@ -1191,12 +1191,11 @@ class BaseDiskFileManager(object): # we'll do some kind of check unless explicitly forbidden if mount_check is not False: if mount_check or self.mount_check: - check = check_mount + mount_check = True else: - check = check_dir - if not check(self.devices, device): - return None - return os.path.join(self.devices, device) + mount_check = False + return check_drive(self.devices, device, mount_check) + return join(self.devices, device) @contextmanager def replication_lock(self, device): diff --git a/test/unit/__init__.py b/test/unit/__init__.py index d9750b7f8b..7f7726d25b 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -709,49 +709,28 @@ def quiet_eventlet_exceptions(): eventlet_debug.hub_exceptions(orig_state) -class MockTrue(object): +@contextmanager +def mock_check_drive(isdir=False, ismount=False): """ - Instances of MockTrue evaluate like True - Any 
attr accessed on an instance of MockTrue will return a MockTrue - instance. Any method called on an instance of MockTrue will return - a MockTrue instance. + All device/drive/mount checking should be done through the constraints + module if we keep the mocking consistly w/i that module we can keep our + test robust to further rework on that interface. - >>> thing = MockTrue() - >>> thing - True - >>> thing == True # True == True - True - >>> thing == False # True == False - False - >>> thing != True # True != True - False - >>> thing != False # True != False - True - >>> thing.attribute - True - >>> thing.method() - True - >>> thing.attribute.method() - True - >>> thing.method().attribute - True + Replace the constraint modules underlying os calls with mocks. + :param isdir: return value of constraints isdir calls, default False + :param ismount: return value of constraints ismount calls, default False + :returns: a dict of constraint module mocks """ - - def __getattribute__(self, *args, **kwargs): - return self - - def __call__(self, *args, **kwargs): - return self - - def __repr__(*args, **kwargs): - return repr(True) - - def __eq__(self, other): - return other is True - - def __ne__(self, other): - return other is not True + mock_base = 'swift.common.constraints.' 
+ with mocklib.patch(mock_base + 'isdir') as mock_isdir, \ + mocklib.patch(mock_base + 'utils.ismount') as mock_ismount: + mock_isdir.return_value = isdir + mock_ismount.return_value = ismount + yield { + 'isdir': mock_isdir, + 'ismount': mock_ismount, + } @contextmanager diff --git a/test/unit/account/test_server.py b/test/unit/account/test_server.py index da1712c210..cef4efc345 100644 --- a/test/unit/account/test_server.py +++ b/test/unit/account/test_server.py @@ -36,7 +36,7 @@ from swift.account.server import AccountController from swift.common.utils import (normalize_timestamp, replication, public, mkdirs, storage_directory, Timestamp) from swift.common.request_helpers import get_sys_meta_prefix -from test.unit import patch_policies, debug_logger +from test.unit import patch_policies, debug_logger, mock_check_drive from swift.common.storage_policy import StoragePolicy, POLICIES @@ -47,6 +47,7 @@ class TestAccountController(unittest.TestCase): """Set up for testing swift.account.server.AccountController""" self.testdir_base = mkdtemp() self.testdir = os.path.join(self.testdir_base, 'account_server') + mkdirs(os.path.join(self.testdir, 'sda1')) self.controller = AccountController( {'devices': self.testdir, 'mount_check': 'false'}) @@ -71,6 +72,50 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.headers['Server'], (server_handler.server_type + '/' + swift_version)) + def test_insufficient_storage_mount_check_true(self): + conf = {'devices': self.testdir, 'mount_check': 'true'} + account_controller = AccountController(conf) + self.assertTrue(account_controller.mount_check) + for method in account_controller.allowed_methods: + if method == 'OPTIONS': + continue + req = Request.blank('/sda1/p/a-or-suff', method=method, + headers={'x-timestamp': '1'}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(account_controller) + self.assertEqual(resp.status_int, 507) + mocks['ismount'].return_value = True + resp = 
req.get_response(account_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... + expected = 2 if method == 'PUT' else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + + def test_insufficient_storage_mount_check_false(self): + conf = {'devices': self.testdir, 'mount_check': 'false'} + account_controller = AccountController(conf) + self.assertFalse(account_controller.mount_check) + for method in account_controller.allowed_methods: + if method == 'OPTIONS': + continue + req = Request.blank('/sda1/p/a-or-suff', method=method, + headers={'x-timestamp': '1'}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(account_controller) + self.assertEqual(resp.status_int, 507) + mocks['isdir'].return_value = True + resp = req.get_response(account_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... + expected = 2 if method == 'PUT' else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + def test_DELETE_not_found(self): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE', 'HTTP_X_TIMESTAMP': '0'}) @@ -147,29 +192,6 @@ class TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_DELETE_insufficient_storage(self): - self.controller = AccountController({'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - - def test_REPLICATE_insufficient_storage(self): - conf = {'devices': self.testdir, 'mount_check': 'true'} - self.account_controller = AccountController(conf) - - def fake_check_mount(*args, **kwargs): - return False - - with 
mock.patch("swift.common.constraints.check_mount", - fake_check_mount): - req = Request.blank('/sda1/p/suff', - environ={'REQUEST_METHOD': 'REPLICATE'}, - headers={}) - resp = req.get_response(self.account_controller) - self.assertEqual(resp.status_int, 507) - def test_REPLICATE_rsync_then_merge_works(self): def fake_rsync_then_merge(self, drive, db_file, args): return HTTPNoContent() @@ -331,13 +353,6 @@ class TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 406) - def test_HEAD_insufficient_storage(self): - self.controller = AccountController({'devices': self.testdir}) - req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'HEAD', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_HEAD_invalid_format(self): format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D) req = Request.blank('/sda1/p/a?format=' + format, @@ -569,13 +584,6 @@ class TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_PUT_insufficient_storage(self): - self.controller = AccountController({'devices': self.testdir}) - req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_POST_HEAD_metadata(self): req = Request.blank( '/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'}, @@ -693,13 +701,6 @@ class TestAccountController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_POST_insufficient_storage(self): - self.controller = AccountController({'devices': self.testdir}) - req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'POST', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def 
test_POST_after_DELETE_not_found(self): req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'}) @@ -1502,13 +1503,6 @@ class TestAccountController(unittest.TestCase): listing.append(node2.firstChild.nodeValue) self.assertEqual(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2']) - def test_GET_insufficient_storage(self): - self.controller = AccountController({'devices': self.testdir}) - req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'GET', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_through_call(self): inbuf = BytesIO() errbuf = StringIO() diff --git a/test/unit/common/test_constraints.py b/test/unit/common/test_constraints.py index a15045313e..c975a135c3 100644 --- a/test/unit/common/test_constraints.py +++ b/test/unit/common/test_constraints.py @@ -20,7 +20,7 @@ import time from six.moves import range from test import safe_repr -from test.unit import MockTrue +from test.unit import mock_check_drive from swift.common.swob import Request, HTTPException from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \ @@ -372,21 +372,49 @@ class TestConstraints(unittest.TestCase): self.assertTrue('X-Delete-At' in req.headers) self.assertEqual(req.headers['X-Delete-At'], expected) - def test_check_dir(self): - self.assertFalse(constraints.check_dir('', '')) - with mock.patch("os.path.isdir", MockTrue()): - self.assertTrue(constraints.check_dir('/srv', 'foo/bar')) + def test_check_drive_invalid_path(self): + root = '/srv/' + with mock_check_drive() as mocks: + self.assertIsNone(constraints.check_dir(root, 'foo?bar')) + self.assertIsNone(constraints.check_mount(root, 'foo bar')) + self.assertIsNone(constraints.check_drive(root, 'foo/bar', True)) + self.assertIsNone(constraints.check_drive(root, 'foo%bar', False)) + self.assertEqual([], mocks['isdir'].call_args_list) + self.assertEqual([], mocks['ismount'].call_args_list) - def test_check_mount(self): 
- self.assertFalse(constraints.check_mount('', '')) - with mock.patch("swift.common.utils.ismount", MockTrue()): - self.assertTrue(constraints.check_mount('/srv', '1')) - self.assertTrue(constraints.check_mount('/srv', 'foo-bar')) - self.assertTrue(constraints.check_mount( - '/srv', '003ed03c-242a-4b2f-bee9-395f801d1699')) - self.assertFalse(constraints.check_mount('/srv', 'foo bar')) - self.assertFalse(constraints.check_mount('/srv', 'foo/bar')) - self.assertFalse(constraints.check_mount('/srv', 'foo?bar')) + def test_check_drive_ismount(self): + root = '/srv' + path = 'sdb1' + with mock_check_drive(ismount=True) as mocks: + self.assertIsNone(constraints.check_dir(root, path)) + self.assertIsNone(constraints.check_drive(root, path, False)) + self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')], + mocks['isdir'].call_args_list) + self.assertEqual([], mocks['ismount'].call_args_list) + with mock_check_drive(ismount=True) as mocks: + self.assertEqual('/srv/sdb1', constraints.check_mount(root, path)) + self.assertEqual('/srv/sdb1', constraints.check_drive( + root, path, True)) + self.assertEqual([], mocks['isdir'].call_args_list) + self.assertEqual([mock.call('/srv/sdb1'), mock.call('/srv/sdb1')], + mocks['ismount'].call_args_list) + + def test_check_drive_isdir(self): + root = '/srv' + path = 'sdb2' + with mock_check_drive(isdir=True) as mocks: + self.assertEqual('/srv/sdb2', constraints.check_dir(root, path)) + self.assertEqual('/srv/sdb2', constraints.check_drive( + root, path, False)) + self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')], + mocks['isdir'].call_args_list) + self.assertEqual([], mocks['ismount'].call_args_list) + with mock_check_drive(isdir=True) as mocks: + self.assertIsNone(constraints.check_mount(root, path)) + self.assertIsNone(constraints.check_drive(root, path, True)) + self.assertEqual([], mocks['isdir'].call_args_list) + self.assertEqual([mock.call('/srv/sdb2'), mock.call('/srv/sdb2')], + 
mocks['ismount'].call_args_list) def test_check_float(self): self.assertFalse(constraints.check_float('')) diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 760b1c6557..f0339c7852 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -41,7 +41,7 @@ from swift.container import server as container_server from swift.common import constraints from swift.common.utils import (Timestamp, mkdirs, public, replication, storage_directory, lock_parent_directory) -from test.unit import fake_http_connect, debug_logger +from test.unit import fake_http_connect, debug_logger, mock_check_drive from swift.common.storage_policy import (POLICIES, StoragePolicy) from swift.common.request_helpers import get_sys_meta_prefix @@ -63,9 +63,8 @@ def save_globals(): class TestContainerController(unittest.TestCase): """Test swift.container.server.ContainerController""" def setUp(self): - """Set up for testing swift.object_server.ObjectController""" - self.testdir = os.path.join(mkdtemp(), - 'tmp_test_object_server_ObjectController') + self.testdir = os.path.join( + mkdtemp(), 'tmp_test_container_server_ContainerController') mkdirs(self.testdir) rmtree(self.testdir) mkdirs(os.path.join(self.testdir, 'sda1')) @@ -305,15 +304,6 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_HEAD_insufficient_storage(self): - self.controller = container_server.ContainerController( - {'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'HEAD', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_HEAD_invalid_content_type(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'HEAD'}, @@ -343,6 +333,60 @@ class TestContainerController(unittest.TestCase): self.assertEqual(resp.headers['Server'], 
(self.controller.server_type + '/' + swift_version)) + def test_insufficient_storage_mount_check_true(self): + conf = {'devices': self.testdir, 'mount_check': 'true'} + container_controller = container_server.ContainerController(conf) + self.assertTrue(container_controller.mount_check) + for method in container_controller.allowed_methods: + if method == 'OPTIONS': + continue + path = '/sda1/p/' + if method == 'REPLICATE': + path += 'suff' + else: + path += 'a/c' + req = Request.blank(path, method=method, + headers={'x-timestamp': '1'}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(container_controller) + self.assertEqual(resp.status_int, 507) + mocks['ismount'].return_value = True + resp = req.get_response(container_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... + expected = 2 if method == 'PUT' else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + + def test_insufficient_storage_mount_check_false(self): + conf = {'devices': self.testdir, 'mount_check': 'false'} + container_controller = container_server.ContainerController(conf) + self.assertFalse(container_controller.mount_check) + for method in container_controller.allowed_methods: + if method == 'OPTIONS': + continue + path = '/sda1/p/' + if method == 'REPLICATE': + path += 'suff' + else: + path += 'a/c' + req = Request.blank(path, method=method, + headers={'x-timestamp': '1'}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(container_controller) + self.assertEqual(resp.status_int, 507) + mocks['isdir'].return_value = True + resp = req.get_response(container_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... 
+ expected = 2 if method == 'PUT' else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + def test_PUT(self): req = Request.blank( '/sda1/p/a/c', @@ -813,15 +857,6 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_PUT_insufficient_storage(self): - self.controller = container_server.ContainerController( - {'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'PUT', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_POST_HEAD_metadata(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'}, @@ -948,15 +983,6 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_POST_insufficient_storage(self): - self.controller = container_server.ContainerController( - {'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'POST', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_POST_invalid_container_sync_to(self): self.controller = container_server.ContainerController( {'devices': self.testdir}) @@ -1270,22 +1296,6 @@ class TestContainerController(unittest.TestCase): sync_containers = [c for c in sync_store.synced_containers_generator()] self.assertFalse(sync_containers) - def test_REPLICATE_insufficient_storage(self): - conf = {'devices': self.testdir, 'mount_check': 'true'} - self.container_controller = container_server.ContainerController( - conf) - - def fake_check_mount(*args, **kwargs): - return False - - with mock.patch("swift.common.constraints.check_mount", - fake_check_mount): - req = Request.blank('/sda1/p/suff', - environ={'REQUEST_METHOD': 'REPLICATE'}, - 
headers={}) - resp = req.get_response(self.container_controller) - self.assertEqual(resp.status_int, 507) - def test_REPLICATE_rsync_then_merge_works(self): def fake_rsync_then_merge(self, drive, db_file, args): return HTTPNoContent() @@ -2012,15 +2022,6 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.controller) self.assertEqual(resp.status_int, 400) - def test_DELETE_insufficient_storage(self): - self.controller = container_server.ContainerController( - {'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'DELETE', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_GET_over_limit(self): req = Request.blank( '/sda1/p/a/c?limit=%d' % @@ -2603,15 +2604,6 @@ class TestContainerController(unittest.TestCase): "content_type": "text/plain", "last_modified": "1970-01-01T00:00:01.000000"}]) - def test_GET_insufficient_storage(self): - self.controller = container_server.ContainerController( - {'devices': self.testdir}) - req = Request.blank( - '/sda-null/p/a/c', environ={'REQUEST_METHOD': 'GET', - 'HTTP_X_TIMESTAMP': '1'}) - resp = req.get_response(self.controller) - self.assertEqual(resp.status_int, 507) - def test_through_call(self): inbuf = BytesIO() errbuf = StringIO() diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 0947f2f870..95a7533ec2 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -906,7 +906,7 @@ class TestAuditor(unittest.TestCase): self.disk_file.delete(ts_tomb) # this get_hashes call will truncate the invalid hashes entry self.disk_file.manager.get_hashes( - self.devices + '/sda', '0', [], self.disk_file.policy) + 'sda', '0', [], self.disk_file.policy) suffix = basename(dirname(self.disk_file._datadir)) part_dir = dirname(dirname(self.disk_file._datadir)) # sanity checks... 
@@ -1011,7 +1011,7 @@ class TestAuditor(unittest.TestCase): # this get_hashes call will truncate the invalid hashes entry self.disk_file.manager.get_hashes( - self.devices + '/sda', '0', [], self.disk_file.policy) + 'sda', '0', [], self.disk_file.policy) with open(hash_invalid, 'rb') as fp: self.assertEqual('', fp.read().strip('\n')) # sanity check diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 7038a571ac..2d39c5c9af 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -44,7 +44,8 @@ from swift.obj.diskfile import MD5_OF_EMPTY_STRING, update_auditor_status from test.unit import (FakeLogger, mock as unit_mock, temptree, patch_policies, debug_logger, EMPTY_ETAG, make_timestamp_iter, DEFAULT_TEST_EC_TYPE, - requires_o_tmpfile_support, encode_frag_archive_bodies) + requires_o_tmpfile_support, encode_frag_archive_bodies, + mock_check_drive) from nose import SkipTest from swift.obj import diskfile from swift.common import utils @@ -2944,32 +2945,26 @@ class DiskFileMixin(BaseDiskFileTestMixin): mount_check = None self.df_mgr.mount_check = True - with mock.patch('swift.obj.diskfile.check_mount', - mock.MagicMock(return_value=False)): + with mock_check_drive(ismount=False): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), None) - with mock.patch('swift.obj.diskfile.check_mount', - mock.MagicMock(return_value=True)): + with mock_check_drive(ismount=True): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), dev_path) self.df_mgr.mount_check = False - with mock.patch('swift.obj.diskfile.check_dir', - mock.MagicMock(return_value=False)): + with mock_check_drive(isdir=False): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), None) - with mock.patch('swift.obj.diskfile.check_dir', - mock.MagicMock(return_value=True)): + with mock_check_drive(isdir=True): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), dev_path) mount_check = True - with 
mock.patch('swift.obj.diskfile.check_mount', - mock.MagicMock(return_value=False)): + with mock_check_drive(ismount=False): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), None) - with mock.patch('swift.obj.diskfile.check_mount', - mock.MagicMock(return_value=True)): + with mock_check_drive(ismount=True): self.assertEqual(self.df_mgr.get_dev_path(device, mount_check), dev_path) @@ -5855,7 +5850,7 @@ class TestSuffixHashes(unittest.TestCase): suffix_dir = os.path.dirname(df._datadir) for i in itertools.count(): df2 = df._manager.get_diskfile( - df._device_path, + os.path.basename(df._device_path), df._datadir.split('/')[-3], df._account, df._container, @@ -7706,8 +7701,7 @@ class TestSuffixHashes(unittest.TestCase): for policy in self.iter_policies(): df_mgr = self.df_router[policy] df_mgr.mount_check = True - with mock.patch('swift.obj.diskfile.check_mount', - mock.MagicMock(side_effect=[False])): + with mock_check_drive(ismount=False): self.assertRaises( DiskFileDeviceUnavailable, df_mgr.get_hashes, self.existing_device, '0', ['123'], diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py index 25ace7339a..176afdfca1 100644 --- a/test/unit/obj/test_reconstructor.py +++ b/test/unit/obj/test_reconstructor.py @@ -2762,46 +2762,51 @@ class TestObjectReconstructor(BaseTestObjectReconstructor): paths = [] - def fake_check_mount(devices, device): - paths.append(os.path.join(devices, device)) - return False + def fake_check_drive(devices, device, mount_check): + path = os.path.join(devices, device) + if (not mount_check) and os.path.isdir(path): + # while mount_check is false, the test still creates the dirs + paths.append(path) + return path + return None with mock.patch('swift.obj.reconstructor.whataremyips', return_value=[self.ip]), \ mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), \ - mock.patch('swift.obj.diskfile.check_mount', - fake_check_mount): + mock.patch('swift.obj.diskfile.check_drive', 
+ fake_check_drive): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual(2, len(part_infos)) # sanity, same jobs self.assertEqual(set(int(p['partition']) for p in part_infos), set([0, 1])) - # ... because ismount was not called - self.assertEqual(paths, []) + # ... because fake_check_drive returned paths for both dirs + self.assertEqual(set(paths), set([ + os.path.join(self.devices, dev) for dev in local_devs])) # ... now with mount check self._configure_reconstructor(mount_check=True) self.assertTrue(self.reconstructor.mount_check) + paths = [] for policy in POLICIES: self.assertTrue(self.reconstructor._df_router[policy].mount_check) with mock.patch('swift.obj.reconstructor.whataremyips', return_value=[self.ip]), \ mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), \ - mock.patch('swift.obj.diskfile.check_mount', - fake_check_mount): + mock.patch('swift.obj.diskfile.check_drive', + fake_check_drive): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual([], part_infos) # sanity, no jobs - # ... because fake_ismount returned False for both paths - self.assertEqual(set(paths), set([ - os.path.join(self.devices, dev) for dev in local_devs])) + # ... 
because fake_check_drive returned False for both paths + self.assertFalse(paths) - def fake_check_mount(devices, device): - path = os.path.join(devices, device) - if path.endswith('sda'): - return True + def fake_check_drive(devices, device, mount_check): + self.assertTrue(mount_check) + if device == 'sda': + return os.path.join(devices, device) else: return False @@ -2809,8 +2814,8 @@ class TestObjectReconstructor(BaseTestObjectReconstructor): return_value=[self.ip]), \ mock.patch.object(self.policy.object_ring, '_devs', new=stub_ring_devs), \ - mock.patch('swift.obj.diskfile.check_mount', - fake_check_mount): + mock.patch('swift.obj.diskfile.check_drive', + fake_check_drive): part_infos = list(self.reconstructor.collect_parts()) self.assertEqual(1, len(part_infos)) # only sda picked up (part 0) self.assertEqual(part_infos[0]['partition'], 0) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 9ee2df1763..bc37d182a2 100644 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -45,7 +45,7 @@ from swift import __version__ as swift_version from swift.common.http import is_success from test import listen_zero from test.unit import FakeLogger, debug_logger, mocked_http_conn, \ - make_timestamp_iter, DEFAULT_TEST_EC_TYPE + make_timestamp_iter, DEFAULT_TEST_EC_TYPE, mock_check_drive from test.unit import connect_tcp, readuntil2crlfs, patch_policies, \ encode_frag_archive_bodies from swift.obj import server as object_server @@ -2762,6 +2762,68 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.headers['Server'], (server_handler.server_type + '/' + swift_version)) + def test_insufficient_storage_mount_check_true(self): + conf = {'devices': self.testdir, 'mount_check': 'true'} + object_controller = object_server.ObjectController(conf) + for policy in POLICIES: + mgr = object_controller._diskfile_router[policy] + self.assertTrue(mgr.mount_check) + for method in object_controller.allowed_methods: + if method 
in ('OPTIONS', 'SSYNC'): + continue + path = '/sda1/p/' + if method == 'REPLICATE': + path += 'suff' + else: + path += 'a/c/o' + req = Request.blank(path, method=method, + headers={'x-timestamp': '1', + 'content-type': 'app/test', + 'content-length': 0}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(object_controller) + self.assertEqual(resp.status_int, 507) + mocks['ismount'].return_value = True + resp = req.get_response(object_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... + expected = 2 if method in ('PUT', 'REPLICATE') else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + + def test_insufficient_storage_mount_check_false(self): + conf = {'devices': self.testdir, 'mount_check': 'false'} + object_controller = object_server.ObjectController(conf) + for policy in POLICIES: + mgr = object_controller._diskfile_router[policy] + self.assertFalse(mgr.mount_check) + for method in object_controller.allowed_methods: + if method in ('OPTIONS', 'SSYNC'): + continue + path = '/sda1/p/' + if method == 'REPLICATE': + path += 'suff' + else: + path += 'a/c/o' + req = Request.blank(path, method=method, + headers={'x-timestamp': '1', + 'content-type': 'app/test', + 'content-length': 0}) + with mock_check_drive() as mocks: + try: + resp = req.get_response(object_controller) + self.assertEqual(resp.status_int, 507) + mocks['isdir'].return_value = True + resp = req.get_response(object_controller) + self.assertNotEqual(resp.status_int, 507) + # feel free to rip out this last assertion... 
+ expected = 2 if method in ('PUT', 'REPLICATE') else 4 + self.assertEqual(resp.status_int // 100, expected) + except AssertionError as e: + self.fail('%s for %s' % (e, method)) + def test_GET(self): # Test swift.obj.server.ObjectController.GET req = Request.blank('/sda1/p/a/c', environ={'REQUEST_METHOD': 'GET'}) @@ -6225,22 +6287,6 @@ class TestObjectController(unittest.TestCase): tpool.execute = was_tpool_exe diskfile.DiskFileManager._get_hashes = was_get_hashes - def test_REPLICATE_insufficient_storage(self): - conf = {'devices': self.testdir, 'mount_check': 'true'} - self.object_controller = object_server.ObjectController( - conf, logger=debug_logger()) - self.object_controller.bytes_per_sync = 1 - - def fake_check_mount(*args, **kwargs): - return False - - with mock.patch("swift.obj.diskfile.check_mount", fake_check_mount): - req = Request.blank('/sda1/p/suff', - environ={'REQUEST_METHOD': 'REPLICATE'}, - headers={}) - resp = req.get_response(self.object_controller) - self.assertEqual(resp.status_int, 507) - def test_REPLICATE_reclaims_tombstones(self): conf = {'devices': self.testdir, 'mount_check': False, 'reclaim_age': 100} diff --git a/test/unit/obj/test_ssync_receiver.py b/test/unit/obj/test_ssync_receiver.py index 5821f6282e..739d28d38d 100644 --- a/test/unit/obj/test_ssync_receiver.py +++ b/test/unit/obj/test_ssync_receiver.py @@ -34,7 +34,8 @@ from swift.obj import ssync_receiver, ssync_sender from swift.obj.reconstructor import ObjectReconstructor from test import listen_zero, unit -from test.unit import debug_logger, patch_policies, make_timestamp_iter +from test.unit import (debug_logger, patch_policies, make_timestamp_iter, + mock_check_drive) from test.unit.obj.common import write_diskfile @@ -370,8 +371,7 @@ class TestReceiver(unittest.TestCase): mock.patch.object( self.controller._diskfile_router[POLICIES.legacy], 'mount_check', False), \ - mock.patch('swift.obj.diskfile.check_mount', - return_value=False) as mocked_check_mount: + 
mock_check_drive(isdir=True) as mocks: req = swob.Request.blank( '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'}) resp = req.get_response(self.controller) @@ -379,14 +379,13 @@ class TestReceiver(unittest.TestCase): self.body_lines(resp.body), [':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"']) self.assertEqual(resp.status_int, 200) - self.assertFalse(mocked_check_mount.called) + self.assertEqual([], mocks['ismount'].call_args_list) with mock.patch.object(self.controller, 'replication_semaphore'), \ mock.patch.object( self.controller._diskfile_router[POLICIES.legacy], 'mount_check', True), \ - mock.patch('swift.obj.diskfile.check_mount', - return_value=False) as mocked_check_mount: + mock_check_drive(ismount=False) as mocks: req = swob.Request.blank( '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'}) resp = req.get_response(self.controller) @@ -396,12 +395,12 @@ class TestReceiver(unittest.TestCase): "was not enough space to save the resource. Drive: " "device

"]) self.assertEqual(resp.status_int, 507) - mocked_check_mount.assert_called_once_with( + self.assertEqual([mock.call(os.path.join( self.controller._diskfile_router[POLICIES.legacy].devices, - 'device') + 'device'))], mocks['ismount'].call_args_list) - mocked_check_mount.reset_mock() - mocked_check_mount.return_value = True + mocks['ismount'].reset_mock() + mocks['ismount'].return_value = True req = swob.Request.blank( '/device/partition', environ={'REQUEST_METHOD': 'SSYNC'}) resp = req.get_response(self.controller) @@ -409,9 +408,9 @@ class TestReceiver(unittest.TestCase): self.body_lines(resp.body), [':ERROR: 0 "Looking for :MISSING_CHECK: START got \'\'"']) self.assertEqual(resp.status_int, 200) - mocked_check_mount.assert_called_once_with( + self.assertEqual([mock.call(os.path.join( self.controller._diskfile_router[POLICIES.legacy].devices, - 'device') + 'device'))], mocks['ismount'].call_args_list) def test_SSYNC_Exception(self): From 244d7de388e7a62f488e9272efcb9adba8021274 Mon Sep 17 00:00:00 2001 From: Thomas Herve Date: Tue, 5 Sep 2017 22:42:32 +0200 Subject: [PATCH 07/43] Delay cache invalidation during container creation Having the cache being cleared before the PUT request creates a fairly big window where the cache can be inconsistent, if a concurrent GET happens. Let's move the cache clear after the requests to reduce it. 
Change-Id: I45130cc32ba3a23272c2a67c86b4063000379426 Closes-Bug: #1715177 --- swift/proxy/controllers/container.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py index faa2cdee84..c130958195 100644 --- a/swift/proxy/controllers/container.py +++ b/swift/proxy/controllers/container.py @@ -177,11 +177,11 @@ class ContainerController(Controller): headers = self._backend_requests(req, len(containers), account_partition, accounts, policy_index) - clear_info_cache(self.app, req.environ, - self.account_name, self.container_name) resp = self.make_requests( req, self.app.container_ring, container_partition, 'PUT', req.swift_entity_path, headers) + clear_info_cache(self.app, req.environ, + self.account_name, self.container_name) return resp @public From c6d00fe22f5c9962928cfb94635a79097d3f0c6b Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Mon, 11 Sep 2017 15:03:12 +0000 Subject: [PATCH 08/43] api-ref: Fix container PUT response codes Change-Id: I7b57b6ee7095105399518873f8ae59da63cd8ce5 Closes-Bug: #1549411 --- api-ref/source/storage-container-services.inc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api-ref/source/storage-container-services.inc b/api-ref/source/storage-container-services.inc index d2e32a0a73..a66f85fe40 100644 --- a/api-ref/source/storage-container-services.inc +++ b/api-ref/source/storage-container-services.inc @@ -186,8 +186,9 @@ Example requests and responses: X-Openstack-Request-Id: tx06021f10fc8642b2901e7-0052d58f37 Date: Tue, 14 Jan 2014 19:25:43 GMT -Error response codes:201,204, +Normal response codes: 201, 202 +Error response codes: 400, 404, 507 Request ------- From 8f843381cd30c1c3cb556b5abe1203d1e76b889b Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 12 Sep 2017 06:20:11 +0000 Subject: [PATCH 09/43] Add assertion about last-modified to object post test Change-Id: I850bf44ab9c9388cb6434e33ae0b20ec361aca0e --- 
test/functional/tests.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/functional/tests.py b/test/functional/tests.py index aa121d4b2e..05b062f43a 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2531,6 +2531,7 @@ class TestFile(Base): self.assertEqual(1024, f_dict['bytes']) self.assertEqual('text/foobar', f_dict['content_type']) self.assertEqual(etag, f_dict['hash']) + put_last_modified = f_dict['last_modified'] # now POST updated content-type to each file file_item = self.env.container.file(file_name) @@ -2555,6 +2556,7 @@ class TestFile(Base): self.fail('Failed to find file %r in listing' % file_name) self.assertEqual(1024, f_dict['bytes']) self.assertEqual('image/foobarbaz', f_dict['content_type']) + self.assertLess(put_last_modified, f_dict['last_modified']) self.assertEqual(etag, f_dict['hash']) From f90ba1acb052ca5722eccbc8611d86efa81c3f6b Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 3 Mar 2017 20:56:39 +0000 Subject: [PATCH 10/43] Use swift3's check_signature function This adds support for v4 while getting us out of needing to know how signatures work. 
Related-Change: Iafb6114c12deb9a40d0f8324611de27b48ed95f6 Change-Id: I14be2845101f6af8f73bc46a416c09e4b9449515 --- swift/common/middleware/tempauth.py | 23 ++++----- test/unit/common/middleware/test_tempauth.py | 54 +++++++++++++++----- 2 files changed, 53 insertions(+), 24 deletions(-) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index f6207ada99..9b774111e6 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -18,8 +18,6 @@ from __future__ import print_function from time import time from traceback import format_exc from uuid import uuid4 -from hashlib import sha1 -import hmac import base64 from eventlet import Timeout @@ -395,20 +393,21 @@ class TempAuth(object): s3_auth_details = env.get('swift3.auth_details') if s3_auth_details: + if 'check_signature' not in s3_auth_details: + self.logger.warning( + 'Swift3 did not provide a check_signature function; ' + 'upgrade Swift3 if you want to use it with tempauth') + return None account_user = s3_auth_details['access_key'] - signature_from_user = s3_auth_details['signature'] if account_user not in self.users: return None - account, user = account_user.split(':', 1) - account_id = self.users[account_user]['url'].rsplit('/', 1)[-1] - path = env['PATH_INFO'] - env['PATH_INFO'] = path.replace(account_user, account_id, 1) - valid_signature = base64.encodestring(hmac.new( - self.users[account_user]['key'], - s3_auth_details['string_to_sign'], - sha1).digest()).strip() - if signature_from_user != valid_signature: + user = self.users[account_user] + account = account_user.split(':', 1)[0] + account_id = user['url'].rsplit('/', 1)[-1] + if not s3_auth_details['check_signature'](user['key']): return None + env['PATH_INFO'] = env['PATH_INFO'].replace( + account_user, account_id, 1) groups = self._get_user_groups(account, account_user, account_id) return groups diff --git a/test/unit/common/middleware/test_tempauth.py 
b/test/unit/common/middleware/test_tempauth.py index 0f94c86125..62aa1568f5 100644 --- a/test/unit/common/middleware/test_tempauth.py +++ b/test/unit/common/middleware/test_tempauth.py @@ -19,7 +19,6 @@ import unittest from contextlib import contextmanager from base64 import b64encode from time import time -import mock from swift.common.middleware import tempauth as auth from swift.common.middleware.acl import format_acl @@ -265,27 +264,58 @@ class TestAuth(unittest.TestCase): self.assertEqual(req.environ['swift.authorize'], local_auth.denied_response) - def test_auth_with_s3_authorization(self): + def test_auth_with_s3_authorization_good(self): local_app = FakeApp() local_auth = auth.filter_factory( {'user_s3_s3': 'secret .admin'})(local_app) - req = self._make_request('/v1/AUTH_s3', environ={ + req = self._make_request('/v1/s3:s3', environ={ + 'swift3.auth_details': { + 'access_key': 's3:s3', + 'signature': b64encode('sig'), + 'string_to_sign': 't', + 'check_signature': lambda secret: True}}) + resp = req.get_response(local_auth) + + self.assertEqual(resp.status_int, 404) + self.assertEqual(local_app.calls, 1) + self.assertEqual(req.environ['PATH_INFO'], '/v1/AUTH_s3') + self.assertEqual(req.environ['swift.authorize'], + local_auth.authorize) + + def test_auth_with_s3_authorization_invalid(self): + local_app = FakeApp() + local_auth = auth.filter_factory( + {'user_s3_s3': 'secret .admin'})(local_app) + req = self._make_request('/v1/s3:s3', environ={ + 'swift3.auth_details': { + 'access_key': 's3:s3', + 'signature': b64encode('sig'), + 'string_to_sign': 't', + 'check_signature': lambda secret: False}}) + resp = req.get_response(local_auth) + + self.assertEqual(resp.status_int, 401) + self.assertEqual(local_app.calls, 1) + self.assertEqual(req.environ['PATH_INFO'], '/v1/s3:s3') + self.assertEqual(req.environ['swift.authorize'], + local_auth.denied_response) + + def test_auth_with_old_s3_details(self): + local_app = FakeApp() + local_auth = auth.filter_factory( + 
{'user_s3_s3': 'secret .admin'})(local_app) + req = self._make_request('/v1/s3:s3', environ={ 'swift3.auth_details': { 'access_key': 's3:s3', 'signature': b64encode('sig'), 'string_to_sign': 't'}}) + resp = req.get_response(local_auth) - with mock.patch('hmac.new') as hmac: - hmac.return_value.digest.return_value = 'sig' - resp = req.get_response(local_auth) - self.assertEqual(hmac.mock_calls, [ - mock.call('secret', 't', mock.ANY), - mock.call().digest()]) - - self.assertEqual(resp.status_int, 404) + self.assertEqual(resp.status_int, 401) self.assertEqual(local_app.calls, 1) + self.assertEqual(req.environ['PATH_INFO'], '/v1/s3:s3') self.assertEqual(req.environ['swift.authorize'], - local_auth.authorize) + local_auth.denied_response) def test_auth_no_reseller_prefix_no_token(self): # Check that normally we set up a call back to our authorize. From 6305993317934de8724a81afe7e21fb06c7d44d0 Mon Sep 17 00:00:00 2001 From: junboli Date: Tue, 5 Sep 2017 19:16:30 +0800 Subject: [PATCH 11/43] Correct the unused doc link address Update the doc link brought by the doc migration. Although we had some effort to fix these, it still left lots of bad doc link, I separate these changes into 3 patches aim to fix all of these, this is the 3rd patch for doc/source/install. 
Change-Id: I1b0c12cd5f893f1a84d12782ddc39f6d06beb2aa --- doc/source/install/controller-include.txt | 2 +- doc/source/install/controller-install-debian.rst | 2 +- doc/source/install/controller-install-obs.rst | 2 +- doc/source/install/controller-install-rdo.rst | 2 +- doc/source/install/controller-install-ubuntu.rst | 2 +- doc/source/install/get_started.rst | 4 ++-- doc/source/install/initial-rings.rst | 2 +- doc/source/install/storage-include1.txt | 2 +- doc/source/install/storage-include2.txt | 2 +- doc/source/install/storage-include3.txt | 2 +- doc/source/install/storage-install-obs.rst | 2 +- doc/source/install/storage-install-rdo.rst | 2 +- doc/source/install/storage-install-ubuntu-debian.rst | 2 +- 13 files changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/source/install/controller-include.txt b/doc/source/install/controller-include.txt index 184e9cd7b5..cf9e9d1735 100644 --- a/doc/source/install/controller-include.txt +++ b/doc/source/install/controller-include.txt @@ -28,7 +28,7 @@ following actions: .. note:: For more information on other modules that enable additional features, - see the `Deployment Guide `__. + see the `Deployment Guide `__. * In the ``[app:proxy-server]`` section, enable automatic account creation: diff --git a/doc/source/install/controller-install-debian.rst b/doc/source/install/controller-install-debian.rst index 2a981fb0b5..55508902dd 100644 --- a/doc/source/install/controller-install-debian.rst +++ b/doc/source/install/controller-install-debian.rst @@ -10,7 +10,7 @@ the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the -`Deployment Guide `__. +`Deployment Guide `__. This section applies to Debian. 
diff --git a/doc/source/install/controller-install-obs.rst b/doc/source/install/controller-install-obs.rst index 08cf54556e..f4a8ffcf71 100644 --- a/doc/source/install/controller-install-obs.rst +++ b/doc/source/install/controller-install-obs.rst @@ -10,7 +10,7 @@ the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the -`Deployment Guide `__. +`Deployment Guide `__. This section applies to openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. diff --git a/doc/source/install/controller-install-rdo.rst b/doc/source/install/controller-install-rdo.rst index d6ccaa0d4d..44dd7cc40e 100644 --- a/doc/source/install/controller-install-rdo.rst +++ b/doc/source/install/controller-install-rdo.rst @@ -10,7 +10,7 @@ the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the -`Deployment Guide `__. +`Deployment Guide `__. This section applies to Red Hat Enterprise Linux 7 and CentOS 7. diff --git a/doc/source/install/controller-install-ubuntu.rst b/doc/source/install/controller-install-ubuntu.rst index 368e6093c9..c1bbadf0e7 100644 --- a/doc/source/install/controller-install-ubuntu.rst +++ b/doc/source/install/controller-install-ubuntu.rst @@ -10,7 +10,7 @@ the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the -`Deployment Guide `__. +`Deployment Guide `__. 
This section applies to Ubuntu 14.04 (LTS). diff --git a/doc/source/install/get_started.rst b/doc/source/install/get_started.rst index 301f6a2ebd..1cf963010f 100644 --- a/doc/source/install/get_started.rst +++ b/doc/source/install/get_started.rst @@ -40,7 +40,7 @@ swift client swift-init Script that initializes the building of the ring file, takes daemon names as parameter and offers commands. Documented in - http://docs.openstack.org/developer/swift/admin_guide.html#managing-services. + https://docs.openstack.org/swift/latest/admin_guide.html#managing-services. swift-recon A cli tool used to retrieve various metrics and telemetry information @@ -48,4 +48,4 @@ swift-recon swift-ring-builder Storage ring build and rebalance utility. Documented in - http://docs.openstack.org/developer/swift/admin_guide.html#managing-the-rings. + https://docs.openstack.org/swift/latest/admin_guide.html#managing-the-rings. diff --git a/doc/source/install/initial-rings.rst b/doc/source/install/initial-rings.rst index d7d0378b68..e09dfd4ed2 100644 --- a/doc/source/install/initial-rings.rst +++ b/doc/source/install/initial-rings.rst @@ -9,7 +9,7 @@ maximum partitions, 3 replicas of each object, and 1 hour minimum time between moving a partition more than once. For Object Storage, a partition indicates a directory on a storage device rather than a conventional partition table. For more information, see the -`Deployment Guide `__. +`Deployment Guide `__. .. note:: Perform these steps on the controller node. diff --git a/doc/source/install/storage-include1.txt b/doc/source/install/storage-include1.txt index a98f27bf6e..711782300a 100644 --- a/doc/source/install/storage-include1.txt +++ b/doc/source/install/storage-include1.txt @@ -28,7 +28,7 @@ following actions: .. note:: For more information on other modules that enable additional features, - see the `Deployment Guide `__. + see the `Deployment Guide `__. 
* In the ``[filter:recon]`` section, configure the recon (meters) cache directory: diff --git a/doc/source/install/storage-include2.txt b/doc/source/install/storage-include2.txt index 04cb2f350d..cb320d9a1b 100644 --- a/doc/source/install/storage-include2.txt +++ b/doc/source/install/storage-include2.txt @@ -28,7 +28,7 @@ following actions: .. note:: For more information on other modules that enable additional features, - see the `Deployment Guide `__. + see the `Deployment Guide `__. * In the ``[filter:recon]`` section, configure the recon (meters) cache directory: diff --git a/doc/source/install/storage-include3.txt b/doc/source/install/storage-include3.txt index 6551782c1c..2cc9e2d235 100644 --- a/doc/source/install/storage-include3.txt +++ b/doc/source/install/storage-include3.txt @@ -28,7 +28,7 @@ following actions: .. note:: For more information on other modules that enable additional features, - see the `Deployment Guide `__. + see the `Deployment Guide `__. * In the ``[filter:recon]`` section, configure the recon (meters) cache and lock directories: diff --git a/doc/source/install/storage-install-obs.rst b/doc/source/install/storage-install-obs.rst index 7ed11cce35..3199843132 100644 --- a/doc/source/install/storage-install-obs.rst +++ b/doc/source/install/storage-install-obs.rst @@ -14,7 +14,7 @@ Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the -`Deployment Guide `_. +`Deployment Guide `_. This section applies to openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. 
diff --git a/doc/source/install/storage-install-rdo.rst b/doc/source/install/storage-install-rdo.rst index 873588592c..91a14bd7fc 100644 --- a/doc/source/install/storage-install-rdo.rst +++ b/doc/source/install/storage-install-rdo.rst @@ -14,7 +14,7 @@ Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the -`Deployment Guide `_. +`Deployment Guide `_. This section applies to Red Hat Enterprise Linux 7 and CentOS 7. diff --git a/doc/source/install/storage-install-ubuntu-debian.rst b/doc/source/install/storage-install-ubuntu-debian.rst index 12fb4dae56..3f62bf2d8a 100644 --- a/doc/source/install/storage-install-ubuntu-debian.rst +++ b/doc/source/install/storage-install-ubuntu-debian.rst @@ -14,7 +14,7 @@ Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the -`Deployment Guide `_. +`Deployment Guide `_. This section applies to Ubuntu 14.04 (LTS) and Debian. From 5622c1f959fac44e325768b6b3d9a1961c366dd2 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Wed, 13 Sep 2017 09:58:10 -0600 Subject: [PATCH 12/43] Test placeholder for dispersion_report module This module was recently moved from bin/ and the execute bit stuck around and wasn't imported by our test suite - which throws off accountability in coverage reporting. Remove the execute bit and add a placeholder unittest module. 
Related-Change-Id: Ie0d52a1a54fc152bb72cbb3f84dcc36a8dad972a Change-Id: Id9e678c2460cc889f682c5566a4418160db7878f --- swift/cli/dispersion_report.py | 0 test/unit/cli/test_dispersion_report.py | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+) mode change 100755 => 100644 swift/cli/dispersion_report.py create mode 100644 test/unit/cli/test_dispersion_report.py diff --git a/swift/cli/dispersion_report.py b/swift/cli/dispersion_report.py old mode 100755 new mode 100644 diff --git a/test/unit/cli/test_dispersion_report.py b/test/unit/cli/test_dispersion_report.py new file mode 100644 index 0000000000..0a6bd83d91 --- /dev/null +++ b/test/unit/cli/test_dispersion_report.py @@ -0,0 +1,21 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest + +from swift.cli import dispersion_report + + +class TestDispersionReport(unittest.TestCase): + +    def test_placeholder(self): +        self.assertTrue(callable(dispersion_report.main)) From df00122e74c31ea6d5dec523c7d59a6ad2fedc26 Mon Sep 17 00:00:00 2001 From: junboli Date: Tue, 5 Sep 2017 19:01:48 +0800 Subject: [PATCH 13/43] doc migration: update the doc link address[2/3] Update the doc links brought by the doc migration. Although we had some effort to fix these, it still left lots of bad doc links; I separate these changes into 3 patches aiming to fix all of these, this is the 2nd patch for doc/manpages.
Change-Id: Id426c5dd45a812ef801042834c93701bb6e63a05 --- CHANGELOG | 40 ++++++++++----------- CONTRIBUTING.rst | 4 +-- README.rst | 20 +++++------ doc/source/admin_guide.rst | 4 +-- doc/source/api/object_api_v1_overview.rst | 6 ++-- doc/source/api/temporary_url_middleware.rst | 2 +- doc/source/overview_auth.rst | 6 ++-- doc/source/overview_encryption.rst | 2 +- etc/proxy-server.conf-sample | 2 +- 9 files changed, 43 insertions(+), 43 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index a305b67b4c..e4e9629475 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -244,7 +244,7 @@ swift (2.13.0, OpenStack Ocata) * PUT subrequests generated from a client-side COPY will now properly log the SSC (server-side copy) Swift source field. See - https://docs.openstack.org/developer/swift/logs.html#swift-source for + https://docs.openstack.org/swift/latest/logs.html#swift-source for more information. * Fixed a bug where an SLO download with a range request may have resulted @@ -391,13 +391,13 @@ swift (2.10.0, OpenStack Newton) * Object versioning now supports a "history" mode in addition to the older "stack" mode. The difference is in how DELETE requests are handled. For full details, please read - http://docs.openstack.org/developer/swift/overview_object_versioning.html. + https://docs.openstack.org/swift/latest/overview_object_versioning.html. * New config variables to change the schedule priority and I/O scheduling class. Servers and daemons now understand `nice_priority`, `ionice_class`, and `ionice_priority` to schedule their relative importance. Please read - http://docs.openstack.org/developer/swift/deployment_guide.html + https://docs.openstack.org/swift/latest/admin_guide.html for full config details. * On newer kernels (3.15+ when using xfs), Swift will use the O_TMPFILE @@ -410,7 +410,7 @@ swift (2.10.0, OpenStack Newton) improved in clusters that are not completely healthy. 
* Significant improvements to the api-ref doc available at - http://developer.openstack.org/api-ref/object-storage/. + https://developer.openstack.org/api-ref/object-storage/. * A PUT or POST to a container will now update the container's Last-Modified time, and that value will be included in a @@ -464,7 +464,7 @@ swift (2.9.0) For more information on the details of the at-rest encryption feature, please see the docs at - http://docs.openstack.org/developer/swift/overview_encryption.html. + https://docs.openstack.org/swift/latest/overview_encryption.html. * `swift-recon` can now be called with more than one server type. @@ -606,7 +606,7 @@ swift (2.7.0, OpenStack Mitaka) default it will stagger the firing. * Added an operational procedures guide to the docs. It can be - found at http://docs.openstack.org/developer/swift/ops_runbook/index.html and + found at https://docs.openstack.org/swift/latest/ops_runbook/index.html and includes information on detecting and handling day-to-day operational issues in a Swift cluster. @@ -776,7 +776,7 @@ swift (2.6.0) * Container sync has been improved to more quickly find and iterate over the containers to be synced. This reduced server load and lowers the time required to see data propagate between two clusters. Please see - http://docs.openstack.org/developer/swift/overview_container_sync.html for more details + https://docs.openstack.org/swift/latest/overview_container_sync.html for more details about the new on-disk structure for tracking synchronized containers. * A container POST will now update that container's put-timestamp value. @@ -862,7 +862,7 @@ swift (2.4.0) server config setting ("allow_versions"), if it is currently enabled. The existing container server config setting enables existing containers to continue being versioned. 
Please see - http://docs.openstack.org/developer/swift/middleware.html#how-to-enable-object-versioning-in-a-swift-cluster + https://docs.openstack.org/swift/latest/middleware.html#how-to-enable-object-versioning-in-a-swift-cluster for further upgrade notes. * Allow 1+ object-servers-per-disk deployment @@ -987,7 +987,7 @@ swift (2.3.0, OpenStack Kilo) ssync for durability. Deployers are urged to do extensive testing and not deploy production data using an erasure code storage policy. - Full docs are at http://docs.openstack.org/developer/swift/overview_erasure_code.html + Full docs are at https://docs.openstack.org/swift/latest/overview_erasure_code.html * Add support for container TempURL Keys. @@ -996,7 +996,7 @@ swift (2.3.0, OpenStack Kilo) * Swift now supports composite tokens. This allows another service to act on behalf of a user, but only with that user's consent. - See http://docs.openstack.org/developer/swift/overview_auth.html for more details. + See https://docs.openstack.org/swift/latest/overview_auth.html for more details. * Multi-region replication was improved. When replicating data to a different region, only one replica will be pushed per replication @@ -1004,7 +1004,7 @@ swift (2.3.0, OpenStack Kilo) locally instead of pushing more data over the inter-region network. * Internal requests from the ratelimit middleware now properly log a - swift_source. See http://docs.openstack.org/developer/swift/logs.html for details. + swift_source. See https://docs.openstack.org/swift/latest/logs.html for details. * Improved storage policy support for quarantine stats in swift-recon. @@ -1052,7 +1052,7 @@ swift (2.2.2) The overload and dispersion metrics have been exposed in the swift-ring-build CLI tools. - See http://docs.openstack.org/developer/swift/overview_ring.html + See https://docs.openstack.org/swift/latest/overview_ring.html for more info on how data placement works now. * Improve replication of large out-of-sync, out-of-date containers. 
@@ -1140,7 +1140,7 @@ swift (2.2.0, OpenStack Juno) now requires that ACLs be set on IDs, which are unique across domains, and further restricts setting new ACLs to only use IDs. - Please see http://docs.openstack.org/developer/swift/overview_auth.html for + Please see https://docs.openstack.org/swift/latest/overview_auth.html for more information on configuring Swift and Keystone together. * Swift now supports server-side account-to-account copy. Server- @@ -1257,7 +1257,7 @@ swift (2.0.0) them. A policy is set on a Swift container at container creation time and cannot be changed. - Full docs are at http://docs.openstack.org/developer/swift/overview_policies.html + Full docs are at https://docs.openstack.org/swift/latest/overview_policies.html * Add profiling middleware in Swift @@ -1351,7 +1351,7 @@ swift (1.13.0) the header is a JSON dictionary string to be interpreted by the auth system. A reference implementation is given in TempAuth. Please see the full docs at - http://docs.openstack.org/developer/swift/overview_auth.html + https://docs.openstack.org/swift/latest/overview_auth.html * Added a WSGI environment flag to stop swob from always using absolute location. This is useful if middleware needs to use @@ -1433,8 +1433,8 @@ swift (1.12.0) * New container sync configuration option, separating the end user from knowing the required end point and adding more secure signed requests. See - http://docs.openstack.org/developer/swift/overview_container_sync.html for full - information. + https://docs.openstack.org/swift/latest/overview_container_sync.html + for full information. * bulk middleware now can be configured to retry deleting containers. @@ -1699,7 +1699,7 @@ swift (1.9.0) bugrelated to content-disposition names. * Added crossdomain.xml middleware. 
See - http://docs.openstack.org/developer/swift/crossdomain.html for details + https://docs.openstack.org/swift/latest/crossdomain.html for details * Added rsync bandwidth limit setting for object replicator @@ -1720,7 +1720,7 @@ swift (1.9.0) * Improved container-sync resiliency * Added example Apache config files. See - http://docs.openstack.org/developer/swift/apache_deployment_guide.html + https://docs.openstack.org/swift/latest/apache_deployment_guide.html for more info * If an account is marked as deleted but hasn't been reaped and is still @@ -1768,7 +1768,7 @@ swift (1.8.0, OpenStack Grizzly) This is a change that may require an update to your proxy server config file or custom middleware that you may be using. See the full - docs at http://docs.openstack.org/developer/swift/misc.html#module-swift.common.middleware.proxy_logging. + docs at https://docs.openstack.org/swift/latest/misc.html. * Changed the default sample rate for a few high-traffic requests. diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index e4958f8772..6f47ef18be 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -75,7 +75,7 @@ working on. Getting Started --------------- -http://docs.openstack.org/developer/swift/first_contribution_swift.html +https://docs.openstack.org/swift/latest/first_contribution_swift.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following @@ -116,7 +116,7 @@ Recommended workflow ==================== - Set up a `Swift All-In-One - VM `__\ (SAIO). + VM `__\ (SAIO). - Make your changes. Docs and tests for your patch must land before or with your patch. diff --git a/README.rst b/README.rst index 014cf821c2..6a5a1443d7 100644 --- a/README.rst +++ b/README.rst @@ -31,7 +31,7 @@ To build documentation install sphinx (``pip install sphinx``), run ``python setup.py build_sphinx``, and then browse to /doc/build/html/index.html. 
These docs are auto-generated after every commit and available online at -http://docs.openstack.org/developer/swift/. +https://docs.openstack.org/swift/latest/. For Developers -------------- @@ -39,13 +39,14 @@ For Developers Getting Started ~~~~~~~~~~~~~~~ -Swift is part of OpenStack and follows the code contribution, review, and testing processes common to all OpenStack projects. +Swift is part of OpenStack and follows the code contribution, review, and +testing processes common to all OpenStack projects. If you would like to start contributing, check out these `notes `__ to help you get started. The best place to get started is the -`"SAIO - Swift All In One" `__. +`"SAIO - Swift All In One" `__. This document will walk you through setting up a development cluster of Swift in a VM. The SAIO environment is ideal for running small-scale tests against swift and trying out new features and bug fixes. @@ -72,7 +73,7 @@ continue to work. Probe tests are "white box" tests that validate the internal workings of a Swift cluster. They are written to work against the -`"SAIO - Swift All In One" `__ +`"SAIO - Swift All In One" `__ dev environment. For example, a probe test may create an object, delete one replica, and ensure that the background consistency processes find and correct the error. @@ -119,10 +120,9 @@ For Deployers ------------- Deployer docs are also available at -http://docs.openstack.org/developer/swift/. A good starting point is at -http://docs.openstack.org/developer/swift/deployment_guide.html - -There is an `ops runbook `__ +https://docs.openstack.org/swift/latest/. A good starting point is at +https://docs.openstack.org/swift/latest/deployment_guide.html +There is an `ops runbook `__ that gives information about how to diagnose and troubleshoot common issues when running a Swift cluster. @@ -138,11 +138,11 @@ For client applications, official Python language bindings are provided at http://github.com/openstack/python-swiftclient. 
Complete API documentation at -http://developer.openstack.org/api-ref/object-store/ +https://developer.openstack.org/api-ref/object-store/ There is a large ecosystem of applications and libraries that support and work with OpenStack Swift. Several are listed on the -`associated projects `__ +`associated projects `__ page. -------------- diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 497d22d180..10854a4446 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -1493,6 +1493,6 @@ See :ref:`custom-logger-hooks-label` for sample use cases. Securing OpenStack Swift ------------------------ -Please refer to the security guide at http://docs.openstack.org/security-guide +Please refer to the security guide at https://docs.openstack.org/security-guide and in particular the `Object Storage -`__ section. +`__ section. diff --git a/doc/source/api/object_api_v1_overview.rst b/doc/source/api/object_api_v1_overview.rst index 30d3f04f2c..7f8571bc87 100644 --- a/doc/source/api/object_api_v1_overview.rst +++ b/doc/source/api/object_api_v1_overview.rst @@ -169,14 +169,14 @@ The API Reference describes the operations that you can perform with the Object Storage API: - `Storage - accounts `__: + accounts `__: Use to perform account-level tasks. Lists containers for a specified account. Creates, updates, and deletes account metadata. Shows account metadata. - `Storage - containers `__: + containers `__: Use to perform container-level tasks. Lists objects in a specified container. Creates, shows details for, @@ -184,7 +184,7 @@ Object Storage API: container metadata. - `Storage - objects `__: + objects `__: Use to perform object-level tasks. Creates, replaces, shows details for, and deletes objects. 
Copies diff --git a/doc/source/api/temporary_url_middleware.rst b/doc/source/api/temporary_url_middleware.rst index 9acb31cadb..76f5dfa6cf 100644 --- a/doc/source/api/temporary_url_middleware.rst +++ b/doc/source/api/temporary_url_middleware.rst @@ -222,4 +222,4 @@ Note that if the above example is copied exactly, and used in a command shell, then the ampersand is interpreted as an operator and the URL will be truncated. Enclose the URL in quotation marks to avoid this. -.. _tempurl: http://docs.openstack.org/developer/python-swiftclient/cli.html#tempurl +.. _tempurl: https://docs.openstack.org/python-swiftclient/latest/cli/index.html#swift-tempurl diff --git a/doc/source/overview_auth.rst b/doc/source/overview_auth.rst index b3df7f88a0..ab87bea881 100644 --- a/doc/source/overview_auth.rst +++ b/doc/source/overview_auth.rst @@ -104,8 +104,8 @@ can be found in the KeystoneMiddleware_ distribution. The :ref:`keystoneauth` middleware performs authorization and mapping the Keystone roles to Swift's ACLs. -.. _KeystoneMiddleware: http://docs.openstack.org/developer/keystonemiddleware/ -.. _Keystone: http://docs.openstack.org/developer/keystone/ +.. _KeystoneMiddleware: https://docs.openstack.org/keystonemiddleware/latest/ +.. _Keystone: https://docs.openstack.org/keystone/latest/ .. _configuring_keystone_auth: @@ -167,7 +167,7 @@ your situation, but in short: service. The example values shown here assume a user named 'swift' with admin role on a project named 'service', both being in the Keystone domain with id 'default'. Refer to the `KeystoneMiddleware documentation - `_ + `_ for other examples. * ``cache`` is set to ``swift.cache``. 
This means that the middleware diff --git a/doc/source/overview_encryption.rst b/doc/source/overview_encryption.rst index 5ebbbc85fc..90ab897fd3 100644 --- a/doc/source/overview_encryption.rst +++ b/doc/source/overview_encryption.rst @@ -238,7 +238,7 @@ Keys currently stored in Barbican can be listed using the The keymaster uses the explicitly configured username and password (and project name etc.) from the `keymaster.conf` file for retrieving the encryption root secret from an external key management system. The `Castellan library -`_ is used to communicate with +`_ is used to communicate with Barbican. For the proxy server, reading the encryption root secret directly from the diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c07c48ff35..b3f393c1e9 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -357,7 +357,7 @@ user_test5_tester5 = testing5 service # Following parameters are known to work with keystonemiddleware v2.3.0 # (above v2.0.0), but checking the latest information in the wiki page[1] # is recommended. -# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration +# 1. https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html#configuration # # [filter:authtoken] # paste.filter_factory = keystonemiddleware.auth_token:filter_factory From 4806434cb0e857ce624c62df2a262e3c3bb9f4d1 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 23 Mar 2017 18:26:21 -0700 Subject: [PATCH 14/43] Move listing formatting out to proxy middleware Make some json -> (text, xml) stuff in a common module, reference that in account/container servers so we don't break existing clients (including out-of-date proxies), but have the proxy controllers always force a json listing. This simplifies operations on listings (such as the ones already happening in decrypter, or the ones planned for symlink and sharding) by only needing to consider a single response type. 
There is a downside of larger backend requests for text/plain listings, but it seems like a net win? Change-Id: Id3ce37aa0402e2d8dd5784ce329d7cb4fbaf700d --- doc/saio/swift/proxy-server.conf | 5 +- etc/proxy-server.conf-sample | 8 +- setup.cfg | 1 + swift/account/server.py | 7 +- swift/account/utils.py | 56 +-- swift/common/constraints.py | 5 - swift/common/direct_client.py | 15 +- swift/common/internal_client.py | 8 +- swift/common/middleware/crypto/decrypter.py | 36 +- swift/common/middleware/dlo.py | 2 +- swift/common/middleware/listing_formats.py | 211 +++++++++++ swift/common/middleware/staticweb.py | 6 +- swift/common/middleware/versioned_writes.py | 3 +- swift/common/request_helpers.py | 25 +- swift/container/server.py | 52 +-- swift/proxy/controllers/account.py | 11 +- swift/proxy/controllers/container.py | 3 + swift/proxy/server.py | 11 +- .../middleware/crypto/test_decrypter.py | 133 ------- test/unit/common/middleware/test_dlo.py | 24 +- .../common/middleware/test_listing_formats.py | 345 ++++++++++++++++++ test/unit/common/middleware/test_staticweb.py | 37 +- .../middleware/test_versioned_writes.py | 74 ++-- test/unit/common/test_wsgi.py | 26 +- test/unit/container/test_server.py | 82 +++++ test/unit/helpers.py | 6 +- test/unit/proxy/test_server.py | 24 +- 27 files changed, 834 insertions(+), 382 deletions(-) create mode 100644 swift/common/middleware/listing_formats.py create mode 100644 test/unit/common/middleware/test_listing_formats.py diff --git a/doc/saio/swift/proxy-server.conf b/doc/saio/swift/proxy-server.conf index 76b85d5818..12b0386840 100644 --- a/doc/saio/swift/proxy-server.conf +++ b/doc/saio/swift/proxy-server.conf @@ -9,7 +9,7 @@ eventlet_debug = true [pipeline:main] # Yes, proxy-logging appears twice. This is so that # middleware-originated requests get logged too. 
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync tempauth staticweb copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats bulk tempurl ratelimit crossdomain container_sync tempauth staticweb copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server [filter:catch_errors] use = egg:swift#catch_errors @@ -71,6 +71,9 @@ allow_versioned_writes = true [filter:copy] use = egg:swift#copy +[filter:listing_formats] +use = egg:swift#listing_formats + [app:proxy-server] use = egg:swift#proxy allow_account_management = true diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c07c48ff35..586ef6c755 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -94,7 +94,7 @@ bind_port = 8080 [pipeline:main] # This sample pipeline uses tempauth and is used for SAIO dev work and # testing. See below for a pipeline using keystone. -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server # The following pipeline shows keystone integration. Comment out the one # above and uncomment this one. Additional steps for integrating keystone are @@ -915,3 +915,9 @@ use = egg:swift#encryption # disable_encryption to True. However, all encryption middleware should remain # in the pipeline in order for existing encrypted data to be read. 
# disable_encryption = False + +# listing_formats should be just right of the first proxy-logging middleware, +# and left of most other middlewares. If it is not already present, it will +# be automatically inserted for you. +[filter:listing_formats] +use = egg:swift#listing_formats diff --git a/setup.cfg b/setup.cfg index e99d858108..f180ffc257 100644 --- a/setup.cfg +++ b/setup.cfg @@ -106,6 +106,7 @@ paste.filter_factory = keymaster = swift.common.middleware.crypto.keymaster:filter_factory encryption = swift.common.middleware.crypto:filter_factory kms_keymaster = swift.common.middleware.crypto.kms_keymaster:filter_factory + listing_formats = swift.common.middleware.listing_formats:filter_factory [build_sphinx] all_files = 1 diff --git a/swift/account/server.py b/swift/account/server.py index c67ac5d97d..0fe2647235 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -24,7 +24,7 @@ import swift.common.db from swift.account.backend import AccountBroker, DATADIR from swift.account.utils import account_listing_response, get_response_headers from swift.common.db import DatabaseConnectionError, DatabaseAlreadyExists -from swift.common.request_helpers import get_param, get_listing_content_type, \ +from swift.common.request_helpers import get_param, \ split_and_validate_path from swift.common.utils import get_logger, hash_path, public, \ Timestamp, storage_directory, config_true_value, \ @@ -33,6 +33,7 @@ from swift.common.constraints import valid_timestamp, check_utf8, check_drive from swift.common import constraints from swift.common.db_replicator import ReplicatorRpc from swift.common.base_storage_server import BaseStorageServer +from swift.common.middleware import listing_formats from swift.common.swob import HTTPAccepted, HTTPBadRequest, \ HTTPCreated, HTTPForbidden, HTTPInternalServerError, \ HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, \ @@ -167,7 +168,7 @@ class AccountController(BaseStorageServer): def HEAD(self, req): """Handle HTTP HEAD 
request.""" drive, part, account = split_and_validate_path(req, 3) - out_content_type = get_listing_content_type(req) + out_content_type = listing_formats.get_listing_content_type(req) if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account, @@ -201,7 +202,7 @@ class AccountController(BaseStorageServer): constraints.ACCOUNT_LISTING_LIMIT) marker = get_param(req, 'marker', '') end_marker = get_param(req, 'end_marker') - out_content_type = get_listing_content_type(req) + out_content_type = listing_formats.get_listing_content_type(req) if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) diff --git a/swift/account/utils.py b/swift/account/utils.py index 7559d003d4..cf7da27e9b 100644 --- a/swift/account/utils.py +++ b/swift/account/utils.py @@ -14,8 +14,8 @@ # limitations under the License. import json -from xml.sax import saxutils +from swift.common.middleware import listing_formats from swift.common.swob import HTTPOk, HTTPNoContent from swift.common.utils import Timestamp from swift.common.storage_policy import POLICIES @@ -78,43 +78,27 @@ def account_listing_response(account, req, response_content_type, broker=None, account_list = broker.list_containers_iter(limit, marker, end_marker, prefix, delimiter, reverse) - if response_content_type == 'application/json': - data = [] - for (name, object_count, bytes_used, put_timestamp, is_subdir) \ - in account_list: - if is_subdir: - data.append({'subdir': name}) - else: - data.append( - {'name': name, 'count': object_count, - 'bytes': bytes_used, - 'last_modified': Timestamp(put_timestamp).isoformat}) + data = [] + for (name, object_count, bytes_used, put_timestamp, is_subdir) \ + in account_list: + if is_subdir: + data.append({'subdir': name.decode('utf8')}) + else: + data.append( + {'name': name.decode('utf8'), 'count': object_count, + 'bytes': 
bytes_used, + 'last_modified': Timestamp(put_timestamp).isoformat}) + if response_content_type.endswith('/xml'): + account_list = listing_formats.account_to_xml(data, account) + ret = HTTPOk(body=account_list, request=req, headers=resp_headers) + elif response_content_type.endswith('/json'): account_list = json.dumps(data) - elif response_content_type.endswith('/xml'): - output_list = ['', - '' % saxutils.quoteattr(account)] - for (name, object_count, bytes_used, put_timestamp, is_subdir) \ - in account_list: - if is_subdir: - output_list.append( - '' % saxutils.quoteattr(name)) - else: - item = '%s%s' \ - '%s%s' \ - '' % \ - (saxutils.escape(name), object_count, - bytes_used, Timestamp(put_timestamp).isoformat) - output_list.append(item) - output_list.append('') - account_list = '\n'.join(output_list) + ret = HTTPOk(body=account_list, request=req, headers=resp_headers) + elif data: + account_list = listing_formats.listing_to_text(data) + ret = HTTPOk(body=account_list, request=req, headers=resp_headers) else: - if not account_list: - resp = HTTPNoContent(request=req, headers=resp_headers) - resp.content_type = response_content_type - resp.charset = 'utf-8' - return resp - account_list = '\n'.join(r[0] for r in account_list) + '\n' - ret = HTTPOk(body=account_list, request=req, headers=resp_headers) + ret = HTTPNoContent(request=req, headers=resp_headers) ret.content_type = response_content_type ret.charset = 'utf-8' return ret diff --git a/swift/common/constraints.py b/swift/common/constraints.py index e0a0851fae..bb8fefcd88 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -105,11 +105,6 @@ reload_constraints() MAX_BUFFERED_SLO_SEGMENTS = 10000 -#: Query string format= values to their corresponding content-type values -FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json', - 'xml': 'application/xml'} - - # By default the maximum number of allowed headers depends on the number of max # allowed metadata settings plus a 
default value of 36 for swift internally # generated headers and regular http headers. If for some reason this is not diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 71b3d0799b..fad4440f64 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -88,19 +88,20 @@ def _get_direct_account_container(path, stype, node, part, Do not use directly use the get_direct_account or get_direct_container instead. """ - qs = 'format=json' + params = ['format=json'] if marker: - qs += '&marker=%s' % quote(marker) + params.append('marker=%s' % quote(marker)) if limit: - qs += '&limit=%d' % limit + params.append('limit=%d' % limit) if prefix: - qs += '&prefix=%s' % quote(prefix) + params.append('prefix=%s' % quote(prefix)) if delimiter: - qs += '&delimiter=%s' % quote(delimiter) + params.append('delimiter=%s' % quote(delimiter)) if end_marker: - qs += '&end_marker=%s' % quote(end_marker) + params.append('end_marker=%s' % quote(end_marker)) if reverse: - qs += '&reverse=%s' % quote(reverse) + params.append('reverse=%s' % quote(reverse)) + qs = '&'.join(params) with Timeout(conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, 'GET', path, query_string=qs, diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index 6eda3924ce..462491f8c0 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -772,12 +772,14 @@ class SimpleClient(object): if name: url = '%s/%s' % (url.rstrip('/'), quote(name)) else: - url += '?format=json' + params = ['format=json'] if prefix: - url += '&prefix=%s' % prefix + params.append('prefix=%s' % prefix) if marker: - url += '&marker=%s' % quote(marker) + params.append('marker=%s' % quote(marker)) + + url += '?' 
+ '&'.join(params) req = urllib2.Request(url, headers=headers, data=contents) if proxy: diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py index 3ae17e4d22..c8e78a59e4 100644 --- a/swift/common/middleware/crypto/decrypter.py +++ b/swift/common/middleware/crypto/decrypter.py @@ -15,7 +15,6 @@ import base64 import json -import xml.etree.cElementTree as ElementTree from swift import gettext_ as _ from swift.common.http import is_success @@ -23,7 +22,7 @@ from swift.common.middleware.crypto.crypto_utils import CryptoWSGIContext, \ load_crypto_meta, extract_crypto_meta, Crypto from swift.common.exceptions import EncryptionException from swift.common.request_helpers import get_object_transient_sysmeta, \ - get_listing_content_type, get_sys_meta_prefix, get_user_meta_prefix + get_sys_meta_prefix, get_user_meta_prefix from swift.common.swob import Request, HTTPException, HTTPInternalServerError from swift.common.utils import get_logger, config_true_value, \ parse_content_range, closing_if_possible, parse_content_type, \ @@ -352,15 +351,12 @@ class DecrypterContContext(BaseDecrypterContext): if is_success(self._get_status_int()): # only decrypt body of 2xx responses - out_content_type = get_listing_content_type(req) - if out_content_type == 'application/json': - handler = self.process_json_resp - keys = self.get_decryption_keys(req) - elif out_content_type.endswith('/xml'): - handler = self.process_xml_resp - keys = self.get_decryption_keys(req) - else: - handler = keys = None + handler = keys = None + for header, value in self._response_headers: + if header.lower() == 'content-type' and \ + value.split(';', 1)[0] == 'application/json': + handler = self.process_json_resp + keys = self.get_decryption_keys(req) if handler and keys: try: @@ -398,24 +394,6 @@ class DecrypterContContext(BaseDecrypterContext): obj_dict['hash'] = self.decrypt_value_with_meta(ciphertext, key) return obj_dict - def process_xml_resp(self, key, 
resp_iter): - """ - Parses xml body listing and decrypt encrypted entries. Updates - Content-Length header with new body length and return a body iter. - """ - with closing_if_possible(resp_iter): - resp_body = ''.join(resp_iter) - tree = ElementTree.fromstring(resp_body) - for elem in tree.iter('hash'): - ciphertext = elem.text.encode('utf8') - plain = self.decrypt_value_with_meta(ciphertext, key) - elem.text = plain.decode('utf8') - new_body = ElementTree.tostring(tree, encoding='UTF-8').replace( - "", - '', 1) - self.update_content_length(len(new_body)) - return [new_body] - class Decrypter(object): """Middleware for decrypting data and user metadata.""" diff --git a/swift/common/middleware/dlo.py b/swift/common/middleware/dlo.py index e7bb3feb5d..21752ba26e 100644 --- a/swift/common/middleware/dlo.py +++ b/swift/common/middleware/dlo.py @@ -151,7 +151,7 @@ class GetContext(WSGIContext): method='GET', headers={'x-auth-token': req.headers.get('x-auth-token')}, agent=('%(orig)s ' + 'DLO MultipartGET'), swift_source='DLO') - con_req.query_string = 'format=json&prefix=%s' % quote(prefix) + con_req.query_string = 'prefix=%s' % quote(prefix) if marker: con_req.query_string += '&marker=%s' % quote(marker) diff --git a/swift/common/middleware/listing_formats.py b/swift/common/middleware/listing_formats.py new file mode 100644 index 0000000000..53d5070429 --- /dev/null +++ b/swift/common/middleware/listing_formats.py @@ -0,0 +1,211 @@ +# Copyright (c) 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import six +from xml.etree.cElementTree import Element, SubElement, tostring + +from swift.common.constraints import valid_api_version +from swift.common.http import HTTP_NO_CONTENT +from swift.common.request_helpers import get_param +from swift.common.swob import HTTPException, HTTPNotAcceptable, Request, \ + RESPONSE_REASONS + + +#: Mapping of query string ``format=`` values to their corresponding +#: content-type values. +FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json', + 'xml': 'application/xml'} +#: Maximum size of a valid JSON container listing body. If we receive +#: a container listing response larger than this, assume it's a staticweb +#: response and pass it on to the client. +# Default max object length is 1024, default container listing limit is 1e4; +# add a fudge factor for things like hash, last_modified, etc. +MAX_CONTAINER_LISTING_CONTENT_LENGTH = 1024 * 10000 * 2 + + +def get_listing_content_type(req): + """ + Determine the content type to use for an account or container listing + response. + + :param req: request object + :returns: content type as a string (e.g. 
text/plain, application/json) + :raises HTTPNotAcceptable: if the requested content type is not acceptable + :raises HTTPBadRequest: if the 'format' query param is provided and + not valid UTF-8 + """ + query_format = get_param(req, 'format') + if query_format: + req.accept = FORMAT2CONTENT_TYPE.get( + query_format.lower(), FORMAT2CONTENT_TYPE['plain']) + out_content_type = req.accept.best_match( + ['text/plain', 'application/json', 'application/xml', 'text/xml']) + if not out_content_type: + raise HTTPNotAcceptable(request=req) + return out_content_type + + +def account_to_xml(listing, account_name): + doc = Element('account', name=account_name.decode('utf-8')) + doc.text = '\n' + for record in listing: + if 'subdir' in record: + name = record.pop('subdir') + sub = SubElement(doc, 'subdir', name=name) + else: + sub = SubElement(doc, 'container') + for field in ('name', 'count', 'bytes', 'last_modified'): + SubElement(sub, field).text = six.text_type( + record.pop(field)) + sub.tail = '\n' + return tostring(doc, encoding='UTF-8').replace( + "", + '', 1) + + +def container_to_xml(listing, base_name): + doc = Element('container', name=base_name.decode('utf-8')) + for record in listing: + if 'subdir' in record: + name = record.pop('subdir') + sub = SubElement(doc, 'subdir', name=name) + SubElement(sub, 'name').text = name + else: + sub = SubElement(doc, 'object') + for field in ('name', 'hash', 'bytes', 'content_type', + 'last_modified'): + SubElement(sub, field).text = six.text_type( + record.pop(field)) + return tostring(doc, encoding='UTF-8').replace( + "", + '', 1) + + +def listing_to_text(listing): + def get_lines(): + for item in listing: + if 'name' in item: + yield item['name'].encode('utf-8') + b'\n' + else: + yield item['subdir'].encode('utf-8') + b'\n' + return b''.join(get_lines()) + + +class ListingFilter(object): + def __init__(self, app): + self.app = app + + def __call__(self, env, start_response): + req = Request(env) + try: + # account and container 
only + version, acct, cont = req.split_path(2, 3) + except ValueError: + return self.app(env, start_response) + + if not valid_api_version(version) or req.method not in ('GET', 'HEAD'): + return self.app(env, start_response) + + # OK, definitely have an account/container request. + # Get the desired content-type, then force it to a JSON request. + try: + out_content_type = get_listing_content_type(req) + except HTTPException as err: + return err(env, start_response) + + params = req.params + params['format'] = 'json' + req.params = params + + status, headers, resp_iter = req.call_application(self.app) + + header_to_index = {} + resp_content_type = resp_length = None + for i, (header, value) in enumerate(headers): + header = header.lower() + if header == 'content-type': + header_to_index[header] = i + resp_content_type = value.partition(';')[0] + elif header == 'content-length': + header_to_index[header] = i + resp_length = int(value) + + if not status.startswith('200 '): + start_response(status, headers) + return resp_iter + + if resp_content_type != 'application/json': + start_response(status, headers) + return resp_iter + + if resp_length is None or \ + resp_length > MAX_CONTAINER_LISTING_CONTENT_LENGTH: + start_response(status, headers) + return resp_iter + + def set_header(header, value): + if value is None: + del headers[header_to_index[header]] + else: + headers[header_to_index[header]] = ( + headers[header_to_index[header]][0], str(value)) + + if req.method == 'HEAD': + set_header('content-type', out_content_type + '; charset=utf-8') + set_header('content-length', None) # don't know, can't determine + start_response(status, headers) + return resp_iter + + body = b''.join(resp_iter) + try: + listing = json.loads(body) + # Do a couple sanity checks + if not isinstance(listing, list): + raise ValueError + if not all(isinstance(item, dict) for item in listing): + raise ValueError + except ValueError: + # Static web listing that's returning invalid JSON? 
+ # Just pass it straight through; that's about all we *can* do. + start_response(status, headers) + return [body] + + try: + if out_content_type.endswith('/xml'): + if cont: + body = container_to_xml(listing, cont) + else: + body = account_to_xml(listing, acct) + elif out_content_type == 'text/plain': + body = listing_to_text(listing) + # else, json -- we continue down here to be sure we set charset + except KeyError: + # listing was in a bad format -- funky static web listing?? + start_response(status, headers) + return [body] + + if not body: + status = '%s %s' % (HTTP_NO_CONTENT, + RESPONSE_REASONS[HTTP_NO_CONTENT][0]) + + set_header('content-type', out_content_type + '; charset=utf-8') + set_header('content-length', len(body)) + start_response(status, headers) + return [body] + + +def filter_factory(global_conf, **local_conf): + return ListingFilter diff --git a/swift/common/middleware/staticweb.py b/swift/common/middleware/staticweb.py index 786aef388b..d01c753b34 100644 --- a/swift/common/middleware/staticweb.py +++ b/swift/common/middleware/staticweb.py @@ -260,7 +260,7 @@ class _StaticWebContext(WSGIContext): env, 'GET', '/%s/%s/%s' % ( self.version, self.account, self.container), self.agent, swift_source='SW') - tmp_env['QUERY_STRING'] = 'delimiter=/&format=json' + tmp_env['QUERY_STRING'] = 'delimiter=/' if prefix: tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix) else: @@ -465,8 +465,8 @@ class _StaticWebContext(WSGIContext): env, 'GET', '/%s/%s/%s' % ( self.version, self.account, self.container), self.agent, swift_source='SW') - tmp_env['QUERY_STRING'] = 'limit=1&format=json&delimiter' \ - '=/&limit=1&prefix=%s' % quote(self.obj + '/') + tmp_env['QUERY_STRING'] = 'limit=1&delimiter=/&prefix=%s' % ( + quote(self.obj + '/'), ) resp = self._app_call(tmp_env) body = ''.join(resp) if not is_success(self._get_status_int()) or not body or \ diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index 
a21c95620c..31ec3ab44f 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -329,8 +329,7 @@ class VersionedWritesContext(WSGIContext): env, method='GET', swift_source='VW', path='/v1/%s/%s' % (account_name, lcontainer)) lreq.environ['QUERY_STRING'] = \ - 'format=json&prefix=%s&marker=%s' % ( - quote(lprefix), quote(marker)) + 'prefix=%s&marker=%s' % (quote(lprefix), quote(marker)) if end_marker: lreq.environ['QUERY_STRING'] += '&end_marker=%s' % ( quote(end_marker)) diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index 5fdf346ac1..5caa73c16c 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -31,10 +31,9 @@ from swift.common.header_key_dict import HeaderKeyDict from swift import gettext_ as _ from swift.common.storage_policy import POLICIES -from swift.common.constraints import FORMAT2CONTENT_TYPE from swift.common.exceptions import ListingIterError, SegmentError from swift.common.http import is_success -from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable, \ +from swift.common.swob import HTTPBadRequest, \ HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator from swift.common.utils import split_path, validate_device_partition, \ close_if_possible, maybe_multipart_byteranges_to_document_iters, \ @@ -70,28 +69,6 @@ def get_param(req, name, default=None): return value -def get_listing_content_type(req): - """ - Determine the content type to use for an account or container listing - response. - - :param req: request object - :returns: content type as a string (e.g. 
text/plain, application/json) - :raises HTTPNotAcceptable: if the requested content type is not acceptable - :raises HTTPBadRequest: if the 'format' query param is provided and - not valid UTF-8 - """ - query_format = get_param(req, 'format') - if query_format: - req.accept = FORMAT2CONTENT_TYPE.get( - query_format.lower(), FORMAT2CONTENT_TYPE['plain']) - out_content_type = req.accept.best_match( - ['text/plain', 'application/json', 'application/xml', 'text/xml']) - if not out_content_type: - raise HTTPNotAcceptable(request=req) - return out_content_type - - def get_name_and_placement(request, minsegs=1, maxsegs=None, rest_with_last=False): """ diff --git a/swift/container/server.py b/swift/container/server.py index 0c58089c10..53a85b926f 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -19,7 +19,6 @@ import time import traceback import math from swift import gettext_ as _ -from xml.etree.cElementTree import Element, SubElement, tostring from eventlet import Timeout @@ -29,7 +28,7 @@ from swift.container.backend import ContainerBroker, DATADIR from swift.container.replicator import ContainerReplicatorRpc from swift.common.db import DatabaseAlreadyExists from swift.common.container_sync_realms import ContainerSyncRealms -from swift.common.request_helpers import get_param, get_listing_content_type, \ +from swift.common.request_helpers import get_param, \ split_and_validate_path, is_sys_or_user_meta from swift.common.utils import get_logger, hash_path, public, \ Timestamp, storage_directory, validate_sync_to, \ @@ -40,6 +39,7 @@ from swift.common import constraints from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout from swift.common.http import HTTP_NOT_FOUND, is_success +from swift.common.middleware import listing_formats from swift.common.storage_policy import POLICIES from swift.common.base_storage_server import BaseStorageServer from swift.common.header_key_dict import HeaderKeyDict @@ 
-418,7 +418,7 @@ class ContainerController(BaseStorageServer): """Handle HTTP HEAD request.""" drive, part, account, container, obj = split_and_validate_path( req, 4, 5, True) - out_content_type = get_listing_content_type(req) + out_content_type = listing_formats.get_listing_content_type(req) if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, @@ -451,8 +451,8 @@ class ContainerController(BaseStorageServer): """ (name, created, size, content_type, etag) = record[:5] if content_type is None: - return {'subdir': name} - response = {'bytes': size, 'hash': etag, 'name': name, + return {'subdir': name.decode('utf8')} + response = {'bytes': size, 'hash': etag, 'name': name.decode('utf8'), 'content_type': content_type} response['last_modified'] = Timestamp(created).isoformat override_bytes_from_content_type(response, logger=self.logger) @@ -482,7 +482,7 @@ class ContainerController(BaseStorageServer): request=req, body='Maximum limit is %d' % constraints.CONTAINER_LISTING_LIMIT) - out_content_type = get_listing_content_type(req) + out_content_type = listing_formats.get_listing_content_type(req) if not check_drive(self.root, drive, self.mount_check): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_container_broker(drive, part, account, container, @@ -504,36 +504,20 @@ class ContainerController(BaseStorageServer): if value and (key.lower() in self.save_headers or is_sys_or_user_meta('container', key)): resp_headers[key] = value - ret = Response(request=req, headers=resp_headers, - content_type=out_content_type, charset='utf-8') - if out_content_type == 'application/json': - ret.body = json.dumps([self.update_data_record(record) - for record in container_list]) - elif out_content_type.endswith('/xml'): - doc = Element('container', name=container.decode('utf-8')) - for obj in container_list: - record = 
self.update_data_record(obj) - if 'subdir' in record: - name = record['subdir'].decode('utf-8') - sub = SubElement(doc, 'subdir', name=name) - SubElement(sub, 'name').text = name - else: - obj_element = SubElement(doc, 'object') - for field in ["name", "hash", "bytes", "content_type", - "last_modified"]: - SubElement(obj_element, field).text = str( - record.pop(field)).decode('utf-8') - for field in sorted(record): - SubElement(obj_element, field).text = str( - record[field]).decode('utf-8') - ret.body = tostring(doc, encoding='UTF-8').replace( - "", - '', 1) + listing = [self.update_data_record(record) + for record in container_list] + if out_content_type.endswith('/xml'): + body = listing_formats.container_to_xml(listing, container) + elif out_content_type.endswith('/json'): + body = json.dumps(listing) else: - if not container_list: - return HTTPNoContent(request=req, headers=resp_headers) - ret.body = '\n'.join(rec[0] for rec in container_list) + '\n' + body = listing_formats.listing_to_text(listing) + + ret = Response(request=req, headers=resp_headers, body=body, + content_type=out_content_type, charset='utf-8') ret.last_modified = math.ceil(float(resp_headers['X-PUT-Timestamp'])) + if not ret.body: + ret.status_int = 204 return ret @public diff --git a/swift/proxy/controllers/account.py b/swift/proxy/controllers/account.py index 6fc94a9891..7a42c57748 100644 --- a/swift/proxy/controllers/account.py +++ b/swift/proxy/controllers/account.py @@ -18,7 +18,6 @@ from six.moves.urllib.parse import unquote from swift import gettext_ as _ from swift.account.utils import account_listing_response -from swift.common.request_helpers import get_listing_content_type from swift.common.middleware.acl import parse_acl, format_acl from swift.common.utils import public from swift.common.constraints import check_metadata @@ -26,6 +25,7 @@ from swift.common import constraints from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE from swift.proxy.controllers.base import 
Controller, clear_info_cache, \ set_info_cache +from swift.common.middleware import listing_formats from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed from swift.common.request_helpers import get_sys_meta_prefix @@ -67,6 +67,9 @@ class AccountController(Controller): concurrency = self.app.account_ring.replica_count \ if self.app.concurrent_gets else 1 node_iter = self.app.iter_nodes(self.app.account_ring, partition) + params = req.params + params['format'] = 'json' + req.params = params resp = self.GETorHEAD_base( req, _('Account'), node_iter, partition, req.swift_entity_path.rstrip('/'), concurrency) @@ -86,8 +89,10 @@ class AccountController(Controller): # creates the account if necessary. If we feed it a perfect # lie, it'll just try to create the container without # creating the account, and that'll fail. - resp = account_listing_response(self.account_name, req, - get_listing_content_type(req)) + req.params = {} # clear our format override + resp = account_listing_response( + self.account_name, req, + listing_formats.get_listing_content_type(req)) resp.headers['X-Backend-Fake-Account-Listing'] = 'yes' # Cache this. 
We just made a request to a storage node and got diff --git a/swift/proxy/controllers/container.py b/swift/proxy/controllers/container.py index faa2cdee84..43ecb28dc9 100644 --- a/swift/proxy/controllers/container.py +++ b/swift/proxy/controllers/container.py @@ -100,6 +100,9 @@ class ContainerController(Controller): concurrency = self.app.container_ring.replica_count \ if self.app.concurrent_gets else 1 node_iter = self.app.iter_nodes(self.app.container_ring, part) + params = req.params + params['format'] = 'json' + req.params = params resp = self.GETorHEAD_base( req, _('Container'), node_iter, part, req.swift_entity_path, concurrency) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 4766a8244e..77863fb74a 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -66,16 +66,19 @@ required_filters = [ 'after_fn': lambda pipe: (['catch_errors'] if pipe.startswith('catch_errors') else [])}, + {'name': 'listing_formats', 'after_fn': lambda _junk: [ + 'catch_errors', 'gatekeeper', 'proxy_logging', 'memcache']}, + # Put copy before dlo, slo and versioned_writes + {'name': 'copy', 'after_fn': lambda _junk: [ + 'staticweb', 'tempauth', 'keystoneauth', + 'catch_errors', 'gatekeeper', 'proxy_logging']}, {'name': 'dlo', 'after_fn': lambda _junk: [ 'copy', 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, {'name': 'versioned_writes', 'after_fn': lambda _junk: [ 'slo', 'dlo', 'copy', 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, - # Put copy before dlo, slo and versioned_writes - {'name': 'copy', 'after_fn': lambda _junk: [ - 'staticweb', 'tempauth', 'keystoneauth', - 'catch_errors', 'gatekeeper', 'proxy_logging']}] +] def _label_for_policy(policy): diff --git a/test/unit/common/middleware/crypto/test_decrypter.py b/test/unit/common/middleware/crypto/test_decrypter.py index d38cdb0950..79f1b0384c 100644 --- a/test/unit/common/middleware/crypto/test_decrypter.py +++ 
b/test/unit/common/middleware/crypto/test_decrypter.py @@ -16,7 +16,6 @@ import base64 import json import os import unittest -from xml.dom import minidom import mock @@ -961,138 +960,6 @@ class TestDecrypterContainerRequests(unittest.TestCase): self.assertIn("Cipher must be AES_CTR_256", self.decrypter.logger.get_lines_for_level('error')[0]) - def _assert_element(self, name, expected, element): - self.assertEqual(element.tagName, name) - self._assert_element_contains_dict(expected, element) - - def _assert_element_contains_dict(self, expected, element): - for k, v in expected.items(): - entry = element.getElementsByTagName(k) - self.assertIsNotNone(entry, 'Key %s not found' % k) - actual = entry[0].childNodes[0].nodeValue - self.assertEqual(v, actual, - "Expected %s but got %s for key %s" - % (v, actual, k)) - - def test_GET_container_xml(self): - content_type_1 = u'\uF10F\uD20D\uB30B\u9409' - content_type_2 = 'text/plain; param=foo' - pt_etag1 = 'c6e8196d7f0fff6444b90861fe8d609d' - pt_etag2 = 'ac0374ed4d43635f803c82469d0b5a10' - key = fetch_crypto_keys()['container'] - - fake_body = ''' -\ -test-subdir\ -\ -''' + encrypt_and_append_meta(pt_etag1.encode('utf8'), key) + '''\ -\ -''' + content_type_1 + '''\ -testfile16\ -2015-04-19T02:37:39.601660\ -\ -''' + encrypt_and_append_meta(pt_etag2.encode('utf8'), key) + '''\ -\ -''' + content_type_2 + '''\ -testfile224\ -2015-04-19T02:37:39.684740\ -''' - - resp = self._make_cont_get_req(fake_body, 'xml') - self.assertEqual('200 OK', resp.status) - body = resp.body - self.assertEqual(len(body), int(resp.headers['Content-Length'])) - - tree = minidom.parseString(body) - containers = tree.getElementsByTagName('container') - self.assertEqual(1, len(containers)) - self.assertEqual('testc', - containers[0].attributes.getNamedItem("name").value) - - results = containers[0].childNodes - self.assertEqual(3, len(results)) - - self._assert_element('subdir', {"name": "test-subdir"}, results[0]) - - obj_dict_1 = {"bytes": "16", - 
"last_modified": "2015-04-19T02:37:39.601660", - "hash": pt_etag1, - "name": "testfile", - "content_type": content_type_1} - self._assert_element('object', obj_dict_1, results[1]) - obj_dict_2 = {"bytes": "24", - "last_modified": "2015-04-19T02:37:39.684740", - "hash": pt_etag2, - "name": "testfile2", - "content_type": content_type_2} - self._assert_element('object', obj_dict_2, results[2]) - - def test_GET_container_xml_with_crypto_override(self): - content_type_1 = 'image/jpeg' - content_type_2 = 'text/plain; param=foo' - - fake_body = ''' -\ -c6e8196d7f0fff6444b90861fe8d609d\ -''' + content_type_1 + '''\ -testfile16\ -2015-04-19T02:37:39.601660\ -ac0374ed4d43635f803c82469d0b5a10\ -''' + content_type_2 + '''\ -testfile224\ -2015-04-19T02:37:39.684740\ -''' - - resp = self._make_cont_get_req(fake_body, 'xml', override=True) - - self.assertEqual('200 OK', resp.status) - body = resp.body - self.assertEqual(len(body), int(resp.headers['Content-Length'])) - - tree = minidom.parseString(body) - containers = tree.getElementsByTagName('container') - self.assertEqual(1, len(containers)) - self.assertEqual('testc', - containers[0].attributes.getNamedItem("name").value) - - objs = tree.getElementsByTagName('object') - self.assertEqual(2, len(objs)) - - obj_dict_1 = {"bytes": "16", - "last_modified": "2015-04-19T02:37:39.601660", - "hash": "c6e8196d7f0fff6444b90861fe8d609d", - "name": "testfile", - "content_type": content_type_1} - self._assert_element_contains_dict(obj_dict_1, objs[0]) - obj_dict_2 = {"bytes": "24", - "last_modified": "2015-04-19T02:37:39.684740", - "hash": "ac0374ed4d43635f803c82469d0b5a10", - "name": "testfile2", - "content_type": content_type_2} - self._assert_element_contains_dict(obj_dict_2, objs[1]) - - def test_cont_get_xml_req_with_cipher_mismatch(self): - bad_crypto_meta = fake_get_crypto_meta() - bad_crypto_meta['cipher'] = 'unknown_cipher' - - fake_body = ''' -\ -''' + encrypt_and_append_meta('c6e8196d7f0fff6444b90861fe8d609d', - 
fetch_crypto_keys()['container'], - crypto_meta=bad_crypto_meta) + '''\ -\ -image/jpeg\ -testfile16\ -2015-04-19T02:37:39.601660\ -''' - - resp = self._make_cont_get_req(fake_body, 'xml') - - self.assertEqual('500 Internal Error', resp.status) - self.assertEqual('Error decrypting container listing', resp.body) - self.assertIn("Cipher must be AES_CTR_256", - self.decrypter.logger.get_lines_for_level('error')[0]) - class TestModuleMethods(unittest.TestCase): def test_purge_crypto_sysmeta_headers(self): diff --git a/test/unit/common/middleware/test_dlo.py b/test/unit/common/middleware/test_dlo.py index ce91de5f5c..b0354b4b14 100644 --- a/test/unit/common/middleware/test_dlo.py +++ b/test/unit/common/middleware/test_dlo.py @@ -129,11 +129,11 @@ class DloTestCase(unittest.TestCase): "last_modified": lm, "content_type": "application/png"}] self.app.register( - 'GET', '/v1/AUTH_test/c?format=json', + 'GET', '/v1/AUTH_test/c', swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, json.dumps(full_container_listing)) self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=seg', + 'GET', '/v1/AUTH_test/c?prefix=seg', swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, json.dumps(segs)) @@ -148,11 +148,11 @@ class DloTestCase(unittest.TestCase): 'X-Object-Manifest': 'c/seg_'}, 'manyseg') self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=seg_', + 'GET', '/v1/AUTH_test/c?prefix=seg_', swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, json.dumps(segs[:3])) self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=seg_&marker=seg_03', + 'GET', '/v1/AUTH_test/c?prefix=seg_&marker=seg_03', swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, json.dumps(segs[3:])) @@ -163,7 +163,7 @@ class DloTestCase(unittest.TestCase): 'X-Object-Manifest': 'c/noseg_'}, 'noseg') self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=noseg_', + 'GET', '/v1/AUTH_test/c?prefix=noseg_', swob.HTTPOk, 
{'Content-Type': 'application/json; charset=utf-8'}, json.dumps([])) @@ -278,7 +278,7 @@ class TestDloHeadManifest(DloTestCase): self.assertEqual( self.app.calls, [('HEAD', '/v1/AUTH_test/mancon/manifest-no-segments'), - ('GET', '/v1/AUTH_test/c?format=json&prefix=noseg_')]) + ('GET', '/v1/AUTH_test/c?prefix=noseg_')]) class TestDloGetManifest(DloTestCase): @@ -444,7 +444,7 @@ class TestDloGetManifest(DloTestCase): self.assertEqual( self.app.calls, [('GET', '/v1/AUTH_test/mancon/manifest-many-segments'), - ('GET', '/v1/AUTH_test/c?format=json&prefix=seg_'), + ('GET', '/v1/AUTH_test/c?prefix=seg_'), ('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'), ('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'), ('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')]) @@ -601,7 +601,7 @@ class TestDloGetManifest(DloTestCase): def test_error_listing_container_first_listing_request(self): self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=seg_', + 'GET', '/v1/AUTH_test/c?prefix=seg_', swob.HTTPNotFound, {}, None) req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments', @@ -613,7 +613,7 @@ class TestDloGetManifest(DloTestCase): def test_error_listing_container_second_listing_request(self): self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=seg_&marker=seg_03', + 'GET', '/v1/AUTH_test/c?prefix=seg_&marker=seg_03', swob.HTTPNotFound, {}, None) req = swob.Request.blank('/v1/AUTH_test/mancon/manifest-many-segments', @@ -648,7 +648,7 @@ class TestDloGetManifest(DloTestCase): swob.HTTPOk, {'Content-Length': '0', 'Etag': 'blah', 'X-Object-Manifest': 'c/quotetags'}, None) self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=quotetags', + 'GET', '/v1/AUTH_test/c?prefix=quotetags', swob.HTTPOk, {'Content-Type': 'application/json; charset=utf-8'}, json.dumps([{"hash": "\"abc\"", "bytes": 5, "name": "quotetags1", "last_modified": "2013-11-22T02:42:14.261620", @@ -673,7 +673,7 @@ class TestDloGetManifest(DloTestCase): segs = 
[{"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é1"}, {"hash": md5hex("AAAAA"), "bytes": 5, "name": u"é2"}] self.app.register( - 'GET', '/v1/AUTH_test/c?format=json&prefix=%C3%A9', + 'GET', '/v1/AUTH_test/c?prefix=%C3%A9', swob.HTTPOk, {'Content-Type': 'application/json'}, json.dumps(segs)) @@ -745,7 +745,7 @@ class TestDloGetManifest(DloTestCase): self.assertEqual( self.app.calls, [('GET', '/v1/AUTH_test/mancon/manifest'), - ('GET', '/v1/AUTH_test/c?format=json&prefix=seg'), + ('GET', '/v1/AUTH_test/c?prefix=seg'), ('GET', '/v1/AUTH_test/c/seg_01?multipart-manifest=get'), ('GET', '/v1/AUTH_test/c/seg_02?multipart-manifest=get'), ('GET', '/v1/AUTH_test/c/seg_03?multipart-manifest=get')]) diff --git a/test/unit/common/middleware/test_listing_formats.py b/test/unit/common/middleware/test_listing_formats.py new file mode 100644 index 0000000000..8577d867f6 --- /dev/null +++ b/test/unit/common/middleware/test_listing_formats.py @@ -0,0 +1,345 @@ +# Copyright (c) 2017 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import unittest + +from swift.common.swob import Request, HTTPOk +from swift.common.middleware import listing_formats +from test.unit.common.middleware.helpers import FakeSwift + + +class TestListingFormats(unittest.TestCase): + def setUp(self): + self.fake_swift = FakeSwift() + self.app = listing_formats.ListingFilter(self.fake_swift) + self.fake_account_listing = json.dumps([ + {'name': 'bar', 'bytes': 0, 'count': 0, + 'last_modified': '1970-01-01T00:00:00.000000'}, + {'subdir': 'foo_'}, + ]) + self.fake_container_listing = json.dumps([ + {'name': 'bar', 'hash': 'etag', 'bytes': 0, + 'content_type': 'text/plain', + 'last_modified': '1970-01-01T00:00:00.000000'}, + {'subdir': 'foo/'}, + ]) + + def test_valid_account(self): + self.fake_swift.register('GET', '/v1/a', HTTPOk, { + 'Content-Length': str(len(self.fake_account_listing)), + 'Content-Type': 'application/json'}, self.fake_account_listing) + + req = Request.blank('/v1/a') + resp = req.get_response(self.app) + self.assertEqual(resp.body, 'bar\nfoo_\n') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=txt') + resp = req.get_response(self.app) + self.assertEqual(resp.body, 'bar\nfoo_\n') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=json') + resp = req.get_response(self.app) + self.assertEqual(resp.body, self.fake_account_listing) + self.assertEqual(resp.headers['Content-Type'], + 'application/json; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=xml') + resp = req.get_response(self.app) + self.assertEqual(resp.body.split('\n'), [ + '', + '', + 'bar00' + '1970-01-01T00:00:00.000000' + '', + '', + '', + ]) + 
self.assertEqual(resp.headers['Content-Type'], + 'application/xml; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + def test_valid_container(self): + self.fake_swift.register('GET', '/v1/a/c', HTTPOk, { + 'Content-Length': str(len(self.fake_container_listing)), + 'Content-Type': 'application/json'}, self.fake_container_listing) + + req = Request.blank('/v1/a/c') + resp = req.get_response(self.app) + self.assertEqual(resp.body, 'bar\nfoo/\n') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=txt') + resp = req.get_response(self.app) + self.assertEqual(resp.body, 'bar\nfoo/\n') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=json') + resp = req.get_response(self.app) + self.assertEqual(resp.body, self.fake_container_listing) + self.assertEqual(resp.headers['Content-Type'], + 'application/json; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=xml') + resp = req.get_response(self.app) + self.assertEqual( + resp.body, + '\n' + '' + 'baretag0' + 'text/plain' + '1970-01-01T00:00:00.000000' + '' + 'foo/' + '' + ) + self.assertEqual(resp.headers['Content-Type'], + 'application/xml; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + def test_blank_account(self): + self.fake_swift.register('GET', '/v1/a', HTTPOk, { + 'Content-Length': '2', 'Content-Type': 'application/json'}, '[]') + + req = Request.blank('/v1/a') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '204 No Content') + self.assertEqual(resp.body, '') + self.assertEqual(resp.headers['Content-Type'], + 
'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=txt') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '204 No Content') + self.assertEqual(resp.body, '') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=json') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '200 OK') + self.assertEqual(resp.body, '[]') + self.assertEqual(resp.headers['Content-Type'], + 'application/json; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + req = Request.blank('/v1/a?format=xml') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '200 OK') + self.assertEqual(resp.body.split('\n'), [ + '', + '', + '', + ]) + self.assertEqual(resp.headers['Content-Type'], + 'application/xml; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a?format=json')) + + def test_blank_container(self): + self.fake_swift.register('GET', '/v1/a/c', HTTPOk, { + 'Content-Length': '2', 'Content-Type': 'application/json'}, '[]') + + req = Request.blank('/v1/a/c') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '204 No Content') + self.assertEqual(resp.body, '') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=txt') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '204 No Content') + self.assertEqual(resp.body, '') + self.assertEqual(resp.headers['Content-Type'], + 'text/plain; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=json') + resp = req.get_response(self.app) + 
self.assertEqual(resp.status, '200 OK') + self.assertEqual(resp.body, '[]') + self.assertEqual(resp.headers['Content-Type'], + 'application/json; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + req = Request.blank('/v1/a/c?format=xml') + resp = req.get_response(self.app) + self.assertEqual(resp.status, '200 OK') + self.assertEqual(resp.body.split('\n'), [ + '', + '', + ]) + self.assertEqual(resp.headers['Content-Type'], + 'application/xml; charset=utf-8') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/a/c?format=json')) + + def test_pass_through(self): + def do_test(path): + self.fake_swift.register( + 'GET', path, HTTPOk, { + 'Content-Length': str(len(self.fake_container_listing)), + 'Content-Type': 'application/json'}, + self.fake_container_listing) + req = Request.blank(path + '?format=xml') + resp = req.get_response(self.app) + self.assertEqual(resp.body, self.fake_container_listing) + self.assertEqual(resp.headers['Content-Type'], 'application/json') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', path + '?format=xml')) # query param is unchanged + + do_test('/') + do_test('/v1') + do_test('/auth/v1.0') + do_test('/v1/a/c/o') + + def test_static_web_not_json(self): + body = 'doesnt matter' + self.fake_swift.register( + 'GET', '/v1/staticweb/not-json', HTTPOk, + {'Content-Length': str(len(body)), + 'Content-Type': 'text/plain'}, + body) + + resp = Request.blank('/v1/staticweb/not-json').get_response(self.app) + self.assertEqual(resp.body, body) + self.assertEqual(resp.headers['Content-Type'], 'text/plain') + # We *did* try, though + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/staticweb/not-json?format=json')) + # TODO: add a similar test that has *no* content-type + # FakeSwift seems to make this hard to do + + def test_static_web_not_really_json(self): + body = 'raises ValueError' + self.fake_swift.register( + 'GET', '/v1/staticweb/not-json', HTTPOk, + 
{'Content-Length': str(len(body)), + 'Content-Type': 'application/json'}, + body) + + resp = Request.blank('/v1/staticweb/not-json').get_response(self.app) + self.assertEqual(resp.body, body) + self.assertEqual(resp.headers['Content-Type'], 'application/json') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/staticweb/not-json?format=json')) + + def test_static_web_pretend_to_be_giant_json(self): + body = json.dumps(self.fake_container_listing * 1000000) + self.assertGreater( # sanity + len(body), listing_formats.MAX_CONTAINER_LISTING_CONTENT_LENGTH) + + self.fake_swift.register( + 'GET', '/v1/staticweb/not-json', HTTPOk, + {'Content-Type': 'application/json'}, + body) + + resp = Request.blank('/v1/staticweb/not-json').get_response(self.app) + self.assertEqual(resp.body, body) + self.assertEqual(resp.headers['Content-Type'], 'application/json') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/staticweb/not-json?format=json')) + # TODO: add a similar test for chunked transfers + # (staticweb referencing a DLO that doesn't fit in a single listing?) 
+ + def test_static_web_bad_json(self): + def do_test(body_obj): + body = json.dumps(body_obj) + self.fake_swift.register( + 'GET', '/v1/staticweb/bad-json', HTTPOk, + {'Content-Length': str(len(body)), + 'Content-Type': 'application/json'}, + body) + + def do_sub_test(path): + resp = Request.blank(path).get_response(self.app) + self.assertEqual(resp.body, body) + # NB: no charset is added; we pass through whatever we got + self.assertEqual(resp.headers['Content-Type'], + 'application/json') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/staticweb/bad-json?format=json')) + + do_sub_test('/v1/staticweb/bad-json') + do_sub_test('/v1/staticweb/bad-json?format=txt') + do_sub_test('/v1/staticweb/bad-json?format=xml') + do_sub_test('/v1/staticweb/bad-json?format=json') + + do_test({}) + do_test({'non-empty': 'hash'}) + do_test(None) + do_test(0) + do_test('some string') + do_test([None]) + do_test([0]) + do_test(['some string']) + + def test_static_web_bad_but_not_terrible_json(self): + body = json.dumps([{'no name': 'nor subdir'}]) + self.fake_swift.register( + 'GET', '/v1/staticweb/bad-json', HTTPOk, + {'Content-Length': str(len(body)), + 'Content-Type': 'application/json'}, + body) + + def do_test(path, expect_charset=False): + resp = Request.blank(path).get_response(self.app) + self.assertEqual(resp.body, body) + if expect_charset: + self.assertEqual(resp.headers['Content-Type'], + 'application/json; charset=utf-8') + else: + self.assertEqual(resp.headers['Content-Type'], + 'application/json') + self.assertEqual(self.fake_swift.calls[-1], ( + 'GET', '/v1/staticweb/bad-json?format=json')) + + do_test('/v1/staticweb/bad-json') + do_test('/v1/staticweb/bad-json?format=txt') + do_test('/v1/staticweb/bad-json?format=xml') + # The response we get is *just close enough* to being valid that we + # assume it is and slap on the missing charset. If you set up staticweb + # to serve back such responses, your clients are already hosed. 
+ do_test('/v1/staticweb/bad-json?format=json', expect_charset=True) diff --git a/test/unit/common/middleware/test_staticweb.py b/test/unit/common/middleware/test_staticweb.py index e25028cc1d..ba6d1a705d 100644 --- a/test/unit/common/middleware/test_staticweb.py +++ b/test/unit/common/middleware/test_staticweb.py @@ -279,7 +279,7 @@ class FakeApp(object): if ((env['PATH_INFO'] in ( '/v1/a/c3', '/v1/a/c4', '/v1/a/c8', '/v1/a/c9')) and (env['QUERY_STRING'] == - 'delimiter=/&format=json&prefix=subdir/')): + 'delimiter=/&prefix=subdir/')): headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'X-Container-Read': '.r:*', @@ -296,14 +296,14 @@ class FakeApp(object): {"subdir":"subdir3/subsubdir/"}] '''.strip() elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \ - 'delimiter=/&format=json&prefix=subdiry/': + 'delimiter=/&prefix=subdiry/': headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'X-Container-Read': '.r:*', 'Content-Type': 'application/json; charset=utf-8'}) body = '[]' elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \ - 'limit=1&format=json&delimiter=/&limit=1&prefix=subdirz/': + 'limit=1&delimiter=/&prefix=subdirz/': headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'X-Container-Read': '.r:*', @@ -315,7 +315,7 @@ class FakeApp(object): "last_modified":"2011-03-24T04:27:52.709100"}] '''.strip() elif env['PATH_INFO'] == '/v1/a/c6' and env['QUERY_STRING'] == \ - 'limit=1&format=json&delimiter=/&limit=1&prefix=subdir/': + 'limit=1&delimiter=/&prefix=subdir/': headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'X-Container-Read': '.r:*', @@ -329,9 +329,9 @@ class FakeApp(object): '''.strip() elif env['PATH_INFO'] == '/v1/a/c10' and ( env['QUERY_STRING'] == - 'delimiter=/&format=json&prefix=%E2%98%83/' or + 'delimiter=/&prefix=%E2%98%83/' or env['QUERY_STRING'] == - 
'delimiter=/&format=json&prefix=%E2%98%83/%E2%98%83/'): + 'delimiter=/&prefix=%E2%98%83/%E2%98%83/'): headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'X-Container-Read': '.r:*', @@ -346,7 +346,7 @@ class FakeApp(object): '''.strip() elif 'prefix=' in env['QUERY_STRING']: return Response(status='204 No Content')(env, start_response) - elif 'format=json' in env['QUERY_STRING']: + else: headers.update({'X-Container-Object-Count': '12', 'X-Container-Bytes-Used': '73763', 'Content-Type': 'application/json; charset=utf-8'}) @@ -397,15 +397,6 @@ class FakeApp(object): "content_type":"text/plain", "last_modified":"2011-03-24T04:27:52.935560"}] '''.strip() - else: - headers.update({'X-Container-Object-Count': '12', - 'X-Container-Bytes-Used': '73763', - 'Content-Type': 'text/plain; charset=utf-8'}) - body = '\n'.join(['401error.html', '404error.html', 'index.html', - 'listing.css', 'one.txt', 'subdir/1.txt', - 'subdir/2.txt', u'subdir/\u2603.txt', 'subdir2', - 'subdir3/subsubdir/index.html', 'two.txt', - u'\u2603/\u2603/one.txt']) return Response(status='200 Ok', headers=headers, body=body)(env, start_response) @@ -481,8 +472,8 @@ class TestStaticWeb(unittest.TestCase): def test_container2(self): resp = Request.blank('/v1/a/c2').get_response(self.test_staticweb) self.assertEqual(resp.status_int, 200) - self.assertEqual(resp.content_type, 'text/plain') - self.assertEqual(len(resp.body.split('\n')), + self.assertEqual(resp.content_type, 'application/json') + self.assertEqual(len(json.loads(resp.body)), int(resp.headers['x-container-object-count'])) def test_container2_web_mode_explicitly_off(self): @@ -490,8 +481,8 @@ class TestStaticWeb(unittest.TestCase): '/v1/a/c2', headers={'x-web-mode': 'false'}).get_response(self.test_staticweb) self.assertEqual(resp.status_int, 200) - self.assertEqual(resp.content_type, 'text/plain') - self.assertEqual(len(resp.body.split('\n')), + self.assertEqual(resp.content_type, 'application/json') + 
self.assertEqual(len(json.loads(resp.body)), int(resp.headers['x-container-object-count'])) def test_container2_web_mode_explicitly_on(self): @@ -507,7 +498,7 @@ class TestStaticWeb(unittest.TestCase): def test_container2json(self): resp = Request.blank( - '/v1/a/c2?format=json').get_response(self.test_staticweb) + '/v1/a/c2').get_response(self.test_staticweb) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.content_type, 'application/json') self.assertEqual(len(json.loads(resp.body)), @@ -515,7 +506,7 @@ class TestStaticWeb(unittest.TestCase): def test_container2json_web_mode_explicitly_off(self): resp = Request.blank( - '/v1/a/c2?format=json', + '/v1/a/c2', headers={'x-web-mode': 'false'}).get_response(self.test_staticweb) self.assertEqual(resp.status_int, 200) self.assertEqual(resp.content_type, 'application/json') @@ -524,7 +515,7 @@ class TestStaticWeb(unittest.TestCase): def test_container2json_web_mode_explicitly_on(self): resp = Request.blank( - '/v1/a/c2?format=json', + '/v1/a/c2', headers={'x-web-mode': 'true'}).get_response(self.test_staticweb) self.assertEqual(resp.status_int, 404) diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index 007859b430..08d9b649bb 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -584,7 +584,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPNotFound, {}, None) cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) @@ -600,7 +600,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(['VW', None], self.app.swift_sources) self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) - prefix_listing_prefix = 
'/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('DELETE', '/v1/a/c/o'), @@ -611,7 +611,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[]') cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) @@ -624,7 +624,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('DELETE', '/v1/a/c/o'), @@ -633,7 +633,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_delete_latest_version_no_marker_success(self): self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -672,7 +672,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): req_headers = self.app.headers[-1] self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', '/v1/a/ver_cont/001o/2'), @@ -683,7 +683,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_delete_latest_version_restores_marker_success(self): self.app.register( 'GET', - 
'/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "x", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -731,7 +731,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): # in the base versioned container. self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -766,7 +766,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('HEAD', '/v1/a/c/o'), @@ -787,7 +787,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_delete_latest_version_doubled_up_markers_success(self): self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + 'GET', '/v1/a/ver_cont?prefix=001o/' '&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "x", ' @@ -905,7 +905,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -931,7 +931,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 
'marker=&reverse=on'), ('GET', '/v1/a/ver_cont/001o/1'), @@ -942,7 +942,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): def test_DELETE_on_expired_versioned_object(self): self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -979,7 +979,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertRequestEqual(req, self.authorized[0]) self.assertEqual(5, self.app.call_count) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', '/v1/a/ver_cont/001o/2'), @@ -992,7 +992,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): authorize_call = [] self.app.register( 'GET', - '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "y", ' '"last_modified": "2014-11-21T14:23:02.206740", ' @@ -1021,7 +1021,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(authorize_call), 1) self.assertRequestEqual(req, authorize_call[0]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ]) @@ -1058,7 +1058,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "x", ' @@ -1072,7 +1072,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): '"name": "001o/2", ' 
'"content_type": "text/plain"}]') self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + 'GET', '/v1/a/ver_cont?prefix=001o/' '&marker=001o/2', swob.HTTPNotFound, {}, None) self.app.register( @@ -1103,7 +1103,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): req_headers = self.app.headers[-1] self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', prefix_listing_prefix + 'marker=001o/2'), @@ -1114,7 +1114,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): def test_DELETE_on_expired_versioned_object(self): self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "x", ' @@ -1128,7 +1128,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): '"name": "001o/2", ' '"content_type": "text/plain"}]') self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + 'GET', '/v1/a/ver_cont?prefix=001o/' '&marker=001o/2', swob.HTTPNotFound, {}, None) @@ -1156,7 +1156,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.assertRequestEqual(req, self.authorized[0]) self.assertEqual(6, self.app.call_count) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', prefix_listing_prefix + 'marker=001o/2'), @@ -1171,7 +1171,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', 
'/v1/a/ver_cont?prefix=001o/&' 'marker=&reverse=on', swob.HTTPOk, {}, '[{"hash": "x", ' @@ -1185,7 +1185,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): '"name": "001o/2", ' '"content_type": "text/plain"}]') self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + 'GET', '/v1/a/ver_cont?prefix=001o/' '&marker=001o/2', swob.HTTPNotFound, {}, None) self.app.register( @@ -1206,7 +1206,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): self.assertEqual(status, '403 Forbidden') self.assertEqual(len(authorize_call), 1) self.assertRequestEqual(req, authorize_call[0]) - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', prefix_listing_prefix + 'marker=001o/2'), @@ -1223,7 +1223,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): # first container server can reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&reverse=on', swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[2:])))) # but all objects are already gone @@ -1239,21 +1239,21 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): # second container server can't reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/2&reverse=on', swob.HTTPOk, {}, json.dumps(old_versions[3:])) # subsequent requests shouldn't reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&end_marker=001o/2', swob.HTTPOk, {}, json.dumps(old_versions[:1])) self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/0&end_marker=001o/2', swob.HTTPOk, {}, 
json.dumps(old_versions[1:2])) self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/1&end_marker=001o/2', swob.HTTPOk, {}, '[]') self.app.register( @@ -1272,7 +1272,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): 'CONTENT_LENGTH': '0'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '204 No Content') - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', '/v1/a/ver_cont/001o/4'), @@ -1298,7 +1298,7 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): # first container server can reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&reverse=on', swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[-2:])))) # but both objects are already gone @@ -1311,21 +1311,21 @@ class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): # second container server can't reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/3&reverse=on', swob.HTTPOk, {}, json.dumps(old_versions[4:])) # subsequent requests shouldn't reverse self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=&end_marker=001o/3', swob.HTTPOk, {}, json.dumps(old_versions[:2])) self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/1&end_marker=001o/3', swob.HTTPOk, {}, json.dumps(old_versions[2:3])) self.app.register( - 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&' + 'GET', '/v1/a/ver_cont?prefix=001o/&' 'marker=001o/2&end_marker=001o/3', swob.HTTPOk, {}, '[]') self.app.register( @@ -1344,7 +1344,7 @@ class 
VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): 'CONTENT_LENGTH': '0'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '204 No Content') - prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&' self.assertEqual(self.app.calls, [ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ('GET', '/v1/a/ver_cont/001o/4'), diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index f6c475e5e5..3d7d772ca9 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -136,22 +136,26 @@ class TestWSGI(unittest.TestCase): _fake_rings(t) app, conf, logger, log_name = wsgi.init_request_processor( conf_file, 'proxy-server') - # verify pipeline is catch_errors -> dlo -> proxy-server + # verify pipeline is: catch_errors -> gatekeeper -> listing_formats -> + # copy -> dlo -> proxy-server expected = swift.common.middleware.catch_errors.CatchErrorMiddleware - self.assertTrue(isinstance(app, expected)) + self.assertIsInstance(app, expected) app = app.app expected = swift.common.middleware.gatekeeper.GatekeeperMiddleware - self.assertTrue(isinstance(app, expected)) + self.assertIsInstance(app, expected) app = app.app - expected = \ - swift.common.middleware.copy.ServerSideCopyMiddleware + expected = swift.common.middleware.listing_formats.ListingFilter + self.assertIsInstance(app, expected) + + app = app.app + expected = swift.common.middleware.copy.ServerSideCopyMiddleware self.assertIsInstance(app, expected) app = app.app expected = swift.common.middleware.dlo.DynamicLargeObject - self.assertTrue(isinstance(app, expected)) + self.assertIsInstance(app, expected) app = app.app expected = \ @@ -160,7 +164,7 @@ class TestWSGI(unittest.TestCase): app = app.app expected = swift.proxy.server.Application - self.assertTrue(isinstance(app, expected)) + self.assertIsInstance(app, expected) # config settings applied to app instance 
self.assertEqual(0.2, app.conn_timeout) # appconfig returns values from 'proxy-server' section @@ -1478,6 +1482,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1510,6 +1515,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1549,6 +1555,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), ['swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.slo', 'swift.common.middleware.dlo', @@ -1649,6 +1656,7 @@ class TestPipelineModification(unittest.TestCase): self.assertEqual(self.pipeline_modules(app), [ 'swift.common.middleware.catch_errors', 'swift.common.middleware.gatekeeper', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1664,6 +1672,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.gatekeeper', 'swift.common.middleware.healthcheck', 'swift.common.middleware.catch_errors', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1678,6 +1687,7 @@ class TestPipelineModification(unittest.TestCase): 'swift.common.middleware.healthcheck', 'swift.common.middleware.catch_errors', 
'swift.common.middleware.gatekeeper', + 'swift.common.middleware.listing_formats', 'swift.common.middleware.copy', 'swift.common.middleware.dlo', 'swift.common.middleware.versioned_writes', @@ -1713,7 +1723,7 @@ class TestPipelineModification(unittest.TestCase): tempdir, policy.ring_name + '.ring.gz') app = wsgi.loadapp(conf_path) - proxy_app = app.app.app.app.app.app.app + proxy_app = app.app.app.app.app.app.app.app self.assertEqual(proxy_app.account_ring.serialized_path, account_ring_path) self.assertEqual(proxy_app.container_ring.serialized_path, diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index f0339c7852..502f73948e 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -2112,6 +2113,54 @@ class TestContainerController(unittest.TestCase): resp.content_type, 'application/json', 'Invalid content_type for Accept: %s' % accept) + def test_GET_non_ascii(self): + # make a container + req = Request.blank( + '/sda1/p/a/jsonc', environ={'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '0'}) + resp = req.get_response(self.controller) + + noodles = [u"Spätzle", u"ラーメン"] + for n in noodles: + req = Request.blank( + '/sda1/p/a/jsonc/%s' % n.encode("utf-8"), + environ={'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '1', + 'HTTP_X_CONTENT_TYPE': 'text/plain', + 'HTTP_X_ETAG': 'x', + 'HTTP_X_SIZE': 0}) + self._update_object_put_headers(req) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 201) # sanity check + + json_body = [{"name": noodles[0], + "hash": "x", + "bytes": 0, + "content_type": "text/plain", + "last_modified": "1970-01-01T00:00:01.000000"}, + {"name": noodles[1], + "hash": "x", + "bytes": 0, + "content_type": "text/plain", + "last_modified": "1970-01-01T00:00:01.000000"}] + + # JSON + req = Request.blank( 
+ '/sda1/p/a/jsonc?format=json', + environ={'REQUEST_METHOD': 'GET'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 200) # sanity check + self.assertEqual(json.loads(resp.body), json_body) + + # Plain text + text_body = u''.join(n + u"\n" for n in noodles).encode('utf-8') + req = Request.blank( + '/sda1/p/a/jsonc?format=text', + environ={'REQUEST_METHOD': 'GET'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 200) # sanity check + self.assertEqual(resp.body, text_body) + def test_GET_plain(self): # make a container req = Request.blank( @@ -2496,6 +2545,39 @@ class TestContainerController(unittest.TestCase): {"subdir": "US-TX-"}, {"subdir": "US-UT-"}]) + def test_GET_delimiter_non_ascii(self): + req = Request.blank( + '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', + 'HTTP_X_TIMESTAMP': '0'}) + resp = req.get_response(self.controller) + for obj_name in [u"a/❥/1", u"a/❥/2", u"a/ꙮ/1", u"a/ꙮ/2"]: + req = Request.blank( + '/sda1/p/a/c/%s' % obj_name.encode('utf-8'), + environ={ + 'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1', + 'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x', + 'HTTP_X_SIZE': 0}) + self._update_object_put_headers(req) + resp = req.get_response(self.controller) + self.assertEqual(resp.status_int, 201) + + # JSON + req = Request.blank( + '/sda1/p/a/c?prefix=a/&delimiter=/&format=json', + environ={'REQUEST_METHOD': 'GET'}) + resp = req.get_response(self.controller) + self.assertEqual( + json.loads(resp.body), + [{"subdir": u"a/❥/"}, + {"subdir": u"a/ꙮ/"}]) + + # Plain text + req = Request.blank( + '/sda1/p/a/c?prefix=a/&delimiter=/&format=text', + environ={'REQUEST_METHOD': 'GET'}) + resp = req.get_response(self.controller) + self.assertEqual(resp.body, u"a/❥/\na/ꙮ/\n".encode("utf-8")) + def test_GET_leading_delimiter(self): req = Request.blank( '/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT', diff --git a/test/unit/helpers.py b/test/unit/helpers.py index 1f3f58ca0a..9ed0f9c9fa 
100644 --- a/test/unit/helpers.py +++ b/test/unit/helpers.py @@ -37,7 +37,7 @@ from swift.account import server as account_server from swift.common import storage_policy from swift.common.ring import RingData from swift.common.storage_policy import StoragePolicy, ECStoragePolicy -from swift.common.middleware import proxy_logging +from swift.common.middleware import listing_formats, proxy_logging from swift.common import utils from swift.common.utils import mkdirs, normalize_timestamp, NullLogger from swift.container import server as container_server @@ -210,8 +210,8 @@ def setup_servers(the_object_server=object_server, extra_conf=None): (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv, obj4srv, obj5srv, obj6srv) nl = NullLogger() - logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf, - logger=prosrv.logger) + logging_prosv = proxy_logging.ProxyLoggingMiddleware( + listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger) prospa = spawn(wsgi.server, prolis, logging_prosv, nl) acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl) acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 2766ebea22..e239bb5798 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -59,7 +59,7 @@ from swift.proxy import server as proxy_server from swift.proxy.controllers.obj import ReplicatedObjectController from swift.obj import server as object_server from swift.common.middleware import proxy_logging, versioned_writes, \ - copy + copy, listing_formats from swift.common.middleware.acl import parse_acl, format_acl from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \ APIVersionError, ChunkWriteTimeout @@ -9200,10 +9200,11 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase): """ def setUp(self): conf = {'account_autocreate': 'yes'} - self.app = proxy_server.Application(conf, FakeMemcache(), - 
account_ring=FakeRing(), - container_ring=FakeRing()) - self.app.memcache = FakeMemcacheReturnsNone() + self.app = listing_formats.ListingFilter( + proxy_server.Application(conf, FakeMemcache(), + account_ring=FakeRing(), + container_ring=FakeRing())) + self.app.app.memcache = FakeMemcacheReturnsNone() def test_GET_autocreate_accept_json(self): with save_globals(): @@ -9593,12 +9594,15 @@ class TestSocketObjectVersions(unittest.TestCase): ]) conf = {'devices': _testdir, 'swift_dir': _testdir, 'mount_check': 'false', 'allowed_headers': allowed_headers} - prosrv = versioned_writes.VersionedWritesMiddleware( + prosrv = listing_formats.ListingFilter( copy.ServerSideCopyMiddleware( - proxy_logging.ProxyLoggingMiddleware( - _test_servers[0], conf, - logger=_test_servers[0].logger), conf), - {}) + versioned_writes.VersionedWritesMiddleware( + proxy_logging.ProxyLoggingMiddleware( + _test_servers[0], conf, + logger=_test_servers[0].logger), {}), + {} + ) + ) self.coro = spawn(wsgi.server, prolis, prosrv, NullLogger()) # replace global prosrv with one that's filtered with version # middleware From 1e79f828ad10918bd76ae8df6fe4c4dbf7bbf3c5 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 14 Sep 2017 20:57:46 +0900 Subject: [PATCH 15/43] Remove all post_as_copy related code and configes It was deprecated and we discussed on this topic in Denver PTG for Queen cycle. Main motivation for this work is that deprecated post_as_copy option and its gate blocks future symlink work. 
Change-Id: I411893db1565864ed5beb6ae75c38b982a574476 --- doc/manpages/proxy-server.conf.5 | 2 - doc/source/deployment_guide.rst | 1 - doc/source/development_guidelines.rst | 4 - etc/proxy-server.conf-sample | 2 - swift/common/middleware/copy.py | 68 +---- swift/common/middleware/versioned_writes.py | 3 +- test/functional/__init__.py | 9 - test/probe/common.py | 5 +- test/probe/test_container_sync.py | 14 +- test/probe/test_object_async_update.py | 6 +- .../probe/test_object_metadata_replication.py | 2 +- .../middleware/crypto/test_encryption.py | 9 - test/unit/common/middleware/test_copy.py | 250 +----------------- .../middleware/test_subrequest_logging.py | 47 +--- .../middleware/test_versioned_writes.py | 17 -- test/unit/proxy/test_server.py | 26 +- test/unit/proxy/test_sysmeta.py | 10 - 17 files changed, 30 insertions(+), 445 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 8081b3491d..f3d0e3060f 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -996,8 +996,6 @@ Error count to consider a node error limited. The default is 10. Whether account PUTs and DELETEs are even callable. If set to 'true' any authorized user may create and delete accounts; if 'false' no one, even authorized, can. The default is false. -.IP \fBobject_post_as_copy\fR -Deprecated. The default is False. .IP \fBaccount_autocreate\fR If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created. The default is set to false. diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 554302f30f..06c6f9b58f 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1884,7 +1884,6 @@ error_suppression_limit 10 Error count to consider node error limited allow_account_management false Whether account PUTs and DELETEs are even callable -object_post_as_copy false Deprecated. 
account_autocreate false If set to 'true' authorized accounts that do not yet exist within the Swift cluster will diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 601db80a0a..a8d2295a0b 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -127,9 +127,6 @@ set using environment variables: environment variable ``SWIFT_TEST_IN_PROCESS_CONF_LOADER`` to ``ec``. -- the deprecated proxy-server ``object_post_as_copy`` option may be set using - the environment variable ``SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY``. - - logging to stdout may be enabled by setting ``SWIFT_TEST_DEBUG_LOGS``. For example, this command would run the in-process mode functional tests with @@ -147,7 +144,6 @@ The ``tox.ini`` file also specifies test environments for running other in-process functional test configurations, e.g.:: tox -e func-ec - tox -e func-post-as-copy To debug the functional tests, use the 'in-process test' mode and pass the ``--pdb`` flag to ``tox``:: diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index c07c48ff35..80469993c1 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -860,8 +860,6 @@ use = egg:swift#copy # requests are transformed into COPY requests where source and destination are # the same. All client-visible behavior (save response time) should be # identical. -# This option is deprecated and will be ignored in a future release. -# object_post_as_copy = false # Note: To enable encryption, add the following 2 dependent pieces of crypto # middleware to the proxy-server pipeline. 
They should be to the right of all diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py index d3718573bb..49e85c2b1a 100644 --- a/swift/common/middleware/copy.py +++ b/swift/common/middleware/copy.py @@ -112,20 +112,6 @@ If a request is sent without the query parameter, an attempt will be made to copy the whole object but will fail if the object size is greater than 5GB. -------------------- -Object Post as Copy -------------------- -Historically, this has been a feature (and a configurable option with default -set to True) in proxy server configuration. This has been moved to server side -copy middleware and the default changed to False. - -When ``object_post_as_copy`` is set to ``true``, an incoming POST request is -morphed into a COPY request where source and destination objects are same. - -This feature was necessary because of a previous behavior where POSTS would -update the metadata on the object but not on the container. As a result, -features like container sync would not work correctly. This is no longer the -case and this option is now deprecated. It will be removed in a future release. 
""" import os @@ -137,8 +123,7 @@ from swift.common.utils import get_logger, \ config_true_value, FileLikeIter, read_conf_dir, close_if_possible from swift.common.swob import Request, HTTPPreconditionFailed, \ HTTPRequestEntityTooLarge, HTTPBadRequest, HTTPException -from swift.common.http import HTTP_MULTIPLE_CHOICES, HTTP_CREATED, \ - is_success, HTTP_OK +from swift.common.http import HTTP_MULTIPLE_CHOICES, is_success, HTTP_OK from swift.common.constraints import check_account_format, MAX_FILE_SIZE from swift.common.request_helpers import copy_header_subset, remove_items, \ is_sys_meta, is_sys_or_user_meta, is_object_transient_sysmeta @@ -238,13 +223,7 @@ class ServerSideCopyWebContext(WSGIContext): return app_resp def _adjust_put_response(self, req, additional_resp_headers): - if 'swift.post_as_copy' in req.environ: - # Older editions returned 202 Accepted on object POSTs, so we'll - # convert any 201 Created responses to that for compatibility with - # picky clients. - if self._get_status_int() == HTTP_CREATED: - self._response_status = '202 Accepted' - elif is_success(self._get_status_int()): + if is_success(self._get_status_int()): for header, value in additional_resp_headers.items(): self._response_headers.append((header, value)) @@ -269,17 +248,12 @@ class ServerSideCopyMiddleware(object): def __init__(self, app, conf): self.app = app self.logger = get_logger(conf, log_route="copy") - # Read the old object_post_as_copy option from Proxy app just in case - # someone has set it to false (non default). This wouldn't cause - # problems during upgrade. self._load_object_post_as_copy_conf(conf) self.object_post_as_copy = \ config_true_value(conf.get('object_post_as_copy', 'false')) if self.object_post_as_copy: - msg = ('object_post_as_copy=true is deprecated; remove all ' - 'references to it from %s to disable this warning. 
This ' - 'option will be ignored in a future release' % conf.get( - '__file__', 'proxy-server.conf')) + msg = ('object_post_as_copy=true is deprecated; This ' + 'option is now ignored') self.logger.warning(msg) def _load_object_post_as_copy_conf(self, conf): @@ -330,9 +304,6 @@ class ServerSideCopyMiddleware(object): elif req.method == 'COPY': req.environ['swift.orig_req_method'] = req.method return self.handle_COPY(req, start_response) - elif req.method == 'POST' and self.object_post_as_copy: - req.environ['swift.orig_req_method'] = req.method - return self.handle_object_post_as_copy(req, start_response) elif req.method == 'OPTIONS': # Does not interfere with OPTIONS response from # (account,container) servers and /info response. @@ -343,21 +314,6 @@ class ServerSideCopyMiddleware(object): return self.app(env, start_response) - def handle_object_post_as_copy(self, req, start_response): - req.method = 'PUT' - req.path_info = '/v1/%s/%s/%s' % ( - self.account_name, self.container_name, self.object_name) - req.headers['Content-Length'] = 0 - req.headers.pop('Range', None) - req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name, - self.object_name)) - req.environ['swift.post_as_copy'] = True - params = req.params - # for post-as-copy always copy the manifest itself if source is *LO - params['multipart-manifest'] = 'get' - req.params = params - return self.handle_PUT(req, start_response) - def handle_COPY(self, req, start_response): if not req.headers.get('Destination'): return HTTPPreconditionFailed(request=req, @@ -394,11 +350,6 @@ class ServerSideCopyMiddleware(object): source_req.headers.pop('X-Backend-Storage-Policy-Index', None) source_req.path_info = quote(source_path) source_req.headers['X-Newest'] = 'true' - if 'swift.post_as_copy' in req.environ: - # We're COPYing one object over itself because of a POST; rely on - # the PUT for write authorization, don't require read authorization - source_req.environ['swift.authorize'] = lambda req: None - 
source_req.environ['swift.authorize_override'] = True # in case we are copying an SLO manifest, set format=raw parameter params = source_req.params @@ -470,11 +421,7 @@ class ServerSideCopyMiddleware(object): def is_object_sysmeta(k): return is_sys_meta('object', k) - if 'swift.post_as_copy' in sink_req.environ: - # Post-as-copy: ignore new sysmeta, copy existing sysmeta - remove_items(sink_req.headers, is_object_sysmeta) - copy_header_subset(source_resp, sink_req, is_object_sysmeta) - elif config_true_value(req.headers.get('x-fresh-metadata', 'false')): + if config_true_value(req.headers.get('x-fresh-metadata', 'false')): # x-fresh-metadata only applies to copy, not post-as-copy: ignore # existing user metadata, update existing sysmeta with new copy_header_subset(source_resp, sink_req, is_object_sysmeta) @@ -497,9 +444,8 @@ class ServerSideCopyMiddleware(object): params['multipart-manifest'] = 'put' if 'X-Object-Manifest' in source_resp.headers: del params['multipart-manifest'] - if 'swift.post_as_copy' not in sink_req.environ: - sink_req.headers['X-Object-Manifest'] = \ - source_resp.headers['X-Object-Manifest'] + sink_req.headers['X-Object-Manifest'] = \ + source_resp.headers['X-Object-Manifest'] sink_req.params = params # Set swift.source, data source, content length and etag diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index a21c95620c..0bac721bba 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -826,8 +826,7 @@ class VersionedWritesMiddleware(object): allow_versioned_writes) except HTTPException as error_response: return error_response(env, start_response) - elif (obj and req.method in ('PUT', 'DELETE') and - not req.environ.get('swift.post_as_copy')): + elif (obj and req.method in ('PUT', 'DELETE')): try: return self.object_request( req, api_version, account, container, obj, diff --git a/test/functional/__init__.py 
b/test/functional/__init__.py index 3e3c4486d7..25f1410436 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -505,15 +505,6 @@ def in_process_setup(the_object_server=object_server): 'password6': 'testing6' }) - # If an env var explicitly specifies the proxy-server object_post_as_copy - # option then use its value, otherwise leave default config unchanged. - object_post_as_copy = os.environ.get( - 'SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY') - if object_post_as_copy is not None: - object_post_as_copy = config_true_value(object_post_as_copy) - config['object_post_as_copy'] = str(object_post_as_copy) - _debug('Setting object_post_as_copy to %r' % object_post_as_copy) - acc1lis = listen_zero() acc2lis = listen_zero() con1lis = listen_zero() diff --git a/test/probe/common.py b/test/probe/common.py index 0d00481ffe..0c1507536a 100644 --- a/test/probe/common.py +++ b/test/probe/common.py @@ -448,7 +448,7 @@ class ProbeTest(unittest.TestCase): else: os.system('sudo mount %s' % device) - def make_internal_client(self, object_post_as_copy=True): + def make_internal_client(self): tempdir = mkdtemp() try: conf_path = os.path.join(tempdir, 'internal_client.conf') @@ -464,14 +464,13 @@ class ProbeTest(unittest.TestCase): [filter:copy] use = egg:swift#copy - object_post_as_copy = %s [filter:cache] use = egg:swift#memcache [filter:catch_errors] use = egg:swift#catch_errors - """ % object_post_as_copy + """ with open(conf_path, 'w') as f: f.write(dedent(conf_body)) return internal_client.InternalClient(conf_path, 'test', 1) diff --git a/test/probe/test_container_sync.py b/test/probe/test_container_sync.py index a944dc7fee..7c3de1782b 100644 --- a/test/probe/test_container_sync.py +++ b/test/probe/test_container_sync.py @@ -93,7 +93,7 @@ class TestContainerSync(ReplProbeTest): return source['name'], dest['name'] - def _test_sync(self, object_post_as_copy): + def test_sync(self): source_container, dest_container = self._setup_synced_containers() # upload 
to source @@ -111,12 +111,10 @@ class TestContainerSync(ReplProbeTest): self.assertIn('x-object-meta-test', resp_headers) self.assertEqual('put_value', resp_headers['x-object-meta-test']) - # update metadata with a POST, using an internal client so we can - # vary the object_post_as_copy setting - first use post-as-copy + # update metadata with a POST post_headers = {'Content-Type': 'image/jpeg', 'X-Object-Meta-Test': 'post_value'} - int_client = self.make_internal_client( - object_post_as_copy=object_post_as_copy) + int_client = self.make_internal_client() int_client.set_object_metadata(self.account, source_container, object_name, post_headers) # sanity checks... @@ -154,12 +152,6 @@ class TestContainerSync(ReplProbeTest): self.url, self.token, dest_container, object_name) self.assertEqual(404, cm.exception.http_status) # sanity check - def test_sync_with_post_as_copy(self): - self._test_sync(True) - - def test_sync_with_fast_post(self): - self._test_sync(False) - def test_sync_slo_manifest(self): # Verify that SLO manifests are sync'd even if their segments can not # be found in the destination account at time of sync'ing. 
diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py index 167f1b637e..33b7504a69 100644 --- a/test/probe/test_object_async_update.py +++ b/test/probe/test_object_async_update.py @@ -237,8 +237,7 @@ class TestUpdateOverridesEC(ECProbeTest): self.assertFalse(direct_client.direct_get_container( cnodes[0], cpart, self.account, 'c1')[1]) - # use internal client for POST so we can force fast-post mode - int_client = self.make_internal_client(object_post_as_copy=False) + int_client = self.make_internal_client() int_client.set_object_metadata( self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'}) self.assertEqual( @@ -296,8 +295,7 @@ class TestUpdateOverridesEC(ECProbeTest): content_type='test/ctype') meta = client.head_object(self.url, self.token, 'c1', 'o1') - # use internal client for POST so we can force fast-post mode - int_client = self.make_internal_client(object_post_as_copy=False) + int_client = self.make_internal_client() int_client.set_object_metadata( self.account, 'c1', 'o1', {'X-Object-Meta-Fruit': 'Tomato'}) self.assertEqual( diff --git a/test/probe/test_object_metadata_replication.py b/test/probe/test_object_metadata_replication.py index 042d9439fb..ca2e8dde1d 100644 --- a/test/probe/test_object_metadata_replication.py +++ b/test/probe/test_object_metadata_replication.py @@ -47,7 +47,7 @@ class Test(ReplProbeTest): policy=self.policy) self.container_brain = BrainSplitter(self.url, self.token, self.container_name) - self.int_client = self.make_internal_client(object_post_as_copy=False) + self.int_client = self.make_internal_client() def _get_object_info(self, account, container, obj, number): obj_conf = self.configs['object-server'] diff --git a/test/unit/common/middleware/crypto/test_encryption.py b/test/unit/common/middleware/crypto/test_encryption.py index e984a5f0ae..442ef40049 100644 --- a/test/unit/common/middleware/crypto/test_encryption.py +++ b/test/unit/common/middleware/crypto/test_encryption.py @@ 
-618,14 +618,5 @@ class TestCryptoPipelineChanges(unittest.TestCase): self._check_listing(self.crypto_app) -class TestCryptoPipelineChangesFastPost(TestCryptoPipelineChanges): - @classmethod - def setUpClass(cls): - # set proxy config to use fast post - extra_conf = {'object_post_as_copy': 'False'} - cls._test_context = setup_servers(extra_conf=extra_conf) - cls.proxy_app = cls._test_context["test_servers"][0] - - if __name__ == '__main__': unittest.main() diff --git a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py index bbf74bbc1b..a7b44e4a0e 100644 --- a/test/unit/common/middleware/test_copy.py +++ b/test/unit/common/middleware/test_copy.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time import mock import shutil import tempfile @@ -93,9 +92,7 @@ class TestCopyConstraints(unittest.TestCase): class TestServerSideCopyMiddleware(unittest.TestCase): def setUp(self): self.app = FakeSwift() - self.ssc = copy.filter_factory({ - 'object_post_as_copy': 'yes', - })(self.app) + self.ssc = copy.filter_factory({})(self.app) self.ssc.logger = self.app.logger def tearDown(self): @@ -166,92 +163,6 @@ class TestServerSideCopyMiddleware(unittest.TestCase): self.assertRequestEqual(req, self.authorized[0]) self.assertNotIn('swift.orig_req_method', req.environ) - def test_POST_as_COPY_simple(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {}) - req = Request.blank('/v1/a/c/o', method='POST') - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - # For basic test cases, assert orig_req_method behavior - self.assertEqual(req.environ['swift.orig_req_method'], 'POST') - - def test_POST_as_COPY_201_return_202(self): - self.app.register('GET', '/v1/a/c/o', 
swob.HTTPOk, {}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) - req = Request.blank('/v1/a/c/o', method='POST') - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - - def test_POST_delete_at(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {}) - t = str(int(time.time() + 100)) - req = Request.blank('/v1/a/c/o', method='POST', - headers={'Content-Type': 'foo/bar', - 'X-Delete-At': t}) - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - - calls = self.app.calls_with_headers - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - self.assertTrue('X-Delete-At' in req_headers) - self.assertEqual(req_headers['X-Delete-At'], str(t)) - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - - def test_POST_as_COPY_static_large_object(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, - {'X-Static-Large-Object': True}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {}) - req = Request.blank('/v1/a/c/o', method='POST', - headers={}) - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - - calls = self.app.calls_with_headers - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - self.assertNotIn('X-Static-Large-Object', req_headers) - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - - def test_POST_as_COPY_dynamic_large_object_manifest(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, - {'X-Object-Manifest': 'orig_manifest'}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) - req = Request.blank('/v1/a/c/o', method='POST', - headers={'X-Object-Manifest': 'new_manifest'}) - status, 
headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - - calls = self.app.calls_with_headers - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - self.assertEqual('new_manifest', req_headers['x-object-manifest']) - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - - def test_POST_as_COPY_dynamic_large_object_no_manifest(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, - {'X-Object-Manifest': 'orig_manifest'}, 'passed') - self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) - req = Request.blank('/v1/a/c/o', method='POST', - headers={}) - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - - calls = self.app.calls_with_headers - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - self.assertNotIn('X-Object-Manifest', req_headers) - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - def test_basic_put_with_x_copy_from(self): self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register('PUT', '/v1/a/c/o2', swob.HTTPCreated, {}) @@ -1345,100 +1256,6 @@ class TestServerSideCopyMiddleware(unittest.TestCase): req_headers.get('X-Object-Transient-Sysmeta-Test')) self.assertEqual('Not Bar', req_headers.get('X-Foo')) - def _test_POST_source_headers(self, extra_post_headers): - # helper method to perform a POST with metadata headers that should - # always be sent to the destination - post_headers = {'X-Object-Meta-Test2': 'added', - 'X-Object-Sysmeta-Test2': 'added', - 'X-Object-Transient-Sysmeta-Test2': 'added'} - post_headers.update(extra_post_headers) - get_resp_headers = { - 'X-Timestamp': '1234567890.12345', - 'X-Backend-Timestamp': '1234567890.12345', - 'Content-Type': 'text/original', - 'Content-Encoding': 'gzip', - 'Content-Disposition': 'attachment; filename=myfile', - 'X-Object-Meta-Test': 'original', - 'X-Object-Sysmeta-Test': 
'original', - 'X-Object-Transient-Sysmeta-Test': 'original', - 'X-Foo': 'Bar'} - self.app.register( - 'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers) - self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) - req = Request.blank('/v1/a/c/o', method='POST', headers=post_headers) - status, headers, body = self.call_ssc(req) - self.assertEqual(status, '202 Accepted') - calls = self.app.calls_with_headers - self.assertEqual(2, len(calls)) - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - # these headers should always be applied to the destination - self.assertEqual('added', req_headers.get('X-Object-Meta-Test2')) - self.assertEqual('added', - req_headers.get('X-Object-Transient-Sysmeta-Test2')) - # POSTed sysmeta should never be applied to the destination - self.assertNotIn('X-Object-Sysmeta-Test2', req_headers) - # existing sysmeta should always be preserved - self.assertEqual('original', - req_headers.get('X-Object-Sysmeta-Test')) - return req_headers - - def test_POST_no_updates(self): - post_headers = {} - req_headers = self._test_POST_source_headers(post_headers) - self.assertEqual('text/original', req_headers.get('Content-Type')) - self.assertNotIn('X-Object-Meta-Test', req_headers) - self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers) - self.assertNotIn('X-Timestamp', req_headers) - self.assertNotIn('X-Backend-Timestamp', req_headers) - self.assertNotIn('Content-Encoding', req_headers) - self.assertNotIn('Content-Disposition', req_headers) - self.assertNotIn('X-Foo', req_headers) - - def test_POST_with_updates(self): - post_headers = { - 'Content-Type': 'text/not_original', - 'Content-Encoding': 'not_gzip', - 'Content-Disposition': 'attachment; filename=notmyfile', - 'X-Object-Meta-Test': 'not_original', - 'X-Object-Sysmeta-Test': 'not_original', - 'X-Object-Transient-Sysmeta-Test': 'not_original', - 'X-Foo': 'Not Bar', - } - req_headers = self._test_POST_source_headers(post_headers) - 
self.assertEqual('text/not_original', req_headers.get('Content-Type')) - self.assertEqual('not_gzip', req_headers.get('Content-Encoding')) - self.assertEqual('attachment; filename=notmyfile', - req_headers.get('Content-Disposition')) - self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test')) - self.assertEqual('not_original', - req_headers.get('X-Object-Transient-Sysmeta-Test')) - self.assertEqual('Not Bar', req_headers.get('X-Foo')) - - def test_POST_x_fresh_metadata_with_updates(self): - # post-as-copy trumps x-fresh-metadata i.e. existing user metadata - # should not be copied, sysmeta is copied *and not updated with new* - post_headers = { - 'X-Fresh-Metadata': 'true', - 'Content-Type': 'text/not_original', - 'Content-Encoding': 'not_gzip', - 'Content-Disposition': 'attachment; filename=notmyfile', - 'X-Object-Meta-Test': 'not_original', - 'X-Object-Sysmeta-Test': 'not_original', - 'X-Object-Transient-Sysmeta-Test': 'not_original', - 'X-Foo': 'Not Bar', - } - req_headers = self._test_POST_source_headers(post_headers) - self.assertEqual('text/not_original', req_headers.get('Content-Type')) - self.assertEqual('not_gzip', req_headers.get('Content-Encoding')) - self.assertEqual('attachment; filename=notmyfile', - req_headers.get('Content-Disposition')) - self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test')) - self.assertEqual('not_original', - req_headers.get('X-Object-Transient-Sysmeta-Test')) - self.assertEqual('Not Bar', req_headers.get('X-Foo')) - self.assertIn('X-Fresh-Metadata', req_headers) - def test_COPY_with_single_range(self): # verify that source etag is not copied when copying a range self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, @@ -1472,67 +1289,6 @@ class TestServerSideCopyConfiguration(unittest.TestCase): def tearDown(self): shutil.rmtree(self.tmpdir) - def test_post_as_copy_defaults_to_false(self): - ssc = copy.filter_factory({})("no app here") - self.assertEqual(ssc.object_post_as_copy, False) - - def 
test_reading_proxy_conf_when_no_middleware_conf_present(self): - proxy_conf = dedent(""" - [DEFAULT] - bind_ip = 10.4.5.6 - - [pipeline:main] - pipeline = catch_errors copy ye-olde-proxy-server - - [filter:copy] - use = egg:swift#copy - - [app:ye-olde-proxy-server] - use = egg:swift#proxy - object_post_as_copy = no - """) - - conffile = tempfile.NamedTemporaryFile() - conffile.write(proxy_conf) - conffile.flush() - - ssc = copy.filter_factory({ - '__file__': conffile.name - })("no app here") - - self.assertEqual(ssc.object_post_as_copy, False) - - def test_middleware_conf_precedence(self): - proxy_conf = dedent(""" - [DEFAULT] - bind_ip = 10.4.5.6 - - [pipeline:main] - pipeline = catch_errors copy ye-olde-proxy-server - - [filter:copy] - use = egg:swift#copy - object_post_as_copy = no - - [app:ye-olde-proxy-server] - use = egg:swift#proxy - object_post_as_copy = yes - """) - - conffile = tempfile.NamedTemporaryFile() - conffile.write(proxy_conf) - conffile.flush() - - with mock.patch('swift.common.middleware.copy.get_logger', - return_value=debug_logger('copy')): - ssc = copy.filter_factory({ - 'object_post_as_copy': 'no', - '__file__': conffile.name - })("no app here") - - self.assertEqual(ssc.object_post_as_copy, False) - self.assertFalse(ssc.logger.get_lines_for_level('warning')) - def _test_post_as_copy_emits_warning(self, conf): with mock.patch('swift.common.middleware.copy.get_logger', return_value=debug_logger('copy')): @@ -1585,9 +1341,7 @@ class TestServerSideCopyMiddlewareWithEC(unittest.TestCase): self.app = PatchedObjControllerApp( None, FakeMemcache(), account_ring=FakeRing(), container_ring=FakeRing(), logger=self.logger) - self.ssc = copy.filter_factory({ - 'object_post_as_copy': 'yes', - })(self.app) + self.ssc = copy.filter_factory({})(self.app) self.ssc.logger = self.app.logger self.policy = POLICIES.default self.app.container_info = dict(self.container_info) diff --git a/test/unit/common/middleware/test_subrequest_logging.py 
b/test/unit/common/middleware/test_subrequest_logging.py index 8cd002c23c..3e51efd2b8 100644 --- a/test/unit/common/middleware/test_subrequest_logging.py +++ b/test/unit/common/middleware/test_subrequest_logging.py @@ -117,23 +117,14 @@ class TestSubRequestLogging(unittest.TestCase): self._test_subrequest_logged('PUT') self._test_subrequest_logged('DELETE') - def _test_subrequest_logged_POST(self, subrequest_type, - post_as_copy=False): - # Test that subrequests made downstream from Copy POST will be logged - # with the request type of the subrequest as opposed to the GET/PUT. - - app = FakeApp({'subrequest_type': subrequest_type, - 'object_post_as_copy': post_as_copy}) + def _test_subrequest_logged_POST(self, subrequest_type): + app = FakeApp({'subrequest_type': subrequest_type}) hdrs = {'content-type': 'text/plain'} req = Request.blank(self.path, method='POST', headers=hdrs) app.register('POST', self.path, HTTPOk, headers=hdrs) expect_lines = 2 - if post_as_copy: - app.register('PUT', self.path, HTTPOk, headers=hdrs) - app.register('GET', '/v1/a/c/o', HTTPOk, headers=hdrs) - expect_lines = 4 req.get_response(app) info_log_lines = app.fake_logger.get_lines_for_level('info') @@ -142,33 +133,17 @@ class TestSubRequestLogging(unittest.TestCase): subreq_put_post = '%s %s' % (subrequest_type, SUB_PUT_POST_PATH) origpost = 'POST %s' % self.path - copyget = 'GET %s' % self.path - if post_as_copy: - # post_as_copy expect GET subreq, copy GET, PUT subreq, orig POST - subreq_get = '%s %s' % (subrequest_type, SUB_GET_PATH) - self.assertTrue(subreq_get in info_log_lines[0]) - self.assertTrue(copyget in info_log_lines[1]) - self.assertTrue(subreq_put_post in info_log_lines[2]) - self.assertTrue(origpost in info_log_lines[3]) - else: - # fast post expect POST subreq, original POST - self.assertTrue(subreq_put_post in info_log_lines[0]) - self.assertTrue(origpost in info_log_lines[1]) + # fast post expect POST subreq, original POST + self.assertTrue(subreq_put_post in 
info_log_lines[0]) + self.assertTrue(origpost in info_log_lines[1]) - def test_subrequest_logged_post_as_copy_with_POST_fast_post(self): - self._test_subrequest_logged_POST('HEAD', post_as_copy=False) - self._test_subrequest_logged_POST('GET', post_as_copy=False) - self._test_subrequest_logged_POST('POST', post_as_copy=False) - self._test_subrequest_logged_POST('PUT', post_as_copy=False) - self._test_subrequest_logged_POST('DELETE', post_as_copy=False) - - def test_subrequest_logged_post_as_copy_with_POST(self): - self._test_subrequest_logged_POST('HEAD', post_as_copy=True) - self._test_subrequest_logged_POST('GET', post_as_copy=True) - self._test_subrequest_logged_POST('POST', post_as_copy=True) - self._test_subrequest_logged_POST('PUT', post_as_copy=True) - self._test_subrequest_logged_POST('DELETE', post_as_copy=True) + def test_subrequest_logged_with_POST(self): + self._test_subrequest_logged_POST('HEAD') + self._test_subrequest_logged_POST('GET') + self._test_subrequest_logged_POST('POST') + self._test_subrequest_logged_POST('PUT') + self._test_subrequest_logged_POST('DELETE') if __name__ == '__main__': diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index 007859b430..a160247881 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -330,23 +330,6 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) - def test_put_object_post_as_copy(self): - # PUTs due to a post-as-copy should NOT cause a versioning op - self.app.register( - 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed') - - cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) - req = Request.blank( - '/v1/a/c/o', - environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, - 'CONTENT_LENGTH': '100', - 'swift.post_as_copy': True}) - status, headers, 
body = self.call_vw(req) - self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 1) - self.assertRequestEqual(req, self.authorized[0]) - self.assertEqual(1, self.app.call_count) - def test_put_first_object_success(self): self.app.register( 'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 2766ebea22..c4886e52f0 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -3192,8 +3192,6 @@ class TestReplicatedObjectController( def test_POST(self): with save_globals(): - self.app.object_post_as_copy = False - def test_status_map(statuses, expected): set_http_connect(*statuses) self.app.memcache.store = {} @@ -3218,7 +3216,6 @@ class TestReplicatedObjectController( def test_POST_backend_headers(self): # reset the router post patch_policies self.app.obj_controller_router = proxy_server.ObjectControllerRouter() - self.app.object_post_as_copy = False self.app.sort_nodes = lambda nodes, *args, **kwargs: nodes backend_requests = [] @@ -3436,7 +3433,6 @@ class TestReplicatedObjectController( def test_POST_meta_val_len(self): with save_globals(): limit = constraints.MAX_META_VALUE_LENGTH - self.app.object_post_as_copy = False ReplicatedObjectController( self.app, 'account', 'container', 'object') set_http_connect(200, 200, 202, 202, 202) @@ -3462,7 +3458,6 @@ class TestReplicatedObjectController( return with save_globals(): limit = constraints.MAX_META_VALUE_LENGTH - self.app.object_post_as_copy = False controller = ReplicatedObjectController( self.app, 'account', 'container', 'object') set_http_connect(200, 200, 202, 202, 202) @@ -3478,7 +3473,6 @@ class TestReplicatedObjectController( def test_POST_meta_key_len(self): with save_globals(): limit = constraints.MAX_META_NAME_LENGTH - self.app.object_post_as_copy = False set_http_connect(200, 200, 202, 202, 202) # acct cont obj obj obj req = Request.blank( @@ -4224,7 +4218,6 @@ class 
TestReplicatedObjectController( def test_PUT_POST_requires_container_exist(self): with save_globals(): - self.app.object_post_as_copy = False self.app.memcache = FakeMemcacheReturnsNone() controller = ReplicatedObjectController( self.app, 'account', 'container', 'object') @@ -4837,7 +4830,6 @@ class TestReplicatedObjectController( called[0] = True return HTTPUnauthorized(request=req) with save_globals(): - self.app.object_post_as_copy = False set_http_connect(200, 200, 201, 201, 201) controller = ReplicatedObjectController( self.app, 'account', 'container', 'object') @@ -4868,7 +4860,6 @@ class TestReplicatedObjectController( def test_POST_converts_delete_after_to_delete_at(self): with save_globals(): - self.app.object_post_as_copy = False controller = ReplicatedObjectController( self.app, 'account', 'container', 'object') set_http_connect(200, 200, 202, 202, 202) @@ -5365,7 +5356,6 @@ class TestReplicatedObjectController( def test_POST_x_container_headers_with_more_container_replicas(self): self.app.container_ring.set_replicas(4) - self.app.object_post_as_copy = False req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'POST'}, @@ -6258,21 +6248,7 @@ class BaseTestECObjectController(BaseTestObjectController): self.ec_policy.object_ring.replica_count) if method == 'POST': - # Take care fast post here! 
- orig_post_as_copy = getattr( - _test_servers[0], 'object_post_as_copy', None) - try: - _test_servers[0].object_post_as_copy = False - with mock.patch.object( - _test_servers[0], - 'object_post_as_copy', False): - headers = get_ring_reloaded_response(method) - finally: - if orig_post_as_copy is None: - del _test_servers[0].object_post_as_copy - else: - _test_servers[0].object_post_as_copy = \ - orig_post_as_copy + headers = get_ring_reloaded_response(method) exp = 'HTTP/1.1 20' self.assertEqual(headers[:len(exp)], exp) diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index 70757c63f1..0037e008ad 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -307,11 +307,6 @@ class TestObjectSysmeta(unittest.TestCase): # test fast-post by issuing requests to the proxy app self._test_sysmeta_not_updated_by_POST(self.app) - def test_sysmeta_not_updated_by_POST_as_copy(self): - # test post-as-copy by issuing requests to the copy middleware app - self.copy_app.object_post_as_copy = True - self._test_sysmeta_not_updated_by_POST(self.copy_app) - def test_sysmeta_updated_by_COPY(self): # check sysmeta is updated by a COPY in same way as user meta by # issuing requests to the copy middleware app @@ -482,8 +477,3 @@ class TestObjectSysmeta(unittest.TestCase): def test_transient_sysmeta_replaced_by_PUT_or_POST(self): self._test_transient_sysmeta_replaced_by_PUT_or_POST(self.app) - - def test_transient_sysmeta_replaced_by_PUT_or_POST_as_copy(self): - # test post-as-copy by issuing requests to the copy middleware app - self.copy_app.object_post_as_copy = True - self._test_transient_sysmeta_replaced_by_PUT_or_POST(self.copy_app) From d6fcf7459435077b525123e8b78e553070d5c070 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Sat, 16 Sep 2017 04:58:31 +0900 Subject: [PATCH 16/43] Make gate keeper to save relative location header path Why we need this: Some middlewares want to keep HTTP Location header as relative path (e.g. 
using Load balancer in front of proxy). What is the problem in current Swift: Current Swift already has the flag to keep it as relative when returning the reponse using swift.common.swob.Response. However, auth_token middleware, that is from keystonemiddleware, unfortunately can change the relative path to absolute because of using webob instead of swob. What this patch is doing: Make gate_keeper able to re-transform the location header from absolute path to relative path if 'swift.leave_relative_location' is explicitely set because gate_keeper should be the most left side middleware except catch_errors middleware in the pipeline. Change-Id: Ic634c3f1b1e26635206d5a54df8b15354e8df163 --- swift/common/middleware/gatekeeper.py | 21 +++++++++++++ .../unit/common/middleware/test_gatekeeper.py | 31 +++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/swift/common/middleware/gatekeeper.py b/swift/common/middleware/gatekeeper.py index e5df5bf44c..991ec86cd9 100644 --- a/swift/common/middleware/gatekeeper.py +++ b/swift/common/middleware/gatekeeper.py @@ -36,6 +36,7 @@ from swift.common.utils import get_logger, config_true_value from swift.common.request_helpers import ( remove_items, get_sys_meta_prefix, OBJECT_TRANSIENT_SYSMETA_PREFIX ) +from six.moves.urllib.parse import urlsplit import re #: A list of python regular expressions that will be used to @@ -89,9 +90,29 @@ class GatekeeperMiddleware(object): [('X-Timestamp', ts)]) def gatekeeper_response(status, response_headers, exc_info=None): + def fixed_response_headers(): + def relative_path(value): + parsed = urlsplit(v) + new_path = parsed.path + if parsed.query: + new_path += ('?%s' % parsed.query) + if parsed.fragment: + new_path += ('#%s' % parsed.fragment) + return new_path + + if not env.get('swift.leave_relative_location'): + return response_headers + else: + return [ + (k, v) if k.lower() != 'location' else + (k, relative_path(v)) for (k, v) in response_headers + ] + + response_headers = 
fixed_response_headers() removed = filter( lambda h: self.outbound_condition(h[0]), response_headers) + if removed: self.logger.debug('removed response headers: %s' % removed) new_headers = filter( diff --git a/test/unit/common/middleware/test_gatekeeper.py b/test/unit/common/middleware/test_gatekeeper.py index 888aeb6204..d8c09368c3 100644 --- a/test/unit/common/middleware/test_gatekeeper.py +++ b/test/unit/common/middleware/test_gatekeeper.py @@ -215,5 +215,36 @@ class TestGatekeeper(unittest.TestCase): for app_hdrs in ({}, self.forbidden_headers_out): self._test_duplicate_headers_not_removed(method, app_hdrs) + def _test_location_header(self, location_path): + headers = {'Location': location_path} + req = Request.blank( + '/v/a/c', environ={'REQUEST_METHOD': 'GET', + 'swift.leave_relative_location': True}) + + class SelfishApp(FakeApp): + def __call__(self, env, start_response): + self.req = Request(env) + resp = Response(request=self.req, body='FAKE APP', + headers=self.headers) + # like webob, middlewares in the pipeline may rewrite + # location header from relative to absolute + resp.location = resp.absolute_location() + return resp(env, start_response) + + selfish_app = SelfishApp(headers=headers) + + app = self.get_app(selfish_app, {}) + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertIn('Location', resp.headers) + self.assertEqual(resp.headers['Location'], location_path) + + def test_location_header_fixed(self): + self._test_location_header('/v/a/c/o2') + self._test_location_header('/v/a/c/o2?query=path&query2=doit') + self._test_location_header('/v/a/c/o2?query=path#test') + self._test_location_header('/v/a/c/o2;whatisparam?query=path#test') + + if __name__ == '__main__': unittest.main() From 6b19ca7a7d5833f5648976d8d30c776975e361db Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 15 Sep 2017 22:52:26 +0000 Subject: [PATCH 17/43] proxy: Use the right ranges when going to multiple object servers When the proxy times 
out talking to a backend server (say, because it was under heavy load and having trouble servicing the request), we catch the ChunkReadTimeout and try to get the rest from another server. The client by and large doesn't care; there may be a brief pause in the download while the proxy get the new connection, but all the bytes arrive and in the right order: GET from node1, serve bytes 0 through N, timeout GET from node2, serve bytes N through end When we calculate the range for the new request, we check to see if we already have one from the previous request -- if one exists, we adjust it based on the bytes sent to the client thus far. This works fine for single failures, but if we need to go back *again* we double up the offset and send the client incomplete, bad data: GET from node1, serve bytes 0 through N, timeout GET from node2, serve bytes N through M, timeout GET from node3, serve bytes N + M through end Leaving the client missing bytes M through N + M. We should adjust the range based on the number of bytes pulled from the *backend* rather than delivered to the *frontend*. This just requires that we reset our book-keeping after adjusting the Range header. Change-Id: Ie153d01479c4242c01f48bf0ada78c2f9b6c8ff0 Closes-Bug: 1717401 --- swift/proxy/controllers/base.py | 11 ++++++++--- test/unit/proxy/controllers/test_base.py | 22 ++++++++++++++++++---- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index a7ae38462d..a02de794d8 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -787,14 +787,16 @@ class ResumingGetter(object): this request. This will change the Range header so that the next req will start where it left off. 
- :raises ValueError: if invalid range header :raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes > end of range + 1 :raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1 """ - if 'Range' in self.backend_headers: - req_range = Range(self.backend_headers['Range']) + try: + req_range = Range(self.backend_headers.get('Range')) + except ValueError: + req_range = None + if req_range: begin, end = req_range.ranges[0] if begin is None: # this is a -50 range req (last 50 bytes of file) @@ -818,6 +820,9 @@ class ResumingGetter(object): else: self.backend_headers['Range'] = 'bytes=%d-' % num_bytes + # Reset so if we need to do this more than once, we don't double-up + self.bytes_used_from_backend = 0 + def pop_range(self): """ Remove the first byterange from our Range header. diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 26135b76e3..cef1f90999 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -897,18 +897,32 @@ class TestFuncs(unittest.TestCase): node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'} - source1 = TestSource(['abcd', '1234', 'abc', None]) - source2 = TestSource(['efgh5678']) + data = ['abcd', '1234', 'efgh', '5678', 'lots', 'more', 'data'] + + # NB: content length on source1 should be correct + # but that reversed piece never makes it to the client + source1 = TestSource(data[:2] + [data[2][::-1], None] + data[3:]) + source2 = TestSource(data[2:4] + ['nope', None]) + source3 = TestSource(data[4:]) req = Request.blank('/v1/a/c/o') handler = GetOrHeadHandler( self.app, req, 'Object', None, None, None, {}, client_chunk_size=8) + range_headers = [] + sources = [(source2, node), (source3, node)] + + def mock_get_source_and_node(): + range_headers.append(handler.backend_headers['Range']) + return sources.pop(0) + app_iter = handler._make_app_iter(req, node, source1) with mock.patch.object(handler, '_get_source_and_node', - 
lambda: (source2, node)): + side_effect=mock_get_source_and_node): client_chunks = list(app_iter) - self.assertEqual(client_chunks, ['abcd1234', 'efgh5678']) + self.assertEqual(range_headers, ['bytes=8-27', 'bytes=16-27']) + self.assertEqual(client_chunks, [ + 'abcd1234', 'efgh5678', 'lotsmore', 'data']) def test_client_chunk_size_resuming_chunked(self): From 8556b06bf75e46369088f1cc6e2aa5d6cc00251b Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 20 Sep 2017 10:56:41 +0200 Subject: [PATCH 18/43] Add test to ensure cache cleared after container PUT The parent commit fixes a race condition. Let's make sure there won't be a regression in the future, thus testing the order to ensure the cache is cleared after the request is executed. Related-Bug: #1715177 Change-Id: I4f6750b7c556b498da0a2b56aa6c8cee5e42a90c --- test/unit/proxy/controllers/test_container.py | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index 548e3342cf..03d53c2fde 100644 --- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -21,7 +21,7 @@ from eventlet import Timeout from swift.common.swob import Request from swift.proxy import server as proxy_server -from swift.proxy.controllers.base import headers_to_container_info +from swift.proxy.controllers.base import headers_to_container_info, Controller from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.common.storage_policy import StoragePolicy from swift.common.request_helpers import get_sys_meta_prefix @@ -111,6 +111,24 @@ class TestContainerController(TestRingBase): from_memcache = self.app.memcache.get('container/a/c') self.assertTrue(from_memcache) + @mock.patch('swift.proxy.controllers.container.clear_info_cache') + @mock.patch.object(Controller, 'make_requests') + def test_container_cache_cleared_after_PUT( + self, mock_make_requests, 
mock_clear_info_cache): + parent_mock = mock.Mock() + parent_mock.attach_mock(mock_make_requests, 'make_requests') + parent_mock.attach_mock(mock_clear_info_cache, 'clear_info_cache') + controller = proxy_server.ContainerController(self.app, 'a', 'c') + callback = self._make_callback_func({}) + req = Request.blank('/v1/a/c') + with mock.patch('swift.proxy.controllers.base.http_connect', + fake_http_connect(200, 200, give_connect=callback)): + controller.PUT(req) + + # Ensure cache is cleared after the PUT request + self.assertEqual(parent_mock.mock_calls[0][0], 'make_requests') + self.assertEqual(parent_mock.mock_calls[1][0], 'clear_info_cache') + def test_swift_owner(self): owner_headers = { 'x-container-read': 'value', 'x-container-write': 'value', From cc17c99e73e9ddb1768f2979074c3ec043e0a3b4 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 21 Sep 2017 22:25:57 +0000 Subject: [PATCH 19/43] Stop reloading swift.common.utils in test_daemon This was causing some headaches over on feature/deep where a __eq__ wasn't working as expected because neither self nor other was an instance of the class we thought we were using. Apparently, this also fixes some issues when using fake_syslog = True? There are two other places that we use reload_module, in test_db_replicator and test_manager, but the monkey patching isn't nearly as straight-forward. 
Change-Id: I94d6578e275219e9687fee2f0c7cc4f99454b77f Related-Bug: 1704192 --- test/unit/common/test_daemon.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py index c9ec89bc26..fe5360dc70 100644 --- a/test/unit/common/test_daemon.py +++ b/test/unit/common/test_daemon.py @@ -15,7 +15,6 @@ import os from six import StringIO -from six.moves import reload_module import time import unittest from getpass import getuser @@ -102,13 +101,14 @@ class TestWorkerDaemon(unittest.TestCase): class TestRunDaemon(unittest.TestCase): def setUp(self): - utils.HASH_PATH_SUFFIX = 'endcap' - utils.HASH_PATH_PREFIX = 'startcap' - utils.drop_privileges = lambda *args: None - utils.capture_stdio = lambda *args: None - - def tearDown(self): - reload_module(utils) + for patcher in [ + mock.patch.object(utils, 'HASH_PATH_PREFIX', 'startcap'), + mock.patch.object(utils, 'HASH_PATH_SUFFIX', 'endcap'), + mock.patch.object(utils, 'drop_privileges', lambda *args: None), + mock.patch.object(utils, 'capture_stdio', lambda *args: None), + ]: + patcher.start() + self.addCleanup(patcher.stop) def test_run(self): d = MyDaemon({}) From 64d24076842559fcfc5d654eaf3a303b3112ea38 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 19 Sep 2017 18:11:09 -0700 Subject: [PATCH 20/43] Follow-up test fixup Use more literals to make test more obvious/readable - DAMP not DRY. 
Change-Id: I2562085c829dbc2c812d8e624d6b71a7ccee91ed Related-Change-Id: Ie153d01479c4242c01f48bf0ada78c2f9b6c8ff0 --- test/unit/proxy/controllers/test_base.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index cef1f90999..60d17c9ec8 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -888,6 +888,8 @@ class TestFuncs(unittest.TestCase): return '' def getheader(self, header): + # content-length for the whole object is generated dynamically + # by summing non-None chunks initialized as source1 if header.lower() == "content-length": return str(sum(len(c) for c in self.chunks if c is not None)) @@ -897,13 +899,11 @@ class TestFuncs(unittest.TestCase): node = {'ip': '1.2.3.4', 'port': 6200, 'device': 'sda'} - data = ['abcd', '1234', 'efgh', '5678', 'lots', 'more', 'data'] - - # NB: content length on source1 should be correct - # but that reversed piece never makes it to the client - source1 = TestSource(data[:2] + [data[2][::-1], None] + data[3:]) - source2 = TestSource(data[2:4] + ['nope', None]) - source3 = TestSource(data[4:]) + source1 = TestSource(['abcd', '1234', None, + 'efgh', '5678', 'lots', 'more', 'data']) + # incomplete reads of client_chunk_size will be re-fetched + source2 = TestSource(['efgh', '5678', 'lots', None]) + source3 = TestSource(['lots', 'more', 'data']) req = Request.blank('/v1/a/c/o') handler = GetOrHeadHandler( self.app, req, 'Object', None, None, None, {}, From 69a90dcd7511756ff72d89bb0b6f744e1a135456 Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Mon, 25 Sep 2017 13:27:50 -0400 Subject: [PATCH 21/43] Remove reference to EC being in beta Closes-Bug: #1719095 Change-Id: I8051895987bf72c8095e72b5a521042a13993174 Signed-off-by: Thiago da Silva --- doc/source/admin/objectstorage-EC.rst | 10 ---------- 1 file changed, 10 deletions(-) diff --git 
a/doc/source/admin/objectstorage-EC.rst b/doc/source/admin/objectstorage-EC.rst index 6f1996b537..2e324079d0 100644 --- a/doc/source/admin/objectstorage-EC.rst +++ b/doc/source/admin/objectstorage-EC.rst @@ -17,13 +17,3 @@ erasure coding capability. It is entirely possible to share devices between storage policies, but for erasure coding it may make more sense to use not only separate devices but possibly even entire nodes dedicated for erasure coding. - -.. important:: - - The erasure code support in Object Storage is considered beta in Kilo. - Most major functionality is included, but it has not been tested or - validated at large scale. This feature relies on ``ssync`` for durability. - We recommend deployers do extensive testing and not deploy production - data using an erasure code storage policy. - If any bugs are found during testing, please report them to - https://bugs.launchpad.net/swift From 23219664564d1b5a7ba02bbf8309ec699ab7a4cb Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Fri, 30 Jun 2017 02:03:48 -0700 Subject: [PATCH 22/43] Accept a trade off of dispersion for balance ... but only if we *have* to! During the initial gather for balance we prefer to avoid replicas on over-weight devices that are already under-represented in any of it's tiers (i.e. if a zone has to have at least one, but may have as many of two, don't take the only replica). Instead we hope by going for replicas on over-weight devices that are at the limits of their dispersion we might have a better than even chance we find a better place for them during placement! This normally works on out - and especially so for rings which can disperse and balance. But for existing rings where we'd have to sacrifice dispersion to improve balance the existing optimistic gather will end up refusing to trade dispersion for balance - and instead get stuck without solving either! You should always be able to solve for *either* dispersion or balance. 
But if you can't solve *both* - we bail out on our optimistic gather much more quickly and instead just focus on improving balance. With this change, the ring can get into balanced (and un-dispersed) states much more quickly! Change-Id: I17ac627f94f64211afaccad15596a9fcab2fada2 Related-Change-Id: Ie6e2d116b65938edac29efa6171e2470bb3e8e12 Closes-Bug: 1699636 Closes-Bug: 1701472 --- swift/common/ring/builder.py | 35 +++- test/unit/common/ring/test_builder.py | 171 ++++++++++++++++-- .../common/ring/test_composite_builder.py | 6 +- test/unit/common/ring/test_utils.py | 19 +- 4 files changed, 195 insertions(+), 36 deletions(-) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index e1b4ccd02b..d6902dc9f6 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -525,7 +525,9 @@ class RingBuilder(object): # we'll gather a few times, or until we archive the plan for gather_count in range(MAX_BALANCE_GATHER_COUNT): - self._gather_parts_for_balance(assign_parts, replica_plan) + self._gather_parts_for_balance(assign_parts, replica_plan, + # firsrt attempt go for disperse + gather_count == 0) if not assign_parts: # most likely min part hours finish_status = 'Unable to finish' @@ -1097,6 +1099,12 @@ class RingBuilder(object): :param start: offset into self.parts to begin search :param replica_plan: replicanth targets for tiers """ + tier2children = self._build_tier2children() + parts_wanted_in_tier = defaultdict(int) + for dev in self._iter_devs(): + wanted = max(dev['parts_wanted'], 0) + for tier in dev['tiers']: + parts_wanted_in_tier[tier] += wanted # Last, we gather partitions from devices that are "overweight" because # they have more partitions than their parts_wanted. 
for offset in range(self.parts): @@ -1128,8 +1136,17 @@ class RingBuilder(object): replicas_at_tier[tier] < replica_plan[tier]['max'] for tier in dev['tiers']): + # we're stuck by replica plan + continue + for t in reversed(dev['tiers']): + if replicas_at_tier[t] - 1 < replica_plan[t]['min']: + # we're stuck at tier t + break + if sum(parts_wanted_in_tier[c] + for c in tier2children[t] + if c not in dev['tiers']) <= 0: + # we're stuck by weight continue - # this is the most overweight_device holding a replica # of this part that can shed it according to the plan dev['parts_wanted'] += 1 @@ -1141,15 +1158,19 @@ class RingBuilder(object): self._replica2part2dev[replica][part] = NONE_DEV for tier in dev['tiers']: replicas_at_tier[tier] -= 1 + parts_wanted_in_tier[tier] -= 1 self._set_part_moved(part) break - def _gather_parts_for_balance(self, assign_parts, replica_plan): + def _gather_parts_for_balance(self, assign_parts, replica_plan, + disperse_first): """ Gather parts that look like they should move for balance reasons. A simple gathers of parts that looks dispersible normally works out, we'll switch strategies if things don't seem to move. 
+ :param disperse_first: boolean, avoid replicas on overweight devices + that need to be there for dispersion """ # pick a random starting point on the other side of the ring quarter_turn = (self.parts // 4) @@ -1162,10 +1183,10 @@ class RingBuilder(object): 'last_start': self._last_part_gather_start}) self._last_part_gather_start = start - self._gather_parts_for_balance_can_disperse( - assign_parts, start, replica_plan) - if not assign_parts: - self._gather_parts_for_balance_forced(assign_parts, start) + if disperse_first: + self._gather_parts_for_balance_can_disperse( + assign_parts, start, replica_plan) + self._gather_parts_for_balance_forced(assign_parts, start) def _gather_parts_for_balance_forced(self, assign_parts, start, **kwargs): """ diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 54e612cf07..72bd2039b2 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -400,7 +400,7 @@ class TestRingBuilder(unittest.TestCase): for dev in rb._iter_devs(): dev['tiers'] = utils.tiers_for_dev(dev) assign_parts = defaultdict(list) - rb._gather_parts_for_balance(assign_parts, replica_plan) + rb._gather_parts_for_balance(assign_parts, replica_plan, False) max_run = 0 run = 0 last_part = 0 @@ -1621,9 +1621,7 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance(seed=12345) part_counts = self._partition_counts(rb, key='zone') - self.assertEqual(part_counts[0], 212) - self.assertEqual(part_counts[1], 211) - self.assertEqual(part_counts[2], 345) + self.assertEqual({0: 212, 1: 211, 2: 345}, part_counts) # Now, devices 0 and 1 take 50% more than their fair shares by # weight. 
@@ -1633,9 +1631,7 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance(seed=12345) part_counts = self._partition_counts(rb, key='zone') - self.assertEqual(part_counts[0], 256) - self.assertEqual(part_counts[1], 256) - self.assertEqual(part_counts[2], 256) + self.assertEqual({0: 256, 1: 256, 2: 256}, part_counts) # Devices 0 and 1 may take up to 75% over their fair share, but the # placement algorithm only wants to spread things out evenly between @@ -1698,9 +1694,12 @@ class TestRingBuilder(unittest.TestCase): rb.rebalance(seed=12345) part_counts = self._partition_counts(rb, key='ip') - self.assertEqual(part_counts['127.0.0.1'], 238) - self.assertEqual(part_counts['127.0.0.2'], 237) - self.assertEqual(part_counts['127.0.0.3'], 293) + + self.assertEqual({ + '127.0.0.1': 237, + '127.0.0.2': 237, + '127.0.0.3': 294, + }, part_counts) # Even out the weights: balance becomes perfect for dev in rb.devs: @@ -2451,6 +2450,105 @@ class TestRingBuilder(unittest.TestCase): (0, 0, '127.0.0.1', 3): [0, 256, 0, 0], }) + def test_undispersable_zone_converge_on_balance(self): + rb = ring.RingBuilder(8, 6, 0) + dev_id = 0 + # 3 regions, 2 zone for each region, 1 server with only *one* device in + # each zone (this is an absolutely pathological case) + for r in range(3): + for z in range(2): + ip = '127.%s.%s.1' % (r, z) + dev_id += 1 + rb.add_dev({'id': dev_id, 'region': r, 'zone': z, + 'weight': 1000, 'ip': ip, 'port': 10000, + 'device': 'd%s' % dev_id}) + rb.rebalance(seed=7) + + # sanity, all balanced and 0 dispersion + self.assertEqual(rb.get_balance(), 0) + self.assertEqual(rb.dispersion, 0) + + # add one device to the server in z1 for each region, N.B. when we + # *balance* this topology we will have very bad dispersion (too much + # weight in z1 compared to z2!) 
+ for r in range(3): + z = 0 + ip = '127.%s.%s.1' % (r, z) + dev_id += 1 + rb.add_dev({'id': dev_id, 'region': r, 'zone': z, + 'weight': 1000, 'ip': ip, 'port': 10000, + 'device': 'd%s' % dev_id}) + + changed_part, _, _ = rb.rebalance(seed=7) + + # sanity, all part but only one replica moved to new devices + self.assertEqual(changed_part, 2 ** 8) + # so the first time, rings are still unbalanced becase we'll only move + # one replica of each part. + self.assertEqual(rb.get_balance(), 50.1953125) + self.assertEqual(rb.dispersion, 99.609375) + + # N.B. since we mostly end up grabbing parts by "weight forced" some + # seeds given some specific ring state will randomly pick bad + # part-replicas that end up going back down onto the same devices + changed_part, _, _ = rb.rebalance(seed=7) + self.assertEqual(changed_part, 14) + # ... this isn't a really "desirable" behavior, but even with bad luck, + # things do get better + self.assertEqual(rb.get_balance(), 47.265625) + self.assertEqual(rb.dispersion, 99.609375) + + # but if you stick with it, eventually the next rebalance, will get to + # move "the right" part-replicas, resulting in near optimal balance + changed_part, _, _ = rb.rebalance(seed=7) + self.assertEqual(changed_part, 240) + self.assertEqual(rb.get_balance(), 0.390625) + self.assertEqual(rb.dispersion, 99.609375) + + def test_undispersable_server_converge_on_balance(self): + rb = ring.RingBuilder(8, 6, 0) + dev_id = 0 + # 3 zones, 2 server for each zone, 2 device for each server + for z in range(3): + for i in range(2): + ip = '127.0.%s.%s' % (z, i + 1) + for d in range(2): + dev_id += 1 + rb.add_dev({'id': dev_id, 'region': 1, 'zone': z, + 'weight': 1000, 'ip': ip, 'port': 10000, + 'device': 'd%s' % dev_id}) + rb.rebalance(seed=7) + + # sanity, all balanced and 0 dispersion + self.assertEqual(rb.get_balance(), 0) + self.assertEqual(rb.dispersion, 0) + + # add one device for first server for each zone + for z in range(3): + ip = '127.0.%s.1' % z + dev_id += 
1 + rb.add_dev({'id': dev_id, 'region': 1, 'zone': z, + 'weight': 1000, 'ip': ip, 'port': 10000, + 'device': 'd%s' % dev_id}) + + changed_part, _, _ = rb.rebalance(seed=7) + + # sanity, all part but only one replica moved to new devices + self.assertEqual(changed_part, 2 ** 8) + + # but the first time, those are still unbalance becase ring builder + # can move only one replica for each part + self.assertEqual(rb.get_balance(), 16.9921875) + self.assertEqual(rb.dispersion, 59.765625) + + rb.rebalance(seed=7) + + # converge into around 0~1 + self.assertGreaterEqual(rb.get_balance(), 0) + self.assertLess(rb.get_balance(), 1) + # dispersion doesn't get any worse + self.assertEqual(rb.dispersion, 59.765625) + def test_effective_overload(self): rb = ring.RingBuilder(8, 3, 1) # z0 @@ -3595,12 +3693,12 @@ class TestGetRequiredOverload(unittest.TestCase): rb.rebalance(seed=17) self.assertEqual(rb.get_balance(), 1581.6406249999998) - # but despite the overall trend toward imbalance, in the tier - # with the huge device, the small device is trying to shed parts - # as effectively as it can (which would be useful if it was the - # only small device isolated in a tier with other huge devices - # trying to gobble up all the replicanths in the tier - see - # `test_one_small_guy_does_not_spoil_his_buddy`!) 
+ # but despite the overall trend toward imbalance, in the tier with the + # huge device, we want to see the small device (d4) try to shed parts + # as effectively as it can to the huge device in the same tier (d5) + # this is a useful behavior anytime when for whatever reason a device + # w/i a tier wants parts from another device already in the same tier + # another example is `test_one_small_guy_does_not_spoil_his_buddy` expected = { 0: 123, 1: 123, @@ -3691,6 +3789,45 @@ class TestGetRequiredOverload(unittest.TestCase): self.assertEqual(rb.get_balance(), 30.46875) # increasing overload moves towards one replica in each tier + rb.set_overload(0.3) + expected = { + 0: 0.553443113772455, + 1: 0.553443113772455, + 2: 0.553443113772455, + 3: 0.553443113772455, + 4: 0.778443113772455, + 5: 0.007784431137724551, + } + target_replicas = rb._build_target_replicas_by_tier() + self.assertEqual(expected, {t[-1]: r for (t, r) in + target_replicas.items() + if len(t) == 4}) + # ... and as always increasing overload makes balance *worse* + rb.rebalance(seed=12) + self.assertEqual(rb.get_balance(), 30.46875) + + # the little guy it really struggling to take his share tho + expected = { + 0: 142, + 1: 141, + 2: 142, + 3: 141, + 4: 200, + 5: 2, + } + self.assertEqual(expected, { + d['id']: d['parts'] for d in rb._iter_devs()}) + # ... and you can see it in the balance! + expected = { + 0: -7.367187499999986, + 1: -8.019531249999986, + 2: -7.367187499999986, + 3: -8.019531249999986, + 4: 30.46875, + 5: 30.46875, + } + self.assertEqual(expected, rb._build_balance_per_dev()) + rb.set_overload(0.5) expected = { 0: 0.5232035928143712, @@ -3705,7 +3842,7 @@ class TestGetRequiredOverload(unittest.TestCase): target_replicas.items() if len(t) == 4}) - # ... 
and as always increasing overload makes balance *worse* + # because the device is so small, balance get's bad quick rb.rebalance(seed=17) self.assertEqual(rb.get_balance(), 95.703125) diff --git a/test/unit/common/ring/test_composite_builder.py b/test/unit/common/ring/test_composite_builder.py index c81f622ca2..4bf51b3227 100644 --- a/test/unit/common/ring/test_composite_builder.py +++ b/test/unit/common/ring/test_composite_builder.py @@ -1115,7 +1115,7 @@ class TestCooperativeRingBuilder(BaseTestCompositeBuilder): rb1.rebalance() self.assertEqual([rb1], update_calls) self.assertEqual([rb1], can_part_move_calls.keys()) - self.assertEqual(512, len(can_part_move_calls[rb1])) + self.assertEqual(768, len(can_part_move_calls[rb1])) # two component builders with same parent builder cb = CompositeRingBuilder() @@ -1139,8 +1139,8 @@ class TestCooperativeRingBuilder(BaseTestCompositeBuilder): # rb1 is being rebalanced so gets checked, and rb2 also gets checked self.assertEqual(sorted([rb1, rb2]), sorted(can_part_move_calls)) - self.assertEqual(512, len(can_part_move_calls[rb1])) - self.assertEqual(512, len(can_part_move_calls[rb2])) + self.assertEqual(768, len(can_part_move_calls[rb1])) + self.assertEqual(768, len(can_part_move_calls[rb2])) def test_save_then_load(self): cb = CompositeRingBuilder() diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index a18a8808d2..d77d5c8553 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -619,10 +619,10 @@ class TestUtils(unittest.TestCase): rb.rebalance(seed=100) rb.validate() - self.assertEqual(rb.dispersion, 39.84375) + self.assertEqual(rb.dispersion, 55.46875) report = dispersion_report(rb) self.assertEqual(report['worst_tier'], 'r1z1') - self.assertEqual(report['max_dispersion'], 39.84375) + self.assertEqual(report['max_dispersion'], 44.921875) def build_tier_report(max_replicas, placed_parts, dispersion, replicas): @@ -633,16 +633,17 @@ class 
TestUtils(unittest.TestCase): 'replicas': replicas, } - # Each node should store 256 partitions to avoid multiple replicas + # Each node should store less than or equal to 256 partitions to + # avoid multiple replicas. # 2/5 of total weight * 768 ~= 307 -> 51 partitions on each node in # zone 1 are stored at least twice on the nodes expected = [ ['r1z1', build_tier_report( - 2, 256, 39.84375, [0, 0, 154, 102])], + 2, 256, 44.921875, [0, 0, 141, 115])], ['r1z1-127.0.0.1', build_tier_report( - 1, 256, 19.921875, [0, 205, 51, 0])], + 1, 242, 29.33884297520661, [14, 171, 71, 0])], ['r1z1-127.0.0.2', build_tier_report( - 1, 256, 19.921875, [0, 205, 51, 0])], + 1, 243, 29.218106995884774, [13, 172, 71, 0])], ] report = dispersion_report(rb, 'r1z1[^/]*$', verbose=True) graph = report['graph'] @@ -667,9 +668,9 @@ class TestUtils(unittest.TestCase): # can't move all the part-replicas in one rebalance rb.rebalance(seed=100) report = dispersion_report(rb, verbose=True) - self.assertEqual(rb.dispersion, 9.375) - self.assertEqual(report['worst_tier'], 'r1z1-127.0.0.1') - self.assertEqual(report['max_dispersion'], 7.18562874251497) + self.assertEqual(rb.dispersion, 11.71875) + self.assertEqual(report['worst_tier'], 'r1z1-127.0.0.2') + self.assertEqual(report['max_dispersion'], 8.875739644970414) # do a sencond rebalance rb.rebalance(seed=100) report = dispersion_report(rb, verbose=True) From e501ac7d2be5c11b2ed0005885c84023054ec041 Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Thu, 3 Sep 2015 12:19:05 +1000 Subject: [PATCH 23/43] Fix memcached exception out of range stacktrace When a memecached server goes offline in the middle of a MemcahceRing (swift memcache client) session then a request to memcached returns nothing and the client inside swift leaves an "IndexError: list index out of range" stacktrace. This change corrects that in all the places of MemcacheRing that is susceptible to it, and added some tests to stop regression. 
Clay added a diff to the bug that pretty much did the same thing I did, so I'll co-author him. Change-Id: I97c5420b4b4ecc127e9e94e9d0f91fbe92a5f623 Co-Authored-By: Clay Gerrard Closes-Bug: #897451 --- swift/common/memcached.py | 16 +++++-- test/unit/common/test_memcached.py | 67 ++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 3 deletions(-) diff --git a/swift/common/memcached.py b/swift/common/memcached.py index b948176bae..6644f2b73b 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -164,7 +164,7 @@ class MemcacheRing(object): if isinstance(e, Timeout): logging.error("Timeout %(action)s to memcached: %(server)s", {'action': action, 'server': server}) - elif isinstance(e, socket.error): + elif isinstance(e, (socket.error, MemcacheConnectionError)): logging.error("Error %(action)s to memcached: %(server)s: %(err)s", {'action': action, 'server': server, 'err': e}) else: @@ -283,7 +283,11 @@ class MemcacheRing(object): with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % key) line = fp.readline().strip().split() - while line[0].upper() != 'END': + while True: + if not line: + raise MemcacheConnectionError('incomplete read') + if line[0].upper() == 'END': + break if line[0].upper() == 'VALUE' and line[1] == key: size = int(line[3]) value = fp.read(size) @@ -329,6 +333,8 @@ class MemcacheRing(object): with Timeout(self._io_timeout): sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() + if not line: + raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'NOT_FOUND': add_val = delta if command == 'decr': @@ -444,7 +450,11 @@ class MemcacheRing(object): sock.sendall('get %s\r\n' % ' '.join(keys)) line = fp.readline().strip().split() responses = {} - while line[0].upper() != 'END': + while True: + if not line: + raise MemcacheConnectionError('incomplete read') + if line[0].upper() == 'END': + break if line[0].upper() == 'VALUE': size = int(line[3]) value = fp.read(size) 
diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index 448122aff1..c3274f56cd 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -19,6 +19,7 @@ from collections import defaultdict import errno from hashlib import md5 +import six import socket import time import unittest @@ -76,6 +77,7 @@ class MockMemcached(object): self.down = False self.exc_on_delete = False self.read_return_none = False + self.read_return_empty_str = False self.close_called = False def sendall(self, string): @@ -148,6 +150,8 @@ class MockMemcached(object): self.outbuf += 'NOT_FOUND\r\n' def readline(self): + if self.read_return_empty_str: + return '' if self.read_return_none: return None if self.down: @@ -336,6 +340,31 @@ class TestMemcached(unittest.TestCase): _junk, cache_timeout, _junk = mock.cache[cache_key] self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1) + def test_get_failed_connection_mid_request(self): + memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) + mock = MockMemcached() + memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool( + [(mock, mock)] * 2) + memcache_client.set('some_key', [1, 2, 3]) + self.assertEqual(memcache_client.get('some_key'), [1, 2, 3]) + self.assertEqual(mock.cache.values()[0][1], '0') + + # Now lets return an empty string, and make sure we aren't logging + # the error. + fake_stdout = six.StringIO() + not_expected = "Traceback" + + # force the logging through the debug_handler instead of the nose + # handler. This will use stdout, so we can access the IndexError stack + # trace if there is one raised. 
+ logger = debug_logger() + with patch("sys.stdout", fake_stdout),\ + patch('logging.exception', logger.exception),\ + patch('logging.error', logger.error): + mock.read_return_empty_str = True + self.assertEqual(memcache_client.get('some_key'), None) + self.assertNotIn(not_expected, fake_stdout.getvalue()) + def test_incr(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) mock = MockMemcached() @@ -356,6 +385,33 @@ class TestMemcached(unittest.TestCase): memcache_client.incr, 'some_key', delta=-15) self.assertTrue(mock.close_called) + def test_incr_failed_connection_mid_request(self): + memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) + mock = MockMemcached() + memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool( + [(mock, mock)] * 2) + self.assertEqual(memcache_client.incr('some_key', delta=5), 5) + self.assertEqual(memcache_client.get('some_key'), '5') + self.assertEqual(memcache_client.incr('some_key', delta=5), 10) + self.assertEqual(memcache_client.get('some_key'), '10') + + # Now lets return an empty string, and make sure we aren't logging + # the error. + fake_stdout = six.StringIO() + not_expected = "IndexError: list index out of range" + + # force the logging through the debug_handler instead of the nose + # handler. This will use stdout, so we can access the IndexError stack + # trace if there is one raised. 
+ logger = debug_logger() + with patch("sys.stdout", fake_stdout), \ + patch('logging.exception', logger.exception), \ + patch('logging.error', logger.error): + mock.read_return_empty_str = True + self.assertRaises(memcached.MemcacheConnectionError, + memcache_client.incr, 'some_key', delta=1) + self.assertFalse(not_expected in fake_stdout.getvalue()) + def test_incr_w_timeout(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) mock = MockMemcached() @@ -481,6 +537,17 @@ class TestMemcached(unittest.TestCase): ('some_key2', 'some_key1', 'not_exists'), 'multi_key'), [[4, 5, 6], [1, 2, 3], None]) + # Now lets simulate a lost connection and make sure we don't get + # the index out of range stack trace when it does + mock_stderr = six.StringIO() + not_expected = "IndexError: list index out of range" + with patch("sys.stderr", mock_stderr): + mock.read_return_empty_str = True + self.assertEqual(memcache_client.get_multi( + ('some_key2', 'some_key1', 'not_exists'), 'multi_key'), + None) + self.assertFalse(not_expected in mock_stderr.getvalue()) + def test_serialization(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211'], allow_pickle=True) From c662e5fc8e7800ca516468aaab582c146063c3d6 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 26 Sep 2017 10:15:59 +0100 Subject: [PATCH 24/43] Add account_autocreate=true to internal-client.conf-sample Closes-Bug: #1698426 Change-Id: I8a29a685bb12e60f4da4a0dc8270b408241ec415 --- etc/internal-client.conf-sample | 1 + swift/container/sync.py | 1 + 2 files changed, 2 insertions(+) diff --git a/etc/internal-client.conf-sample b/etc/internal-client.conf-sample index 916e424afc..aed3c9a63c 100644 --- a/etc/internal-client.conf-sample +++ b/etc/internal-client.conf-sample @@ -28,6 +28,7 @@ pipeline = catch_errors proxy-logging cache proxy-server [app:proxy-server] use = egg:swift#proxy +account_autocreate = true # See proxy-server.conf-sample for options [filter:cache] diff --git 
a/swift/container/sync.py b/swift/container/sync.py index 99df2e4be2..3d48541ada 100644 --- a/swift/container/sync.py +++ b/swift/container/sync.py @@ -77,6 +77,7 @@ pipeline = catch_errors proxy-logging cache proxy-server [app:proxy-server] use = egg:swift#proxy +account_autocreate = true # See proxy-server.conf-sample for options [filter:cache] From 53ab6f2907eff2bb90528010d881f2f87ee02505 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 26 Sep 2017 11:43:53 +0100 Subject: [PATCH 25/43] Assert memcached connection error is logged Follow up to [1] - change logger mocking so that we can assert the memcached connection error is logged. [1] Related-Change: I97c5420b4b4ecc127e9e94e9d0f91fbe92a5f623 Change-Id: I87cf4245082c5e0f0705c2c14ddfc0b5d5d89c06 --- test/unit/common/test_memcached.py | 32 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index c3274f56cd..b31f328110 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -352,18 +352,18 @@ class TestMemcached(unittest.TestCase): # Now lets return an empty string, and make sure we aren't logging # the error. fake_stdout = six.StringIO() - not_expected = "Traceback" - - # force the logging through the debug_handler instead of the nose - # handler. This will use stdout, so we can access the IndexError stack - # trace if there is one raised. + # force the logging through the DebugLogger instead of the nose + # handler. This will use stdout, so we can assert that no stack trace + # is logged. 
logger = debug_logger() with patch("sys.stdout", fake_stdout),\ - patch('logging.exception', logger.exception),\ - patch('logging.error', logger.error): + patch('swift.common.memcached.logging', logger): mock.read_return_empty_str = True self.assertEqual(memcache_client.get('some_key'), None) - self.assertNotIn(not_expected, fake_stdout.getvalue()) + log_lines = logger.get_lines_for_level('error') + self.assertIn('Error talking to memcached', log_lines[0]) + self.assertFalse(log_lines[1:]) + self.assertNotIn("Traceback", fake_stdout.getvalue()) def test_incr(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) @@ -398,19 +398,19 @@ class TestMemcached(unittest.TestCase): # Now lets return an empty string, and make sure we aren't logging # the error. fake_stdout = six.StringIO() - not_expected = "IndexError: list index out of range" - - # force the logging through the debug_handler instead of the nose - # handler. This will use stdout, so we can access the IndexError stack - # trace if there is one raised. + # force the logging through the DebugLogger instead of the nose + # handler. This will use stdout, so we can assert that no stack trace + # is logged. 
logger = debug_logger() with patch("sys.stdout", fake_stdout), \ - patch('logging.exception', logger.exception), \ - patch('logging.error', logger.error): + patch('swift.common.memcached.logging', logger): mock.read_return_empty_str = True self.assertRaises(memcached.MemcacheConnectionError, memcache_client.incr, 'some_key', delta=1) - self.assertFalse(not_expected in fake_stdout.getvalue()) + log_lines = logger.get_lines_for_level('error') + self.assertIn('Error talking to memcached', log_lines[0]) + self.assertFalse(log_lines[1:]) + self.assertNotIn('Traceback', fake_stdout.getvalue()) def test_incr_w_timeout(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) From 36a843be73e2d58c3fe49a049d514b421124bd06 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 27 Jun 2016 17:31:12 -0700 Subject: [PATCH 26/43] Preserve X-Static-Large-Object from .data file after POST You can't modify the X-Static-Large-Object metadata with a POST, an object being a SLO is a property of the .data file. Revert the change from 4500ff which attempts to correctly handle X-Static-Large-Object metadata on a POST, but is subject to a race if the most recent SLO .data isn't available during the POST. Instead this change adjusts the reading of metadata such that the X-Static-Large-Object metadata is always preserved from the metadata on the datafile and bleeds through a .meta if any. 
Closes-bug: #1453807 Closes-bug: #1634723 Co-Authored-By: Kota Tsuyuzaki Change-Id: Ie48a38442559229a2993443ab0a04dc84717ca59 --- swift/obj/diskfile.py | 6 ++++-- swift/obj/mem_diskfile.py | 5 +++-- swift/obj/server.py | 10 ++-------- test/unit/obj/test_diskfile.py | 26 ++++++++++++++++++++++++++ test/unit/obj/test_server.py | 1 + 5 files changed, 36 insertions(+), 12 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 42d5eacc47..86e53b6b4d 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -86,7 +86,8 @@ METADATA_KEY = 'user.swift.metadata' DROP_CACHE_WINDOW = 1024 * 1024 # These are system-set metadata keys that cannot be changed with a POST. # They should be lowercase. -DATAFILE_SYSTEM_META = set('content-length deleted etag'.split()) +RESERVED_DATAFILE_META = {'content-length', 'deleted', 'etag'} +DATAFILE_SYSTEM_META = {'x-static-large-object'} DATADIR_BASE = 'objects' ASYNCDIR_BASE = 'async_pending' TMP_BASE = 'tmp' @@ -2415,7 +2416,8 @@ class BaseDiskFile(object): self._merge_content_type_metadata(ctype_file) sys_metadata = dict( [(key, val) for key, val in self._datafile_metadata.items() - if key.lower() in DATAFILE_SYSTEM_META + if key.lower() in (RESERVED_DATAFILE_META | + DATAFILE_SYSTEM_META) or is_sys_meta('object', key)]) self._metadata.update(self._metafile_metadata) self._metadata.update(sys_metadata) diff --git a/swift/obj/mem_diskfile.py b/swift/obj/mem_diskfile.py index 1764f8a2f2..83a7309447 100644 --- a/swift/obj/mem_diskfile.py +++ b/swift/obj/mem_diskfile.py @@ -27,7 +27,7 @@ from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \ DiskFileCollision, DiskFileDeleted, DiskFileNotOpen from swift.common.request_helpers import is_sys_meta from swift.common.swob import multi_range_iterator -from swift.obj.diskfile import DATAFILE_SYSTEM_META +from swift.obj.diskfile import DATAFILE_SYSTEM_META, RESERVED_DATAFILE_META class InMemoryFileSystem(object): @@ -433,7 +433,8 @@ class 
DiskFile(object): # with the object data. immutable_metadata = dict( [(key, val) for key, val in cur_mdata.items() - if key.lower() in DATAFILE_SYSTEM_META + if key.lower() in (RESERVED_DATAFILE_META | + DATAFILE_SYSTEM_META) or is_sys_meta('object', key)]) metadata.update(immutable_metadata) metadata['name'] = self._name diff --git a/swift/obj/server.py b/swift/obj/server.py index 88853831c9..563ccb9865 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -55,7 +55,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \ HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \ HTTPServerError -from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter +from swift.obj.diskfile import RESERVED_DATAFILE_META, DiskFileRouter def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size): @@ -148,7 +148,7 @@ class ObjectController(BaseStorageServer): ] self.allowed_headers = set() for header in extra_allowed_headers: - if header not in DATAFILE_SYSTEM_META: + if header not in RESERVED_DATAFILE_META: self.allowed_headers.add(header) self.auto_create_account_prefix = \ conf.get('auto_create_account_prefix') or '.' 
@@ -526,11 +526,6 @@ class ObjectController(BaseStorageServer): override = key.lower().replace(override_prefix, 'x-') update_headers[override] = val - def _preserve_slo_manifest(self, update_metadata, orig_metadata): - if 'X-Static-Large-Object' in orig_metadata: - update_metadata['X-Static-Large-Object'] = \ - orig_metadata['X-Static-Large-Object'] - @public @timing_stats() def POST(self, request): @@ -573,7 +568,6 @@ class ObjectController(BaseStorageServer): if req_timestamp > orig_timestamp: metadata = {'X-Timestamp': req_timestamp.internal} - self._preserve_slo_manifest(metadata, orig_metadata) metadata.update(val for val in request.headers.items() if (is_user_meta('object', val[0]) or is_object_transient_sysmeta(val[0]))) diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 2d39c5c9af..8795ac40bc 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -3138,6 +3138,32 @@ class DiskFileMixin(BaseDiskFileTestMixin): # original sysmeta keys are preserved self.assertEqual('Value1', df._metadata['X-Object-Sysmeta-Key1']) + def test_disk_file_preserves_slo(self): + # build an object with some meta (at t0) + orig_metadata = {'X-Static-Large-Object': 'True', + 'Content-Type': 'text/garbage'} + df = self._get_open_disk_file(ts=self.ts().internal, + extra_metadata=orig_metadata) + + # sanity test + with df.open(): + self.assertEqual('True', df._metadata['X-Static-Large-Object']) + if df.policy.policy_type == EC_POLICY: + expected = df.policy.pyeclib_driver.get_segment_info( + 1024, df.policy.ec_segment_size)['fragment_size'] + else: + expected = 1024 + self.assertEqual(str(expected), df._metadata['Content-Length']) + + # write some new metadata (fast POST, don't send orig meta, at t0+1s) + df = self._simple_get_diskfile() + df.write_metadata({'X-Timestamp': self.ts().internal}) + df = self._simple_get_diskfile() + with df.open(): + # non-fast-post updateable keys are preserved + self.assertEqual('text/garbage', 
df._metadata['Content-Type']) + self.assertEqual('True', df._metadata['X-Static-Large-Object']) + def test_disk_file_reader_iter(self): df, df_data = self._create_test_file('1234567890') quarantine_msgs = [] diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index bc37d182a2..5f4d7ac96b 100644 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -346,6 +346,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 200) self.assertEqual(dict(resp.headers), { 'Content-Type': 'application/x-test', 'Content-Length': '6', From b4f08b6090057897ac647ba6331a4ec867b8e3b8 Mon Sep 17 00:00:00 2001 From: Jens Harbott Date: Wed, 27 Sep 2017 09:10:54 +0000 Subject: [PATCH 27/43] Fix functest for IPv6 endpoints Currently the functional tests fail if the storage_url contains a quoted IPv6 address because we try to split on ':'. But actually we don't need to split hostname and port only in order to combine it back together later on. Use the standard urlparse() function instead and work with the 'netloc' part of the URL which keeps hostname and port together.
Change-Id: I64589e5f2d6fb3cebc6768dc9e4de6264c09cbeb Partial-Bug: 1656329 --- test/functional/swift_test_client.py | 31 +++++++++++----------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 5dfcd0ff93..6105e2d267 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -122,8 +122,7 @@ class Connection(object): self.username = config['username'] self.password = config['password'] - self.storage_host = None - self.storage_port = None + self.storage_netloc = None self.storage_url = None self.conn_class = None @@ -134,9 +133,8 @@ class Connection(object): def authenticate(self, clone_conn=None): if clone_conn: self.conn_class = clone_conn.conn_class - self.storage_host = clone_conn.storage_host + self.storage_netloc = clone_conn.storage_netloc self.storage_url = clone_conn.storage_url - self.storage_port = clone_conn.storage_port self.storage_token = clone_conn.storage_token return @@ -162,26 +160,23 @@ class Connection(object): if not (storage_url and storage_token): raise AuthenticationFailed() - x = storage_url.split('/') + url = urllib.parse.urlparse(storage_url) - if x[0] == 'http:': + if url.scheme == 'http': self.conn_class = http_client.HTTPConnection - self.storage_port = 80 - elif x[0] == 'https:': + elif url.scheme == 'https': self.conn_class = http_client.HTTPSConnection - self.storage_port = 443 else: - raise ValueError('unexpected protocol %s' % (x[0])) + raise ValueError('unexpected protocol %s' % (url.scheme)) - self.storage_host = x[2].split(':')[0] - if ':' in x[2]: - self.storage_port = int(x[2].split(':')[1]) + self.storage_netloc = url.netloc # Make sure storage_url is a string and not unicode, since # keystoneclient (called by swiftclient) returns them in # unicode and this would cause troubles when doing # no_safe_quote query. 
- self.storage_url = str('/%s/%s' % (x[3], x[4])) - self.account_name = str(x[4]) + x = url.path.split('/') + self.storage_url = str('/%s/%s' % (x[1], x[2])) + self.account_name = str(x[2]) self.auth_user = auth_user # With v2 keystone, storage_token is unicode. # We want it to be string otherwise this would cause @@ -206,8 +201,7 @@ class Connection(object): return json.loads(self.response.read()) def http_connect(self): - self.connection = self.conn_class(self.storage_host, - port=self.storage_port) + self.connection = self.conn_class(self.storage_netloc) # self.connection.set_debuglevel(3) def make_path(self, path=None, cfg=None): @@ -335,8 +329,7 @@ class Connection(object): for (x, y) in parms.items()] path = '%s?%s' % (path, '&'.join(query_args)) - self.connection = self.conn_class(self.storage_host, - port=self.storage_port) + self.connection = self.conn_class(self.storage_netloc) # self.connection.set_debuglevel(3) self.connection.putrequest('PUT', path) for key, value in headers.items(): From 5c76b9e691166acc1f7b8483aaa3980ebc70bd3a Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 27 Sep 2017 14:11:14 +0100 Subject: [PATCH 28/43] Add concurrent_gets to proxy.conf man page Change-Id: Iab1beff4899d096936c0e5915f3ec32364b3e517 Closes-Bug: #1559347 --- doc/manpages/proxy-server.conf.5 | 9 +++++++++ doc/source/deployment_guide.rst | 4 ++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index f3d0e3060f..5f0c78662a 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -1023,6 +1023,15 @@ The valid values for sorting_method are "affinity", "shuffle", and "timing". .IP \fBtiming_expiry\fR If the "timing" sorting_method is used, the timings will only be valid for the number of seconds configured by timing_expiry. The default is 300. 
+.IP \fBconcurrent_gets\fR +If "on" then use replica count number of threads concurrently during a GET/HEAD +and return with the first successful response. In the EC case, this parameter +only affects an EC HEAD as an EC GET behaves differently. Default is "off". +.IP \fBconcurrency_timeout\fR +This parameter controls how long to wait before firing off the next +concurrent_get thread. A value of 0 would be fully concurrent, any other number +will stagger the firing of the threads. This number should be between 0 and +node_timeout. The default is the value of conn_timeout (0.5). .IP \fBrequest_node_count\fR Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 06c6f9b58f..cc43b1177b 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1943,12 +1943,12 @@ concurrent_gets off Use replica count numbe GET/HEAD and return with the first successful response. In the EC case, this parameter only - effects an EC HEAD as an EC GET + affects an EC HEAD as an EC GET behaves differently. concurrency_timeout conn_timeout This parameter controls how long to wait before firing off the next concurrent_get thread. A - value of 0 would we fully concurrent + value of 0 would be fully concurrent, any other number will stagger the firing of the threads. This number should be between 0 and node_timeout. From c6aea4b3730c937c41815831a7b4d60ff2899fcb Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 27 Sep 2017 19:19:53 +0000 Subject: [PATCH 29/43] Fix intermittent failure in test_x_delete_after X-Delete-After: 1 is known to be flakey; use 2 instead. When the proxy receives an X-Delete-After header, it automatically converts it to an X-Delete-At header based on the current time. So far, so good.
But in normalize_delete_at_timestamp we convert our time.time() + int(req.headers['X-Delete-After']) to a string representation of an integer and in the process always round *down*. As a result, we lose up to a second worth of object validity, meaning the object server can (rarely) respond 400, complaining that the X-Delete-At is in the past. Change-Id: Ib5e5a48f5cbed0eade8ba3bca96b26c82a9f9d84 Related-Change: I643be9af8f054f33897dd74071027a739eaa2c5c Related-Change: I10d3b9fcbefff3c415a92fa284a1ea1eda458581 Related-Change: Ifdb1920e5266aaa278baa0759fc0bfaa1aff2d0d Related-Bug: #1597520 Closes-Bug: #1699114 --- test/functional/test_object.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/test_object.py b/test/functional/test_object.py index f6c82668f2..657ab9e064 100644 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -379,7 +379,7 @@ class TestObject(unittest2.TestCase): 'x_delete_after'), '', {'X-Auth-Token': token, 'Content-Length': '0', - 'X-Delete-After': '1'}) + 'X-Delete-After': '2'}) return check_response(conn) resp = retry(put) resp.read() @@ -400,7 +400,7 @@ class TestObject(unittest2.TestCase): resp = retry(get) resp.read() count += 1 - time.sleep(1) + time.sleep(0.5) self.assertEqual(resp.status, 404) From 93fc9d2de86f37f62b1d6768600d0551e1b72fb6 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 27 Sep 2017 16:35:27 +0100 Subject: [PATCH 30/43] Add cautionary note re delay_reaping in account-server.conf-sample Change-Id: I2c3eea783321338316eecf467d30ba0b3217256c Related-Bug: #1514528 --- doc/manpages/account-server.conf.5 | 6 +++++- doc/source/deployment_guide.rst | 10 +++++++++- etc/account-server.conf-sample | 6 +++++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index 3a06dbd76b..018ea4f663 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -386,7 
+386,11 @@ Connection timeout to external services. The default is 0.5 seconds. .IP \fBdelay_reaping\fR Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in -seconds. The default is 0. +seconds. The default is 0. The sum of this value and the +container-updater interval should be less than the account-replicator +reclaim_age. This ensures that once the account-reaper has deleted a +container there is sufficient time for the container-updater to report to the +account before the account DB is removed. .IP \fBreap_warn_after\fR If the account fails to be be reaped due to a persistent error, the account reaper will log a message such as: diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 06c6f9b58f..1049d8f611 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1650,7 +1650,15 @@ delay_reaping 0 Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in seconds, - 2592000 = 30 days, for example. + 2592000 = 30 days, for example. The sum of + this value and the container-updater + ``interval`` should be less than the + account-replicator ``reclaim_age``. This + ensures that once the account-reaper has + deleted a container there is sufficient + time for the container-updater to report + to the account before the account DB is + removed. 
reap_warn_after 2892000 If the account fails to be be reaped due to a persistent error, the account reaper will log a message such as: diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 2be6b851d8..257c56be47 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -203,7 +203,11 @@ use = egg:swift#recon # # Normally, the reaper begins deleting account information for deleted accounts # immediately; you can set this to delay its work however. The value is in -# seconds; 2592000 = 30 days for example. +# seconds; 2592000 = 30 days for example. The sum of this value and the +# container-updater interval should be less than the account-replicator +# reclaim_age. This ensures that once the account-reaper has deleted a +# container there is sufficient time for the container-updater to report to the +# account before the account DB is removed. # delay_reaping = 0 # # If the account fails to be reaped due to a persistent error, the From 4716d3da1188eb2f2971004461554b05d0061ec6 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 27 Sep 2017 22:05:40 +0000 Subject: [PATCH 31/43] swift-account-audit: compare each etag to the hash from container ...rather than only comparing the ETag from the last response over and over again. 
NB: This tool *does not* like EC data :-( Change-Id: Idd37f94b07f607ab8a404dd986760361c39af029 Closes-Bug: 1266636 --- bin/swift-account-audit | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/swift-account-audit b/bin/swift-account-audit index 7e91ae312d..48ab82e854 100755 --- a/bin/swift-account-audit +++ b/bin/swift-account-audit @@ -108,7 +108,7 @@ class Auditor(object): consistent = False print(' MD5 does not match etag for "%s" on %s/%s' % (path, node['ip'], node['device'])) - etags.append(resp.getheader('ETag')) + etags.append((resp.getheader('ETag'), node)) else: conn = http_connect(node['ip'], node['port'], node['device'], part, 'HEAD', @@ -120,7 +120,7 @@ class Auditor(object): print(' Bad status HEADing object "%s" on %s/%s' % (path, node['ip'], node['device'])) continue - etags.append(resp.getheader('ETag')) + etags.append((resp.getheader('ETag'), node)) except Exception: self.object_exceptions += 1 consistent = False @@ -131,8 +131,8 @@ class Auditor(object): consistent = False print(" Failed fo fetch object %s at all!" % path) elif hash: - for etag in etags: - if resp.getheader('ETag').strip('"') != hash: + for etag, node in etags: + if etag.strip('"') != hash: consistent = False self.object_checksum_mismatch += 1 print(' ETag mismatch for "%s" on %s/%s' From 79905ae794db2da82c8834dc24177b1820b8c53a Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 27 Sep 2017 22:10:42 +0000 Subject: [PATCH 32/43] Replace SOSO auth prefix in examples with more-standard AUTH Change-Id: I98643d6acf248840a8360f31e446bc8ecb834898 --- bin/swift-account-audit | 6 +++--- doc/manpages/swift-account-audit.1 | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/swift-account-audit b/bin/swift-account-audit index 48ab82e854..f0ad4e7de0 100755 --- a/bin/swift-account-audit +++ b/bin/swift-account-audit @@ -44,9 +44,9 @@ You can also feed a list of urls to the script through stdin. Examples! 
- %(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076 - %(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container/object - %(cmd)s -e errors.txt SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container + %(cmd)s AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076 + %(cmd)s AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container/object + %(cmd)s -e errors.txt AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container %(cmd)s < errors.txt %(cmd)s -c 25 -d < errors.txt """ % {'cmd': sys.argv[0]} diff --git a/doc/manpages/swift-account-audit.1 b/doc/manpages/swift-account-audit.1 index 5f22cacac6..64d60c584b 100644 --- a/doc/manpages/swift-account-audit.1 +++ b/doc/manpages/swift-account-audit.1 @@ -46,9 +46,9 @@ Also download files and verify md5 .SH EXAMPLES .nf -/usr/bin/swift\-account\-audit\/ SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076 -/usr/bin/swift\-account\-audit\/ SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container/object -/usr/bin/swift\-account\-audit\/ \fB\-e\fR errors.txt SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container +/usr/bin/swift\-account\-audit\/ AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076 +/usr/bin/swift\-account\-audit\/ AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container/object +/usr/bin/swift\-account\-audit\/ \fB\-e\fR errors.txt AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container /usr/bin/swift\-account\-audit\/ < errors.txt /usr/bin/swift\-account\-audit\/ \fB\-c\fR 25 \fB\-d\fR < errors.txt .fi From 4665c175be7f5299b577925e922a59dfa33ada8c Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Mon, 2 Oct 2017 22:56:42 +0000 Subject: [PATCH 33/43] Clean up SLO tests and docs Change-Id: If7087cb674d6c575c4073ba09b5ef056d908655b --- swift/common/middleware/slo.py | 194 ++++++++++++------------ test/unit/common/middleware/test_slo.py | 157 ++++++++++--------- 2 files changed, 174 insertions(+), 177 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index c5a2a63994..fdc2e9efc1 100644 --- 
a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -27,7 +27,7 @@ Uploading the Manifest ---------------------- After the user has uploaded the objects to be concatenated, a manifest is -uploaded. The request must be a PUT with the query parameter:: +uploaded. The request must be a ``PUT`` with the query parameter:: ?multipart-manifest=put @@ -47,52 +47,49 @@ range (optional) the (inclusive) range within the object to use as a segment. If omitted, the entire object is used. =========== ======================================================== -The format of the list will be: - - .. code:: +The format of the list will be:: [{"path": "/cont/object", "etag": "etagoftheobjectsegment", "size_bytes": 10485760, - "range": "1048576-2097151"}, ...] + "range": "1048576-2097151"}, + ...] The number of object segments is limited to a configurable amount, default 1000. Each segment must be at least 1 byte. On upload, the middleware will head every segment passed in to verify: - 1. the segment exists (i.e. the HEAD was successful); - 2. the segment meets minimum size requirements; - 3. if the user provided a non-null etag, the etag matches; - 4. if the user provided a non-null size_bytes, the size_bytes matches; and - 5. if the user provided a range, it is a singular, syntactically correct range - that is satisfiable given the size of the object. +1. the segment exists (i.e. the ``HEAD`` was successful); +2. the segment meets minimum size requirements; +3. if the user provided a non-null ``etag``, the etag matches; +4. if the user provided a non-null ``size_bytes``, the size_bytes matches; and +5. if the user provided a ``range``, it is a singular, syntactically correct + range that is satisfiable given the size of the object. -Note that the etag and size_bytes keys are optional; if omitted, the +Note that the ``etag`` and ``size_bytes`` keys are optional; if omitted, the verification is not performed. 
If any of the objects fail to verify (not found, size/etag mismatch, below minimum size, invalid range) then the user will receive a 4xx error response. If everything does match, the user will receive a 2xx response and the SLO object is ready for downloading. -Behind the scenes, on success, a json manifest generated from the user input is -sent to object servers with an extra "X-Static-Large-Object: True" header -and a modified Content-Type. The items in this manifest will include the etag -and size_bytes for each segment, regardless of whether the client specified -them for verification. The parameter: swift_bytes=$total_size will be -appended to the existing Content-Type, where total_size is the sum of all -the included segments' size_bytes. This extra parameter will be hidden from -the user. +Behind the scenes, on success, a JSON manifest generated from the user input is +sent to object servers with an extra ``X-Static-Large-Object: True`` header +and a modified ``Content-Type``. The items in this manifest will include the +``etag`` and ``size_bytes`` for each segment, regardless of whether the client +specified them for verification. The parameter ``swift_bytes=$total_size`` will +be appended to the existing ``Content-Type``, where ``$total_size`` is the sum +of all the included segments' ``size_bytes``. This extra parameter will be +hidden from the user. Manifest files can reference objects in separate containers, which will improve concurrent upload speed. Objects can be referenced by multiple manifests. The segments of a SLO manifest can even be other SLO manifests. Treat them as any -other object i.e., use the Etag and Content-Length given on the PUT of the -sub-SLO in the manifest to the parent SLO. +other object i.e., use the ``Etag`` and ``Content-Length`` given on the ``PUT`` +of the sub-SLO in the manifest to the parent SLO. -While uploading a manifest, a user can send Etag for verification. 
It needs to -be md5 of the segments' etags, if there is no range specified. For example, if -the manifest to be uploaded looks like this: - - .. code:: +While uploading a manifest, a user can send ``Etag`` for verification. It needs +to be md5 of the segments' etags, if there is no range specified. For example, +if the manifest to be uploaded looks like this:: [{"path": "/cont/object1", "etag": "etagoftheobjectsegment1", @@ -101,16 +98,12 @@ the manifest to be uploaded looks like this: "etag": "etagoftheobjectsegment2", "size_bytes": 10485760}] -The Etag of the above manifest would be md5 of etagoftheobjectsegment1 and -etagoftheobjectsegment2. This could be computed in the following way: - - .. code:: +The Etag of the above manifest would be md5 of ``etagoftheobjectsegment1`` and +``etagoftheobjectsegment2``. This could be computed in the following way:: echo -n 'etagoftheobjectsegment1etagoftheobjectsegment2' | md5sum -If a manifest to be uploaded with a segment range looks like this: - - .. code:: +If a manifest to be uploaded with a segment range looks like this:: [{"path": "/cont/object1", "etag": "etagoftheobjectsegmentone", @@ -122,10 +115,8 @@ If a manifest to be uploaded with a segment range looks like this: "range": "3-4"}] While computing the Etag of the above manifest, internally each segment's etag -will be taken in the form of 'etagvalue:rangevalue;'. Hence the Etag of the -above manifest would be: - - .. code:: +will be taken in the form of ``etagvalue:rangevalue;``. Hence the Etag of the +above manifest would be:: echo -n 'etagoftheobjectsegmentone:1-2;etagoftheobjectsegmenttwo:3-4;' \ | md5sum @@ -136,65 +127,65 @@ Range Specification ------------------- Users now have the ability to specify ranges for SLO segments. -Users can now include an optional 'range' field in segment descriptions +Users can now include an optional ``range`` field in segment descriptions to specify which bytes from the underlying object should be used for the segment data. 
Only one range may be specified per segment. - .. note:: +.. note:: - The 'etag' and 'size_bytes' fields still describe the backing object as a - whole. + The ``etag`` and ``size_bytes`` fields still describe the backing object + as a whole. -If a user uploads this manifest: +If a user uploads this manifest:: - .. code:: - - [{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "0-1048576"}, - {"path": "/con/obj_seg_2", "size_bytes": 2097152, - "range": "512-1550000"}, - {"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "-2048"}] + [{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "0-1048576"}, + {"path": "/con/obj_seg_2", "size_bytes": 2097152, + "range": "512-1550000"}, + {"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "-2048"}] The segment will consist of the first 1048576 bytes of /con/obj_seg_1, followed by bytes 513 through 1550000 (inclusive) of /con/obj_seg_2, and finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of /con/obj_seg_1. - .. note:: +.. note:: - The minimum sized range is 1 byte. This is the same as the minimum - segment size. + The minimum sized range is 1 byte. This is the same as the minimum + segment size. ------------------------- Retrieving a Large Object ------------------------- -A GET request to the manifest object will return the concatenation of the +A ``GET`` request to the manifest object will return the concatenation of the objects from the manifest much like DLO. If any of the segments from the -manifest are not found or their Etag/Content Length have changed since upload, -the connection will drop. In this case a 409 Conflict will be logged in the -proxy logs and the user will receive incomplete results. Note that this will be -enforced regardless of whether the user performed per-segment validation during -upload. +manifest are not found or their ``Etag``/``Content-Length`` have changed since +upload, the connection will drop. 
In this case a ``409 Conflict`` will be +logged in the proxy logs and the user will receive incomplete results. Note +that this will be enforced regardless of whether the user performed per-segment +validation during upload. -The headers from this GET or HEAD request will return the metadata attached -to the manifest object itself with some exceptions:: +The headers from this ``GET`` or ``HEAD`` request will return the metadata +attached to the manifest object itself with some exceptions: - Content-Length: the total size of the SLO (the sum of the sizes of - the segments in the manifest) - X-Static-Large-Object: True - Etag: the etag of the SLO (generated the same way as DLO) +===================== ================================================== +Header Value +===================== ================================================== +Content-Length the total size of the SLO (the sum of the sizes of + the segments in the manifest) +X-Static-Large-Object the string "True" +Etag the etag of the SLO (generated the same way as DLO) +===================== ================================================== -A GET request with the query parameter:: +A ``GET`` request with the query parameter:: ?multipart-manifest=get will return a transformed version of the original manifest, containing additional fields and different key names. For example, the first manifest in -the example above would look like this: - - .. code:: +the example above would look like this:: [{"name": "/cont/object", "hash": "etagoftheobjectsegment", @@ -222,9 +213,10 @@ left to the user to use caution in handling the segments. Deleting a Large Object ----------------------- -A DELETE request will just delete the manifest object itself. +A ``DELETE`` request will just delete the manifest object itself. The segment +data referenced by the manifest will remain unchanged. -A DELETE with a query parameter:: +A ``DELETE`` with a query parameter:: ?multipart-manifest=delete @@ -235,22 +227,22 @@ itself. 
The failure response will be similar to the bulk delete middleware. Modifying a Large Object ------------------------ -PUTs / POSTs will work as expected, PUTs will just overwrite the manifest -object for example. +``PUT`` and ``POST`` requests will work as expected; ``PUT``\s will just +overwrite the manifest object for example. ------------------ Container Listings ------------------ In a container listing the size listed for SLO manifest objects will be the -total_size of the concatenated segments in the manifest. The overall -X-Container-Bytes-Used for the container (and subsequently for the account) -will not reflect total_size of the manifest but the actual size of the json +``total_size`` of the concatenated segments in the manifest. The overall +``X-Container-Bytes-Used`` for the container (and subsequently for the account) +will not reflect ``total_size`` of the manifest but the actual size of the JSON data stored. The reason for this somewhat confusing discrepancy is we want the container listing to reflect the size of the manifest object when it is downloaded. We do not, however, want to count the bytes-used twice (for both the manifest and the segments it's referring to) in the container and account -metadata which can be used for stats purposes. +metadata which can be used for stats and billing purposes. """ from collections import defaultdict @@ -296,20 +288,20 @@ def parse_and_validate_input(req_body, req_path): Given a request body, parses it and returns a list of dictionaries. The output structure is nearly the same as the input structure, but it - is not an exact copy. Given a valid input dictionary `d_in`, its - corresponding output dictionary `d_out` will be as follows: + is not an exact copy. 
Given a valid input dictionary ``d_in``, its + corresponding output dictionary ``d_out`` will be as follows: - * d_out['etag'] == d_in['etag'] + * d_out['etag'] == d_in['etag'] - * d_out['path'] == d_in['path'] + * d_out['path'] == d_in['path'] - * d_in['size_bytes'] can be a string ("12") or an integer (12), but - d_out['size_bytes'] is an integer. + * d_in['size_bytes'] can be a string ("12") or an integer (12), but + d_out['size_bytes'] is an integer. - * (optional) d_in['range'] is a string of the form "M-N", "M-", or - "-N", where M and N are non-negative integers. d_out['range'] is the - corresponding swob.Range object. If d_in does not have a key - 'range', neither will d_out. + * (optional) d_in['range'] is a string of the form "M-N", "M-", or + "-N", where M and N are non-negative integers. d_out['range'] is the + corresponding swob.Range object. If d_in does not have a key + 'range', neither will d_out. :raises HTTPException: on parse errors or semantic errors (e.g. bogus JSON structure, syntactically invalid ranges) @@ -435,7 +427,7 @@ class SloGetContext(WSGIContext): agent='%(orig)s SLO MultipartGET', swift_source='SLO') sub_resp = sub_req.get_response(self.slo.app) - if not is_success(sub_resp.status_int): + if not sub_resp.is_success: close_if_possible(sub_resp.app_iter) raise ListingIterError( 'ERROR: while fetching %s, GET of submanifest %s ' @@ -615,8 +607,9 @@ class SloGetContext(WSGIContext): thing with them. Returns an iterator suitable for sending up the WSGI chain. - :param req: swob.Request object; is a GET or HEAD request aimed at - what may be a static large object manifest (or may not). + :param req: :class:`~swift.common.swob.Request` object; is a ``GET`` or + ``HEAD`` request aimed at what may (or may not) be a static + large object manifest. 
:param start_response: WSGI start_response callable """ if req.params.get('multipart-manifest') != 'get': @@ -898,7 +891,9 @@ class StaticLargeObject(object): The response body (only on GET, of course) will consist of the concatenation of the segments. - :params req: a swob.Request with a path referencing an object + :param req: a :class:`~swift.common.swob.Request` with a path + referencing an object + :param start_response: WSGI start_response callable :raises HttpException: on errors """ return SloGetContext(self).handle_slo_get_or_head(req, start_response) @@ -910,13 +905,11 @@ class StaticLargeObject(object): save a manifest generated from the user input. Uses WSGIContext to call self and start_response and returns a WSGI iterator. - :params req: a swob.Request with an obj in path + :param req: a :class:`~swift.common.swob.Request` with an obj in path + :param start_response: WSGI start_response callable :raises HttpException: on errors """ - try: - vrs, account, container, obj = req.split_path(1, 4, True) - except ValueError: - return self.app(req.environ, start_response) + vrs, account, container, obj = req.split_path(4, rest_with_last=True) if req.content_length > self.max_manifest_size: raise HTTPRequestEntityTooLarge( "Manifest File > %d bytes" % self.max_manifest_size) @@ -1073,7 +1066,8 @@ class StaticLargeObject(object): A generator function to be used to delete all the segments and sub-segments referenced in a manifest. - :params req: a swob.Request with an SLO manifest in path + :param req: a :class:`~swift.common.swob.Request` with an SLO manifest + in path :raises HTTPPreconditionFailed: on invalid UTF8 in request path :raises HTTPBadRequest: on too many buffered sub segments and on invalid SLO manifest path @@ -1109,8 +1103,12 @@ class StaticLargeObject(object): def get_slo_segments(self, obj_name, req): """ - Performs a swob.Request and returns the SLO manifest's segments. 
+ Performs a :class:`~swift.common.swob.Request` and returns the SLO + manifest's segments. + :param obj_name: the name of the object being deleted, + as ``/container/object`` + :param req: the base :class:`~swift.common.swob.Request` :raises HTTPServerError: on unable to load obj_name or on unable to load the SLO manifest data. :raises HTTPBadRequest: on not an SLO manifest @@ -1151,7 +1149,7 @@ class StaticLargeObject(object): Will delete all the segments in the SLO manifest and then, if successful, will delete the manifest file. - :params req: a swob.Request with an obj in path + :param req: a :class:`~swift.common.swob.Request` with an obj in path :returns: swob.Response whose app_iter set to Bulk.handle_delete_iter """ req.headers['Content-Type'] = None # Ignore content-type from client diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 7238aa70b5..a74fe6a08d 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -389,37 +389,35 @@ class TestSloPutManifest(SloTestCase): 'PUT', '/v1/AUTH_test/checktest/man_3', swob.HTTPCreated, {}, None) def test_put_manifest_too_quick_fail(self): - req = Request.blank('/v1/a/c/o') + req = Request.blank('/v1/a/c/o?multipart-manifest=put', method='PUT') req.content_length = self.slo.max_manifest_size + 1 - try: - self.slo.handle_multipart_put(req, fake_start_response) - except HTTPException as e: - pass - self.assertEqual(e.status_int, 413) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '413 Request Entity Too Large') with patch.object(self.slo, 'max_manifest_segments', 0): - req = Request.blank('/v1/a/c/o', body=test_json_data) - e = None - try: - self.slo.handle_multipart_put(req, fake_start_response) - except HTTPException as e: - pass - self.assertEqual(e.status_int, 413) + req = Request.blank('/v1/a/c/o?multipart-manifest=put', + method='PUT', body=test_json_data) + status, headers, body = 
self.call_slo(req) + self.assertEqual(status, '413 Request Entity Too Large') - req = Request.blank('/v1/a/c/o', headers={'X-Copy-From': 'lala'}) - try: - self.slo.handle_multipart_put(req, fake_start_response) - except HTTPException as e: - pass - self.assertEqual(e.status_int, 405) + req = Request.blank('/v1/a/c/o?multipart-manifest=put', method='PUT', + headers={'X-Copy-From': 'lala'}) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '405 Method Not Allowed') - # ignores requests to / - req = Request.blank( - '/?multipart-manifest=put', - environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data) - self.assertEqual( - list(self.slo.handle_multipart_put(req, fake_start_response)), - ['passed']) + # we already validated that there are enough path segments in __call__ + for path in ('/', '/v1/', '/v1/a/', '/v1/a/c/'): + req = Request.blank( + path + '?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data) + with self.assertRaises(ValueError): + list(self.slo.handle_multipart_put(req, fake_start_response)) + + req = Request.blank( + path.rstrip('/') + '?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data) + with self.assertRaises(ValueError): + list(self.slo.handle_multipart_put(req, fake_start_response)) def test_handle_multipart_put_success(self): req = Request.blank( @@ -430,11 +428,9 @@ class TestSloPutManifest(SloTestCase): 'X-Object-Sysmeta-Slo-Size'): self.assertNotIn(h, req.headers) - def my_fake_start_response(*args, **kwargs): - gen_etag = '"' + md5hex('etagoftheobjectsegment') + '"' - self.assertIn(('Etag', gen_etag), args[1]) - - self.slo(req.environ, my_fake_start_response) + status, headers, body = self.call_slo(req) + gen_etag = '"' + md5hex('etagoftheobjectsegment') + '"' + self.assertIn(('Etag', gen_etag), headers) self.assertIn('X-Static-Large-Object', req.headers) self.assertEqual(req.headers['X-Static-Large-Object'], 'True') self.assertIn('X-Object-Sysmeta-Slo-Etag', 
req.headers) @@ -486,10 +482,10 @@ class TestSloPutManifest(SloTestCase): {'path': '/cont/small_object', 'etag': 'etagoftheobjectsegment', 'size_bytes': 100}]) - req = Request.blank('/v1/a/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as catcher: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(catcher.exception.status_int, 400) + req = Request.blank('/v1/a/c/o?multipart-manifest=put', + method='PUT', body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') def test_handle_multipart_put_disallow_empty_last_segment(self): test_json_data = json.dumps([{'path': '/cont/object', @@ -498,10 +494,10 @@ class TestSloPutManifest(SloTestCase): {'path': '/cont/small_object', 'etag': 'etagoftheobjectsegment', 'size_bytes': 0}]) - req = Request.blank('/v1/a/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as catcher: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(catcher.exception.status_int, 400) + req = Request.blank('/v1/a/c/o?multipart-manifest=put', + method='PUT', body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') def test_handle_multipart_put_success_unicode(self): test_json_data = json.dumps([{'path': u'/cont/object\u2661', @@ -512,7 +508,7 @@ class TestSloPutManifest(SloTestCase): environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'}, body=test_json_data) self.assertNotIn('X-Static-Large-Object', req.headers) - self.slo(req.environ, fake_start_response) + self.call_slo(req) self.assertIn('X-Static-Large-Object', req.headers) self.assertEqual(req.environ['PATH_INFO'], '/v1/AUTH_test/c/man') self.assertIn(('HEAD', '/v1/AUTH_test/cont/object\xe2\x99\xa1'), @@ -523,7 +519,7 @@ class TestSloPutManifest(SloTestCase): '/test_good/AUTH_test/c/man?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'}, body=test_xml_data) - 
no_xml = self.slo(req.environ, fake_start_response) + no_xml = list(self.slo(req.environ, fake_start_response)) self.assertEqual(no_xml, ['Manifest must be valid JSON.\n']) def test_handle_multipart_put_bad_data(self): @@ -533,14 +529,15 @@ class TestSloPutManifest(SloTestCase): req = Request.blank( '/test_good/AUTH_test/c/man?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, body=bad_data) - self.assertRaises(HTTPException, self.slo.handle_multipart_put, req, - fake_start_response) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') + self.assertIn('invalid size_bytes', body) for bad_data in [ json.dumps([{'path': '/cont', 'etag': 'etagoftheobj', 'size_bytes': 100}]), json.dumps('asdf'), json.dumps(None), json.dumps(5), - 'not json', '1234', None, '', json.dumps({'path': None}), + 'not json', '1234', '', json.dumps({'path': None}), json.dumps([{'path': '/cont/object', 'etag': None, 'size_bytes': 12}]), json.dumps([{'path': '/cont/object', 'etag': 'asdf', @@ -557,8 +554,14 @@ class TestSloPutManifest(SloTestCase): req = Request.blank( '/v1/AUTH_test/c/man?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, body=bad_data) - self.assertRaises(HTTPException, self.slo.handle_multipart_put, - req, fake_start_response) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') + + req = Request.blank( + '/v1/AUTH_test/c/man?multipart-manifest=put', + environ={'REQUEST_METHOD': 'PUT'}, body=None) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '411 Length Required') def test_handle_multipart_put_check_data(self): good_data = json.dumps( @@ -642,10 +645,11 @@ class TestSloPutManifest(SloTestCase): {'path': '/cont/small_object', 'etag': 'etagoftheobjectsegment', 'size_bytes': 100}]) - req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as cm: - self.slo.handle_multipart_put(req, fake_start_response) - 
self.assertEqual(cm.exception.status_int, 400) + req = Request.blank('/v1/AUTH_test/c/o?multipart-manifest=put', + method='PUT', body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') + self.assertIn('Too small; each segment must be at least 1 byte', body) def test_handle_multipart_put_skip_size_check_no_early_bailout(self): # The first is too small (it's 0 bytes), and @@ -657,12 +661,12 @@ class TestSloPutManifest(SloTestCase): {'path': '/cont/object2', 'etag': 'wrong wrong wrong', 'size_bytes': 100}]) - req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data) - with self.assertRaises(HTTPException) as cm: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(cm.exception.status_int, 400) - self.assertIn('at least 1 byte', cm.exception.body) - self.assertIn('Etag Mismatch', cm.exception.body) + req = Request.blank('/v1/AUTH_test/c/o?multipart-manifest=put', + method='PUT', body=test_json_data) + status, headers, body = self.call_slo(req) + self.assertEqual(status, '400 Bad Request') + self.assertIn('at least 1 byte', body) + self.assertIn('Etag Mismatch', body) def test_handle_multipart_put_skip_etag_check(self): good_data = json.dumps([ @@ -694,10 +698,9 @@ class TestSloPutManifest(SloTestCase): req = Request.blank( '/v1/AUTH_test/checktest/man_3?multipart-manifest=put', environ={'REQUEST_METHOD': 'PUT'}, body=bad_data) - with self.assertRaises(HTTPException) as catcher: - self.slo.handle_multipart_put(req, fake_start_response) - self.assertEqual(400, catcher.exception.status_int) - self.assertIn("Unsatisfiable Range", catcher.exception.body) + status, headers, body = self.call_slo(req) + self.assertEqual('400 Bad Request', status) + self.assertIn("Unsatisfiable Range", body) def test_handle_multipart_put_success_conditional(self): test_json_data = json.dumps([{'path': u'/cont/object', @@ -2771,29 +2774,25 @@ class TestSloGetManifest(SloTestCase): 
self.assertTrue(error_lines[0].startswith( 'ERROR: An error occurred while retrieving segments')) - def test_download_takes_too_long(self): - the_time = [time.time()] - - def mock_time(): - return the_time[0] - - # this is just a convenient place to hang a time jump; there's nothing - # special about the choice of is_success(). - def mock_is_success(status_int): - the_time[0] += 7 * 3600 - return status_int // 100 == 2 - + @patch('swift.common.request_helpers.time') + def test_download_takes_too_long(self, mock_time): + mock_time.time.side_effect = [ + 0, # start time + 1, # just building the first segment request; purely local + 2, # build the second segment request object, too, so we know we + # can't coalesce and should instead go fetch the first segment + 7 * 3600, # that takes a while, but gets serviced; we build the + # third request and service the second + 21 * 3600, # which takes *even longer* (ostensibly something to + # do with submanifests), but we build the fourth... + 28 * 3600, # and before we go to service it we time out + ] req = Request.blank( '/v1/AUTH_test/gettest/manifest-abcd', environ={'REQUEST_METHOD': 'GET'}) - with patch.object(slo, 'is_success', mock_is_success), \ - patch('swift.common.request_helpers.time.time', - mock_time), \ - patch('swift.common.request_helpers.is_success', - mock_is_success): - status, headers, body, exc = self.call_slo( - req, expect_exception=True) + status, headers, body, exc = self.call_slo( + req, expect_exception=True) self.assertIsInstance(exc, SegmentError) self.assertEqual(status, '200 OK') From 839c13003aea955c48e77269d4d40a567e07dd44 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 4 Oct 2017 18:59:49 +0000 Subject: [PATCH 34/43] Stop clearing params for account_autocreate responses Otherwise, we send back a 204 where middlewares should be expecting a 200 and an empty JSON array. 
Change-Id: I05549342327108f71b60a316f734c55bc9589915 Related-Change: Id3ce37aa0402e2d8dd5784ce329d7cb4fbaf700d --- swift/proxy/controllers/account.py | 1 - test/unit/proxy/test_server.py | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/swift/proxy/controllers/account.py b/swift/proxy/controllers/account.py index 7a42c57748..8751fcb93e 100644 --- a/swift/proxy/controllers/account.py +++ b/swift/proxy/controllers/account.py @@ -89,7 +89,6 @@ class AccountController(Controller): # creates the account if necessary. If we feed it a perfect # lie, it'll just try to create the container without # creating the account, and that'll fail. - req.params = {} # clear our format override resp = account_listing_response( self.account_name, req, listing_formats.get_listing_content_type(req)) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 31b73be5b8..12e96c8c9f 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -7639,7 +7639,7 @@ class TestContainerController(unittest.TestCase): # cache a 204 for the account because it's sort of like it # exists self.app.account_autocreate = True - test_status_map((404, 404, 404), 404, None, 204) + test_status_map((404, 404, 404), 404, None, 200) def test_PUT_policy_headers(self): backend_requests = [] @@ -8810,9 +8810,9 @@ class TestAccountController(unittest.TestCase): # If successful, the GET request is repeated. 
controller.app.account_autocreate = True self.assert_status_map(controller.GET, - (404, 404, 404), 204) + (404, 404, 404), 200) self.assert_status_map(controller.GET, - (404, 503, 404), 204) + (404, 503, 404), 200) # We always return 503 if no majority between 4xx, 3xx or 2xx found self.assert_status_map(controller.GET, @@ -8846,9 +8846,9 @@ class TestAccountController(unittest.TestCase): (404, 404, 404), 404) controller.app.account_autocreate = True self.assert_status_map(controller.HEAD, - (404, 404, 404), 204) + (404, 404, 404), 200) self.assert_status_map(controller.HEAD, - (500, 404, 404), 204) + (500, 404, 404), 200) # We always return 503 if no majority between 4xx, 3xx or 2xx found self.assert_status_map(controller.HEAD, (500, 500, 400), 503) From 747b9d928624a3f44f1f9f0269489597cddc5997 Mon Sep 17 00:00:00 2001 From: Jan Zerebecki Date: Wed, 4 Oct 2017 21:14:03 +0200 Subject: [PATCH 35/43] Fix swift-ring-builder set_weight with >1 device When iterating over the (device, weight) tuples do not carry over the device from the previous iteration. Closes-Bug: 1454433 Change-Id: Iba82519b0b2bc80e2c1abbed308b651c4da4b06a --- swift/cli/ringbuilder.py | 5 ++--- test/unit/cli/test_ringbuilder.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index a3a3570cbd..1b9cb05577 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -218,7 +218,6 @@ def _parse_set_weight_values(argvish): # --options format, # but not both. If both are specified, raise an error. 
try: - devs = [] if not new_cmd_format: if len(args) % 2 != 0: print(Commands.set_weight.__doc__.strip()) @@ -227,7 +226,7 @@ def _parse_set_weight_values(argvish): devs_and_weights = izip(islice(argvish, 0, len(argvish), 2), islice(argvish, 1, len(argvish), 2)) for devstr, weightstr in devs_and_weights: - devs.extend(builder.search_devs( + devs = (builder.search_devs( parse_search_value(devstr)) or []) weight = float(weightstr) _set_weight_values(devs, weight, opts) @@ -236,7 +235,7 @@ def _parse_set_weight_values(argvish): print(Commands.set_weight.__doc__.strip()) exit(EXIT_ERROR) - devs.extend(builder.search_devs( + devs = (builder.search_devs( parse_search_values_from_opts(opts)) or []) weight = float(args[0]) _set_weight_values(devs, weight, opts) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index cc485ee3a5..5cb6c0ec7c 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -804,6 +804,21 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring.rebalance() self.assertTrue(ring.validate()) + def test_set_weight_old_format_two_devices(self): + # Would block without the 'yes' argument + self.create_sample_ring() + argv = ["", self.tmpfile, "set_weight", + "d2", "3.14", "d1", "6.28", "--yes"] + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + ring = RingBuilder.load(self.tmpfile) + # Check that weight was changed + self.assertEqual(ring.devs[2]['weight'], 3.14) + self.assertEqual(ring.devs[1]['weight'], 6.28) + # Check that other devices in ring are not affected + self.assertEqual(ring.devs[0]['weight'], 100) + self.assertEqual(ring.devs[3]['weight'], 100) + def test_set_weight_ipv4_old_format(self): self.create_sample_ring() # Test ipv4(old format) From 816331155c624c444ed123bcab412821bd7854fb Mon Sep 17 00:00:00 2001 From: HCLTech-SSW Date: Fri, 6 Oct 2017 01:37:34 -0700 Subject: [PATCH 36/43] Added the man page for container-sync-realms.conf Updated the 
comments of reviewers. Change-Id: I0760ce149e6d74f2b3f1badebac3e36da1ab7e77 Closes-Bug: #1607026 --- doc/manpages/container-sync-realms.conf.5 | 119 ++++++++++++++++++++++ 1 file changed, 119 insertions(+) create mode 100644 doc/manpages/container-sync-realms.conf.5 diff --git a/doc/manpages/container-sync-realms.conf.5 b/doc/manpages/container-sync-realms.conf.5 new file mode 100644 index 0000000000..2343027493 --- /dev/null +++ b/doc/manpages/container-sync-realms.conf.5 @@ -0,0 +1,119 @@ +.\" +.\" Author: HCLTech-SSW +.\" Copyright (c) 2010-2017 OpenStack Foundation. +.\" +.\" Licensed under the Apache License, Version 2.0 (the "License"); +.\" you may not use this file except in compliance with the License. +.\" You may obtain a copy of the License at +.\" +.\" http://www.apache.org/licenses/LICENSE-2.0 +.\" +.\" Unless required by applicable law or agreed to in writing, software +.\" distributed under the License is distributed on an "AS IS" BASIS, +.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +.\" implied. +.\" See the License for the specific language governing permissions and +.\" limitations under the License. +.\" +.TH container-sync-realms.conf 5 "10/09/2017" "Linux" "OpenStack Swift" + +.SH NAME +.LP +.B container-sync-realms.conf +\- configuration file for the OpenStack Swift container sync realms + + + +.SH SYNOPSIS +.LP +.B container-sync-realms.conf + + + +.SH DESCRIPTION +.PP +This is the configuration file used by the Object storage Swift to perform container to container +synchronization. This configuration file is used to configure clusters to allow/accept sync +requests to/from other clusters. Using this configuration file, the user specifies where +to sync their container to along with a secret synchronization key. 
+ +You can find more information about container to container synchronization at +\fIhttps://docs.openstack.org/swift/latest/overview_container_sync.html\fR + +The configuration file follows the python-pastedeploy syntax. The file is divided +into sections, which are enclosed by square brackets. Each section will contain a +certain number of key/value parameters which are described later. + +Any line that begins with a '#' symbol is ignored. + +You can find more information about python-pastedeploy configuration format at +\fIhttp://pythonpaste.org/deploy/#config-format\fR + + + +.SH GLOBAL SECTION +.PD 1 +.RS 0 +This is indicated by section named [DEFAULT]. Below are the parameters that +are acceptable within this section. + +.IP "\fBmtime_check_interval\fR" +The number of seconds between checking the modified time of this config file for changes +and therefore reloading it. The default value is 300. +.RE +.PD + + + +.SH REALM SECTIONS +.PD 1 +.RS 0 +Each section name is the name of a sync realm, for example [realm1]. +A sync realm is a set of clusters that have agreed to allow container syncing with each other. +Realm names will be considered case insensitive. Below are the parameters that are acceptable +within this section. + +.IP "\fBcluster_clustername1\fR" +Any values in the realm section whose name begin with cluster_ will indicate the name and +endpoint of a cluster and will be used by external users in their container's +X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name". +The Realm and cluster names are considered to be case insensitive. +.IP "\fBcluster_clustername2\fR" +Any values in the realm section whose name begin with cluster_ will indicate the name and +endpoint of a cluster and will be used by external users in their container's +X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name". 
+The Realm and cluster names are considered to be case insensitive. + +The endpoint is what the container sync daemon will use when sending out +requests to that cluster. Keep in mind this endpoint must be reachable by all +container servers, since that is where the container sync daemon runs. Note +that the endpoint ends with /v1/ and that the container sync daemon will then +add the account/container/obj name after that. + +.IP "\fBkey\fR" +The key is the overall cluster-to-cluster key used in combination with the external +users' key that they set on their containers' X-Container-Sync-Key metadata header +values. These keys will be used to sign each request the container sync daemon makes +and used to validate each incoming container sync request. +.IP "\fBkey2\fR" +The key2 is optional and is an additional key incoming requests will be checked +against. This is so you can rotate keys if you wish; you move the existing +key to key2 and make a new key value. +.RE +.PD + + + + +.SH DOCUMENTATION +.LP +More in depth documentation in regards to +.BI swift-container-sync +and also about OpenStack Swift as a whole can be found at +.BI https://docs.openstack.org/swift/latest/overview_container_sync.html +and +.BI https://docs.openstack.org/swift/latest/ + + +.SH "SEE ALSO" +.BR swift-container-sync(1) From 8b7f15223cde4c19fd9cbbd97e8ad79a1b4afa8d Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 9 Oct 2017 10:06:19 +0100 Subject: [PATCH 37/43] Add example to container-sync-realms.conf.5 man page Related-Change: I0760ce149e6d74f2b3f1badebac3e36da1ab7e77 Change-Id: I129de42f91d7924c7bcb9952f17fe8a1a10ae219 --- doc/manpages/container-sync-realms.conf.5 | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/manpages/container-sync-realms.conf.5 b/doc/manpages/container-sync-realms.conf.5 index 2343027493..6602615aae 100644 --- a/doc/manpages/container-sync-realms.conf.5 +++ b/doc/manpages/container-sync-realms.conf.5 @@ -102,8 +102,27 @@ key to 
key2 and make a new key value. .RE .PD +.SH EXAMPLE +.nf +.RS 0 +[DEFAULT] +mtime_check_interval = 300 +[realm1] +key = realm1key +key2 = realm1key2 +cluster_clustername1 = https://host1/v1/ +cluster_clustername2 = https://host2/v1/ + +[realm2] +key = realm2key +key2 = realm2key2 +cluster_clustername3 = https://host3/v1/ +cluster_clustername4 = https://host4/v1/ +.RE +.fi + .SH DOCUMENTATION .LP From a4a5494fd2fe8a43a5d50a21a1951266cc7c4212 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 9 Oct 2017 11:33:28 +0100 Subject: [PATCH 38/43] test account autocreate listing format Related-Change: Id3ce37aa0402e2d8dd5784ce329d7cb4fbaf700d Change-Id: I50c22225bbebff71600bea9158bda1edd18b48b0 --- test/unit/proxy/test_server.py | 37 +++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 12e96c8c9f..5c84293409 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -7636,8 +7636,7 @@ class TestContainerController(unittest.TestCase): # return 404 (as account is not found) and don't cache container test_status_map((404, 404, 404), 404, None, 404) - # cache a 204 for the account because it's sort of like it - # exists + # cache a 200 for the account because it appears to be created self.app.account_autocreate = True test_status_map((404, 404, 404), 404, None, 200) @@ -8809,15 +8808,39 @@ class TestAccountController(unittest.TestCase): # ALL nodes are asked to create the account # If successful, the GET request is repeated. 
controller.app.account_autocreate = True - self.assert_status_map(controller.GET, - (404, 404, 404), 200) - self.assert_status_map(controller.GET, - (404, 503, 404), 200) - + expected = 200 + self.assert_status_map(controller.GET, (404, 404, 404), expected) + self.assert_status_map(controller.GET, (404, 503, 404), expected) # We always return 503 if no majority between 4xx, 3xx or 2xx found self.assert_status_map(controller.GET, (500, 500, 400), 503) + def _check_autocreate_listing_with_query_string(self, query_string): + controller = proxy_server.AccountController(self.app, 'a') + controller.app.account_autocreate = True + statuses = (404, 404, 404) + expected = 200 + # get the response to check it has json content + with save_globals(): + set_http_connect(*statuses) + req = Request.blank('/v1/a' + query_string) + self.app.update_request(req) + res = controller.GET(req) + headers = res.headers + self.assertEqual( + 'yes', headers.get('X-Backend-Fake-Account-Listing')) + self.assertEqual( + 'application/json; charset=utf-8', + headers.get('Content-Type')) + self.assertEqual([], json.loads(res.body)) + self.assertEqual(res.status_int, expected) + + def test_auto_create_account_listing_response_is_json(self): + self._check_autocreate_listing_with_query_string('') + self._check_autocreate_listing_with_query_string('?format=plain') + self._check_autocreate_listing_with_query_string('?format=json') + self._check_autocreate_listing_with_query_string('?format=xml') + def test_HEAD(self): # Same behaviour as GET with save_globals(): From 94bac4ab2fe65104d602378e8e49c37b8187a75d Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 12 May 2017 10:55:21 -0400 Subject: [PATCH 39/43] domain_remap: stop mangling client-provided paths The root_path option for domain_remap seems to serve two purposes: - provide the first component (version) for the backend request - be an optional leading component for the client request, which should be stripped off As a result, we have mappings 
like: c.a.example.com/v1/o -> /v1/AUTH_a/c/o instead of c.a.example.com/v1/o -> /v1/AUTH_a/c/v1/o which is rather bizarre. Why on earth did we *ever* start doing this? Now, this second behavior is managed by a config option (mangle_client_paths) with the default being to disable it. Upgrade Consideration ===================== If for some reason you *do* want to drop some parts of the client-supplied path, add mangle_client_paths = True to the [filter:domain_remap] section of your proxy-server.conf. Do this before upgrading to avoid any loss of availability. UpgradeImpact Change-Id: I87944bfbf8b767e1fc36dbc7910305fa1f11eeed --- swift/common/middleware/domain_remap.py | 8 +- .../common/middleware/test_domain_remap.py | 80 +++++++++++++++---- 2 files changed, 72 insertions(+), 16 deletions(-) diff --git a/swift/common/middleware/domain_remap.py b/swift/common/middleware/domain_remap.py index 34e01da531..a8f47650b3 100644 --- a/swift/common/middleware/domain_remap.py +++ b/swift/common/middleware/domain_remap.py @@ -100,7 +100,8 @@ storage end points as sync destinations. 
from swift.common.middleware import RewriteContext from swift.common.swob import Request, HTTPBadRequest -from swift.common.utils import list_from_csv, register_swift_info +from swift.common.utils import config_true_value, list_from_csv, \ + register_swift_info class _DomainRemapContext(RewriteContext): @@ -132,6 +133,8 @@ class DomainRemapMiddleware(object): self.reseller_prefixes_lower = [x.lower() for x in self.reseller_prefixes] self.default_reseller_prefix = conf.get('default_reseller_prefix') + self.mangle_client_paths = config_true_value( + conf.get('mangle_client_paths')) def __call__(self, env, start_response): if not self.storage_domain: @@ -182,7 +185,8 @@ class DomainRemapMiddleware(object): new_path_parts = ['', self.path_root[:-1], account] if container: new_path_parts.append(container) - if (path + '/').startswith(self.path_root): + if self.mangle_client_paths and (path + '/').startswith( + self.path_root): path = path[len(self.path_root):] new_path_parts.append(path) new_path = '/'.join(new_path_parts) diff --git a/test/unit/common/middleware/test_domain_remap.py b/test/unit/common/middleware/test_domain_remap.py index d304711a13..63a0b8aa96 100644 --- a/test/unit/common/middleware/test_domain_remap.py +++ b/test/unit/common/middleware/test_domain_remap.py @@ -85,17 +85,17 @@ class TestDomainRemap(unittest.TestCase): resp = self.app(req.environ, start_response) self.assertEqual(resp, ['Bad domain in host header']) - def test_domain_remap_account_with_path_root(self): + def test_domain_remap_account_with_path_root_container(self): req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, headers={'Host': 'AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEqual(resp, ['/v1/AUTH_a/']) + self.assertEqual(resp, ['/v1/AUTH_a/v1']) - def test_domain_remap_account_container_with_path_root(self): + def test_domain_remap_account_container_with_path_root_obj(self): req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, 
headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEqual(resp, ['/v1/AUTH_a/c/']) + self.assertEqual(resp, ['/v1/AUTH_a/c/v1']) def test_domain_remap_account_container_with_path_obj_slash_v1(self): # Include http://localhost because urlparse used in Request.__init__ @@ -111,7 +111,7 @@ class TestDomainRemap(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}, headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEqual(resp, ['/v1/AUTH_a/c//v1']) + self.assertEqual(resp, ['/v1/AUTH_a/c/v1//v1']) def test_domain_remap_account_container_with_path_trailing_slash(self): req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'}, @@ -129,7 +129,7 @@ class TestDomainRemap(unittest.TestCase): req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'}, headers={'Host': 'c.AUTH_a.example.com'}) resp = self.app(req.environ, start_response) - self.assertEqual(resp, ['/v1/AUTH_a/c/obj']) + self.assertEqual(resp, ['/v1/AUTH_a/c/v1/obj']) def test_domain_remap_with_path_root_and_path_no_slash(self): req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'}, @@ -255,6 +255,58 @@ class TestDomainRemap(unittest.TestCase): 'http://cont.auth-uuid.example.com/test/') +class TestDomainRemapClientMangling(unittest.TestCase): + def setUp(self): + self.app = domain_remap.DomainRemapMiddleware(FakeApp(), { + 'mangle_client_paths': True}) + + def test_domain_remap_account_with_path_root_container(self): + req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/']) + + def test_domain_remap_account_container_with_path_root_obj(self): + req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c/']) + + def 
test_domain_remap_account_container_with_path_obj_slash_v1(self): + # Include http://localhost because urlparse used in Request.__init__ + # parse //v1 as http://v1 + req = Request.blank('http://localhost//v1', + environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c//v1']) + + def test_domain_remap_account_container_with_root_path_obj_slash_v1(self): + req = Request.blank('/v1//v1', + environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c//v1']) + + def test_domain_remap_account_container_with_path_trailing_slash(self): + req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c/obj/']) + + def test_domain_remap_account_container_with_path_root_and_path(self): + req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c/obj']) + + def test_domain_remap_with_path_root_and_path_no_slash(self): + req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'}, + headers={'Host': 'c.AUTH_a.example.com'}) + resp = self.app(req.environ, start_response) + self.assertEqual(resp, ['/v1/AUTH_a/c/v1obj']) + + class TestSwiftInfo(unittest.TestCase): def setUp(self): utils._swift_info = {} @@ -263,17 +315,17 @@ class TestSwiftInfo(unittest.TestCase): def test_registered_defaults(self): domain_remap.filter_factory({}) swift_info = utils.get_swift_info() - self.assertTrue('domain_remap' in swift_info) - self.assertTrue( - swift_info['domain_remap'].get('default_reseller_prefix') is None) + self.assertIn('domain_remap', swift_info) + self.assertEqual(swift_info['domain_remap'], { + 
'default_reseller_prefix': None}) def test_registered_nondefaults(self): - domain_remap.filter_factory({'default_reseller_prefix': 'cupcake'}) + domain_remap.filter_factory({'default_reseller_prefix': 'cupcake', + 'mangle_client_paths': 'yes'}) swift_info = utils.get_swift_info() - self.assertTrue('domain_remap' in swift_info) - self.assertEqual( - swift_info['domain_remap'].get('default_reseller_prefix'), - 'cupcake') + self.assertIn('domain_remap', swift_info) + self.assertEqual(swift_info['domain_remap'], { + 'default_reseller_prefix': 'cupcake'}) if __name__ == '__main__': From 45ca39fc68cdb42b382c1638a92cc8d3cec5529a Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Tue, 10 Oct 2017 11:47:50 -0700 Subject: [PATCH 40/43] add mangle_client_paths to example config Change-Id: Ic1126fc95e8152025fccf25356c253facce3e3ec --- etc/proxy-server.conf-sample | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 2acac1a289..ae53b50c2c 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -558,6 +558,14 @@ use = egg:swift#domain_remap # reseller_prefixes = AUTH # default_reseller_prefix = +# Enable legacy remapping behavior for versioned path requests: +# c.a.example.com/v1/o -> /v1/AUTH_a/c/o +# instead of +# c.a.example.com/v1/o -> /v1/AUTH_a/c/v1/o +# ... 
by default all path parts after a remapped domain are considered part of +# the object name with no special case for the path "v1" +# mangle_client_paths = False + [filter:catch_errors] use = egg:swift#catch_errors # You can override the default log routing for this filter here: From 407f5394f0f5cb422c06b4e5b2f9fbfdb07782d1 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 12 Oct 2017 08:12:38 +0000 Subject: [PATCH 41/43] Imported Translations from Zanata For more information about this automatic import see: https://docs.openstack.org/i18n/latest/reviewing-translation-import.html Change-Id: I628cb09aa78d8e339b4762a3c9ed8aed43941261 --- .../locale/en_GB/LC_MESSAGES/releasenotes.po | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po new file mode 100644 index 0000000000..e1922ad80f --- /dev/null +++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po @@ -0,0 +1,67 @@ +# Andi Chandler , 2017. 
#zanata +msgid "" +msgstr "" +"Project-Id-Version: Swift Release Notes 2.15.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-10-10 22:05+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2017-10-05 03:59+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en-GB\n" +"X-Generator: Zanata 3.9.6\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "2.10.0" +msgstr "2.10.0" + +msgid "2.10.1" +msgstr "2.10.1" + +msgid "2.10.2" +msgstr "2.10.2" + +msgid "2.11.0" +msgstr "2.11.0" + +msgid "2.12.0" +msgstr "2.12.0" + +msgid "2.13.0" +msgstr "2.13.0" + +msgid "2.13.1" +msgstr "2.13.1" + +msgid "2.14.0" +msgstr "2.14.0" + +msgid "2.15.0" +msgstr "2.15.0" + +msgid "2.15.1" +msgstr "2.15.1" + +msgid "" +"A PUT or POST to a container will now update the container's Last-Modified " +"time, and that value will be included in a GET/HEAD response." +msgstr "" +"A PUT or POST to a container will now update the container's Last-Modified " +"time, and that value will be included in a GET/HEAD response." + +msgid "Current (Unreleased) Release Notes" +msgstr "Current (Unreleased) Release Notes" + +msgid "Swift Release Notes" +msgstr "Swift Release Notes" + +msgid "domain_remap now accepts a list of domains in \"storage_domain\"." +msgstr "domain_remap now accepts a list of domains in \"storage_domain\"." + +msgid "name_check and cname_lookup keys have been added to `/info`." +msgstr "name_check and cname_lookup keys have been added to `/info`." + +msgid "swift-recon now respects storage policy aliases." +msgstr "swift-recon now respects storage policy aliases." 
From 1d67485c0b935719e0c8999eb353dfd84713add6 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 15 Apr 2016 12:43:44 -0700 Subject: [PATCH 42/43] Move all monkey patching to one function Change-Id: I2db2e53c50bcfa17f08a136581cfd7ac4958ada2 --- swift/common/utils.py | 13 +++++ swift/common/wsgi.py | 7 +-- swift/container/updater.py | 10 ++-- swift/obj/updater.py | 8 +-- test/unit/common/test_wsgi.py | 99 ++++++++++++++++++----------------- 5 files changed, 73 insertions(+), 64 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 6723b99aac..4a7f907fd8 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -52,6 +52,7 @@ import datetime import eventlet import eventlet.debug import eventlet.greenthread +import eventlet.patcher import eventlet.semaphore from eventlet import GreenPool, sleep, Timeout, tpool from eventlet.green import socket, threading @@ -470,6 +471,18 @@ def config_read_prefixed_options(conf, prefix_name, defaults): return params +def eventlet_monkey_patch(): + """ + Install the appropriate Eventlet monkey patches. + """ + # NOTE(sileht): + # monkey-patching thread is required by python-keystoneclient; + # monkey-patching select is required by oslo.messaging pika driver + # if thread is monkey-patched. + eventlet.patcher.monkey_patch(all=False, socket=True, select=True, + thread=True) + + def noop_libc_function(*args): return 0 diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 72eee0209e..add551c6f8 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -412,12 +412,7 @@ def run_server(conf, logger, sock, global_conf=None): wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60) eventlet.hubs.use_hub(get_hub()) - # NOTE(sileht): - # monkey-patching thread is required by python-keystoneclient; - # monkey-patching select is required by oslo.messaging pika driver - # if thread is monkey-patched. 
- eventlet.patcher.monkey_patch(all=False, socket=True, select=True, - thread=True) + utils.eventlet_monkey_patch() eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) wsgi_logger = NullLogger() diff --git a/swift/container/updater.py b/swift/container/updater.py index c72acf34e2..ef63997d24 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -23,7 +23,7 @@ from swift import gettext_ as _ from random import random, shuffle from tempfile import mkstemp -from eventlet import spawn, patcher, Timeout +from eventlet import spawn, Timeout import swift.common.db from swift.container.backend import ContainerBroker, DATADIR @@ -31,7 +31,8 @@ from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout from swift.common.ring import Ring from swift.common.utils import get_logger, config_true_value, ismount, \ - dump_recon_cache, majority_size, Timestamp, ratelimit_sleep + dump_recon_cache, majority_size, Timestamp, ratelimit_sleep, \ + eventlet_monkey_patch from swift.common.daemon import Daemon from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR @@ -155,8 +156,7 @@ class ContainerUpdater(Daemon): pid2filename[pid] = tmpfilename else: signal.signal(signal.SIGTERM, signal.SIG_DFL) - patcher.monkey_patch(all=False, socket=True, select=True, - thread=True) + eventlet_monkey_patch() self.no_changes = 0 self.successes = 0 self.failures = 0 @@ -190,7 +190,7 @@ class ContainerUpdater(Daemon): """ Run the updater once. 
""" - patcher.monkey_patch(all=False, socket=True, select=True, thread=True) + eventlet_monkey_patch() self.logger.info(_('Begin container update single threaded sweep')) begin = time.time() self.no_changes = 0 diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 6edd35a9b8..1013617615 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -21,13 +21,14 @@ import time from swift import gettext_ as _ from random import random -from eventlet import spawn, patcher, Timeout +from eventlet import spawn, Timeout from swift.common.bufferedhttp import http_connect from swift.common.exceptions import ConnectionTimeout from swift.common.ring import Ring from swift.common.utils import get_logger, renamer, write_pickle, \ - dump_recon_cache, config_true_value, ismount, ratelimit_sleep + dump_recon_cache, config_true_value, ismount, ratelimit_sleep, \ + eventlet_monkey_patch from swift.common.daemon import Daemon from swift.common.header_key_dict import HeaderKeyDict from swift.common.storage_policy import split_policy_string, PolicyError @@ -106,8 +107,7 @@ class ObjectUpdater(Daemon): pids.append(pid) else: signal.signal(signal.SIGTERM, signal.SIG_DFL) - patcher.monkey_patch(all=False, socket=True, select=True, - thread=True) + eventlet_monkey_patch() self.successes = 0 self.failures = 0 forkbegin = time.time() diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index 3d7d772ca9..70abfb8152 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -384,23 +384,24 @@ class TestWSGI(unittest.TestCase): f.write(contents.replace('TEMPDIR', t)) _fake_rings(t) with mock.patch('swift.proxy.server.Application.' 
- 'modify_wsgi_pipeline'): - with mock.patch('swift.common.wsgi.wsgi') as _wsgi: - with mock.patch('swift.common.wsgi.eventlet') as _eventlet: - with mock.patch('swift.common.wsgi.inspect'): - conf = wsgi.appconfig(conf_file) - logger = logging.getLogger('test') - sock = listen_zero() - wsgi.run_server(conf, logger, sock) + 'modify_wsgi_pipeline'), \ + mock.patch('swift.common.wsgi.wsgi') as _wsgi, \ + mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt, \ + mock.patch('swift.common.utils.eventlet') as _utils_evt, \ + mock.patch('swift.common.wsgi.inspect'): + conf = wsgi.appconfig(conf_file) + logger = logging.getLogger('test') + sock = listen_zero() + wsgi.run_server(conf, logger, sock) self.assertEqual('HTTP/1.0', _wsgi.HttpProtocol.default_request_version) self.assertEqual(30, _wsgi.WRITE_TIMEOUT) - _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) - _eventlet.patcher.monkey_patch.assert_called_with(all=False, - socket=True, - select=True, - thread=True) - _eventlet.debug.hub_exceptions.assert_called_with(False) + _wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub()) + _utils_evt.patcher.monkey_patch.assert_called_with(all=False, + socket=True, + select=True, + thread=True) + _wsgi_evt.debug.hub_exceptions.assert_called_with(False) self.assertTrue(_wsgi.server.called) args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args @@ -470,29 +471,28 @@ class TestWSGI(unittest.TestCase): f.write('[DEFAULT]\nswift_dir = %s' % conf_root) _fake_rings(conf_root) with mock.patch('swift.proxy.server.Application.' 
- 'modify_wsgi_pipeline'): - with mock.patch('swift.common.wsgi.wsgi') as _wsgi: - with mock.patch('swift.common.wsgi.eventlet') as _eventlet: - with mock.patch.dict('os.environ', {'TZ': ''}): - with mock.patch('swift.common.wsgi.inspect'): - with mock.patch('time.tzset') as mock_tzset: - conf = wsgi.appconfig(conf_dir) - logger = logging.getLogger('test') - sock = listen_zero() - wsgi.run_server(conf, logger, sock) - self.assertEqual(os.environ['TZ'], 'UTC+0') - self.assertEqual(mock_tzset.mock_calls, - [mock.call()]) + 'modify_wsgi_pipeline'), \ + mock.patch('swift.common.wsgi.wsgi') as _wsgi, \ + mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt, \ + mock.patch('swift.common.utils.eventlet') as _utils_evt, \ + mock.patch.dict('os.environ', {'TZ': ''}), \ + mock.patch('swift.common.wsgi.inspect'), \ + mock.patch('time.tzset'): + conf = wsgi.appconfig(conf_dir) + logger = logging.getLogger('test') + sock = listen_zero() + wsgi.run_server(conf, logger, sock) + self.assertTrue(os.environ['TZ'] is not '') self.assertEqual('HTTP/1.0', _wsgi.HttpProtocol.default_request_version) self.assertEqual(30, _wsgi.WRITE_TIMEOUT) - _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) - _eventlet.patcher.monkey_patch.assert_called_with(all=False, - socket=True, - select=True, - thread=True) - _eventlet.debug.hub_exceptions.assert_called_with(False) + _wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub()) + _utils_evt.patcher.monkey_patch.assert_called_with(all=False, + socket=True, + select=True, + thread=True) + _wsgi_evt.debug.hub_exceptions.assert_called_with(False) self.assertTrue(_wsgi.server.called) args, kwargs = _wsgi.server.call_args server_sock, server_app, server_logger = args @@ -527,25 +527,26 @@ class TestWSGI(unittest.TestCase): f.write(contents.replace('TEMPDIR', t)) _fake_rings(t) with mock.patch('swift.proxy.server.Application.' 
- 'modify_wsgi_pipeline'): - with mock.patch('swift.common.wsgi.wsgi') as _wsgi: - mock_server = _wsgi.server - _wsgi.server = lambda *args, **kwargs: mock_server( - *args, **kwargs) - with mock.patch('swift.common.wsgi.eventlet') as _eventlet: - conf = wsgi.appconfig(conf_file) - logger = logging.getLogger('test') - sock = listen_zero() - wsgi.run_server(conf, logger, sock) + 'modify_wsgi_pipeline'), \ + mock.patch('swift.common.wsgi.wsgi') as _wsgi, \ + mock.patch('swift.common.utils.eventlet') as _utils_evt, \ + mock.patch('swift.common.wsgi.eventlet') as _wsgi_evt: + mock_server = _wsgi.server + _wsgi.server = lambda *args, **kwargs: mock_server( + *args, **kwargs) + conf = wsgi.appconfig(conf_file) + logger = logging.getLogger('test') + sock = listen_zero() + wsgi.run_server(conf, logger, sock) self.assertEqual('HTTP/1.0', _wsgi.HttpProtocol.default_request_version) self.assertEqual(30, _wsgi.WRITE_TIMEOUT) - _eventlet.hubs.use_hub.assert_called_with(utils.get_hub()) - _eventlet.patcher.monkey_patch.assert_called_with(all=False, - socket=True, - select=True, - thread=True) - _eventlet.debug.hub_exceptions.assert_called_with(True) + _wsgi_evt.hubs.use_hub.assert_called_with(utils.get_hub()) + _utils_evt.patcher.monkey_patch.assert_called_with(all=False, + socket=True, + select=True, + thread=True) + _wsgi_evt.debug.hub_exceptions.assert_called_with(True) self.assertTrue(mock_server.called) args, kwargs = mock_server.call_args server_sock, server_app, server_logger = args From 24188beb81d39790034fa0902246163a7bf54c91 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Thu, 12 Oct 2017 16:13:25 -0700 Subject: [PATCH 43/43] Remove some leftover threadpool cruft. 
Change-Id: I43a1a428bd96a2e18aac334c03743a9f94f7d3e1 --- swift/common/utils.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/swift/common/utils.py b/swift/common/utils.py index 6723b99aac..2def08bf78 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -77,12 +77,6 @@ from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \ from swift.common.header_key_dict import HeaderKeyDict from swift.common.linkat import linkat -if six.PY3: - stdlib_queue = eventlet.patcher.original('queue') -else: - stdlib_queue = eventlet.patcher.original('Queue') -stdlib_threading = eventlet.patcher.original('threading') - # logging doesn't import patched as cleanly as one would like from logging.handlers import SysLogHandler import logging