From b72141979f8766f411d2fea8fe8856b157bc9b91 Mon Sep 17 00:00:00 2001 From: Yatin Kumbhare Date: Mon, 14 Dec 2015 17:04:34 +0530 Subject: [PATCH 001/156] Fixed inconsistencies in docstrings Fixed :returns:, and a few one-line comments, as per the docstring guidelines. http://docs.openstack.org/developer/hacking/#docstrings Change-Id: I36ecf4faf5b49e070c13eb6324841ebcf442524f --- swift/common/constraints.py | 12 ++++++------ swift/common/internal_client.py | 29 ++++++++++++----------------- swift/common/manager.py | 2 +- swift/common/utils.py | 7 +++---- swift/common/wsgi.py | 4 ++-- 5 files changed, 24 insertions(+), 30 deletions(-) diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 451e7458bf..985eefe019 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -174,12 +174,12 @@ def check_object_creation(req, object_name): :param req: HTTP request object :param object_name: name of object to be created - :returns HTTPRequestEntityTooLarge: the object is too large - :returns HTTPLengthRequired: missing content-length header and not - a chunked request - :returns HTTPBadRequest: missing or bad content-type header, or - bad metadata - :returns HTTPNotImplemented: unsupported transfer-encoding header value + :returns: HTTPRequestEntityTooLarge -- the object is too large + :returns: HTTPLengthRequired -- missing content-length header and not + a chunked request + :returns: HTTPBadRequest -- missing or bad content-type header, or + bad metadata + :returns: HTTPNotImplemented -- unsupported transfer-encoding header value """ try: ml = req.message_length() diff --git a/swift/common/internal_client.py b/swift/common/internal_client.py index 7dceda8427..7d08a69d27 100644 --- a/swift/common/internal_client.py +++ b/swift/common/internal_client.py @@ -152,8 +152,7 @@ class InternalClient(object): def make_request( self, method, path, headers, acceptable_statuses, body_file=None): - """ - Makes a request to Swift with retries. + """Makes a request to Swift with retries. :param method: HTTP method of request. :param path: Path of request. :param headers: Headers to be sent with request. :param acceptable_statuses: List of acceptable statuses for request. :param body_file: Body file to be passed along with request, defaults to None. - :returns : Response object on success. + :returns: Response object on success. :raises UnexpectedResponse: Exception raised when make_request() fails to get a response with an acceptable status @@ -212,8 +211,8 @@ class InternalClient(object): defaults to (2,). :param headers: extra headers to send - :returns : A dict of metadata with metadata_prefix stripped from keys. - Keys will be lowercase. + :returns: A dict of metadata with metadata_prefix stripped from keys. + Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status @@ -366,8 +365,7 @@ class InternalClient(object): def get_account_metadata( self, account, metadata_prefix='', acceptable_statuses=(2,)): - """ - Gets account metadata. + """Gets account metadata. :param account: Account on which to get the metadata. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). - :returns : Returns dict of account metadata. Keys will be lowercase. + :returns: Returns dict of account metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status @@ -417,8 +415,7 @@ class InternalClient(object): # container methods def container_exists(self, account, container): - """ - Checks to see if a container exists. + """Checks to see if a container exists. :param account: The container's account. :param container: Container to check. @@ -428,7 +425,7 @@ class InternalClient(object): :raises Exception: Exception is raised when code fails in an unexpected way. - :returns : True if container exists, false otherwise. + :returns: True if container exists, false otherwise. """ path = self.make_path(account, container) @@ -478,8 +475,7 @@ class InternalClient(object): def get_container_metadata( self, account, container, metadata_prefix='', acceptable_statuses=(2,)): - """ - Gets container metadata. + """Gets container metadata. :param account: The container's account. :param container: Container to get metadata on. @@ -489,7 +485,7 @@ class InternalClient(object): :param acceptable_statuses: List of status for valid responses, defaults to (2,). - :returns : Returns dict of container metadata. Keys will be lowercase. + :returns: Returns dict of container metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status @@ -579,8 +575,7 @@ class InternalClient(object): def get_object_metadata( self, account, container, obj, metadata_prefix='', acceptable_statuses=(2,), headers=None): - """ - Gets object metadata. + """Gets object metadata. :param account: The object's account. :param container: The object's container. @@ -592,7 +587,7 @@ class InternalClient(object): defaults to (2,). :param headers: extra headers to send with request - :returns : Dict of object metadata. + :returns: Dict of object metadata. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status diff --git a/swift/common/manager.py b/swift/common/manager.py index 03eb0479e9..d4e010caec 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -608,7 +608,7 @@ class Server(object): :param wait: boolean, if true capture stdout with a pipe :param daemon: boolean, if false ask server to log to console - :returns : the pid of the spawned process + :returns: the pid of the spawned process """ args = [self.cmd, conf_file] if once: diff --git a/swift/common/utils.py b/swift/common/utils.py index d6cc5d7afb..ceb8fd7816 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1629,15 +1629,14 @@ def capture_stdio(logger, **kwargs): def parse_options(parser=None, once=False, test_args=None): - """ - Parse standard swift server/daemon options with optparse.OptionParser. + """Parse standard swift server/daemon options with optparse.OptionParser. :param parser: OptionParser to use. If not sent one will be created. :param once: Boolean indicating the "once" option is available :param test_args: Override sys.argv; used in testing - :returns : Tuple of (config, options); config is an absolute path to the - config file, options is the parser options as a dictionary. + :returns: Tuple of (config, options); config is an absolute path to the + config file, options is the parser options as a dictionary. 
:raises SystemExit: First arg (CONFIG) is required, file must exist """ diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 97e704228a..725c826b0b 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -168,8 +168,8 @@ def get_socket(conf): :param conf: Configuration dict to read settings from - :returns : a socket object as returned from socket.listen or - ssl.wrap_socket if conf specifies cert_file + :returns: a socket object as returned from socket.listen or + ssl.wrap_socket if conf specifies cert_file """ try: bind_port = int(conf['bind_port']) From efdf123a402b373a2d572e777f535c21241b4bb8 Mon Sep 17 00:00:00 2001 From: Andy McCrae Date: Thu, 12 May 2016 15:46:24 +0100 Subject: [PATCH 002/156] [Docs] Document prevention of disk full scenarios Adds section to detail how to prevent disk full scenarios from occurring. Change-Id: Iafb4a47fa4892f6067252f3a80de87cd76506a40 --- doc/source/admin_guide.rst | 98 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 94 insertions(+), 4 deletions(-) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 4f87939c0c..c905bbe596 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -234,6 +234,96 @@ using the format `regex_pattern_X = regex_expression`, where `X` is a number. This script has been tested on Ubuntu 10.04 and Ubuntu 12.04, so if you are using a different distro or OS, some care should be taken before using in production. +------------------------------ Preventing Disk Full Scenarios ------------------------------ + Prevent disk full scenarios by ensuring that the ``proxy-server`` blocks PUT requests and that rsync prevents replication to specific drives. + You can prevent `proxy-server` PUT requests to low space disks by ensuring ``fallocate_reserve`` is set in the ``object-server.conf``. By default, ``fallocate_reserve`` is set to 1%. This blocks PUT requests that would leave the free disk space below 1% of the disk. + To prevent rsync replication to specific drives, first set up a per-disk ``rsync_module`` for your ``object-replicator``. Set this in ``object-server.conf``: .. code:: [object-replicator] rsync_module = {replication_ip}::object_{device} + Set the individual drives in ``rsync.conf``. For example: .. code:: [object_sda] max connections = 4 lock file = /var/lock/object_sda.lock + [object_sdb] max connections = 4 lock file = /var/lock/object_sdb.lock + Finally, monitor the free space of each disk and, once it drops below your reserve, set the rsync ``max connections`` for that drive to ``-1``. We recommend utilising your existing monitoring solution to achieve this. The following is an example script: ..
code-block:: python + + #!/usr/bin/env python + import os + import errno + + RESERVE = 500 * 2 ** 20 # 500 MiB + + DEVICES = '/srv/node1' + + path_template = '/etc/rsync.d/disable_%s.conf' + config_template = ''' + [object_%s] + max connections = -1 + ''' + + def disable_rsync(device): + with open(path_template % device, 'w') as f: + f.write(config_template.lstrip() % device) + + + def enable_rsync(device): + try: + os.unlink(path_template % device) + except OSError as e: + # ignore file does not exist + if e.errno != errno.ENOENT: + raise + + + for device in os.listdir(DEVICES): + path = os.path.join(DEVICES, device) + st = os.statvfs(path) + free = st.f_bavail * st.f_frsize + if free < RESERVE: + disable_rsync(device) + else: + enable_rsync(device) + +For the above script to work, ensure ``/etc/rsync.d/`` conf files are +included, by specifying ``&include`` in your ``rsync.conf`` file: + +.. code:: + + &include /etc/rsync.d + +Use this in conjunction with a cron job to periodically run the script, for example: + +.. code:: + + # /etc/cron.d/devicecheck + * * * * * root /some/path/to/disable_rsync.py + .. _dispersion_report: ----------------- @@ -465,11 +555,11 @@ Example:: Assuming 3 replicas, this configuration will make object PUTs try storing the object's replicas on up to 6 disks ("2 * replicas") in -region 1 ("r1"). Proxy server tries to find 3 devices for storing the -object. While a device is unavailable, it queries the ring for the 4th +region 1 ("r1"). Proxy server tries to find 3 devices for storing the +object. While a device is unavailable, it queries the ring for the 4th device and so on until 6th device. If the 6th disk is still unavailable, -the last replica will be sent to other region. It doesn't mean there'll -have 6 replicas in region 1. +the last replica will be sent to other region. It doesn't mean there'll +have 6 replicas in region 1. 
You should be aware that, if you have data coming into SF faster than From ef942dadebc038a82d9198b8048244c980cd2c24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20Lis=C3=A1k?= Date: Fri, 13 Nov 2015 13:56:13 +0100 Subject: [PATCH 003/156] Call swift-recon with more than one server type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Examples: * get auditor stats from account and container server swift-recon account container --auditor * perform all checks on all servers swift-recon account container object --all Change-Id: I05bb7c785e6a4d80969d90448f1b9e0fd08dae86 Co-Authored-By: Ondřej Nový --- swift/cli/recon.py | 148 ++++++++++++++++++++++++--------------------- 1 file changed, 78 insertions(+), 70 deletions(-) diff --git a/swift/cli/recon.py b/swift/cli/recon.py index 2936c30fe0..cd96c60c8f 100644 --- a/swift/cli/recon.py +++ b/swift/cli/recon.py @@ -941,7 +941,8 @@ class SwiftRecon(object): """ print("=" * 79) usage = ''' - usage: %prog [-v] [--suppress] [-a] [-r] [-u] [-d] + usage: %prog [ []] + [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] [--human-readable] @@ -1009,103 +1010,110 @@ class SwiftRecon(object): 'storage policy (specified as name or index).') options, arguments = args.parse_args() - if len(sys.argv) <= 1 or len(arguments) > 1: + if len(sys.argv) <= 1 or len(arguments) > len(self.check_types): args.print_help() sys.exit(0) if arguments: - if arguments[0] in self.check_types: - self.server_type = arguments[0] + arguments = set(arguments) + if arguments.issubset(self.check_types): + server_types = arguments else: print("Invalid Server Type") args.print_help() sys.exit(1) - else: - self.server_type = 'object' + else: # default + server_types = ['object'] swift_dir = options.swiftdir self.verbose = options.verbose self.suppress_errors = options.suppress self.timeout = options.timeout - ring_names = self._get_ring_names(options.policy) - if not ring_names: - print('Invalid Storage Policy') - args.print_help() - sys.exit(0) - - hosts = self.get_hosts(options.region, options.zone, - swift_dir, ring_names) - - print("--> Starting reconnaissance on %s hosts" % len(hosts)) - print("=" * 79) - - if options.all: - if self.server_type == 'object': - self.async_check(hosts) - self.object_auditor_check(hosts) - self.updater_check(hosts) - self.expirer_check(hosts) - elif self.server_type == 'container': - self.auditor_check(hosts) - self.updater_check(hosts) - elif self.server_type == 'account': - self.auditor_check(hosts) - self.replication_check(hosts) - self.umount_check(hosts) - self.load_check(hosts) - self.disk_usage(hosts, options.top, options.lowest, - options.human_readable) - self.get_ringmd5(hosts, swift_dir) - self.get_swiftconfmd5(hosts) - self.quarantine_check(hosts) - self.socket_usage(hosts) - self.server_type_check(hosts) - self.driveaudit_check(hosts) - self.time_check(hosts) - else: - if options.async: + for server_type in server_types: + self.server_type = server_type + ring_names = self._get_ring_names(options.policy) + if not ring_names: + print('Invalid Storage Policy: %s' % options.policy) + args.print_help() + sys.exit(0) + hosts = self.get_hosts(options.region, options.zone, + swift_dir, ring_names) + print("--> Starting reconnaissance on %s hosts (%s)" % + (len(hosts), self.server_type)) + print("=" * 79) + if options.all: if self.server_type == 'object': self.async_check(hosts) - else: - print("Error: Can't check asyncs on non object servers.") - if 
options.unmounted: - self.umount_check(hosts) - if options.replication: - self.replication_check(hosts) - if options.auditor: - if self.server_type == 'object': self.object_auditor_check(hosts) - else: - self.auditor_check(hosts) - if options.updater: - if self.server_type == 'account': - print("Error: Can't check updaters on account servers.") - else: self.updater_check(hosts) - if options.expirer: - if self.server_type == 'object': self.expirer_check(hosts) - else: - print("Error: Can't check expired on non object servers.") - if options.validate_servers: - self.server_type_check(hosts) - if options.loadstats: + elif self.server_type == 'container': + self.auditor_check(hosts) + self.updater_check(hosts) + elif self.server_type == 'account': + self.auditor_check(hosts) + self.replication_check(hosts) + self.umount_check(hosts) self.load_check(hosts) - if options.diskusage: self.disk_usage(hosts, options.top, options.lowest, options.human_readable) - if options.md5: self.get_ringmd5(hosts, swift_dir) self.get_swiftconfmd5(hosts) - if options.quarantined: self.quarantine_check(hosts) - if options.sockstat: self.socket_usage(hosts) - if options.driveaudit: + self.server_type_check(hosts) self.driveaudit_check(hosts) - if options.time: self.time_check(hosts) + else: + if options.async: + if self.server_type == 'object': + self.async_check(hosts) + else: + print("Error: Can't check asyncs on non object " + "servers.") + print("=" * 79) + if options.unmounted: + self.umount_check(hosts) + if options.replication: + self.replication_check(hosts) + if options.auditor: + if self.server_type == 'object': + self.object_auditor_check(hosts) + else: + self.auditor_check(hosts) + if options.updater: + if self.server_type == 'account': + print("Error: Can't check updaters on account " + "servers.") + print("=" * 79) + else: + self.updater_check(hosts) + if options.expirer: + if self.server_type == 'object': + self.expirer_check(hosts) + else: + print("Error: Can't check expired on non object " + "servers.") + print("=" * 79) + if options.validate_servers: + self.server_type_check(hosts) + if options.loadstats: + self.load_check(hosts) + if options.diskusage: + self.disk_usage(hosts, options.top, options.lowest, + options.human_readable) + if options.md5: + self.get_ringmd5(hosts, swift_dir) + self.get_swiftconfmd5(hosts) + if options.quarantined: + self.quarantine_check(hosts) + if options.sockstat: + self.socket_usage(hosts) + if options.driveaudit: + self.driveaudit_check(hosts) + if options.time: + self.time_check(hosts) def main(): From 66c77e049dc60ec012fb34a45ef9531f11ea906a Mon Sep 17 00:00:00 2001 From: Sivasathurappan Radhakrishnan Date: Fri, 19 Feb 2016 21:54:12 +0000 Subject: [PATCH 004/156] Added unit test cases for cli/recon.py Added unit test cases to cover code paths of umount_check and async_check function. 
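For context, the new checks are driven by mocking swift.cli.recon.Scout.scout to return canned recon responses; the umount_check test, for example, asserts report lines of the form:

    Not mounted: sdb1 on 127.0.0.1:6010
    Device errors: sdb4 on 127.0.0.1:6040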
Change-Id: I236d24b1b22ce244b3fb9546ff31223426edb8ed --- test/unit/cli/test_recon.py | 75 +++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py index b30c624787..0c628ea02f 100644 --- a/test/unit/cli/test_recon.py +++ b/test/unit/cli/test_recon.py @@ -348,6 +348,81 @@ class TestRecon(unittest.TestCase): % ex) self.assertFalse(expected) + def test_async_check(self): + hosts = [('127.0.0.1', 6011), ('127.0.0.1', 6021), + ('127.0.0.1', 6031), ('127.0.0.1', 6041)] + # sample json response from http://:/recon/async + responses = {6011: {'async_pending': 15}, + 6021: {'async_pending': 0}, + 6031: {'async_pending': 257}, + 6041: {'async_pending': 56}} + # + expected = (0, 257, 82.0, 328, 0.0, 0, 4) + + def mock_scout_async(app, host): + url = 'http://%s:%s/recon/async' % host + response = responses[host[1]] + status = 200 + return url, response, status, 0, 0 + + stdout = StringIO() + with mock.patch('swift.cli.recon.Scout.scout', + mock_scout_async), \ + mock.patch('sys.stdout', new=stdout): + self.recon_instance.async_check(hosts) + + output = stdout.getvalue() + r = re.compile("\[async_pending(.*)\](.*)") + lines = output.splitlines() + self.assertTrue(lines) + for line in lines: + m = r.match(line) + if m: + self.assertEqual(m.group(2), + " low: %s, high: %s, avg: %s, total: %s," + " Failed: %s%%, no_result: %s, reported: %s" + % expected) + break + else: + self.fail('The expected line is not found') + + def test_umount_check(self): + hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020), + ('127.0.0.1', 6030), ('127.0.0.1', 6040)] + # sample json response from http://:/recon/unmounted + responses = {6010: [{'device': 'sdb1', 'mounted': False}], + 6020: [{'device': 'sdb2', 'mounted': False}], + 6030: [{'device': 'sdb3', 'mounted': False}], + 6040: [{'device': 'sdb4', 'mounted': 'bad'}]} + + expected = ['Not mounted: sdb1 on 127.0.0.1:6010', + 'Not mounted: sdb2 on 127.0.0.1:6020', + 'Not mounted: sdb3 on 127.0.0.1:6030', + 'Device errors: sdb4 on 127.0.0.1:6040'] + + def mock_scout_umount(app, host): + url = 'http://%s:%s/recon/unmounted' % host + response = responses[host[1]] + status = 200 + return url, response, status, 0, 0 + + stdout = StringIO() + with mock.patch('swift.cli.recon.Scout.scout', + mock_scout_umount), \ + mock.patch('sys.stdout', new=stdout): + self.recon_instance.umount_check(hosts) + + output = stdout.getvalue() + r = re.compile("\Not mounted:|Device errors: .*") + lines = output.splitlines() + self.assertTrue(lines) + for line in lines: + m = r.match(line) + if m: + self.assertIn(line, expected) + expected.remove(line) + self.assertFalse(expected) + def test_drive_audit_check(self): hosts = [('127.0.0.1', 6010), ('127.0.0.1', 6020), ('127.0.0.1', 6030), ('127.0.0.1', 6040)] From fcb6e4cd3aa6896976733c57c683592358e4c6f0 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Thu, 2 Jul 2015 00:35:02 -0700 Subject: [PATCH 005/156] Last-Modified header support on HEAD/GET container This patch enables to show a x-put-timestamp as a last-modified header in container-server. Note that the last-modified header will be changed only when a request for container (PUT container or POST container) comes into Swift. i.e. some requests for objects (e.g. 
PUT object, POST object) will never affect the last-modified value. However, when using python-swiftclient (for example "swift upload"), the last-modified time will be close to the upload time, because python-swiftclient makes a PUT container request for each "swift upload". Change-Id: I9971bf90d24eee8921f67c02b7e2c80fd8995623 --- swift/container/server.py | 6 ++++- test/functional/swift_test_client.py | 3 ++- test/functional/tests.py | 39 ++++++++++++++++++++++++++++ test/unit/container/test_server.py | 38 ++++++++++++++++++++++----- test/unit/proxy/test_server.py | 6 +++++ 5 files changed, 84 insertions(+), 8 deletions(-) diff --git a/swift/container/server.py b/swift/container/server.py index a77dadcd22..a531d2fd70 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -17,6 +17,7 @@ import json import os import time import traceback +import math from swift import gettext_ as _ from xml.etree.cElementTree import Element, SubElement, tostring @@ -433,7 +434,9 @@ class ContainerController(BaseStorageServer): if value != '' and (key.lower() in self.save_headers or is_sys_or_user_meta('container', key))) headers['Content-Type'] = out_content_type - return HTTPNoContent(request=req, headers=headers, charset='utf-8') + resp = HTTPNoContent(request=req, headers=headers, charset='utf-8') + resp.last_modified = math.ceil(float(headers['X-PUT-Timestamp'])) + return resp def update_data_record(self, record): """ @@ -530,6 +533,7 @@ class ContainerController(BaseStorageServer): if not container_list: return HTTPNoContent(request=req, headers=resp_headers) ret.body = '\n'.join(rec[0] for rec in container_list) + '\n' + ret.last_modified = math.ceil(float(resp_headers['X-PUT-Timestamp'])) return ret @public diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 3c9bb0b5e2..67a393c9ef 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -613,7 +613,8 @@ class Container(Base): if self.conn.response.status == 204: required_fields = [['bytes_used', 'x-container-bytes-used'], - ['object_count', 'x-container-object-count']] + ['object_count', 'x-container-object-count'], + ['last_modified', 'last-modified']] optional_fields = [ ['versions', 'x-versions-location'], ['tempurl_key', 'x-container-meta-temp-url-key'], diff --git a/test/functional/tests.py b/test/functional/tests.py index d083aa10c2..c43a60c1f3 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -808,6 +808,45 @@ class TestContainer(Base): file_item = cont.file(Utils.create_name()) file_item.write_random() + def testContainerLastModified(self): + container = self.env.account.container(Utils.create_name()) + self.assertTrue(container.create()) + info = container.info() + t0 = info['last_modified'] + # the last-modified header is in a date format with only second + # precision, so wait a second for the header value to change.
+ eventlet.sleep(1) + + # POST container change last modified timestamp + self.assertTrue( + container.update_metadata({'x-container-meta-japan': 'mitaka'})) + info = container.info() + t1 = info['last_modified'] + self.assertNotEqual(t0, t1) + eventlet.sleep(1) + + # PUT container (overwrite) also change last modified + self.assertTrue(container.create()) + info = container.info() + t2 = info['last_modified'] + self.assertNotEqual(t1, t2) + eventlet.sleep(1) + + # PUT object doesn't change container last modified timestamp + obj = container.file(Utils.create_name()) + self.assertTrue( + obj.write("aaaaa", hdrs={'Content-Type': 'text/plain'})) + info = container.info() + t3 = info['last_modified'] + self.assertEqual(t2, t3) + + # POST object also doesn't change container last modified timestamp + self.assertTrue( + obj.sync_metadata({'us': 'austin'})) + info = container.info() + t4 = info['last_modified'] + self.assertEqual(t2, t4) + class TestContainerUTF8(Base2, TestContainer): set_up = False diff --git a/test/unit/container/test_server.py b/test/unit/container/test_server.py index 706f3c3366..4fc4f32c78 100644 --- a/test/unit/container/test_server.py +++ b/test/unit/container/test_server.py @@ -180,12 +180,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual(response.headers.get('x-container-write'), 'account:user') - def test_HEAD(self): - start = int(time.time()) - ts = (Timestamp(t).internal for t in itertools.count(start)) - req = Request.blank('/sda1/p/a/c', method='PUT', headers={ - 'x-timestamp': next(ts)}) - req.get_response(self.controller) + def _test_head(self, start, ts): req = Request.blank('/sda1/p/a/c', method='HEAD') response = req.get_response(self.controller) self.assertEqual(response.status_int, 204) @@ -213,6 +208,9 @@ class TestContainerController(unittest.TestCase): self.assertTrue(created_at_header >= start) self.assertEqual(response.headers['x-put-timestamp'], Timestamp(start).normal) + self.assertEqual( + response.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"), + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(start))) # backend headers self.assertEqual(int(response.headers @@ -227,6 +225,22 @@ class TestContainerController(unittest.TestCase): self.assertEqual(response.headers['x-backend-status-changed-at'], Timestamp(start).internal) + def test_HEAD(self): + start = int(time.time()) + ts = (Timestamp(t).internal for t in itertools.count(start)) + req = Request.blank('/sda1/p/a/c', method='PUT', headers={ + 'x-timestamp': next(ts)}) + req.get_response(self.controller) + self._test_head(Timestamp(start), ts) + + def test_HEAD_timestamp_with_offset(self): + start = int(time.time()) + ts = (Timestamp(t, offset=1).internal for t in itertools.count(start)) + req = Request.blank('/sda1/p/a/c', method='PUT', headers={ + 'x-timestamp': next(ts)}) + req.get_response(self.controller) + self._test_head(Timestamp(start, offset=1), ts) + def test_HEAD_not_found(self): req = Request.blank('/sda1/p/a/c', method='HEAD') resp = req.get_response(self.controller) @@ -241,6 +255,8 @@ class TestContainerController(unittest.TestCase): Timestamp(0).internal) self.assertEqual(resp.headers['x-backend-delete-timestamp'], Timestamp(0).internal) + self.assertIsNone(resp.last_modified) + for header in ('x-container-object-count', 'x-container-bytes-used', 'x-timestamp', 'x-put-timestamp'): self.assertEqual(resp.headers[header], None) @@ -264,6 +280,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/sda1/p/a/c', method=method) resp = 
req.get_response(self.controller) self.assertEqual(resp.status_int, 404) + self.assertIsNone(resp.last_modified) # backend headers self.assertEqual(int(resp.headers[ 'X-Backend-Storage-Policy-Index']), @@ -2021,6 +2038,9 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.content_type, 'application/json') + self.assertEqual( + resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"), + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0))) self.assertEqual(json.loads(resp.body), json_body) self.assertEqual(resp.charset, 'utf-8') @@ -2082,6 +2102,9 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.content_type, 'text/plain') + self.assertEqual( + resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"), + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0))) self.assertEqual(resp.body, plain_body) self.assertEqual(resp.charset, 'utf-8') @@ -2212,6 +2235,9 @@ class TestContainerController(unittest.TestCase): environ={'REQUEST_METHOD': 'GET'}) resp = req.get_response(self.controller) self.assertEqual(resp.content_type, 'application/xml') + self.assertEqual( + resp.last_modified.strftime("%a, %d %b %Y %H:%M:%S GMT"), + time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(0))) self.assertEqual(resp.body, xml_body) self.assertEqual(resp.charset, 'utf-8') diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 7aac742c19..da28804b53 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -6346,6 +6346,9 @@ class TestContainerController(unittest.TestCase): if expected < 400: self.assertIn('x-works', res.headers) self.assertEqual(res.headers['x-works'], 'yes') + if expected < 300: + self.assertIn('last-modified', res.headers) + self.assertEqual(res.headers['last-modified'], '1') if c_expected: self.assertIn('container/a/c', infocache) self.assertEqual( @@ -6371,6 +6374,9 @@ class TestContainerController(unittest.TestCase): if expected < 400: self.assertTrue('x-works' in res.headers) self.assertEqual(res.headers['x-works'], 'yes') + if expected < 300: + self.assertIn('last-modified', res.headers) + self.assertEqual(res.headers['last-modified'], '1') if c_expected: self.assertIn('container/a/c', infocache) self.assertEqual( From 334140a543cb16aa4c7b01b1f4371fc4e982032f Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Tue, 7 Jun 2016 19:08:50 +0000 Subject: [PATCH 006/156] Removed whitespaces from swift-ring-builder manpage Change-Id: I25cf69f8d963ba84df4c59129d72ee39ec341bd3 --- doc/manpages/swift-ring-builder.1 | 48 +++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/doc/manpages/swift-ring-builder.1 b/doc/manpages/swift-ring-builder.1 index e4c12b6958..4caa04b81e 100644 --- a/doc/manpages/swift-ring-builder.1 +++ b/doc/manpages/swift-ring-builder.1 @@ -14,10 +14,10 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-ring-builder 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-ring-builder \- Openstack-swift ring builder @@ -27,13 +27,13 @@ .B swift-ring-builder <...> -.SH DESCRIPTION +.SH DESCRIPTION .PP -The swift-ring-builder utility is used to create, search and manipulate -the swift storage ring. 
The ring-builder assigns partitions to devices and +The swift-ring-builder utility is used to create, search and manipulate +the swift storage ring. The ring-builder assigns partitions to devices and writes an optimized Python structure to a gzipped, pickled file on disk for -shipping out to the servers. The server processes just check the modification -time of the file occasionally and reload their in-memory copies of the ring +shipping out to the servers. The server processes just check the modification +time of the file occasionally and reload their in-memory copies of the ring structure as needed. Because of how the ring-builder manages changes to the ring, using a slightly older ring usually just means one of the three replicas for a subset of the partitions will be incorrect, which can be easily worked around. @@ -59,7 +59,7 @@ needs to interact with the rings manually. .SH SEARCH -.PD 0 +.PD 0 .IP "\fB\fR" .RS 5 @@ -81,12 +81,12 @@ needs to interact with the rings manually. .IP "[::1] Matches devices in any zone with the ip ::1" .IP "z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678" .RE - + Most specific example: .RS 3 -d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8" -.RE +d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8" +.RE Nerd explanation: @@ -94,7 +94,7 @@ Nerd explanation: .IP "All items require their single character prefix except the ip, in which case the - is optional unless the device id or zone is also included." .RE .RE -.PD +.PD .SH OPTIONS @@ -104,12 +104,12 @@ Assume a yes response to all questions .SH COMMANDS -.PD 0 +.PD 0 .IP "\fB\fR" .RS 5 -Shows information about the ring and the devices within. +Shows information about the ring and the devices within. .RE @@ -143,7 +143,7 @@ the devices matching the search values given. The first column is the assigned partition number and the second column is the number of device matches for that partition. The list is ordered from most number of matches to least. If there are a lot of devices to match against, this command -could take a while to run. +could take a while to run. .RE @@ -198,8 +198,8 @@ Just runs the validation routines on the ring. .IP "\fBwrite_ring\fR" .RS 5 -Just rewrites the distributable ring file. This is done automatically after -a successful rebalance, so really this is only useful after one or more 'set_info' +Just rewrites the distributable ring file. This is done automatically after +a successful rebalance, so really this is only useful after one or more 'set_info' calls when no rebalance is needed but you want to send out the new device information. 
.RE @@ -208,18 +208,18 @@ calls when no rebalance is needed but you want to send out the new device inform set_min_part_hours set_weight validate write_ring \fBExit codes:\fR 0 = ring changed, 1 = ring did not change, 2 = error -.PD +.PD + - .SH DOCUMENTATION .LP -More in depth documentation about the swift ring and also Openstack-Swift as a -whole can be found at -.BI http://swift.openstack.org/overview_ring.html, -.BI http://swift.openstack.org/admin_guide.html#managing-the-rings -and +More in depth documentation about the swift ring and also Openstack-Swift as a +whole can be found at +.BI http://swift.openstack.org/overview_ring.html, +.BI http://swift.openstack.org/admin_guide.html#managing-the-rings +and .BI http://swift.openstack.org From f6b0b75a25ef970e8bad67f554c82df676e0d172 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Wed, 8 Jun 2016 10:01:39 +0000 Subject: [PATCH 007/156] Make test_ringbuilder less brittle If one has an object.builder file in the current directory and runs test_ringbuilder, it will fail with an irritating error. That's because test_use_ringfile_as_builderfile doesn't use self.tmpfile, but object.builder - and that one might exist in the local directory. This patch changes this, using self.tmpfile as argument name. Closes-Bug: 1590356 Change-Id: I4b3287a36e8a5e469eb037128427dc7867910e53 --- test/unit/cli/test_ringbuilder.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 955e421ab8..1f2b9494a5 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -1925,14 +1925,18 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): mock_stdout = six.StringIO() mock_stderr = six.StringIO() - argv = ["", "object.ring.gz"] + argv = ["", self.tmpfile, "rebalance", "3"], + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + argv = ["", "%s.ring.gz" % self.tmpfile] with mock.patch("sys.stdout", mock_stdout): with mock.patch("sys.stderr", mock_stderr): self.assertSystemExit(EXIT_ERROR, ringbuilder.main, argv) - expected = "Note: using object.builder instead of object.ring.gz " \ + expected = "Note: using %s.builder instead of %s.ring.gz " \ "as builder file\n" \ - "Ring Builder file does not exist: object.builder\n" + "Ring Builder file does not exist: %s.builder\n" % ( + self.tmpfile, self.tmpfile, self.tmpfile) self.assertEqual(expected, mock_stdout.getvalue()) def test_main_no_arguments(self): From 7a50972104095478c91a477f5c5499dda4372711 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Thu, 9 Jun 2016 11:22:37 -0700 Subject: [PATCH 008/156] update .gitreview Change-Id: I9593e453891c137fd430a44306e17268ba45fd12 --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index d7c52c0593..8dcf603328 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack/swift.git +defaultbranch=feature/crypto-review From 6c9a1899a182d58232a95af92dae168df9e131d5 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Tue, 31 May 2016 20:53:03 +0000 Subject: [PATCH 009/156] Adds region as a search-value in manpage This patch adds region and example related to it in the man page of swift-ring-builder. 
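For example, a search value may now include a region qualifier (illustrative device values):

    swift-ring-builder account.builder search r1z1-1.2.3.4:5678/sdb1

which matches devices in region 1, zone 1 with the ip 1.2.3.4, port 5678 and device name sdb1.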
Change-Id: I12ceab4e41240240cf2daa77dad94729dd1fd76d --- doc/manpages/swift-ring-builder.1 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/manpages/swift-ring-builder.1 b/doc/manpages/swift-ring-builder.1 index 4caa04b81e..12b15bb3a8 100644 --- a/doc/manpages/swift-ring-builder.1 +++ b/doc/manpages/swift-ring-builder.1 @@ -64,7 +64,7 @@ needs to interact with the rings manually. .IP "\fB\fR" .RS 5 .IP "Can be of the form:" -.IP "dz-:/_" +.IP "drz-:/_" .IP "Any part is optional, but you must include at least one, examples:" @@ -73,6 +73,7 @@ needs to interact with the rings manually. .IP "z1 Matches devices in zone 1" .IP "z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4" .IP "1.2.3.4 Matches devices in any zone with the ip 1.2.3.4" +.IP "r1z1:5678 Matches devices in zone 1 present in region 1 using port 5678" .IP "z1:5678 Matches devices in zone 1 using port 5678" .IP ":5678 Matches devices that use port 5678" .IP "/sdb1 Matches devices with the device name sdb1" From 65a9a6d21b81a000effd911cab5613a3fa6a784e Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Tue, 7 Jun 2016 11:27:56 +0000 Subject: [PATCH 010/156] Add simple multiple server type test Ensures that swift-recon actually gathers data from multiple server types if more than one is given on the command line. Change-Id: I4017b82fb044265ec117df01e14968752df02201 --- test/unit/cli/test_recon.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py index b30c624787..ebec89df7c 100644 --- a/test/unit/cli/test_recon.py +++ b/test/unit/cli/test_recon.py @@ -878,3 +878,27 @@ class TestReconCommands(unittest.TestCase): # We need any_order=True because the order of calls depends on the dict # that is returned from the recon middleware, thus can't rely on it mock_print.assert_has_calls(default_calls, any_order=True) + + @mock.patch('six.moves.builtins.print') + @mock.patch('swift.cli.recon.SwiftRecon.get_hosts') + def test_multiple_server_types(self, mock_get_hosts, mock_print): + mock_get_hosts.return_value = set([('127.0.0.1', 10000)]) + + self.recon.object_auditor_check = mock.MagicMock() + self.recon.auditor_check = mock.MagicMock() + + with mock.patch.object( + sys, 'argv', + ["prog", "account", "container", "object", "--auditor"]): + self.recon.main() + expected_calls = [ + mock.call("--> Starting reconnaissance on 1 hosts (account)"), + mock.call("--> Starting reconnaissance on 1 hosts (container)"), + mock.call("--> Starting reconnaissance on 1 hosts (object)"), + ] + mock_print.assert_has_calls(expected_calls, any_order=True) + + expected = mock.call(set([('127.0.0.1', 10000)])) + self.recon.object_auditor_check.assert_has_calls([expected]) + # Two calls expected - one account, one container + self.recon.auditor_check.assert_has_calls([expected, expected]) From 3944d820387f08372c1a29444f4af7d8e6090ae9 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Tue, 14 Jun 2016 16:10:16 -0700 Subject: [PATCH 011/156] Catch AttributeError less often I always get tripped up when I'm editing code that catches AttributeError and does something with it. I'll type "req.emthod" or something, and next thing I know I'm getting 405s in all my unit tests. This diff removes some places where we catch AttributeError (sometimes, having deliberately thrown it only one line before) so that typos can crash the way Guido intended. 
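In essence (simplified from the proxy-server hunk below), the change replaces

    try:
        handler = getattr(controller, req.method)
        getattr(handler, 'publicly_accessible')
    except AttributeError:
        return HTTPMethodNotAllowed(request=req)

with

    handler = getattr(controller, req.method, None)
    if not getattr(handler, 'publicly_accessible', False):
        return HTTPMethodNotAllowed(request=req)

so that only a genuinely missing or non-public method becomes a 405, while a typo'd attribute access elsewhere raises AttributeError as usual.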
Change-Id: I2f7586f96b41a97e6ae254efc83218b3b5c6cc9e --- swift/account/server.py | 8 ++------ swift/common/manager.py | 5 ++--- swift/container/server.py | 8 ++------ swift/obj/server.py | 5 +---- swift/proxy/server.py | 7 +++---- 5 files changed, 10 insertions(+), 23 deletions(-) diff --git a/swift/account/server.py b/swift/account/server.py index 8795844afc..3bfe3fbf49 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -262,14 +262,10 @@ class AccountController(BaseStorageServer): else: try: # disallow methods which are not publicly accessible - try: - if req.method not in self.allowed_methods: - raise AttributeError('Not allowed method.') - except AttributeError: + if req.method not in self.allowed_methods: res = HTTPMethodNotAllowed() else: - method = getattr(self, req.method) - res = method(req) + res = getattr(self, req.method)(req) except HTTPException as error_response: res = error_response except (Exception, Timeout): diff --git a/swift/common/manager.py b/swift/common/manager.py index 123f27d10f..2cc764493c 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -378,9 +378,8 @@ class Manager(object): """ cmd = cmd.lower().replace('-', '_') - try: - f = getattr(self, cmd) - except AttributeError: + f = getattr(self, cmd, None) + if f is None: raise UnknownCommandError(cmd) if not hasattr(f, 'publicly_accessible'): raise UnknownCommandError(cmd) diff --git a/swift/container/server.py b/swift/container/server.py index a77dadcd22..898ef36ea1 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -594,14 +594,10 @@ class ContainerController(BaseStorageServer): else: try: # disallow methods which have not been marked 'public' - try: - if req.method not in self.allowed_methods: - raise AttributeError('Not allowed method.') - except AttributeError: + if req.method not in self.allowed_methods: res = HTTPMethodNotAllowed() else: - method = getattr(self, req.method) - res = method(req) + res = getattr(self, req.method)(req) except HTTPException as error_response: res = error_response except (Exception, Timeout): diff --git a/swift/obj/server.py b/swift/obj/server.py index c3fde72525..b9c8616124 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -1031,10 +1031,7 @@ class ObjectController(BaseStorageServer): else: try: # disallow methods which have not been marked 'public' - try: - if req.method not in self.allowed_methods: - raise AttributeError('Not allowed method.') - except AttributeError: + if req.method not in self.allowed_methods: res = HTTPMethodNotAllowed() else: method = getattr(self, req.method) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 4993c90735..99b99afd54 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -382,10 +382,9 @@ class Application(object): req.headers['x-trans-id'] = req.environ['swift.trans_id'] controller.trans_id = req.environ['swift.trans_id'] self.logger.client_ip = get_remote_client(req) - try: - handler = getattr(controller, req.method) - getattr(handler, 'publicly_accessible') - except AttributeError: + + handler = getattr(controller, req.method, None) + if not getattr(handler, 'publicly_accessible', False): allowed_methods = getattr(controller, 'allowed_methods', set()) return HTTPMethodNotAllowed( request=req, headers={'Allow': ', '.join(allowed_methods)}) From 29b8d2da20bba3782b2182d01350f16e3db44263 Mon Sep 17 00:00:00 2001 From: Kota Tsuyuzaki Date: Tue, 14 Jun 2016 00:52:24 -0700 Subject: [PATCH 012/156] Avoid docs warning: Duplicate explicit target name 
When we add two (or more) different links with the same target name, we need to use two underscores[1] instead of one. This avoids "Duplicate explicit target name" warnings. 1: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#hyperlink-references Change-Id: I8a493e7a1deeece33ee1b3fb3f5c848f3cc31d06 --- doc/source/howto_installmultinode.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/howto_installmultinode.rst b/doc/source/howto_installmultinode.rst index 0e296237a2..3df24bee0b 100644 --- a/doc/source/howto_installmultinode.rst +++ b/doc/source/howto_installmultinode.rst @@ -10,29 +10,29 @@ Object Storage installation guide for OpenStack Mitaka ------------------------------------------------------ * `openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1 `_ - * `RHEL 7, CentOS 7 `__ - * `Ubuntu 14.04 `__ + * `RHEL 7, CentOS 7 `__ + * `Ubuntu 14.04 `__ Object Storage installation guide for OpenStack Liberty ------------------------------------------------------- - * `openSUSE 13.2 and SUSE Linux Enterprise Server 12 `__ - * `RHEL 7, CentOS 7 `__ - * `Ubuntu 14.04 `__ + * `openSUSE 13.2 and SUSE Linux Enterprise Server 12 `__ + * `RHEL 7, CentOS 7 `__ + * `Ubuntu 14.04 `__ Object Storage installation guide for OpenStack Kilo ---------------------------------------------------- - * `openSUSE 13.2 and SUSE Linux Enterprise Server 12 `__ + * `openSUSE 13.2 and SUSE Linux Enterprise Server 12 `__ * `RHEL 7, CentOS 7, and Fedora 21 `_ - * `Ubuntu 14.04 `__ + * `Ubuntu 14.04 `__ Object Storage installation guide for OpenStack Juno ---------------------------------------------------- * `openSUSE 13.1 and SUSE Linux Enterprise Server 11 `_ * `RHEL 7, CentOS 7, and Fedora 20 `_ - * `Ubuntu 14.04 `__ + * `Ubuntu 14.04 `__ Object Storage installation guide for OpenStack Icehouse -------------------------------------------------------- From 928c4790ebce3782f42d239faa9758941a8dd296 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 7 Jun 2016 13:41:55 +0100 Subject: [PATCH 013/156] Refactor tests and add tests Relocates some test infrastructure in preparation for use with encryption tests, in particular moves the test server setup code from test/unit/proxy/test_server.py to a new helpers.py so that it can be re-used, and adds the ability to specify additional config options for the test servers (used in encryption tests). Adds unit test coverage for extract_swift_bytes and functional test coverage for container listings. Adds a check on the content and metadata of reconciled objects in probe tests.
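For reference, extract_swift_bytes() splits the swift_bytes content-type parameter from a listing content-type and returns a (content_type, swift_bytes) tuple; two of the newly covered scenarios:

    extract_swift_bytes('text/plain; swift_bytes=123')
    -> ('text/plain', '123')
    extract_swift_bytes('text/plain; other=thing;swift_bytes=123')
    -> ('text/plain;other=thing', '123')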
Change-Id: I9bfbf4e47cb0eb370e7a74d18c78d67b6b9d6645 --- test/functional/swift_test_client.py | 1 + test/functional/tests.py | 24 ++ test/probe/brain.py | 6 +- .../test_container_merge_policy_index.py | 84 +++--- test/unit/common/middleware/helpers.py | 10 + .../common/middleware/test_proxy_logging.py | 7 +- test/unit/common/test_utils.py | 18 ++ test/unit/helpers.py | 271 ++++++++++++++++++ test/unit/proxy/test_server.py | 226 ++------------- 9 files changed, 393 insertions(+), 254 deletions(-) create mode 100644 test/unit/helpers.py diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 3c9bb0b5e2..98262f5892 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -585,6 +585,7 @@ class Container(Base): file_item['name'] = file_item['name'].encode('utf-8') file_item['content_type'] = file_item['content_type'].\ encode('utf-8') + file_item['bytes'] = int(file_item['bytes']) return files else: content = self.conn.response.read() diff --git a/test/functional/tests.py b/test/functional/tests.py index d083aa10c2..78f1f33be1 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -744,6 +744,30 @@ class TestContainer(Base): for file_item in files: self.assertIn(file_item, self.env.files) + def _testContainerFormattedFileList(self, format_type): + expected = {} + for name in self.env.files: + expected[name] = self.env.container.file(name).info() + + file_list = self.env.container.files(parms={'format': format_type}) + self.assert_status(200) + for actual in file_list: + name = actual['name'] + self.assertIn(name, expected) + self.assertEqual(expected[name]['etag'], actual['hash']) + self.assertEqual( + expected[name]['content_type'], actual['content_type']) + self.assertEqual( + expected[name]['content_length'], actual['bytes']) + expected.pop(name) + self.assertFalse(expected) # sanity check + + def testContainerJsonFileList(self): + self._testContainerFormattedFileList('json') + + def testContainerXmlFileList(self): + self._testContainerFormattedFileList('xml') + def testMarkerLimitFileList(self): for format_type in [None, 'json', 'xml']: for marker in ['0', 'A', 'I', 'R', 'Z', 'a', 'i', 'r', 'z', diff --git a/test/probe/brain.py b/test/probe/brain.py index 9f90ed8d8b..3a63b18565 100644 --- a/test/probe/brain.py +++ b/test/probe/brain.py @@ -164,12 +164,12 @@ class BrainSplitter(object): client.delete_container(self.url, self.token, self.container_name) @command - def put_object(self, headers=None): + def put_object(self, headers=None, contents=None): """ - issue put for zero byte test object + issue put for test object """ client.put_object(self.url, self.token, self.container_name, - self.object_name, headers=headers) + self.object_name, headers=headers, contents=contents) @command def delete_object(self): diff --git a/test/probe/test_container_merge_policy_index.py b/test/probe/test_container_merge_policy_index.py index 829329a7eb..cd60e6dead 100644 --- a/test/probe/test_container_merge_policy_index.py +++ b/test/probe/test_container_merge_policy_index.py @@ -46,6 +46,24 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name, 'container') + def _get_object_patiently(self, policy_index): + # use proxy to access object (bad container info might be cached...) 
+ timeout = time.time() + TIMEOUT + while time.time() < timeout: + try: + return client.get_object(self.url, self.token, + self.container_name, + self.object_name) + except ClientException as err: + if err.http_status != HTTP_NOT_FOUND: + raise + time.sleep(1) + else: + self.fail('could not HEAD /%s/%s/%s/ from policy %s ' + 'after %s seconds.' % ( + self.account, self.container_name, self.object_name, + int(policy_index), TIMEOUT)) + def test_merge_storage_policy_index(self): # generic split brain self.brain.stop_primary_half() @@ -53,7 +71,8 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.brain.start_primary_half() self.brain.stop_handoff_half() self.brain.put_container() - self.brain.put_object() + self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'}, + contents='VERIFY') self.brain.start_handoff_half() # make sure we have some manner of split brain container_part, container_nodes = self.container_ring.get_nodes( @@ -127,24 +146,10 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.fail('Found /%s/%s/%s in %s' % ( self.account, self.container_name, self.object_name, orig_policy_index)) - # use proxy to access object (bad container info might be cached...) - timeout = time.time() + TIMEOUT - while time.time() < timeout: - try: - metadata = client.head_object(self.url, self.token, - self.container_name, - self.object_name) - except ClientException as err: - if err.http_status != HTTP_NOT_FOUND: - raise - time.sleep(1) - else: - break - else: - self.fail('could not HEAD /%s/%s/%s/ from policy %s ' - 'after %s seconds.' % ( - self.account, self.container_name, self.object_name, - expected_policy_index, TIMEOUT)) + # verify that the object data read by external client is correct + headers, data = self._get_object_patiently(expected_policy_index) + self.assertEqual('VERIFY', data) + self.assertEqual('custom-meta', headers['x-object-meta-test']) def test_reconcile_delete(self): # generic split brain @@ -399,17 +404,18 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.assertEqual(2, len(old_container_node_ids)) # hopefully memcache still has the new policy cached - self.brain.put_object() + self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'}, + contents='VERIFY') # double-check object correctly written to new policy conf_files = [] for server in Manager(['container-reconciler']).servers: conf_files.extend(server.conf_files()) conf_file = conf_files[0] - client = InternalClient(conf_file, 'probe-test', 3) - client.get_object_metadata( + int_client = InternalClient(conf_file, 'probe-test', 3) + int_client.get_object_metadata( self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': int(new_policy)}) - client.get_object_metadata( + int_client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4,), headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) @@ -423,9 +429,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest): tuple(server.once(number=n + 1) for n in old_container_node_ids) # verify entry in the queue for the "misplaced" new_policy - for container in client.iter_containers('.misplaced_objects'): - for obj in client.iter_objects('.misplaced_objects', - container['name']): + for container in int_client.iter_containers('.misplaced_objects'): + for obj in int_client.iter_objects('.misplaced_objects', + container['name']): expected = '%d:/%s/%s/%s' % (new_policy, self.account, self.container_name, self.object_name) @@ -434,12 +440,12 @@ class 
TestContainerMergePolicyIndex(ReplProbeTest): Manager(['container-reconciler']).once() # verify object in old_policy - client.get_object_metadata( + int_client.get_object_metadata( self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) # verify object is *not* in new_policy - client.get_object_metadata( + int_client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4,), headers={'X-Backend-Storage-Policy-Index': int(new_policy)}) @@ -447,10 +453,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest): self.get_to_final_state() # verify entry in the queue - client = InternalClient(conf_file, 'probe-test', 3) - for container in client.iter_containers('.misplaced_objects'): - for obj in client.iter_objects('.misplaced_objects', - container['name']): + for container in int_client.iter_containers('.misplaced_objects'): + for obj in int_client.iter_objects('.misplaced_objects', + container['name']): expected = '%d:/%s/%s/%s' % (old_policy, self.account, self.container_name, self.object_name) @@ -459,21 +464,26 @@ class TestContainerMergePolicyIndex(ReplProbeTest): Manager(['container-reconciler']).once() # and now it flops back - client.get_object_metadata( + int_client.get_object_metadata( self.account, self.container_name, self.object_name, headers={'X-Backend-Storage-Policy-Index': int(new_policy)}) - client.get_object_metadata( + int_client.get_object_metadata( self.account, self.container_name, self.object_name, acceptable_statuses=(4,), headers={'X-Backend-Storage-Policy-Index': int(old_policy)}) # make sure the queue is settled self.get_to_final_state() - for container in client.iter_containers('.misplaced_objects'): - for obj in client.iter_objects('.misplaced_objects', - container['name']): + for container in int_client.iter_containers('.misplaced_objects'): + for obj in int_client.iter_objects('.misplaced_objects', + container['name']): self.fail('Found unexpected object %r in the queue' % obj) + # verify that the object data read by external client is correct + headers, data = self._get_object_patiently(int(new_policy)) + self.assertEqual('VERIFY', data) + self.assertEqual('custom-meta', headers['x-object-meta-test']) + if __name__ == "__main__": unittest.main() diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index bcd3c4c2ec..e542818967 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -168,3 +168,13 @@ class FakeSwift(object): def register_responses(self, method, path, responses): self._responses[(method, path)] = list(responses) + + +class FakeAppThatExcepts(object): + MESSAGE = "We take exception to that!" 
+ + def __init__(self, exception_class=Exception): + self.exception_class = exception_class + + def __call__(self, env, start_response): + raise self.exception_class(self.MESSAGE) diff --git a/test/unit/common/middleware/test_proxy_logging.py b/test/unit/common/middleware/test_proxy_logging.py index 19866cb793..2282a9f1b7 100644 --- a/test/unit/common/middleware/test_proxy_logging.py +++ b/test/unit/common/middleware/test_proxy_logging.py @@ -27,6 +27,7 @@ from swift.common.swob import Request, Response from swift.common import constraints from swift.common.storage_policy import StoragePolicy from test.unit import patch_policies +from test.unit.common.middleware.helpers import FakeAppThatExcepts class FakeApp(object): @@ -59,12 +60,6 @@ class FakeApp(object): return self.body -class FakeAppThatExcepts(object): - - def __call__(self, env, start_response): - raise Exception("We take exception to that!") - - class FakeAppNoContentLengthNoTransferEncoding(object): def __init__(self, body=None): diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 14e826c908..446abfc1fa 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -3210,6 +3210,24 @@ cluster_dfw1 = http://dfw1.host/v1/ self.assertEqual(listing_dict['content_type'], 'text/plain;hello="world"') + def test_extract_swift_bytes(self): + scenarios = { + # maps input value -> expected returned tuple + '': ('', None), + 'text/plain': ('text/plain', None), + 'text/plain; other=thing': ('text/plain;other=thing', None), + 'text/plain; swift_bytes=123': ('text/plain', '123'), + 'text/plain; other=thing;swift_bytes=123': + ('text/plain;other=thing', '123'), + 'text/plain; swift_bytes=123; other=thing': + ('text/plain;other=thing', '123'), + 'text/plain; swift_bytes=123; swift_bytes=456': + ('text/plain', '456'), + 'text/plain; swift_bytes=123; other=thing;swift_bytes=456': + ('text/plain;other=thing', '456')} + for test_value, expected in scenarios.items(): + self.assertEqual(expected, utils.extract_swift_bytes(test_value)) + def test_clean_content_type(self): subtests = { '': '', 'text/plain': 'text/plain', diff --git a/test/unit/helpers.py b/test/unit/helpers.py new file mode 100644 index 0000000000..46f4b80b1e --- /dev/null +++ b/test/unit/helpers.py @@ -0,0 +1,271 @@ +# Copyright (c) 2010-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides helper functions for unit tests. + +This cannot be in test/unit/__init__.py because that module is imported by the +py34 unit test job and there are imports here that end up importing modules +that are not yet ported to py34, such wsgi.py which import mimetools. 
+""" +import os +from contextlib import closing +from gzip import GzipFile +from tempfile import mkdtemp +import time + + +from eventlet import listen, spawn, wsgi +import mock +from shutil import rmtree +import six.moves.cPickle as pickle + +import swift +from swift.account import server as account_server +from swift.common import storage_policy +from swift.common.ring import RingData +from swift.common.storage_policy import StoragePolicy, ECStoragePolicy +from swift.common.middleware import proxy_logging +from swift.common import utils +from swift.common.utils import mkdirs, normalize_timestamp, NullLogger +from swift.container import server as container_server +from swift.obj import server as object_server +from swift.proxy import server as proxy_server +import swift.proxy.controllers.obj + +from test.unit import write_fake_ring, DEFAULT_TEST_EC_TYPE, debug_logger, \ + connect_tcp, readuntil2crlfs + + +def setup_servers(the_object_server=object_server, extra_conf=None): + """ + Setup proxy, account, container and object servers using a set of fake + rings and policies. + + :param the_object_server: The object server module to use (optional, + defaults to swift.obj.server) + :param extra_conf: A dict of config options that will update the basic + config passed to all server instances. + :returns: A dict containing the following entries: + orig_POLICIES: the value of storage_policy.POLICIES prior to + it being patched with fake policies + orig_SysLogHandler: the value of utils.SysLogHandler prior to + it being patched + testdir: root directory used for test files + test_POLICIES: a StoragePolicyCollection of fake policies + test_servers: a tuple of test server instances + test_sockets: a tuple of sockets used by test servers + test_coros: a tuple of greenthreads in which test servers are + running + """ + context = { + "orig_POLICIES": storage_policy._POLICIES, + "orig_SysLogHandler": utils.SysLogHandler} + + utils.HASH_PATH_SUFFIX = 'endcap' + utils.SysLogHandler = mock.MagicMock() + # Since we're starting up a lot here, we're going to test more than + # just chunked puts; we're also going to test parts of + # proxy_server.Application we couldn't get to easily otherwise. 
+ context["testdir"] = _testdir = \ + os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked') + mkdirs(_testdir) + rmtree(_testdir) + for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1', + 'sdf1', 'sdg1', 'sdh1', 'sdi1'): + mkdirs(os.path.join(_testdir, drive, 'tmp')) + conf = {'devices': _testdir, 'swift_dir': _testdir, + 'mount_check': 'false', 'allowed_headers': + 'content-encoding, x-object-manifest, content-disposition, foo', + 'allow_versions': 't'} + if extra_conf: + conf.update(extra_conf) + prolis = listen(('localhost', 0)) + acc1lis = listen(('localhost', 0)) + acc2lis = listen(('localhost', 0)) + con1lis = listen(('localhost', 0)) + con2lis = listen(('localhost', 0)) + obj1lis = listen(('localhost', 0)) + obj2lis = listen(('localhost', 0)) + obj3lis = listen(('localhost', 0)) + objsocks = [obj1lis, obj2lis, obj3lis] + context["test_sockets"] = \ + (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis) + account_ring_path = os.path.join(_testdir, 'account.ring.gz') + account_devs = [ + {'port': acc1lis.getsockname()[1]}, + {'port': acc2lis.getsockname()[1]}, + ] + write_fake_ring(account_ring_path, *account_devs) + container_ring_path = os.path.join(_testdir, 'container.ring.gz') + container_devs = [ + {'port': con1lis.getsockname()[1]}, + {'port': con2lis.getsockname()[1]}, + ] + write_fake_ring(container_ring_path, *container_devs) + storage_policy._POLICIES = storage_policy.StoragePolicyCollection([ + StoragePolicy(0, 'zero', True), + StoragePolicy(1, 'one', False), + StoragePolicy(2, 'two', False), + ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE, + ec_ndata=2, ec_nparity=1, ec_segment_size=4096)]) + obj_rings = { + 0: ('sda1', 'sdb1'), + 1: ('sdc1', 'sdd1'), + 2: ('sde1', 'sdf1'), + # sdg1, sdh1, sdi1 taken by policy 3 (see below) + } + for policy_index, devices in obj_rings.items(): + policy = storage_policy.POLICIES[policy_index] + obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz') + obj_devs = [ + {'port': objsock.getsockname()[1], 'device': dev} + for objsock, dev in zip(objsocks, devices)] + write_fake_ring(obj_ring_path, *obj_devs) + + # write_fake_ring can't handle a 3-element ring, and the EC policy needs + # at least 3 devs to work with, so we do it manually + devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1', + 'port': obj1lis.getsockname()[1]}, + {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1', + 'port': obj2lis.getsockname()[1]}, + {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1', + 'port': obj3lis.getsockname()[1]}] + pol3_replica2part2dev_id = [[0, 1, 2, 0], + [1, 2, 0, 1], + [2, 0, 1, 2]] + obj3_ring_path = os.path.join( + _testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz') + part_shift = 30 + with closing(GzipFile(obj3_ring_path, 'wb')) as fh: + pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh) + + prosrv = proxy_server.Application(conf, logger=debug_logger('proxy')) + for policy in storage_policy.POLICIES: + # make sure all the rings are loaded + prosrv.get_object_ring(policy.idx) + # don't lose this one! 
+ context["test_POLICIES"] = storage_policy._POLICIES + acc1srv = account_server.AccountController( + conf, logger=debug_logger('acct1')) + acc2srv = account_server.AccountController( + conf, logger=debug_logger('acct2')) + con1srv = container_server.ContainerController( + conf, logger=debug_logger('cont1')) + con2srv = container_server.ContainerController( + conf, logger=debug_logger('cont2')) + obj1srv = the_object_server.ObjectController( + conf, logger=debug_logger('obj1')) + obj2srv = the_object_server.ObjectController( + conf, logger=debug_logger('obj2')) + obj3srv = the_object_server.ObjectController( + conf, logger=debug_logger('obj3')) + context["test_servers"] = \ + (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv) + nl = NullLogger() + logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf, + logger=prosrv.logger) + prospa = spawn(wsgi.server, prolis, logging_prosv, nl) + acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl) + acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl) + con1spa = spawn(wsgi.server, con1lis, con1srv, nl) + con2spa = spawn(wsgi.server, con2lis, con2srv, nl) + obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl) + obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl) + obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl) + context["test_coros"] = \ + (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa) + # Create account + ts = normalize_timestamp(time.time()) + partition, nodes = prosrv.account_ring.get_nodes('a') + for node in nodes: + conn = swift.proxy.controllers.obj.http_connect(node['ip'], + node['port'], + node['device'], + partition, 'PUT', '/a', + {'X-Timestamp': ts, + 'x-trans-id': 'test'}) + resp = conn.getresponse() + assert(resp.status == 201) + # Create another account + # used for account-to-account tests + ts = normalize_timestamp(time.time()) + partition, nodes = prosrv.account_ring.get_nodes('a1') + for node in nodes: + conn = swift.proxy.controllers.obj.http_connect(node['ip'], + node['port'], + node['device'], + partition, 'PUT', + '/a1', + {'X-Timestamp': ts, + 'x-trans-id': 'test'}) + resp = conn.getresponse() + assert(resp.status == 201) + # Create containers, 1 per test policy + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % ( + exp, headers[:len(exp)]) + # Create container in other account + # used for account-to-account tests + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % ( + exp, headers[:len(exp)]) + + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write( + 'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + assert headers[:len(exp)] == exp, \ + "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)]) + + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = 
sock.makefile() + fd.write( + 'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n' + 'Content-Length: 0\r\n\r\n') + fd.flush() + headers = readuntil2crlfs(fd) + exp = 'HTTP/1.1 201' + assert headers[:len(exp)] == exp, \ + "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)]) + return context + + +def teardown_servers(context): + for server in context["test_coros"]: + server.kill() + rmtree(os.path.dirname(context["testdir"])) + utils.SysLogHandler = context["orig_SysLogHandler"] + storage_policy._POLICIES = context["orig_POLICIES"] diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 7aac742c19..6ae48bc605 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -20,12 +20,10 @@ import logging import json import math import os -import pickle import sys import traceback import unittest -from contextlib import closing, contextmanager -from gzip import GzipFile +from contextlib import contextmanager from shutil import rmtree import gc import time @@ -55,13 +53,11 @@ from swift.common.utils import hash_path, storage_directory, \ iter_multipart_mime_documents, public from test.unit import ( - connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing, + connect_tcp, readuntil2crlfs, FakeLogger, FakeRing, fake_http_connect, FakeMemcache, debug_logger, patch_policies, write_fake_ring, mocked_http_conn, DEFAULT_TEST_EC_TYPE) from swift.proxy import server as proxy_server from swift.proxy.controllers.obj import ReplicatedObjectController -from swift.account import server as account_server -from swift.container import server as container_server from swift.obj import server as object_server from swift.common.middleware import proxy_logging, versioned_writes, \ copy @@ -69,8 +65,7 @@ from swift.common.middleware.acl import parse_acl, format_acl from swift.common.exceptions import ChunkReadTimeout, DiskFileNotExist, \ APIVersionError, ChunkWriteTimeout from swift.common import utils, constraints -from swift.common.ring import RingData -from swift.common.utils import mkdirs, normalize_timestamp, NullLogger +from swift.common.utils import mkdirs, NullLogger from swift.common.wsgi import monkey_patch_mimetools, loadapp from swift.proxy.controllers import base as proxy_base from swift.proxy.controllers.base import get_cache_key, cors_validation, \ @@ -80,212 +75,31 @@ import swift.proxy.controllers.obj from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import Request, Response, HTTPUnauthorized, \ HTTPException, HTTPBadRequest -from swift.common import storage_policy -from swift.common.storage_policy import StoragePolicy, ECStoragePolicy, \ - StoragePolicyCollection, POLICIES +from swift.common.storage_policy import StoragePolicy, POLICIES import swift.common.request_helpers from swift.common.request_helpers import get_sys_meta_prefix +from test.unit.helpers import setup_servers, teardown_servers + # mocks logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) STATIC_TIME = time.time() -_test_coros = _test_servers = _test_sockets = _orig_container_listing_limit = \ - _testdir = _orig_SysLogHandler = _orig_POLICIES = _test_POLICIES = None +_test_context = _test_servers = _test_sockets = _testdir = \ + _test_POLICIES = None -def do_setup(the_object_server): - utils.HASH_PATH_SUFFIX = 'endcap' - global _testdir, _test_servers, _test_sockets, \ - _orig_container_listing_limit, _test_coros, _orig_SysLogHandler, \ - _orig_POLICIES, 
_test_POLICIES - _orig_POLICIES = storage_policy._POLICIES - _orig_SysLogHandler = utils.SysLogHandler - utils.SysLogHandler = mock.MagicMock() +def do_setup(object_server): + # setup test context and break out some globals for convenience + global _test_context, _testdir, _test_servers, _test_sockets, \ + _test_POLICIES monkey_patch_mimetools() - # Since we're starting up a lot here, we're going to test more than - # just chunked puts; we're also going to test parts of - # proxy_server.Application we couldn't get to easily otherwise. - _testdir = \ - os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked') - mkdirs(_testdir) - rmtree(_testdir) - for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1', - 'sdf1', 'sdg1', 'sdh1', 'sdi1'): - mkdirs(os.path.join(_testdir, drive, 'tmp')) - conf = {'devices': _testdir, 'swift_dir': _testdir, - 'mount_check': 'false', 'allowed_headers': - 'content-encoding, x-object-manifest, content-disposition, foo', - 'allow_versions': 't'} - prolis = listen(('localhost', 0)) - acc1lis = listen(('localhost', 0)) - acc2lis = listen(('localhost', 0)) - con1lis = listen(('localhost', 0)) - con2lis = listen(('localhost', 0)) - obj1lis = listen(('localhost', 0)) - obj2lis = listen(('localhost', 0)) - obj3lis = listen(('localhost', 0)) - objsocks = [obj1lis, obj2lis, obj3lis] - _test_sockets = \ - (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis) - account_ring_path = os.path.join(_testdir, 'account.ring.gz') - account_devs = [ - {'port': acc1lis.getsockname()[1]}, - {'port': acc2lis.getsockname()[1]}, - ] - write_fake_ring(account_ring_path, *account_devs) - container_ring_path = os.path.join(_testdir, 'container.ring.gz') - container_devs = [ - {'port': con1lis.getsockname()[1]}, - {'port': con2lis.getsockname()[1]}, - ] - write_fake_ring(container_ring_path, *container_devs) - storage_policy._POLICIES = StoragePolicyCollection([ - StoragePolicy(0, 'zero', True), - StoragePolicy(1, 'one', False), - StoragePolicy(2, 'two', False), - ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE, - ec_ndata=2, ec_nparity=1, ec_segment_size=4096)]) - obj_rings = { - 0: ('sda1', 'sdb1'), - 1: ('sdc1', 'sdd1'), - 2: ('sde1', 'sdf1'), - # sdg1, sdh1, sdi1 taken by policy 3 (see below) - } - for policy_index, devices in obj_rings.items(): - policy = POLICIES[policy_index] - obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz') - obj_devs = [ - {'port': objsock.getsockname()[1], 'device': dev} - for objsock, dev in zip(objsocks, devices)] - write_fake_ring(obj_ring_path, *obj_devs) - - # write_fake_ring can't handle a 3-element ring, and the EC policy needs - # at least 3 devs to work with, so we do it manually - devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1', - 'port': obj1lis.getsockname()[1]}, - {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1', - 'port': obj2lis.getsockname()[1]}, - {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1', - 'port': obj3lis.getsockname()[1]}] - pol3_replica2part2dev_id = [[0, 1, 2, 0], - [1, 2, 0, 1], - [2, 0, 1, 2]] - obj3_ring_path = os.path.join(_testdir, POLICIES[3].ring_name + '.ring.gz') - part_shift = 30 - with closing(GzipFile(obj3_ring_path, 'wb')) as fh: - pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh) - - prosrv = proxy_server.Application(conf, FakeMemcacheReturnsNone(), - logger=debug_logger('proxy')) - for policy in POLICIES: - # make sure all the rings are loaded - prosrv.get_object_ring(policy.idx) - # don't lose this one! 
- _test_POLICIES = storage_policy._POLICIES - acc1srv = account_server.AccountController( - conf, logger=debug_logger('acct1')) - acc2srv = account_server.AccountController( - conf, logger=debug_logger('acct2')) - con1srv = container_server.ContainerController( - conf, logger=debug_logger('cont1')) - con2srv = container_server.ContainerController( - conf, logger=debug_logger('cont2')) - obj1srv = the_object_server.ObjectController( - conf, logger=debug_logger('obj1')) - obj2srv = the_object_server.ObjectController( - conf, logger=debug_logger('obj2')) - obj3srv = the_object_server.ObjectController( - conf, logger=debug_logger('obj3')) - _test_servers = \ - (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv) - nl = NullLogger() - logging_prosv = proxy_logging.ProxyLoggingMiddleware(prosrv, conf, - logger=prosrv.logger) - prospa = spawn(wsgi.server, prolis, logging_prosv, nl) - acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl) - acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl) - con1spa = spawn(wsgi.server, con1lis, con1srv, nl) - con2spa = spawn(wsgi.server, con2lis, con2srv, nl) - obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl) - obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl) - obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl) - _test_coros = \ - (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa) - # Create account - ts = normalize_timestamp(time.time()) - partition, nodes = prosrv.account_ring.get_nodes('a') - for node in nodes: - conn = swift.proxy.controllers.obj.http_connect(node['ip'], - node['port'], - node['device'], - partition, 'PUT', '/a', - {'X-Timestamp': ts, - 'x-trans-id': 'test'}) - resp = conn.getresponse() - assert(resp.status == 201) - # Create another account - # used for account-to-account tests - ts = normalize_timestamp(time.time()) - partition, nodes = prosrv.account_ring.get_nodes('a1') - for node in nodes: - conn = swift.proxy.controllers.obj.http_connect(node['ip'], - node['port'], - node['device'], - partition, 'PUT', - '/a1', - {'X-Timestamp': ts, - 'x-trans-id': 'test'}) - resp = conn.getresponse() - assert(resp.status == 201) - # Create containers, 1 per test policy - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % ( - exp, headers[:len(exp)]) - # Create container in other account - # used for account-to-account tests - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % ( - exp, headers[:len(exp)]) - - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write( - 'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - assert headers[:len(exp)] == exp, \ - "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)]) - - sock = connect_tcp(('localhost', prolis.getsockname()[1])) - fd = sock.makefile() - fd.write( - 
'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n' - 'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n' - 'Content-Length: 0\r\n\r\n') - fd.flush() - headers = readuntil2crlfs(fd) - exp = 'HTTP/1.1 201' - assert headers[:len(exp)] == exp, \ - "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)]) + _test_context = setup_servers(object_server) + _testdir = _test_context["testdir"] + _test_servers = _test_context["test_servers"] + _test_sockets = _test_context["test_sockets"] + _test_POLICIES = _test_context["test_POLICIES"] def unpatch_policies(f): @@ -308,11 +122,7 @@ def setup(): def teardown(): - for server in _test_coros: - server.kill() - rmtree(os.path.dirname(_testdir)) - utils.SysLogHandler = _orig_SysLogHandler - storage_policy._POLICIES = _orig_POLICIES + teardown_servers(_test_context) def sortHeaderNames(headerNames): From da4a59f8e276e80764b58f9c52d6a3c2bca06782 Mon Sep 17 00:00:00 2001 From: Or Ozeri Date: Thu, 16 Jun 2016 11:14:14 +0300 Subject: [PATCH 014/156] pickle_async_update should create tmp_dir While creating a probe test for the expirer daemon, I found the following error scenario: 1. Introduce a new object server. Initially it doesn't have a tmp_dir. 2. Have the object-replicator replicate some objects, one of them with an expiration (X-Delete-At). 3. Send a DELETE request for the expired object. While beginning to process the DELETE request, the fresh object server still doesn't have a tmp_dir created. Since the object has an old expiration value, the object server will first call "delete_at_update", before creating a tombstone. delete_at_update then must create an async_pending, which will lead to an IO error, since tmp_dir doesn't exist. As said, I have witnessed this in practice in the probe test I wrote at https://review.openstack.org/#/c/326903/. This patch changes pickle_async_update behavior to create tmp_dir, in case it doesn't exist. 
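
Roughly, the fix amounts to the following simplified sketch (this is
not the actual diskfile code; the names are illustrative, and the
stage-then-rename dance stands in for swift.common.utils.write_pickle):

    import os
    import pickle
    import tempfile

    def write_pickle_sketch(obj, dest, tmp_dir):
        # Stage the pickle in tmp_dir, then rename it into place.
        # This is the step that fails with ENOENT when a freshly
        # replicated device has no tmp_dir yet.
        fd, tmp_path = tempfile.mkstemp(dir=tmp_dir, suffix='.tmp')
        with os.fdopen(fd, 'wb') as fp:
            pickle.dump(obj, fp)
        os.rename(tmp_path, dest)

    def pickle_async_update_sketch(async_dir, tmp_dir, obj, name):
        for d in (tmp_dir, async_dir):
            if not os.path.exists(d):
                os.makedirs(d)  # creating tmp_dir is what this patch adds
        write_pickle_sketch(obj, os.path.join(async_dir, name), tmp_dir)

Without the makedirs() call on tmp_dir, mkstemp() raises OSError
(ENOENT) on a fresh device, which is exactly the failure the probe
test exposed.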
Change-Id: I88b0e5f75a2a28d6880694ff327ac2763c816d24 --- swift/obj/diskfile.py | 4 +++- test/unit/obj/test_diskfile.py | 4 ---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 49deb36f67..7f9135f229 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -1115,12 +1115,14 @@ class BaseDiskFileManager(object): """ device_path = self.construct_dev_path(device) async_dir = os.path.join(device_path, get_async_dir(policy)) + tmp_dir = os.path.join(device_path, get_tmp_dir(policy)) + mkdirs(tmp_dir) ohash = hash_path(account, container, obj) write_pickle( data, os.path.join(async_dir, ohash[-3:], ohash + '-' + Timestamp(timestamp).internal), - os.path.join(device_path, get_tmp_dir(policy))) + tmp_dir) self.logger.increment('async_pendings') def get_diskfile(self, device, partition, account, container, obj, diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 2a18478087..73a46b168a 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -246,10 +246,6 @@ class TestDiskFileModuleMethods(unittest.TestCase): self.assertFalse(os.path.isdir(tmp_path)) pickle_args = (self.existing_device, 'a', 'c', 'o', 'data', 0.0, policy) - # async updates don't create their tmpdir on their own - self.assertRaises(OSError, self.df_mgr.pickle_async_update, - *pickle_args) - os.makedirs(tmp_path) # now create a async update self.df_mgr.pickle_async_update(*pickle_args) # check tempdir From 5885d97b7df787ccd64777174b59c48e6dbfaa06 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Fri, 17 Jun 2016 12:24:57 -0700 Subject: [PATCH 015/156] added note to testFileSizeLimit functional test Change-Id: I0323ff2511506c354db3416f1b37ede772acaedb --- test/functional/tests.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/functional/tests.py b/test/functional/tests.py index d083aa10c2..1f9e9b4504 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -1861,6 +1861,16 @@ class TestFile(Base): else: return False + # This loop will result in fallocate calls for 4x the limit + # (minus 111 bytes). With fallocate turned on in the object servers, + # this may fail if you don't have 4x the limit available on your + # data drives. + + # Note that this test does not actually send any data to the system. + # All it does is ensure that a response (success or failure) comes + # back within 3 seconds. For the successful tests (size smaller + # than limit), the cluster will log a 499. + for i in (limit - 100, limit - 10, limit - 1, limit, limit + 1, limit + 10, limit + 100): From c0217a4845e2ea780dc4dcb61877e604bc488729 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Tue, 21 Jun 2016 07:56:35 +0200 Subject: [PATCH 016/156] Update Sphinx version The api-ref document needs a newer sphinx version, allow a 1.2 Sphinx version to be used - like it's used in global-requirements.txt. 
Change-Id: I9183cc56753fbe7e41206c6a9081899df5c3919a Needed-By: Ifebc65b188c4f2ba35b61c0deae5ec24401df7f9 --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 0c6e9fe2cc..3525d03615 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -9,7 +9,7 @@ nose nosexcover nosehtmloutput oslosphinx -sphinx>=1.1.2,<1.2 +sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 # BSD os-testr>=0.4.1 mock>=1.0 python-swiftclient From 4b1387968078bde6dcaeace5e390478d93b1780a Mon Sep 17 00:00:00 2001 From: Cloud User Date: Wed, 4 May 2016 02:00:53 +0000 Subject: [PATCH 017/156] Adds migrated API reference files This brings in RST plus YAML files, migrated from the source for [0]. The migration explanation is found on the openstack-dev mailing list [1]. Project instruction is in the OpenStack Documentation Contributor Guide [2]. A patch for publishing this source is in [3]. The conf.py and the tox environment are standard across other projects. [0]http://developer.openstack.org/api-ref-objectstorage-v1.html [1]http://lists.openstack.org/pipermail/openstack-dev/2016-May/093765.html [2]http://docs.openstack.org/contributor-guide/api-guides.html [3]https://review.openstack.org/#/c/313015/ Change-Id: Ifebc65b188c4f2ba35b61c0deae5ec24401df7f9 --- api-ref/source/conf.py | 221 ++++ api-ref/source/index.rst | 13 + api-ref/source/parameters.yaml | 961 ++++++++++++++++++ ...ount-containers-list-http-request-json.txt | 1 + ...count-containers-list-http-request-xml.txt | 2 + ...unt-containers-list-http-response-json.txt | 11 + ...ount-containers-list-http-response-xml.txt | 11 + .../account-containers-list-response.json | 12 + .../account-containers-list-response.xml | 13 + .../samples/capabilities-list-response.json | 7 + .../samples/containers-list-http-request.txt | 3 + .../samples/containers-list-http-response.txt | 9 + .../endpoints-list-response-headers.json | 14 + .../samples/endpoints-list-response.json | 8 + api-ref/source/samples/goodbyeworld.txt | 1 + api-ref/source/samples/helloworld.txt | 1 + .../objects-list-http-response-json.txt | 10 + .../objects-list-http-response-xml.txt | 10 + .../source/samples/objects-list-response.json | 16 + .../source/samples/objects-list-response.xml | 17 + api-ref/source/storage-account-services.inc | 380 +++++++ api-ref/source/storage-container-services.inc | 503 +++++++++ api-ref/source/storage-object-services.inc | 687 +++++++++++++ api-ref/source/storage_endpoints.inc | 37 + api-ref/source/storage_info.inc | 41 + test-requirements.txt | 1 + tox.ini | 16 + 27 files changed, 3006 insertions(+) create mode 100644 api-ref/source/conf.py create mode 100644 api-ref/source/index.rst create mode 100644 api-ref/source/parameters.yaml create mode 100644 api-ref/source/samples/account-containers-list-http-request-json.txt create mode 100644 api-ref/source/samples/account-containers-list-http-request-xml.txt create mode 100644 api-ref/source/samples/account-containers-list-http-response-json.txt create mode 100644 api-ref/source/samples/account-containers-list-http-response-xml.txt create mode 100644 api-ref/source/samples/account-containers-list-response.json create mode 100644 api-ref/source/samples/account-containers-list-response.xml create mode 100644 api-ref/source/samples/capabilities-list-response.json create mode 100644 api-ref/source/samples/containers-list-http-request.txt create mode 100644 api-ref/source/samples/containers-list-http-response.txt create mode 100644 
api-ref/source/samples/endpoints-list-response-headers.json create mode 100644 api-ref/source/samples/endpoints-list-response.json create mode 100644 api-ref/source/samples/goodbyeworld.txt create mode 100644 api-ref/source/samples/helloworld.txt create mode 100644 api-ref/source/samples/objects-list-http-response-json.txt create mode 100644 api-ref/source/samples/objects-list-http-response-xml.txt create mode 100644 api-ref/source/samples/objects-list-response.json create mode 100644 api-ref/source/samples/objects-list-response.xml create mode 100644 api-ref/source/storage-account-services.inc create mode 100644 api-ref/source/storage-container-services.inc create mode 100644 api-ref/source/storage-object-services.inc create mode 100644 api-ref/source/storage_endpoints.inc create mode 100644 api-ref/source/storage_info.inc diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py new file mode 100644 index 0000000000..e01012aeb0 --- /dev/null +++ b/api-ref/source/conf.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# swift documentation build configuration file +# +# This file is execfile()d with the current directory set to +# its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +from swift import __version__ +import subprocess +import sys +import warnings + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../../')) +sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath('./')) + +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. + +extensions = [ + 'os_api_ref', + 'oslosphinx', +] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Object Storage API Reference' +copyright = u'2010-present, OpenStack Foundation' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = __version__.rsplit('.', 1)[0] +# The full version, including alpha/beta/rc tags. +release = __version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+# +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# The reST default role (used for this markup: `text`) to use +# for all documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = False + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for man page output ---------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' +git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", + "-n1"] +try: + html_last_updated_fmt = subprocess.Popen( + git_cmd, stdout=subprocess.PIPE).communicate()[0] +except OSError: + warnings.warn('Cannot get last updated time from git repository. ' + 'Not setting "html_last_updated_fmt".') + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. 
+# html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'swiftdoc' + + +# -- Options for LaTeX output ------------------------------------------------- + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', 'swift.tex', u'OpenStack Object Storage API Documentation', + u'OpenStack Foundation', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst new file mode 100644 index 0000000000..5b4e643f1d --- /dev/null +++ b/api-ref/source/index.rst @@ -0,0 +1,13 @@ +:tocdepth: 2 + +=================== + Object Storage API +=================== + +.. rest_expand_all:: + +.. include:: storage-account-services.inc +.. include:: storage_endpoints.inc +.. include:: storage-object-services.inc +.. include:: storage-container-services.inc +.. include:: storage_info.inc diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml new file mode 100644 index 0000000000..9213ada101 --- /dev/null +++ b/api-ref/source/parameters.yaml @@ -0,0 +1,961 @@ +# variables in header +Accept: + description: | + Instead of using the ``format`` query parameter, + set this header to ``application/json``, ``application/xml``, or + ``text/xml``. + in: header + required: false + type: string +Accept-Ranges: + description: | + The type of ranges that the object accepts. + in: header + required: true + type: string +Content-Disposition: + description: | + If set, specifies the override behavior for the + browser. For example, this header might specify that the browser + use a download program to save this file rather than show the + file, which is the default. + in: header + required: false + type: string +Content-Disposition_1: + description: | + If set, specifies the override behavior for the + browser. For example, this header might specify that the browser + use a download program to save this file rather than show the + file, which is the default. If not set, this header is not + returned by this operation. + in: header + required: false + type: string +Content-Encoding: + description: | + If set, the value of the ``Content-Encoding`` + metadata. + in: header + required: false + type: string +Content-Encoding_1: + description: | + If set, the value of the ``Content-Encoding`` + metadata. If not set, the operation does not return this header. + in: header + required: false + type: string +Content-Length: + description: | + If the operation succeeds, this value is zero + (0). 
If the operation fails, this value is the length of the error + text in the response body. + in: header + required: true + type: string +Content-Length_1: + description: | + Set to the length of the object content. Do not + set if chunked transfer encoding is being used. + in: header + required: false + type: integer +Content-Length_2: + description: | + The length of the response body that contains the + list of names. If the operation fails, this value is the length of + the error text in the response body. + in: header + required: true + type: string +Content-Length_3: + description: | + HEAD operations do not return content. The + ``Content-Length`` header value is not the size of the response + body but is the size of the object, in bytes. + in: header + required: true + type: string +Content-Length_4: + description: | + The length of the object content in the response + body, in bytes. + in: header + required: true + type: string +Content-Type: + description: | + Changes the MIME type for the object. + in: header + required: false + type: string +Content-Type_1: + description: | + If the operation fails, this value is the MIME + type of the error text in the response body. + in: header + required: true + type: string +Content-Type_2: + description: | + The MIME type of the object. + in: header + required: true + type: string +Content-Type_3: + description: | + The MIME type of the list of names. If the + operation fails, this value is the MIME type of the error text in + the response body. + in: header + required: true + type: string +Date: + description: | + The transaction date and time. + + The date and time stamp format is `ISO 8601 + `_: + + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + For example, ``2015-08-27T09:49:58-05:00``. + + The ``±hh:mm`` value, if included, is the time zone as an offset + from UTC. In the previous example, the offset value is ``-05:00``. + + A ``null`` value indicates that the token never expires. + in: header + required: true + type: string +Destination: + description: | + The container and object name of the destination + object in the form of ``/container/object``. You must UTF-8-encode + and then URL-encode the names of the destination container and + object before you include them in this header. + in: header + required: true + type: string +ETag: + description: | + The MD5 checksum of the copied object content. + The value is not quoted. + in: header + required: true + type: string +ETag_1: + description: | + The MD5 checksum value of the request body. For + example, the MD5 checksum value of the object content. You are + strongly recommended to compute the MD5 checksum value of object + content and include it in the request. This enables the Object + Storage API to check the integrity of the upload. The value is not + quoted. + in: header + required: false + type: string +ETag_2: + description: | + For objects smaller than 5 GB, this value is the + MD5 checksum of the object content. The value is not quoted. For + manifest objects, this value is the MD5 checksum of the + concatenated string of MD5 checksums and ETags for each of the + segments in the manifest, and not the MD5 checksum of the content + that was downloaded. Also the value is enclosed in double-quote + characters. You are strongly recommended to compute the MD5 + checksum of the response body as it is received and compare this + value with the one in the ETag header. If they differ, the content + was corrupted, so retry the operation. 
+ in: header + required: true + type: string +If-Match: + description: | + See `Request for Comments: 2616 + `_. + in: header + required: false + type: string +If-Modified-Since: + description: | + See `Request for Comments: 2616 + `_. + in: header + required: false + type: string +If-None-Match: + description: | + In combination with ``Expect: 100-Continue``, + specify an ``"If- None-Match: *"`` header to query whether the + server already has a copy of the object before any data is sent. + in: header + required: false + type: string +If-Unmodified-Since: + description: | + See `Request for Comments: 2616 + `_. + in: header + required: false + type: string +Last-Modified: + description: | + The date and time when the object was created or its metadata was + changed. + + The date and time stamp format is `ISO 8601 + `_: + + :: + + CCYY-MM-DDThh:mm:ss±hh:mm + + For example, ``2015-08-27T09:49:58-05:00``. + + The ``±hh:mm`` value, if included, is the time zone as an offset + from UTC. In the previous example, the offset value is ``-05:00``. + in: header + required: true + type: string +Range: + description: | + The ranges of content to get. You can use the + ``Range`` header to get portions of data by using one or more + range specifications. To specify many ranges, separate the range + specifications with a comma. The types of range specifications + are: - **Byte range specification**. Use FIRST_BYTE_OFFSET to + specify the start of the data range, and LAST_BYTE_OFFSET to + specify the end. You can omit the LAST_BYTE_OFFSET and if you + do, the value defaults to the offset of the last byte of data. + - **Suffix byte range specification**. Use LENGTH bytes to specify + the length of the data range. The following forms of the header + specify the following ranges of data: - ``Range: bytes=-5``. The + last five bytes. - ``Range: bytes=10-15``. The five bytes of data + after a 10-byte offset. - ``Range: bytes=10-15,-5``. A multi- + part response that contains the last five bytes and the five + bytes of data after a 10-byte offset. The ``Content-Type`` + response header contains ``multipart/byteranges``. - ``Range: + bytes=4-6``. Bytes 4 to 6 inclusive. - ``Range: bytes=2-2``. Byte + 2, the third byte of the data. - ``Range: bytes=6-``. Byte 6 and + after. - ``Range: bytes=1-3,2-5``. A multi-part response that + contains bytes 1 to 3 inclusive, and bytes 2 to 5 inclusive. The + ``Content-Type`` response header contains + ``multipart/byteranges``. + in: header + required: false + type: string +Transfer-Encoding: + description: | + Set to ``chunked`` to enable chunked transfer + encoding. If used, do not set the ``Content-Length`` header to a + non-zero value. + in: header + required: false + type: string +X-Account-Bytes-Used: + description: | + The total number of bytes that are stored in + Object Storage for the account. + in: header + required: true + type: integer +X-Account-Container-Count: + description: | + The number of containers. + in: header + required: true + type: integer +X-Account-Meta-name: + description: | + The custom account metadata item, where + ``{name}`` is the name of the metadata item. One ``X-Account- + Meta- {name}`` response header appears for each metadata item (for + each ``{name}``). + in: header + required: false + type: string +X-Account-Meta-name_1: + description: | + The account metadata. The ``{name}`` is the name + of metadata item that you want to add, update, or delete. To + delete this item, send an empty value in this header. 
You must + specify an ``X-Account-Meta- {name}`` header for each metadata + item (for each ``{name}``) that you want to add, update, or + delete. + in: header + required: false + type: string +X-Account-Meta-Temp-URL-Key: + description: | + The secret key value for temporary URLs. If not + set, this header is not returned in the response. + in: header + required: false + type: string +X-Account-Meta-Temp-URL-Key-2: + description: | + A second secret key value for temporary URLs. If + not set, this header is not returned in the response. + The second key enables you to rotate keys by having + two active keys at the same time. + in: header + required: false + type: string +X-Account-Object-Count: + description: | + The number of objects in the account. + in: header + required: true + type: integer +X-Auth-Token: + description: | + Authentication token. If you omit this header, + your request fails unless the account owner has granted you access + through an access control list (ACL). + in: header + required: false + type: string +X-Auth-Token_1: + description: | + Authentication token. + in: header + required: true + type: string +X-Container-Bytes-Used: + description: | + The total number of bytes used. + in: header + required: true + type: integer +X-Container-Meta-Access-Control-Allow-Origin: + description: | + Originating URLs allowed to make cross-origin + requests (CORS), separated by spaces. This heading applies to the + container only, and all objects within the container with this + header applied are CORS-enabled for the allowed origin URLs. A + browser (user-agent) typically issues a `preflighted request + `_ , which is an OPTIONS call + that verifies the origin is allowed to make the request. The + Object Storage service returns 200 if the originating URL is + listed in this header parameter, and issues a 401 if the + originating URL is not allowed to make a cross-origin request. + Once a 200 is returned, the browser makes a second request to the + Object Storage service to retrieve the CORS-enabled object. + in: header + required: false + type: string +X-Container-Meta-Access-Control-Expose-Headers: + description: | + Headers the Object Storage service exposes to the + browser (technically, through the ``user-agent`` setting), in the + request response, separated by spaces. By default the Object + Storage service returns the following values for this header: - + All “simple response headers” as listed on + `http://www.w3.org/TR/cors/#simple-response-header + `_. - The + headers ``etag``, ``x-timestamp``, ``x-trans-id``. - All metadata + headers (``X-Container-Meta-*`` for containers and ``X-Object- + Meta-*`` for objects) headers listed in ``X-Container- Meta- + Access-Control-Expose-Headers``. + in: header + required: false + type: string +X-Container-Meta-Access-Control-Max-Age: + description: | + Maximum time for the origin to hold the preflight + results. A browser may make an OPTIONS call to verify the origin + is allowed to make the request. Set the value to an integer number + of seconds after the time that the request was received. + in: header + required: false + type: string +X-Container-Meta-name: + description: | + The container metadata, where ``{name}`` is the + name of metadata item. You must specify an ``X-Container-Meta- + {name}`` header for each metadata item (for each ``{name}``) that + you want to add or update. 
+ in: header + required: false + type: string +X-Container-Meta-name_1: + description: | + The custom container metadata item, where + ``{name}`` is the name of the metadata item. One ``X-Container- + Meta- {name}`` response header appears for each metadata item (for + each ``{name}``). + in: header + required: true + type: string +X-Container-Meta-Quota-Bytes: + description: | + Sets maximum size of the container, in bytes. + Typically these values are set by an administrator. Returns a 413 + response (request entity too large) when an object PUT operation + exceeds this quota value. + in: header + required: false + type: string +X-Container-Meta-Quota-Count: + description: | + Sets maximum object count of the container. + Typically these values are set by an administrator. Returns a 413 + response (request entity too large) when an object PUT operation + exceeds this quota value. + in: header + required: false + type: string +X-Container-Meta-Temp-URL-Key: + description: | + The secret key value for temporary URLs. + in: header + required: false + type: string +X-Container-Meta-Temp-URL-Key-2: + description: | + A second secret key value for temporary URLs. The + second key enables you to rotate keys by having two active keys at + the same time. + in: header + required: false + type: string +X-Container-Meta-Web-Directory-Type: + description: | + Sets the content-type of directory marker + objects. If the header is not set, default is + ``application/directory``. Directory marker objects are 0-byte + objects that represent directories to create a simulated + hierarchical structure. For example, if you set ``"X-Container- + Meta-Web-Directory- Type: text/directory"``, Object Storage treats + 0-byte objects with a content-type of ``text/directory`` as + directories rather than objects. + in: header + required: false + type: string +X-Container-Object-Count: + description: | + The number of objects. + in: header + required: true + type: integer +X-Container-Read: + description: | + Sets a container access control list (ACL) that grants read access. + Container ACLs are available on any Object Storage cluster, and are + enabled by container rather than by cluster. + + To set the container read ACL: + + .. code-block:: bash + + $ curl -X {PUT|POST} -i -H "X-Auth-Token: TOKEN" -H \ + "X-Container-Read: ACL" STORAGE_URL/CONTAINER + + For example: + + .. code-block:: bash + + $ curl -X PUT -i \ + -H "X-Auth-Token: 0101010101" \ + -H "X-Container-Read: .r:*" \ + http://swift.example.com/v1/AUTH_bob/read_container + + In the command, specify the ACL in the ``X-Container-Read`` header, + as follows: + + - ``.r:*`` All referrers. + + - ``.r:example.com,swift.example.com`` Comma-separated list of + referrers. + + - ``.rlistings`` Container listing access. + + - ``AUTH_username`` Access to a user who authenticates through a + legacy or non-OpenStack-Identity-based authentication system. + + - ``LDAP_`` Access to all users who authenticate through an LDAP- + based legacy or non-OpenStack-Identity-based authentication + system. + in: header + required: false + type: string +X-Container-Read_1: + description: | + The ACL that grants read access. If not set, this + header is not returned by this operation. + in: header + required: false + type: string +X-Container-Sync-Key: + description: | + Sets the secret key for container + synchronization. If you remove the secret key, synchronization is + halted. 
+ in: header + required: false + type: string +X-Container-Sync-Key_1: + description: | + The secret key for container synchronization. If + not set, this header is not returned by this operation. + in: header + required: false + type: string +X-Container-Sync-To: + description: | + Sets the destination for container + synchronization. Used with the secret key indicated in the ``X + -Container-Sync-Key`` header. If you want to stop a container from + synchronizing, send a blank value for the ``X-Container-Sync-Key`` + header. + in: header + required: false + type: string +X-Container-Sync-To_1: + description: | + The destination for container synchronization. If + not set, this header is not returned by this operation. + in: header + required: false + type: string +X-Container-Write: + description: | + Sets an ACL that grants write access. + in: header + required: false + type: string +X-Container-Write_1: + description: | + The ACL that grants write access. If not set, + this header is not returned by this operation. + in: header + required: false + type: string +X-Copied-From: + description: | + For a copied object, shows the container and + object name from which the new object was copied. The value is in + the ``{container}/{object}`` format. + in: header + required: false + type: string +X-Copied-From-Last-Modified: + description: | + For a copied object, the date and time in `UNIX + Epoch time stamp format + `_ when the container and + object name from which the new object was copied was last + modified. For example, ``1440619048`` is equivalent to ``Mon, + Wed, 26 Aug 2015 19:57:28 GMT``. + in: header + required: false + type: integer +X-Copy-From: + description: | + If set, this is the name of an object used to + create the new object by copying the ``X-Copy-From`` object. The + value is in form ``{container}/{object}``. You must UTF-8-encode + and then URL-encode the names of the container and object before + you include them in the header. Using PUT with ``X-Copy-From`` + has the same effect as using the COPY operation to copy an object. + Using ``Range`` header with ``X-Copy-From`` will create a new + partial copied object with bytes set by ``Range``. + in: header + required: false + type: string +X-Delete-After: + description: | + The number of seconds after which the system + removes the object. Internally, the Object Storage system stores + this value in the ``X -Delete-At`` metadata item. + in: header + required: false + type: integer +X-Delete-At: + description: | + The date and time in `UNIX Epoch time stamp + format `_ when the system + removes the object. For example, ``1440619048`` is equivalent to + ``Mon, Wed, 26 Aug 2015 19:57:28 GMT``. + in: header + required: false + type: integer +X-Delete-At_1: + description: | + If set, the date and time in `UNIX Epoch time + stamp format `_ when the + system deletes the object. For example, ``1440619048`` is + equivalent to ``Mon, Wed, 26 Aug 2015 19:57:28 GMT``. If not set, + this operation does not return this header. + in: header + required: false + type: integer +X-Detect-Content-Type: + description: | + If set to ``true``, Object Storage guesses the + content type based on the file extension and ignores the value + sent in the ``Content- Type`` header, if present. + in: header + required: false + type: boolean +X-Fresh-Metadata: + description: | + Enables object creation that omits existing user + metadata. If set to ``true``, the COPY request creates an object + without existing user metadata. Default value is ``false``. 
+ in: header + required: false + type: boolean +X-Newest: + description: | + If set to true , Object Storage queries all + replicas to return the most recent one. If you omit this header, + Object Storage responds faster after it finds one valid replica. + Because setting this header to true is more expensive for the back + end, use it only when it is absolutely needed. + in: header + required: false + type: boolean +X-Object-Manifest: + description: | + Set to specify that this is a dynamic large + object manifest object. The value is the container and object name + prefix of the segment objects in the form ``container/prefix``. + You must UTF-8-encode and then URL-encode the names of the + container and prefix before you include them in this header. + in: header + required: false + type: string +X-Object-Manifest_1: + description: | + If set, to this is a dynamic large object + manifest object. The value is the container and object name prefix + of the segment objects in the form ``container/prefix``. + in: header + required: false + type: string +X-Object-Meta-name: + description: | + The object metadata, where ``{name}`` is the name + of the metadata item. You must specify an ``X-Object-Meta- + {name}`` header for each metadata ``{name}`` item that you want to + add or update. + in: header + required: false + type: string +X-Object-Meta-name_1: + description: | + The custom object metadata item, where ``{name}`` + is the name of the metadata item. One ``X-Object-Meta- {name}`` + response header appears for each metadata ``{name}`` item. + in: header + required: true + type: string +X-Remove-Container-name: + description: | + Removes the metadata item named ``{name}``. For + example, ``X -Remove-Container-Read`` removes the ``X-Container- + Read`` metadata item. + in: header + required: false + type: string +X-Remove-Versions-Location: + description: | + Set to any value to disable versioning. + in: header + required: false + type: string +X-Static-Large-Object: + description: | + Set to ``true`` if this object is a static large + object manifest object. + in: header + required: true + type: boolean +X-Timestamp: + description: | + The date and time in `UNIX Epoch time stamp + format `_ when the + account, container, or object was initially created as a current + version. For example, ``1440619048`` is equivalent to ``Mon, Wed, + 26 Aug 2015 19:57:28 GMT``. + in: header + required: true + type: integer +X-Trans-Id: + description: | + A unique transaction ID for this request. Your + service provider might need this value if you report a problem. + in: header + required: true + type: string +X-Trans-Id-Extra: + description: | + Extra transaction information. Use the ``X-Trans- + Id-Extra`` request header to include extra information to help you + debug any errors that might occur with large object upload and + other Object Storage transactions. Object Storage appends the + first 32 characters of the ``X-Trans-Id- Extra`` request header + value to the transaction ID value in the generated ``X-Trans-Id`` + response header. You must UTF-8-encode and then URL-encode the + extra transaction information before you include it in the ``X + -Trans-Id-Extra`` request header. For example, you can include + extra transaction information when you upload `large objects + `_ such as images. When + you upload each segment and the manifest, include the same value + in the ``X-Trans-Id-Extra`` request header. 
+    If an error occurs,
+    you can find all requests that are related to the large object
+    upload in the Object Storage logs. You can also use
+    ``X-Trans-Id-Extra`` strings to help operators debug requests
+    that fail to receive responses. The operator can search for the
+    extra information in the logs.
+  in: header
+  required: false
+  type: string
+X-Versions-Location:
+  description: |
+    Enables versioning on this container. The value
+    is the name of another container. You must UTF-8-encode and then
+    URL-encode the name before you include it in the header. To
+    disable versioning, set the header to an empty string.
+  in: header
+  required: false
+  type: string
+X-Versions-Location_1:
+  description: |
+    Enables versioning on this container. The value
+    is the name of another container. You must UTF-8-encode and then
+    URL-encode the name before you include it in the header. To
+    disable versioning, set the header to an empty string.
+  in: header
+  required: true
+  type: string
+
+# variables in path
+account:
+  description: |
+    The unique name for the account. An account is
+    also known as the project or tenant.
+  in: path
+  required: false
+  type: string
+container:
+  description: |
+    The unique name for the container. The container
+    name must be from 1 to 256 characters long and can start with any
+    character and contain any pattern. Character set must be UTF-8.
+    The container name cannot contain a slash (``/``) character
+    because this character delimits the container and object name. For
+    example, ``/account/container/object``.
+  in: path
+  required: false
+  type: string
+object:
+  description: |
+    The unique name for the object.
+  in: path
+  required: false
+  type: string
+
+# variables in query
+delimiter:
+  description: |
+    Delimiter value, which returns the object names
+    that are nested in the container. If you do not set a prefix and
+    set the delimiter to ``/``, you may get unexpected results: all
+    the objects are returned instead of only those with the delimiter
+    set.
+  in: query
+  required: false
+  type: string
+end_marker:
+  description: |
+    For a string value, ``x``, returns container names
+    that are less than the marker value.
+  in: query
+  required: false
+  type: string
+filename:
+  description: |
+    Overrides the default file name. Object Storage
+    generates a default file name for GET temporary URLs that is based
+    on the object name. Object Storage returns this value in the
+    ``Content-Disposition`` response header. Browsers can interpret
+    this file name value as a file attachment to save. For more
+    information about temporary URLs, see `Temporary URL middleware
+    `_.
+  in: query
+  required: false
+  type: string
+format:
+  description: |
+    The response format. Valid values are ``json``,
+    ``xml``, or ``plain``. The default is ``plain``. If you append
+    the ``format=xml`` or ``format=json`` query parameter to the
+    storage account URL, the response shows extended container
+    information serialized in that format. If you append the
+    ``format=plain`` query parameter, the response lists the container
+    names separated by newlines.
+  in: query
+  required: false
+  type: string
+limit:
+  description: |
+    For an integer value ``n``, limits the number of
+    results to ``n``.
+  in: query
+  required: false
+  type: integer
+marker:
+  description: |
+    For a string value, ``x``, returns container names
+    that are greater than the marker value.
+  in: query
+  required: false
+  type: string
+multipart-manifest:
+  description: |
+    If ``?multipart-manifest=put``, the object is a
+    static large object manifest and the body contains the manifest.
+  in: query
+  required: false
+  type: string
+multipart-manifest_1:
+  description: |
+    If you include the ``multipart-manifest=delete``
+    query parameter and the object is a static large object, the
+    segment objects and manifest object are deleted. If you omit the
+    ``multipart-manifest=delete`` query parameter and the object is a
+    static large object, the manifest object is deleted but the
+    segment objects are not deleted. For a bulk delete, the response
+    body looks the same as it does for a normal bulk delete. In
+    contrast, a plain object DELETE response has an empty body.
+  in: query
+  required: false
+  type: string
+multipart-manifest_2:
+  description: |
+    If you include the ``multipart-manifest=get``
+    query parameter and the object is a large object, the object
+    contents are not returned. Instead, the manifest is returned in
+    the ``X-Object-Manifest`` response header for dynamic large
+    objects or in the response body for static large objects.
+  in: query
+  required: false
+  type: string
+path:
+  description: |
+    For a string value, returns the object names that
+    are nested in the pseudo path.
+  in: query
+  required: false
+  type: string
+prefix:
+  description: |
+    Prefix value. Named items in the response begin
+    with this value.
+  in: query
+  required: false
+  type: string
+swiftinfo_expires:
+  description: |
+    Filters the response by the expiration date and
+    time in `UNIX Epoch time stamp format
+    `_. For example,
+    ``1440619048`` is equivalent to ``Wed, 26 Aug 2015 19:57:28
+    GMT``.
+  in: query
+  required: false
+  type: integer
+swiftinfo_sig:
+  description: |
+    A hash-based message authentication code (HMAC)
+    that enables access to administrator-only information. To use this
+    parameter, the ``swiftinfo_expires`` parameter is also required.
+  in: query
+  required: false
+  type: string
+temp_url_expires:
+  description: |
+    The date and time in `UNIX Epoch time stamp
+    format `_ when the
+    signature for temporary URLs expires. For example, ``1440619048``
+    is equivalent to ``Wed, 26 Aug 2015 19:57:28 GMT``. For more
+    information about temporary URLs, see `Temporary URL middleware
+    `_.
+  in: query
+  required: true
+  type: integer
+temp_url_sig:
+  description: |
+    Used with temporary URLs to sign the request with
+    an HMAC-SHA1 cryptographic signature that defines the allowed HTTP
+    method, expiration date, full path to the object, and the secret
+    key for the temporary URL. For more information about temporary
+    URLs, see `Temporary URL middleware
+    `_.
+  in: query
+  required: true
+  type: string
+
+# variables in body
+bytes:
+  description: |
+    The total number of bytes that are stored in
+    Object Storage for the account.
+  in: body
+  required: true
+  type: integer
+content_type:
+  description: |
+    The content type of the object.
+  in: body
+  required: true
+  type: string
+count:
+  description: |
+    The number of objects in the container.
+  in: body
+  required: true
+  type: integer
+hash:
+  description: |
+    The MD5 checksum value of the object content.
+  in: body
+  required: true
+  type: string
+last_modified:
+  description: |
+    The date and time when the object was last modified.
+
+    The date and time stamp format is `ISO 8601
+    `_:
+
+    ::
+
+       CCYY-MM-DDThh:mm:ss±hh:mm
+
+    For example, ``2015-08-27T09:49:58-05:00``.
+
+    The ``±hh:mm`` value, if included, is the time zone as an offset
+    from UTC. In the previous example, the offset value is ``-05:00``.
+  in: body
+  required: true
+  type: string
+name:
+  description: |
+    The name of the container.
+  in: body
+  required: true
+  type: string

diff --git a/api-ref/source/samples/account-containers-list-http-request-json.txt b/api-ref/source/samples/account-containers-list-http-request-json.txt
new file mode 100644
index 0000000000..a4b315556d
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-http-request-json.txt
@@ -0,0 +1 @@
+curl -i https://23.253.72.207/v1/$account?format=json -X GET -H "X-Auth-Token: $token"
\ No newline at end of file
diff --git a/api-ref/source/samples/account-containers-list-http-request-xml.txt b/api-ref/source/samples/account-containers-list-http-request-xml.txt
new file mode 100644
index 0000000000..cf255617f2
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-http-request-xml.txt
@@ -0,0 +1,2 @@
+curl -i https://23.253.72.207/v1/$account?format=xml \
+  -X GET -H "X-Auth-Token: $token"
\ No newline at end of file
diff --git a/api-ref/source/samples/account-containers-list-http-response-json.txt b/api-ref/source/samples/account-containers-list-http-response-json.txt
new file mode 100644
index 0000000000..0cdba62a8a
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-http-response-json.txt
@@ -0,0 +1,11 @@
+HTTP/1.1 200 OK
+Content-Length: 96
+X-Account-Object-Count: 1
+X-Timestamp: 1389453423.35964
+X-Account-Meta-Subject: Literature
+X-Account-Bytes-Used: 14
+X-Account-Container-Count: 2
+Content-Type: application/json; charset=utf-8
+Accept-Ranges: bytes
+X-Trans-Id: tx274a77a8975c4a66aeb24-0052d95365
+Date: Fri, 17 Jan 2014 15:59:33 GMT
\ No newline at end of file
diff --git a/api-ref/source/samples/account-containers-list-http-response-xml.txt b/api-ref/source/samples/account-containers-list-http-response-xml.txt
new file mode 100644
index 0000000000..6ad781aaec
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-http-response-xml.txt
@@ -0,0 +1,11 @@
+HTTP/1.1 200 OK
+Content-Length: 262
+X-Account-Object-Count: 1
+X-Timestamp: 1389453423.35964
+X-Account-Meta-Subject: Literature
+X-Account-Bytes-Used: 14
+X-Account-Container-Count: 2
+Content-Type: application/xml; charset=utf-8
+Accept-Ranges: bytes
+X-Trans-Id: tx69f60bc9f7634a01988e6-0052d9544b
+Date: Fri, 17 Jan 2014 16:03:23 GMT
\ No newline at end of file
diff --git a/api-ref/source/samples/account-containers-list-response.json b/api-ref/source/samples/account-containers-list-response.json
new file mode 100644
index 0000000000..4ae34aa4ca
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-response.json
@@ -0,0 +1,12 @@
+[
+    {
+        "count": 0,
+        "bytes": 0,
+        "name": "janeausten"
+    },
+    {
+        "count": 1,
+        "bytes": 14,
+        "name": "marktwain"
+    }
+]
diff --git a/api-ref/source/samples/account-containers-list-response.xml b/api-ref/source/samples/account-containers-list-response.xml
new file mode 100644
index 0000000000..d8f51cfa0d
--- /dev/null
+++ b/api-ref/source/samples/account-containers-list-response.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<account name="my_account">
+    <container>
+        <name>janeausten</name>
+        <count>0</count>
+        <bytes>0</bytes>
+    </container>
+    <container>
+        <name>marktwain</name>
+        <count>1</count>
+        <bytes>14</bytes>
+    </container>
+</account>
diff --git a/api-ref/source/samples/capabilities-list-response.json b/api-ref/source/samples/capabilities-list-response.json
new file mode 100644
index 0000000000..bcc91f7d53
--- /dev/null
+++ b/api-ref/source/samples/capabilities-list-response.json
@@ -0,0 +1,7 @@
+{
+    "swift": {
+        "version": "1.11.0"
+    },
"staticweb": {}, + "tempurl": {} +} diff --git a/api-ref/source/samples/containers-list-http-request.txt b/api-ref/source/samples/containers-list-http-request.txt new file mode 100644 index 0000000000..4101ce80e5 --- /dev/null +++ b/api-ref/source/samples/containers-list-http-request.txt @@ -0,0 +1,3 @@ +GET /{api_version}/{account} HTTP/1.1 +Host: storage.swiftdrive.com +X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb \ No newline at end of file diff --git a/api-ref/source/samples/containers-list-http-response.txt b/api-ref/source/samples/containers-list-http-response.txt new file mode 100644 index 0000000000..43070e5235 --- /dev/null +++ b/api-ref/source/samples/containers-list-http-response.txt @@ -0,0 +1,9 @@ +HTTP/1.1 200 Ok +Date: Thu, 07 Jun 2010 18:57:07 GMT +Content-Type: text/plain; charset=UTF-8 +Content-Length: 32 + +images +movies +documents +backups \ No newline at end of file diff --git a/api-ref/source/samples/endpoints-list-response-headers.json b/api-ref/source/samples/endpoints-list-response-headers.json new file mode 100644 index 0000000000..a6de5f0681 --- /dev/null +++ b/api-ref/source/samples/endpoints-list-response-headers.json @@ -0,0 +1,14 @@ +{ + "endpoints": [ + "http://storage01.swiftdrive.com:6008/d8/583/AUTH_dev/EC_cont1/obj", + "http://storage02.swiftdrive.com:6008/d2/583/AUTH_dev/EC_cont1/obj", + "http://storage02.swiftdrive.com:6006/d3/583/AUTH_dev/EC_cont1/obj", + "http://storage02.swiftdrive.com:6008/d5/583/AUTH_dev/EC_cont1/obj", + "http://storage01.swiftdrive.com:6007/d7/583/AUTH_dev/EC_cont1/obj", + "http://storage02.swiftdrive.com:6007/d4/583/AUTH_dev/EC_cont1/obj", + "http://storage01.swiftdrive.com:6006/d6/583/AUTH_dev/EC_cont1/obj" + ], + "headers": { + "X-Backend-Storage-Policy-Index": "2" + } +} diff --git a/api-ref/source/samples/endpoints-list-response.json b/api-ref/source/samples/endpoints-list-response.json new file mode 100644 index 0000000000..eeba7c1738 --- /dev/null +++ b/api-ref/source/samples/endpoints-list-response.json @@ -0,0 +1,8 @@ +{ + "endpoints": [ + "http://storage02.swiftdrive:6002/d2/617/AUTH_dev", + "http://storage01.swiftdrive:6002/d8/617/AUTH_dev", + "http://storage01.swiftdrive:6002/d11/617/AUTH_dev" + ], + "headers": {} +} diff --git a/api-ref/source/samples/goodbyeworld.txt b/api-ref/source/samples/goodbyeworld.txt new file mode 100644 index 0000000000..aebc9c0c05 --- /dev/null +++ b/api-ref/source/samples/goodbyeworld.txt @@ -0,0 +1 @@ +Goodbye World! \ No newline at end of file diff --git a/api-ref/source/samples/helloworld.txt b/api-ref/source/samples/helloworld.txt new file mode 100644 index 0000000000..6900abf34d --- /dev/null +++ b/api-ref/source/samples/helloworld.txt @@ -0,0 +1 @@ +Hello World Again! 
\ No newline at end of file
diff --git a/api-ref/source/samples/objects-list-http-response-json.txt b/api-ref/source/samples/objects-list-http-response-json.txt
new file mode 100644
index 0000000000..2efe63a3f2
--- /dev/null
+++ b/api-ref/source/samples/objects-list-http-response-json.txt
@@ -0,0 +1,10 @@
+HTTP/1.1 200 OK
+Content-Length: 341
+X-Container-Object-Count: 2
+Accept-Ranges: bytes
+X-Container-Meta-Book: TomSawyer
+X-Timestamp: 1389727543.65372
+X-Container-Bytes-Used: 26
+Content-Type: application/json; charset=utf-8
+X-Trans-Id: tx26377fe5fab74869825d1-0052d6bdff
+Date: Wed, 15 Jan 2014 16:57:35 GMT
\ No newline at end of file
diff --git a/api-ref/source/samples/objects-list-http-response-xml.txt b/api-ref/source/samples/objects-list-http-response-xml.txt
new file mode 100644
index 0000000000..eb17bb2a6a
--- /dev/null
+++ b/api-ref/source/samples/objects-list-http-response-xml.txt
@@ -0,0 +1,10 @@
+HTTP/1.1 200 OK
+Content-Length: 500
+X-Container-Object-Count: 2
+Accept-Ranges: bytes
+X-Container-Meta-Book: TomSawyer
+X-Timestamp: 1389727543.65372
+X-Container-Bytes-Used: 26
+Content-Type: application/xml; charset=utf-8
+X-Trans-Id: txc75ea9a6e66f47d79e0c5-0052d6be76
+Date: Wed, 15 Jan 2014 16:59:35 GMT
\ No newline at end of file
diff --git a/api-ref/source/samples/objects-list-response.json b/api-ref/source/samples/objects-list-response.json
new file mode 100644
index 0000000000..b104d3a9a6
--- /dev/null
+++ b/api-ref/source/samples/objects-list-response.json
@@ -0,0 +1,16 @@
+[
+    {
+        "hash": "451e372e48e0f6b1114fa0724aa79fa1",
+        "last_modified": "2014-01-15T16:41:49.390270",
+        "bytes": 14,
+        "name": "goodbye",
+        "content_type": "application/octet-stream"
+    },
+    {
+        "hash": "ed076287532e86365e841e92bfc50d8c",
+        "last_modified": "2014-01-15T16:37:43.427570",
+        "bytes": 12,
+        "name": "helloworld",
+        "content_type": "application/octet-stream"
+    }
+]
diff --git a/api-ref/source/samples/objects-list-response.xml b/api-ref/source/samples/objects-list-response.xml
new file mode 100644
index 0000000000..07fda614c1
--- /dev/null
+++ b/api-ref/source/samples/objects-list-response.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<container name="marktwain">
+    <object>
+        <name>goodbye</name>
+        <hash>451e372e48e0f6b1114fa0724aa79fa1</hash>
+        <bytes>14</bytes>
+        <content_type>application/octet-stream</content_type>
+        <last_modified>2014-01-15T16:41:49.390270</last_modified>
+    </object>
+    <object>
+        <name>helloworld</name>
+        <hash>ed076287532e86365e841e92bfc50d8c</hash>
+        <bytes>12</bytes>
+        <content_type>application/octet-stream</content_type>
+        <last_modified>2014-01-15T16:37:43.427570</last_modified>
+    </object>
+</container>
diff --git a/api-ref/source/storage-account-services.inc b/api-ref/source/storage-account-services.inc
new file mode 100644
index 0000000000..17f228c543
--- /dev/null
+++ b/api-ref/source/storage-account-services.inc
@@ -0,0 +1,380 @@
+.. -*- rst -*-
+
+========
+Accounts
+========
+
+Lists containers for an account. Creates, updates, shows, and
+deletes account metadata.
+
+Account metadata operations work differently than container and
+object metadata operations. Depending on the contents of your
+POST account metadata request, the Object Storage API updates the
+metadata in one of these ways:
+
+**Account metadata operations**
+
++----------------------------------------------------------+---------------------------------------------------------------+
+| POST request body contains                               | Description                                                   |
++----------------------------------------------------------+---------------------------------------------------------------+
+| A metadata key without a value.                          | The API removes the metadata item from the account.           |
+|                                                          |                                                               |
+| The metadata key already exists for the account.
| | ++----------------------------------------------------------+---------------------------------------------------------------+ +| A metadata key without a value. | The API ignores the metadata key. | +| | | +| The metadata key does not already exist for the account. | | ++----------------------------------------------------------+---------------------------------------------------------------+ +| A metadata key value. | The API updates the metadata key value for the account. | +| | | +| The metadata key already exists for the account. | | ++----------------------------------------------------------+---------------------------------------------------------------+ +| A metadata key value. | The API adds the metadata key and value pair, or item, to the | +| | account. | +| The metadata key does not already exist for the account. | | ++----------------------------------------------------------+---------------------------------------------------------------+ +| One or more account metadata items are omitted. | The API does not change the existing metadata items. | +| | | +| The metadata items already exist for the account. | | ++----------------------------------------------------------+---------------------------------------------------------------+ + + + +For these requests, specifying the ``X-Remove-Account-Meta-*`` +request header for the key with any value is equivalent to +specifying the ``X-Account-Meta-*`` request header with an empty +value. + +Metadata keys must be treated as case-insensitive at all times. +These keys can contain ASCII 7-bit characters that are not control +(0-31) characters, DEL, or a separator character, according to +`HTTP/1.1 `_ . +Also, Object Storage does not support the underscore character, +which it silently converts to a hyphen. + +The metadata values in Object Storage do not follow HTTP/1.1 rules +for character encodings. You must use a UTF-8 encoding to get a +byte array for any string that contains characters that are not in +the 7-bit ASCII 0-127 range. Otherwise, Object Storage returns the +404 response code for ISO-8859-1 characters in the 128-255 range, +which is a direct violation of the HTTP/1.1 `basic rules +`_. + + +Show account details and list containers +======================================== + +.. rest_method:: GET /v1/{account} + +Shows details for an account and lists containers, sorted by name, in the account. + +The sort order for the name is based on a binary comparison, a +single built-in collating sequence that compares string data by +using the SQLite memcmp() function, regardless of text encoding. +See `Collating Sequences +`_. + +Example requests and responses: + +- Show account details and list containers and ask for a JSON + response: + + :: + + curl -i $publicURL?format=json -X GET -H "X-Auth-Token: $token" + + +- List containers and ask for an XML response: + + :: + + curl -i $publicURL?format=xml -X GET -H "X-Auth-Token: $token" + + +The response body returns a list of containers. The default +response (``text/plain``) returns one container per line. + +If you use query parameters to page through a long list of +containers, you have reached the end of the list if the number of +items in the returned list is less than the request ``limit`` +value. The list contains more items if the number of items in the +returned list equals the ``limit`` value. + +When asking for a list of containers and there are none, the +response behavior changes depending on whether the request format +is text, JSON, or XML. 
+For a text response, you get a ``204``, because
+there is no content. However, for a JSON or XML response, you get a
+``200`` with content indicating an empty array.
+
+If the request succeeds, the operation returns one of these status
+codes:
+
+- ``OK (200)``. Success. The response body lists the containers.
+
+- ``No Content (204)``. Success. The response body shows no
+  containers. Either the account has no containers or you are
+  paging through a long list of names by using the ``marker``,
+  ``limit``, or ``end_marker`` query parameter and you have reached
+  the end of the list.
+
+
+Normal response codes: 200
+Error response codes: 204
+
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+   - account: account
+   - limit: limit
+   - marker: marker
+   - end_marker: end_marker
+   - format: format
+   - prefix: prefix
+   - delimiter: delimiter
+   - X-Auth-Token: X-Auth-Token
+   - X-Newest: X-Newest
+   - Accept: Accept
+   - X-Trans-Id-Extra: X-Trans-Id-Extra
+
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+   - Content-Length: Content-Length
+   - X-Account-Meta-name: X-Account-Meta-name
+   - X-Account-Object-Count: X-Account-Object-Count
+   - X-Account-Meta-Temp-URL-Key-2: X-Account-Meta-Temp-URL-Key-2
+   - X-Timestamp: X-Timestamp
+   - X-Account-Meta-Temp-URL-Key: X-Account-Meta-Temp-URL-Key
+   - X-Trans-Id: X-Trans-Id
+   - Date: Date
+   - X-Account-Bytes-Used: X-Account-Bytes-Used
+   - X-Account-Container-Count: X-Account-Container-Count
+   - Content-Type: Content-Type
+   - count: count
+   - bytes: bytes
+   - name: name
+
+
+Response Example
+----------------
+
+.. literalinclude:: samples/account-containers-list-http-response-xml.txt
+   :language: javascript
+
+
+
+
+Create, update, or delete account metadata
+==========================================
+
+.. rest_method:: POST /v1/{account}
+
+Creates, updates, or deletes account metadata.
+
+To create, update, or delete metadata, use the
+``X-Account-Meta-{name}`` request header, where ``{name}`` is the
+name of the metadata item.
+
+Subsequent requests for the same key and value pair overwrite the
+existing value.
+
+To delete a metadata header, send an empty value for that header,
+such as for the ``X-Account-Meta-Book`` header. If the tool you use
+to communicate with Object Storage, such as an older version of
+cURL, does not support empty headers, send the
+``X-Remove-Account-Meta-{name}`` header with an arbitrary value.
+For example, ``X-Remove-Account-Meta-Book: x``. The operation
+ignores the arbitrary value.
+
+If the account already has other custom metadata items, a request
+to create, update, or delete metadata does not affect those items.
+
+This operation does not accept a request body.
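For comparison with the cURL examples that follow, here is a minimal Python sketch using python-swiftclient (an assumed client library, not part of this API; ``public_url`` and ``token`` stand in for ``$publicURL`` and ``$token``):

::

   from swiftclient.client import Connection

   public_url = 'https://swift.example.com/v1/AUTH_test'  # your storage URL
   token = 'AUTH_tk_example'                              # your auth token
   conn = Connection(preauthurl=public_url, preauthtoken=token)

   # Create or update items; each maps to an X-Account-Meta-{name} header.
   conn.post_account(headers={'X-Account-Meta-Book': 'MobyDick',
                              'X-Account-Meta-Subject': 'Literature'})

   # Delete an item by sending it with an empty value.
   conn.post_account(headers={'X-Account-Meta-Subject': ''})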
+ +Example requests and responses: + +- Create account metadata: + + :: + + curl -i $publicURL -X POST -H "X-Auth-Token: $token" -H "X-Account-Meta-Book: MobyDick" -H "X-Account-Meta-Subject: Literature" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx8c2dd6aee35442a4a5646-0052d954fb + Date: Fri, 17 Jan 2014 16:06:19 GMT + + +- Update account metadata: + + :: + + curl -i $publicURL -X POST -H "X-Auth-Token: $token" -H "X-Account-Meta-Subject: AmericanLiterature" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx1439b96137364ab581156-0052d95532 + Date: Fri, 17 Jan 2014 16:07:14 GMT + + +- Delete account metadata: + + :: + + curl -i $publicURL -X POST -H "X-Auth-Token: $token" -H "X-Remove-Account-Meta-Subject: x" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx411cf57701424da99948a-0052d9556f + Date: Fri, 17 Jan 2014 16:08:15 GMT + + +If the request succeeds, the operation returns the ``No Content +(204)`` response code. + +To confirm your changes, issue a show account metadata request. + +Error response codes:204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - X-Auth-Token: X-Auth-Token + - X-Account-Meta-Temp-URL-Key: X-Account-Meta-Temp-URL-Key + - X-Account-Meta-Temp-URL-Key-2: X-Account-Meta-Temp-URL-Key-2 + - X-Account-Meta-name: X-Account-Meta-name + - Content-Type: Content-Type + - X-Detect-Content-Type: X-Detect-Content-Type + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + + +Show account metadata +===================== + +.. rest_method:: HEAD /v1/{account} + +Shows metadata for an account. + +Metadata for the account includes: + +- Number of containers + +- Number of objects + +- Total number of bytes that are stored in Object Storage for the + account + +Because the storage system can store large amounts of data, take +care when you represent the total bytes response as an integer; +when possible, convert it to a 64-bit unsigned integer if your +platform supports that primitive type. + +Do not include metadata headers in this request. + +Show account metadata request: + +:: + + curl -i $publicURL -X HEAD -H "X-Auth-Token: $token" + + + + +:: + + HTTP/1.1 204 No Content + Content-Length: 0 + X-Account-Object-Count: 1 + X-Account-Meta-Book: MobyDick + X-Timestamp: 1389453423.35964 + X-Account-Bytes-Used: 14 + X-Account-Container-Count: 2 + Content-Type: text/plain; charset=utf-8 + Accept-Ranges: bytes + X-Trans-Id: txafb3504870144b8ca40f7-0052d955d4 + Date: Fri, 17 Jan 2014 16:09:56 GMT + + +If the account or authentication token is not valid, the operation +returns the ``Unauthorized (401)`` response code. + +Error response codes:204,401, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - X-Auth-Token: X-Auth-Token + - X-Newest: X-Newest + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. 
rest_parameters:: parameters.yaml + + - Content-Length: Content-Length + - X-Account-Meta-name: X-Account-Meta-name + - X-Account-Object-Count: X-Account-Object-Count + - X-Account-Meta-Temp-URL-Key-2: X-Account-Meta-Temp-URL-Key-2 + - X-Timestamp: X-Timestamp + - X-Account-Meta-Temp-URL-Key: X-Account-Meta-Temp-URL-Key + - X-Trans-Id: X-Trans-Id + - Date: Date + - X-Account-Bytes-Used: X-Account-Bytes-Used + - X-Account-Container-Count: X-Account-Container-Count + - Content-Type: Content-Type + + + + + diff --git a/api-ref/source/storage-container-services.inc b/api-ref/source/storage-container-services.inc new file mode 100644 index 0000000000..6b69ef9d00 --- /dev/null +++ b/api-ref/source/storage-container-services.inc @@ -0,0 +1,503 @@ +.. -*- rst -*- + +========== +Containers +========== + +Lists objects in a container. Creates, shows details for, and +deletes containers. Creates, updates, shows, and deletes container +metadata. + + +Show container details and list objects +======================================= + +.. rest_method:: GET /v1/{account}/{container} + +Shows details for a container and lists objects, sorted by name, in the container. + +Specify query parameters in the request to filter the list and +return a subset of object names. Omit query parameters to return +the complete list of object names that are stored in the container, +up to 10,000 names. The 10,000 maximum value is configurable. To +view the value for the cluster, issue a GET ``/info`` request. + +Example requests and responses: + +- ``OK (200)``. Success. The response body lists the objects. + +- ``No Content (204)``. Success. The response body shows no objects. + Either the container has no objects or you are paging through a + long list of names by using the ``marker``, ``limit``, or + ``end_marker`` query parameter and you have reached the end of + the list. + +If the container does not exist, the call returns the ``Not Found +(404)`` response code. + +The operation returns the ``Range Not Satisfiable (416)`` response +code for any ranged GET requests that specify more than: + +- Fifty ranges. + +- Three overlapping ranges. + +- Eight non-increasing ranges. + + +Normal response codes: 200 +Error response codes:416,404,204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - container: container + - limit: limit + - marker: marker + - end_marker: end_marker + - prefix: prefix + - format: format + - delimiter: delimiter + - path: path + - X-Auth-Token: X-Auth-Token + - X-Newest: X-Newest + - Accept: Accept + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - X-Container-Meta-name: X-Container-Meta-name + - Content-Length: Content-Length + - X-Container-Object-Count: X-Container-Object-Count + - Accept-Ranges: Accept-Ranges + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Bytes-Used: X-Container-Bytes-Used + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Timestamp: X-Timestamp + - X-Trans-Id: X-Trans-Id + - Date: Date + - Content-Type: Content-Type + - hash: hash + - last_modified: last_modified + - bytes: bytes + - name: name + - content_type: content_type + + + +Response Example +---------------- + +.. 
literalinclude:: samples/objects-list-http-response-xml.txt + :language: javascript + + + + + + + +Create container +================ + +.. rest_method:: PUT /v1/{account}/{container} + +Creates a container. + +You do not need to check whether a container already exists before +issuing a PUT operation because the operation is idempotent: It +creates a container or updates an existing container, as +appropriate. + +Example requests and responses: + +- Create a container with no metadata: + + :: + + curl -i $publicURL/steven -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 201 Created + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx7f6b7fa09bc2443a94df0-0052d58b56 + Date: Tue, 14 Jan 2014 19:09:10 GMT + + +- Create a container with metadata: + + :: + + curl -i $publicURL/marktwain -X PUT -H "X-Auth-Token: $token" -H "X-Container-Meta-Book: TomSawyer" + + + + + :: + + HTTP/1.1 201 Created + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx06021f10fc8642b2901e7-0052d58f37 + Date: Tue, 14 Jan 2014 19:25:43 GMT + +Error response codes:201,204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - container: container + - X-Auth-Token: X-Auth-Token + - X-Container-Read: X-Container-Read + - X-Container-Write: X-Container-Write + - X-Container-Sync-To: X-Container-Sync-To + - X-Container-Sync-Key: X-Container-Sync-Key + - X-Versions-Location: X-Versions-Location + - X-Container-Meta-name: X-Container-Meta-name + - X-Container-Meta-Access-Control-Allow-Origin: X-Container-Meta-Access-Control-Allow-Origin + - X-Container-Meta-Access-Control-Max-Age: X-Container-Meta-Access-Control-Max-Age + - X-Container-Meta-Access-Control-Expose-Headers: X-Container-Meta-Access-Control-Expose-Headers + - Content-Type: Content-Type + - X-Detect-Content-Type: X-Detect-Content-Type + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + + + +Create, update, or delete container metadata +============================================ + +.. rest_method:: POST /v1/{account}/{container} + +Creates, updates, or deletes custom metadata for a container. + +To create, update, or delete a custom metadata item, use the ``X +-Container-Meta-{name}`` header, where ``{name}`` is the name of +the metadata item. + +Subsequent requests for the same key and value pair overwrite the +previous value. + +To delete container metadata, send an empty value for that header, +such as for the ``X-Container-Meta-Book`` header. If the tool you +use to communicate with Object Storage, such as an older version of +cURL, does not support empty headers, send the ``X-Remove- +Container-Meta-{name}`` header with an arbitrary value. For +example, ``X-Remove-Container-Meta-Book: x``. The operation ignores +the arbitrary value. + +If the container already has other custom metadata items, a request +to create, update, or delete metadata does not affect those items. 
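A hedged python-swiftclient equivalent of the cURL examples that follow (assuming the library is installed and ``public_url``/``token`` are your endpoint and auth token):

::

   from swiftclient.client import Connection

   conn = Connection(preauthurl=public_url, preauthtoken=token)

   # Create or update a container metadata item.
   conn.post_container('marktwain',
                       headers={'X-Container-Meta-Author': 'MarkTwain'})

   # Remove an item by posting it with an empty value.
   conn.post_container('marktwain',
                       headers={'X-Container-Meta-Author': ''})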
+ +Example requests and responses: + +- Create container metadata: + + :: + + curl -i $publicURL/marktwain -X POST -H "X-Auth-Token: $token" -H "X-Container-Meta-Author: MarkTwain" -H "X-Container-Meta-Web-Directory-Type: text/directory" -H "X-Container-Meta-Century: Nineteenth" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx05dbd434c651429193139-0052d82635 + Date: Thu, 16 Jan 2014 18:34:29 GMT + + +- Update container metadata: + + :: + + curl -i $publicURL/marktwain -X POST -H "X-Auth-Token: $token" -H "X-Container-Meta-Author: SamuelClemens" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: txe60c7314bf614bb39dfe4-0052d82653 + Date: Thu, 16 Jan 2014 18:34:59 GMT + + +- Delete container metadata: + + :: + + curl -i $publicURL/marktwain -X POST -H "X-Auth-Token: $token" -H "X-Remove-Container-Meta-Century: x" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx7997e18da2a34a9e84ceb-0052d826d0 + Date: Thu, 16 Jan 2014 18:37:04 GMT + + +If the request succeeds, the operation returns the ``No Content +(204)`` response code. + +To confirm your changes, issue a show container metadata request. + +Error response codes:204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - container: container + - X-Auth-Token: X-Auth-Token + - X-Container-Read: X-Container-Read + - X-Remove-Container-name: X-Remove-Container-name + - X-Container-Write: X-Container-Write + - X-Container-Sync-To: X-Container-Sync-To + - X-Container-Sync-Key: X-Container-Sync-Key + - X-Versions-Location: X-Versions-Location + - X-Remove-Versions-Location: X-Remove-Versions-Location + - X-Container-Meta-name: X-Container-Meta-name + - X-Container-Meta-Access-Control-Allow-Origin: X-Container-Meta-Access-Control-Allow-Origin + - X-Container-Meta-Access-Control-Max-Age: X-Container-Meta-Access-Control-Max-Age + - X-Container-Meta-Access-Control-Expose-Headers: X-Container-Meta-Access-Control-Expose-Headers + - X-Container-Meta-Quota-Bytes: X-Container-Meta-Quota-Bytes + - X-Container-Meta-Quota-Count: X-Container-Meta-Quota-Count + - X-Container-Meta-Web-Directory-Type: X-Container-Meta-Web-Directory-Type + - Content-Type: Content-Type + - X-Detect-Content-Type: X-Detect-Content-Type + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + + +Show container metadata +======================= + +.. rest_method:: HEAD /v1/{account}/{container} + +Shows container metadata, including the number of objects and the total bytes of all objects stored in the container. 
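The same HEAD request can be issued from Python with python-swiftclient (an assumption; note that the returned dict uses lower-cased header names):

::

   from swiftclient.client import Connection

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   headers = conn.head_container('marktwain')
   print(headers['x-container-object-count'],
         headers['x-container-bytes-used'])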
+ +Show container metadata request: + +:: + + curl -i $publicURL/marktwain -X HEAD -H "X-Auth-Token: $token" + + + + +:: + + HTTP/1.1 204 No Content + Content-Length: 0 + X-Container-Object-Count: 1 + Accept-Ranges: bytes + X-Container-Meta-Book: TomSawyer + X-Timestamp: 1389727543.65372 + X-Container-Meta-Author: SamuelClemens + X-Container-Bytes-Used: 14 + Content-Type: text/plain; charset=utf-8 + X-Trans-Id: tx0287b982a268461b9ec14-0052d826e2 + Date: Thu, 16 Jan 2014 18:37:22 GMT + + +If the request succeeds, the operation returns the ``No Content +(204)`` response code. + +Error response codes:204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - container: container + - X-Auth-Token: X-Auth-Token + - X-Newest: X-Newest + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - X-Container-Sync-Key: X-Container-Sync-Key + - X-Container-Meta-name: X-Container-Meta-name + - Content-Length: Content-Length + - X-Container-Object-Count: X-Container-Object-Count + - X-Container-Write: X-Container-Write + - X-Container-Meta-Quota-Count: X-Container-Meta-Quota-Count + - Accept-Ranges: Accept-Ranges + - X-Container-Read: X-Container-Read + - X-Container-Meta-Access-Control-Expose-Headers: X-Container-Meta-Access-Control-Expose-Headers + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Bytes-Used: X-Container-Bytes-Used + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Timestamp: X-Timestamp + - X-Container-Meta-Access-Control-Allow-Origin: X-Container-Meta-Access-Control-Allow-Origin + - X-Container-Meta-Access-Control-Max-Age: X-Container-Meta-Access-Control-Max-Age + - Date: Date + - X-Trans-Id: X-Trans-Id + - X-Container-Sync-To: X-Container-Sync-To + - Content-Type: Content-Type + - X-Container-Meta-Quota-Bytes: X-Container-Meta-Quota-Bytes + - X-Versions-Location: X-Versions-Location + + + + + +Delete container +================ + +.. rest_method:: DELETE /v1/{account}/{container} + +Deletes an empty container. + +This operation fails unless the container is empty. An empty +container has no objects. + +Delete the ``steven`` container: + +:: + + curl -i $publicURL/steven -X DELETE -H "X-Auth-Token: $token" + + +If the container does not exist, the response is: + +:: + + HTTP/1.1 404 Not Found + Content-Length: 70 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx4d728126b17b43b598bf7-0052d81e34 + Date: Thu, 16 Jan 2014 18:00:20 GMT + + +If the container exists and the deletion succeeds, the response is: + +:: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: txf76c375ebece4df19c84c-0052d81f14 + Date: Thu, 16 Jan 2014 18:04:04 GMT + + +If the container exists but is not empty, the response is: + +:: + + HTTP/1.1 409 Conflict + Content-Length: 95 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx7782dc6a97b94a46956b5-0052d81f6b + Date: Thu, 16 Jan 2014 18:05:31 GMT + +

   <html><h1>Conflict</h1><p>There was a conflict when trying to complete your request.</p></html>

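If you script deletions, the not-empty case can be handled by deleting the objects first and retrying. A minimal sketch using python-swiftclient (an assumed client library):

::

   from swiftclient.client import Connection
   from swiftclient.exceptions import ClientException

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   try:
       conn.delete_container('steven')
   except ClientException as err:
       if err.http_status == 409:
           # Container not empty: delete its objects, then retry.
           _headers, objects = conn.get_container('steven')
           for obj in objects:
               conn.delete_object('steven', obj['name'])
           conn.delete_container('steven')
       elif err.http_status != 404:  # 404 means it is already gone
           raise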
+ + +Error response codes:404,204,409, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - container: container + - X-Auth-Token: X-Auth-Token + - X-Container-Meta-Temp-URL-Key: X-Container-Meta-Temp-URL-Key + - X-Container-Meta-Temp-URL-Key-2: X-Container-Meta-Temp-URL-Key-2 + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + + + diff --git a/api-ref/source/storage-object-services.inc b/api-ref/source/storage-object-services.inc new file mode 100644 index 0000000000..56d861158d --- /dev/null +++ b/api-ref/source/storage-object-services.inc @@ -0,0 +1,687 @@ +.. -*- rst -*- + +======= +Objects +======= + +Creates, replaces, shows details for, and deletes objects. Copies +objects from another object with a new or different name. Updates +object metadata. + + +Get object content and metadata +=============================== + +.. rest_method:: GET /v1/{account}/{container}/{object} + +Downloads the object content and gets the object metadata. + +This operation returns the object metadata in the response headers +and the object content in the response body. + +If this is a large object, the response body contains the +concatenated content of the segment objects. To get the manifest +instead of concatenated segment objects for a static large object, +use the ``multipart-manifest`` query parameter. + +Example requests and responses: + +- Show object details for the ``goodbye`` object in the + ``marktwain`` container: + + :: + + curl -i $publicURL/marktwain/goodbye -X GET -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 200 OK + Content-Length: 14 + Accept-Ranges: bytes + Last-Modified: Wed, 15 Jan 2014 16:41:49 GMT + Etag: 451e372e48e0f6b1114fa0724aa79fa1 + X-Timestamp: 1389804109.39027 + X-Object-Meta-Orig-Filename: goodbyeworld.txt + Content-Type: application/octet-stream + X-Trans-Id: tx8145a190241f4cf6b05f5-0052d82a34 + Date: Thu, 16 Jan 2014 18:51:32 GMT + Goodbye World! + + +- Show object details for the ``goodbye`` object, which does not + exist, in the ``janeausten`` container: + + :: + + curl -i $publicURL/janeausten/goodbye -X GET -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 404 Not Found + Content-Length: 70 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx073f7cbb850c4c99934b9-0052d82b04 + Date: Thu, 16 Jan 2014 18:55:00 GMT + +

   <html><h1>Not Found</h1><p>The resource could not be found.</p></html>

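A sketch of the same GET in Python with python-swiftclient (assumed), turning the 404 into an application-level check:

::

   from swiftclient.client import Connection
   from swiftclient.exceptions import ClientException

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   try:
       headers, body = conn.get_object('janeausten', 'goodbye')
   except ClientException as err:
       if err.http_status == 404:
           body = None  # object does not exist
       else:
           raise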
+ + + +The operation returns the ``Range Not Satisfiable (416)`` response +code for any ranged GET requests that specify more than: + +- Fifty ranges. + +- Three overlapping ranges. + +- Eight non-increasing ranges. + + +Normal response codes: 200 +Error response codes:416,404, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - object: object + - container: container + - X-Auth-Token: X-Auth-Token + - X-Newest: X-Newest + - temp_url_sig: temp_url_sig + - temp_url_expires: temp_url_expires + - filename: filename + - multipart-manifest: multipart-manifest + - Range: Range + - If-Match: If-Match + - If-None-Match: If-None-Match + - If-Modified-Since: If-Modified-Since + - If-Unmodified-Since: If-Unmodified-Since + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Content-Length: Content-Length + - X-Object-Meta-name: X-Object-Meta-name + - Content-Disposition: Content-Disposition + - Content-Encoding: Content-Encoding + - X-Delete-At: X-Delete-At + - Accept-Ranges: Accept-Ranges + - X-Object-Manifest: X-Object-Manifest + - Last-Modified: Last-Modified + - ETag: ETag + - X-Timestamp: X-Timestamp + - X-Trans-Id: X-Trans-Id + - Date: Date + - X-Static-Large-Object: X-Static-Large-Object + - Content-Type: Content-Type + + + +Response Example +---------------- + +See examples above. + + +Create or replace object +======================== + +.. rest_method:: PUT /v1/{account}/{container}/{object} + +Creates an object with data content and metadata, or replaces an existing object with data content and metadata. + +The PUT operation always creates an object. If you use this +operation on an existing object, you replace the existing object +and metadata rather than modifying the object. Consequently, this +operation returns the ``Created (201)`` response code. + +If you use this operation to copy a manifest object, the new object +is a normal object and not a copy of the manifest. Instead it is a +concatenation of all the segment objects. This means that you +cannot copy objects larger than 5 GB. + +Example requests and responses: + +- Create object: + + :: + + curl -i $publicURL/janeausten/helloworld.txt -X PUT -H "Content-Length: 1" -H "Content-Type: text/html; charset=UTF-8" -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 201 Created + Last-Modified: Fri, 17 Jan 2014 17:28:35 GMT + Content-Length: 116 + Etag: d41d8cd98f00b204e9800998ecf8427e + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx4d5e4f06d357462bb732f-0052d96843 + Date: Fri, 17 Jan 2014 17:28:35 GMT + + +- Replace object: + + :: + + curl -i $publicURL/janeausten/helloworld -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 201 Created + Last-Modified: Fri, 17 Jan 2014 17:28:35 GMT + Content-Length: 116 + Etag: d41d8cd98f00b204e9800998ecf8427e + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx4d5e4f06d357462bb732f-0052d96843 + Date: Fri, 17 Jan 2014 17:28:35 GMT + + +The ``Created (201)`` response code indicates a successful write. + +If the request times out, the operation returns the ``Request +Timeout (408)`` response code. + +The ``Length Required (411)`` response code indicates a missing +``Transfer-Encoding`` or ``Content-Length`` request header. + +If the MD5 checksum of the data that is written to the object store +does not match the optional ``ETag`` value, the operation returns +the ``Unprocessable Entity (422)`` response code. 
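To take advantage of that check, compute the MD5 digest of the payload and send it as the ``ETag`` request header. A minimal sketch (python-swiftclient assumed; its ``put_object`` accepts an ``etag`` argument):

::

   import hashlib

   from swiftclient.client import Connection

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   data = b'Hello World Again!'
   # The object server recomputes the MD5 of the received bytes and
   # returns 422 Unprocessable Entity on a mismatch.
   conn.put_object('janeausten', 'helloworld.txt', contents=data,
                   etag=hashlib.md5(data).hexdigest(),
                   content_type='text/plain')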
+ +Error response codes:201,422,411,408, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - object: object + - container: container + - multipart-manifest: multipart-manifest + - temp_url_sig: temp_url_sig + - temp_url_expires: temp_url_expires + - filename: filename + - X-Object-Manifest: X-Object-Manifest + - X-Auth-Token: X-Auth-Token + - Content-Length: Content-Length + - Transfer-Encoding: Transfer-Encoding + - Content-Type: Content-Type + - X-Detect-Content-Type: X-Detect-Content-Type + - X-Copy-From: X-Copy-From + - ETag: ETag + - Content-Disposition: Content-Disposition + - Content-Encoding: Content-Encoding + - X-Delete-At: X-Delete-At + - X-Delete-After: X-Delete-After + - X-Object-Meta-name: X-Object-Meta-name + - If-None-Match: If-None-Match + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Content-Length: Content-Length + - ETag: ETag + - X-Timestamp: X-Timestamp + - X-Trans-Id: X-Trans-Id + - Date: Date + - Content-Type: Content-Type + - last_modified: last_modified + + + + + + + + +Copy object +=========== + +.. rest_method:: COPY /v1/{account}/{container}/{object} + +Copies an object to another object in the object store. + +You can copy an object to a new object with the same name. Copying +to the same name is an alternative to using POST to add metadata to +an object. With POST , you must specify all the metadata. With COPY +, you can add additional metadata to the object. + +With COPY , you can set the ``X-Fresh-Metadata`` header to ``true`` +to copy the object without any existing metadata. + +Alternatively, you can use PUT with the ``X-Copy-From`` request +header to accomplish the same operation as the COPY object +operation. + +The PUT operation always creates an object. If you use this +operation on an existing object, you replace the existing object +and metadata rather than modifying the object. Consequently, this +operation returns the ``Created (201)`` response code. + +If you use this operation to copy a manifest object, the new object +is a normal object and not a copy of the manifest. Instead it is a +concatenation of all the segment objects. This means that you +cannot copy objects larger than 5 GB in size. All metadata is +preserved during the object copy. If you specify metadata on the +request to copy the object, either PUT or COPY , the metadata +overwrites any conflicting keys on the target (new) object. + +Example requests and responses: + +- Copy the ``goodbye`` object from the ``marktwain`` container to + the ``janeausten`` container: + + :: + + curl -i $publicURL/marktwain/goodbye -X COPY -H "X-Auth-Token: $token" -H "Destination: janeausten/goodbye" + + + + + :: + + HTTP/1.1 201 Created + Content-Length: 0 + X-Copied-From-Last-Modified: Thu, 16 Jan 2014 21:19:45 GMT + X-Copied-From: marktwain/goodbye + Last-Modified: Fri, 17 Jan 2014 18:22:57 GMT + Etag: 451e372e48e0f6b1114fa0724aa79fa1 + Content-Type: text/html; charset=UTF-8 + X-Object-Meta-Movie: AmericanPie + X-Trans-Id: txdcb481ad49d24e9a81107-0052d97501 + Date: Fri, 17 Jan 2014 18:22:57 GMT + + +- Alternatively, you can use PUT to copy the ``goodbye`` object from + the ``marktwain`` container to the ``janeausten`` container. This + request requires a ``Content-Length`` header, even if it is set + to zero (0). 
+ + :: + + curl -i $publicURL/janeausten/goodbye -X PUT -H "X-Auth-Token: $token" -H "X-Copy-From: /marktwain/goodbye" -H "Content-Length: 0" + + + + + :: + + HTTP/1.1 201 Created + Content-Length: 0 + X-Copied-From-Last-Modified: Thu, 16 Jan 2014 21:19:45 GMT + X-Copied-From: marktwain/goodbye + Last-Modified: Fri, 17 Jan 2014 18:22:57 GMT + Etag: 451e372e48e0f6b1114fa0724aa79fa1 + Content-Type: text/html; charset=UTF-8 + X-Object-Meta-Movie: AmericanPie + X-Trans-Id: txdcb481ad49d24e9a81107-0052d97501 + Date: Fri, 17 Jan 2014 18:22:57 GMT + + +When several replicas exist, the system copies from the most recent +replica. That is, the COPY operation behaves as though the +``X-Newest`` header is in the request. + +Error response codes:201, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - object: object + - container: container + - X-Auth-Token: X-Auth-Token + - Destination: Destination + - Content-Type: Content-Type + - Content-Encoding: Content-Encoding + - Content-Disposition: Content-Disposition + - X-Object-Meta-name: X-Object-Meta-name + - X-Fresh-Metadata: X-Fresh-Metadata + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Content-Length: Content-Length + - X-Object-Meta-name: X-Object-Meta-name + - X-Copied-From-Last-Modified: X-Copied-From-Last-Modified + - X-Copied-From: X-Copied-From + - Last-Modified: Last-Modified + - ETag: ETag + - X-Timestamp: X-Timestamp + - X-Trans-Id: X-Trans-Id + - Date: Date + - Content-Type: Content-Type + + + + + +Delete object +============= + +.. rest_method:: DELETE /v1/{account}/{container}/{object} + +Permanently deletes an object from the object store. + +You can use the COPY method to copy the object to a new location. +Then, use the DELETE method to delete the original object. + +Object deletion occurs immediately at request time. Any subsequent +GET , HEAD , POST , or DELETE operations return a ``404 Not Found`` +error code. + +For static large object manifests, you can add the ``?multipart- +manifest=delete`` query parameter. This operation deletes the +segment objects and if all deletions succeed, this operation +deletes the manifest object. + +Example request and response: + +- Delete the ``helloworld`` object from the ``marktwain`` container: + + :: + + curl -i $publicURL/marktwain/helloworld -X DELETE -H "X-Auth-Token: $token" + + + + + :: + + HTTP/1.1 204 No Content + Content-Length: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx36c7606fcd1843f59167c-0052d6fdac + Date: Wed, 15 Jan 2014 21:29:16 GMT + + +Typically, the DELETE operation does not return a response body. +However, with the ``multipart-manifest=delete`` query parameter, +the response body contains a list of manifest and segment objects +and the status of their DELETE operations. + +Error response codes:204, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - object: object + - container: container + - multipart-manifest: multipart-manifest + - X-Auth-Token: X-Auth-Token + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + + +Show object metadata +==================== + +.. rest_method:: HEAD /v1/{account}/{container}/{object} + +Shows object metadata. 
+
+If the ``Content-Length`` response header is non-zero, the example
+cURL command stalls after it prints the response headers because it
+is waiting for a response body. However, the Object Storage system
+does not return a response body for the HEAD operation.
+
+Example requests and responses:
+
+- Show object metadata:
+
+  ::
+
+     curl -i $publicURL/marktwain/goodbye -X HEAD -H "X-Auth-Token: $token"
+
+  ::
+
+     HTTP/1.1 200 OK
+     Content-Length: 14
+     Accept-Ranges: bytes
+     Last-Modified: Thu, 16 Jan 2014 21:12:31 GMT
+     Etag: 451e372e48e0f6b1114fa0724aa79fa1
+     X-Timestamp: 1389906751.73463
+     X-Object-Meta-Book: GoodbyeColumbus
+     Content-Type: application/octet-stream
+     X-Trans-Id: tx37ea34dcd1ed48ca9bc7d-0052d84b6f
+     Date: Thu, 16 Jan 2014 21:13:19 GMT
+
+If the request succeeds, the operation returns the ``200`` response
+code.
+
+
+Normal response codes: 200
+Error response codes: 204
+
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+   - account: account
+   - object: object
+   - container: container
+   - X-Auth-Token: X-Auth-Token
+   - temp_url_sig: temp_url_sig
+   - temp_url_expires: temp_url_expires
+   - filename: filename
+   - X-Newest: X-Newest
+   - X-Trans-Id-Extra: X-Trans-Id-Extra
+
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+   - Last-Modified: Last-Modified
+   - Content-Length: Content-Length
+   - X-Object-Meta-name: X-Object-Meta-name
+   - Content-Disposition: Content-Disposition
+   - Content-Encoding: Content-Encoding
+   - X-Delete-At: X-Delete-At
+   - X-Object-Manifest: X-Object-Manifest
+   - ETag: ETag
+   - X-Timestamp: X-Timestamp
+   - X-Trans-Id: X-Trans-Id
+   - Date: Date
+   - X-Static-Large-Object: X-Static-Large-Object
+   - Content-Type: Content-Type
+
+
+Response Example
+----------------
+
+See examples above.
+
+
+Create or update object metadata
+================================
+
+.. rest_method:: POST /v1/{account}/{container}/{object}
+
+Creates or updates object metadata.
+
+To create or update custom metadata, use the
+``X-Object-Meta-{name}`` header, where ``{name}`` is the name of
+the metadata item.
+
+In addition to the custom metadata, you can update the
+``Content-Type``, ``Content-Encoding``, ``Content-Disposition``,
+and ``X-Delete-At`` system metadata items. However you cannot
+update other system metadata, such as ``Content-Length`` or
+``Last-Modified``.
+
+You can use COPY as an alternative to the POST operation by copying
+to the same object. With the POST operation you must specify all
+metadata items, whereas with the COPY operation, you need to
+specify only changed or additional items.
+
+All metadata is preserved during the object copy. If you specify
+metadata on the request to copy the object, either PUT or COPY,
+the metadata overwrites any conflicting keys on the target (new)
+object.
+
+A POST request deletes any existing custom metadata that you added
+with a previous PUT or POST request. Consequently, you must specify
+all custom metadata in the request. However, system metadata is
+unchanged by the POST request unless you explicitly supply it in a
+request header.
+
+You can also set the ``X-Delete-At`` or ``X-Delete-After`` header
+to define when to expire the object.
+
+When used as described in this section, the POST operation creates
+or replaces metadata. This form of the operation has no request
+body.
+
+You can also use the `form POST feature
+`_ to upload objects.
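Because POST replaces all custom object metadata, a client must resend every item it wants to keep. A hedged python-swiftclient sketch (the library and metadata names are illustrative):

::

   from swiftclient.client import Connection

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   # Re-send ALL custom metadata; any item omitted here is removed.
   conn.post_object('marktwain', 'goodbye',
                    headers={'X-Object-Meta-Book': 'GoodbyeColumbus',
                             'X-Object-Meta-Movie': 'AmericanPie'})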
+ +Example requests and responses: + +- Create object metadata: + + :: + + curl -i $publicURL/marktwain/goodbye -X POST -H "X-Auth-Token: $token" -H "X-Object-Meta-Book: GoodbyeColumbus" + + + + + :: + + HTTP/1.1 202 Accepted + Content-Length: 76 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: txb5fb5c91ba1f4f37bb648-0052d84b3f + Date: Thu, 16 Jan 2014 21:12:31 GMT + +

   <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>

+
+- Update object metadata:
+
+  ::
+
+     curl -i $publicURL/marktwain/goodbye -X POST -H "X-Auth-Token: $token" -H "X-Object-Meta-Book: GoodbyeOldFriend"
+
+  ::
+
+     HTTP/1.1 202 Accepted
+     Content-Length: 76
+     Content-Type: text/html; charset=UTF-8
+     X-Trans-Id: tx5ec7ab81cdb34ced887c8-0052d84ca4
+     Date: Thu, 16 Jan 2014 21:18:28 GMT
+

   <html><h1>Accepted</h1><p>The request is accepted for processing.</p></html>

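The expiry headers mentioned above follow the same pattern; for example, a sketch that expires an object one day from now (python-swiftclient assumed, value in seconds):

::

   from swiftclient.client import Connection

   conn = Connection(preauthurl=public_url, preauthtoken=token)
   conn.post_object('marktwain', 'goodbye',
                    headers={'X-Delete-After': '86400'})  # 24 hours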
+ + +Error response codes:202, + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - account: account + - object: object + - container: container + - X-Auth-Token: X-Auth-Token + - X-Object-Meta-name: X-Object-Meta-name + - X-Delete-At: X-Delete-At + - Content-Disposition: Content-Disposition + - Content-Encoding: Content-Encoding + - X-Delete-After: X-Delete-After + - Content-Type: Content-Type + - X-Detect-Content-Type: X-Detect-Content-Type + - X-Trans-Id-Extra: X-Trans-Id-Extra + + +Response Parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - Date: Date + - X-Timestamp: X-Timestamp + - Content-Length: Content-Length + - Content-Type: Content-Type + - X-Trans-Id: X-Trans-Id + + + + diff --git a/api-ref/source/storage_endpoints.inc b/api-ref/source/storage_endpoints.inc new file mode 100644 index 0000000000..41845425d4 --- /dev/null +++ b/api-ref/source/storage_endpoints.inc @@ -0,0 +1,37 @@ +.. -*- rst -*- + +========= +Endpoints +========= + +If configured, lists endpoints for an account. + + +List endpoints +============== + +.. rest_method:: GET /v1/endpoints + +Lists endpoints for an object, account, or container. + +When the cloud provider enables middleware to list the +``/endpoints/`` path, software that needs data location information +can use this call to avoid network overhead. The cloud provider can +map the ``/endpoints/`` path to another resource, so this exact +resource might vary from provider to provider. Because it goes +straight to the middleware, the call is not authenticated, so be +sure you have tightly secured the environment and network when +using this call. + +Error response codes:201, + + +Request +------- + +This operation does not accept a request body. + + + + + diff --git a/api-ref/source/storage_info.inc b/api-ref/source/storage_info.inc new file mode 100644 index 0000000000..60b4082f7c --- /dev/null +++ b/api-ref/source/storage_info.inc @@ -0,0 +1,41 @@ +.. -*- rst -*- + +=============== +Discoverability +=============== + +If configured, lists the activated capabilities for this version of +the OpenStack Object Storage API. + + +List activated capabilities +=========================== + +.. rest_method:: GET /info + +Lists the activated capabilities for this version of the OpenStack Object Storage API. + + +Normal response codes: 200 +Error response codes: + + +Request +------- + +.. rest_parameters:: parameters.yaml + + - swiftinfo_sig: swiftinfo_sig + - swiftinfo_expires: swiftinfo_expires + + + + +Response Example +---------------- + +.. literalinclude:: samples/capabilities-list-response.json + :language: javascript + + + diff --git a/test-requirements.txt b/test-requirements.txt index 3525d03615..6f237978d3 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,6 +10,7 @@ nosexcover nosehtmloutput oslosphinx sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 # BSD +os-api-ref>=0.1.0 # Apache-2.0 os-testr>=0.4.1 mock>=1.0 python-swiftclient diff --git a/tox.ini b/tox.ini index 1e79f67b88..5becdf2a9e 100644 --- a/tox.ini +++ b/tox.ini @@ -59,6 +59,22 @@ commands = {posargs} [testenv:docs] commands = python setup.py build_sphinx +[testenv:api-ref] +# This environment is called from CI scripts to test and publish +# the API Ref to developer.openstack.org. +# NOTE(sdague): this target does not use constraints because +# upstream infra does not yet support it. Once that's fixed, we can +# drop the install_command. 
+install_command = pip install -U --force-reinstall {opts} {packages}
+commands =
+    rm -rf api-ref/build
+    sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
+
+
 [testenv:bandit]
 deps = -r{toxinidir}/test-requirements.txt
 commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate

From 03b762e80a9b3d33ce13b8222f4cd2b549171c51 Mon Sep 17 00:00:00 2001
From: Janie Richling
Date: Mon, 6 Jun 2016 17:19:48 +0100
Subject: [PATCH 018/156] Support for http footers - Replication and EC

Before this patch, the proxy ObjectController supported sending
metadata from the proxy server to object servers in "footers" that
trail the body of HTTP PUT requests, but this support was for EC
policies only.

The encryption feature requires that footers are sent with both EC
and replicated policy requests in order to persist encryption
specific sysmeta, and to override container update headers with an
encrypted Etag value.

This patch:

 - Moves most of the functionality of ECPutter into a generic Putter
   class that is used for replicated object PUTs without footers.

 - Creates a MIMEPutter subclass to support multipart and multiphase
   behaviour required for any replicated object PUT with footers and
   all EC PUTs.

 - Modifies ReplicatedObjectController to use Putter objects in place
   of raw connection objects.

 - Refactors the _get_put_connections method and _put_connect_node
   methods so that more code is in the BaseObjectController class and
   therefore shared by [EC|Replicated]ObjectController classes.

 - Adds support to call a callback that middleware may have placed in
   the environ, so the callback can set footers. The
   x-object-sysmeta-ec- namespace is reserved and any footer values
   set by middleware in that namespace will not be forwarded to
   object servers.

In addition this patch enables more than one value to be added to the
X-Backend-Etag-Is-At header. This header is used to point to an
(optional) alternative sysmeta header whose value should be used when
evaluating conditional requests with If-[None-]Match headers. This is
already used with EC policies when the ECObjectController has
calculated the actual body Etag and sent it using a footer
(X-Object-Sysmeta-EC-Etag). X-Backend-Etag-Is-At is in that case set
to X-Object-Sysmeta-Ec-Etag so as to point to the actual body Etag
value rather than the EC fragment Etag.

Encryption will also need to add a pointer to an encrypted Etag
value. However, the referenced sysmeta may not exist, for example if
the object was created before encryption was enabled. The
X-Backend-Etag-Is-At value is therefore changed to support a list of
possible locations for alternate Etag values. Encryption will place
its expected alternative Etag location on this list, as will the
ECObjectController, and the object server will look for the first
object metadata to match an entry on the list when matching
conditional requests. That way, if the object was not encrypted then
the object server will fall through to using the EC Etag value, or in
the case of a replicated policy will fall through to using the normal
Etag metadata.

If your proxy has a third-party middleware that uses
X-Backend-Etag-Is-At and it upgrades before an object server it's
talking to then conditional requests may be broken.
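
As an illustration, a middleware might register the footers callback
like this (a minimal sketch; the middleware and footer names below
are invented for the example and are not part of this patch):

    class FooterSettingMiddleware(object):
        def __init__(self, app):
            self.app = app

        def __call__(self, env, start_response):
            if env.get('REQUEST_METHOD') == 'PUT':
                def set_footers(footers):
                    # invoked by the proxy after the client body has
                    # been read, just before the trailing footers are
                    # sent to the object servers
                    footers['X-Object-Sysmeta-Example'] = 'example'
                env['swift.callback.update_footers'] = set_footers
            return self.app(env, start_response)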
UpgradeImpact Co-Authored-By: Alistair Coles Co-Authored-By: Thiago da Silva Co-Authored-By: Samuel Merritt Co-Authored-By: Kota Tsuyuzaki Closes-Bug: #1594739 Change-Id: I12a6e41150f90de746ce03623032b83ed1987ee1 --- swift/common/request_helpers.py | 66 +- swift/common/swob.py | 4 +- swift/obj/server.py | 12 +- swift/proxy/controllers/obj.py | 945 ++++++++++++----------- test/unit/__init__.py | 15 +- test/unit/common/middleware/helpers.py | 25 +- test/unit/common/test_request_helpers.py | 74 +- test/unit/obj/test_server.py | 34 + test/unit/proxy/controllers/test_obj.py | 480 +++++++++++- test/unit/proxy/test_server.py | 4 +- 10 files changed, 1165 insertions(+), 494 deletions(-) diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index 07e34d8b46..71a32106af 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -27,6 +27,7 @@ import time import six from six.moves.urllib.parse import unquote +from swift.common.header_key_dict import HeaderKeyDict from swift import gettext_ as _ from swift.common.storage_policy import POLICIES @@ -38,7 +39,7 @@ from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable, \ from swift.common.utils import split_path, validate_device_partition, \ close_if_possible, maybe_multipart_byteranges_to_document_iters, \ multipart_byteranges_to_document_iters, parse_content_type, \ - parse_content_range + parse_content_range, csv_append, list_from_csv from swift.common.wsgi import make_subrequest @@ -544,3 +545,66 @@ def http_response_to_document_iters(response, read_chunk_size=4096): params = dict(params_list) return multipart_byteranges_to_document_iters( response, params['boundary'], read_chunk_size) + + +def update_etag_is_at_header(req, name): + """ + Helper function to update an X-Backend-Etag-Is-At header whose value is a + list of alternative header names at which the actual object etag may be + found. This informs the object server where to look for the actual object + etag when processing conditional requests. + + Since the proxy server and/or middleware may set alternative etag header + names, the value of X-Backend-Etag-Is-At is a comma separated list which + the object server inspects in order until it finds an etag value. + + :param req: a swob Request + :param name: name of a sysmeta where alternative etag may be found + """ + if ',' in name: + # HTTP header names should not have commas but we'll check anyway + raise ValueError('Header name must not contain commas') + existing = req.headers.get("X-Backend-Etag-Is-At") + req.headers["X-Backend-Etag-Is-At"] = csv_append( + existing, name) + + +def resolve_etag_is_at_header(req, metadata): + """ + Helper function to resolve an alternative etag value that may be stored in + metadata under an alternate name. + + The value of the request's X-Backend-Etag-Is-At header (if it exists) is a + comma separated list of alternate names in the metadata at which an + alternate etag value may be found. This list is processed in order until an + alternate etag is found. + + The left most value in X-Backend-Etag-Is-At will have been set by the left + most middleware, or if no middleware, by ECObjectController, if an EC + policy is in use. The left most middleware is assumed to be the authority + on what the etag value of the object content is. + + The resolver will work from left to right in the list until it finds a + value that is a name in the given metadata. So the left most wins, IF it + exists in the metadata. 
+
+    By way of example, assume the encrypter middleware is installed. If an
+    object is *not* encrypted then the resolver will not find the encrypter
+    middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will
+    then find the EC alternate etag (if EC policy). But if the object *is*
+    encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is
+    correct because it should be preferred over X-Object-Sysmeta-Ec-Etag.
+
+    :param req: a swob Request
+    :param metadata: a dict containing object metadata
+    :return: an alternate etag value if any is found, otherwise None
+    """
+    alternate_etag = None
+    metadata = HeaderKeyDict(metadata)
+    if "X-Backend-Etag-Is-At" in req.headers:
+        names = list_from_csv(req.headers["X-Backend-Etag-Is-At"])
+        for name in names:
+            if name in metadata:
+                alternate_etag = metadata[name]
+                break
+    return alternate_etag
diff --git a/swift/common/swob.py b/swift/common/swob.py
index 2ba5d5e6a4..aa11ec01f2 100644
--- a/swift/common/swob.py
+++ b/swift/common/swob.py
@@ -1140,8 +1140,8 @@ class Response(object):
         conditional requests.
 
         It's most effectively used with X-Backend-Etag-Is-At which would
-        define the additional Metadata key where the original ETag of the
-        clear-form client request data.
+        define the additional Metadata key(s) where the original ETag of the
+        clear-form client request data may be found.
         """
         if self._conditional_etag is not None:
             return self._conditional_etag
diff --git a/swift/obj/server.py b/swift/obj/server.py
index c3fde72525..99083800eb 100644
--- a/swift/obj/server.py
+++ b/swift/obj/server.py
@@ -46,7 +46,7 @@ from swift.common.http import is_success
 from swift.common.base_storage_server import BaseStorageServer
 from swift.common.header_key_dict import HeaderKeyDict
 from swift.common.request_helpers import get_name_and_placement, \
-    is_user_meta, is_sys_or_user_meta
+    is_user_meta, is_sys_or_user_meta, resolve_etag_is_at_header
 from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
     HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
     HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
@@ -832,10 +832,7 @@ class ObjectController(BaseStorageServer):
         keep_cache = (self.keep_cache_private or
                       ('X-Auth-Token' not in request.headers and
                        'X-Storage-Token' not in request.headers))
-        conditional_etag = None
-        if 'X-Backend-Etag-Is-At' in request.headers:
-            conditional_etag = metadata.get(
-                request.headers['X-Backend-Etag-Is-At'])
+        conditional_etag = resolve_etag_is_at_header(request, metadata)
         response = Response(
             app_iter=disk_file.reader(keep_cache=keep_cache),
             request=request, conditional_response=True,
@@ -889,10 +886,7 @@ class ObjectController(BaseStorageServer):
                 headers['X-Backend-Timestamp'] = e.timestamp.internal
             return HTTPNotFound(request=request, headers=headers,
                                 conditional_response=True)
-        conditional_etag = None
-        if 'X-Backend-Etag-Is-At' in request.headers:
-            conditional_etag = metadata.get(
-                request.headers['X-Backend-Etag-Is-At'])
+        conditional_etag = resolve_etag_is_at_header(request, metadata)
         response = Response(request=request, conditional_response=True,
                             conditional_etag=conditional_etag)
         response.headers['Content-Type'] = metadata.get(
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index 6f8559063a..af6b9368d7 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -71,6 +71,8 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
     HTTPServerError, HTTPServiceUnavailable,
HTTPClientDisconnect, \ HTTPUnprocessableEntity, Response, HTTPException, \ HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError +from swift.common.request_helpers import update_etag_is_at_header, \ + resolve_etag_is_at_header def check_content_type(req): @@ -289,71 +291,111 @@ class BaseObjectController(Controller): return headers - def _await_response(self, conn, **kwargs): - with Timeout(self.app.node_timeout): - if conn.resp: - return conn.resp - else: - return conn.getresponse() - - def _get_conn_response(self, conn, req, logger_thread_locals, **kwargs): + def _get_conn_response(self, putter, path, logger_thread_locals, + final_phase, **kwargs): self.app.logger.thread_locals = logger_thread_locals try: - resp = self._await_response(conn, **kwargs) - return (conn, resp) + resp = putter.await_response( + self.app.node_timeout, not final_phase) except (Exception, Timeout): + resp = None + if final_phase: + status_type = 'final' + else: + status_type = 'commit' self.app.exception_occurred( - conn.node, _('Object'), - _('Trying to get final status of PUT to %s') % req.path) - return (None, None) + putter.node, _('Object'), + _('Trying to get %(status_type)s status of PUT to %(path)s') % + {'status_type': status_type, 'path': path}) + return (putter, resp) - def _get_put_responses(self, req, conns, nodes, **kwargs): + def _have_adequate_put_responses(self, statuses, num_nodes, min_responses): """ - Collect replicated object responses. + Test for sufficient PUT responses from backend nodes to proceed with + PUT handling. + + :param statuses: a list of response statuses. + :param num_nodes: number of backend nodes to which PUT requests may be + issued. + :param min_responses: (optional) minimum number of nodes required to + have responded with satisfactory status code. + :return: True if sufficient backend responses have returned a + satisfactory status code. + """ + raise NotImplementedError + + def _get_put_responses(self, req, putters, num_nodes, final_phase=True, + min_responses=None): + """ + Collect object responses to a PUT request and determine if a + satisfactory number of nodes have returned success. Returns + lists of accumulated status codes, reasons, bodies and etags. + + :param req: the request + :param putters: list of putters for the request + :param num_nodes: number of nodes involved + :param final_phase: boolean indicating if this is the last phase + :param min_responses: minimum needed when not requiring quorum + :return: a tuple of lists of status codes, reasons, bodies and etags. + The list of bodies and etags is only populated for the final + phase of a PUT transaction. 
""" statuses = [] reasons = [] bodies = [] etags = set() - pile = GreenAsyncPile(len(conns)) - for conn in conns: - pile.spawn(self._get_conn_response, conn, - req, self.app.logger.thread_locals) + pile = GreenAsyncPile(len(putters)) + for putter in putters: + if putter.failed: + continue + pile.spawn(self._get_conn_response, putter, req, + self.app.logger.thread_locals, final_phase=final_phase) - def _handle_response(conn, response): + def _handle_response(putter, response): statuses.append(response.status) reasons.append(response.reason) - bodies.append(response.read()) + if final_phase: + body = response.read() + else: + body = '' + bodies.append(body) if response.status == HTTP_INSUFFICIENT_STORAGE: - self.app.error_limit(conn.node, + putter.failed = True + self.app.error_limit(putter.node, _('ERROR Insufficient Storage')) elif response.status >= HTTP_INTERNAL_SERVER_ERROR: + putter.failed = True self.app.error_occurred( - conn.node, + putter.node, _('ERROR %(status)d %(body)s From Object Server ' 're: %(path)s') % {'status': response.status, - 'body': bodies[-1][:1024], 'path': req.path}) + 'body': body[:1024], 'path': req.path}) elif is_success(response.status): etags.add(response.getheader('etag').strip('"')) - for (conn, response) in pile: + for (putter, response) in pile: if response: - _handle_response(conn, response) - if self.have_quorum(statuses, len(nodes)): + _handle_response(putter, response) + if self._have_adequate_put_responses( + statuses, num_nodes, min_responses): break + else: + putter.failed = True # give any pending requests *some* chance to finish finished_quickly = pile.waitall(self.app.post_quorum_timeout) - for (conn, response) in finished_quickly: + for (putter, response) in finished_quickly: if response: - _handle_response(conn, response) + _handle_response(putter, response) + + if final_phase: + while len(statuses) < num_nodes: + statuses.append(HTTP_SERVICE_UNAVAILABLE) + reasons.append('') + bodies.append('') - while len(statuses) < len(nodes): - statuses.append(HTTP_SERVICE_UNAVAILABLE) - reasons.append('') - bodies.append('') return statuses, reasons, bodies, etags def _config_obj_expiration(self, req): @@ -406,12 +448,17 @@ class BaseObjectController(Controller): req.headers['X-Timestamp'] = Timestamp(time.time()).internal return None - def _check_failure_put_connections(self, conns, req, nodes, min_conns): + def _check_failure_put_connections(self, putters, req, min_conns): """ Identify any failed connections and check minimum connection count. 
+ + :param putters: a list of Putter instances + :param req: request + :param min_conns: minimum number of putter connections required """ if req.if_none_match is not None and '*' in req.if_none_match: - statuses = [conn.resp.status for conn in conns if conn.resp] + statuses = [ + putter.resp.status for putter in putters if putter.resp] if HTTP_PRECONDITION_FAILED in statuses: # If we find any copy of the file, it shouldn't be uploaded self.app.logger.debug( @@ -419,14 +466,14 @@ class BaseObjectController(Controller): {'statuses': statuses}) raise HTTPPreconditionFailed(request=req) - if any(conn for conn in conns if conn.resp and - conn.resp.status == HTTP_CONFLICT): + if any(putter for putter in putters if putter.resp and + putter.resp.status == HTTP_CONFLICT): status_times = ['%(status)s (%(timestamp)s)' % { - 'status': conn.resp.status, + 'status': putter.resp.status, 'timestamp': HeaderKeyDict( - conn.resp.getheaders()).get( + putter.resp.getheaders()).get( 'X-Backend-Timestamp', 'unknown') - } for conn in conns if conn.resp] + } for putter in putters if putter.resp] self.app.logger.debug( _('Object PUT returning 202 for 409: ' '%(req_timestamp)s <= %(timestamps)r'), @@ -434,32 +481,61 @@ class BaseObjectController(Controller): 'timestamps': ', '.join(status_times)}) raise HTTPAccepted(request=req) - self._check_min_conn(req, conns, min_conns) + self._check_min_conn(req, putters, min_conns) - def _connect_put_node(self, nodes, part, path, headers, + def _make_putter(self, node, part, req, headers): + """ + Returns a putter object for handling streaming of object to object + servers. + + Subclasses must implement this method. + + :param node: a storage node + :param part: ring partition number + :param req: a swob Request + :param headers: request headers + :return: an instance of a Putter + """ + raise NotImplementedError + + def _connect_put_node(self, nodes, part, req, headers, logger_thread_locals): """ Make connection to storage nodes - Connects to the first working node that it finds in nodes iter - and sends over the request headers. Returns an HTTPConnection - object to handle the rest of the streaming. - - This method must be implemented by each policy ObjectController. + Connects to the first working node that it finds in nodes iter and + sends over the request headers. Returns a Putter to handle the rest of + the streaming, or None if no working nodes were found. :param nodes: an iterator of the target storage nodes - :param partition: ring partition number - :param path: the object path to send to the storage node + :param part: ring partition number + :param req: a swob Request :param headers: request headers :param logger_thread_locals: The thread local values to be set on the self.app.logger to retain transaction logging information. 
- :return: HTTPConnection object + :return: an instance of a Putter """ - raise NotImplementedError() + self.app.logger.thread_locals = logger_thread_locals + for node in nodes: + try: + putter = self._make_putter(node, part, req, headers) + self.app.set_node_timing(node, putter.connect_duration) + return putter + except InsufficientStorage: + self.app.error_limit(node, _('ERROR Insufficient Storage')) + except PutterConnectError as e: + self.app.error_occurred( + node, _('ERROR %(status)d Expect: 100-continue ' + 'From Object Server') % { + 'status': e.status}) + except (Exception, Timeout): + self.app.exception_occurred( + node, _('Object'), + _('Expect: 100-continue on %s') % req.swift_entity_path) def _get_put_connections(self, req, nodes, partition, outgoing_headers, - policy, expect): + policy): """ Establish connections to storage nodes for PUT request """ @@ -469,25 +545,32 @@ class BaseObjectController(Controller): pile = GreenPile(len(nodes)) for nheaders in outgoing_headers: - if expect: + # RFC2616:8.2.3 disallows 100-continue without a body + if (req.content_length > 0) or req.is_chunked: nheaders['Expect'] = '100-continue' pile.spawn(self._connect_put_node, node_iter, partition, - req.swift_entity_path, nheaders, - self.app.logger.thread_locals) + req, nheaders, self.app.logger.thread_locals) - conns = [conn for conn in pile if conn] + putters = [putter for putter in pile if putter] - return conns + return putters - def _check_min_conn(self, req, conns, min_conns, msg=None): - msg = msg or 'Object PUT returning 503, %(conns)s/%(nodes)s ' \ - 'required connections' + def _check_min_conn(self, req, putters, min_conns, msg=None): + msg = msg or _('Object PUT returning 503, %(conns)s/%(nodes)s ' + 'required connections') - if len(conns) < min_conns: + if len(putters) < min_conns: self.app.logger.error((msg), - {'conns': len(conns), 'nodes': min_conns}) + {'conns': len(putters), 'nodes': min_conns}) raise HTTPServiceUnavailable(request=req) + def _get_footers(self, req): + footers = HeaderKeyDict() + footer_callback = req.environ.get( + 'swift.callback.update_footers', lambda _footer: None) + footer_callback(footers) + return footers + def _store_object(self, req, data_source, nodes, partition, outgoing_headers): """ @@ -659,115 +742,81 @@ class ReplicatedObjectController(BaseObjectController): req.swift_entity_path, concurrency) return resp - def _connect_put_node(self, nodes, part, path, headers, - logger_thread_locals): - """ - Make a connection for a replicated object. + def _make_putter(self, node, part, req, headers): + if req.environ.get('swift.callback.update_footers'): + putter = MIMEPutter.connect( + node, part, req.swift_entity_path, headers, + conn_timeout=self.app.conn_timeout, + node_timeout=self.app.node_timeout, + logger=self.app.logger, + need_multiphase=False) + else: + putter = Putter.connect( + node, part, req.swift_entity_path, headers, + conn_timeout=self.app.conn_timeout, + node_timeout=self.app.node_timeout, + logger=self.app.logger, + chunked=req.is_chunked) + return putter - Connects to the first working node that it finds in node_iter - and sends over the request headers. Returns an HTTPConnection - object to handle the rest of the streaming. 
- """ - self.app.logger.thread_locals = logger_thread_locals - for node in nodes: - try: - start_time = time.time() - with ConnectionTimeout(self.app.conn_timeout): - conn = http_connect( - node['ip'], node['port'], node['device'], part, 'PUT', - path, headers) - self.app.set_node_timing(node, time.time() - start_time) - with Timeout(self.app.node_timeout): - resp = conn.getexpect() - if resp.status == HTTP_CONTINUE: - conn.resp = None - conn.node = node - return conn - elif (is_success(resp.status) - or resp.status in (HTTP_CONFLICT, - HTTP_UNPROCESSABLE_ENTITY)): - conn.resp = resp - conn.node = node - return conn - elif headers['If-None-Match'] is not None and \ - resp.status == HTTP_PRECONDITION_FAILED: - conn.resp = resp - conn.node = node - return conn - elif resp.status == HTTP_INSUFFICIENT_STORAGE: - self.app.error_limit(node, _('ERROR Insufficient Storage')) - elif is_server_error(resp.status): - self.app.error_occurred( - node, - _('ERROR %(status)d Expect: 100-continue ' - 'From Object Server') % { - 'status': resp.status}) - except (Exception, Timeout): - self.app.exception_occurred( - node, _('Object'), - _('Expect: 100-continue on %s') % path) - - def _send_file(self, conn, path): - """Method for a file PUT coro""" - while True: - chunk = conn.queue.get() - if not conn.failed: - try: - with ChunkWriteTimeout(self.app.node_timeout): - conn.send(chunk) - except (Exception, ChunkWriteTimeout): - conn.failed = True - self.app.exception_occurred( - conn.node, _('Object'), - _('Trying to write to %s') % path) - conn.queue.task_done() - - def _transfer_data(self, req, data_source, conns, nodes): + def _transfer_data(self, req, data_source, putters, nodes): """ Transfer data for a replicated object. This method was added in the PUT method extraction change """ - min_conns = quorum_size(len(nodes)) bytes_transferred = 0 + + def send_chunk(chunk): + for putter in list(putters): + if not putter.failed: + putter.send_chunk(chunk) + else: + putter.close() + putters.remove(putter) + self._check_min_conn( + req, putters, min_conns, + msg=_('Object PUT exceptions during send, ' + '%(conns)s/%(nodes)s required connections')) + + min_conns = quorum_size(len(nodes)) try: with ContextPool(len(nodes)) as pool: - for conn in conns: - conn.failed = False - conn.queue = Queue(self.app.put_queue_depth) - pool.spawn(self._send_file, conn, req.path) + for putter in putters: + putter.spawn_sender_greenthread( + pool, self.app.put_queue_depth, self.app.node_timeout, + self.app.exception_occurred) while True: with ChunkReadTimeout(self.app.client_timeout): try: chunk = next(data_source) except StopIteration: - if req.is_chunked: - for conn in conns: - conn.queue.put('0\r\n\r\n') break bytes_transferred += len(chunk) if bytes_transferred > constraints.MAX_FILE_SIZE: raise HTTPRequestEntityTooLarge(request=req) - for conn in list(conns): - if not conn.failed: - conn.queue.put( - '%x\r\n%s\r\n' % (len(chunk), chunk) - if req.is_chunked else chunk) - else: - conn.close() - conns.remove(conn) - self._check_min_conn( - req, conns, min_conns, - msg='Object PUT exceptions during' - ' send, %(conns)s/%(nodes)s required connections') - for conn in conns: - if conn.queue.unfinished_tasks: - conn.queue.join() - conns = [conn for conn in conns if not conn.failed] - self._check_min_conn( - req, conns, min_conns, - msg='Object PUT exceptions after last send, ' - '%(conns)s/%(nodes)s required connections') + + send_chunk(chunk) + + if req.content_length and ( + bytes_transferred < req.content_length): + 
req.client_disconnect = True + self.app.logger.warning( + _('Client disconnected without sending enough data')) + self.app.logger.increment('client_disconnects') + raise HTTPClientDisconnect(request=req) + + trail_md = self._get_footers(req) + for putter in putters: + # send any footers set by middleware + putter.end_of_object_data(footer_metadata=trail_md) + + for putter in putters: + putter.wait() + self._check_min_conn( + req, [p for p in putters if not p.failed], min_conns, + msg=_('Object PUT exceptions after last send, ' + '%(conns)s/%(nodes)s required connections')) except ChunkReadTimeout as err: self.app.logger.warning( _('ERROR Client read timeout (%ss)'), err.seconds) @@ -790,12 +839,9 @@ class ReplicatedObjectController(BaseObjectController): _('ERROR Exception transferring data to object servers %s'), {'path': req.path}) raise HTTPInternalServerError(request=req) - if req.content_length and bytes_transferred < req.content_length: - req.client_disconnect = True - self.app.logger.warning( - _('Client disconnected without sending enough data')) - self.app.logger.increment('client_disconnects') - raise HTTPClientDisconnect(request=req) + + def _have_adequate_put_responses(self, statuses, num_nodes, min_responses): + return self.have_quorum(statuses, num_nodes) def _store_object(self, req, data_source, nodes, partition, outgoing_headers): @@ -812,30 +858,25 @@ class ReplicatedObjectController(BaseObjectController): if not nodes: return HTTPNotFound() - # RFC2616:8.2.3 disallows 100-continue without a body - if (req.content_length > 0) or req.is_chunked: - expect = True - else: - expect = False - conns = self._get_put_connections(req, nodes, partition, - outgoing_headers, policy, expect) + putters = self._get_put_connections( + req, nodes, partition, outgoing_headers, policy) min_conns = quorum_size(len(nodes)) try: # check that a minimum number of connections were established and # meet all the correct conditions set in the request - self._check_failure_put_connections(conns, req, nodes, min_conns) + self._check_failure_put_connections(putters, req, min_conns) # transfer data - self._transfer_data(req, data_source, conns, nodes) + self._transfer_data(req, data_source, putters, nodes) # get responses - statuses, reasons, bodies, etags = self._get_put_responses( - req, conns, nodes) + statuses, reasons, bodies, etags = \ + self._get_put_responses(req, putters, len(nodes)) except HTTPException as resp: return resp finally: - for conn in conns: - conn.close() + for putter in putters: + putter.close() if len(etags) > 1: self.app.logger.error( @@ -1380,33 +1421,38 @@ DATA_ACKED = 4 COMMIT_SENT = 5 -class ECPutter(object): +class Putter(object): """ - This is here mostly to wrap up the fact that all EC PUTs are - chunked because of the mime boundary footer trick and the first - half of the two-phase PUT conversation handling. + Putter for backend PUT requests. - An HTTP PUT request that supports streaming. + Encapsulates all the actions required to establish a connection with a + storage node and stream data to that node. - Probably deserves more docs than this, but meh. 
+
+    :param conn: an HTTPConnection instance
+    :param node: dict describing storage node
+    :param resp: an HTTPResponse instance if connect() received final response
+    :param path: the object path to send to the storage node
+    :param connect_duration: time taken to initiate the HTTPConnection
+    :param logger: a Logger instance
+    :param chunked: boolean indicating if the request encoding is chunked
     """
-    def __init__(self, conn, node, resp, path, connect_duration,
-                 mime_boundary):
+    def __init__(self, conn, node, resp, path, connect_duration, logger,
+                 chunked=False):
         # Note: you probably want to call Putter.connect() instead of
         # instantiating one of these directly.
         self.conn = conn
         self.node = node
-        self.resp = resp
+        self.resp = self.final_resp = resp
         self.path = path
         self.connect_duration = connect_duration
         # for handoff nodes node_index is None
         self.node_index = node.get('index')
-        self.mime_boundary = mime_boundary
-        self.chunk_hasher = md5()
         self.failed = False
         self.queue = None
         self.state = NO_DATA_SENT
+        self.chunked = chunked
+        self.logger = logger
 
     def await_response(self, timeout, informational=False):
         """
@@ -1419,16 +1465,20 @@
         a 100 Continue response and sent up the PUT request's body, then
         we'll actually read the 2xx-5xx response off the network here.
 
+        :param timeout: time to wait for a response
+        :param informational: if True then try to get a 100-continue response,
+            otherwise try to get a final response.
         :returns: HTTPResponse
         :raises: Timeout if the response took too long
         """
-        conn = self.conn
         with Timeout(timeout):
-            if not conn.resp:
+            # don't do this update of self.resp if the Expect response during
+            # connect() was actually a final response
+            if not self.final_resp:
                 if informational:
-                    self.resp = conn.getexpect()
+                    self.resp = self.conn.getexpect()
                 else:
-                    self.resp = conn.getresponse()
+                    self.resp = self.conn.getresponse()
         return self.resp
 
     def spawn_sender_greenthread(self, pool, queue_depth, write_timeout,
@@ -1441,9 +1491,10 @@
         if self.queue.unfinished_tasks:
             self.queue.join()
 
-    def _start_mime_doc_object_body(self):
-        self.queue.put("--%s\r\nX-Document: object body\r\n\r\n" %
-                       (self.mime_boundary,))
+    def _start_object_data(self):
+        # Called immediately before the first chunk of object data is sent.
+        # Subclasses may implement custom behaviour
+        pass
 
     def send_chunk(self, chunk):
         if not chunk:
@@ -1455,30 +1506,148 @@
         elif self.state == DATA_SENT:
             raise ValueError("called send_chunk after end_of_object_data")
 
-        if self.state == NO_DATA_SENT and self.mime_boundary:
-            # We're sending the object plus other stuff in the same request
-            # body, all wrapped up in multipart MIME, so we'd better start
-            # off the MIME document before sending any object data.
-            self._start_mime_doc_object_body()
+        if self.state == NO_DATA_SENT:
+            self._start_object_data()
             self.state = SENDING_DATA
 
         self.queue.put(chunk)
 
-    def end_of_object_data(self, footer_metadata):
+    def end_of_object_data(self, **kwargs):
+        """
+        Call when there is no more data to send.
+        """
+        if self.state == DATA_SENT:
+            raise ValueError("called end_of_object_data twice")
+
+        self.queue.put('')
+        self.state = DATA_SENT
+
+    def _send_file(self, write_timeout, exception_handler):
+        """
+        Method for a file PUT coroutine. Takes chunks from a queue and sends
+        them down a socket.
+
+        If something goes wrong, the "failed" attribute will be set to true
+        and the exception handler will be called.
+        """
+        while True:
+            chunk = self.queue.get()
+            if not self.failed:
+                if self.chunked:
+                    to_send = "%x\r\n%s\r\n" % (len(chunk), chunk)
+                else:
+                    to_send = chunk
+                try:
+                    with ChunkWriteTimeout(write_timeout):
+                        self.conn.send(to_send)
+                except (Exception, ChunkWriteTimeout):
+                    self.failed = True
+                    exception_handler(self.node, _('Object'),
+                                      _('Trying to write to %s') % self.path)
+
+            self.queue.task_done()
+
+    def close(self):
+        # release reference to response to ensure connection really does close,
+        # see bug https://bugs.launchpad.net/swift/+bug/1594739
+        self.resp = self.final_resp = None
+        self.conn.close()
+
+    @classmethod
+    def _make_connection(cls, node, part, path, headers, conn_timeout,
+                         node_timeout):
+        start_time = time.time()
+        with ConnectionTimeout(conn_timeout):
+            conn = http_connect(node['ip'], node['port'], node['device'],
+                                part, 'PUT', path, headers)
+        connect_duration = time.time() - start_time
+
+        with ResponseTimeout(node_timeout):
+            resp = conn.getexpect()
+
+        if resp.status == HTTP_INSUFFICIENT_STORAGE:
+            raise InsufficientStorage
+
+        if is_server_error(resp.status):
+            raise PutterConnectError(resp.status)
+
+        final_resp = None
+        if (is_success(resp.status) or
+                resp.status in (HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY) or
+                (headers.get('If-None-Match', None) is not None and
+                 resp.status == HTTP_PRECONDITION_FAILED)):
+            final_resp = resp
+
+        return conn, resp, final_resp, connect_duration
+
+    @classmethod
+    def connect(cls, node, part, path, headers, conn_timeout, node_timeout,
+                logger=None, chunked=False, **kwargs):
+        """
+        Connect to a backend node and send the headers.
+
+        :returns: Putter instance
+
+        :raises: ConnectionTimeout if initial connection timed out
+        :raises: ResponseTimeout if header retrieval timed out
+        :raises: InsufficientStorage on 507 response from node
+        :raises: PutterConnectError on non-507 server error response from node
+        """
+        conn, expect_resp, final_resp, connect_duration = cls._make_connection(
+            node, part, path, headers, conn_timeout, node_timeout)
+        return cls(conn, node, final_resp, path, connect_duration, logger,
+                   chunked=chunked)
+
+
+class MIMEPutter(Putter):
+    """
+    Putter for backend PUT requests that use MIME.
+
+    This is here mostly to wrap up the fact that all multipart PUTs are
+    chunked because of the mime boundary footer trick and the first
+    half of the two-phase PUT conversation handling.
+
+    An HTTP PUT request that supports streaming.
+    """
+    def __init__(self, conn, node, resp, req, connect_duration,
+                 logger, mime_boundary, multiphase=False):
+        super(MIMEPutter, self).__init__(conn, node, resp, req,
+                                         connect_duration, logger)
+        # Note: you probably want to call MIMEPutter.connect() instead of
+        # instantiating one of these directly.
+        self.chunked = True  # MIME requests always send chunked body
+        self.mime_boundary = mime_boundary
+        self.multiphase = multiphase
+
+    def _start_object_data(self):
+        # We're sending the object plus other stuff in the same request
+        # body, all wrapped up in multipart MIME, so we'd better start
+        # off the MIME document before sending any object data.
+        self.queue.put("--%s\r\nX-Document: object body\r\n\r\n" %
+                       (self.mime_boundary,))
+
+    def end_of_object_data(self, footer_metadata=None):
+        """
+        Call when there is no more data to send.
+
+        Overrides superclass implementation to send any footer metadata
+        after object data.
+
+        :param footer_metadata: dictionary of metadata items
+            to be sent as footers.
""" if self.state == DATA_SENT: raise ValueError("called end_of_object_data twice") elif self.state == NO_DATA_SENT and self.mime_boundary: - self._start_mime_doc_object_body() + self._start_object_data() footer_body = json.dumps(footer_metadata) footer_md5 = md5(footer_body).hexdigest() tail_boundary = ("--%s" % (self.mime_boundary,)) + if not self.multiphase: + # this will be the last part sent + tail_boundary = tail_boundary + "--" message_parts = [ ("\r\n--%s\r\n" % self.mime_boundary), @@ -1498,6 +1667,9 @@ class ECPutter(object): Call when there are > quorum 2XX responses received. Send commit confirmations to all object nodes to finalize the PUT. """ + if not self.multiphase: + raise ValueError( + "called send_commit_confirmation but multiphase is False") if self.state == COMMIT_SENT: raise ValueError("called send_commit_confirmation twice") @@ -1517,79 +1689,49 @@ class ECPutter(object): self.queue.put('') self.state = COMMIT_SENT - def _send_file(self, write_timeout, exception_handler): - """ - Method for a file PUT coro. Takes chunks from a queue and sends them - down a socket. - - If something goes wrong, the "failed" attribute will be set to true - and the exception handler will be called. - """ - while True: - chunk = self.queue.get() - if not self.failed: - to_send = "%x\r\n%s\r\n" % (len(chunk), chunk) - try: - with ChunkWriteTimeout(write_timeout): - self.conn.send(to_send) - except (Exception, ChunkWriteTimeout): - self.failed = True - exception_handler(self.conn.node, _('Object'), - _('Trying to write to %s') % self.path) - self.queue.task_done() - @classmethod - def connect(cls, node, part, path, headers, conn_timeout, node_timeout, - chunked=False, expected_frag_archive_size=None): + def connect(cls, node, part, req, headers, conn_timeout, node_timeout, + logger=None, need_multiphase=True, **kwargs): """ Connect to a backend node and send the headers. - :returns: Putter instance + Override superclass method to notify object of need for support for + multipart body with footers and optionally multiphase commit, and + verify object server's capabilities. - :raises: ConnectionTimeout if initial connection timed out - :raises: ResponseTimeout if header retrieval timed out - :raises: InsufficientStorage on 507 response from node - :raises: PutterConnectError on non-507 server error response from node + :param need_multiphase: if True then multiphase support is required of + the object server :raises: FooterNotSupported if need_metadata_footer is set but backend node can't process footers - :raises: MultiphasePUTNotSupported if need_multiphase_support is - set but backend node can't handle multiphase PUT + :raises: MultiphasePUTNotSupported if need_multiphase is set but + backend node can't handle multiphase PUT """ mime_boundary = "%.64x" % random.randint(0, 16 ** 64) headers = HeaderKeyDict(headers) + # when using a multipart mime request to backend the actual + # content-length is not equal to the object content size, so move the + # object content size to X-Backend-Obj-Content-Length if that has not + # already been set by the EC PUT path. + headers.setdefault('X-Backend-Obj-Content-Length', + headers.pop('Content-Length', None)) # We're going to be adding some unknown amount of data to the # request, so we can't use an explicit content length, and thus # we must use chunked encoding. 
headers['Transfer-Encoding'] = 'chunked' headers['Expect'] = '100-continue' - # make sure this isn't there - headers.pop('Content-Length') - headers['X-Backend-Obj-Content-Length'] = expected_frag_archive_size - headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary headers['X-Backend-Obj-Metadata-Footer'] = 'yes' - headers['X-Backend-Obj-Multiphase-Commit'] = 'yes' + if need_multiphase: + headers['X-Backend-Obj-Multiphase-Commit'] = 'yes' - start_time = time.time() - with ConnectionTimeout(conn_timeout): - conn = http_connect(node['ip'], node['port'], node['device'], - part, 'PUT', path, headers) - connect_duration = time.time() - start_time + conn, expect_resp, final_resp, connect_duration = cls._make_connection( + node, part, req, headers, conn_timeout, node_timeout) - with ResponseTimeout(node_timeout): - resp = conn.getexpect() - - if resp.status == HTTP_INSUFFICIENT_STORAGE: - raise InsufficientStorage - - if is_server_error(resp.status): - raise PutterConnectError(resp.status) - - if is_informational(resp.status): - continue_headers = HeaderKeyDict(resp.getheaders()) + if is_informational(expect_resp.status): + continue_headers = HeaderKeyDict(expect_resp.getheaders()) can_send_metadata_footer = config_true_value( continue_headers.get('X-Obj-Metadata-Footer', 'no')) can_handle_multiphase_put = config_true_value( @@ -1598,18 +1740,11 @@ class ECPutter(object): if not can_send_metadata_footer: raise FooterNotSupported() - if not can_handle_multiphase_put: + if need_multiphase and not can_handle_multiphase_put: raise MultiphasePUTNotSupported() - conn.node = node - conn.resp = None - if is_success(resp.status) or resp.status == HTTP_CONFLICT: - conn.resp = resp - elif (headers.get('If-None-Match', None) is not None and - resp.status == HTTP_PRECONDITION_FAILED): - conn.resp = resp - - return cls(conn, node, resp, path, connect_duration, mime_boundary) + return cls(conn, node, final_resp, req, connect_duration, logger, + mime_boundary, multiphase=need_multiphase) def chunk_transformer(policy, nstreams): @@ -1674,7 +1809,7 @@ def chunk_transformer(policy, nstreams): def trailing_metadata(policy, client_obj_hasher, bytes_transferred_from_client, fragment_archive_index): - return { + return HeaderKeyDict({ # etag and size values are being added twice here. # The container override header is used to update the container db # with these values as they represent the correct etag and size for @@ -1692,7 +1827,7 @@ def trailing_metadata(policy, client_obj_hasher, # AKA "what is this thing?" 'X-Object-Sysmeta-EC-Scheme': policy.ec_scheme_description, 'X-Object-Sysmeta-EC-Segment-Size': str(policy.ec_segment_size), - } + }) @ObjectControllerRouter.register(EC_POLICY) @@ -1764,8 +1899,7 @@ class ECObjectController(BaseObjectController): return range_specs def _get_or_head_response(self, req, node_iter, partition, policy): - req.headers.setdefault("X-Backend-Etag-Is-At", - "X-Object-Sysmeta-Ec-Etag") + update_etag_is_at_header(req, "X-Object-Sysmeta-Ec-Etag") if req.method == 'HEAD': # no fancy EC decoding here, just one plain old HEAD request to @@ -1862,14 +1996,18 @@ class ECObjectController(BaseObjectController): resp = self.best_response( req, statuses, reasons, bodies, 'Object', headers=headers) - self._fix_response(resp) + self._fix_response(req, resp) return resp - def _fix_response(self, resp): + def _fix_response(self, req, resp): # EC fragment archives each have different bytes, hence different # etags. 
However, they all have the original object's etag stored in # sysmeta, so we copy that here (if it exists) so the client gets it. resp.headers['Etag'] = resp.headers.get('X-Object-Sysmeta-Ec-Etag') + # We're about to invoke conditional response checking so set the + # correct conditional etag from wherever X-Backend-Etag-Is-At points, + # if it exists at all. + resp._conditional_etag = resolve_etag_is_at_header(req, resp.headers) if (is_success(resp.status_int) or is_redirection(resp.status_int) or resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE): resp.accept_ranges = 'bytes' @@ -1878,66 +2016,13 @@ class ECObjectController(BaseObjectController): 'X-Object-Sysmeta-Ec-Content-Length') resp.fix_conditional_response() - def _connect_put_node(self, node_iter, part, path, headers, - logger_thread_locals): - """ - Make a connection for a erasure encoded object. - - Connects to the first working node that it finds in node_iter and sends - over the request headers. Returns a Putter to handle the rest of the - streaming, or None if no working nodes were found. - """ - # the object server will get different bytes, so these - # values do not apply (Content-Length might, in general, but - # in the specific case of replication vs. EC, it doesn't). - client_cl = headers.pop('Content-Length', None) - headers.pop('Etag', None) - - expected_frag_size = None - if client_cl: - policy_index = int(headers.get('X-Backend-Storage-Policy-Index')) - policy = POLICIES.get_by_index(policy_index) - # TODO: PyECLib <= 1.2.0 looks to return the segment info - # different from the input for aligned data efficiency but - # Swift never does. So calculate the fragment length Swift - # will actually send to object sever by making two different - # get_segment_info calls (until PyECLib fixed). 
-        # policy.fragment_size makes the call using segment size,
-        # and the next call is to get info for the last segment
-
-        # get number of fragments except the tail - use truncation //
-        num_fragments = int(client_cl) // policy.ec_segment_size
-        expected_frag_size = policy.fragment_size * num_fragments
-
-        # calculate the tail fragment_size by hand and add it to
-        # expected_frag_size
-        last_segment_size = int(client_cl) % policy.ec_segment_size
-        if last_segment_size:
-            last_info = policy.pyeclib_driver.get_segment_info(
-                last_segment_size, policy.ec_segment_size)
-            expected_frag_size += last_info['fragment_size']
-
-        self.app.logger.thread_locals = logger_thread_locals
-        for node in node_iter:
-            try:
-                putter = ECPutter.connect(
-                    node, part, path, headers,
-                    conn_timeout=self.app.conn_timeout,
-                    node_timeout=self.app.node_timeout,
-                    expected_frag_archive_size=expected_frag_size)
-                self.app.set_node_timing(node, putter.connect_duration)
-                return putter
-            except InsufficientStorage:
-                self.app.error_limit(node, _('ERROR Insufficient Storage'))
-            except PutterConnectError as e:
-                self.app.error_occurred(
-                    node, _('ERROR %(status)d Expect: 100-continue '
-                            'From Object Server') % {
-                                'status': e.status})
-            except (Exception, Timeout):
-                self.app.exception_occurred(
-                    node, _('Object'),
-                    _('Expect: 100-continue on %s') % path)
+    def _make_putter(self, node, part, req, headers):
+        return MIMEPutter.connect(
+            node, part, req.swift_entity_path, headers,
+            conn_timeout=self.app.conn_timeout,
+            node_timeout=self.app.node_timeout,
+            logger=self.app.logger,
+            need_multiphase=True)
 
     def _determine_chunk_destinations(self, putters):
         """
@@ -1985,8 +2070,16 @@
         bytes_transferred = 0
         chunk_transform = chunk_transformer(policy, len(nodes))
         chunk_transform.send(None)
+        chunk_hashers = collections.defaultdict(md5)
 
         def send_chunk(chunk):
+            # Note: there are two different hashers in here. etag_hasher is
+            # hashing the original object so that we can validate the ETag
+            # that the client sent (and etag_hasher is None if the client
+            # didn't send one). The hasher in chunk_hashers is hashing the
+            # fragment archive being sent to the object server; this lets
+            # us guard against data corruption on the network between proxy
+            # and object server.
if etag_hasher: etag_hasher.update(chunk) backend_chunks = chunk_transform.send(chunk) @@ -1996,15 +2089,18 @@ class ECObjectController(BaseObjectController): return for putter in list(putters): - backend_chunk = backend_chunks[chunk_index[putter]] + ci = chunk_index[putter] + backend_chunk = backend_chunks[ci] if not putter.failed: - putter.chunk_hasher.update(backend_chunk) + chunk_hashers[ci].update(backend_chunk) putter.send_chunk(backend_chunk) else: + putter.close() putters.remove(putter) self._check_min_conn( - req, putters, min_conns, msg='Object PUT exceptions during' - ' send, %(conns)s/%(nodes)s required connections') + req, putters, min_conns, + msg=_('Object PUT exceptions during send, ' + '%(conns)s/%(nodes)s required connections')) try: with ContextPool(len(putters)) as pool: @@ -2047,14 +2143,26 @@ class ECObjectController(BaseObjectController): send_chunk('') # flush out any buffered data + footers = self._get_footers(req) + received_etag = footers.get( + 'etag', '').strip('"') + if (computed_etag and received_etag and + computed_etag != received_etag): + raise HTTPUnprocessableEntity(request=req) + + # Remove any EC reserved metadata names from footers + footers = {(k, v) for k, v in footers.items() + if not k.lower().startswith('x-object-sysmeta-ec-')} for putter in putters: + ci = chunk_index[putter] + # Update any footers set by middleware with EC footers trail_md = trailing_metadata( policy, etag_hasher, - bytes_transferred, - chunk_index[putter]) - trail_md['Etag'] = \ - putter.chunk_hasher.hexdigest() - putter.end_of_object_data(trail_md) + bytes_transferred, ci) + trail_md.update(footers) + # Etag footer must always be hash of what we sent + trail_md['Etag'] = chunk_hashers[ci].hexdigest() + putter.end_of_object_data(footer_metadata=trail_md) for putter in putters: putter.wait() @@ -2065,12 +2173,12 @@ class ECObjectController(BaseObjectController): # object data and metadata commit and is a necessary # condition to be met before starting 2nd PUT phase final_phase = False - need_quorum = True - statuses, reasons, bodies, _junk, quorum = \ + statuses, reasons, bodies, _junk = \ self._get_put_responses( - req, putters, len(nodes), final_phase, - min_conns, need_quorum=need_quorum) - if not quorum: + req, putters, len(nodes), final_phase=final_phase, + min_responses=min_conns) + if not self.have_quorum( + statuses, len(nodes), quorum=min_conns): self.app.logger.error( _('Not enough object servers ack\'ed (got %d)'), statuses.count(HTTP_CONTINUE)) @@ -2153,109 +2261,15 @@ class ECObjectController(BaseObjectController): return self._have_adequate_responses( statuses, min_responses, is_informational) - def _await_response(self, conn, final_phase): - return conn.await_response( - self.app.node_timeout, not final_phase) - - def _get_conn_response(self, conn, req, logger_thread_locals, - final_phase, **kwargs): - self.app.logger.thread_locals = logger_thread_locals - try: - resp = self._await_response(conn, final_phase=final_phase, - **kwargs) - except (Exception, Timeout): - resp = None - if final_phase: - status_type = 'final' - else: - status_type = 'commit' - self.app.exception_occurred( - conn.node, _('Object'), - _('Trying to get %(status_type)s status of PUT to %(path)s') % - {'status_type': status_type, 'path': req.path}) - return (conn, resp) - - def _get_put_responses(self, req, putters, num_nodes, final_phase, - min_responses, need_quorum=True): - """ - Collect erasure coded object responses. 
- - Collect object responses to a PUT request and determine if - satisfactory number of nodes have returned success. Return - statuses, quorum result if indicated by 'need_quorum' and - etags if this is a final phase or a multiphase PUT transaction. - - :param req: the request - :param putters: list of putters for the request - :param num_nodes: number of nodes involved - :param final_phase: boolean indicating if this is the last phase - :param min_responses: minimum needed when not requiring quorum - :param need_quorum: boolean indicating if quorum is required - """ - statuses = [] - reasons = [] - bodies = [] - etags = set() - - pile = GreenAsyncPile(len(putters)) - for putter in putters: - if putter.failed: - continue - pile.spawn(self._get_conn_response, putter, req, - self.app.logger.thread_locals, final_phase=final_phase) - - def _handle_response(putter, response): - statuses.append(response.status) - reasons.append(response.reason) - if final_phase: - body = response.read() - else: - body = '' - bodies.append(body) - if response.status == HTTP_INSUFFICIENT_STORAGE: - putter.failed = True - self.app.error_limit(putter.node, - _('ERROR Insufficient Storage')) - elif response.status >= HTTP_INTERNAL_SERVER_ERROR: - putter.failed = True - self.app.error_occurred( - putter.node, - _('ERROR %(status)d %(body)s From Object Server ' - 're: %(path)s') % - {'status': response.status, - 'body': body[:1024], 'path': req.path}) - elif is_success(response.status): - etags.add(response.getheader('etag').strip('"')) - - quorum = False - for (putter, response) in pile: - if response: - _handle_response(putter, response) - if self._have_adequate_successes(statuses, min_responses): - break - else: - putter.failed = True - - # give any pending requests *some* chance to finish - finished_quickly = pile.waitall(self.app.post_quorum_timeout) - for (putter, response) in finished_quickly: - if response: - _handle_response(putter, response) - - if need_quorum: - if final_phase: - while len(statuses) < num_nodes: - statuses.append(HTTP_SERVICE_UNAVAILABLE) - reasons.append('') - bodies.append('') - else: - # intermediate response phase - set return value to true only - # if there are responses having same value of *any* status - # except 5xx - if self.have_quorum(statuses, num_nodes, quorum=min_responses): - quorum = True - - return statuses, reasons, bodies, etags, quorum + def _have_adequate_put_responses(self, statuses, num_nodes, min_responses): + # For an EC PUT we require a quorum of responses with success statuses + # in order to move on to next phase of PUT request handling without + # having to wait for *all* responses. + # TODO: this implies that in the first phase of the backend PUTs when + # we are actually expecting 1xx responses that we will end up waiting + # for *all* responses. That seems inefficient since we only need a + # quorum of 1xx responses to proceed. + return self._have_adequate_successes(statuses, min_responses) def _store_object(self, req, data_source, nodes, partition, outgoing_headers): @@ -2264,6 +2278,35 @@ class ECObjectController(BaseObjectController): """ policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index')) policy = POLICIES.get_by_index(policy_index) + + expected_frag_size = None + if req.content_length: + # TODO: PyECLib <= 1.2.0 looks to return the segment info + # different from the input for aligned data efficiency but + # Swift never does. 
So calculate the fragment length Swift
+            # will actually send to object server by making two different
+            # get_segment_info calls (until PyECLib fixed).
+            # policy.fragment_size makes the call using segment size,
+            # and the next call is to get info for the last segment
+
+            # get number of fragments except the tail - use truncation //
+            num_fragments = req.content_length // policy.ec_segment_size
+            expected_frag_size = policy.fragment_size * num_fragments
+
+            # calculate the tail fragment_size by hand and add it to
+            # expected_frag_size
+            last_segment_size = req.content_length % policy.ec_segment_size
+            if last_segment_size:
+                last_info = policy.pyeclib_driver.get_segment_info(
+                    last_segment_size, policy.ec_segment_size)
+                expected_frag_size += last_info['fragment_size']
+
+        for headers in outgoing_headers:
+            headers['X-Backend-Obj-Content-Length'] = expected_frag_size
+            # the object server will get different bytes, so these
+            # values do not apply.
+            headers.pop('Content-Length', None)
+            headers.pop('Etag', None)
+
         # Since the request body sent from client -> proxy is not
         # the same as the request body sent proxy -> object, we
         # can't rely on the object-server to do the etag checking -
@@ -2272,18 +2315,15 @@
         min_conns = policy.quorum
 
         putters = self._get_put_connections(
-            req, nodes, partition, outgoing_headers,
-            policy, expect=True)
+            req, nodes, partition, outgoing_headers, policy)
 
         try:
             # check that a minimum number of connections were established and
             # meet all the correct conditions set in the request
-            self._check_failure_put_connections(putters, req, nodes, min_conns)
+            self._check_failure_put_connections(putters, req, min_conns)
 
             self._transfer_data(req, policy, data_source, putters,
                                 nodes, min_conns, etag_hasher)
 
-            final_phase = True
-            need_quorum = False
             # The .durable file will propagate in a replicated fashion; if
             # one exists, the reconstructor will spread it around.
             # In order to avoid successfully writing an object, but refusing
             # to serve it shortly after, do not count deleted or unfindable
             # durable writes as quorum fragment writes. If object servers are
             # in the future able to serve their non-durable fragment archives
             # we may be able to reduce this quorum count if needed.
- min_conns = policy.quorum - putters = [p for p in putters if not p.failed] - # ignore response etags, and quorum boolean - statuses, reasons, bodies, _etags, _quorum = \ + # ignore response etags + statuses, reasons, bodies, _etags = \ self._get_put_responses(req, putters, len(nodes), - final_phase, min_conns, - need_quorum=need_quorum) + final_phase=True, + min_responses=min_conns) except HTTPException as resp: return resp + finally: + for putter in putters: + putter.close() etag = etag_hasher.hexdigest() resp = self.best_response(req, statuses, reasons, bodies, diff --git a/test/unit/__init__.py b/test/unit/__init__.py index c4c833a79c..acc3c8612f 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -32,6 +32,8 @@ import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree + + from swift.common.utils import Timestamp, NOTICE from test import get_config from swift.common import utils @@ -848,7 +850,7 @@ def fake_http_connect(*code_iter, **kwargs): def __init__(self, status, etag=None, body='', timestamp='1', headers=None, expect_headers=None, connection_id=None, - give_send=None): + give_send=None, give_expect=None): if not isinstance(status, FakeStatus): status = FakeStatus(status) self._status = status @@ -864,6 +866,8 @@ def fake_http_connect(*code_iter, **kwargs): self.timestamp = timestamp self.connection_id = connection_id self.give_send = give_send + self.give_expect = give_expect + self.closed = False if 'slow' in kwargs and isinstance(kwargs['slow'], list): try: self._next_sleep = kwargs['slow'].pop(0) @@ -884,6 +888,8 @@ def fake_http_connect(*code_iter, **kwargs): return self def getexpect(self): + if self.give_expect: + self.give_expect(self) expect_status = self._status.get_expect_status() headers = dict(self.expect_headers) if expect_status == 409: @@ -953,7 +959,7 @@ def fake_http_connect(*code_iter, **kwargs): def send(self, amt=None): if self.give_send: - self.give_send(self.connection_id, amt) + self.give_send(self, amt) am_slow, value = self.get_slow() if am_slow: if self.received < 4: @@ -964,7 +970,7 @@ def fake_http_connect(*code_iter, **kwargs): return HeaderKeyDict(self.getheaders()).get(name, default) def close(self): - pass + self.closed = True timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter)) etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter)) @@ -1017,7 +1023,8 @@ def fake_http_connect(*code_iter, **kwargs): body = next(body_iter) return FakeConn(status, etag, body=body, timestamp=timestamp, headers=headers, expect_headers=expect_headers, - connection_id=i, give_send=kwargs.get('give_send')) + connection_id=i, give_send=kwargs.get('give_send'), + give_expect=kwargs.get('give_expect')) connect.code_iter = code_iter diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index e542818967..8b8fff3b3d 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -16,7 +16,6 @@ # This stuff can't live in test/unit/__init__.py due to its swob dependency. 
from collections import defaultdict -from copy import deepcopy from hashlib import md5 from swift.common import swob from swift.common.header_key_dict import HeaderKeyDict @@ -113,24 +112,34 @@ class FakeSwift(object): raise KeyError("Didn't find %r in allowed responses" % ( (method, path),)) - self._calls.append((method, path, req_headers)) - # simulate object PUT if method == 'PUT' and obj: - input = env['wsgi.input'].read() + input = ''.join(iter(env['wsgi.input'].read, '')) + if 'swift.callback.update_footers' in env: + footers = HeaderKeyDict() + env['swift.callback.update_footers'](footers) + req_headers.update(footers) etag = md5(input).hexdigest() headers.setdefault('Etag', etag) headers.setdefault('Content-Length', len(input)) # keep it for subsequent GET requests later - self.uploaded[path] = (deepcopy(headers), input) + self.uploaded[path] = (dict(req_headers), input) if "CONTENT_TYPE" in env: self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"] - # range requests ought to work, which require conditional_response=True + self._calls.append((method, path, HeaderKeyDict(req_headers))) + + # range requests ought to work, hence conditional_response=True req = swob.Request(env) - resp = resp_class(req=req, headers=headers, body=body, - conditional_response=req.method in ('GET', 'HEAD')) + if isinstance(body, list): + resp = resp_class( + req=req, headers=headers, app_iter=body, + conditional_response=req.method in ('GET', 'HEAD')) + else: + resp = resp_class( + req=req, headers=headers, body=body, + conditional_response=req.method in ('GET', 'HEAD')) wsgi_iter = resp(env, start_response) self.mark_opened(path) return LeakTrackingIter(wsgi_iter, self, path) diff --git a/test/unit/common/test_request_helpers.py b/test/unit/common/test_request_helpers.py index c13bc03ca9..1c39e9f0af 100644 --- a/test/unit/common/test_request_helpers.py +++ b/test/unit/common/test_request_helpers.py @@ -21,7 +21,8 @@ from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY from swift.common.request_helpers import is_sys_meta, is_user_meta, \ is_sys_or_user_meta, strip_sys_meta_prefix, strip_user_meta_prefix, \ remove_items, copy_header_subset, get_name_and_placement, \ - http_response_to_document_iters + http_response_to_document_iters, update_etag_is_at_header, \ + resolve_etag_is_at_header from test.unit import patch_policies from test.unit.common.test_utils import FakeResponse @@ -273,3 +274,74 @@ class TestHTTPResponseToDocumentIters(unittest.TestCase): self.assertEqual(body.read(), 'ches') self.assertRaises(StopIteration, next, doc_iters) + + def test_update_etag_is_at_header(self): + # start with no existing X-Backend-Etag-Is-At + req = Request.blank('/v/a/c/o') + update_etag_is_at_header(req, 'X-Object-Sysmeta-My-Etag') + self.assertEqual('X-Object-Sysmeta-My-Etag', + req.headers['X-Backend-Etag-Is-At']) + # add another alternate + update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag') + self.assertEqual('X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag', + req.headers['X-Backend-Etag-Is-At']) + with self.assertRaises(ValueError) as cm: + update_etag_is_at_header(req, 'X-Object-Sysmeta-,-Bad') + self.assertEqual('Header name must not contain commas', + cm.exception.message) + + def test_resolve_etag_is_at_header(self): + def do_test(): + req = Request.blank('/v/a/c/o') + # ok to have no X-Backend-Etag-Is-At + self.assertIsNone(resolve_etag_is_at_header(req, metadata)) + + # ok to have no matching metadata + req.headers['X-Backend-Etag-Is-At'] = 'X-Not-There' + 
self.assertIsNone(resolve_etag_is_at_header(req, metadata)) + + # selects from metadata + req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-Ec-Etag' + self.assertEqual('an etag value', + resolve_etag_is_at_header(req, metadata)) + req.headers['X-Backend-Etag-Is-At'] = 'X-Object-Sysmeta-My-Etag' + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + # first in list takes precedence + req.headers['X-Backend-Etag-Is-At'] = \ + 'X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag' + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + # non-existent alternates are passed over + req.headers['X-Backend-Etag-Is-At'] = \ + 'X-Bogus,X-Object-Sysmeta-My-Etag,X-Object-Sysmeta-Ec-Etag' + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + # spaces in list are ok + alts = 'X-Foo, X-Object-Sysmeta-My-Etag , X-Object-Sysmeta-Ec-Etag' + req.headers['X-Backend-Etag-Is-At'] = alts + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + # lower case in list is ok + alts = alts.lower() + req.headers['X-Backend-Etag-Is-At'] = alts + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + # upper case in list is ok + alts = alts.upper() + req.headers['X-Backend-Etag-Is-At'] = alts + self.assertEqual('another etag value', + resolve_etag_is_at_header(req, metadata)) + + metadata = {'X-Object-Sysmeta-Ec-Etag': 'an etag value', + 'X-Object-Sysmeta-My-Etag': 'another etag value'} + do_test() + metadata = dict((k.lower(), v) for k, v in metadata.items()) + do_test() + metadata = dict((k.upper(), v) for k, v in metadata.items()) + do_test() diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 24eba9956a..b85230f395 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -2385,6 +2385,7 @@ class TestObjectController(unittest.TestCase): 'X-Timestamp': utils.Timestamp(time()).internal, 'Content-Type': 'application/octet-stream', 'X-Object-Meta-Xtag': 'madeup', + 'X-Object-Sysmeta-Xtag': 'alternate madeup', } req = Request.blank('/sda1/p/a/c/o', method='PUT', headers=headers) @@ -2400,6 +2401,39 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 200) + # match x-backend-etag-is-at, using first in list of alternates + req = Request.blank('/sda1/p/a/c/o', headers={ + 'If-Match': 'madeup', + 'X-Backend-Etag-Is-At': + 'X-Object-Meta-Xtag,X-Object-Sysmeta-Z'}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 200) + + # match x-backend-etag-is-at, using second in list of alternates + alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Z' + req = Request.blank('/sda1/p/a/c/o', headers={ + 'If-Match': 'madeup', + 'X-Backend-Etag-Is-At': alts}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 200) + + # match x-backend-etag-is-at, choosing first of multiple alternates + alts = 'X-Object-Sysmeta-Y,X-Object-Meta-Xtag,X-Object-Sysmeta-Xtag' + req = Request.blank('/sda1/p/a/c/o', headers={ + 'If-Match': 'madeup', + 'X-Backend-Etag-Is-At': alts}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 200) + + # match x-backend-etag-is-at, choosing first of multiple alternates + # (switches order of second two alternates from previous assertion) + alts = 'X-Object-Sysmeta-Y,X-Object-Sysmeta-Xtag,X-Object-Meta-Xtag' + req = 
Request.blank('/sda1/p/a/c/o', headers={ + 'If-Match': 'alternate madeup', + 'X-Backend-Etag-Is-At': alts}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 200) + # no match x-backend-etag-is-at req = Request.blank('/sda1/p/a/c/o', headers={ 'If-Match': real_etag, diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index be0893dbb2..4495fb0c68 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -122,6 +122,27 @@ class PatchedObjControllerApp(proxy_server.Application): PatchedObjControllerApp, self).__call__(*args, **kwargs) +def make_footers_callback(body=None): + # helper method to create a footers callback that will generate some fake + # footer metadata + cont_etag = 'container update etag may differ' + crypto_etag = '20242af0cd21dd7195a10483eb7472c9' + etag_crypto_meta = \ + '{"cipher": "AES_CTR_256", "iv": "sD+PSw/DfqYwpsVGSo0GEw=="}' + etag = md5(body).hexdigest() if body is not None else None + footers_to_add = { + 'X-Object-Sysmeta-Container-Update-Override-Etag': cont_etag, + 'X-Object-Sysmeta-Crypto-Etag': crypto_etag, + 'X-Object-Sysmeta-Crypto-Meta-Etag': etag_crypto_meta, + 'X-I-Feel-Lucky': 'Not blocked', + 'Etag': etag} + + def footers_callback(footers): + footers.update(footers_to_add) + + return footers_callback + + class BaseObjectControllerMixin(object): container_info = { 'status': 200, @@ -253,10 +274,11 @@ class BaseObjectControllerMixin(object): def test_connect_put_node_timeout(self): controller = self.controller_cls( self.app, 'a', 'c', 'o') + req = swift.common.swob.Request.blank('/v1/a/c/o') self.app.conn_timeout = 0.05 with set_http_connect(slow_connect=True): nodes = [dict(ip='', port='', device='')] - res = controller._connect_put_node(nodes, '', '', {}, ('', '')) + res = controller._connect_put_node(nodes, '', req, {}, ('', '')) self.assertTrue(res is None) def test_DELETE_simple(self): @@ -564,6 +586,163 @@ class TestReplicatedObjController(BaseObjectControllerMixin, resp = req.get_response(self.app) self.assertEqual(resp.status_int, 201) + def test_PUT_error_with_footers(self): + footers_callback = make_footers_callback('') + env = {'swift.callback.update_footers': footers_callback} + req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', + environ=env) + req.headers['content-length'] = '0' + codes = [503] * self.replicas() + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes' + } + + with set_http_connect(*codes, expect_headers=expect_headers): + resp = req.get_response(self.app) + self.assertEqual(resp.status_int, 503) + + def _test_PUT_with_no_footers(self, test_body='', chunked=False): + # verify that when no footers are required then the PUT uses a regular + # single part body + req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', + body=test_body) + if chunked: + req.headers['Transfer-Encoding'] = 'chunked' + etag = md5(test_body).hexdigest() + req.headers['Etag'] = etag + + put_requests = defaultdict( + lambda: {'headers': None, 'chunks': [], 'connection': None}) + + def capture_body(conn, chunk): + put_requests[conn.connection_id]['chunks'].append(chunk) + put_requests[conn.connection_id]['connection'] = conn + + def capture_headers(ip, port, device, part, method, path, headers, + **kwargs): + conn_id = kwargs['connection_id'] + put_requests[conn_id]['headers'] = headers + + codes = [201] * self.replicas() + expect_headers = {'X-Obj-Metadata-Footer': 'yes'} + with set_http_connect(*codes, 
expect_headers=expect_headers, + give_send=capture_body, + give_connect=capture_headers): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + for connection_id, info in put_requests.items(): + body = ''.join(info['chunks']) + headers = info['headers'] + if chunked: + body = unchunk_body(body) + self.assertEqual('100-continue', headers['Expect']) + self.assertEqual('chunked', headers['Transfer-Encoding']) + else: + self.assertNotIn('Transfer-Encoding', headers) + if body: + self.assertEqual('100-continue', headers['Expect']) + else: + self.assertNotIn('Expect', headers) + self.assertNotIn('X-Backend-Obj-Multipart-Mime-Boundary', headers) + self.assertNotIn('X-Backend-Obj-Metadata-Footer', headers) + self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers) + self.assertEqual(etag, headers['Etag']) + + self.assertEqual(test_body, body) + self.assertTrue(info['connection'].closed) + + def test_PUT_with_chunked_body_and_no_footers(self): + self._test_PUT_with_no_footers(test_body='asdf', chunked=True) + + def test_PUT_with_body_and_no_footers(self): + self._test_PUT_with_no_footers(test_body='asdf', chunked=False) + + def test_PUT_with_no_body_and_no_footers(self): + self._test_PUT_with_no_footers(test_body='', chunked=False) + + def _test_PUT_with_footers(self, test_body=''): + # verify that when footers are required the PUT body is multipart + # and the footers are appended + footers_callback = make_footers_callback(test_body) + env = {'swift.callback.update_footers': footers_callback} + req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', + environ=env) + req.body = test_body + # send bogus Etag header to differentiate from footer value + req.headers['Etag'] = 'header_etag' + codes = [201] * self.replicas() + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes' + } + + put_requests = defaultdict( + lambda: {'headers': None, 'chunks': [], 'connection': None}) + + def capture_body(conn, chunk): + put_requests[conn.connection_id]['chunks'].append(chunk) + put_requests[conn.connection_id]['connection'] = conn + + def capture_headers(ip, port, device, part, method, path, headers, + **kwargs): + conn_id = kwargs['connection_id'] + put_requests[conn_id]['headers'] = headers + + with set_http_connect(*codes, expect_headers=expect_headers, + give_send=capture_body, + give_connect=capture_headers): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + for connection_id, info in put_requests.items(): + body = unchunk_body(''.join(info['chunks'])) + headers = info['headers'] + boundary = headers['X-Backend-Obj-Multipart-Mime-Boundary'] + self.assertTrue(boundary is not None, + "didn't get boundary for conn %r" % ( + connection_id,)) + self.assertEqual('chunked', headers['Transfer-Encoding']) + self.assertEqual('100-continue', headers['Expect']) + self.assertEqual('yes', headers['X-Backend-Obj-Metadata-Footer']) + self.assertNotIn('X-Backend-Obj-Multiphase-Commit', headers) + self.assertEqual('header_etag', headers['Etag']) + + # email.parser.FeedParser doesn't know how to take a multipart + # message and boundary together and parse it; it only knows how + # to take a string, parse the headers, and figure out the + # boundary on its own. 
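For orientation, the multipart body assembled by the proxy and parsed below looks roughly like this on the wire (boundary and payload values illustrative; the X-Document names are the ones asserted in this test):

    --mime-boundary-123
    X-Document: object body

    asdf
    --mime-boundary-123
    X-Document: object metadata

    {"Etag": "912ec803b2ce49e4a541068d495ab570", "X-I-Feel-Lucky": "Not blocked"}
    --mime-boundary-123--

In the replicated case shown here there are just these two parts; the EC variant later in this patch appends a third, commit-confirmation part.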
+ parser = email.parser.FeedParser() + parser.feed( + "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" % + boundary) + parser.feed(body) + message = parser.close() + + self.assertTrue(message.is_multipart()) # sanity check + mime_parts = message.get_payload() + # notice, no commit confirmation + self.assertEqual(len(mime_parts), 2) + obj_part, footer_part = mime_parts + + self.assertEqual(obj_part['X-Document'], 'object body') + self.assertEqual(test_body, obj_part.get_payload()) + + # validate footer metadata + self.assertEqual(footer_part['X-Document'], 'object metadata') + footer_metadata = json.loads(footer_part.get_payload()) + self.assertTrue(footer_metadata) + expected = {} + footers_callback(expected) + self.assertDictEqual(expected, footer_metadata) + + self.assertTrue(info['connection'].closed) + + def test_PUT_with_body_and_footers(self): + self._test_PUT_with_footers(test_body='asdf') + + def test_PUT_with_no_body_and_footers(self): + self._test_PUT_with_footers() + def test_txn_id_logging_on_PUT(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id' @@ -585,11 +764,15 @@ class TestReplicatedObjController(BaseObjectControllerMixin, req.headers['Content-Length'] = '0' req.headers['Etag'] = '"catbus"' - # The 2-tuple here makes getexpect() return 422, not 100. For - # objects that are >0 bytes, you get a 100 Continue and then a 422 - # Unprocessable Entity after sending the body. For zero-byte - # objects, though, you get the 422 right away. - codes = [FakeStatus((422, 422)) + # The 2-tuple here makes getexpect() return 422, not 100. For objects + # that are >0 bytes, you get a 100 Continue and then a 422 + # Unprocessable Entity after sending the body. For zero-byte objects, + # though, you get the 422 right away because no Expect header is sent + # with zero-byte PUT. The second status in the tuple should not be + # consumed, it's just there to make the FakeStatus treat the first as + # an expect status, but we'll make it something other than a 422 so + # that if it is consumed then the test should fail. 
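Restated: a zero-byte PUT sends no Expect header, so the fake's expect status is the only status the proxy ever reads. The tuple convention keeps the two roles separate:

    # a 2-tuple FakeStatus serves its first value from getexpect() and
    # reserves the second for the final response; a passing run never
    # reads the second value, so making it differ from 422 means the
    # test fails loudly if it ever is consumed
    status = FakeStatus((422, 200))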
+ codes = [FakeStatus((422, 200)) for _junk in range(self.replicas())] with set_http_connect(*codes): @@ -707,16 +890,24 @@ class TestReplicatedObjController(BaseObjectControllerMixin, class FakeReader(object): def read(self, size): raise Timeout() + conns = [] + + def capture_expect(conn): + # stash connections so that we can verify they all get closed + conns.append(conn) req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT', body='test body') req.environ['wsgi.input'] = FakeReader() req.headers['content-length'] = '6' - with set_http_connect(201, 201, 201): + with set_http_connect(201, 201, 201, give_expect=capture_expect): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 499) + self.assertEqual(self.replicas(), len(conns)) + for conn in conns: + self.assertTrue(conn.closed) def test_PUT_exception_during_transfer_data(self): class FakeReader(object): @@ -1131,6 +1322,108 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(resp.status_int, 200) self.assertIn('Accept-Ranges', resp.headers) + def _test_if_match(self, method): + num_responses = self.policy.ec_ndata if method == 'GET' else 1 + + def _do_test(match_value, backend_status, + etag_is_at='X-Object-Sysmeta-Does-Not-Exist'): + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method=method, + headers={'If-Match': match_value, + 'X-Backend-Etag-Is-At': etag_is_at}) + get_resp = [backend_status] * num_responses + resp_headers = {'Etag': 'frag_etag', + 'X-Object-Sysmeta-Ec-Etag': 'data_etag', + 'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'} + with set_http_connect(*get_resp, headers=resp_headers): + resp = req.get_response(self.app) + self.assertEqual('data_etag', resp.headers['Etag']) + return resp + + # wildcard + resp = _do_test('*', 200) + self.assertEqual(resp.status_int, 200) + + # match + resp = _do_test('"data_etag"', 200) + self.assertEqual(resp.status_int, 200) + + # no match + resp = _do_test('"frag_etag"', 412) + self.assertEqual(resp.status_int, 412) + + # match wildcard against an alternate etag + resp = _do_test('*', 200, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 200) + + # match against an alternate etag + resp = _do_test('"alt_etag"', 200, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 200) + + # no match against an alternate etag + resp = _do_test('"data_etag"', 412, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 412) + + def test_GET_if_match(self): + self._test_if_match('GET') + + def test_HEAD_if_match(self): + self._test_if_match('HEAD') + + def _test_if_none_match(self, method): + num_responses = self.policy.ec_ndata if method == 'GET' else 1 + + def _do_test(match_value, backend_status, + etag_is_at='X-Object-Sysmeta-Does-Not-Exist'): + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method=method, + headers={'If-None-Match': match_value, + 'X-Backend-Etag-Is-At': etag_is_at}) + get_resp = [backend_status] * num_responses + resp_headers = {'Etag': 'frag_etag', + 'X-Object-Sysmeta-Ec-Etag': 'data_etag', + 'X-Object-Sysmeta-Alternate-Etag': 'alt_etag'} + with set_http_connect(*get_resp, headers=resp_headers): + resp = req.get_response(self.app) + self.assertEqual('data_etag', resp.headers['Etag']) + return resp + + # wildcard + resp = _do_test('*', 304) + self.assertEqual(resp.status_int, 304) + + # match + resp = _do_test('"data_etag"', 304) + self.assertEqual(resp.status_int, 304) + + # no match + resp = _do_test('"frag_etag"', 
200) + self.assertEqual(resp.status_int, 200) + + # match wildcard against an alternate etag + resp = _do_test('*', 304, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 304) + + # match against an alternate etag + resp = _do_test('"alt_etag"', 304, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 304) + + # no match against an alternate etag + resp = _do_test('"data_etag"', 200, + etag_is_at='X-Object-Sysmeta-Alternate-Etag') + self.assertEqual(resp.status_int, 200) + + def test_GET_if_none_match(self): + self._test_if_none_match('GET') + + def test_HEAD_if_none_match(self): + self._test_if_none_match('HEAD') + def test_GET_simple_x_newest(self): req = swift.common.swob.Request.blank('/v1/a/c/o', headers={'X-Newest': 'true'}) @@ -1194,6 +1487,42 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): resp = req.get_response(self.app) self.assertEqual(resp.status_int, 201) + def test_PUT_with_body_and_bad_etag(self): + segment_size = self.policy.ec_segment_size + test_body = ('asdf' * segment_size)[:-10] + codes = [201] * self.replicas() + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + conns = [] + + def capture_expect(conn): + # stash the backend connection so we can verify that it is closed + # (no data will be sent) + conns.append(conn) + + # send a bad etag in the request headers + headers = {'Etag': 'bad etag'} + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method='PUT', headers=headers, body=test_body) + with set_http_connect(*codes, expect_headers=expect_headers, + give_expect=capture_expect): + resp = req.get_response(self.app) + self.assertEqual(422, resp.status_int) + self.assertEqual(self.replicas(), len(conns)) + for conn in conns: + self.assertTrue(conn.closed) + + # make the footers callback send a bad Etag footer + footers_callback = make_footers_callback('not the test body') + env = {'swift.callback.update_footers': footers_callback} + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method='PUT', environ=env, body=test_body) + with set_http_connect(*codes, expect_headers=expect_headers): + resp = req.get_response(self.app) + self.assertEqual(422, resp.status_int) + def test_txn_id_logging_ECPUT(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', body='') @@ -1399,9 +1728,15 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(resp.status_int, 500) def test_PUT_with_body(self): - req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT') segment_size = self.policy.ec_segment_size test_body = ('asdf' * segment_size)[:-10] + # make the footers callback not include Etag footer so that we can + # verify that the correct EC-calculated Etag is included in footers + # sent to backend + footers_callback = make_footers_callback() + env = {'swift.callback.update_footers': footers_callback} + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method='PUT', environ=env) etag = md5(test_body).hexdigest() size = len(test_body) req.body = test_body @@ -1413,8 +1748,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []}) - def capture_body(conn_id, chunk): - put_requests[conn_id]['chunks'].append(chunk) + def capture_body(conn, chunk): + put_requests[conn.connection_id]['chunks'].append(chunk) def capture_headers(ip, port, device, part, method, path, headers, **kwargs): @@ -1471,13 
+1806,16 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(footer_part['X-Document'], 'object metadata') footer_metadata = json.loads(footer_part.get_payload()) self.assertTrue(footer_metadata) - expected = { - 'X-Object-Sysmeta-EC-Content-Length': str(size), + expected = {} + # update expected with footers from the callback... + footers_callback(expected) + expected.update({ + 'X-Object-Sysmeta-Ec-Content-Length': str(size), 'X-Backend-Container-Update-Override-Size': str(size), - 'X-Object-Sysmeta-EC-Etag': etag, + 'X-Object-Sysmeta-Ec-Etag': etag, 'X-Backend-Container-Update-Override-Etag': etag, - 'X-Object-Sysmeta-EC-Segment-Size': str(segment_size), - } + 'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size), + 'Etag': md5(obj_part.get_payload()).hexdigest()}) for header, value in expected.items(): self.assertEqual(footer_metadata[header], value) @@ -1504,6 +1842,118 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(len(test_body), len(expected_body)) self.assertEqual(test_body, expected_body) + def test_PUT_with_footers(self): + # verify footers supplied by a footers callback being added to + # trailing metadata + segment_size = self.policy.ec_segment_size + test_body = ('asdf' * segment_size)[:-10] + etag = md5(test_body).hexdigest() + size = len(test_body) + codes = [201] * self.replicas() + expect_headers = { + 'X-Obj-Metadata-Footer': 'yes', + 'X-Obj-Multiphase-Commit': 'yes' + } + + def do_test(footers_to_add, expect_added): + put_requests = defaultdict( + lambda: {'boundary': None, 'chunks': []}) + + def capture_body(conn, chunk): + put_requests[conn.connection_id]['chunks'].append(chunk) + + def capture_headers(ip, port, device, part, method, path, headers, + **kwargs): + conn_id = kwargs['connection_id'] + put_requests[conn_id]['boundary'] = headers[ + 'X-Backend-Obj-Multipart-Mime-Boundary'] + + def footers_callback(footers): + footers.update(footers_to_add) + env = {'swift.callback.update_footers': footers_callback} + req = swift.common.swob.Request.blank( + '/v1/a/c/o', method='PUT', environ=env, body=test_body) + + with set_http_connect(*codes, expect_headers=expect_headers, + give_send=capture_body, + give_connect=capture_headers): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + for connection_id, info in put_requests.items(): + body = unchunk_body(''.join(info['chunks'])) + # email.parser.FeedParser doesn't know how to take a multipart + # message and boundary together and parse it; it only knows how + # to take a string, parse the headers, and figure out the + # boundary on its own. 
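The footers contract exercised by these tests is deliberately small: middleware drops a callable into the WSGI environment under swift.callback.update_footers, and the proxy invokes it with a dict once the body has been sent. A sketch of both halves (the crypto header value is a placeholder):

    # middleware side: install a callback that supplies trailing metadata
    def update_footers(footers):
        footers['X-Object-Sysmeta-Crypto-Etag'] = ciphertext_etag

    env['swift.callback.update_footers'] = update_footers

    # proxy side: gather footers just before emitting trailing metadata
    footers = HeaderKeyDict()
    callback = env.get('swift.callback.update_footers')
    if callback:
        callback(footers)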
+ parser = email.parser.FeedParser() + parser.feed( + "Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" + % info['boundary']) + parser.feed(body) + message = parser.close() + + self.assertTrue(message.is_multipart()) # sanity check + mime_parts = message.get_payload() + self.assertEqual(len(mime_parts), 3) + obj_part, footer_part, commit_part = mime_parts + + # validate EC footer metadata - should always be present + self.assertEqual(footer_part['X-Document'], 'object metadata') + footer_metadata = json.loads(footer_part.get_payload()) + self.assertIsNotNone( + footer_metadata.pop('X-Object-Sysmeta-Ec-Frag-Index')) + expected = { + 'X-Object-Sysmeta-Ec-Scheme': + self.policy.ec_scheme_description, + 'X-Object-Sysmeta-Ec-Content-Length': str(size), + 'X-Object-Sysmeta-Ec-Etag': etag, + 'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size), + 'Etag': md5(obj_part.get_payload()).hexdigest()} + expected.update(expect_added) + for header, value in expected.items(): + self.assertIn(header, footer_metadata) + self.assertEqual(value, footer_metadata[header]) + footer_metadata.pop(header) + self.assertFalse(footer_metadata) + + # sanity check - middleware sets no footer, expect EC overrides + footers_to_add = {} + expect_added = { + 'X-Backend-Container-Update-Override-Size': str(size), + 'X-Backend-Container-Update-Override-Etag': etag} + do_test(footers_to_add, expect_added) + + # middleware cannot overwrite any EC sysmeta + footers_to_add = { + 'X-Object-Sysmeta-Ec-Content-Length': str(size + 1), + 'X-Object-Sysmeta-Ec-Etag': 'other etag', + 'X-Object-Sysmeta-Ec-Segment-Size': str(segment_size + 1), + 'X-Object-Sysmeta-Ec-Unused-But-Reserved': 'ignored'} + do_test(footers_to_add, expect_added) + + # middleware can add x-object-sysmeta- headers including + # x-object-sysmeta-container-update-override headers + footers_to_add = { + 'X-Object-Sysmeta-Foo': 'bar', + 'X-Object-Sysmeta-Container-Update-Override-Size': + str(size + 1), + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'other etag', + 'X-Object-Sysmeta-Container-Update-Override-Ping': 'pong' + } + expect_added.update(footers_to_add) + do_test(footers_to_add, expect_added) + + # middleware can also overwrite x-backend-container-update-override + # headers + override_footers = { + 'X-Backend-Container-Update-Override-Wham': 'bam', + 'X-Backend-Container-Update-Override-Size': str(size + 2), + 'X-Backend-Container-Update-Override-Etag': 'another etag'} + footers_to_add.update(override_footers) + expect_added.update(override_footers) + do_test(footers_to_add, expect_added) + def test_PUT_old_obj_server(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', body='') diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 6ae48bc605..f43ca5778e 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -2011,7 +2011,7 @@ class TestObjectController(unittest.TestCase): call_count[0] += 1 commit_confirmation = \ - 'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation' + 'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation' with mock.patch('swift.obj.server.md5', busted_md5_constructor), \ mock.patch(commit_confirmation, mock_committer): @@ -2062,7 +2062,7 @@ class TestObjectController(unittest.TestCase): read_footer = \ 'swift.obj.server.ObjectController._read_metadata_footer' commit_confirmation = \ - 'swift.proxy.controllers.obj.ECPutter.send_commit_confirmation' + 'swift.proxy.controllers.obj.MIMEPutter.send_commit_confirmation' with 
mock.patch(read_footer) as read_footer_call, \ mock.patch(commit_confirmation, mock_committer): From fa7d80029b53391a7877aeb6438c98a45bab42a7 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Mon, 6 Jun 2016 18:16:11 +0100 Subject: [PATCH 019/156] Make container update override headers persistent Whatever container update override etag is sent to the object server with a PUT must be used in container updates for subsequent POSTs. Unfortunately the current container update override headers (x-backend-container-update-override-*) are not persisted with the object metadata so are not available when handling a POST. For EC there is an ugly hack in the object server to use the x-object-sysmeta-ec-[etag,size] values when doing a container update for a POST. With crypto, the encryption middleware needs to override the etag (possibly overriding the already overridden EC etag value) with an encrypted etag value. We therefore have a similar problem that this override value is not persisted at the object server. This patch introduces a new namespace for container override headers, x-object-sysmeta-container-update-override-*, which uses object sysmeta so that override values are persisted. This allows a general mechanism in the object server to apply the override values (if any have been set) from object sysmeta when constructing a container update for a PUT or a POST. Middleware should use the x-object-sysmeta-container-update-override-* namespace when setting container update overrides. Middleware should be aware that other middleware may have already set container override headers, in which case consideration should be given to whether any existing value should take precedence. For backwards compatibility the existing x-backend-container-update-override-* style headers are still supported in the object server for EC override values, and the ugly hack for EC etag/size override in POST updates remains in the object server. That allows an older proxy server to be used with an upgraded object server. The proxy server continues to use the x-backend-container-update-override-* style headers for EC values so that an older object server will continue to work with an upgraded proxy server. x-object-sysmeta-container-update-override-* headers take precedence over x-backend-container-update-override-* headers and the use of x-backend-container-update-override-* headers by middleware is deprecated. Existing third party middleware that is using x-backend-container-update-override-* headers should be modified to use x-object-sysmeta-container-update-override-* headers in order to be compatible with other middleware such as encryption and to ensure that container updates during POST requests carry correct values. If targeting multiple versions of Swift object servers it may be necessary to send headers from both namespaces. However, in general it is recommended to upgrade all backend servers, then upgrade proxy servers before finally upgrading third party middleware. 
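To make the recommended usage concrete, a sketch of a middleware setting persistent overrides on its way to the object server (header names from this patch; the etag and size variables are placeholders):

    # preferred: object sysmeta is persisted with the object, so the
    # container updates for this PUT and for any subsequent POST both
    # carry these values
    req.headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = my_etag
    req.headers['X-Object-Sysmeta-Container-Update-Override-Size'] = \
        str(my_size)

    # deprecated: not persisted, honored only for backwards compatibility
    # req.headers['X-Backend-Container-Update-Override-Etag'] = my_etag

When both namespaces reach the object server the sysmeta value wins, which is what allows encryption to override an EC-supplied etag.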
Co-Authored-By: Tim Burke UpgradeImpact Change-Id: Ib80b4db57dfc2d37ea8ed3745084a3981d082784 --- swift/common/middleware/copy.py | 21 ++- swift/obj/server.py | 31 +++- swift/proxy/controllers/obj.py | 5 + test/probe/test_object_async_update.py | 104 +++++++++--- test/unit/common/middleware/helpers.py | 2 + test/unit/common/middleware/test_copy.py | 188 +++++++++++++++++++++- test/unit/obj/test_server.py | 196 ++++++++++++++++++----- 7 files changed, 471 insertions(+), 76 deletions(-) diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py index b446b1b7b3..a5fc44ca2d 100644 --- a/swift/common/middleware/copy.py +++ b/swift/common/middleware/copy.py @@ -142,7 +142,7 @@ from swift.common.utils import get_logger, \ from swift.common.swob import Request, HTTPPreconditionFailed, \ HTTPRequestEntityTooLarge, HTTPBadRequest from swift.common.http import HTTP_MULTIPLE_CHOICES, HTTP_CREATED, \ - is_success + is_success, HTTP_OK from swift.common.constraints import check_account_format, MAX_FILE_SIZE from swift.common.request_helpers import copy_header_subset, remove_items, \ is_sys_meta, is_sys_or_user_meta @@ -474,7 +474,24 @@ class ServerSideCopyMiddleware(object): # Set data source, content length and etag for the PUT request sink_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter) sink_req.content_length = source_resp.content_length - sink_req.etag = source_resp.etag + if (source_resp.status_int == HTTP_OK and + 'X-Static-Large-Object' not in source_resp.headers and + ('X-Object-Manifest' not in source_resp.headers or + req.params.get('multipart-manifest') == 'get')): + # copy source etag so that copied content is verified, unless: + # - not a 200 OK response: source etag may not match the actual + # content, for example with a 206 Partial Content response to a + # ranged request + # - SLO manifest: etag cannot be specified in manifest PUT; SLO + # generates its own etag value which may differ from source + # - SLO: etag in SLO response is not hash of actual content + # - DLO: etag in DLO response is not hash of actual content + sink_req.headers['Etag'] = source_resp.etag + else: + # since we're not copying the source etag, make sure that any + # container update override values are not copied. + remove_items(source_resp.headers, lambda k: k.startswith( + 'X-Object-Sysmeta-Container-Update-Override-')) # We no longer need these headers sink_req.headers.pop('X-Copy-From', None) diff --git a/swift/obj/server.py b/swift/obj/server.py index 99083800eb..7193b73e70 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -447,11 +447,32 @@ class ObjectController(BaseStorageServer): raise HTTPBadRequest("invalid JSON for footer doc") def _check_container_override(self, update_headers, metadata): - for key, val in metadata.items(): - override_prefix = 'x-backend-container-update-override-' - if key.lower().startswith(override_prefix): - override = key.lower().replace(override_prefix, 'x-') - update_headers[override] = val + """ + Applies any overrides to the container update headers. + + Overrides may be in the x-object-sysmeta-container-update- namespace or + the x-backend-container-update-override- namespace. The former is + preferred and is used by proxy middlewares. 
The latter is historical
+        but is still used with EC policy PUT requests; for backwards
+        compatibility the header names used with EC policy requests have not
+        been changed to the sysmeta namespace - that way the EC PUT path of a
+        newer proxy will remain compatible with an object server that pre-dates
+        the introduction of the x-object-sysmeta-container-update- namespace
+        and vice-versa.
+
+        :param update_headers: a dict of headers used in the container update
+        :param metadata: a dict that may contain override items
+        """
+        # the order of this list is significant:
+        # x-object-sysmeta-container-update-override-* headers take precedence
+        # over x-backend-container-update-override-* headers
+        override_prefixes = ['x-backend-container-update-override-',
+                             'x-object-sysmeta-container-update-override-']
+        for override_prefix in override_prefixes:
+            for key, val in metadata.items():
+                if key.lower().startswith(override_prefix):
+                    override = key.lower().replace(override_prefix, 'x-')
+                    update_headers[override] = val
 
     def _preserve_slo_manifest(self, update_metadata, orig_metadata):
         if 'X-Static-Large-Object' in orig_metadata:
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index af6b9368d7..962cf1bec6 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -1818,6 +1818,11 @@ def trailing_metadata(policy, client_obj_hasher,
         'X-Object-Sysmeta-EC-Etag': client_obj_hasher.hexdigest(),
         'X-Object-Sysmeta-EC-Content-Length':
             str(bytes_transferred_from_client),
+        # older style x-backend-container-update-override-* headers are used
+        # here (rather than x-object-sysmeta-container-update-override-*
+        # headers) for backwards compatibility: the request may be to an object
+        # server that has not yet been upgraded to accept the newer style
+        # x-object-sysmeta-container-update-override- headers.
         'X-Backend-Container-Update-Override-Etag':
             client_obj_hasher.hexdigest(),
         'X-Backend-Container-Update-Override-Size':
diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py
index b831bbeb72..bab7286424 100755
--- a/test/probe/test_object_async_update.py
+++ b/test/probe/test_object_async_update.py
@@ -62,7 +62,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
 class TestUpdateOverrides(ReplProbeTest):
     """
     Use an internal client to PUT an object to proxy server,
-    bypassing gatekeeper so that X-Backend- headers can be included.
+    bypassing gatekeeper so that X-Object-Sysmeta- headers can be included.
 
     Verify that the update override headers take effect and override values
     propagate to the container server.
""" @@ -71,10 +71,10 @@ class TestUpdateOverrides(ReplProbeTest): int_client = self.make_internal_client() headers = { 'Content-Type': 'text/plain', - 'X-Backend-Container-Update-Override-Etag': 'override-etag', - 'X-Backend-Container-Update-Override-Content-Type': + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'override-etag', + 'X-Object-Sysmeta-Container-Update-Override-Content-Type': 'override-type', - 'X-Backend-Container-Update-Override-Size': '1999' + 'X-Object-Sysmeta-Container-Update-Override-Size': '1999' } client.put_container(self.url, self.token, 'c1', headers={'X-Storage-Policy': @@ -117,7 +117,8 @@ class TestUpdateOverridesEC(ECProbeTest): # an async update to it kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server) content = u'stuff' - client.put_object(self.url, self.token, 'c1', 'o1', contents=content) + client.put_object(self.url, self.token, 'c1', 'o1', contents=content, + content_type='test/ctype') meta = client.head_object(self.url, self.token, 'c1', 'o1') # re-start the container server and assert that it does not yet know @@ -129,11 +130,26 @@ class TestUpdateOverridesEC(ECProbeTest): # Run the object-updaters to be sure updates are done Manager(['object-updater']).once() - # check the re-started container server has update with override values - obj = direct_client.direct_get_container( - cnodes[0], cpart, self.account, 'c1')[1][0] - self.assertEqual(meta['etag'], obj['hash']) - self.assertEqual(len(content), obj['bytes']) + # check the re-started container server got same update as others. + # we cannot assert the actual etag value because it may be encrypted + listing_etags = set() + for cnode in cnodes: + listing = direct_client.direct_get_container( + cnode, cpart, self.account, 'c1')[1] + self.assertEqual(1, len(listing)) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual('test/ctype', listing[0]['content_type']) + listing_etags.add(listing[0]['hash']) + self.assertEqual(1, len(listing_etags)) + + # check that listing meta returned to client is consistent with object + # meta returned to client + hdrs, listing = client.get_container(self.url, self.token, 'c1') + self.assertEqual(1, len(listing)) + self.assertEqual('o1', listing[0]['name']) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual(meta['etag'], listing[0]['hash']) + self.assertEqual('test/ctype', listing[0]['content_type']) def test_update_during_POST_only(self): # verify correct update values when PUT update is missed but then a @@ -147,7 +163,8 @@ class TestUpdateOverridesEC(ECProbeTest): # an async update to it kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server) content = u'stuff' - client.put_object(self.url, self.token, 'c1', 'o1', contents=content) + client.put_object(self.url, self.token, 'c1', 'o1', contents=content, + content_type='test/ctype') meta = client.head_object(self.url, self.token, 'c1', 'o1') # re-start the container server and assert that it does not yet know @@ -165,20 +182,39 @@ class TestUpdateOverridesEC(ECProbeTest): int_client.get_object_metadata(self.account, 'c1', 'o1') ['x-object-meta-fruit']) # sanity - # check the re-started container server has update with override values - obj = direct_client.direct_get_container( - cnodes[0], cpart, self.account, 'c1')[1][0] - self.assertEqual(meta['etag'], obj['hash']) - self.assertEqual(len(content), obj['bytes']) + # check the re-started container server got same update as others. 
+ # we cannot assert the actual etag value because it may be encrypted + listing_etags = set() + for cnode in cnodes: + listing = direct_client.direct_get_container( + cnode, cpart, self.account, 'c1')[1] + self.assertEqual(1, len(listing)) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual('test/ctype', listing[0]['content_type']) + listing_etags.add(listing[0]['hash']) + self.assertEqual(1, len(listing_etags)) + + # check that listing meta returned to client is consistent with object + # meta returned to client + hdrs, listing = client.get_container(self.url, self.token, 'c1') + self.assertEqual(1, len(listing)) + self.assertEqual('o1', listing[0]['name']) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual(meta['etag'], listing[0]['hash']) + self.assertEqual('test/ctype', listing[0]['content_type']) # Run the object-updaters to send the async pending from the PUT Manager(['object-updater']).once() # check container listing metadata is still correct - obj = direct_client.direct_get_container( - cnodes[0], cpart, self.account, 'c1')[1][0] - self.assertEqual(meta['etag'], obj['hash']) - self.assertEqual(len(content), obj['bytes']) + for cnode in cnodes: + listing = direct_client.direct_get_container( + cnode, cpart, self.account, 'c1')[1] + self.assertEqual(1, len(listing)) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual('test/ctype', listing[0]['content_type']) + listing_etags.add(listing[0]['hash']) + self.assertEqual(1, len(listing_etags)) def test_async_updates_after_PUT_and_POST(self): # verify correct update values when PUT update and POST updates are @@ -192,7 +228,8 @@ class TestUpdateOverridesEC(ECProbeTest): # we force async updates to it kill_server((cnodes[0]['ip'], cnodes[0]['port']), self.ipport2server) content = u'stuff' - client.put_object(self.url, self.token, 'c1', 'o1', contents=content) + client.put_object(self.url, self.token, 'c1', 'o1', contents=content, + content_type='test/ctype') meta = client.head_object(self.url, self.token, 'c1', 'o1') # use internal client for POST so we can force fast-post mode @@ -213,11 +250,26 @@ class TestUpdateOverridesEC(ECProbeTest): # Run the object-updaters to send the async pendings Manager(['object-updater']).once() - # check container listing metadata is still correct - obj = direct_client.direct_get_container( - cnodes[0], cpart, self.account, 'c1')[1][0] - self.assertEqual(meta['etag'], obj['hash']) - self.assertEqual(len(content), obj['bytes']) + # check the re-started container server got same update as others. 
+ # we cannot assert the actual etag value because it may be encrypted + listing_etags = set() + for cnode in cnodes: + listing = direct_client.direct_get_container( + cnode, cpart, self.account, 'c1')[1] + self.assertEqual(1, len(listing)) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual('test/ctype', listing[0]['content_type']) + listing_etags.add(listing[0]['hash']) + self.assertEqual(1, len(listing_etags)) + + # check that listing meta returned to client is consistent with object + # meta returned to client + hdrs, listing = client.get_container(self.url, self.token, 'c1') + self.assertEqual(1, len(listing)) + self.assertEqual('o1', listing[0]['name']) + self.assertEqual(len(content), listing[0]['bytes']) + self.assertEqual(meta['etag'], listing[0]['hash']) + self.assertEqual('test/ctype', listing[0]['content_type']) if __name__ == '__main__': diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 8b8fff3b3d..c295ee4768 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -128,6 +128,8 @@ class FakeSwift(object): if "CONTENT_TYPE" in env: self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"] + # note: tests may assume this copy of req_headers is case insensitive + # so we deliberately use a HeaderKeyDict self._calls.append((method, path, HeaderKeyDict(req_headers))) # range requests ought to work, hence conditional_response=True diff --git a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py index 254203e630..3f024d4395 100644 --- a/test/unit/common/middleware/test_copy.py +++ b/test/unit/common/middleware/test_copy.py @@ -20,6 +20,7 @@ import shutil import tempfile import unittest from hashlib import md5 +from six.moves import urllib from textwrap import dedent from swift.common import swob @@ -224,9 +225,10 @@ class TestServerSideCopyMiddleware(unittest.TestCase): self.assertEqual('PUT', self.authorized[1].method) self.assertEqual('/v1/a/c/o2', self.authorized[1].path) - def test_static_large_object(self): + def test_static_large_object_manifest(self): self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, - {'X-Static-Large-Object': 'True'}, 'passed') + {'X-Static-Large-Object': 'True', + 'Etag': 'should not be sent'}, 'passed') self.app.register('PUT', '/v1/a/c/o2?multipart-manifest=put', swob.HTTPCreated, {}) req = Request.blank('/v1/a/c/o2?multipart-manifest=get', @@ -236,11 +238,43 @@ class TestServerSideCopyMiddleware(unittest.TestCase): status, headers, body = self.call_ssc(req) self.assertEqual(status, '201 Created') self.assertTrue(('X-Copied-From', 'c/o') in headers) - calls = self.app.calls_with_headers - method, path, req_headers = calls[1] - self.assertEqual('PUT', method) - self.assertEqual('/v1/a/c/o2?multipart-manifest=put', path) + self.assertEqual(2, len(self.app.calls)) + self.assertEqual('GET', self.app.calls[0][0]) + get_path, qs = self.app.calls[0][1].split('?') + params = urllib.parse.parse_qs(qs) + self.assertDictEqual( + {'format': ['raw'], 'multipart-manifest': ['get']}, params) + self.assertEqual(get_path, '/v1/a/c/o') + self.assertEqual(self.app.calls[1], + ('PUT', '/v1/a/c/o2?multipart-manifest=put')) + req_headers = self.app.headers[1] self.assertNotIn('X-Static-Large-Object', req_headers) + self.assertNotIn('Etag', req_headers) + self.assertEqual(len(self.authorized), 2) + self.assertEqual('GET', self.authorized[0].method) + self.assertEqual('/v1/a/c/o', self.authorized[0].path) + self.assertEqual('PUT', 
self.authorized[1].method) + self.assertEqual('/v1/a/c/o2', self.authorized[1].path) + + def test_static_large_object(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, + {'X-Static-Large-Object': 'True', + 'Etag': 'should not be sent'}, 'passed') + self.app.register('PUT', '/v1/a/c/o2', + swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o2', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'Content-Length': '0', + 'X-Copy-From': 'c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(self.app.calls, [ + ('GET', '/v1/a/c/o'), + ('PUT', '/v1/a/c/o2')]) + req_headers = self.app.headers[1] + self.assertNotIn('X-Static-Large-Object', req_headers) + self.assertNotIn('Etag', req_headers) self.assertEqual(len(self.authorized), 2) self.assertEqual('GET', self.authorized[0].method) self.assertEqual('/v1/a/c/o', self.authorized[0].path) @@ -587,7 +621,8 @@ class TestServerSideCopyMiddleware(unittest.TestCase): self.assertEqual('/v1/a/c/o', self.authorized[0].path) def test_basic_COPY(self): - self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, { + 'etag': 'is sent'}, 'passed') self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {}) req = Request.blank( '/v1/a/c/o', method='COPY', @@ -601,6 +636,145 @@ class TestServerSideCopyMiddleware(unittest.TestCase): self.assertEqual('/v1/a/c/o', self.authorized[0].path) self.assertEqual('PUT', self.authorized[1].method) self.assertEqual('/v1/a/c/o-copy', self.authorized[1].path) + self.assertEqual(self.app.calls, [ + ('GET', '/v1/a/c/o'), + ('PUT', '/v1/a/c/o-copy')]) + self.assertIn('etag', self.app.headers[1]) + self.assertEqual(self.app.headers[1]['etag'], 'is sent') + + def test_basic_DLO(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, { + 'x-object-manifest': 'some/path', + 'etag': 'is not sent'}, 'passed') + self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {}) + req = Request.blank( + '/v1/a/c/o', method='COPY', + headers={'Content-Length': 0, + 'Destination': 'c/o-copy'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(self.app.calls, [ + ('GET', '/v1/a/c/o'), + ('PUT', '/v1/a/c/o-copy')]) + self.assertNotIn('x-object-manifest', self.app.headers[1]) + self.assertNotIn('etag', self.app.headers[1]) + + def test_basic_DLO_manifest(self): + self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, { + 'x-object-manifest': 'some/path', + 'etag': 'is sent'}, 'passed') + self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {}) + req = Request.blank( + '/v1/a/c/o?multipart-manifest=get', method='COPY', + headers={'Content-Length': 0, + 'Destination': 'c/o-copy'}) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + self.assertTrue(('X-Copied-From', 'c/o') in headers) + self.assertEqual(2, len(self.app.calls)) + self.assertEqual('GET', self.app.calls[0][0]) + get_path, qs = self.app.calls[0][1].split('?') + params = urllib.parse.parse_qs(qs) + self.assertDictEqual( + {'format': ['raw'], 'multipart-manifest': ['get']}, params) + self.assertEqual(get_path, '/v1/a/c/o') + self.assertEqual(self.app.calls[1], ('PUT', '/v1/a/c/o-copy')) + self.assertIn('x-object-manifest', self.app.headers[1]) + self.assertEqual(self.app.headers[1]['x-object-manifest'], 'some/path') + self.assertIn('etag', 
self.app.headers[1]) + self.assertEqual(self.app.headers[1]['etag'], 'is sent') + + def test_COPY_source_metadata(self): + source_headers = { + 'x-object-sysmeta-test1': 'copy me', + 'x-object-meta-test2': 'copy me too', + 'x-object-sysmeta-container-update-override-etag': 'etag val', + 'x-object-sysmeta-container-update-override-size': 'size val', + 'x-object-sysmeta-container-update-override-foo': 'bar'} + + get_resp_headers = source_headers.copy() + get_resp_headers['etag'] = 'source etag' + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, + headers=get_resp_headers, body='passed') + + def verify_headers(expected_headers, unexpected_headers, + actual_headers): + for k, v in actual_headers: + if k.lower() in expected_headers: + expected_val = expected_headers.pop(k.lower()) + self.assertEqual(expected_val, v) + self.assertNotIn(k.lower(), unexpected_headers) + self.assertFalse(expected_headers) + + # use a COPY request + self.app.register('PUT', '/v1/a/c/o-copy0', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='COPY', + headers={'Content-Length': 0, + 'Destination': 'c/o-copy0'}) + status, headers, body = self.call_ssc(req) + self.assertEqual('201 Created', status) + verify_headers(source_headers.copy(), [], headers) + method, path, headers = self.app.calls_with_headers[-1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o-copy0', path) + verify_headers(source_headers.copy(), [], headers.items()) + self.assertIn('etag', headers) + self.assertEqual(headers['etag'], 'source etag') + + req = Request.blank('/v1/a/c/o-copy0', method='GET') + status, headers, body = self.call_ssc(req) + self.assertEqual('200 OK', status) + verify_headers(source_headers.copy(), [], headers) + + # use a COPY request with a Range header + self.app.register('PUT', '/v1/a/c/o-copy1', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='COPY', + headers={'Content-Length': 0, + 'Destination': 'c/o-copy1', + 'Range': 'bytes=1-2'}) + status, headers, body = self.call_ssc(req) + expected_headers = source_headers.copy() + unexpected_headers = ( + 'x-object-sysmeta-container-update-override-etag', + 'x-object-sysmeta-container-update-override-size', + 'x-object-sysmeta-container-update-override-foo') + for h in unexpected_headers: + expected_headers.pop(h) + self.assertEqual('201 Created', status) + verify_headers(expected_headers, unexpected_headers, headers) + method, path, headers = self.app.calls_with_headers[-1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o-copy1', path) + verify_headers(expected_headers, unexpected_headers, headers.items()) + # etag should not be copied with a Range request + self.assertNotIn('etag', headers) + + req = Request.blank('/v1/a/c/o-copy1', method='GET') + status, headers, body = self.call_ssc(req) + self.assertEqual('200 OK', status) + verify_headers(expected_headers, unexpected_headers, headers) + + # use a PUT with x-copy-from + self.app.register('PUT', '/v1/a/c/o-copy2', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o-copy2', method='PUT', + headers={'Content-Length': 0, + 'X-Copy-From': 'c/o'}) + status, headers, body = self.call_ssc(req) + self.assertEqual('201 Created', status) + verify_headers(source_headers.copy(), [], headers) + method, path, headers = self.app.calls_with_headers[-1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o-copy2', path) + verify_headers(source_headers.copy(), [], headers.items()) + self.assertIn('etag', headers) + self.assertEqual(headers['etag'], 'source etag') + + 
req = Request.blank('/v1/a/c/o-copy2', method='GET') + status, headers, body = self.call_ssc(req) + self.assertEqual('200 OK', status) + verify_headers(source_headers.copy(), [], headers) def test_COPY_no_destination_header(self): req = Request.blank( diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index b85230f395..a40d75c5a2 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -710,6 +710,102 @@ class TestObjectController(unittest.TestCase): self._test_POST_container_updates( POLICIES[1], update_etag='override_etag') + def test_POST_container_updates_precedence(self): + # Verify correct etag and size being sent with container updates for a + # PUT and for a subsequent POST. + ts_iter = make_timestamp_iter() + + def do_test(body, headers, policy): + def mock_container_update(ctlr, op, account, container, obj, req, + headers_out, objdevice, policy): + calls_made.append((headers_out, policy)) + calls_made = [] + ts_put = next(ts_iter) + + # make PUT with given headers and verify correct etag is sent in + # container update + headers.update({ + 'Content-Type': + 'application/octet-stream;swift_bytes=123456789', + 'X-Backend-Storage-Policy-Index': int(policy), + 'X-Object-Sysmeta-Ec-Frag-Index': 2, + 'X-Timestamp': ts_put.internal, + 'Content-Length': len(body)}) + + req = Request.blank('/sda1/p/a/c/o', + environ={'REQUEST_METHOD': 'PUT'}, + headers=headers, body=body) + + with mock.patch( + 'swift.obj.server.ObjectController.container_update', + mock_container_update): + resp = req.get_response(self.object_controller) + + self.assertEqual(resp.status_int, 201) + self.assertEqual(1, len(calls_made)) + expected_headers = HeaderKeyDict({ + 'x-size': '4', + 'x-content-type': + 'application/octet-stream;swift_bytes=123456789', + 'x-timestamp': ts_put.internal, + 'x-etag': 'expected'}) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) + + # make a POST and verify container update has the same etag + calls_made = [] + ts_post = next(ts_iter) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': ts_post.internal, + 'X-Backend-Storage-Policy-Index': int(policy)}) + + with mock.patch( + 'swift.obj.server.ObjectController.container_update', + mock_container_update): + resp = req.get_response(self.object_controller) + + self.assertEqual(resp.status_int, 202) + self.assertEqual(1, len(calls_made)) + expected_headers.update({ + 'x-content-type-timestamp': ts_put.internal, + 'x-meta-timestamp': ts_post.internal}) + self.assertDictEqual(expected_headers, calls_made[0][0]) + self.assertEqual(policy, calls_made[0][1]) + + # sanity check - EC headers are ok + headers = { + 'X-Backend-Container-Update-Override-Etag': 'expected', + 'X-Backend-Container-Update-Override-Size': '4', + 'X-Object-Sysmeta-Ec-Etag': 'expected', + 'X-Object-Sysmeta-Ec-Content-Length': '4'} + do_test('test ec frag longer than 4', headers, POLICIES[1]) + + # middleware overrides take precedence over EC/older overrides + headers = { + 'X-Backend-Container-Update-Override-Etag': 'unexpected', + 'X-Backend-Container-Update-Override-Size': '3', + 'X-Object-Sysmeta-Ec-Etag': 'unexpected', + 'X-Object-Sysmeta-Ec-Content-Length': '3', + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected', + 'X-Object-Sysmeta-Container-Update-Override-Size': '4'} + do_test('test ec frag longer than 4', headers, POLICIES[1]) + + # overrides with replication policy + headers = { + 
'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected', + 'X-Object-Sysmeta-Container-Update-Override-Size': '4'} + do_test('longer than 4', headers, POLICIES[0]) + + # middleware overrides take precedence over EC/older overrides with + # replication policy + headers = { + 'X-Backend-Container-Update-Override-Etag': 'unexpected', + 'X-Backend-Container-Update-Override-Size': '3', + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'expected', + 'X-Object-Sysmeta-Container-Update-Override-Size': '4'} + do_test('longer than 4', headers, POLICIES[0]) + def _test_PUT_then_POST_async_pendings(self, policy, update_etag=None): # Test that PUT and POST requests result in distinct async pending # files when sync container update fails. @@ -4310,47 +4406,75 @@ class TestObjectController(unittest.TestCase): 'x-trans-id': '123', 'referer': 'PUT http://localhost/sda1/0/a/c/o'})) - def test_container_update_overrides(self): - container_updates = [] + def test_PUT_container_update_overrides(self): + ts_iter = make_timestamp_iter() - def capture_updates(ip, port, method, path, headers, *args, **kwargs): - container_updates.append((ip, port, method, path, headers)) + def do_test(override_headers): + container_updates = [] - headers = { - 'X-Timestamp': 1, - 'X-Trans-Id': '123', - 'X-Container-Host': 'chost:cport', - 'X-Container-Partition': 'cpartition', - 'X-Container-Device': 'cdevice', - 'Content-Type': 'text/plain', + def capture_updates( + ip, port, method, path, headers, *args, **kwargs): + container_updates.append((ip, port, method, path, headers)) + + ts_put = next(ts_iter) + headers = { + 'X-Timestamp': ts_put.internal, + 'X-Trans-Id': '123', + 'X-Container-Host': 'chost:cport', + 'X-Container-Partition': 'cpartition', + 'X-Container-Device': 'cdevice', + 'Content-Type': 'text/plain', + } + headers.update(override_headers) + req = Request.blank('/sda1/0/a/c/o', method='PUT', + headers=headers, body='') + with mocked_http_conn( + 200, give_connect=capture_updates) as fake_conn: + with fake_spawn(): + resp = req.get_response(self.object_controller) + self.assertRaises(StopIteration, fake_conn.code_iter.next) + self.assertEqual(resp.status_int, 201) + self.assertEqual(len(container_updates), 1) + ip, port, method, path, headers = container_updates[0] + self.assertEqual(ip, 'chost') + self.assertEqual(port, 'cport') + self.assertEqual(method, 'PUT') + self.assertEqual(path, '/cdevice/cpartition/a/c/o') + self.assertEqual(headers, HeaderKeyDict({ + 'user-agent': 'object-server %s' % os.getpid(), + 'x-size': '0', + 'x-etag': 'override_etag', + 'x-content-type': 'override_val', + 'x-timestamp': ts_put.internal, + 'X-Backend-Storage-Policy-Index': '0', # default + 'x-trans-id': '123', + 'referer': 'PUT http://localhost/sda1/0/a/c/o', + 'x-foo': 'bar'})) + + # EC policy override headers + do_test({ 'X-Backend-Container-Update-Override-Etag': 'override_etag', 'X-Backend-Container-Update-Override-Content-Type': 'override_val', 'X-Backend-Container-Update-Override-Foo': 'bar', - 'X-Backend-Container-Ignored': 'ignored' - } - req = Request.blank('/sda1/0/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, - headers=headers, body='') - with mocked_http_conn(200, give_connect=capture_updates) as fake_conn: - with fake_spawn(): - resp = req.get_response(self.object_controller) - self.assertRaises(StopIteration, fake_conn.code_iter.next) - self.assertEqual(resp.status_int, 201) - self.assertEqual(len(container_updates), 1) - ip, port, method, path, headers = container_updates[0] - self.assertEqual(ip, 'chost') - 
self.assertEqual(port, 'cport') - self.assertEqual(method, 'PUT') - self.assertEqual(path, '/cdevice/cpartition/a/c/o') - self.assertEqual(headers, HeaderKeyDict({ - 'user-agent': 'object-server %s' % os.getpid(), - 'x-size': '0', - 'x-etag': 'override_etag', - 'x-content-type': 'override_val', - 'x-timestamp': utils.Timestamp(1).internal, - 'X-Backend-Storage-Policy-Index': '0', # default when not given - 'x-trans-id': '123', - 'referer': 'PUT http://localhost/sda1/0/a/c/o', - 'x-foo': 'bar'})) + 'X-Backend-Container-Ignored': 'ignored'}) + + # middleware override headers + do_test({ + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag', + 'X-Object-Sysmeta-Container-Update-Override-Content-Type': + 'override_val', + 'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar', + 'X-Object-Sysmeta-Ignored': 'ignored'}) + + # middleware override headers take precedence over EC policy headers + do_test({ + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'override_etag', + 'X-Object-Sysmeta-Container-Update-Override-Content-Type': + 'override_val', + 'X-Object-Sysmeta-Container-Update-Override-Foo': 'bar', + 'X-Backend-Container-Update-Override-Etag': 'ignored', + 'X-Backend-Container-Update-Override-Content-Type': 'ignored', + 'X-Backend-Container-Update-Override-Foo': 'ignored'}) def test_container_update_async(self): policy = random.choice(list(POLICIES)) From a60096769c13ba8bd4c99a5ec516741f91305191 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Thu, 23 Jun 2016 13:29:51 +0200 Subject: [PATCH 020/156] Python 3: Fix basestring, long and StringIO * The basestring type was removed in Python 3: replace it with six.string_types. * Replace StringIO.StringIO with six.StringIO * Replace (int, long) with six.integer_types Change-Id: Ic0d443b0bdd00fb18452e79ffae07b9be0fa8116 --- swift/common/middleware/slo.py | 4 ++-- swift/common/swob.py | 2 +- swift/common/utils.py | 2 +- test/unit/cli/test_ring_builder_analyzer.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py index 35b98c988b..47e9a660ff 100644 --- a/swift/common/middleware/slo.py +++ b/swift/common/middleware/slo.py @@ -297,11 +297,11 @@ def parse_and_validate_input(req_body, req_path): for ek in sorted(extraneous_keys)))) continue - if not isinstance(seg_dict['path'], basestring): + if not isinstance(seg_dict['path'], six.string_types): errors.append("Index %d: \"path\" must be a string" % seg_index) continue if not (seg_dict['etag'] is None or - isinstance(seg_dict['etag'], basestring)): + isinstance(seg_dict['etag'], six.string_types)): errors.append( "Index %d: \"etag\" must be a string or null" % seg_index) continue diff --git a/swift/common/swob.py b/swift/common/swob.py index 2ba5d5e6a4..717eb8bca8 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -284,7 +284,7 @@ def _resp_status_property(): return '%s %s' % (self.status_int, self.title) def setter(self, value): - if isinstance(value, (int, long)): + if isinstance(value, six.integer_types): self.status_int = value self.explanation = self.title = RESPONSE_REASONS[value][0] else: diff --git a/swift/common/utils.py b/swift/common/utils.py index d3ef5a7dcb..e8715ff62b 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -964,7 +964,7 @@ def decode_timestamps(encoded, explicit=False): # TODO: some tests, e.g. in test_replicator, put float timestamps values # into container db's, hence this defensive check, but in real world # this may never happen. 
- if not isinstance(encoded, basestring):
+ if not isinstance(encoded, six.string_types):
 ts = Timestamp(encoded)
 return ts, ts, ts

diff --git a/test/unit/cli/test_ring_builder_analyzer.py b/test/unit/cli/test_ring_builder_analyzer.py
index 2de2b16a9c..980a016c51 100644
--- a/test/unit/cli/test_ring_builder_analyzer.py
+++ b/test/unit/cli/test_ring_builder_analyzer.py
@@ -17,8 +17,8 @@
 import os
 import json
 import mock
+from six import StringIO
 import unittest
-from StringIO import StringIO

 from test.unit import with_tempdir
 from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario

From b923e0f8928d70b23c894db8d2d24bdfdc6d283d Mon Sep 17 00:00:00 2001
From: Victor Stinner
Date: Thu, 23 Jun 2016 13:34:51 +0200
Subject: [PATCH 021/156] Python 3: dict.iteritems() and dict.itervalues()

* Replace dict.itervalues() with dict.values(). The Python 3 dict type
 has no itervalues() method; the old itervalues() method was renamed
 to values().
* Same change for dict.iteritems(), replaced with dict.items()
* Exception: use six.itervalues() to yield on sock_data_by_port

Using six.itervalues() and six.iteritems() everywhere would make the
code less readable. The overhead of creating a temporary list is
considered negligible:
http://lists.openstack.org/pipermail/openstack-dev/2015-June/066391.html

Change-Id: Ifbe7faa16d419e7fe26f1fb464019b83c9171c45
---
 swift/common/wsgi.py | 6 +++++-
 swift/container/backend.py | 4 ++--
 test/unit/common/middleware/test_recon.py | 2 +-
 test/unit/common/test_db.py | 4 ++--
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index 6676d38358..89f29c9f2e 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -31,6 +31,7 @@ import eventlet.debug
 from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout
 from paste.deploy import loadwsgi
 from eventlet.green import socket, ssl, os as green_os
+import six
 from six import BytesIO
 from six import StringIO
 from six.moves.urllib.parse import unquote
@@ -643,7 +644,10 @@ class PortPidState(object):
 Yield all current listen sockets.
 """
- for orphan_data in self.sock_data_by_port.itervalues():
+ # Use six.itervalues() rather than calling the .values() method
+ # directly, so that no temporary list of the per-port socket data
+ # is created on Python 2.
+ for orphan_data in six.itervalues(self.sock_data_by_port):
 yield orphan_data['sock']

 def forget_port(self, port):
diff --git a/swift/container/backend.py b/swift/container/backend.py
index d41f297acf..e98f9e0e67 100644
--- a/swift/container/backend.py
+++ b/swift/container/backend.py
@@ -861,7 +861,7 @@ class ContainerBroker(DatabaseBroker):
 'DELETE FROM object WHERE ' + query_mod + 'name=?
AND storage_policy_index=?', ((rec['name'], rec['storage_policy_index']) - for rec in to_delete.itervalues())) + for rec in to_delete.values())) if to_add: curs.executemany( 'INSERT INTO object (name, created_at, size, content_type,' @@ -870,7 +870,7 @@ class ContainerBroker(DatabaseBroker): ((rec['name'], rec['created_at'], rec['size'], rec['content_type'], rec['etag'], rec['deleted'], rec['storage_policy_index']) - for rec in to_add.itervalues())) + for rec in to_add.values())) if source: # for replication we rely on the remote end sending merges in # order with no gaps to increment sync_points diff --git a/test/unit/common/middleware/test_recon.py b/test/unit/common/middleware/test_recon.py index a194378083..63e82cd61e 100644 --- a/test/unit/common/middleware/test_recon.py +++ b/test/unit/common/middleware/test_recon.py @@ -300,7 +300,7 @@ class TestReconSuccess(TestCase): array.array('H', [4, 2, 4, 3])] } - for ringfn, replica_map in rings.iteritems(): + for ringfn, replica_map in rings.items(): ringpath = os.path.join(self.tempdir, ringfn) self._create_ring(ringpath, replica_map, self.ring_devs, self.ring_part_shift) diff --git a/test/unit/common/test_db.py b/test/unit/common/test_db.py index 7e7660e77c..45949c9007 100644 --- a/test/unit/common/test_db.py +++ b/test/unit/common/test_db.py @@ -691,9 +691,9 @@ class TestDatabaseBroker(unittest.TestCase): self.assertTrue(stub_called[0]) # ensure that metadata was cleared m2 = broker.metadata - self.assertTrue(not any(v[0] for v in m2.itervalues())) + self.assertTrue(not any(v[0] for v in m2.values())) self.assertTrue(all(v[1] == normalize_timestamp('2') - for v in m2.itervalues())) + for v in m2.values())) def test_get(self): broker = DatabaseBroker(':memory:') From 143b0eec92b723d11d5d1731ac2ae179e4d390e3 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 23 Jun 2016 10:46:27 -0700 Subject: [PATCH 022/156] MockMemcached cleanup * Break sendall's switch into different functions * Actually name some parameters * Raise errors on unexpected input * Consistently use tuples in self.cache Change-Id: I93aa33f83cdf6943a43f14a1868b1497bc7f4478 --- test/unit/common/test_memcached.py | 115 ++++++++++++++++------------- 1 file changed, 63 insertions(+), 52 deletions(-) diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index da7fbf3875..0ac4878c75 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -62,6 +62,8 @@ class ExplodingMockMemcached(object): class MockMemcached(object): + # See https://github.com/memcached/memcached/blob/master/doc/protocol.txt + # In particular, the "Storage commands" section may be interesting. 
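+ # A few example exchanges, paraphrased from that document (and emulated
+ # by the handle_* methods below; flags and exptime are stored verbatim,
+ # never enforced):
+ #   "set key 0 0 5\r\nhello\r\n"  ->  "STORED\r\n"
+ #   "get key\r\n"  ->  "VALUE key 0 5\r\nhello\r\nEND\r\n"
+ #   "incr key 2\r\n"  ->  the new value, e.g. "7\r\n", or "NOT_FOUND\r\n"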
def __init__(self):
 self.inbuf = ''
@@ -79,58 +81,67 @@ class MockMemcached(object):
 while '\n' in self.inbuf:
 cmd, self.inbuf = self.inbuf.split('\n', 1)
 parts = cmd.split()
- if parts[0].lower() == 'set':
- self.cache[parts[1]] = parts[2], parts[3], \
- self.inbuf[:int(parts[4])]
- self.inbuf = self.inbuf[int(parts[4]) + 2:]
- if len(parts) < 6 or parts[5] != 'noreply':
- self.outbuf += 'STORED\r\n'
- elif parts[0].lower() == 'add':
- value = self.inbuf[:int(parts[4])]
- self.inbuf = self.inbuf[int(parts[4]) + 2:]
- if parts[1] in self.cache:
- if len(parts) < 6 or parts[5] != 'noreply':
- self.outbuf += 'NOT_STORED\r\n'
- else:
- self.cache[parts[1]] = parts[2], parts[3], value
- if len(parts) < 6 or parts[5] != 'noreply':
- self.outbuf += 'STORED\r\n'
- elif parts[0].lower() == 'delete':
- if self.exc_on_delete:
- raise Exception('mock is has exc_on_delete set')
- if parts[1] in self.cache:
- del self.cache[parts[1]]
- if 'noreply' not in parts:
- self.outbuf += 'DELETED\r\n'
- elif 'noreply' not in parts:
- self.outbuf += 'NOT_FOUND\r\n'
- elif parts[0].lower() == 'get':
- for key in parts[1:]:
- if key in self.cache:
- val = self.cache[key]
- self.outbuf += 'VALUE %s %s %s\r\n' % (
- key, val[0], len(val[2]))
- self.outbuf += val[2] + '\r\n'
- self.outbuf += 'END\r\n'
- elif parts[0].lower() == 'incr':
- if parts[1] in self.cache:
- val = list(self.cache[parts[1]])
- val[2] = str(int(val[2]) + int(parts[2]))
- self.cache[parts[1]] = val
- self.outbuf += str(val[2]) + '\r\n'
- else:
- self.outbuf += 'NOT_FOUND\r\n'
- elif parts[0].lower() == 'decr':
- if parts[1] in self.cache:
- val = list(self.cache[parts[1]])
- if int(val[2]) - int(parts[2]) > 0:
- val[2] = str(int(val[2]) - int(parts[2]))
- else:
- val[2] = '0'
- self.cache[parts[1]] = val
- self.outbuf += str(val[2]) + '\r\n'
- else:
- self.outbuf += 'NOT_FOUND\r\n'
+ handler = getattr(self, 'handle_%s' % parts[0].lower(), None)
+ if handler:
+ handler(*parts[1:])
+ else:
+ raise ValueError('Unhandled command: %s' % parts[0])
+
+ def handle_set(self, key, flags, exptime, num_bytes, noreply=''):
+ self.cache[key] = flags, exptime, self.inbuf[:int(num_bytes)]
+ self.inbuf = self.inbuf[int(num_bytes) + 2:]
+ if noreply != 'noreply':
+ self.outbuf += 'STORED\r\n'
+
+ def handle_add(self, key, flags, exptime, num_bytes, noreply=''):
+ value = self.inbuf[:int(num_bytes)]
+ self.inbuf = self.inbuf[int(num_bytes) + 2:]
+ if key in self.cache:
+ if noreply != 'noreply':
+ self.outbuf += 'NOT_STORED\r\n'
+ else:
+ self.cache[key] = flags, exptime, value
+ if noreply != 'noreply':
+ self.outbuf += 'STORED\r\n'
+
+ def handle_delete(self, key, noreply=''):
+ if self.exc_on_delete:
+ raise Exception('mock has exc_on_delete set')
+ if key in self.cache:
+ del self.cache[key]
+ if noreply != 'noreply':
+ self.outbuf += 'DELETED\r\n'
+ elif noreply != 'noreply':
+ self.outbuf += 'NOT_FOUND\r\n'
+
+ def handle_get(self, *keys):
+ for key in keys:
+ if key in self.cache:
+ val = self.cache[key]
+ self.outbuf += 'VALUE %s %s %s\r\n' % (
+ key, val[0], len(val[2]))
+ self.outbuf += val[2] + '\r\n'
+ self.outbuf += 'END\r\n'
+
+ def handle_incr(self, key, value, noreply=''):
+ if key in self.cache:
+ current = self.cache[key][2]
+ new_val = str(int(current) + int(value))
+ self.cache[key] = self.cache[key][:2] + (new_val, )
+ self.outbuf += str(new_val) + '\r\n'
+ else:
+ self.outbuf += 'NOT_FOUND\r\n'
+
+ def handle_decr(self, key, value, noreply=''):
+ if key in self.cache:
+ current = self.cache[key][2]
+ new_val = str(int(current) -
int(value))
+ if new_val[0] == '-': # i.e., val is negative
+ new_val = '0'
+ self.cache[key] = self.cache[key][:2] + (new_val, )
+ self.outbuf += str(new_val) + '\r\n'
+ else:
+ self.outbuf += 'NOT_FOUND\r\n'

 def readline(self):
 if self.read_return_none:
From c953e84e28b9b17e16bde7dfbbbdabca7acded13 Mon Sep 17 00:00:00 2001
From: liangjingtao
Date: Fri, 24 Jun 2016 11:50:20 +0800
Subject: [PATCH 023/156] Make string.letters PY3 compatible

string.letters was removed in Python 3; use string.ascii_letters
instead.

Change-Id: I3c71b65b09b42dc954a3eb9e02894e5d3b12a3f4
Closes-Bug: #1595786
---
 test/functional/test_account.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/test/functional/test_account.py b/test/functional/test_account.py
index 9688a5f493..cb04a2ae0c 100755
--- a/test/functional/test_account.py
+++ b/test/functional/test_account.py
@@ -19,7 +19,7 @@ import unittest2
 import json
 from uuid import uuid4
 from unittest2 import SkipTest
-from string import letters
+from string import ascii_letters

 from six.moves import range

 from swift.common.middleware.acl import format_acl
@@ -127,7 +127,7 @@ class TestAccount(unittest2.TestCase):
 # needs to be an acceptable header size
 num_keys = 8
 max_key_size = load_constraint('max_header_size') / num_keys
- acl = {'admin': [c * max_key_size for c in letters[:num_keys]]}
+ acl = {'admin': [c * max_key_size for c in ascii_letters[:num_keys]]}
 headers = {'x-account-access-control': format_acl(
 version=2, acl_dict=acl)}
 resp = retry(post, headers=headers, use_account=1)
@@ -135,7 +135,8 @@ class TestAccount(unittest2.TestCase):
 self.assertEqual(resp.status, 400)

 # and again a touch smaller
- acl = {'admin': [c * max_key_size for c in letters[:num_keys - 1]]}
+ acl = {'admin': [c * max_key_size for c
+ in ascii_letters[:num_keys - 1]]}
 headers = {'x-account-access-control': format_acl(
 version=2, acl_dict=acl)}
 resp = retry(post, headers=headers, use_account=1)
From 029c2782ddfd933af20b697c00a46b1375e0f23e Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Mon, 27 Jun 2016 14:18:43 +0200
Subject: [PATCH 024/156] Add swiftbackmeup to associated projects

Change-Id: I99f7a38d9b26605324408f1d200bf08da1e2772f
---
 doc/source/associated_projects.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst
index 10f061fab8..12ef46800e 100644
--- a/doc/source/associated_projects.rst
+++ b/doc/source/associated_projects.rst
@@ -114,3 +114,4 @@ Other
 * `Swift Browser `_ - JavaScript interface for Swift
 * `swift-ui `_ - OpenStack Swift web browser
 * `Swift Durability Calculator `_ - Data Durability Calculation Tool for Swift
+* `swiftbackmeup `_ - Utility that allows one to create backups and upload them to OpenStack Swift
From 3ad003cf51151f8ce6dfc6c2c529206eda5f7b60 Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Mon, 6 Jun 2016 18:38:50 +0100
Subject: [PATCH 025/156] Enable middleware to set metadata on object POST

Adds a new form of system metadata for objects.

Sysmeta cannot be updated by an object POST because
that would cause all existing sysmeta to be deleted.

Crypto middleware will want to add 'system' metadata
to object metadata on PUTs and POSTs, but it is ok for
this metadata to be replaced en-masse on every POST.

This patch introduces x-object-transient-sysmeta-*
that is persisted by object servers and returned
in GET and HEAD responses, just like user metadata,
without polluting the x-object-meta-* namespace.
All headers in this namespace will be filtered
inbound and outbound by the gatekeeper, so cannot
be set or read by clients.

Co-Authored-By: Clay Gerrard
Co-Authored-By: Janie Richling

Change-Id: I5075493329935ba6790543fc82ea6e039704811d
---
 doc/source/development_middleware.rst | 67 ++++++++++-
 swift/common/middleware/copy.py | 30 ++---
 swift/common/middleware/gatekeeper.py | 9 +-
 swift/common/request_helpers.py | 41 +++++++
 swift/obj/server.py | 22 ++--
 swift/proxy/controllers/base.py | 9 +-
 .../probe/test_object_metadata_replication.py | 25 ++--
 test/unit/common/middleware/helpers.py | 33 ++++--
 test/unit/common/middleware/test_copy.py | 66 +++++++----
 .../unit/common/middleware/test_gatekeeper.py | 9 +-
 test/unit/common/test_request_helpers.py | 12 +-
 test/unit/obj/test_diskfile.py | 5 +
 test/unit/obj/test_server.py | 81 ++++++++++++-
 test/unit/proxy/controllers/test_base.py | 12 +-
 test/unit/proxy/test_server.py | 2 +-
 test/unit/proxy/test_sysmeta.py | 107 ++++++++++++++++++
 16 files changed, 450 insertions(+), 80 deletions(-)

diff --git a/doc/source/development_middleware.rst b/doc/source/development_middleware.rst
index 14bfcddb5b..b6dac83289 100644
--- a/doc/source/development_middleware.rst
+++ b/doc/source/development_middleware.rst
@@ -200,6 +200,8 @@ core swift features which predate sysmeta have added exceptions for
 custom non-user metadata headers (e.g. :ref:`acls`,
 :ref:`large-objects`)

+.. _usermeta:
+
 ^^^^^^^^^^^^^
 User Metadata
 ^^^^^^^^^^^^^
@@ -209,7 +211,7 @@ User metadata takes the form of ``X-<type>-Meta-<name>: <value>``, where
``<type>`` and ``<name>`` and ``<value>`` are set by the client.

 User metadata should generally be reserved for use by the client or
-client applications. An perfect example use-case for user metadata is
+client applications. A perfect example use-case for user metadata is
 `python-swiftclient`_'s ``X-Object-Meta-Mtime`` which it stores on
 object it uploads to implement its ``--changed`` option which will only
 upload files that have changed since the last upload.
@@ -223,6 +225,20 @@ borrows the user
 metadata namespace is :ref:`tempurl`. An example of middleware which
 uses custom non-user metadata to avoid the user metadata namespace is
 :ref:`slo-doc`.

+User metadata that is stored by a PUT or POST request to a container or account
+resource persists until it is explicitly removed by a subsequent PUT or POST
+request that includes a header ``X-<type>-Meta-<name>`` with no value or a
+header ``X-Remove-<type>-Meta-<name>: <ignored-value>``. In the latter case the
+``<ignored-value>`` is not stored. All user metadata stored with an account or
+container resource is deleted when the account or container is deleted.
+
+User metadata that is stored with an object resource has a different semantic;
+object user metadata persists until any subsequent PUT or POST request is made
+to the same object, at which point all user metadata stored with that object is
+deleted en-masse and replaced with any user metadata included with the PUT or
+POST request. As a result, it is not possible to update a subset of the user
+metadata items stored with an object while leaving some items unchanged.
+
 .. _sysmeta:

 ^^^^^^^^^^^^^^^
 System Metadata
@@ -237,7 +253,7 @@ Swift WSGI Server.

 All headers on client requests in the form of
 ``X-<type>-Sysmeta-<name>`` will be dropped from the request before
 being processed by any middleware. All headers on responses from
 back-end systems in the form
-of ``X-<type>-Sysmeta-<name>`` will be removed after all middleware has
+of ``X-<type>-Sysmeta-<name>`` will be removed after all middlewares have
 processed the response but before the response is sent to the client.
See :ref:`gatekeeper` middleware for more information.

@@ -249,3 +265,50 @@ modified directly by client requests, and the outgoing filter
 ensures that removing middleware that uses a specific system
 metadata key renders it benign.

 New middleware should take advantage of system metadata.
+
+System metadata may be set on accounts and containers by including headers with
+a PUT or POST request. Where a header name matches the name of an existing item
+of system metadata, the value of the existing item will be updated. Otherwise
+existing items are preserved. A system metadata header with an empty value will
+cause any existing item with the same name to be deleted.
+
+System metadata may be set on objects using only PUT requests. All items of
+existing system metadata will be deleted and replaced en-masse by any system
+metadata headers included with the PUT request. System metadata is neither
+updated nor deleted by a POST request: updating individual items of system
+metadata with a POST request is not yet supported in the same way that updating
+individual items of user metadata is not supported. In cases where middleware
+needs to store its own metadata with a POST request, it may use Object Transient
+Sysmeta.
+
+^^^^^^^^^^^^^^^^^^^^^^^^
+Object Transient-Sysmeta
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+If middleware needs to store object metadata with a POST request it may do so
+using headers of the form ``X-Object-Transient-Sysmeta-<name>: <value>``.
+
+All headers on client requests in the form of
+``X-Object-Transient-Sysmeta-<name>`` will be dropped from the request before
+being processed by any middleware. All headers on responses from back-end
+systems in the form of ``X-Object-Transient-Sysmeta-<name>`` will be removed
+after all middlewares have processed the response but before the response is
+sent to the client. See :ref:`gatekeeper` middleware for more information.
+
+Transient-sysmeta updates on an object have the same semantic as user
+metadata updates on an object (see :ref:`usermeta`) i.e. whenever any PUT or
+POST request is made to an object, all existing items of transient-sysmeta are
+deleted en-masse and replaced with any transient-sysmeta included with the PUT
+or POST request. Transient-sysmeta set by a middleware is therefore prone to
+deletion by a subsequent client-generated POST request unless the middleware is
+careful to include its transient-sysmeta with every POST. Likewise, user
+metadata set by a client is prone to deletion by a subsequent
+middleware-generated POST request, and for that reason middleware should avoid
+generating POST requests that are independent of any client request.
+
+Transient-sysmeta deliberately uses a different header prefix to user metadata
+so that middlewares can avoid potential conflict with user metadata keys.
+
+Transient-sysmeta deliberately uses a different header prefix to system
+metadata to emphasize the fact that the data is only persisted until a
+subsequent POST.
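As a minimal illustration of the semantic described above (an editorial sketch, not part of this patch), a hypothetical middleware that wants a transient-sysmeta item to survive on an object must re-send it with every PUT and POST; the sketch assumes only the get_object_transient_sysmeta() helper that this patch adds to swift.common.request_helpers:

    from swift.common.request_helpers import get_object_transient_sysmeta
    from swift.common.swob import Request


    class MarkerMiddleware(object):
        """Hypothetical example: stamps objects with a transient-sysmeta item."""

        def __init__(self, app):
            self.app = app

        def __call__(self, env, start_response):
            req = Request(env)
            if req.method in ('PUT', 'POST'):
                # transient-sysmeta is replaced en-masse by every object PUT
                # or POST, so the marker must accompany each such request
                req.headers[get_object_transient_sysmeta('marker')] = 'yes'
            return self.app(env, start_response)

Because the gatekeeper filters the x-object-transient-sysmeta- namespace in both directions, only the middleware, never the client, can set or read this marker.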
diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py
index a5fc44ca2d..1daadfe90c 100644
--- a/swift/common/middleware/copy.py
+++ b/swift/common/middleware/copy.py
@@ -145,7 +145,7 @@ from swift.common.http import HTTP_MULTIPLE_CHOICES, HTTP_CREATED, \
 is_success, HTTP_OK
 from swift.common.constraints import check_account_format, MAX_FILE_SIZE
 from swift.common.request_helpers import copy_header_subset, remove_items, \
- is_sys_meta, is_sys_or_user_meta
+ is_sys_meta, is_sys_or_user_meta, is_object_transient_sysmeta
 from swift.common.wsgi import WSGIContext, make_subrequest


@@ -206,16 +206,18 @@ def _check_destination_header(req):
 '/')


-def _copy_headers_into(from_r, to_r):
+def _copy_headers(src, dest):
 """
- Will copy desired headers from from_r to to_r
- :params from_r: a swob Request or Response
- :params to_r: a swob Request or Response
+ Will copy desired headers from src to dest.
+
+ :param src: an instance of collections.Mapping
+ :param dest: an instance of collections.Mapping
 """
- pass_headers = ['x-delete-at']
- for k, v in from_r.headers.items():
- if is_sys_or_user_meta('object', k) or k.lower() in pass_headers:
- to_r.headers[k] = v
+ for k, v in src.items():
+ if (is_sys_or_user_meta('object', k) or
+ is_object_transient_sysmeta(k) or
+ k.lower() == 'x-delete-at'):
+ dest[k] = v


 class ServerSideCopyWebContext(WSGIContext):
@@ -422,9 +424,7 @@ class ServerSideCopyMiddleware(object):
 source_resp.headers['last-modified']
 # Existing sys and user meta of source object is added to response
 # headers in addition to the new ones.
- for k, v in sink_req.headers.items():
- if is_sys_or_user_meta('object', k) or k.lower() == 'x-delete-at':
- resp_headers[k] = v
+ _copy_headers(sink_req.headers, resp_headers)
 return resp_headers

 def handle_PUT(self, req, start_response):
@@ -511,10 +511,10 @@ class ServerSideCopyMiddleware(object):
 remove_items(sink_req.headers, condition)
 copy_header_subset(source_resp, sink_req, condition)
 else:
- # Copy/update existing sysmeta and user meta
- _copy_headers_into(source_resp, sink_req)
+ # Copy/update existing sysmeta, transient-sysmeta and user meta
+ _copy_headers(source_resp.headers, sink_req.headers)
 # Copy/update new metadata provided in request if any
- _copy_headers_into(req, sink_req)
+ _copy_headers(req.headers, sink_req.headers)

 # Create response headers for PUT response
 resp_headers = self._create_response_headers(source_path,
diff --git a/swift/common/middleware/gatekeeper.py b/swift/common/middleware/gatekeeper.py
index c5c1066505..e5df5bf44c 100644
--- a/swift/common/middleware/gatekeeper.py
+++ b/swift/common/middleware/gatekeeper.py
@@ -33,22 +33,25 @@ automatically inserted close to the start of the pipeline by the proxy
 server.

 from swift.common.swob import Request
 from swift.common.utils import get_logger, config_true_value
-from swift.common.request_helpers import remove_items, get_sys_meta_prefix
+from swift.common.request_helpers import (
+ remove_items, get_sys_meta_prefix, OBJECT_TRANSIENT_SYSMETA_PREFIX
+)
 import re

 #: A list of python regular expressions that will be used to
 #: match against inbound request headers. Matching headers will
 #: be removed from the request.
 # Exclude headers starting with a sysmeta prefix.
+# Exclude headers starting with object transient system metadata prefix.
+# Exclude headers starting with an internal backend header prefix.
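+# For example, with these exclusions a client-supplied
+# x-object-transient-sysmeta-foo header is dropped from inbound requests.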
# If adding to this list, note that these are regex patterns, # so use a trailing $ to constrain to an exact header match # rather than prefix match. inbound_exclusions = [get_sys_meta_prefix('account'), get_sys_meta_prefix('container'), get_sys_meta_prefix('object'), + OBJECT_TRANSIENT_SYSMETA_PREFIX, 'x-backend'] -# 'x-object-sysmeta' is reserved in anticipation of future support -# for system metadata being applied to objects #: A list of python regular expressions that will be used to diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index 71a32106af..65f21bebce 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -44,6 +44,9 @@ from swift.common.utils import split_path, validate_device_partition, \ from swift.common.wsgi import make_subrequest +OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-' + + def get_param(req, name, default=None): """ Get parameters from an HTTP request ensuring proper handling UTF-8 @@ -175,6 +178,19 @@ def is_sys_or_user_meta(server_type, key): return is_user_meta(server_type, key) or is_sys_meta(server_type, key) +def is_object_transient_sysmeta(key): + """ + Tests if a header key starts with and is longer than the prefix for object + transient system metadata. + + :param key: header key + :returns: True if the key satisfies the test, False otherwise + """ + if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX): + return False + return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX) + + def strip_user_meta_prefix(server_type, key): """ Removes the user metadata prefix for a given server type from the start @@ -199,6 +215,17 @@ def strip_sys_meta_prefix(server_type, key): return key[len(get_sys_meta_prefix(server_type)):] +def strip_object_transient_sysmeta_prefix(key): + """ + Removes the object transient system metadata prefix from the start of a + header key. + + :param key: header key + :returns: stripped header key + """ + return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):] + + def get_user_meta_prefix(server_type): """ Returns the prefix for user metadata headers for given server type. @@ -225,6 +252,20 @@ def get_sys_meta_prefix(server_type): return 'x-%s-%s-' % (server_type.lower(), 'sysmeta') +def get_object_transient_sysmeta(key): + """ + Returns the Object Transient System Metadata header for key. + The Object Transient System Metadata namespace will be persisted by + backend object servers. These headers are treated in the same way as + object user metadata i.e. all headers in this namespace will be + replaced on every POST request. 
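+
+ For example, get_object_transient_sysmeta('foo') returns
+ 'x-object-transient-sysmeta-foo'.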
+ + :param key: metadata key + :returns: the entire object transient system metadata header for key + """ + return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key) + + def remove_items(headers, condition): """ Removes items from a dict whose keys satisfy diff --git a/swift/obj/server.py b/swift/obj/server.py index 7193b73e70..1edefb8cd4 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -46,7 +46,8 @@ from swift.common.http import is_success from swift.common.base_storage_server import BaseStorageServer from swift.common.header_key_dict import HeaderKeyDict from swift.common.request_helpers import get_name_and_placement, \ - is_user_meta, is_sys_or_user_meta, resolve_etag_is_at_header + is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \ + resolve_etag_is_at_header from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \ @@ -520,7 +521,8 @@ class ObjectController(BaseStorageServer): metadata = {'X-Timestamp': req_timestamp.internal} self._preserve_slo_manifest(metadata, orig_metadata) metadata.update(val for val in request.headers.items() - if is_user_meta('object', val[0])) + if (is_user_meta('object', val[0]) or + is_object_transient_sysmeta(val[0]))) headers_to_copy = ( request.headers.get( 'X-Backend-Replication-Headers', '').split() + @@ -767,9 +769,11 @@ class ObjectController(BaseStorageServer): 'Content-Length': str(upload_size), } metadata.update(val for val in request.headers.items() - if is_sys_or_user_meta('object', val[0])) + if (is_sys_or_user_meta('object', val[0]) or + is_object_transient_sysmeta(val[0]))) metadata.update(val for val in footer_meta.items() - if is_sys_or_user_meta('object', val[0])) + if (is_sys_or_user_meta('object', val[0]) or + is_object_transient_sysmeta(val[0]))) headers_to_copy = ( request.headers.get( 'X-Backend-Replication-Headers', '').split() + @@ -861,8 +865,9 @@ class ObjectController(BaseStorageServer): response.headers['Content-Type'] = metadata.get( 'Content-Type', 'application/octet-stream') for key, value in metadata.items(): - if is_sys_or_user_meta('object', key) or \ - key.lower() in self.allowed_headers: + if (is_sys_or_user_meta('object', key) or + is_object_transient_sysmeta(key) or + key.lower() in self.allowed_headers): response.headers[key] = value response.etag = metadata['ETag'] response.last_modified = math.ceil(float(file_x_ts)) @@ -913,8 +918,9 @@ class ObjectController(BaseStorageServer): response.headers['Content-Type'] = metadata.get( 'Content-Type', 'application/octet-stream') for key, value in metadata.items(): - if is_sys_or_user_meta('object', key) or \ - key.lower() in self.allowed_headers: + if (is_sys_or_user_meta('object', key) or + is_object_transient_sysmeta(key) or + key.lower() in self.allowed_headers): response.headers[key] = value response.etag = metadata['ETag'] ts = Timestamp(metadata['X-Timestamp']) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index 407a7aed93..c1a909dad5 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -58,7 +58,8 @@ from swift.common.swob import Request, Response, Range, \ status_map from swift.common.request_helpers import strip_sys_meta_prefix, \ strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \ - http_response_to_document_iters + http_response_to_document_iters, is_object_transient_sysmeta, \ + 
strip_object_transient_sysmeta_prefix from swift.common.storage_policy import POLICIES @@ -180,12 +181,18 @@ def headers_to_object_info(headers, status_int=HTTP_OK): Construct a cacheable dict of object info based on response headers. """ headers, meta, sysmeta = _prep_headers_to_info(headers, 'object') + transient_sysmeta = {} + for key, val in headers.iteritems(): + if is_object_transient_sysmeta(key): + key = strip_object_transient_sysmeta_prefix(key.lower()) + transient_sysmeta[key] = val info = {'status': status_int, 'length': headers.get('content-length'), 'type': headers.get('content-type'), 'etag': headers.get('etag'), 'meta': meta, 'sysmeta': sysmeta, + 'transient_sysmeta': transient_sysmeta } return info diff --git a/test/probe/test_object_metadata_replication.py b/test/probe/test_object_metadata_replication.py index 4759d5dfc3..57ef8e455e 100644 --- a/test/probe/test_object_metadata_replication.py +++ b/test/probe/test_object_metadata_replication.py @@ -339,6 +339,8 @@ class Test(ReplProbeTest): def test_sysmeta_after_replication_with_subsequent_post(self): sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'} usermeta = {'x-object-meta-bar': 'meta-bar'} + transient_sysmeta = { + 'x-object-transient-sysmeta-bar': 'transient-sysmeta-bar'} self.brain.put_container(policy_index=int(self.policy)) # put object self._put_object() @@ -356,11 +358,13 @@ class Test(ReplProbeTest): # post some user meta to second server subset self.brain.stop_handoff_half() self.container_brain.stop_handoff_half() - self._post_object(usermeta) + user_and_transient_sysmeta = dict(usermeta) + user_and_transient_sysmeta.update(transient_sysmeta) + self._post_object(user_and_transient_sysmeta) metadata = self._get_object_metadata() - for key in usermeta: + for key in user_and_transient_sysmeta: self.assertTrue(key in metadata) - self.assertEqual(metadata[key], usermeta[key]) + self.assertEqual(metadata[key], user_and_transient_sysmeta[key]) for key in sysmeta: self.assertFalse(key in metadata) self.brain.start_handoff_half() @@ -376,6 +380,7 @@ class Test(ReplProbeTest): metadata = self._get_object_metadata() expected = dict(sysmeta) expected.update(usermeta) + expected.update(transient_sysmeta) for key in expected.keys(): self.assertTrue(key in metadata, key) self.assertEqual(metadata[key], expected[key]) @@ -399,6 +404,8 @@ class Test(ReplProbeTest): def test_sysmeta_after_replication_with_prior_post(self): sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'} usermeta = {'x-object-meta-bar': 'meta-bar'} + transient_sysmeta = { + 'x-object-transient-sysmeta-bar': 'transient-sysmeta-bar'} self.brain.put_container(policy_index=int(self.policy)) # put object self._put_object() @@ -406,11 +413,13 @@ class Test(ReplProbeTest): # put user meta to first server subset self.brain.stop_handoff_half() self.container_brain.stop_handoff_half() - self._post_object(headers=usermeta) + user_and_transient_sysmeta = dict(usermeta) + user_and_transient_sysmeta.update(transient_sysmeta) + self._post_object(user_and_transient_sysmeta) metadata = self._get_object_metadata() - for key in usermeta: + for key in user_and_transient_sysmeta: self.assertTrue(key in metadata) - self.assertEqual(metadata[key], usermeta[key]) + self.assertEqual(metadata[key], user_and_transient_sysmeta[key]) self.brain.start_handoff_half() self.container_brain.start_handoff_half() @@ -436,7 +445,7 @@ class Test(ReplProbeTest): for key in sysmeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], sysmeta[key]) - for key in usermeta: + for key in 
user_and_transient_sysmeta: self.assertFalse(key in metadata) self.brain.start_primary_half() self.container_brain.start_primary_half() @@ -449,7 +458,7 @@ class Test(ReplProbeTest): for key in sysmeta: self.assertTrue(key in metadata) self.assertEqual(metadata[key], sysmeta[key]) - for key in usermeta: + for key in user_and_transient_sysmeta: self.assertFalse(key in metadata) self.brain.start_handoff_half() self.container_brain.start_handoff_half() diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index c295ee4768..1e31362f0d 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -19,6 +19,8 @@ from collections import defaultdict from hashlib import md5 from swift.common import swob from swift.common.header_key_dict import HeaderKeyDict +from swift.common.request_helpers import is_user_meta, \ + is_object_transient_sysmeta from swift.common.swob import HTTPNotImplemented from swift.common.utils import split_path @@ -87,7 +89,7 @@ class FakeSwift(object): if resp: return resp(env, start_response) - req_headers = swob.Request(env).headers + req = swob.Request(env) self.swift_sources.append(env.get('swift.source')) self.txn_ids.append(env.get('swift.trans_id')) @@ -114,26 +116,41 @@ class FakeSwift(object): # simulate object PUT if method == 'PUT' and obj: - input = ''.join(iter(env['wsgi.input'].read, '')) + put_body = ''.join(iter(env['wsgi.input'].read, '')) if 'swift.callback.update_footers' in env: footers = HeaderKeyDict() env['swift.callback.update_footers'](footers) - req_headers.update(footers) - etag = md5(input).hexdigest() + req.headers.update(footers) + etag = md5(put_body).hexdigest() headers.setdefault('Etag', etag) - headers.setdefault('Content-Length', len(input)) + headers.setdefault('Content-Length', len(put_body)) # keep it for subsequent GET requests later - self.uploaded[path] = (dict(req_headers), input) + self.uploaded[path] = (dict(req.headers), put_body) if "CONTENT_TYPE" in env: self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"] + # simulate object POST + elif method == 'POST' and obj: + metadata, data = self.uploaded.get(path, ({}, None)) + # select items to keep from existing... 
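+ # (a POST replaces user meta and transient sysmeta en-masse,
+ # while other items, e.g. sysmeta, persist)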
+ new_metadata = dict(
+ (k, v) for k, v in metadata.items()
+ if (not is_user_meta('object', k) and not
+ is_object_transient_sysmeta(k)))
+ # apply from new
+ new_metadata.update(
+ dict((k, v) for k, v in req.headers.items()
+ if (is_user_meta('object', k) or
+ is_object_transient_sysmeta(k) or
+ k.lower() == 'content-type')))
+ self.uploaded[path] = new_metadata, data
+
 # note: tests may assume this copy of req_headers is case insensitive
 # so we deliberately use a HeaderKeyDict
- self._calls.append((method, path, HeaderKeyDict(req_headers)))
+ self._calls.append((method, path, HeaderKeyDict(req.headers)))

 # range requests ought to work, hence conditional_response=True
- req = swob.Request(env)
 if isinstance(body, list):
 resp = resp_class(
 req=req, headers=headers, app_iter=body,
diff --git a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py
index 3f024d4395..3a6663db00 100644
--- a/test/unit/common/middleware/test_copy.py
+++ b/test/unit/common/middleware/test_copy.py
@@ -689,9 +689,11 @@ class TestServerSideCopyMiddleware(unittest.TestCase):
 source_headers = {
 'x-object-sysmeta-test1': 'copy me',
 'x-object-meta-test2': 'copy me too',
+ 'x-object-transient-sysmeta-test3': 'ditto',
 'x-object-sysmeta-container-update-override-etag': 'etag val',
 'x-object-sysmeta-container-update-override-size': 'size val',
- 'x-object-sysmeta-container-update-override-foo': 'bar'}
+ 'x-object-sysmeta-container-update-override-foo': 'bar',
+ 'x-delete-at': 'delete-at-time'}

 get_resp_headers = source_headers.copy()
 get_resp_headers['etag'] = 'source etag'
@@ -713,20 +715,20 @@ class TestServerSideCopyMiddleware(unittest.TestCase):
 req = Request.blank('/v1/a/c/o', method='COPY',
 headers={'Content-Length': 0,
 'Destination': 'c/o-copy0'})
- status, headers, body = self.call_ssc(req)
+ status, resp_headers, body = self.call_ssc(req)
 self.assertEqual('201 Created', status)
- verify_headers(source_headers.copy(), [], headers)
- method, path, headers = self.app.calls_with_headers[-1]
+ verify_headers(source_headers.copy(), [], resp_headers)
+ method, path, put_headers = self.app.calls_with_headers[-1]
 self.assertEqual('PUT', method)
 self.assertEqual('/v1/a/c/o-copy0', path)
- verify_headers(source_headers.copy(), [], headers.items())
- self.assertIn('etag', headers)
- self.assertEqual(headers['etag'], 'source etag')
+ verify_headers(source_headers.copy(), [], put_headers.items())
+ self.assertIn('etag', put_headers)
+ self.assertEqual(put_headers['etag'], 'source etag')

 req = Request.blank('/v1/a/c/o-copy0', method='GET')
- status, headers, body = self.call_ssc(req)
+ status, resp_headers, body = self.call_ssc(req)
 self.assertEqual('200 OK', status)
- verify_headers(source_headers.copy(), [], headers)
+ verify_headers(source_headers.copy(), [], resp_headers)

 # use a COPY request with a Range header
 self.app.register('PUT', '/v1/a/c/o-copy1', swob.HTTPCreated, {})
 req = Request.blank('/v1/a/c/o', method='COPY',
 headers={'Content-Length': 0,
 'Destination': 'c/o-copy1',
 'Range': 'bytes=1-2'})
- status, headers, body = self.call_ssc(req)
+ status, resp_headers, body = self.call_ssc(req)
 expected_headers = source_headers.copy()
 unexpected_headers = (
 'x-object-sysmeta-container-update-override-etag',
 'x-object-sysmeta-container-update-override-size',
 'x-object-sysmeta-container-update-override-foo')
 for h in unexpected_headers:
 expected_headers.pop(h)
 self.assertEqual('201 Created', status)
- verify_headers(expected_headers, unexpected_headers, headers)
- method, path, headers =
self.app.calls_with_headers[-1] + verify_headers(expected_headers, unexpected_headers, resp_headers) + method, path, put_headers = self.app.calls_with_headers[-1] self.assertEqual('PUT', method) self.assertEqual('/v1/a/c/o-copy1', path) - verify_headers(expected_headers, unexpected_headers, headers.items()) + verify_headers( + expected_headers, unexpected_headers, put_headers.items()) # etag should not be copied with a Range request - self.assertNotIn('etag', headers) + self.assertNotIn('etag', put_headers) req = Request.blank('/v1/a/c/o-copy1', method='GET') - status, headers, body = self.call_ssc(req) + status, resp_headers, body = self.call_ssc(req) self.assertEqual('200 OK', status) - verify_headers(expected_headers, unexpected_headers, headers) + verify_headers(expected_headers, unexpected_headers, resp_headers) # use a PUT with x-copy-from self.app.register('PUT', '/v1/a/c/o-copy2', swob.HTTPCreated, {}) req = Request.blank('/v1/a/c/o-copy2', method='PUT', headers={'Content-Length': 0, 'X-Copy-From': 'c/o'}) - status, headers, body = self.call_ssc(req) + status, resp_headers, body = self.call_ssc(req) self.assertEqual('201 Created', status) - verify_headers(source_headers.copy(), [], headers) - method, path, headers = self.app.calls_with_headers[-1] + verify_headers(source_headers.copy(), [], resp_headers) + method, path, put_headers = self.app.calls_with_headers[-1] self.assertEqual('PUT', method) self.assertEqual('/v1/a/c/o-copy2', path) - verify_headers(source_headers.copy(), [], headers.items()) - self.assertIn('etag', headers) - self.assertEqual(headers['etag'], 'source etag') + verify_headers(source_headers.copy(), [], put_headers.items()) + self.assertIn('etag', put_headers) + self.assertEqual(put_headers['etag'], 'source etag') req = Request.blank('/v1/a/c/o-copy2', method='GET') - status, headers, body = self.call_ssc(req) + status, resp_headers, body = self.call_ssc(req) self.assertEqual('200 OK', status) - verify_headers(source_headers.copy(), [], headers) + verify_headers(source_headers.copy(), [], resp_headers) + + # copy to same path as source + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='PUT', + headers={'Content-Length': 0, + 'X-Copy-From': 'c/o'}) + status, resp_headers, body = self.call_ssc(req) + self.assertEqual('201 Created', status) + verify_headers(source_headers.copy(), [], resp_headers) + method, path, put_headers = self.app.calls_with_headers[-1] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c/o', path) + verify_headers(source_headers.copy(), [], put_headers.items()) + self.assertIn('etag', put_headers) + self.assertEqual(put_headers['etag'], 'source etag') def test_COPY_no_destination_header(self): req = Request.blank( diff --git a/test/unit/common/middleware/test_gatekeeper.py b/test/unit/common/middleware/test_gatekeeper.py index a01d45cbb1..5f4e87b5a2 100644 --- a/test/unit/common/middleware/test_gatekeeper.py +++ b/test/unit/common/middleware/test_gatekeeper.py @@ -74,12 +74,17 @@ class TestGatekeeper(unittest.TestCase): x_backend_headers = {'X-Backend-Replication': 'true', 'X-Backend-Replication-Headers': 'stuff'} + object_transient_sysmeta_headers = { + 'x-object-transient-sysmeta-': 'value', + 'x-object-transient-sysmeta-foo': 'value'} x_timestamp_headers = {'X-Timestamp': '1455952805.719739'} forbidden_headers_out = dict(sysmeta_headers.items() + - x_backend_headers.items()) + x_backend_headers.items() + + object_transient_sysmeta_headers.items()) forbidden_headers_in = 
dict(sysmeta_headers.items() + - x_backend_headers.items()) + x_backend_headers.items() + + object_transient_sysmeta_headers.items()) shunted_headers_in = dict(x_timestamp_headers.items()) def _assertHeadersEqual(self, expected, actual): diff --git a/test/unit/common/test_request_helpers.py b/test/unit/common/test_request_helpers.py index 1c39e9f0af..e451174516 100644 --- a/test/unit/common/test_request_helpers.py +++ b/test/unit/common/test_request_helpers.py @@ -21,8 +21,8 @@ from swift.common.storage_policy import POLICIES, EC_POLICY, REPL_POLICY from swift.common.request_helpers import is_sys_meta, is_user_meta, \ is_sys_or_user_meta, strip_sys_meta_prefix, strip_user_meta_prefix, \ remove_items, copy_header_subset, get_name_and_placement, \ - http_response_to_document_iters, update_etag_is_at_header, \ - resolve_etag_is_at_header + http_response_to_document_iters, is_object_transient_sysmeta, \ + update_etag_is_at_header, resolve_etag_is_at_header from test.unit import patch_policies from test.unit.common.test_utils import FakeResponse @@ -69,6 +69,14 @@ class TestRequestHelpers(unittest.TestCase): self.assertEqual(strip_user_meta_prefix(st, 'x-%s-%s-a' % (st, mt)), 'a') + def test_is_object_transient_sysmeta(self): + self.assertTrue(is_object_transient_sysmeta( + 'x-object-transient-sysmeta-foo')) + self.assertFalse(is_object_transient_sysmeta( + 'x-object-transient-sysmeta-')) + self.assertFalse(is_object_transient_sysmeta( + 'x-object-meatmeta-foo')) + def test_remove_items(self): src = {'a': 'b', 'c': 'd'} diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py index 2a18478087..0a92f184f2 100644 --- a/test/unit/obj/test_diskfile.py +++ b/test/unit/obj/test_diskfile.py @@ -2374,6 +2374,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): def test_disk_file_default_disallowed_metadata(self): # build an object with some meta (at t0+1s) orig_metadata = {'X-Object-Meta-Key1': 'Value1', + 'X-Object-Transient-Sysmeta-KeyA': 'ValueA', 'Content-Type': 'text/garbage'} df = self._get_open_disk_file(ts=self.ts().internal, extra_metadata=orig_metadata) @@ -2382,6 +2383,7 @@ class DiskFileMixin(BaseDiskFileTestMixin): # write some new metadata (fast POST, don't send orig meta, at t0+1) df = self._simple_get_diskfile() df.write_metadata({'X-Timestamp': self.ts().internal, + 'X-Object-Transient-Sysmeta-KeyB': 'ValueB', 'X-Object-Meta-Key2': 'Value2'}) df = self._simple_get_diskfile() with df.open(): @@ -2389,8 +2391,11 @@ class DiskFileMixin(BaseDiskFileTestMixin): self.assertEqual('text/garbage', df._metadata['Content-Type']) # original fast-post updateable keys are removed self.assertNotIn('X-Object-Meta-Key1', df._metadata) + self.assertNotIn('X-Object-Transient-Sysmeta-KeyA', df._metadata) # new fast-post updateable keys are added self.assertEqual('Value2', df._metadata['X-Object-Meta-Key2']) + self.assertEqual('ValueB', + df._metadata['X-Object-Transient-Sysmeta-KeyB']) def test_disk_file_preserves_sysmeta(self): # build an object with some meta (at t0) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index a40d75c5a2..79fc1b32f4 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1683,7 +1683,8 @@ class TestObjectController(unittest.TestCase): 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', 'X-Object-Meta-1': 'One', 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + 'X-Object-Sysmeta-Two': 'Two', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) req.body = 'VERIFY SYSMETA' resp = 
req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) @@ -1702,7 +1703,8 @@ class TestObjectController(unittest.TestCase): 'name': '/a/c/o', 'X-Object-Meta-1': 'One', 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + 'X-Object-Sysmeta-Two': 'Two', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) def test_PUT_succeeds_with_later_POST(self): ts_iter = make_timestamp_iter() @@ -1875,6 +1877,62 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.object_controller) check_response(resp) + def test_POST_transient_sysmeta(self): + # check that diskfile transient system meta is changed by a POST + timestamp1 = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Timestamp': timestamp1, + 'Content-Type': 'text/plain', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'X-Object-Meta-1': 'One', + 'X-Object-Sysmeta-1': 'One', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) + req.body = 'VERIFY SYSMETA' + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 201) + + timestamp2 = normalize_timestamp(time()) + req = Request.blank( + '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, + headers={'X-Timestamp': timestamp2, + 'X-Object-Meta-1': 'Not One', + 'X-Object-Sysmeta-1': 'Not One', + 'X-Object-Transient-Sysmeta-Foo': 'Not Bar'}) + resp = req.get_response(self.object_controller) + self.assertEqual(resp.status_int, 202) + + # original .data file metadata should be unchanged + objfile = os.path.join( + self.testdir, 'sda1', + storage_directory(diskfile.get_data_dir(0), 'p', + hash_path('a', 'c', 'o')), + timestamp1 + '.data') + self.assertTrue(os.path.isfile(objfile)) + self.assertEqual(open(objfile).read(), 'VERIFY SYSMETA') + self.assertDictEqual(diskfile.read_metadata(objfile), + {'X-Timestamp': timestamp1, + 'Content-Length': '14', + 'Content-Type': 'text/plain', + 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'name': '/a/c/o', + 'X-Object-Meta-1': 'One', + 'X-Object-Sysmeta-1': 'One', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) + + # .meta file metadata should have only user meta items + metafile = os.path.join( + self.testdir, 'sda1', + storage_directory(diskfile.get_data_dir(0), 'p', + hash_path('a', 'c', 'o')), + timestamp2 + '.meta') + self.assertTrue(os.path.isfile(metafile)) + self.assertDictEqual(diskfile.read_metadata(metafile), + {'X-Timestamp': timestamp2, + 'name': '/a/c/o', + 'X-Object-Meta-1': 'Not One', + 'X-Object-Transient-Sysmeta-Foo': 'Not Bar'}) + def test_PUT_then_fetch_system_metadata(self): timestamp = normalize_timestamp(time()) req = Request.blank( @@ -1884,7 +1942,8 @@ class TestObjectController(unittest.TestCase): 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', 'X-Object-Meta-1': 'One', 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + 'X-Object-Sysmeta-Two': 'Two', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) req.body = 'VERIFY SYSMETA' resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) @@ -1903,6 +1962,8 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.headers['x-object-meta-1'], 'One') self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One') self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two') + self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'], + 'Bar') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) @@ -1921,9 +1982,13 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': 
timestamp, 'Content-Type': 'text/plain', 'ETag': '1000d172764c9dbc3a5798a67ec5bb76', + 'X-Object-Meta-0': 'deleted by post', + 'X-Object-Sysmeta-0': 'Zero', + 'X-Object-Transient-Sysmeta-0': 'deleted by post', 'X-Object-Meta-1': 'One', 'X-Object-Sysmeta-1': 'One', - 'X-Object-Sysmeta-Two': 'Two'}) + 'X-Object-Sysmeta-Two': 'Two', + 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) req.body = 'VERIFY SYSMETA' resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) @@ -1934,7 +1999,8 @@ class TestObjectController(unittest.TestCase): headers={'X-Timestamp': timestamp2, 'X-Object-Meta-1': 'Not One', 'X-Object-Sysmeta-1': 'Not One', - 'X-Object-Sysmeta-Two': 'Not Two'}) + 'X-Object-Sysmeta-Two': 'Not Two', + 'X-Object-Transient-Sysmeta-Foo': 'Not Bar'}) resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 202) @@ -1951,8 +2017,13 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.headers['etag'], '"1000d172764c9dbc3a5798a67ec5bb76"') self.assertEqual(resp.headers['x-object-meta-1'], 'Not One') + self.assertEqual(resp.headers['x-object-sysmeta-0'], 'Zero') self.assertEqual(resp.headers['x-object-sysmeta-1'], 'One') self.assertEqual(resp.headers['x-object-sysmeta-two'], 'Two') + self.assertEqual(resp.headers['x-object-transient-sysmeta-foo'], + 'Not Bar') + self.assertNotIn('x-object-meta-0', resp.headers) + self.assertNotIn('x-object-transient-sysmeta-0', resp.headers) req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 689c6c88a8..55214f6d03 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -29,7 +29,9 @@ from swift.common.http import is_success from swift.common.storage_policy import StoragePolicy from test.unit import fake_http_connect, FakeRing, FakeMemcache from swift.proxy import server as proxy_server -from swift.common.request_helpers import get_sys_meta_prefix +from swift.common.request_helpers import ( + get_sys_meta_prefix, get_object_transient_sysmeta +) from test.unit import patch_policies @@ -537,6 +539,14 @@ class TestFuncs(unittest.TestCase): self.assertEqual(resp['sysmeta']['whatevs'], 14) self.assertEqual(resp['sysmeta']['somethingelse'], 0) + def test_headers_to_object_info_transient_sysmeta(self): + headers = {get_object_transient_sysmeta('Whatevs'): 14, + get_object_transient_sysmeta('somethingelse'): 0} + resp = headers_to_object_info(headers.items(), 200) + self.assertEqual(len(resp['transient_sysmeta']), 2) + self.assertEqual(resp['transient_sysmeta']['whatevs'], 14) + self.assertEqual(resp['transient_sysmeta']['somethingelse'], 0) + def test_headers_to_object_info_values(self): headers = { 'content-length': '1024', diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index f43ca5778e..6452fb5b0c 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -53,7 +53,7 @@ from swift.common.utils import hash_path, storage_directory, \ iter_multipart_mime_documents, public from test.unit import ( - connect_tcp, readuntil2crlfs, FakeLogger, FakeRing, fake_http_connect, + connect_tcp, readuntil2crlfs, FakeLogger, fake_http_connect, FakeRing, FakeMemcache, debug_logger, patch_policies, write_fake_ring, mocked_http_conn, DEFAULT_TEST_EC_TYPE) from swift.proxy import server as proxy_server diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index 
1a7f82334e..eb58523e39 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -28,6 +28,7 @@ from swift.common.wsgi import monkey_patch_mimetools, WSGIContext from swift.obj import server as object_server from swift.proxy import server as proxy import swift.proxy.controllers +from swift.proxy.controllers.base import get_object_info from test.unit import FakeMemcache, debug_logger, FakeRing, \ fake_http_connect, patch_policies @@ -172,6 +173,17 @@ class TestObjectSysmeta(unittest.TestCase): 'x-object-meta-test1': 'meta1 changed'} new_meta_headers = {'x-object-meta-test3': 'meta3'} bad_headers = {'x-account-sysmeta-test1': 'bad1'} + # these transient_sysmeta headers get changed... + original_transient_sysmeta_headers_1 = \ + {'x-object-transient-sysmeta-testA': 'A'} + # these transient_sysmeta headers get deleted... + original_transient_sysmeta_headers_2 = \ + {'x-object-transient-sysmeta-testB': 'B'} + # these are replacement transient_sysmeta headers + changed_transient_sysmeta_headers = \ + {'x-object-transient-sysmeta-testA': 'changed_A'} + new_transient_sysmeta_headers_1 = {'x-object-transient-sysmeta-testC': 'C'} + new_transient_sysmeta_headers_2 = {'x-object-transient-sysmeta-testD': 'D'} def test_PUT_sysmeta_then_GET(self): path = '/v1/a/c/o' @@ -180,6 +192,7 @@ class TestObjectSysmeta(unittest.TestCase): hdrs = dict(self.original_sysmeta_headers_1) hdrs.update(self.original_meta_headers_1) hdrs.update(self.bad_headers) + hdrs.update(self.original_transient_sysmeta_headers_1) req = Request.blank(path, environ=env, headers=hdrs, body='x') resp = req.get_response(self.app) self._assertStatus(resp, 201) @@ -189,6 +202,7 @@ class TestObjectSysmeta(unittest.TestCase): self._assertStatus(resp, 200) self._assertInHeaders(resp, self.original_sysmeta_headers_1) self._assertInHeaders(resp, self.original_meta_headers_1) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1) self._assertNotInHeaders(resp, self.bad_headers) def test_PUT_sysmeta_then_HEAD(self): @@ -198,6 +212,7 @@ class TestObjectSysmeta(unittest.TestCase): hdrs = dict(self.original_sysmeta_headers_1) hdrs.update(self.original_meta_headers_1) hdrs.update(self.bad_headers) + hdrs.update(self.original_transient_sysmeta_headers_1) req = Request.blank(path, environ=env, headers=hdrs, body='x') resp = req.get_response(self.app) self._assertStatus(resp, 201) @@ -208,6 +223,7 @@ class TestObjectSysmeta(unittest.TestCase): self._assertStatus(resp, 200) self._assertInHeaders(resp, self.original_sysmeta_headers_1) self._assertInHeaders(resp, self.original_meta_headers_1) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1) self._assertNotInHeaders(resp, self.bad_headers) def test_sysmeta_replaced_by_PUT(self): @@ -306,6 +322,8 @@ class TestObjectSysmeta(unittest.TestCase): hdrs.update(self.original_sysmeta_headers_2) hdrs.update(self.original_meta_headers_1) hdrs.update(self.original_meta_headers_2) + hdrs.update(self.original_transient_sysmeta_headers_1) + hdrs.update(self.original_transient_sysmeta_headers_2) req = Request.blank(path, environ=env, headers=hdrs, body='x') resp = req.get_response(self.copy_app) self._assertStatus(resp, 201) @@ -315,6 +333,8 @@ class TestObjectSysmeta(unittest.TestCase): hdrs.update(self.new_sysmeta_headers) hdrs.update(self.changed_meta_headers) hdrs.update(self.new_meta_headers) + hdrs.update(self.changed_transient_sysmeta_headers) + hdrs.update(self.new_transient_sysmeta_headers_1) hdrs.update(self.bad_headers) 
hdrs.update({'Destination': dest}) req = Request.blank(path, environ=env, headers=hdrs) @@ -326,6 +346,9 @@ class TestObjectSysmeta(unittest.TestCase): self._assertInHeaders(resp, self.changed_meta_headers) self._assertInHeaders(resp, self.new_meta_headers) self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertInHeaders(resp, self.changed_transient_sysmeta_headers) + self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2) self._assertNotInHeaders(resp, self.bad_headers) req = Request.blank('/v1/a/c/o2', environ={}) @@ -337,6 +360,9 @@ class TestObjectSysmeta(unittest.TestCase): self._assertInHeaders(resp, self.changed_meta_headers) self._assertInHeaders(resp, self.new_meta_headers) self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertInHeaders(resp, self.changed_transient_sysmeta_headers) + self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2) self._assertNotInHeaders(resp, self.bad_headers) def test_sysmeta_updated_by_COPY_from(self): @@ -380,3 +406,84 @@ class TestObjectSysmeta(unittest.TestCase): self._assertInHeaders(resp, self.new_meta_headers) self._assertInHeaders(resp, self.original_meta_headers_2) self._assertNotInHeaders(resp, self.bad_headers) + + def _test_transient_sysmeta_replaced_by_PUT_or_POST(self, app): + # check transient_sysmeta is replaced en-masse by a POST + path = '/v1/a/c/o' + + env = {'REQUEST_METHOD': 'PUT'} + hdrs = dict(self.original_transient_sysmeta_headers_1) + hdrs.update(self.original_transient_sysmeta_headers_2) + hdrs.update(self.original_meta_headers_1) + req = Request.blank(path, environ=env, headers=hdrs, body='x') + resp = req.get_response(app) + self._assertStatus(resp, 201) + + req = Request.blank(path, environ={}) + resp = req.get_response(app) + self._assertStatus(resp, 200) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_1) + self._assertInHeaders(resp, self.original_transient_sysmeta_headers_2) + self._assertInHeaders(resp, self.original_meta_headers_1) + + info = get_object_info(req.environ, app) + self.assertEqual(2, len(info.get('transient_sysmeta', ()))) + self.assertEqual({'testa': 'A', 'testb': 'B'}, + info['transient_sysmeta']) + + # POST will replace all existing transient_sysmeta and usermeta values + env = {'REQUEST_METHOD': 'POST'} + hdrs = dict(self.changed_transient_sysmeta_headers) + hdrs.update(self.new_transient_sysmeta_headers_1) + req = Request.blank(path, environ=env, headers=hdrs) + resp = req.get_response(app) + self._assertStatus(resp, 202) + + req = Request.blank(path, environ={}) + resp = req.get_response(app) + self._assertStatus(resp, 200) + self._assertInHeaders(resp, self.changed_transient_sysmeta_headers) + self._assertInHeaders(resp, self.new_transient_sysmeta_headers_1) + self._assertNotInHeaders(resp, self.original_meta_headers_1) + self._assertNotInHeaders(resp, + self.original_transient_sysmeta_headers_2) + + info = get_object_info(req.environ, app) + self.assertEqual(2, len(info.get('transient_sysmeta', ()))) + self.assertEqual({'testa': 'changed_A', 'testc': 'C'}, + info['transient_sysmeta']) + + # subsequent PUT replaces all transient_sysmeta and usermeta values + env = {'REQUEST_METHOD': 'PUT'} + hdrs = dict(self.new_transient_sysmeta_headers_2) + hdrs.update(self.original_meta_headers_2) + req = Request.blank(path, environ=env, headers=hdrs, body='x') + resp = req.get_response(app) + 
self._assertStatus(resp, 201) + + req = Request.blank(path, environ={}) + resp = req.get_response(app) + self._assertStatus(resp, 200) + self._assertInHeaders(resp, self.original_meta_headers_2) + self._assertInHeaders(resp, self.new_transient_sysmeta_headers_2) + # meta from previous POST should have gone away... + self._assertNotInHeaders(resp, self.changed_transient_sysmeta_headers) + self._assertNotInHeaders(resp, self.new_transient_sysmeta_headers_1) + # sanity check that meta from first PUT did not re-appear... + self._assertNotInHeaders(resp, self.original_meta_headers_1) + self._assertNotInHeaders(resp, + self.original_transient_sysmeta_headers_1) + self._assertNotInHeaders(resp, + self.original_transient_sysmeta_headers_2) + + info = get_object_info(req.environ, app) + self.assertEqual(1, len(info.get('transient_sysmeta', ()))) + self.assertEqual({'testd': 'D'}, info['transient_sysmeta']) + + def test_transient_sysmeta_replaced_by_PUT_or_POST(self): + self._test_transient_sysmeta_replaced_by_PUT_or_POST(self.app) + + def test_transient_sysmeta_replaced_by_PUT_or_POST_as_copy(self): + # test post-as-copy by issuing requests to the copy middleware app + self.copy_app.object_post_as_copy = True + self._test_transient_sysmeta_replaced_by_PUT_or_POST(self.copy_app) From 4a9f7378ec4d73c8bf16748e24bec45aa503b08e Mon Sep 17 00:00:00 2001 From: zhengyao1 Date: Fri, 24 Jun 2016 17:34:26 +0800 Subject: [PATCH 026/156] Make print python3 compatible The print statement (e.g. print '') is supported in Python 2, but in Python 3 it is a syntax error; Python 3 requires the print() function instead. This patch converts the remaining print statements to print() calls. Change-Id: I226461b0400023dc44238d9e5ee1ae2f2430de9e Closes-Bug: #1595773 --- swift/cli/ring_builder_analyzer.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/swift/cli/ring_builder_analyzer.py b/swift/cli/ring_builder_analyzer.py index 599af0e4d7..5c1955414c 100644 --- a/swift/cli/ring_builder_analyzer.py +++ b/swift/cli/ring_builder_analyzer.py @@ -294,7 +294,7 @@ def run_scenario(scenario): } for round_index, commands in enumerate(scenario['rounds']): - print "Round %d" % (round_index + 1) + print("Round %d" % (round_index + 1)) for command in commands: key = command.pop(0) @@ -307,17 +307,16 @@ def run_scenario(scenario): rebalance_number = 1 parts_moved, old_balance, removed_devs = rb.rebalance(seed=seed) rb.pretend_min_part_hours_passed() - print "\tRebalance 1: moved %d parts, balance is %.6f, \ - %d removed devs" % ( - parts_moved, old_balance, removed_devs) + print("\tRebalance 1: moved %d parts, balance is %.6f, %d removed " + "devs" % (parts_moved, old_balance, removed_devs)) while True: rebalance_number += 1 parts_moved, new_balance, removed_devs = rb.rebalance(seed=seed) rb.pretend_min_part_hours_passed() - print "\tRebalance %d: moved %d parts, balance is %.6f, \ - %d removed devs" % ( - rebalance_number, parts_moved, new_balance, removed_devs) + print("\tRebalance %d: moved %d parts, balance is %.6f, " + "%d removed devs" % (rebalance_number, parts_moved, + new_balance, removed_devs)) if parts_moved == 0 and removed_devs == 0: break if abs(new_balance - old_balance) < 1 and not ( From 365171395ea10def59d40e7fcc4d0f18c51bbe7b Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 29 Jun 2016 12:22:22 -0700 Subject: [PATCH 027/156] Remove some unnecessary error handling in healthcheck ...as well as an unused class variable.
Change-Id: If1091f420b0bcf34c37e49b13f59b229e8deecc6 --- swift/common/middleware/healthcheck.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/swift/common/middleware/healthcheck.py b/swift/common/middleware/healthcheck.py index 5b58e46ee2..cfb0b8a0ba 100644 --- a/swift/common/middleware/healthcheck.py +++ b/swift/common/middleware/healthcheck.py @@ -31,7 +31,6 @@ class HealthCheckMiddleware(object): def __init__(self, app, conf): self.app = app - self.conf = conf self.disable_path = conf.get('disable_path', '') def GET(self, req): @@ -45,15 +44,11 @@ class HealthCheckMiddleware(object): def __call__(self, env, start_response): req = Request(env) - try: - if req.path == '/healthcheck': - handler = self.GET - if self.disable_path and os.path.exists(self.disable_path): - handler = self.DISABLED - return handler(req)(env, start_response) - except UnicodeError: - # definitely, this is not /healthcheck - pass + if req.path == '/healthcheck': + handler = self.GET + if self.disable_path and os.path.exists(self.disable_path): + handler = self.DISABLED + return handler(req)(env, start_response) return self.app(env, start_response) From 57ac316b0324a50f2313a125e8ae6c582953a1bd Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 29 Jun 2016 13:21:31 -0700 Subject: [PATCH 028/156] Fix intermittent SLO unit test failures Now that we have concurrent deletes, the order is not guaranteed. Change-Id: Ib833306a07ee0755a80501d5294eaa87b2347dc0 --- test/unit/common/middleware/test_slo.py | 65 ++++++++++++------------- 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/test/unit/common/middleware/test_slo.py b/test/unit/common/middleware/test_slo.py index 83a34c48f5..3898c9db6b 100644 --- a/test/unit/common/middleware/test_slo.py +++ b/test/unit/common/middleware/test_slo.py @@ -840,15 +840,15 @@ class TestSloDeleteManifest(SloTestCase): status, headers, body = self.call_slo(req) resp_data = json.loads(body) self.assertEqual( - self.app.calls, - [('GET', - '/v1/AUTH_test/deltest/man?multipart-manifest=get'), - ('DELETE', - '/v1/AUTH_test/deltest/gone?multipart-manifest=delete'), - ('DELETE', - '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), - ('DELETE', - '/v1/AUTH_test/deltest/man?multipart-manifest=delete')]) + set(self.app.calls), + set([('GET', + '/v1/AUTH_test/deltest/man?multipart-manifest=get'), + ('DELETE', + '/v1/AUTH_test/deltest/gone?multipart-manifest=delete'), + ('DELETE', + '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), + ('DELETE', + '/v1/AUTH_test/deltest/man?multipart-manifest=delete')])) self.assertEqual(resp_data['Response Status'], '200 OK') self.assertEqual(resp_data['Number Deleted'], 2) self.assertEqual(resp_data['Number Not Found'], 1) @@ -858,14 +858,13 @@ class TestSloDeleteManifest(SloTestCase): '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=delete', environ={'REQUEST_METHOD': 'DELETE'}) self.call_slo(req) - self.assertEqual( - self.app.calls, - [('GET', - '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'), - ('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), - ('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'), - ('DELETE', ('/v1/AUTH_test/deltest/' + - 'man-all-there?multipart-manifest=delete'))]) + self.assertEqual(set(self.app.calls), set([ + ('GET', + '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'), + ('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), + ('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'), + ('DELETE', 
('/v1/AUTH_test/deltest/' + + 'man-all-there?multipart-manifest=delete'))])) def test_handle_multipart_delete_nested(self): req = Request.blank( @@ -1013,14 +1012,15 @@ class TestSloDeleteManifest(SloTestCase): status, headers, body = self.call_slo(req) resp_data = json.loads(body) self.assertEqual( - self.app.calls, - [('GET', '/v1/AUTH_test/deltest/' + - 'manifest-with-unauth-segment?multipart-manifest=get'), - ('DELETE', '/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'), - ('DELETE', '/v1/AUTH_test/deltest-unauth/' + - 'q_17?multipart-manifest=delete'), - ('DELETE', '/v1/AUTH_test/deltest/' + - 'manifest-with-unauth-segment?multipart-manifest=delete')]) + set(self.app.calls), + set([('GET', '/v1/AUTH_test/deltest/' + + 'manifest-with-unauth-segment?multipart-manifest=get'), + ('DELETE', + '/v1/AUTH_test/deltest/a_1?multipart-manifest=delete'), + ('DELETE', '/v1/AUTH_test/deltest-unauth/' + + 'q_17?multipart-manifest=delete'), + ('DELETE', '/v1/AUTH_test/deltest/' + + 'manifest-with-unauth-segment?multipart-manifest=delete')])) self.assertEqual(resp_data['Response Status'], '400 Bad Request') self.assertEqual(resp_data['Response Body'], '') self.assertEqual(resp_data['Number Deleted'], 2) @@ -1039,14 +1039,13 @@ class TestSloDeleteManifest(SloTestCase): resp_data = json.loads(body) self.assertEqual(resp_data["Number Deleted"], 3) - self.assertEqual( - self.app.calls, - [('GET', - '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'), - ('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), - ('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'), - ('DELETE', ('/v1/AUTH_test/deltest/' + - 'man-all-there?multipart-manifest=delete'))]) + self.assertEqual(set(self.app.calls), set([ + ('GET', + '/v1/AUTH_test/deltest/man-all-there?multipart-manifest=get'), + ('DELETE', '/v1/AUTH_test/deltest/b_2?multipart-manifest=delete'), + ('DELETE', '/v1/AUTH_test/deltest/c_3?multipart-manifest=delete'), + ('DELETE', ('/v1/AUTH_test/deltest/' + + 'man-all-there?multipart-manifest=delete'))])) class TestSloHeadManifest(SloTestCase): From 6970099a76a8713da7744da40b462985b2c82a6a Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 29 Jun 2016 14:30:39 -0700 Subject: [PATCH 029/156] Fix intermittent bulk delete unit test failures Change-Id: I0822b14d7b1ddae5fe0cc567c7cbaf544cb081ee Closes-Bug: 1588414 --- test/unit/common/middleware/test_bulk.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/unit/common/middleware/test_bulk.py b/test/unit/common/middleware/test_bulk.py index 1439b0bd2e..7399b00231 100644 --- a/test/unit/common/middleware/test_bulk.py +++ b/test/unit/common/middleware/test_bulk.py @@ -639,9 +639,9 @@ class TestDelete(unittest.TestCase): resp_body = ''.join(self.bulk.handle_delete_iter( req, objs_to_delete=objs_to_delete, out_content_type='application/json')) - self.assertEqual( - self.app.delete_paths, ['/delete_works/AUTH_Acc/c/file_a', - '/delete_works/AUTH_Acc/c/file_d']) + self.assertEqual(set(self.app.delete_paths), + set(['/delete_works/AUTH_Acc/c/file_a', + '/delete_works/AUTH_Acc/c/file_d'])) self.assertEqual(self.app.calls, 2) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Response Status'], '400 Bad Request') @@ -655,9 +655,9 @@ class TestDelete(unittest.TestCase): headers={'Accept': 'application/json'}) req.method = 'POST' resp_body = self.handle_delete_and_iter(req) - self.assertEqual( - self.app.delete_paths, - ['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404']) + 
self.assertEqual(set(self.app.delete_paths), + set(['/delete_works/AUTH_Acc/c/f', + '/delete_works/AUTH_Acc/c/f404'])) self.assertEqual(self.app.calls, 2) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Number Deleted'], 1) @@ -668,9 +668,9 @@ class TestDelete(unittest.TestCase): headers={'Accept': 'application/json'}) req.method = 'DELETE' resp_body = self.handle_delete_and_iter(req) - self.assertEqual( - self.app.delete_paths, - ['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404']) + self.assertEqual(set(self.app.delete_paths), + set(['/delete_works/AUTH_Acc/c/f', + '/delete_works/AUTH_Acc/c/f404'])) self.assertEqual(self.app.calls, 2) resp_data = utils.json.loads(resp_body) self.assertEqual(resp_data['Number Deleted'], 1) From c9b4d54972587f5f72c8c15820a85433b279a0c5 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 30 Jun 2016 14:36:39 -0700 Subject: [PATCH 030/156] Change elifs to ifs John seemed to have some misgivings about using elif. Change-Id: I39962607cf2a8f90353020f67979e92b48959dd6 Related-Change: I7732f13b06c8826537b8f8230a2785607790b8e1 --- swift/common/constraints.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 787d2d91da..e8c09ac3a3 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -156,16 +156,16 @@ def check_metadata(req, target_type): return HTTPBadRequest( body='Metadata name too long: %s%s' % (prefix, key), request=req, content_type='text/plain') - elif len(value) > MAX_META_VALUE_LENGTH: + if len(value) > MAX_META_VALUE_LENGTH: return HTTPBadRequest( body='Metadata value longer than %d: %s%s' % ( MAX_META_VALUE_LENGTH, prefix, key), request=req, content_type='text/plain') - elif meta_count > MAX_META_COUNT: + if meta_count > MAX_META_COUNT: return HTTPBadRequest( body='Too many metadata items; max %d' % MAX_META_COUNT, request=req, content_type='text/plain') - elif meta_size > MAX_META_OVERALL_SIZE: + if meta_size > MAX_META_OVERALL_SIZE: return HTTPBadRequest( body='Total metadata too large; max %d' % MAX_META_OVERALL_SIZE, From 96a0e077532c3227b9290af7d74a0b42ee08e8de Mon Sep 17 00:00:00 2001 From: Janie Richling Date: Tue, 7 Jun 2016 15:01:32 +0100 Subject: [PATCH 031/156] Enable object body and metadata encryption Adds encryption middlewares. All object servers and proxy servers should be upgraded before introducing encryption middleware. Encryption middleware should be first introduced with the encryption middleware disable_encryption option set to True. Once all proxies have encryption middleware installed this option may be set to False (the default). Increases constraints.py:MAX_HEADER_COUNT by 4 to allow for headers generated by encryption-related middleware. 
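As a rough illustrative sketch (an editor's addition, not part of the original patch; it simply restates the sample configuration shipped below), a staged rollout might first add the two filters to every proxy with encryption disabled, then flip the option once all proxies run the middleware:

    pipeline = <existing middleware> keymaster encryption proxy-logging proxy-server

    [filter:keymaster]
    use = egg:swift#keymaster
    # a suitable root secret can be generated with e.g.: openssl rand -base64 32
    encryption_root_secret = <base64 encoding of at least 32 random bytes>

    [filter:encryption]
    use = egg:swift#encryption
    disable_encryption = True  # set to False once all proxies are upgraded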
Co-Authored-By: Tim Burke Co-Authored-By: Christian Cachin Co-Authored-By: Mahati Chamarthy Co-Authored-By: Peter Chng Co-Authored-By: Alistair Coles Co-Authored-By: Jonathan Hinson Co-Authored-By: Hamdi Roumani UpgradeImpact Change-Id: Ie6db22697ceb1021baaa6bddcf8e41ae3acb5376 --- doc/source/middleware.rst | 18 +- etc/proxy-server.conf-sample | 32 +- etc/swift.conf-sample | 7 +- other-requirements.txt | 2 + requirements.txt | 1 + setup.cfg | 2 + swift/common/constraints.py | 9 +- swift/common/exceptions.py | 4 + swift/common/middleware/crypto/__init__.py | 34 + .../common/middleware/crypto/crypto_utils.py | 283 +++++ swift/common/middleware/crypto/decrypter.py | 449 +++++++ swift/common/middleware/crypto/encrypter.py | 369 ++++++ swift/common/middleware/crypto/keymaster.py | 153 +++ swift/common/swob.py | 1 + test/functional/__init__.py | 6 + test/probe/test_empty_device_handoff.py | 15 +- test/probe/test_object_failures.py | 19 +- test/probe/test_object_handoff.py | 34 +- .../unit/common/middleware/crypto/__init__.py | 0 .../middleware/crypto/crypto_helpers.py | 54 + .../common/middleware/crypto/test_crypto.py | 39 + .../middleware/crypto/test_crypto_utils.py | 495 ++++++++ .../middleware/crypto/test_decrypter.py | 1119 +++++++++++++++++ .../middleware/crypto/test_encrypter.py | 820 ++++++++++++ .../middleware/crypto/test_encryption.py | 631 ++++++++++ .../middleware/crypto/test_keymaster.py | 163 +++ 26 files changed, 4731 insertions(+), 28 deletions(-) create mode 100644 swift/common/middleware/crypto/__init__.py create mode 100644 swift/common/middleware/crypto/crypto_utils.py create mode 100644 swift/common/middleware/crypto/decrypter.py create mode 100644 swift/common/middleware/crypto/encrypter.py create mode 100644 swift/common/middleware/crypto/keymaster.py create mode 100644 test/unit/common/middleware/crypto/__init__.py create mode 100644 test/unit/common/middleware/crypto/crypto_helpers.py create mode 100644 test/unit/common/middleware/crypto/test_crypto.py create mode 100644 test/unit/common/middleware/crypto/test_crypto_utils.py create mode 100644 test/unit/common/middleware/crypto/test_decrypter.py create mode 100644 test/unit/common/middleware/crypto/test_encrypter.py create mode 100644 test/unit/common/middleware/crypto/test_encryption.py create mode 100644 test/unit/common/middleware/crypto/test_keymaster.py diff --git a/doc/source/middleware.rst b/doc/source/middleware.rst index a078747204..f636c11f91 100644 --- a/doc/source/middleware.rst +++ b/doc/source/middleware.rst @@ -96,6 +96,15 @@ DLO support centers around a user specified filter that matches segments and concatenates them together in object listing order. Please see the DLO docs for :ref:`dlo-doc` further details. +.. _encryption: + +Encryption +========== + +.. automodule:: swift.common.middleware.crypto + :members: + :show-inheritance: + .. _formpost: FormPost @@ -108,7 +117,7 @@ FormPost .. _gatekeeper: GateKeeper -============= +========== .. automodule:: swift.common.middleware.gatekeeper :members: @@ -123,6 +132,13 @@ Healthcheck :members: :show-inheritance: +Keymaster +========= + +.. automodule:: swift.common.middleware.crypto.keymaster + :members: + :show-inheritance: + .. 
_keystoneauth: KeystoneAuth diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 6a4962ff9c..aebb872787 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -79,7 +79,7 @@ bind_port = 8080 [pipeline:main] # This sample pipeline uses tempauth and is used for SAIO dev work and # testing. See below for a pipeline using keystone. -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes keymaster encryption proxy-logging proxy-server # The following pipeline shows keystone integration. Comment out the one # above and uncomment this one. Additional steps for integrating keystone are @@ -765,3 +765,33 @@ use = egg:swift#copy # When object_post_as_copy is set to True, a POST request will be transformed # into a COPY request where source and destination objects are the same. # object_post_as_copy = true + +# Note: To enable encryption, add the following 2 dependent pieces of crypto +# middleware to the proxy-server pipeline. They should be to the right of all +# other middleware apart from the final proxy-logging middleware, and in the +# order shown in this example: +# keymaster encryption proxy-logging proxy-server +[filter:keymaster] +use = egg:swift#keymaster + +# Sets the root secret from which encryption keys are derived. This must be set +# before first use to a value that is a base64 encoding of at least 32 bytes. +# The security of all encrypted data critically depends on this key, therefore +# it should be set to a high-entropy value. For example, a suitable value may +# be obtained by base-64 encoding a 32 byte (or longer) value generated by a +# cryptographically secure random number generator. Changing the root secret is +# likely to result in data loss. +# TODO - STOP SETTING THIS DEFAULT! This is only here while work +# continues on the feature/crypto branch. Later, this will be added +# to the devstack proxy-config so that gate tests can pass. +# base64 encoding of "dontEverUseThisIn_PRODUCTION_xxxxxxxxxxxxxxx" +encryption_root_secret = ZG9udEV2ZXJVc2VUaGlzSW5fUFJPRFVDVElPTl94eHh4eHh4eHh4eHh4eHg= + +[filter:encryption] +use = egg:swift#encryption + +# By default all PUT or POST'ed object data and/or metadata will be encrypted. +# Encryption of new data and/or metadata may be disabled by setting +# disable_encryption to True. However, all encryption middleware should remain +# in the pipeline in order for existing encrypted data to be read. +# disable_encryption = False diff --git a/etc/swift.conf-sample b/etc/swift.conf-sample index 78684730e2..1d21ba20a8 100644 --- a/etc/swift.conf-sample +++ b/etc/swift.conf-sample @@ -136,9 +136,10 @@ aliases = yellow, orange # By default the maximum number of allowed headers depends on the number of max -# allowed metadata settings plus a default value of 32 for regular http -# headers. If for some reason this is not enough (custom middleware for -# example) it can be increased with the extra_header_count constraint. +# allowed metadata settings plus a default value of 36 for swift internally +# generated headers and regular http headers. If for some reason this is not +# enough (custom middleware for example) it can be increased with the +# extra_header_count constraint. 
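+# (Editor's illustration, not part of the original sample: with the default
+# max_meta_count of 90 this yields 90 + 36 = 126 allowed headers before any
+# extra_header_count is added.)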
#extra_header_count = 0 diff --git a/other-requirements.txt b/other-requirements.txt index 394f2b0f7a..2fef68fdd8 100644 --- a/other-requirements.txt +++ b/other-requirements.txt @@ -13,3 +13,5 @@ python-dev [platform:dpkg] python-devel [platform:rpm] rsync xfsprogs +libssl-dev [platform:dpkg] +openssl-devel [platform:rpm] diff --git a/requirements.txt b/requirements.txt index 3480d4f3b2..3c17288b9b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,3 +11,4 @@ pastedeploy>=1.3.3 six>=1.9.0 xattr>=0.4 PyECLib>=1.2.0 # BSD +cryptography>=1.0,!=1.3.0 # BSD/Apache-2.0 diff --git a/setup.cfg b/setup.cfg index 098b6c64f7..cb4cda4419 100644 --- a/setup.cfg +++ b/setup.cfg @@ -97,6 +97,8 @@ paste.filter_factory = xprofile = swift.common.middleware.xprofile:filter_factory versioned_writes = swift.common.middleware.versioned_writes:filter_factory copy = swift.common.middleware.copy:filter_factory + keymaster = swift.common.middleware.crypto.keymaster:filter_factory + encryption = swift.common.middleware.crypto:filter_factory [build_sphinx] all_files = 1 diff --git a/swift/common/constraints.py b/swift/common/constraints.py index 787d2d91da..efb7089871 100644 --- a/swift/common/constraints.py +++ b/swift/common/constraints.py @@ -110,10 +110,11 @@ FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json', # By default the maximum number of allowed headers depends on the number of max -# allowed metadata settings plus a default value of 32 for regular http -# headers. If for some reason this is not enough (custom middleware for -# example) it can be increased with the extra_header_count constraint. -MAX_HEADER_COUNT = MAX_META_COUNT + 32 + max(EXTRA_HEADER_COUNT, 0) +# allowed metadata settings plus a default value of 36 for swift internally +# generated headers and regular http headers. If for some reason this is not +# enough (custom middleware for example) it can be increased with the +# extra_header_count constraint. +MAX_HEADER_COUNT = MAX_META_COUNT + 36 + max(EXTRA_HEADER_COUNT, 0) def check_metadata(req, target_type): diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py index 721ac3421a..05f972f972 100644 --- a/swift/common/exceptions.py +++ b/swift/common/exceptions.py @@ -207,6 +207,10 @@ class APIVersionError(SwiftException): pass +class EncryptionException(SwiftException): + pass + + class ClientException(Exception): def __init__(self, msg, http_scheme='', http_host='', http_port='', diff --git a/swift/common/middleware/crypto/__init__.py b/swift/common/middleware/crypto/__init__.py new file mode 100644 index 0000000000..55fd93a046 --- /dev/null +++ b/swift/common/middleware/crypto/__init__.py @@ -0,0 +1,34 @@ +# Copyright (c) 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Implements middleware for object encryption which comprises an instance of a +Decrypter combined with an instance of an Encrypter. 
+""" +from swift.common.middleware.crypto.decrypter import Decrypter +from swift.common.middleware.crypto.encrypter import Encrypter + +from swift.common.utils import config_true_value, register_swift_info + + +def filter_factory(global_conf, **local_conf): + """Provides a factory function for loading encryption middleware.""" + conf = global_conf.copy() + conf.update(local_conf) + enabled = not config_true_value(conf.get('disable_encryption', 'false')) + register_swift_info('encryption', admin=True, enabled=enabled) + + def encryption_filter(app): + return Decrypter(Encrypter(app, conf), conf) + return encryption_filter diff --git a/swift/common/middleware/crypto/crypto_utils.py b/swift/common/middleware/crypto/crypto_utils.py new file mode 100644 index 0000000000..4efa152259 --- /dev/null +++ b/swift/common/middleware/crypto/crypto_utils.py @@ -0,0 +1,283 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import binascii +import collections +import json +import os + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes +import six +from six.moves.urllib import parse as urlparse + +from swift import gettext_ as _ +from swift.common.exceptions import EncryptionException +from swift.common.swob import HTTPInternalServerError +from swift.common.utils import get_logger +from swift.common.wsgi import WSGIContext + +CRYPTO_KEY_CALLBACK = 'swift.callback.fetch_crypto_keys' + + +class Crypto(object): + """ + Used by middleware: Calls cryptography library + """ + cipher = 'AES_CTR_256' + # AES will accept several key sizes - we are using 256 bits i.e. 32 bytes + key_length = 32 + iv_length = algorithms.AES.block_size / 8 + + def __init__(self, conf=None): + self.logger = get_logger(conf, log_route="crypto") + # memoize backend to avoid repeated iteration over entry points + self.backend = default_backend() + + def create_encryption_ctxt(self, key, iv): + """ + Creates a crypto context for encrypting + + :param key: 256-bit key + :param iv: 128-bit iv or nonce used for encryption + :raises: ValueError on invalid key or iv + :returns: an instance of an encryptor + """ + self.check_key(key) + engine = Cipher(algorithms.AES(key), modes.CTR(iv), + backend=self.backend) + return engine.encryptor() + + def create_decryption_ctxt(self, key, iv, offset): + """ + Creates a crypto context for decrypting + + :param key: 256-bit key + :param iv: 128-bit iv or nonce used for decryption + :param offset: offset into the message; used for range reads + :returns: an instance of a decryptor + """ + self.check_key(key) + if offset < 0: + raise ValueError('Offset must not be negative') + if offset: + # Adjust IV so that it is correct for decryption at offset. + # The CTR mode offset is incremented for every AES block and taken + # modulo 2^128. 
+ offset_blocks, offset_in_block = divmod(offset, self.iv_length) + ivl = long(binascii.hexlify(iv), 16) + offset_blocks + ivl %= 1 << algorithms.AES.block_size + iv = str(bytearray.fromhex(format( + ivl, '0%dx' % (2 * self.iv_length)))) + else: + offset_in_block = 0 + + engine = Cipher(algorithms.AES(key), modes.CTR(iv), + backend=self.backend) + dec = engine.decryptor() + # Adjust decryption boundary within current AES block + dec.update('*' * offset_in_block) + return dec + + def create_iv(self): + return os.urandom(self.iv_length) + + def create_crypto_meta(self): + # create a set of parameters + return {'iv': self.create_iv(), 'cipher': self.cipher} + + def check_crypto_meta(self, meta): + """ + Check that crypto meta dict has valid items. + + :param meta: a dict + :raises EncryptionException: if an error is found in the crypto meta + """ + try: + if meta['cipher'] != self.cipher: + raise EncryptionException('Bad crypto meta: Cipher must be %s' + % self.cipher) + if len(meta['iv']) != self.iv_length: + raise EncryptionException( + 'Bad crypto meta: IV must be length %s bytes' + % self.iv_length) + except KeyError as err: + raise EncryptionException( + 'Bad crypto meta: Missing %s' % err) + + def create_random_key(self): + # helper method to create random key of correct length + return os.urandom(self.key_length) + + def wrap_key(self, wrapping_key, key_to_wrap): + # we don't use an RFC 3394 key wrap algorithm such as cryptography's + # aes_wrap_key because it's slower and we have iv material readily + # available so don't need a deterministic algorithm + iv = self.create_iv() + encryptor = Cipher(algorithms.AES(wrapping_key), modes.CTR(iv), + backend=self.backend).encryptor() + return {'key': encryptor.update(key_to_wrap), 'iv': iv} + + def unwrap_key(self, wrapping_key, context): + # unwrap a key from dict of form returned by wrap_key + # check the key length early - unwrapping won't change the length + self.check_key(context['key']) + decryptor = Cipher(algorithms.AES(wrapping_key), + modes.CTR(context['iv']), + backend=self.backend).decryptor() + return decryptor.update(context['key']) + + def check_key(self, key): + if len(key) != self.key_length: + raise ValueError("Key must be length %s bytes" % self.key_length) + + +class CryptoWSGIContext(WSGIContext): + """ + Base class for contexts used by crypto middlewares. + """ + def __init__(self, crypto_app, server_type, logger): + super(CryptoWSGIContext, self).__init__(crypto_app.app) + self.crypto = crypto_app.crypto + self.logger = logger + self.server_type = server_type + + def get_keys(self, env, required=None): + # Get the key(s) from the keymaster + required = required if required is not None else [self.server_type] + try: + fetch_crypto_keys = env[CRYPTO_KEY_CALLBACK] + except KeyError: + self.logger.exception(_('ERROR get_keys() missing callback')) + raise HTTPInternalServerError( + "Unable to retrieve encryption keys.") + + try: + keys = fetch_crypto_keys() + except Exception as err: # noqa + self.logger.exception(_( + 'ERROR get_keys(): from callback: %s') % err) + raise HTTPInternalServerError( + "Unable to retrieve encryption keys.") + + for name in required: + try: + key = keys[name] + self.crypto.check_key(key) + continue + except KeyError: + self.logger.exception(_("Missing key for %r") % name) + except TypeError: + self.logger.exception(_("Did not get a keys dict")) + except ValueError as e: + # don't include the key in any messages! 
+ self.logger.exception(_("Bad key for %(name)r: %(err)s") % + {'name': name, 'err': e}) + raise HTTPInternalServerError( + "Unable to retrieve encryption keys.") + + return keys + + +def dump_crypto_meta(crypto_meta): + """ + Serialize crypto meta to a form suitable for including in a header value. + + The crypto-meta is serialized as a json object. The iv and key values are + random bytes and as a result need to be base64 encoded before sending over + the wire. Base64 encoding returns a bytes object in py3; to future-proof + the code, decode this data to produce a string, which is what the + json.dumps function expects. + + :param crypto_meta: a dict containing crypto meta items + :returns: a string serialization of a crypto meta dict + """ + def b64_encode_meta(crypto_meta): + return { + name: (base64.b64encode(value).decode() if name in ('iv', 'key') + else b64_encode_meta(value) if isinstance(value, dict) + else value) + for name, value in crypto_meta.items()} + + # use sort_keys=True to make serialized form predictable for testing + return urlparse.quote_plus( + json.dumps(b64_encode_meta(crypto_meta), sort_keys=True)) + + +def load_crypto_meta(value): + """ + Build the crypto_meta from the json object. + + Note that json.loads always produces unicode strings; to ensure the + resultant crypto_meta matches the original object, cast all key and value + data to a str except the key and iv which are base64 decoded. This will + work in py3 as well where all strings are unicode implying the cast is + effectively a no-op. + + :param value: a string serialization of a crypto meta dict + :returns: a dict containing crypto meta items + :raises EncryptionException: if an error occurs while parsing the + crypto meta + """ + def b64_decode_meta(crypto_meta): + return { + str(name): (base64.b64decode(val) if name in ('iv', 'key') + else b64_decode_meta(val) if isinstance(val, dict) + else val.encode('utf8')) + for name, val in crypto_meta.items()} + + try: + if not isinstance(value, six.string_types): + raise ValueError('crypto meta not a string') + val = json.loads(urlparse.unquote_plus(value)) + if not isinstance(val, collections.Mapping): + raise ValueError('crypto meta not a Mapping') + return b64_decode_meta(val) + except (KeyError, ValueError, TypeError) as err: + msg = 'Bad crypto meta %r: %s' % (value, err) + raise EncryptionException(msg) + + +def append_crypto_meta(value, crypto_meta): + """ + Serialize and append crypto metadata to an encrypted value. + + :param value: value to which serialized crypto meta will be appended. + :param crypto_meta: a dict of crypto meta + :return: a string of the form <value>; swift_meta=<serialized crypto meta> + """ + return '%s; swift_meta=%s' % (value, dump_crypto_meta(crypto_meta)) + + +def extract_crypto_meta(value): + """ + Extract and deserialize any crypto meta from the end of a value.
+ + :param value: string that may have crypto meta at end + :return: a tuple of the form: + (<value without crypto meta>, <crypto meta> or None) + """ + crypto_meta = None + # we only attempt to extract crypto meta from values that we know were + # encrypted and base64-encoded, or from etag values, so it's safe to split + # on ';' even if it turns out that the value was an unencrypted etag + parts = value.split(';') + if len(parts) == 2: + value, param = parts + crypto_meta_tag = 'swift_meta=' + if param.strip().startswith(crypto_meta_tag): + param = param.strip()[len(crypto_meta_tag):] + crypto_meta = load_crypto_meta(param) + return value, crypto_meta diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py new file mode 100644 index 0000000000..46e2dbc484 --- /dev/null +++ b/swift/common/middleware/crypto/decrypter.py @@ -0,0 +1,449 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import json +import xml.etree.cElementTree as ElementTree + +from swift import gettext_ as _ +from swift.common.http import is_success +from swift.common.middleware.crypto.crypto_utils import CryptoWSGIContext, \ load_crypto_meta, extract_crypto_meta, Crypto +from swift.common.exceptions import EncryptionException +from swift.common.request_helpers import get_object_transient_sysmeta, \ get_listing_content_type, get_sys_meta_prefix, get_user_meta_prefix +from swift.common.swob import Request, HTTPException, HTTPInternalServerError +from swift.common.utils import get_logger, config_true_value, \ parse_content_range, closing_if_possible, parse_content_type, \ FileLikeIter, multipart_byteranges_to_document_iters + +DECRYPT_CHUNK_SIZE = 65536 + + +def purge_crypto_sysmeta_headers(headers): + return [h for h in headers if not + h[0].lower().startswith( + (get_object_transient_sysmeta('crypto-'), + get_sys_meta_prefix('object') + 'crypto-'))] + + +class BaseDecrypterContext(CryptoWSGIContext): + def get_crypto_meta(self, header_name): + """ + Extract a crypto_meta dict from a header. + + :param header_name: name of header that may have crypto_meta + :return: A dict containing crypto_meta items + :raises EncryptionException: if an error occurs while parsing the + crypto meta + """ + crypto_meta_json = self._response_header_value(header_name) + + if crypto_meta_json is None: + return None + crypto_meta = load_crypto_meta(crypto_meta_json) + self.crypto.check_crypto_meta(crypto_meta) + return crypto_meta + + def get_unwrapped_key(self, crypto_meta, wrapping_key): + """ + Get a wrapped key from crypto-meta and unwrap it using the provided + wrapping key.
+ + :param crypto_meta: a dict of crypto-meta + :param wrapping_key: key to be used to decrypt the wrapped key + :return: an unwrapped key + :raises EncryptionException: if the crypto-meta has no wrapped key or + the unwrapped key is invalid + """ + try: + return self.crypto.unwrap_key(wrapping_key, + crypto_meta['body_key']) + except KeyError as err: + err = 'Missing %s' % err + except ValueError as err: + pass + msg = 'Error decrypting %s' % self.server_type + self.logger.error(_('%(msg)s: %(err)s') % {'msg': msg, 'err': err}) + raise HTTPInternalServerError(body=msg, content_type='text/plain') + + def decrypt_value_with_meta(self, value, key, required=False): + """ + Base64-decode and decrypt a value if crypto meta can be extracted from + the value itself, otherwise return the value unmodified. + + A value should either be a string that does not contain the ';' + character or should be of the form: + + <base64-encoded value>;swift_meta=<serialized crypto meta> + + :param value: value to decrypt + :param key: crypto key to use + :param required: if True then the value is required to be decrypted + and an EncryptionException will be raised if the + header cannot be decrypted due to missing crypto meta. + :returns: decrypted value if crypto meta is found, otherwise the + unmodified value + :raises EncryptionException: if an error occurs while parsing crypto + meta or if the header value was required + to be decrypted but crypto meta was not + found. + """ + value, crypto_meta = extract_crypto_meta(value) + if crypto_meta: + self.crypto.check_crypto_meta(crypto_meta) + value = self.decrypt_value(value, key, crypto_meta) + elif required: + raise EncryptionException( + "Missing crypto meta in value %s" % value) + return value + + def decrypt_value(self, value, key, crypto_meta): + """ + Base64-decode and decrypt a value using the crypto_meta provided. + + :param value: a base64-encoded value to decrypt + :param key: crypto key to use + :param crypto_meta: a crypto-meta dict of form returned by + :py:func:`~swift.common.middleware.crypto.Crypto.get_crypto_meta` + :returns: decrypted value + """ + if not value: + return '' + crypto_ctxt = self.crypto.create_decryption_ctxt( + key, crypto_meta['iv'], 0) + return crypto_ctxt.update(base64.b64decode(value)) + + def get_decryption_keys(self, req): + """ + Determine if a response should be decrypted, and if so then fetch keys. + + :param req: a Request object + :returns: a dict of decryption keys + """ + if config_true_value(req.environ.get('swift.crypto.override')): + self.logger.debug('No decryption is necessary because of override') + return None + + return self.get_keys(req.environ) + + +class DecrypterObjContext(BaseDecrypterContext): + def __init__(self, decrypter, logger): + super(DecrypterObjContext, self).__init__(decrypter, 'object', logger) + + def _decrypt_header(self, header, value, key, required=False): + """ + Attempt to decrypt a header value that may be encrypted. + + :param header: the header name + :param value: the header value + :param key: decryption key + :param required: if True then the header is required to be decrypted + and an HTTPInternalServerError will be raised if the + header cannot be decrypted due to missing crypto meta. + :return: decrypted value or the original value if it was not encrypted. + :raises HTTPInternalServerError: if an error occurred during decryption + or if the header value was required to + be decrypted but crypto meta was not + found.
+ """ + try: + return self.decrypt_value_with_meta(value, key, required) + except EncryptionException as e: + msg = "Error decrypting header" + self.logger.error(_("%(msg)s %(hdr)s: %(e)s") % + {'msg': msg, 'hdr': header, 'e': e}) + raise HTTPInternalServerError(body=msg, content_type='text/plain') + + def decrypt_user_metadata(self, keys): + prefix = get_object_transient_sysmeta('crypto-meta-') + prefix_len = len(prefix) + new_prefix = get_user_meta_prefix(self.server_type).title() + result = [] + for name, val in self._response_headers: + if name.lower().startswith(prefix) and val: + short_name = name[prefix_len:] + decrypted_value = self._decrypt_header( + name, val, keys[self.server_type], required=True) + result.append((new_prefix + short_name, decrypted_value)) + return result + + def decrypt_resp_headers(self, keys): + """ + Find encrypted headers and replace with the decrypted versions. + + :param keys: a dict of decryption keys. + :return: A list of headers with any encrypted headers replaced by their + decrypted values. + :raises HTTPInternalServerError: if any error occurs while decrypting + headers + """ + mod_hdr_pairs = [] + + # Decrypt plaintext etag and place in Etag header for client response + etag_header = 'X-Object-Sysmeta-Crypto-Etag' + encrypted_etag = self._response_header_value(etag_header) + if encrypted_etag: + decrypted_etag = self._decrypt_header( + etag_header, encrypted_etag, keys['object'], required=True) + mod_hdr_pairs.append(('Etag', decrypted_etag)) + + etag_header = 'X-Object-Sysmeta-Container-Update-Override-Etag' + encrypted_etag = self._response_header_value(etag_header) + if encrypted_etag: + decrypted_etag = self._decrypt_header( + etag_header, encrypted_etag, keys['container']) + mod_hdr_pairs.append((etag_header, decrypted_etag)) + + # Decrypt all user metadata. Encrypted user metadata values are stored + # in the x-object-transient-sysmeta-crypto-meta- namespace. Those are + # decrypted and moved back to the x-object-meta- namespace. Prior to + # decryption, the response should have no x-object-meta- headers, but + # if it does then they will be overwritten by any decrypted headers + # that map to the same x-object-meta- header names i.e. decrypted + # headers win over unexpected, unencrypted headers. + mod_hdr_pairs.extend(self.decrypt_user_metadata(keys)) + + mod_hdr_names = {h.lower() for h, v in mod_hdr_pairs} + mod_hdr_pairs.extend([(h, v) for h, v in self._response_headers + if h.lower() not in mod_hdr_names]) + return mod_hdr_pairs + + def multipart_response_iter(self, resp, boundary, body_key, crypto_meta): + """ + Decrypts a multipart mime doc response body. + + :param resp: application response + :param boundary: multipart boundary string + :param keys: a dict of decryption keys. + :param crypto_meta: crypto_meta for the response body + :return: generator for decrypted response body + """ + with closing_if_possible(resp): + parts_iter = multipart_byteranges_to_document_iters( + FileLikeIter(resp), boundary) + for first_byte, last_byte, length, headers, body in parts_iter: + yield "--" + boundary + "\r\n" + + for header_pair in headers: + yield "%s: %s\r\n" % header_pair + + yield "\r\n" + + decrypt_ctxt = self.crypto.create_decryption_ctxt( + body_key, crypto_meta['iv'], first_byte) + for chunk in iter(lambda: body.read(DECRYPT_CHUNK_SIZE), ''): + yield decrypt_ctxt.update(chunk) + + yield "\r\n" + + yield "--" + boundary + "--" + + def response_iter(self, resp, body_key, crypto_meta, offset): + """ + Decrypts a response body. 
+ + :param resp: application response + :param body_key: decryption key for the response body + :param crypto_meta: crypto_meta for the response body + :param offset: offset into object content at which response body starts + :return: generator for decrypted response body + """ + decrypt_ctxt = self.crypto.create_decryption_ctxt( + body_key, crypto_meta['iv'], offset) + with closing_if_possible(resp): + for chunk in resp: + yield decrypt_ctxt.update(chunk) + + def handle_get(self, req, start_response): + app_resp = self._app_call(req.environ) + + keys = self.get_decryption_keys(req) + if keys is None: + # skip decryption + start_response(self._response_status, self._response_headers, + self._response_exc_info) + return app_resp + + mod_resp_headers = self.decrypt_resp_headers(keys) + + crypto_meta = None + if is_success(self._get_status_int()): + try: + crypto_meta = self.get_crypto_meta( + 'X-Object-Sysmeta-Crypto-Body-Meta') + except EncryptionException as err: + msg = 'Error decrypting object' + self.logger.error(_('%(msg)s: %(err)s') % + {'msg': msg, 'err': err}) + raise HTTPInternalServerError( + body=msg, content_type='text/plain') + + if crypto_meta: + # 2xx response and encrypted body + body_key = self.get_unwrapped_key(crypto_meta, keys['object']) + content_type, content_type_attrs = parse_content_type( + self._response_header_value('Content-Type')) + + if (self._get_status_int() == 206 and + content_type == 'multipart/byteranges'): + boundary = dict(content_type_attrs)["boundary"] + resp_iter = self.multipart_response_iter( + app_resp, boundary, body_key, crypto_meta) + else: + offset = 0 + content_range = self._response_header_value('Content-Range') + if content_range: + # Determine offset within the whole object if ranged GET + offset, end, total = parse_content_range(content_range) + resp_iter = self.response_iter( + app_resp, body_key, crypto_meta, offset) + else: + # don't decrypt body of unencrypted or non-2xx responses + resp_iter = app_resp + + mod_resp_headers = purge_crypto_sysmeta_headers(mod_resp_headers) + start_response(self._response_status, mod_resp_headers, + self._response_exc_info) + + return resp_iter + + def handle_head(self, req, start_response): + app_resp = self._app_call(req.environ) + + keys = self.get_decryption_keys(req) + + if keys is None: + # skip decryption + start_response(self._response_status, self._response_headers, + self._response_exc_info) + else: + mod_resp_headers = self.decrypt_resp_headers(keys) + mod_resp_headers = purge_crypto_sysmeta_headers(mod_resp_headers) + start_response(self._response_status, mod_resp_headers, + self._response_exc_info) + + return app_resp + + +class DecrypterContContext(BaseDecrypterContext): + def __init__(self, decrypter, logger): + super(DecrypterContContext, self).__init__( + decrypter, 'container', logger) + + def handle_get(self, req, start_response): + app_resp = self._app_call(req.environ) + + if is_success(self._get_status_int()): + # only decrypt body of 2xx responses + out_content_type = get_listing_content_type(req) + if out_content_type == 'application/json': + handler = self.process_json_resp + keys = self.get_decryption_keys(req) + elif out_content_type.endswith('/xml'): + handler = self.process_xml_resp + keys = self.get_decryption_keys(req) + else: + handler = keys = None + + if handler and keys: + try: + app_resp = handler(keys['container'], app_resp) + except EncryptionException as err: + msg = "Error decrypting container listing" + self.logger.error(_('%(msg)s: %(err)s') % + {'msg': msg, 'err': err}) +
raise HTTPInternalServerError( + body=msg, content_type='text/plain') + + start_response(self._response_status, + self._response_headers, + self._response_exc_info) + + return app_resp + + def update_content_length(self, new_total_len): + self._response_headers = [ + (h, v) for h, v in self._response_headers + if h.lower() != 'content-length'] + self._response_headers.append(('Content-Length', str(new_total_len))) + + def process_json_resp(self, key, resp_iter): + """ + Parses a json body listing and decrypts encrypted entries. Updates the + Content-Length header with the new body length and returns a body iter. + """ + with closing_if_possible(resp_iter): + resp_body = ''.join(resp_iter) + body_json = json.loads(resp_body) + new_body = json.dumps([self.decrypt_obj_dict(obj_dict, key) + for obj_dict in body_json]) + self.update_content_length(len(new_body)) + return [new_body] + + def decrypt_obj_dict(self, obj_dict, key): + ciphertext = obj_dict['hash'] + obj_dict['hash'] = self.decrypt_value_with_meta(ciphertext, key) + return obj_dict + + def process_xml_resp(self, key, resp_iter): + """ + Parses an xml body listing and decrypts encrypted entries. Updates the + Content-Length header with the new body length and returns a body iter. + """ + with closing_if_possible(resp_iter): + resp_body = ''.join(resp_iter) + tree = ElementTree.fromstring(resp_body) + for elem in tree.iter('hash'): + ciphertext = elem.text.encode('utf8') + plain = self.decrypt_value_with_meta(ciphertext, key) + elem.text = plain.decode('utf8') + new_body = ElementTree.tostring(tree, encoding='UTF-8').replace( + "<?xml version='1.0' encoding='UTF-8'?>", + '<?xml version="1.0" encoding="UTF-8"?>', 1) + self.update_content_length(len(new_body)) + return [new_body] + + +class Decrypter(object): + """Middleware for decrypting data and user metadata.""" + + def __init__(self, app, conf): + self.app = app + self.logger = get_logger(conf, log_route="decrypter") + self.crypto = Crypto(conf) + + def __call__(self, env, start_response): + req = Request(env) + try: + parts = req.split_path(3, 4, True) + except ValueError: + return self.app(env, start_response) + + if parts[3] and req.method == 'GET': + handler = DecrypterObjContext(self, self.logger).handle_get + elif parts[3] and req.method == 'HEAD': + handler = DecrypterObjContext(self, self.logger).handle_head + elif parts[2] and req.method == 'GET': + handler = DecrypterContContext(self, self.logger).handle_get + else: + # url and/or request verb is not handled by decrypter + return self.app(env, start_response) + + try: + return handler(req, start_response) + except HTTPException as err_resp: + return err_resp(env, start_response) diff --git a/swift/common/middleware/crypto/encrypter.py b/swift/common/middleware/crypto/encrypter.py new file mode 100644 index 0000000000..2719d47700 --- /dev/null +++ b/swift/common/middleware/crypto/encrypter.py @@ -0,0 +1,369 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License.
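+# Editor's note, not part of the original patch: alongside the encrypted
+# etag, the encrypter stores an HMAC-SHA256 of the plaintext etag, roughly
+# base64.b64encode(hmac.new(key, etag, hashlib.sha256).digest()), so that
+# conditional requests can later be evaluated without decrypting the body
+# (see _hmac_etag() below).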
+import base64
+import hashlib
+import hmac
+from contextlib import contextmanager
+
+from swift.common.constraints import check_metadata
+from swift.common.http import is_success
+from swift.common.middleware.crypto.crypto_utils import CryptoWSGIContext, \
+    dump_crypto_meta, append_crypto_meta, Crypto
+from swift.common.request_helpers import get_object_transient_sysmeta, \
+    strip_user_meta_prefix, is_user_meta, update_etag_is_at_header
+from swift.common.swob import Request, Match, HTTPException, \
+    HTTPUnprocessableEntity
+from swift.common.utils import get_logger, config_true_value
+
+
+def encrypt_header_val(crypto, value, key):
+    """
+    Encrypt a header value using the supplied key.
+
+    :param crypto: a Crypto instance
+    :param value: value to encrypt
+    :param key: crypto key to use
+    :returns: a tuple of (encrypted value, crypto_meta) where crypto_meta is a
+        dict of form returned by
+        :py:func:`~swift.common.middleware.crypto.Crypto.create_crypto_meta`
+    """
+    if not value:
+        return '', None
+
+    crypto_meta = crypto.create_crypto_meta()
+    crypto_ctxt = crypto.create_encryption_ctxt(key, crypto_meta['iv'])
+    enc_val = base64.b64encode(crypto_ctxt.update(value))
+    return enc_val, crypto_meta
+
+
+def _hmac_etag(key, etag):
+    """
+    Compute an HMAC-SHA256 using the given key and etag.
+
+    :param key: The starting key for the hash.
+    :param etag: The etag to hash.
+    :returns: a Base64-encoded representation of the HMAC
+    """
+    result = hmac.new(key, etag, digestmod=hashlib.sha256).digest()
+    return base64.b64encode(result).decode()
+
+
+class EncInputWrapper(object):
+    """File-like object to be swapped in for wsgi.input."""
+    def __init__(self, crypto, keys, req, logger):
+        self.env = req.environ
+        self.wsgi_input = req.environ['wsgi.input']
+        self.path = req.path
+        self.crypto = crypto
+        self.body_crypto_ctxt = None
+        self.keys = keys
+        self.plaintext_md5 = None
+        self.ciphertext_md5 = None
+        self.logger = logger
+        self.install_footers_callback(req)
+
+    def _init_encryption_context(self):
+        # do this once when body is first read
+        if self.body_crypto_ctxt is None:
+            self.body_crypto_meta = self.crypto.create_crypto_meta()
+            body_key = self.crypto.create_random_key()
+            # wrap the body key with object key
+            self.body_crypto_meta['body_key'] = self.crypto.wrap_key(
+                self.keys['object'], body_key)
+            self.body_crypto_meta['key_id'] = self.keys['id']
+            self.body_crypto_ctxt = self.crypto.create_encryption_ctxt(
+                body_key, self.body_crypto_meta.get('iv'))
+            self.plaintext_md5 = hashlib.md5()
+            self.ciphertext_md5 = hashlib.md5()
+
+    def install_footers_callback(self, req):
+        # the proxy controller will call back for footer metadata after
+        # body has been sent
+        inner_callback = req.environ.get('swift.callback.update_footers')
+        # remove any Etag from headers, it won't be valid for ciphertext and
+        # we'll send the ciphertext Etag later in footer metadata
+        client_etag = req.headers.pop('etag', None)
+        container_listing_etag_header = req.headers.get(
+            'X-Object-Sysmeta-Container-Update-Override-Etag')
+
+        def footers_callback(footers):
+            if inner_callback:
+                # pass on footers dict to any other callback that was
+                # registered before this one. It may override any footers that
+                # were set.
+ inner_callback(footers) + + plaintext_etag = None + if self.body_crypto_ctxt: + plaintext_etag = self.plaintext_md5.hexdigest() + # If client (or other middleware) supplied etag, then validate + # against plaintext etag + etag_to_check = footers.get('Etag') or client_etag + if (etag_to_check is not None and + plaintext_etag != etag_to_check): + raise HTTPUnprocessableEntity(request=Request(self.env)) + + # override any previous notion of etag with the ciphertext etag + footers['Etag'] = self.ciphertext_md5.hexdigest() + + # Encrypt the plaintext etag using the object key and persist + # as sysmeta along with the crypto parameters that were used. + encrypted_etag, etag_crypto_meta = encrypt_header_val( + self.crypto, plaintext_etag, self.keys['object']) + footers['X-Object-Sysmeta-Crypto-Etag'] = \ + append_crypto_meta(encrypted_etag, etag_crypto_meta) + footers['X-Object-Sysmeta-Crypto-Body-Meta'] = \ + dump_crypto_meta(self.body_crypto_meta) + + # Also add an HMAC of the etag for use when evaluating + # conditional requests + footers['X-Object-Sysmeta-Crypto-Etag-Mac'] = _hmac_etag( + self.keys['object'], plaintext_etag) + else: + # No data was read from body, nothing was encrypted, so don't + # set any crypto sysmeta for the body, but do re-instate any + # etag provided in inbound request if other middleware has not + # already set a value. + if client_etag is not None: + footers.setdefault('Etag', client_etag) + + # When deciding on the etag that should appear in container + # listings, look for: + # * override in the footer, otherwise + # * override in the header, and finally + # * MD5 of the plaintext received + # This may be None if no override was set and no data was read + container_listing_etag = footers.get( + 'X-Object-Sysmeta-Container-Update-Override-Etag', + container_listing_etag_header) or plaintext_etag + + if container_listing_etag is not None: + # Encrypt the container-listing etag using the container key + # and a random IV, and use it to override the container update + # value, with the crypto parameters appended. We use the + # container key here so that only that key is required to + # decrypt all etag values in a container listing when handling + # a container GET request. 
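+                # As a concrete illustration (values abbreviated; see the
+                # crypto_utils tests later in this patch), the stored value
+                # takes the form:
+                #
+                #   <base64 ciphertext>; swift_meta=<url-quoted JSON meta>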
+                val, crypto_meta = encrypt_header_val(
+                    self.crypto, container_listing_etag,
+                    self.keys['container'])
+                crypto_meta['key_id'] = self.keys['id']
+                footers['X-Object-Sysmeta-Container-Update-Override-Etag'] = \
+                    append_crypto_meta(val, crypto_meta)
+            # else: no override was set and no data was read
+
+        req.environ['swift.callback.update_footers'] = footers_callback
+
+    def read(self, *args, **kwargs):
+        return self.readChunk(self.wsgi_input.read, *args, **kwargs)
+
+    def readline(self, *args, **kwargs):
+        return self.readChunk(self.wsgi_input.readline, *args, **kwargs)
+
+    def readChunk(self, read_method, *args, **kwargs):
+        chunk = read_method(*args, **kwargs)
+
+        if chunk:
+            self._init_encryption_context()
+            self.plaintext_md5.update(chunk)
+            # Encrypt one chunk at a time
+            ciphertext = self.body_crypto_ctxt.update(chunk)
+            self.ciphertext_md5.update(ciphertext)
+            return ciphertext
+
+        return chunk
+
+
+class EncrypterObjContext(CryptoWSGIContext):
+    def __init__(self, encrypter, logger):
+        super(EncrypterObjContext, self).__init__(
+            encrypter, 'object', logger)
+
+    def _check_headers(self, req):
+        # Check the user-metadata length before encrypting and encoding
+        error_response = check_metadata(req, self.server_type)
+        if error_response:
+            raise error_response
+
+    def encrypt_user_metadata(self, req, keys):
+        """
+        Encrypt user-metadata header values. Replace each x-object-meta-<key>
+        user metadata header with a corresponding
+        x-object-transient-sysmeta-crypto-meta-<key> header which has the
+        crypto metadata required to decrypt appended to the encrypted value.
+
+        :param req: a swob Request
+        :param keys: a dict of encryption keys
+        """
+        prefix = get_object_transient_sysmeta('crypto-meta-')
+        user_meta_headers = [h for h in req.headers.items() if
+                             is_user_meta(self.server_type, h[0]) and h[1]]
+        crypto_meta = None
+        for name, val in user_meta_headers:
+            short_name = strip_user_meta_prefix(self.server_type, name)
+            new_name = prefix + short_name
+            enc_val, crypto_meta = encrypt_header_val(
+                self.crypto, val, keys[self.server_type])
+            req.headers[new_name] = append_crypto_meta(enc_val, crypto_meta)
+            req.headers.pop(name)
+        # store a single copy of the crypto meta items that are common to all
+        # encrypted user metadata independently of any such meta that is stored
+        # with the object body because it might change on a POST. This is done
+        # for future-proofing - the meta stored here is not currently used
+        # during decryption.
+        if crypto_meta:
+            meta = dump_crypto_meta({'cipher': crypto_meta['cipher'],
+                                     'key_id': keys['id']})
+            req.headers[get_object_transient_sysmeta('crypto-meta')] = meta
+
+    def handle_put(self, req, start_response):
+        self._check_headers(req)
+        keys = self.get_keys(req.environ, required=['object', 'container'])
+        self.encrypt_user_metadata(req, keys)
+
+        enc_input_proxy = EncInputWrapper(self.crypto, keys, req, self.logger)
+        req.environ['wsgi.input'] = enc_input_proxy
+
+        resp = self._app_call(req.environ)
+
+        # If an etag is in the response headers and a plaintext etag was
+        # calculated, then overwrite the response value with the plaintext etag
+        # provided it matches the ciphertext etag. If it does not match then do
+        # not overwrite and allow the response value to return to client.
+ mod_resp_headers = self._response_headers + if (is_success(self._get_status_int()) and + enc_input_proxy.plaintext_md5): + plaintext_etag = enc_input_proxy.plaintext_md5.hexdigest() + ciphertext_etag = enc_input_proxy.ciphertext_md5.hexdigest() + mod_resp_headers = [ + (h, v if (h.lower() != 'etag' or + v.strip('"') != ciphertext_etag) + else plaintext_etag) + for h, v in mod_resp_headers] + + start_response(self._response_status, mod_resp_headers, + self._response_exc_info) + return resp + + def handle_post(self, req, start_response): + """ + Encrypt the new object headers with a new iv and the current crypto. + Note that an object may have encrypted headers while the body may + remain unencrypted. + """ + self._check_headers(req) + keys = self.get_keys(req.environ) + self.encrypt_user_metadata(req, keys) + + resp = self._app_call(req.environ) + start_response(self._response_status, self._response_headers, + self._response_exc_info) + return resp + + @contextmanager + def _mask_conditional_etags(self, req, header_name): + """ + Calculate HMACs of etags in header value and append to existing list. + The HMACs are calculated in the same way as was done for the object + plaintext etag to generate the value of + X-Object-Sysmeta-Crypto-Etag-Mac when the object was PUT. The object + server can therefore use these HMACs to evaluate conditional requests. + + The existing etag values are left in the list of values to match in + case the object was not encrypted when it was PUT. It is unlikely that + a masked etag value would collide with an unmasked value. + + :param req: an instance of swob.Request + :param header_name: name of header that has etags to mask + :return: True if any etags were masked, False otherwise + """ + masked = False + old_etags = req.headers.get(header_name) + if old_etags: + keys = self.get_keys(req.environ) + new_etags = [] + for etag in Match(old_etags).tags: + if etag == '*': + new_etags.append(etag) + continue + masked_etag = _hmac_etag(keys['object'], etag) + new_etags.extend(('"%s"' % etag, '"%s"' % masked_etag)) + masked = True + + req.headers[header_name] = ', '.join(new_etags) + + try: + yield masked + finally: + if old_etags: + req.headers[header_name] = old_etags + + def handle_get_or_head(self, req, start_response): + with self._mask_conditional_etags(req, 'If-Match') as masked1: + with self._mask_conditional_etags(req, 'If-None-Match') as masked2: + if masked1 or masked2: + update_etag_is_at_header( + req, 'X-Object-Sysmeta-Crypto-Etag-Mac') + resp = self._app_call(req.environ) + start_response(self._response_status, self._response_headers, + self._response_exc_info) + return resp + + +class Encrypter(object): + """Middleware for encrypting data and user metadata. + + By default all PUT or POST'ed object data and/or metadata will be + encrypted. Encryption of new data and/or metadata may be disabled by + setting the ``disable_encryption`` option to True. However, this middleware + should remain in the pipeline in order for existing encrypted data to be + read. 
+    """
+
+    def __init__(self, app, conf):
+        self.app = app
+        self.logger = get_logger(conf, log_route="encrypter")
+        self.crypto = Crypto(conf)
+        self.disable_encryption = config_true_value(
+            conf.get('disable_encryption', 'false'))
+
+    def __call__(self, env, start_response):
+        # If override is set in env, then just pass along
+        if config_true_value(env.get('swift.crypto.override')):
+            return self.app(env, start_response)
+
+        req = Request(env)
+
+        if self.disable_encryption and req.method in ('PUT', 'POST'):
+            return self.app(env, start_response)
+        try:
+            req.split_path(4, 4, True)
+        except ValueError:
+            return self.app(env, start_response)
+
+        if req.method in ('GET', 'HEAD'):
+            handler = EncrypterObjContext(self, self.logger).handle_get_or_head
+        elif req.method == 'PUT':
+            handler = EncrypterObjContext(self, self.logger).handle_put
+        elif req.method == 'POST':
+            handler = EncrypterObjContext(self, self.logger).handle_post
+        else:
+            # anything else
+            return self.app(env, start_response)
+
+        try:
+            return handler(req, start_response)
+        except HTTPException as err_resp:
+            return err_resp(env, start_response)
diff --git a/swift/common/middleware/crypto/keymaster.py b/swift/common/middleware/crypto/keymaster.py
new file mode 100644
index 0000000000..4b6ac71f2c
--- /dev/null
+++ b/swift/common/middleware/crypto/keymaster.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import hashlib
+import hmac
+import os
+
+from swift.common.middleware.crypto.crypto_utils import CRYPTO_KEY_CALLBACK
+from swift.common.swob import Request, HTTPException
+from swift.common.wsgi import WSGIContext
+
+
+class KeyMasterContext(WSGIContext):
+    """
+    The simple scheme for key derivation is as follows: every path is
+    associated with a key, where the key is derived from the path itself in a
+    deterministic fashion such that the key does not need to be stored.
+    Specifically, the key for any path is an HMAC of a root key and the path
+    itself, calculated using an SHA256 hash function::
+
+      <path_key> = HMAC_SHA256(<root_secret>, <path>)
+    """
+    def __init__(self, keymaster, account, container, obj):
+        """
+        :param keymaster: a Keymaster instance
+        :param account: account name
+        :param container: container name
+        :param obj: object name
+        """
+        super(KeyMasterContext, self).__init__(keymaster.app)
+        self.keymaster = keymaster
+        self.account = account
+        self.container = container
+        self.obj = obj
+        self._keys = None
+
+    def fetch_crypto_keys(self, *args, **kwargs):
+        """
+        Setup container and object keys based on the request path.
+
+        Keys are derived from request path. The 'id' entry in the results dict
+        includes the part of the path used to derive keys. Other keymaster
+        implementations may use a different strategy to generate keys and may
+        include a different type of 'id', so callers should treat the 'id' as
+        opaque keymaster-specific data.
+
+        :returns: A dict containing encryption keys for 'object' and
+                  'container' and a key 'id'.
+ """ + if self._keys: + return self._keys + + self._keys = {} + account_path = os.path.join(os.sep, self.account) + + if self.container: + path = os.path.join(account_path, self.container) + self._keys['container'] = self.keymaster.create_key(path) + + if self.obj: + path = os.path.join(path, self.obj) + self._keys['object'] = self.keymaster.create_key(path) + + # For future-proofing include a keymaster version number and the + # path used to derive keys in the 'id' entry of the results. The + # encrypter will persist this as part of the crypto-meta for + # encrypted data and metadata. If we ever change the way keys are + # generated then the decrypter could pass the persisted 'id' value + # when it calls fetch_crypto_keys to inform the keymaster as to how + # that particular data or metadata had its keys generated. + # Currently we have no need to do that, so we are simply persisting + # this information for future use. + self._keys['id'] = {'v': '1', 'path': path} + + return self._keys + + def handle_request(self, req, start_response): + req.environ[CRYPTO_KEY_CALLBACK] = self.fetch_crypto_keys + resp = self._app_call(req.environ) + start_response(self._response_status, self._response_headers, + self._response_exc_info) + return resp + + +class KeyMaster(object): + """Middleware for providing encryption keys. + + The middleware requires its ``encryption_root_secret`` option to be set. + This is the root secret from which encryption keys are derived. This must + be set before first use to a value that is a base64 encoding of at least 32 + bytes. The security of all encrypted data critically depends on this key, + therefore it should be set to a high-entropy value. For example, a suitable + value may be obtained by base-64 encoding a 32 byte (or longer) value + generated by a cryptographically secure random number generator. Changing + the root secret is likely to result in data loss. 
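+
+    Assuming the openssl command line tool is available, a suitable value
+    could, for example, be generated with::
+
+        openssl rand -base64 32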
+ """ + + def __init__(self, app, conf): + self.app = app + self.root_secret = conf.get('encryption_root_secret') + try: + self.root_secret = base64.b64decode(self.root_secret) + if len(self.root_secret) < 32: + raise ValueError + except (TypeError, ValueError): + raise ValueError( + 'encryption_root_secret option in proxy-server.conf must be ' + 'a base64 encoding of at least 32 raw bytes') + + def __call__(self, env, start_response): + req = Request(env) + + try: + parts = req.split_path(2, 4, True) + except ValueError: + return self.app(env, start_response) + + if req.method in ('PUT', 'POST', 'GET', 'HEAD'): + # handle only those request methods that may require keys + km_context = KeyMasterContext(self, *parts[1:]) + try: + return km_context.handle_request(req, start_response) + except HTTPException as err_resp: + return err_resp(env, start_response) + + # anything else + return self.app(env, start_response) + + def create_key(self, key_id): + return hmac.new(self.root_secret, key_id, + digestmod=hashlib.sha256).digest() + + +def filter_factory(global_conf, **local_conf): + conf = global_conf.copy() + conf.update(local_conf) + + def keymaster_filter(app): + return KeyMaster(app, conf) + + return keymaster_filter diff --git a/swift/common/swob.py b/swift/common/swob.py index aa11ec01f2..f80c13846d 100644 --- a/swift/common/swob.py +++ b/swift/common/swob.py @@ -1419,6 +1419,7 @@ HTTPOk = status_map[200] HTTPCreated = status_map[201] HTTPAccepted = status_map[202] HTTPNoContent = status_map[204] +HTTPPartialContent = status_map[206] HTTPMovedPermanently = status_map[301] HTTPFound = status_map[302] HTTPSeeOther = status_map[303] diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 52be849bfa..0bf324f85d 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -361,6 +361,12 @@ def in_process_setup(the_object_server=object_server): 'allow_account_management': 'true', 'account_autocreate': 'true', 'allow_versions': 'True', + # TODO - Remove encryption_root_secret - this is only necessary while + # encryption middleware is in the default proxy pipeline in + # proxy-server.conf-sample + # base64 encoding of "dontEverUseThisIn_PRODUCTION_xxxxxxxxxxxxxxx" + 'encryption_root_secret': + 'ZG9udEV2ZXJVc2VUaGlzSW5fUFJPRFVDVElPTl94eHh4eHh4eHh4eHh4eHg=', # Below are values used by the functional test framework, as well as # by the various in-process swift servers 'auth_host': '127.0.0.1', diff --git a/test/probe/test_empty_device_handoff.py b/test/probe/test_empty_device_handoff.py index 65338ed84b..e1f8ade50c 100755 --- a/test/probe/test_empty_device_handoff.py +++ b/test/probe/test_empty_device_handoff.py @@ -73,6 +73,13 @@ class TestEmptyDevice(ReplProbeTest): raise Exception('Object GET did not return VERIFY, instead it ' 'returned: %s' % repr(odata)) + # Stash the on disk data from a primary for future comparison with the + # handoff - this may not equal 'VERIFY' if for example the proxy has + # crypto enabled + direct_get_data = direct_client.direct_get_object( + onodes[1], opart, self.account, container, obj, headers={ + 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] + # Kill other two container/obj primary servers # to ensure GET handoff works for node in onodes[1:]: @@ -95,9 +102,7 @@ class TestEmptyDevice(ReplProbeTest): odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] - if odata != 'VERIFY': - raise Exception('Direct object GET 
did not return VERIFY, instead '
-                        'it returned: %s' % repr(odata))
+        self.assertEqual(direct_get_data, odata)
 
         # Assert container listing (via proxy and directly) has container/obj
         objs = [o['name'] for o in
@@ -155,9 +160,7 @@ class TestEmptyDevice(ReplProbeTest):
         odata = direct_client.direct_get_object(
             onode, opart, self.account, container, obj, headers={
                 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
-        if odata != 'VERIFY':
-            raise Exception('Direct object GET did not return VERIFY, instead '
-                            'it returned: %s' % repr(odata))
+        self.assertEqual(direct_get_data, odata)
 
         # Assert the handoff server no longer has container/obj
         try:
diff --git a/test/probe/test_object_failures.py b/test/probe/test_object_failures.py
index ba53177743..1850b2750d 100755
--- a/test/probe/test_object_failures.py
+++ b/test/probe/test_object_failures.py
@@ -77,6 +77,12 @@ class TestObjectFailures(ReplProbeTest):
         obj = 'object-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
                                                         'VERIFY')
+        # Stash the on disk data for future comparison - this may not equal
+        # 'VERIFY' if for example the proxy has crypto enabled
+        backend_data = direct_client.direct_get_object(
+            onode, opart, self.account, container, obj, headers={
+                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
+
         metadata = read_metadata(data_file)
         metadata['ETag'] = 'badetag'
         write_metadata(data_file, metadata)
@@ -84,7 +90,7 @@
         odata = direct_client.direct_get_object(
             onode, opart, self.account, container, obj, headers={
                 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
-        self.assertEqual(odata, 'VERIFY')
+        self.assertEqual(odata, backend_data)
         try:
             direct_client.direct_get_object(
                 onode, opart, self.account, container, obj, headers={
@@ -98,14 +104,19 @@
         obj = 'object-range-%s' % uuid4()
         onode, opart, data_file = self._setup_data_file(container, obj,
                                                         'RANGE')
+        # Stash the on disk data for future comparison - this may not equal
+        # 'RANGE' if for example the proxy has crypto enabled
+        backend_data = direct_client.direct_get_object(
+            onode, opart, self.account, container, obj, headers={
+                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
         metadata = read_metadata(data_file)
         metadata['ETag'] = 'badetag'
         write_metadata(data_file, metadata)
 
         base_headers = {'X-Backend-Storage-Policy-Index': self.policy.idx}
-        for header, result in [({'Range': 'bytes=0-2'}, 'RAN'),
-                               ({'Range': 'bytes=1-11'}, 'ANGE'),
-                               ({'Range': 'bytes=0-11'}, 'RANGE')]:
+        for header, result in [({'Range': 'bytes=0-2'}, backend_data[0:3]),
+                               ({'Range': 'bytes=1-11'}, backend_data[1:]),
+                               ({'Range': 'bytes=0-11'}, backend_data)]:
             req_headers = base_headers.copy()
             req_headers.update(header)
             odata = direct_client.direct_get_object(
diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py
index 3808df0616..ca0b3d0e02 100755
--- a/test/probe/test_object_handoff.py
+++ b/test/probe/test_object_handoff.py
@@ -55,6 +55,13 @@ class TestObjectHandoff(ReplProbeTest):
             raise Exception('Object GET did not return VERIFY, instead it '
                             'returned: %s' % repr(odata))
 
+        # Stash the on disk data from a primary for future comparison with the
+        # handoff - this may not equal 'VERIFY' if for example the proxy has
+        # crypto enabled
+        direct_get_data = direct_client.direct_get_object(
+            onodes[1], opart, self.account, container, obj, headers={
+                'X-Backend-Storage-Policy-Index': self.policy.idx})[-1]
+
         # Kill other two container/obj primary servers
         # to
ensure GET handoff works for node in onodes[1:]: @@ -76,9 +83,7 @@ class TestObjectHandoff(ReplProbeTest): odata = direct_client.direct_get_object( another_onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] - if odata != 'VERIFY': - raise Exception('Direct object GET did not return VERIFY, instead ' - 'it returned: %s' % repr(odata)) + self.assertEqual(direct_get_data, odata) # drop a tempfile in the handoff's datadir, like it might have # had if there was an rsync failure while it was previously a @@ -143,9 +148,7 @@ class TestObjectHandoff(ReplProbeTest): odata = direct_client.direct_get_object( onode, opart, self.account, container, obj, headers={ 'X-Backend-Storage-Policy-Index': self.policy.idx})[-1] - if odata != 'VERIFY': - raise Exception('Direct object GET did not return VERIFY, instead ' - 'it returned: %s' % repr(odata)) + self.assertEqual(direct_get_data, odata) # and that it does *not* have a temporary rsync dropping! found_data_filename = False @@ -273,6 +276,14 @@ class TestECObjectHandoffOverwrite(ECProbeTest): # shutdown one of the primary data nodes failed_primary = random.choice(onodes) failed_primary_device_path = self.device_dir('object', failed_primary) + # first read its ec etag value for future reference - this may not + # equal old_contents.etag if for example the proxy has crypto enabled + req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)} + headers = direct_client.direct_head_object( + failed_primary, opart, self.account, container_name, + object_name, headers=req_headers) + old_backend_etag = headers['X-Object-Sysmeta-EC-Etag'] + self.kill_drive(failed_primary_device_path) # overwrite our object with some new data @@ -290,13 +301,18 @@ class TestECObjectHandoffOverwrite(ECProbeTest): failed_primary, opart, self.account, container_name, object_name, headers=req_headers) self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'], - old_contents.etag) + old_backend_etag) # we have 1 primary with wrong old etag, and we should have 5 with # new etag plus a handoff with the new etag, so killing 2 other # primaries forces proxy to try to GET from all primaries plus handoff. other_nodes = [n for n in onodes if n != failed_primary] random.shuffle(other_nodes) + # grab the value of the new content's ec etag for future reference + headers = direct_client.direct_head_object( + other_nodes[0], opart, self.account, container_name, + object_name, headers=req_headers) + new_backend_etag = headers['X-Object-Sysmeta-EC-Etag'] for node in other_nodes[:2]: self.kill_drive(self.device_dir('object', node)) @@ -314,8 +330,8 @@ class TestECObjectHandoffOverwrite(ECProbeTest): continue found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1 self.assertEqual(found_frags, { - new_contents.etag: 4, # this should be enough to rebuild! - old_contents.etag: 1, + new_backend_etag: 4, # this should be enough to rebuild! 
+ old_backend_etag: 1, }) # clear node error limiting diff --git a/test/unit/common/middleware/crypto/__init__.py b/test/unit/common/middleware/crypto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/unit/common/middleware/crypto/crypto_helpers.py b/test/unit/common/middleware/crypto/crypto_helpers.py new file mode 100644 index 0000000000..0af7d3e83c --- /dev/null +++ b/test/unit/common/middleware/crypto/crypto_helpers.py @@ -0,0 +1,54 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import hashlib + +from swift.common.middleware.crypto.crypto_utils import Crypto + + +def fetch_crypto_keys(): + return {'account': 'This is an account key 012345678', + 'container': 'This is a container key 01234567', + 'object': 'This is an object key 0123456789', + 'id': {'v': 'fake', 'path': '/a/c/fake'}} + + +def md5hex(s): + return hashlib.md5(s).hexdigest() + + +def encrypt(val, key=None, iv=None, ctxt=None): + if ctxt is None: + ctxt = Crypto({}).create_encryption_ctxt(key, iv) + enc_val = ctxt.update(val) + return enc_val + + +def decrypt(key, iv, enc_val): + dec_ctxt = Crypto({}).create_decryption_ctxt(key, iv, 0) + dec_val = dec_ctxt.update(enc_val) + return dec_val + + +FAKE_IV = "This is an IV123" +# do not use this example encryption_root_secret in production, use a randomly +# generated value with high entropy +TEST_KEYMASTER_CONF = {'encryption_root_secret': base64.b64encode(b'x' * 32)} + + +def fake_get_crypto_meta(**kwargs): + meta = {'iv': FAKE_IV, 'cipher': Crypto.cipher} + meta.update(kwargs) + return meta diff --git a/test/unit/common/middleware/crypto/test_crypto.py b/test/unit/common/middleware/crypto/test_crypto.py new file mode 100644 index 0000000000..c5f6cd0cd7 --- /dev/null +++ b/test/unit/common/middleware/crypto/test_crypto.py @@ -0,0 +1,39 @@ +# Copyright (c) 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
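+#
+# A sketch of the composition asserted below (the wrapped app here is just a
+# placeholder object):
+#
+#   factory = crypto.filter_factory({})
+#   app = factory(object())
+#   # app is a decrypter.Decrypter and app.app an encrypter.Encrypter, so
+#   # requests pass through the decrypter context before the encrypter.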
+import unittest + +from swift.common import utils +from swift.common.middleware import crypto + + +class TestCrypto(unittest.TestCase): + def test_filter_factory(self): + factory = crypto.filter_factory({}) + self.assertTrue(callable(factory)) + self.assertIsInstance(factory({}), crypto.decrypter.Decrypter) + self.assertIsInstance(factory({}).app, crypto.encrypter.Encrypter) + self.assertIn('encryption', utils._swift_admin_info) + self.assertDictEqual( + {'enabled': True}, utils._swift_admin_info['encryption']) + self.assertNotIn('encryption', utils._swift_info) + + factory = crypto.filter_factory({'disable_encryption': True}) + self.assertTrue(callable(factory)) + self.assertIsInstance(factory({}), crypto.decrypter.Decrypter) + self.assertIsInstance(factory({}).app, crypto.encrypter.Encrypter) + self.assertIn('encryption', utils._swift_admin_info) + self.assertDictEqual( + {'enabled': False}, utils._swift_admin_info['encryption']) + self.assertNotIn('encryption', utils._swift_info) diff --git a/test/unit/common/middleware/crypto/test_crypto_utils.py b/test/unit/common/middleware/crypto/test_crypto_utils.py new file mode 100644 index 0000000000..56aca2ea0b --- /dev/null +++ b/test/unit/common/middleware/crypto/test_crypto_utils.py @@ -0,0 +1,495 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
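+#
+# For orientation, the serialized crypto meta exercised below is a url-quoted
+# JSON mapping with base64-encoded binary values; serialized_meta, for
+# example, decodes to:
+#
+#   {"cipher": "AES_CTR_256", "iv": "MDEyMzQ1Njc4OWFiY2RlZg=="}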
+import os +import unittest + +import mock +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + +from swift.common.exceptions import EncryptionException +from swift.common.middleware.crypto import crypto_utils +from swift.common.middleware.crypto.crypto_utils import ( + CRYPTO_KEY_CALLBACK, Crypto, CryptoWSGIContext) +from swift.common.swob import HTTPException +from test.unit import FakeLogger +from test.unit.common.middleware.crypto.crypto_helpers import fetch_crypto_keys + + +class TestCryptoWsgiContext(unittest.TestCase): + def setUp(self): + class FakeFilter(object): + app = None + crypto = Crypto({}) + + self.fake_logger = FakeLogger() + self.crypto_context = CryptoWSGIContext( + FakeFilter(), 'object', self.fake_logger) + + def test_get_keys(self): + # ok + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + keys = self.crypto_context.get_keys(env) + self.assertDictEqual(fetch_crypto_keys(), keys) + + # only default required keys are checked + subset_keys = {'object': fetch_crypto_keys()['object']} + env = {CRYPTO_KEY_CALLBACK: lambda: subset_keys} + keys = self.crypto_context.get_keys(env) + self.assertDictEqual(subset_keys, keys) + + # only specified required keys are checked + subset_keys = {'container': fetch_crypto_keys()['container']} + env = {CRYPTO_KEY_CALLBACK: lambda: subset_keys} + keys = self.crypto_context.get_keys(env, required=['container']) + self.assertDictEqual(subset_keys, keys) + + subset_keys = {'object': fetch_crypto_keys()['object'], + 'container': fetch_crypto_keys()['container']} + env = {CRYPTO_KEY_CALLBACK: lambda: subset_keys} + keys = self.crypto_context.get_keys( + env, required=['object', 'container']) + self.assertDictEqual(subset_keys, keys) + + def test_get_keys_missing_callback(self): + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys({}) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn('missing callback', + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_get_keys_callback_exception(self): + def callback(): + raise Exception('boom') + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys({CRYPTO_KEY_CALLBACK: callback}) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn('from callback: boom', + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_get_keys_missing_key_for_default_required_list(self): + bad_keys = dict(fetch_crypto_keys()) + bad_keys.pop('object') + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: bad_keys}) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Missing key for 'object'", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_get_keys_missing_object_key_for_specified_required_list(self): + bad_keys = dict(fetch_crypto_keys()) + bad_keys.pop('object') + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: bad_keys}, + required=['object', 'container']) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Missing key for 'object'", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption 
keys.', cm.exception.body) + + def test_get_keys_missing_container_key_for_specified_required_list(self): + bad_keys = dict(fetch_crypto_keys()) + bad_keys.pop('container') + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: bad_keys}, + required=['object', 'container']) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Missing key for 'container'", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_bad_object_key_for_default_required_list(self): + bad_keys = dict(fetch_crypto_keys()) + bad_keys['object'] = 'the minor key' + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: bad_keys}) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Bad key for 'object'", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_bad_container_key_for_default_required_list(self): + bad_keys = dict(fetch_crypto_keys()) + bad_keys['container'] = 'the major key' + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: bad_keys}, + required=['object', 'container']) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Bad key for 'container'", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + def test_get_keys_not_a_dict(self): + with self.assertRaises(HTTPException) as cm: + self.crypto_context.get_keys( + {CRYPTO_KEY_CALLBACK: lambda: ['key', 'quay', 'qui']}) + self.assertIn('500 Internal Error', cm.exception.message) + self.assertIn("Did not get a keys dict", + self.fake_logger.get_lines_for_level('error')[0]) + self.assertIn('Unable to retrieve encryption keys.', cm.exception.body) + + +class TestModuleMethods(unittest.TestCase): + meta = {'iv': '0123456789abcdef', 'cipher': 'AES_CTR_256'} + serialized_meta = '%7B%22cipher%22%3A+%22AES_CTR_256%22%2C+%22' \ + 'iv%22%3A+%22MDEyMzQ1Njc4OWFiY2RlZg%3D%3D%22%7D' + + meta_with_key = {'iv': '0123456789abcdef', 'cipher': 'AES_CTR_256', + 'body_key': {'key': 'fedcba9876543210fedcba9876543210', + 'iv': 'fedcba9876543210'}} + serialized_meta_with_key = '%7B%22body_key%22%3A+%7B%22iv%22%3A+%22ZmVkY' \ + '2JhOTg3NjU0MzIxMA%3D%3D%22%2C+%22key%22%3A+%' \ + '22ZmVkY2JhOTg3NjU0MzIxMGZlZGNiYTk4NzY1NDMyMT' \ + 'A%3D%22%7D%2C+%22cipher%22%3A+%22AES_CTR_256' \ + '%22%2C+%22iv%22%3A+%22MDEyMzQ1Njc4OWFiY2RlZg' \ + '%3D%3D%22%7D' + + def test_dump_crypto_meta(self): + actual = crypto_utils.dump_crypto_meta(self.meta) + self.assertEqual(self.serialized_meta, actual) + + actual = crypto_utils.dump_crypto_meta(self.meta_with_key) + self.assertEqual(self.serialized_meta_with_key, actual) + + def test_load_crypto_meta(self): + actual = crypto_utils.load_crypto_meta(self.serialized_meta) + self.assertEqual(self.meta, actual) + + actual = crypto_utils.load_crypto_meta(self.serialized_meta_with_key) + self.assertEqual(self.meta_with_key, actual) + + def assert_raises(value, message): + with self.assertRaises(EncryptionException) as cm: + crypto_utils.load_crypto_meta(value) + self.assertIn('Bad crypto meta %r' % value, cm.exception.message) + self.assertIn(message, cm.exception.message) + + assert_raises(None, 'crypto meta not a string') + assert_raises(99, 'crypto meta not a string') + 
assert_raises('', 'No JSON object could be decoded') + assert_raises('abc', 'No JSON object could be decoded') + assert_raises('[]', 'crypto meta not a Mapping') + assert_raises('{"iv": "abcdef"}', 'Incorrect padding') + assert_raises('{"iv": []}', 'must be string or buffer') + assert_raises('{"iv": {}}', 'must be string or buffer') + assert_raises('{"iv": 99}', 'must be string or buffer') + assert_raises('{"key": "abcdef"}', 'Incorrect padding') + assert_raises('{"key": []}', 'must be string or buffer') + assert_raises('{"key": {}}', 'must be string or buffer') + assert_raises('{"key": 99}', 'must be string or buffer') + assert_raises('{"body_key": {"iv": "abcdef"}}', 'Incorrect padding') + assert_raises('{"body_key": {"iv": []}}', 'must be string or buffer') + assert_raises('{"body_key": {"iv": {}}}', 'must be string or buffer') + assert_raises('{"body_key": {"iv": 99}}', 'must be string or buffer') + assert_raises('{"body_key": {"key": "abcdef"}}', 'Incorrect padding') + assert_raises('{"body_key": {"key": []}}', 'must be string or buffer') + assert_raises('{"body_key": {"key": {}}}', 'must be string or buffer') + assert_raises('{"body_key": {"key": 99}}', 'must be string or buffer') + + def test_dump_then_load_crypto_meta(self): + actual = crypto_utils.load_crypto_meta( + crypto_utils.dump_crypto_meta(self.meta)) + self.assertEqual(self.meta, actual) + + actual = crypto_utils.load_crypto_meta( + crypto_utils.dump_crypto_meta(self.meta_with_key)) + self.assertEqual(self.meta_with_key, actual) + + def test_append_crypto_meta(self): + actual = crypto_utils.append_crypto_meta('abc', self.meta) + expected = 'abc; swift_meta=%s' % self.serialized_meta + self.assertEqual(actual, expected) + + actual = crypto_utils.append_crypto_meta('abc', self.meta_with_key) + expected = 'abc; swift_meta=%s' % self.serialized_meta_with_key + self.assertEqual(actual, expected) + + def test_extract_crypto_meta(self): + val, meta = crypto_utils.extract_crypto_meta( + 'abc; swift_meta=%s' % self.serialized_meta) + self.assertEqual('abc', val) + self.assertDictEqual(self.meta, meta) + + val, meta = crypto_utils.extract_crypto_meta( + 'abc; swift_meta=%s' % self.serialized_meta_with_key) + self.assertEqual('abc', val) + self.assertDictEqual(self.meta_with_key, meta) + + val, meta = crypto_utils.extract_crypto_meta('abc') + self.assertEqual('abc', val) + self.assertIsNone(meta) + + # other param names will be ignored + val, meta = crypto_utils.extract_crypto_meta('abc; foo=bar') + self.assertEqual('abc', val) + self.assertIsNone(meta) + + def test_append_then_extract_crypto_meta(self): + val = 'abc' + actual = crypto_utils.extract_crypto_meta( + crypto_utils.append_crypto_meta(val, self.meta)) + self.assertEqual((val, self.meta), actual) + + +class TestCrypto(unittest.TestCase): + + def setUp(self): + self.crypto = Crypto({}) + + def test_create_encryption_context(self): + value = 'encrypt me' * 100 # more than one cipher block + key = os.urandom(32) + iv = os.urandom(16) + ctxt = self.crypto.create_encryption_ctxt(key, iv) + expected = Cipher( + algorithms.AES(key), modes.CTR(iv), + backend=default_backend()).encryptor().update(value) + self.assertEqual(expected, ctxt.update(value)) + + for bad_iv in ('a little too long', 'too short'): + self.assertRaises( + ValueError, self.crypto.create_encryption_ctxt, key, bad_iv) + + for bad_key in ('objKey', 'a' * 31, 'a' * 33, 'a' * 16, 'a' * 24): + self.assertRaises( + ValueError, self.crypto.create_encryption_ctxt, bad_key, iv) + + def 
test_create_decryption_context(self): + value = 'decrypt me' * 100 # more than one cipher block + key = os.urandom(32) + iv = os.urandom(16) + ctxt = self.crypto.create_decryption_ctxt(key, iv, 0) + expected = Cipher( + algorithms.AES(key), modes.CTR(iv), + backend=default_backend()).decryptor().update(value) + self.assertEqual(expected, ctxt.update(value)) + + for bad_iv in ('a little too long', 'too short'): + self.assertRaises( + ValueError, self.crypto.create_decryption_ctxt, key, bad_iv, 0) + + for bad_key in ('objKey', 'a' * 31, 'a' * 33, 'a' * 16, 'a' * 24): + self.assertRaises( + ValueError, self.crypto.create_decryption_ctxt, bad_key, iv, 0) + + with self.assertRaises(ValueError) as cm: + self.crypto.create_decryption_ctxt(key, iv, -1) + self.assertEqual("Offset must not be negative", cm.exception.message) + + def test_enc_dec_small_chunks(self): + self.enc_dec_chunks(['encrypt me', 'because I', 'am sensitive']) + + def test_enc_dec_large_chunks(self): + self.enc_dec_chunks([os.urandom(65536), os.urandom(65536)]) + + def enc_dec_chunks(self, chunks): + key = 'objL7wjV6L79Sfs4y7dy41273l0k6Wki' + iv = self.crypto.create_iv() + enc_ctxt = self.crypto.create_encryption_ctxt(key, iv) + enc_val = [enc_ctxt.update(chunk) for chunk in chunks] + self.assertTrue(''.join(enc_val) != chunks) + dec_ctxt = self.crypto.create_decryption_ctxt(key, iv, 0) + dec_val = [dec_ctxt.update(chunk) for chunk in enc_val] + self.assertEqual(''.join(chunks), ''.join(dec_val), + 'Expected value {%s} but got {%s}' % + (''.join(chunks), ''.join(dec_val))) + + def test_decrypt_range(self): + chunks = ['0123456789abcdef', 'ghijklmnopqrstuv'] + key = 'objL7wjV6L79Sfs4y7dy41273l0k6Wki' + iv = self.crypto.create_iv() + enc_ctxt = self.crypto.create_encryption_ctxt(key, iv) + enc_val = [enc_ctxt.update(chunk) for chunk in chunks] + self.assertTrue(''.join(enc_val) != chunks) + + # Simulate a ranged GET from byte 19 to 32 : 'jklmnopqrstuv' + dec_ctxt = self.crypto.create_decryption_ctxt(key, iv, 19) + ranged_chunks = [enc_val[1][3:]] + dec_val = [dec_ctxt.update(chunk) for chunk in ranged_chunks] + self.assertEqual('jklmnopqrstuv', ''.join(dec_val), + 'Expected value {%s} but got {%s}' % + ('jklmnopqrstuv', ''.join(dec_val))) + + def test_create_decryption_context_non_zero_offset(self): + # Verify that iv increments for each 16 bytes of offset. + # For a ranged GET we pass a non-zero offset so that the decrypter + # counter is incremented to the correct value to start decrypting at + # that offset into the object body. The counter should increment by one + # from the starting IV value for every 16 bytes offset into the object + # body, until it reaches 2^128 -1 when it should wrap to zero. We check + # that is happening by verifying a decrypted value using various + # offsets. 
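+        # Illustrative arithmetic (a py2 sketch, not used by the assertions
+        # below): treating the 16 byte IV as a big-endian counter, the
+        # expected starting IV for a given offset is
+        #
+        #   counter = (int(iv.encode('hex'), 16) + offset // 16) % 2 ** 128
+        #   exp_iv = ('%032x' % counter).decode('hex')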
+ key = 'objL7wjV6L79Sfs4y7dy41273l0k6Wki' + + def do_test(): + for offset, exp_iv in mappings.items(): + dec_ctxt = self.crypto.create_decryption_ctxt(key, iv, offset) + offset_in_block = offset % 16 + cipher = Cipher(algorithms.AES(key), + modes.CTR(exp_iv), + backend=default_backend()) + expected = cipher.decryptor().update( + 'p' * offset_in_block + 'ciphertext') + actual = dec_ctxt.update('ciphertext') + expected = expected[offset % 16:] + self.assertEqual(expected, actual, + 'Expected %r but got %r, iv=%s and offset=%s' + % (expected, actual, iv, offset)) + + iv = '0000000010000000' + mappings = { + 2: '0000000010000000', + 16: '0000000010000001', + 19: '0000000010000001', + 48: '0000000010000003', + 1024: '000000001000000p', + 5119: '000000001000001o' + } + do_test() + + # choose max iv value and test that it wraps to zero + iv = chr(0xff) * 16 + mappings = { + 2: iv, + 16: str(bytearray.fromhex('00' * 16)), # iv wraps to 0 + 19: str(bytearray.fromhex('00' * 16)), + 48: str(bytearray.fromhex('00' * 15 + '02')), + 1024: str(bytearray.fromhex('00' * 15 + '3f')), + 5119: str(bytearray.fromhex('00' * 14 + '013E')) + } + do_test() + + iv = chr(0x0) * 16 + mappings = { + 2: iv, + 16: str(bytearray.fromhex('00' * 15 + '01')), + 19: str(bytearray.fromhex('00' * 15 + '01')), + 48: str(bytearray.fromhex('00' * 15 + '03')), + 1024: str(bytearray.fromhex('00' * 15 + '40')), + 5119: str(bytearray.fromhex('00' * 14 + '013F')) + } + do_test() + + iv = chr(0x0) * 8 + chr(0xff) * 8 + mappings = { + 2: iv, + 16: str(bytearray.fromhex('00' * 7 + '01' + '00' * 8)), + 19: str(bytearray.fromhex('00' * 7 + '01' + '00' * 8)), + 48: str(bytearray.fromhex('00' * 7 + '01' + '00' * 7 + '02')), + 1024: str(bytearray.fromhex('00' * 7 + '01' + '00' * 7 + '3F')), + 5119: str(bytearray.fromhex('00' * 7 + '01' + '00' * 6 + '013E')) + } + do_test() + + def test_check_key(self): + for key in ('objKey', 'a' * 31, 'a' * 33, 'a' * 16, 'a' * 24): + with self.assertRaises(ValueError) as cm: + self.crypto.check_key(key) + self.assertEqual("Key must be length 32 bytes", + cm.exception.message) + + def test_check_crypto_meta(self): + meta = {'cipher': 'AES_CTR_256'} + with self.assertRaises(EncryptionException) as cm: + self.crypto.check_crypto_meta(meta) + self.assertEqual("Bad crypto meta: Missing 'iv'", + cm.exception.message) + + for bad_iv in ('a little too long', 'too short'): + meta['iv'] = bad_iv + with self.assertRaises(EncryptionException) as cm: + self.crypto.check_crypto_meta(meta) + self.assertEqual("Bad crypto meta: IV must be length 16 bytes", + cm.exception.message) + + meta = {'iv': os.urandom(16)} + with self.assertRaises(EncryptionException) as cm: + self.crypto.check_crypto_meta(meta) + self.assertEqual("Bad crypto meta: Missing 'cipher'", + cm.exception.message) + + meta['cipher'] = 'Mystery cipher' + with self.assertRaises(EncryptionException) as cm: + self.crypto.check_crypto_meta(meta) + self.assertEqual("Bad crypto meta: Cipher must be AES_CTR_256", + cm.exception.message) + + def test_create_iv(self): + self.assertEqual(16, len(self.crypto.create_iv())) + # crude check that we get back different values on each call + self.assertNotEqual(self.crypto.create_iv(), self.crypto.create_iv()) + + def test_get_crypto_meta(self): + meta = self.crypto.create_crypto_meta() + self.assertIsInstance(meta, dict) + # this is deliberately brittle so that if new items are added then the + # test will need to be updated + self.assertEqual(2, len(meta)) + self.assertIn('iv', meta) + self.assertEqual(16, len(meta['iv'])) 
+ self.assertIn('cipher', meta) + self.assertEqual('AES_CTR_256', meta['cipher']) + self.crypto.check_crypto_meta(meta) # sanity check + meta2 = self.crypto.create_crypto_meta() + self.assertNotEqual(meta['iv'], meta2['iv']) # crude sanity check + + def test_create_random_key(self): + # crude check that we get unique keys on each call + keys = set() + for i in range(10): + key = self.crypto.create_random_key() + self.assertEqual(32, len(key)) + keys.add(key) + self.assertEqual(10, len(keys)) + + def test_wrap_unwrap_key(self): + wrapping_key = os.urandom(32) + key_to_wrap = os.urandom(32) + iv = os.urandom(16) + with mock.patch( + 'swift.common.middleware.crypto.crypto_utils.Crypto.create_iv', + return_value=iv): + wrapped = self.crypto.wrap_key(wrapping_key, key_to_wrap) + cipher = Cipher(algorithms.AES(wrapping_key), modes.CTR(iv), + backend=default_backend()) + expected = {'key': cipher.encryptor().update(key_to_wrap), + 'iv': iv} + self.assertEqual(expected, wrapped) + + unwrapped = self.crypto.unwrap_key(wrapping_key, wrapped) + self.assertEqual(key_to_wrap, unwrapped) + + def test_unwrap_bad_key(self): + # verify that ValueError is raised if unwrapped key is invalid + wrapping_key = os.urandom(32) + for length in (0, 16, 24, 31, 33): + key_to_wrap = os.urandom(length) + wrapped = self.crypto.wrap_key(wrapping_key, key_to_wrap) + with self.assertRaises(ValueError) as cm: + self.crypto.unwrap_key(wrapping_key, wrapped) + self.assertEqual( + cm.exception.message, 'Key must be length 32 bytes') + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/common/middleware/crypto/test_decrypter.py b/test/unit/common/middleware/crypto/test_decrypter.py new file mode 100644 index 0000000000..b70d65029b --- /dev/null +++ b/test/unit/common/middleware/crypto/test_decrypter.py @@ -0,0 +1,1119 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
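+#
+# The response sysmeta consumed by the decrypter has the general shape built
+# by _make_response_headers() below (values abbreviated):
+#
+#   X-Object-Sysmeta-Crypto-Etag: <b64 ciphertext>; swift_meta=<crypto meta>
+#   X-Object-Sysmeta-Crypto-Body-Meta: <crypto meta incl. wrapped body key>
+#   X-Object-Transient-Sysmeta-Crypto-Meta-<Name>: <b64>; swift_meta=<meta>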
+import base64 +import json +import os +import unittest +from xml.dom import minidom + +import mock + +from swift.common.header_key_dict import HeaderKeyDict +from swift.common.middleware.crypto import decrypter +from swift.common.middleware.crypto.crypto_utils import CRYPTO_KEY_CALLBACK, \ + dump_crypto_meta, Crypto +from swift.common.swob import Request, HTTPException, HTTPOk, \ + HTTPPreconditionFailed, HTTPNotFound, HTTPPartialContent + +from test.unit import FakeLogger +from test.unit.common.middleware.crypto.crypto_helpers import md5hex, \ + fetch_crypto_keys, FAKE_IV, encrypt, fake_get_crypto_meta +from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts + + +def get_crypto_meta_header(crypto_meta=None): + if crypto_meta is None: + crypto_meta = fake_get_crypto_meta() + return dump_crypto_meta(crypto_meta) + + +def encrypt_and_append_meta(value, key, crypto_meta=None): + return '%s; swift_meta=%s' % ( + base64.b64encode(encrypt(value, key, FAKE_IV)), + get_crypto_meta_header(crypto_meta)) + + +class TestDecrypterObjectRequests(unittest.TestCase): + def setUp(self): + self.app = FakeSwift() + self.decrypter = decrypter.Decrypter(self.app, {}) + self.decrypter.logger = FakeLogger() + + def _make_response_headers(self, content_length, plaintext_etag, keys, + body_key): + # helper method to make a typical set of response headers for a GET or + # HEAD request + cont_key = keys['container'] + object_key = keys['object'] + body_key_meta = {'key': encrypt(body_key, object_key, FAKE_IV), + 'iv': FAKE_IV} + body_crypto_meta = fake_get_crypto_meta(body_key=body_key_meta) + return HeaderKeyDict({ + 'Etag': 'hashOfCiphertext', + 'content-type': 'text/plain', + 'content-length': content_length, + 'X-Object-Sysmeta-Crypto-Etag': '%s; swift_meta=%s' % ( + base64.b64encode(encrypt(plaintext_etag, object_key, FAKE_IV)), + get_crypto_meta_header()), + 'X-Object-Sysmeta-Crypto-Body-Meta': + get_crypto_meta_header(body_crypto_meta), + 'x-object-transient-sysmeta-crypto-meta-test': + base64.b64encode(encrypt('encrypt me', object_key, FAKE_IV)) + + ';swift_meta=' + get_crypto_meta_header(), + 'x-object-sysmeta-container-update-override-etag': + encrypt_and_append_meta('encrypt me, too', cont_key), + 'x-object-sysmeta-test': 'do not encrypt me', + }) + + def _test_request_success(self, method, body): + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + plaintext_etag = md5hex(body) + body_key = os.urandom(32) + enc_body = encrypt(body, body_key, FAKE_IV) + hdrs = self._make_response_headers( + len(enc_body), plaintext_etag, fetch_crypto_keys(), body_key) + + # there shouldn't be any x-object-meta- headers, but if there are + # then the decrypted header will win where there is a name clash... 
+ hdrs.update({ + 'x-object-meta-test': 'unexpected, overwritten by decrypted value', + 'x-object-meta-distinct': 'unexpected but distinct from encrypted' + }) + self.app.register( + method, '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('200 OK', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + self.assertEqual('encrypt me', resp.headers['x-object-meta-test']) + self.assertEqual('unexpected but distinct from encrypted', + resp.headers['x-object-meta-distinct']) + self.assertEqual('do not encrypt me', + resp.headers['x-object-sysmeta-test']) + self.assertEqual( + 'encrypt me, too', + resp.headers['X-Object-Sysmeta-Container-Update-Override-Etag']) + self.assertNotIn('X-Object-Sysmeta-Crypto-Body-Meta', resp.headers) + self.assertNotIn('X-Object-Sysmeta-Crypto-Etag', resp.headers) + return resp + + def test_GET_success(self): + body = 'FAKE APP' + resp = self._test_request_success('GET', body) + self.assertEqual(body, resp.body) + + def test_HEAD_success(self): + body = 'FAKE APP' + resp = self._test_request_success('HEAD', body) + self.assertEqual('', resp.body) + + def test_headers_case(self): + body = 'fAkE ApP' + req = Request.blank('/v1/a/c/o', body='FaKe') + req.environ[CRYPTO_KEY_CALLBACK] = fetch_crypto_keys + plaintext_etag = md5hex(body) + body_key = os.urandom(32) + enc_body = encrypt(body, body_key, FAKE_IV) + hdrs = self._make_response_headers( + len(enc_body), plaintext_etag, fetch_crypto_keys(), body_key) + + hdrs.update({ + 'x-Object-mEta-ignoRes-caSe': 'thIs pArt WilL bE cOol', + }) + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + + status, headers, app_iter = req.call_application(self.decrypter) + self.assertEqual(status, '200 OK') + expected = { + 'Etag': '7f7837924188f7b511a9e3881a9f77a8', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'encrypt me, too', + 'X-Object-Meta-Test': 'encrypt me', + 'Content-Length': '8', + 'X-Object-Meta-Ignores-Case': 'thIs pArt WilL bE cOol', + 'X-Object-Sysmeta-Test': 'do not encrypt me', + 'Content-Type': 'text/plain', + } + self.assertEqual(dict(headers), expected) + self.assertEqual('fAkE ApP', ''.join(app_iter)) + + def _test_412_response(self, method): + # simulate a 412 response to a conditional GET which has an Etag header + data = 'the object content' + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env, method=method) + resp_body = 'I am sorry, you have failed to meet a precondition' + hdrs = self._make_response_headers( + len(resp_body), md5hex(data), fetch_crypto_keys(), 'not used') + self.app.register(method, '/v1/a/c/o', HTTPPreconditionFailed, + body=resp_body, headers=hdrs) + resp = req.get_response(self.decrypter) + + self.assertEqual('412 Precondition Failed', resp.status) + # the response body should not be decrypted, it is already plaintext + self.assertEqual(resp_body if method == 'GET' else '', resp.body) + # whereas the Etag and other headers should be decrypted + self.assertEqual(md5hex(data), resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + self.assertEqual('encrypt me', resp.headers['x-object-meta-test']) + self.assertEqual('do not encrypt me', + resp.headers['x-object-sysmeta-test']) + + def test_GET_412_response(self): + self._test_412_response('GET') + + def test_HEAD_412_response(self): + self._test_412_response('HEAD') + + def 
_test_404_response(self, method): + # simulate a 404 response, sanity check response headers + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env, method=method) + resp_body = 'You still have not found what you are looking for' + hdrs = {'content-type': 'text/plain', + 'content-length': len(resp_body)} + self.app.register(method, '/v1/a/c/o', HTTPNotFound, + body=resp_body, headers=hdrs) + resp = req.get_response(self.decrypter) + + self.assertEqual('404 Not Found', resp.status) + # the response body should not be decrypted, it is already plaintext + self.assertEqual(resp_body if method == 'GET' else '', resp.body) + # there should be no etag header inserted by decrypter + self.assertNotIn('Etag', resp.headers) + self.assertEqual('text/plain', resp.headers['Content-Type']) + + def test_GET_404_response(self): + self._test_404_response('GET') + + def test_HEAD_404_response(self): + self._test_404_response('HEAD') + + def test_GET_missing_etag_crypto_meta(self): + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + # simulate missing crypto meta from encrypted etag + hdrs['X-Object-Sysmeta-Crypto-Etag'] = \ + base64.b64encode(encrypt(md5hex(body), key, FAKE_IV)) + self.app.register('GET', '/v1/a/c/o', HTTPOk, body=enc_body, + headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertIn('Error decrypting header', resp.body) + self.assertIn('Error decrypting header X-Object-Sysmeta-Crypto-Etag', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def _test_override_etag_bad_meta(self, method, bad_crypto_meta): + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + # simulate missing crypto meta from encrypted override etag + hdrs['X-Object-Sysmeta-Container-Update-Override-Etag'] = \ + encrypt_and_append_meta( + md5hex(body), key, crypto_meta=bad_crypto_meta) + self.app.register(method, '/v1/a/c/o', HTTPOk, body=enc_body, + headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertIn('Error decrypting header ' + 'X-Object-Sysmeta-Container-Update-Override-Etag', + self.decrypter.logger.get_lines_for_level('error')[0]) + return resp + + def test_GET_override_etag_bad_iv(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['iv'] = 'bad_iv' + resp = self._test_override_etag_bad_meta('GET', bad_crypto_meta) + self.assertIn('Error decrypting header', resp.body) + + def test_HEAD_override_etag_bad_iv(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['iv'] = 'bad_iv' + resp = self._test_override_etag_bad_meta('HEAD', bad_crypto_meta) + self.assertEqual('', resp.body) + + def test_GET_override_etag_bad_cipher(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['cipher'] = 'unknown cipher' + resp = self._test_override_etag_bad_meta('GET', bad_crypto_meta) + self.assertIn('Error decrypting header', resp.body) + + def 
test_HEAD_override_etag_bad_cipher(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['cipher'] = 'unknown cipher' + resp = self._test_override_etag_bad_meta('HEAD', bad_crypto_meta) + self.assertEqual('', resp.body) + + def _test_bad_key(self, method): + # use bad key + def bad_fetch_crypto_keys(): + keys = fetch_crypto_keys() + keys['object'] = 'bad key' + return keys + + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: bad_fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + self.app.register(method, '/v1/a/c/o', HTTPOk, body=enc_body, + headers=hdrs) + return req.get_response(self.decrypter) + + def test_HEAD_with_bad_key(self): + resp = self._test_bad_key('HEAD') + self.assertEqual('500 Internal Error', resp.status) + self.assertIn("Bad key for 'object'", + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_bad_key(self): + resp = self._test_bad_key('GET') + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Unable to retrieve encryption keys.', + resp.body) + self.assertIn("Bad key for 'object'", + self.decrypter.logger.get_lines_for_level('error')[0]) + + def _test_bad_crypto_meta_for_user_metadata(self, method, bad_crypto_meta): + # use bad iv for metadata headers + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + enc_val = base64.b64encode(encrypt('encrypt me', key, FAKE_IV)) + if bad_crypto_meta: + enc_val += ';swift_meta=' + get_crypto_meta_header( + crypto_meta=bad_crypto_meta) + hdrs['x-object-transient-sysmeta-crypto-meta-test'] = enc_val + self.app.register(method, '/v1/a/c/o', HTTPOk, body=enc_body, + headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertIn( + 'Error decrypting header X-Object-Transient-Sysmeta-Crypto-Meta-' + 'Test', self.decrypter.logger.get_lines_for_level('error')[0]) + return resp + + def test_HEAD_with_missing_crypto_meta_for_user_metadata(self): + self._test_bad_crypto_meta_for_user_metadata('HEAD', None) + self.assertIn('Missing crypto meta in value', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_missing_crypto_meta_for_user_metadata(self): + self._test_bad_crypto_meta_for_user_metadata('GET', None) + self.assertIn('Missing crypto meta in value', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_HEAD_with_bad_iv_for_user_metadata(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['iv'] = 'bad_iv' + self._test_bad_crypto_meta_for_user_metadata('HEAD', bad_crypto_meta) + self.assertIn('IV must be length 16', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_HEAD_with_missing_iv_for_user_metadata(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta.pop('iv') + self._test_bad_crypto_meta_for_user_metadata('HEAD', bad_crypto_meta) + self.assertIn( + 'iv', self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_bad_iv_for_user_metadata(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['iv'] = 
'bad_iv' + resp = self._test_bad_crypto_meta_for_user_metadata( + 'GET', bad_crypto_meta) + self.assertEqual('Error decrypting header', resp.body) + self.assertIn('IV must be length 16', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_missing_iv_for_user_metadata(self): + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta.pop('iv') + resp = self._test_bad_crypto_meta_for_user_metadata( + 'GET', bad_crypto_meta) + self.assertEqual('Error decrypting header', resp.body) + self.assertIn( + 'iv', self.decrypter.logger.get_lines_for_level('error')[0]) + + def _test_GET_with_bad_crypto_meta_for_object_body(self, bad_crypto_meta): + # use bad iv for object body + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + hdrs['X-Object-Sysmeta-Crypto-Body-Meta'] = \ + get_crypto_meta_header(crypto_meta=bad_crypto_meta) + self.app.register('GET', '/v1/a/c/o', HTTPOk, body=enc_body, + headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Error decrypting object', resp.body) + self.assertIn('Error decrypting object', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_bad_iv_for_object_body(self): + bad_crypto_meta = fake_get_crypto_meta(key=os.urandom(32)) + bad_crypto_meta['iv'] = 'bad_iv' + self._test_GET_with_bad_crypto_meta_for_object_body(bad_crypto_meta) + self.assertIn('IV must be length 16', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_missing_iv_for_object_body(self): + bad_crypto_meta = fake_get_crypto_meta(key=os.urandom(32)) + bad_crypto_meta.pop('iv') + self._test_GET_with_bad_crypto_meta_for_object_body(bad_crypto_meta) + self.assertIn("Missing 'iv'", + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_bad_body_key_for_object_body(self): + body_key_meta = {'key': 'wrapped too short key', 'iv': FAKE_IV} + bad_crypto_meta = fake_get_crypto_meta(body_key=body_key_meta) + self._test_GET_with_bad_crypto_meta_for_object_body(bad_crypto_meta) + self.assertIn('Key must be length 32', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_with_missing_body_key_for_object_body(self): + bad_crypto_meta = fake_get_crypto_meta() # no key by default + self._test_GET_with_bad_crypto_meta_for_object_body(bad_crypto_meta) + self.assertIn("Missing 'body_key'", + self.decrypter.logger.get_lines_for_level('error')[0]) + + def _test_req_metadata_not_encrypted(self, method): + # check that metadata is not decrypted if it does not have crypto meta; + # testing for case of an unencrypted POST to an object. 
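+ # The plaintext x-object-meta-test value set below has no crypto meta
+ # appended, so the decrypter should pass it through as-is.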
+ env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + plaintext_etag = md5hex(body) + body_key = os.urandom(32) + enc_body = encrypt(body, body_key, FAKE_IV) + hdrs = self._make_response_headers( + len(body), plaintext_etag, fetch_crypto_keys(), body_key) + hdrs.pop('x-object-transient-sysmeta-crypto-meta-test') + hdrs['x-object-meta-test'] = 'plaintext not encrypted' + self.app.register( + method, '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('200 OK', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + self.assertEqual('plaintext not encrypted', + resp.headers['x-object-meta-test']) + + def test_HEAD_metadata_not_encrypted(self): + self._test_req_metadata_not_encrypted('HEAD') + + def test_GET_metadata_not_encrypted(self): + self._test_req_metadata_not_encrypted('GET') + + def test_GET_unencrypted_data(self): + # testing case of an unencrypted object with encrypted metadata from + # a later POST + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + obj_key = fetch_crypto_keys()['object'] + hdrs = {'Etag': md5hex(body), + 'content-type': 'text/plain', + 'content-length': len(body), + 'x-object-transient-sysmeta-crypto-meta-test': + base64.b64encode(encrypt('encrypt me', obj_key, FAKE_IV)) + + ';swift_meta=' + get_crypto_meta_header(), + 'x-object-sysmeta-test': 'do not encrypt me'} + self.app.register('GET', '/v1/a/c/o', HTTPOk, body=body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual(body, resp.body) + self.assertEqual('200 OK', resp.status) + self.assertEqual(md5hex(body), resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + # POSTed user meta was encrypted + self.assertEqual('encrypt me', resp.headers['x-object-meta-test']) + # PUT sysmeta was not encrypted + self.assertEqual('do not encrypt me', + resp.headers['x-object-sysmeta-test']) + + def test_GET_multiseg(self): + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + chunks = ['some', 'chunks', 'of data'] + body = ''.join(chunks) + plaintext_etag = md5hex(body) + body_key = os.urandom(32) + ctxt = Crypto().create_encryption_ctxt(body_key, FAKE_IV) + enc_body = [encrypt(chunk, ctxt=ctxt) for chunk in chunks] + hdrs = self._make_response_headers( + sum(map(len, enc_body)), plaintext_etag, fetch_crypto_keys(), + body_key) + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual(body, resp.body) + self.assertEqual('200 OK', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + + def test_GET_multiseg_with_range(self): + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + req.headers['Content-Range'] = 'bytes 3-10/17' + chunks = ['0123', '45678', '9abcdef'] + body = ''.join(chunks) + plaintext_etag = md5hex(body) + body_key = os.urandom(32) + ctxt = Crypto().create_encryption_ctxt(body_key, FAKE_IV) + enc_body = [encrypt(chunk, ctxt=ctxt) for chunk in chunks] + enc_body = [enc_body[0][3:], enc_body[1], enc_body[2][:2]] + hdrs = 
self._make_response_headers(
+ sum(map(len, enc_body)), plaintext_etag, fetch_crypto_keys(),
+ body_key)
+ hdrs['content-range'] = req.headers['Content-Range']
+ self.app.register(
+ 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs)
+ resp = req.get_response(self.decrypter)
+ self.assertEqual('3456789a', resp.body)
+ self.assertEqual('200 OK', resp.status)
+ self.assertEqual(plaintext_etag, resp.headers['Etag'])
+ self.assertEqual('text/plain', resp.headers['Content-Type'])
+
+ # Force the decrypter context updates to be smaller than one of our
+ # range sizes to check that the decrypt context offset is set up
+ # correctly, with the offset at the first byte of the range for the
+ # first update and then re-used for subsequent updates.
+ # Do the mocking here so that the mocked value takes effect in the
+ # generator function.
+ @mock.patch.object(decrypter, 'DECRYPT_CHUNK_SIZE', 4)
+ def test_GET_multipart_ciphertext(self):
+ # build fake multipart response body
+ body_key = os.urandom(32)
+ plaintext = 'Cwm fjord veg balks nth pyx quiz'
+ plaintext_etag = md5hex(plaintext)
+ ciphertext = encrypt(plaintext, body_key, FAKE_IV)
+ parts = ((0, 3, 'text/plain'),
+ (4, 9, 'text/plain; charset=us-ascii'),
+ (24, 32, 'text/plain'))
+ length = len(ciphertext)
+ body = ''
+ for start, end, ctype in parts:
+ body += '--multipartboundary\r\n'
+ body += 'Content-Type: %s\r\n' % ctype
+ body += 'Content-Range: bytes %s-%s/%s' % (start, end - 1, length)
+ body += '\r\n\r\n' + ciphertext[start:end] + '\r\n'
+ body += '--multipartboundary--'
+
+ # register request with fake swift
+ hdrs = self._make_response_headers(
+ len(body), plaintext_etag, fetch_crypto_keys(), body_key)
+ hdrs['content-type'] = \
+ 'multipart/byteranges;boundary=multipartboundary'
+ self.app.register('GET', '/v1/a/c/o', HTTPPartialContent, body=body,
+ headers=hdrs)
+
+ # issue request
+ env = {'REQUEST_METHOD': 'GET',
+ CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
+ req = Request.blank('/v1/a/c/o', environ=env)
+ resp = req.get_response(self.decrypter)
+
+ self.assertEqual('206 Partial Content', resp.status)
+ self.assertEqual(plaintext_etag, resp.headers['Etag'])
+ self.assertEqual(len(body), int(resp.headers['Content-Length']))
+ self.assertEqual('multipart/byteranges;boundary=multipartboundary',
+ resp.headers['Content-Type'])
+
+ # the multipart headers could be re-ordered, so parse response body to
+ # verify expected content
+ resp_lines = resp.body.split('\r\n')
+ resp_lines.reverse()
+ for start, end, ctype in parts:
+ self.assertEqual('--multipartboundary', resp_lines.pop())
+ expected_header_lines = {
+ 'Content-Type: %s' % ctype,
+ 'Content-Range: bytes %s-%s/%s' % (start, end - 1, length)}
+ resp_header_lines = {resp_lines.pop(), resp_lines.pop()}
+ self.assertEqual(expected_header_lines, resp_header_lines)
+ self.assertEqual('', resp_lines.pop())
+ self.assertEqual(plaintext[start:end], resp_lines.pop())
+ self.assertEqual('--multipartboundary--', resp_lines.pop())
+
+ # we should have consumed the whole response body
+ self.assertFalse(resp_lines)
+
+ def test_GET_multipart_content_type(self):
+ # *just* having multipart content type shouldn't trigger the mime doc
+ # code path
+ body_key = os.urandom(32)
+ plaintext = 'Cwm fjord veg balks nth pyx quiz'
+ plaintext_etag = md5hex(plaintext)
+ ciphertext = encrypt(plaintext, body_key, FAKE_IV)
+
+ # register request with fake swift
+ hdrs = self._make_response_headers(
+ len(ciphertext), plaintext_etag, fetch_crypto_keys(), body_key)
+ hdrs['content-type'] = \
+ 'multipart/byteranges;boundary=multipartboundary'
+ 
self.app.register('GET', '/v1/a/c/o', HTTPOk, body=ciphertext, + headers=hdrs) + + # issue request + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + resp = req.get_response(self.decrypter) + + self.assertEqual('200 OK', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual(len(plaintext), int(resp.headers['Content-Length'])) + self.assertEqual('multipart/byteranges;boundary=multipartboundary', + resp.headers['Content-Type']) + self.assertEqual(plaintext, resp.body) + + def test_GET_multipart_no_body_crypto_meta(self): + # build fake multipart response body + plaintext = 'Cwm fjord veg balks nth pyx quiz' + plaintext_etag = md5hex(plaintext) + parts = ((0, 3, 'text/plain'), + (4, 9, 'text/plain; charset=us-ascii'), + (24, 32, 'text/plain')) + length = len(plaintext) + body = '' + for start, end, ctype in parts: + body += '--multipartboundary\r\n' + body += 'Content-Type: %s\r\n' % ctype + body += 'Content-Range: bytes %s-%s/%s' % (start, end - 1, length) + body += '\r\n\r\n' + plaintext[start:end] + '\r\n' + body += '--multipartboundary--' + + # register request with fake swift + hdrs = { + 'Etag': plaintext_etag, + 'content-type': 'multipart/byteranges;boundary=multipartboundary', + 'content-length': len(body)} + self.app.register('GET', '/v1/a/c/o', HTTPPartialContent, body=body, + headers=hdrs) + + # issue request + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + resp = req.get_response(self.decrypter) + + self.assertEqual('206 Partial Content', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual(len(body), int(resp.headers['Content-Length'])) + self.assertEqual('multipart/byteranges;boundary=multipartboundary', + resp.headers['Content-Type']) + + # the multipart response body should be unchanged + self.assertEqual(body, resp.body) + + def _test_GET_multipart_bad_body_crypto_meta(self, bad_crypto_meta): + # build fake multipart response body + key = fetch_crypto_keys()['object'] + ctxt = Crypto().create_encryption_ctxt(key, FAKE_IV) + plaintext = 'Cwm fjord veg balks nth pyx quiz' + plaintext_etag = md5hex(plaintext) + ciphertext = encrypt(plaintext, ctxt=ctxt) + parts = ((0, 3, 'text/plain'), + (4, 9, 'text/plain; charset=us-ascii'), + (24, 32, 'text/plain')) + length = len(ciphertext) + body = '' + for start, end, ctype in parts: + body += '--multipartboundary\r\n' + body += 'Content-Type: %s\r\n' % ctype + body += 'Content-Range: bytes %s-%s/%s' % (start, end - 1, length) + body += '\r\n\r\n' + ciphertext[start:end] + '\r\n' + body += '--multipartboundary--' + + # register request with fake swift + hdrs = self._make_response_headers( + len(body), plaintext_etag, fetch_crypto_keys(), 'not used') + hdrs['content-type'] = \ + 'multipart/byteranges;boundary=multipartboundary' + hdrs['X-Object-Sysmeta-Crypto-Body-Meta'] = \ + get_crypto_meta_header(bad_crypto_meta) + self.app.register('GET', '/v1/a/c/o', HTTPOk, body=body, headers=hdrs) + + # issue request + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + resp = req.get_response(self.decrypter) + + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Error decrypting object', resp.body) + self.assertIn('Error decrypting object', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def 
test_GET_multipart_bad_body_cipher(self): + self._test_GET_multipart_bad_body_crypto_meta( + {'cipher': 'Mystery cipher', 'iv': '1234567887654321'}) + self.assertIn('Cipher must be AES_CTR_256', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_multipart_missing_body_cipher(self): + self._test_GET_multipart_bad_body_crypto_meta( + {'iv': '1234567887654321'}) + self.assertIn('cipher', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_multipart_too_short_body_iv(self): + self._test_GET_multipart_bad_body_crypto_meta( + {'cipher': 'AES_CTR_256', 'iv': 'too short'}) + self.assertIn('IV must be length 16', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_multipart_too_long_body_iv(self): + self._test_GET_multipart_bad_body_crypto_meta( + {'cipher': 'AES_CTR_256', 'iv': 'a little too long'}) + self.assertIn('IV must be length 16', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_multipart_missing_body_iv(self): + self._test_GET_multipart_bad_body_crypto_meta( + {'cipher': 'AES_CTR_256'}) + self.assertIn('iv', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_missing_key_callback(self): + # Do not provide keys, and do not set override flag + env = {'REQUEST_METHOD': 'GET'} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + enc_body = encrypt(body, fetch_crypto_keys()['object'], FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex('not the body'), fetch_crypto_keys(), 'not used') + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Unable to retrieve encryption keys.', + resp.body) + self.assertIn('missing callback', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_error_in_key_callback(self): + def raise_exc(): + raise Exception('Testing') + + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: raise_exc} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + enc_body = encrypt(body, fetch_crypto_keys()['object'], FAKE_IV) + hdrs = self._make_response_headers( + len(body), md5hex(body), fetch_crypto_keys(), 'not used') + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Unable to retrieve encryption keys.', + resp.body) + self.assertIn('from callback: Testing', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_cipher_mismatch_for_body(self): + # Cipher does not match + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + enc_body = encrypt(body, fetch_crypto_keys()['object'], FAKE_IV) + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['cipher'] = 'unknown_cipher' + hdrs = self._make_response_headers( + len(enc_body), md5hex(body), fetch_crypto_keys(), 'not used') + hdrs['X-Object-Sysmeta-Crypto-Body-Meta'] = \ + get_crypto_meta_header(crypto_meta=bad_crypto_meta) + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Error decrypting object', resp.body) + self.assertIn('Error decrypting object', + self.decrypter.logger.get_lines_for_level('error')[0]) + 
self.assertIn('Bad crypto meta: Cipher', + self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_cipher_mismatch_for_metadata(self): + # Cipher does not match + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + key = fetch_crypto_keys()['object'] + enc_body = encrypt(body, key, FAKE_IV) + bad_crypto_meta = fake_get_crypto_meta() + bad_crypto_meta['cipher'] = 'unknown_cipher' + hdrs = self._make_response_headers( + len(enc_body), md5hex(body), fetch_crypto_keys(), 'not used') + hdrs.update({'x-object-transient-sysmeta-crypto-meta-test': + base64.b64encode(encrypt('encrypt me', key, FAKE_IV)) + + ';swift_meta=' + + get_crypto_meta_header(crypto_meta=bad_crypto_meta)}) + self.app.register( + 'GET', '/v1/a/c/o', HTTPOk, body=enc_body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertEqual('Error decrypting header', resp.body) + self.assertIn( + 'Error decrypting header X-Object-Transient-Sysmeta-Crypto-Meta-' + 'Test', self.decrypter.logger.get_lines_for_level('error')[0]) + + def test_GET_decryption_override(self): + # This covers the case of an old un-encrypted object + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'swift.crypto.override': True} + req = Request.blank('/v1/a/c/o', environ=env) + body = 'FAKE APP' + hdrs = {'Etag': md5hex(body), + 'content-type': 'text/plain', + 'content-length': len(body), + 'x-object-meta-test': 'do not encrypt me', + 'x-object-sysmeta-test': 'do not encrypt me'} + self.app.register('GET', '/v1/a/c/o', HTTPOk, body=body, headers=hdrs) + resp = req.get_response(self.decrypter) + self.assertEqual(body, resp.body) + self.assertEqual('200 OK', resp.status) + self.assertEqual(md5hex(body), resp.headers['Etag']) + self.assertEqual('text/plain', resp.headers['Content-Type']) + self.assertEqual('do not encrypt me', + resp.headers['x-object-meta-test']) + self.assertEqual('do not encrypt me', + resp.headers['x-object-sysmeta-test']) + + +class TestDecrypterContainerRequests(unittest.TestCase): + def setUp(self): + self.app = FakeSwift() + self.decrypter = decrypter.Decrypter(self.app, {}) + self.decrypter.logger = FakeLogger() + + def _make_cont_get_req(self, resp_body, format, override=False, + callback=fetch_crypto_keys): + path = '/v1/a/c' + content_type = 'text/plain' + if format: + path = '%s/?format=%s' % (path, format) + content_type = 'application/' + format + env = {'REQUEST_METHOD': 'GET', + CRYPTO_KEY_CALLBACK: callback} + if override: + env['swift.crypto.override'] = True + req = Request.blank(path, environ=env) + hdrs = {'content-type': content_type} + self.app.register('GET', path, HTTPOk, body=resp_body, headers=hdrs) + return req.get_response(self.decrypter) + + def test_GET_container_success(self): + # no format requested, listing has names only + fake_body = 'testfile1\ntestfile2\n' + calls = [0] + + def wrapped_fetch_crypto_keys(): + calls[0] += 1 + return fetch_crypto_keys() + + resp = self._make_cont_get_req(fake_body, None, + callback=wrapped_fetch_crypto_keys) + + self.assertEqual('200 OK', resp.status) + names = resp.body.split('\n') + self.assertEqual(3, len(names)) + self.assertIn('testfile1', names) + self.assertIn('testfile2', names) + self.assertIn('', names) + self.assertEqual(0, calls[0]) + + def test_GET_container_json(self): + content_type_1 = u'\uF10F\uD20D\uB30B\u9409' + content_type_2 = 'text/plain; param=foo' + 
pt_etag1 = 'c6e8196d7f0fff6444b90861fe8d609d'
+ pt_etag2 = 'ac0374ed4d43635f803c82469d0b5a10'
+ key = fetch_crypto_keys()['container']
+
+ obj_dict_1 = {"bytes": 16,
+ "last_modified": "2015-04-14T23:33:06.439040",
+ "hash": encrypt_and_append_meta(
+ pt_etag1.encode('utf-8'), key),
+ "name": "testfile",
+ "content_type": content_type_1}
+
+ obj_dict_2 = {"bytes": 24,
+ "last_modified": "2015-04-14T23:33:06.519020",
+ "hash": encrypt_and_append_meta(
+ pt_etag2.encode('utf-8'), key),
+ "name": "testfile2",
+ "content_type": content_type_2}
+
+ listing = [obj_dict_1, obj_dict_2]
+ fake_body = json.dumps(listing)
+
+ resp = self._make_cont_get_req(fake_body, 'json')
+
+ self.assertEqual('200 OK', resp.status)
+ body = resp.body
+ self.assertEqual(len(body), int(resp.headers['Content-Length']))
+ body_json = json.loads(body)
+ self.assertEqual(2, len(body_json))
+ obj_dict_1['hash'] = pt_etag1
+ self.assertDictEqual(obj_dict_1, body_json[0])
+ obj_dict_2['hash'] = pt_etag2
+ self.assertDictEqual(obj_dict_2, body_json[1])
+
+ def test_GET_container_json_with_crypto_override(self):
+ content_type_1 = 'image/jpeg'
+ content_type_2 = 'text/plain; param=foo'
+ pt_etag1 = 'c6e8196d7f0fff6444b90861fe8d609d'
+ pt_etag2 = 'ac0374ed4d43635f803c82469d0b5a10'
+
+ obj_dict_1 = {"bytes": 16,
+ "last_modified": "2015-04-14T23:33:06.439040",
+ "hash": pt_etag1,
+ "name": "testfile",
+ "content_type": content_type_1}
+
+ obj_dict_2 = {"bytes": 24,
+ "last_modified": "2015-04-14T23:33:06.519020",
+ "hash": pt_etag2,
+ "name": "testfile2",
+ "content_type": content_type_2}
+
+ listing = [obj_dict_1, obj_dict_2]
+ fake_body = json.dumps(listing)
+
+ resp = self._make_cont_get_req(fake_body, 'json', override=True)
+
+ self.assertEqual('200 OK', resp.status)
+ body = resp.body
+ self.assertEqual(len(body), int(resp.headers['Content-Length']))
+ body_json = json.loads(body)
+ self.assertEqual(2, len(body_json))
+ self.assertDictEqual(obj_dict_1, body_json[0])
+ self.assertDictEqual(obj_dict_2, body_json[1])
+
+ def test_cont_get_json_req_with_cipher_mismatch(self):
+ bad_crypto_meta = fake_get_crypto_meta()
+ bad_crypto_meta['cipher'] = 'unknown_cipher'
+ key = fetch_crypto_keys()['container']
+ pt_etag = 'c6e8196d7f0fff6444b90861fe8d609d'
+ ct_etag = encrypt_and_append_meta(pt_etag, key,
+ crypto_meta=bad_crypto_meta)
+
+ obj_dict_1 = {"bytes": 16,
+ "last_modified": "2015-04-14T23:33:06.439040",
+ "hash": ct_etag,
+ "name": "testfile",
+ "content_type": "image/jpeg"}
+
+ listing = [obj_dict_1]
+ fake_body = json.dumps(listing)
+
+ resp = self._make_cont_get_req(fake_body, 'json')
+
+ self.assertEqual('500 Internal Error', resp.status)
+ self.assertEqual('Error decrypting container listing', resp.body)
+ self.assertIn("Cipher must be AES_CTR_256",
+ self.decrypter.logger.get_lines_for_level('error')[0])
+
+ def _assert_element_contains_dict(self, expected, element):
+ for k, v in expected.items():
+ entry = element.getElementsByTagName(k)
+ self.assertIsNotNone(entry, 'Key %s not found' % k)
+ actual = entry[0].childNodes[0].nodeValue
+ self.assertEqual(v, actual,
+ "Expected %s but got %s for key %s"
+ % (v, actual, k))
+
+ def test_GET_container_xml(self):
+ content_type_1 = u''
+ content_type_2 = 'text/plain; param=foo'
+ pt_etag1 = 'c6e8196d7f0fff6444b90861fe8d609d'
+ pt_etag2 = 'ac0374ed4d43635f803c82469d0b5a10'
+ key = fetch_crypto_keys()['container']
+
+ fake_body = '''<?xml version="1.0" encoding="UTF-8"?>
+<container name="testc">\
+<object><hash>\
+''' + encrypt_and_append_meta(pt_etag1.encode('utf8'), key) + '''\
+</hash><content_type>\
+''' + content_type_1 + '''\
+</content_type><name>testfile</name><bytes>16</bytes>\
+<last_modified>2015-04-19T02:37:39.601660</last_modified></object>\
+<object><hash>\
+''' + encrypt_and_append_meta(pt_etag2.encode('utf8'), key) + '''\
+</hash><content_type>\
+''' + content_type_2 + '''\
+</content_type><name>testfile2</name><bytes>24</bytes>\
+<last_modified>2015-04-19T02:37:39.684740</last_modified></object>\
+</container>'''
+
+ resp = self._make_cont_get_req(fake_body, 'xml')
+ self.assertEqual('200 OK', resp.status)
+ body = resp.body
+ self.assertEqual(len(body), int(resp.headers['Content-Length']))
+
+ tree = minidom.parseString(body)
+ containers = tree.getElementsByTagName('container')
+ self.assertEqual(1, len(containers))
+ self.assertEqual('testc',
+ containers[0].attributes.getNamedItem("name").value)
+
+ objs = tree.getElementsByTagName('object')
+ self.assertEqual(2, len(objs))
+
+ obj_dict_1 = {"bytes": "16",
+ "last_modified": "2015-04-19T02:37:39.601660",
+ "hash": pt_etag1,
+ "name": "testfile",
+ "content_type": content_type_1}
+ self._assert_element_contains_dict(obj_dict_1, objs[0])
+ obj_dict_2 = {"bytes": "24",
+ "last_modified": "2015-04-19T02:37:39.684740",
+ "hash": pt_etag2,
+ "name": "testfile2",
+ "content_type": content_type_2}
+ self._assert_element_contains_dict(obj_dict_2, objs[1])
+
+ def test_GET_container_xml_with_crypto_override(self):
+ content_type_1 = 'image/jpeg'
+ content_type_2 = 'text/plain; param=foo'
+
+ fake_body = '''<?xml version="1.0" encoding="UTF-8"?>
+<container name="testc">\
+<object><hash>c6e8196d7f0fff6444b90861fe8d609d</hash>\
+<content_type>''' + content_type_1 + '''\
+</content_type><name>testfile</name><bytes>16</bytes>\
+<last_modified>2015-04-19T02:37:39.601660</last_modified></object>\
+<object><hash>ac0374ed4d43635f803c82469d0b5a10</hash>\
+<content_type>''' + content_type_2 + '''\
+</content_type><name>testfile2</name><bytes>24</bytes>\
+<last_modified>2015-04-19T02:37:39.684740</last_modified></object>\
+</container>'''
+
+ resp = self._make_cont_get_req(fake_body, 'xml', override=True)
+
+ self.assertEqual('200 OK', resp.status)
+ body = resp.body
+ self.assertEqual(len(body), int(resp.headers['Content-Length']))
+
+ tree = minidom.parseString(body)
+ containers = tree.getElementsByTagName('container')
+ self.assertEqual(1, len(containers))
+ self.assertEqual('testc',
+ containers[0].attributes.getNamedItem("name").value)
+
+ objs = tree.getElementsByTagName('object')
+ self.assertEqual(2, len(objs))
+
+ obj_dict_1 = {"bytes": "16",
+ "last_modified": "2015-04-19T02:37:39.601660",
+ "hash": "c6e8196d7f0fff6444b90861fe8d609d",
+ "name": "testfile",
+ "content_type": content_type_1}
+ self._assert_element_contains_dict(obj_dict_1, objs[0])
+ obj_dict_2 = {"bytes": "24",
+ "last_modified": "2015-04-19T02:37:39.684740",
+ "hash": "ac0374ed4d43635f803c82469d0b5a10",
+ "name": "testfile2",
+ "content_type": content_type_2}
+ self._assert_element_contains_dict(obj_dict_2, objs[1])
+
+ def test_cont_get_xml_req_with_cipher_mismatch(self):
+ bad_crypto_meta = fake_get_crypto_meta()
+ bad_crypto_meta['cipher'] = 'unknown_cipher'
+
+ fake_body = '''<?xml version="1.0" encoding="UTF-8"?>
+<container name="testc"><object>\
+<hash>''' + encrypt_and_append_meta('c6e8196d7f0fff6444b90861fe8d609d',
+ fetch_crypto_keys()['container'],
+ crypto_meta=bad_crypto_meta) + '''\
+</hash>\
+<content_type>image/jpeg</content_type>\
+<name>testfile</name><bytes>16</bytes>\
+<last_modified>2015-04-19T02:37:39.601660</last_modified></object>\
+</container>'''
+
+ resp = self._make_cont_get_req(fake_body, 'xml')
+
+ self.assertEqual('500 Internal Error', resp.status)
+ self.assertEqual('Error decrypting container listing', resp.body)
+ self.assertIn("Cipher must be AES_CTR_256",
+ self.decrypter.logger.get_lines_for_level('error')[0])
+
+
+class TestModuleMethods(unittest.TestCase):
+ def test_purge_crypto_sysmeta_headers(self):
+ retained_headers = {'x-object-sysmeta-test1': 'keep',
+ 'x-object-meta-test2': 'retain',
+ 'x-object-transient-sysmeta-test3': 'leave intact',
+ 'etag': 'hold onto',
+ 'x-other': 'cherish',
+ 'x-object-not-meta': 'do not remove'}
+ purged_headers = {'x-object-sysmeta-crypto-test1': 'remove',
+ 'x-object-transient-sysmeta-crypto-test2': 'purge'}
+ 
test_headers = retained_headers.copy() + test_headers.update(purged_headers) + actual = decrypter.purge_crypto_sysmeta_headers(test_headers.items()) + + for k, v in actual: + k = k.lower() + self.assertNotIn(k, purged_headers) + self.assertEqual(retained_headers[k], v) + retained_headers.pop(k) + self.assertFalse(retained_headers) + + +class TestDecrypter(unittest.TestCase): + def test_app_exception(self): + app = decrypter.Decrypter(FakeAppThatExcepts(HTTPException), {}) + req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'}) + with self.assertRaises(HTTPException) as catcher: + req.get_response(app) + self.assertEqual(FakeAppThatExcepts.MESSAGE, catcher.exception.body) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/common/middleware/crypto/test_encrypter.py b/test/unit/common/middleware/crypto/test_encrypter.py new file mode 100644 index 0000000000..0f9553cad7 --- /dev/null +++ b/test/unit/common/middleware/crypto/test_encrypter.py @@ -0,0 +1,820 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import hashlib +import hmac +import json +import os +import unittest +import urllib + +import mock + +from swift.common.middleware.crypto import encrypter +from swift.common.middleware.crypto.crypto_utils import ( + CRYPTO_KEY_CALLBACK, Crypto) +from swift.common.swob import ( + Request, HTTPException, HTTPCreated, HTTPAccepted, HTTPOk, HTTPBadRequest) +from swift.common.utils import FileLikeIter + +from test.unit import FakeLogger, EMPTY_ETAG +from test.unit.common.middleware.crypto.crypto_helpers import ( + fetch_crypto_keys, md5hex, FAKE_IV, encrypt) +from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts + + +@mock.patch('swift.common.middleware.crypto.crypto_utils.Crypto.create_iv', + lambda *args: FAKE_IV) +class TestEncrypter(unittest.TestCase): + def setUp(self): + self.app = FakeSwift() + self.encrypter = encrypter.Encrypter(self.app, {}) + self.encrypter.logger = FakeLogger() + + def _verify_user_metadata(self, req_hdrs, name, value, key): + # verify encrypted version of user metadata + self.assertNotIn('X-Object-Meta-' + name, req_hdrs) + expected_hdr = 'X-Object-Transient-Sysmeta-Crypto-Meta-' + name + self.assertIn(expected_hdr, req_hdrs) + enc_val, param = req_hdrs[expected_hdr].split(';') + param = param.strip() + self.assertTrue(param.startswith('swift_meta=')) + actual_meta = json.loads( + urllib.unquote_plus(param[len('swift_meta='):])) + self.assertEqual(Crypto.cipher, actual_meta['cipher']) + meta_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(FAKE_IV, meta_iv) + self.assertEqual( + base64.b64encode(encrypt(value, key, meta_iv)), + enc_val) + # if there is any encrypted user metadata then this header should exist + self.assertIn('X-Object-Transient-Sysmeta-Crypto-Meta', req_hdrs) + common_meta = json.loads(urllib.unquote_plus( + req_hdrs['X-Object-Transient-Sysmeta-Crypto-Meta'])) + self.assertDictEqual({'cipher': Crypto.cipher, + 'key_id': {'v': 
'fake', 'path': '/a/c/fake'}}, + common_meta) + + def test_PUT_req(self): + body_key = os.urandom(32) + object_key = fetch_crypto_keys()['object'] + plaintext = 'FAKE APP' + plaintext_etag = md5hex(plaintext) + ciphertext = encrypt(plaintext, body_key, FAKE_IV) + ciphertext_etag = md5hex(ciphertext) + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'etag': plaintext_etag, + 'content-type': 'text/plain', + 'content-length': str(len(plaintext)), + 'x-object-meta-etag': 'not to be confused with the Etag!', + 'x-object-meta-test': 'encrypt me', + 'x-object-sysmeta-test': 'do not encrypt me'} + req = Request.blank( + '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + with mock.patch( + 'swift.common.middleware.crypto.crypto_utils.' + 'Crypto.create_random_key', + return_value=body_key): + resp = req.get_response(self.encrypter) + self.assertEqual('201 Created', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + + # verify metadata items + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual('PUT', self.app.calls[0][0]) + req_hdrs = self.app.headers[0] + + # verify body crypto meta + actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta'] + actual = json.loads(urllib.unquote_plus(actual)) + self.assertEqual(Crypto().cipher, actual['cipher']) + self.assertEqual(FAKE_IV, base64.b64decode(actual['iv'])) + + # verify wrapped body key + expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV) + self.assertEqual(expected_wrapped_key, + base64.b64decode(actual['body_key']['key'])) + self.assertEqual(FAKE_IV, + base64.b64decode(actual['body_key']['iv'])) + self.assertEqual(fetch_crypto_keys()['id'], actual['key_id']) + + # verify etag + self.assertEqual(ciphertext_etag, req_hdrs['Etag']) + + encrypted_etag, _junk, etag_meta = \ + req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=') + # verify crypto_meta was appended to this etag + self.assertTrue(etag_meta) + actual_meta = json.loads(urllib.unquote_plus(etag_meta)) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + + # verify encrypted version of plaintext etag + actual = base64.b64decode(encrypted_etag) + etag_iv = base64.b64decode(actual_meta['iv']) + enc_etag = encrypt(plaintext_etag, object_key, etag_iv) + self.assertEqual(enc_etag, actual) + + # verify etag MAC for conditional requests + actual_hmac = base64.b64decode( + req_hdrs['X-Object-Sysmeta-Crypto-Etag-Mac']) + self.assertEqual(actual_hmac, hmac.new( + object_key, plaintext_etag, hashlib.sha256).digest()) + + # verify encrypted etag for container update + self.assertIn( + 'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs) + parts = req_hdrs[ + 'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1) + self.assertEqual(2, len(parts)) + + # extract crypto_meta from end of etag for container update + param = parts[1].strip() + crypto_meta_tag = 'swift_meta=' + self.assertTrue(param.startswith(crypto_meta_tag), param) + actual_meta = json.loads( + urllib.unquote_plus(param[len(crypto_meta_tag):])) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id']) + + cont_key = fetch_crypto_keys()['container'] + cont_etag_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(FAKE_IV, cont_etag_iv) + self.assertEqual(encrypt(plaintext_etag, cont_key, cont_etag_iv), + base64.b64decode(parts[0])) + + # content-type is not encrypted + 
self.assertEqual('text/plain', req_hdrs['Content-Type']) + + # user meta is encrypted + self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key) + self._verify_user_metadata( + req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key) + + # sysmeta is not encrypted + self.assertEqual('do not encrypt me', + req_hdrs['X-Object-Sysmeta-Test']) + + # verify object is encrypted by getting direct from the app + get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) + resp = get_req.get_response(self.app) + self.assertEqual(ciphertext, resp.body) + self.assertEqual(ciphertext_etag, resp.headers['Etag']) + + def test_PUT_zero_size_object(self): + # object body encryption should be skipped for zero sized object body + object_key = fetch_crypto_keys()['object'] + plaintext_etag = EMPTY_ETAG + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'etag': EMPTY_ETAG, + 'content-type': 'text/plain', + 'content-length': '0', + 'x-object-meta-etag': 'not to be confused with the Etag!', + 'x-object-meta-test': 'encrypt me', + 'x-object-sysmeta-test': 'do not encrypt me'} + req = Request.blank( + '/v1/a/c/o', environ=env, body='', headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + + resp = req.get_response(self.encrypter) + + self.assertEqual('201 Created', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual('PUT', self.app.calls[0][0]) + req_hdrs = self.app.headers[0] + + # verify that there is no body crypto meta + self.assertNotIn('X-Object-Sysmeta-Crypto-Meta', req_hdrs) + # verify etag is md5 of plaintext + self.assertEqual(EMPTY_ETAG, req_hdrs['Etag']) + # verify there is no etag crypto meta + self.assertNotIn('X-Object-Sysmeta-Crypto-Etag', req_hdrs) + # verify there is no container update override for etag + self.assertNotIn( + 'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs) + + # user meta is still encrypted + self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', object_key) + self._verify_user_metadata( + req_hdrs, 'Etag', 'not to be confused with the Etag!', object_key) + + # sysmeta is not encrypted + self.assertEqual('do not encrypt me', + req_hdrs['X-Object-Sysmeta-Test']) + + # verify object is empty by getting direct from the app + get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) + resp = get_req.get_response(self.app) + self.assertEqual('', resp.body) + self.assertEqual(EMPTY_ETAG, resp.headers['Etag']) + + def test_PUT_with_other_footers(self): + # verify handling of another middleware's footer callback + cont_key = fetch_crypto_keys()['container'] + body_key = os.urandom(32) + object_key = fetch_crypto_keys()['object'] + plaintext = 'FAKE APP' + plaintext_etag = md5hex(plaintext) + ciphertext = encrypt(plaintext, body_key, FAKE_IV) + ciphertext_etag = md5hex(ciphertext) + other_footers = { + 'Etag': plaintext_etag, + 'X-Object-Sysmeta-Other': 'other sysmeta', + 'X-Object-Sysmeta-Container-Update-Override-Size': + 'other override', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'final etag'} + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'swift.callback.update_footers': + lambda footers: footers.update(other_footers)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(plaintext)), + 'Etag': 'correct etag is in footers'} + req = Request.blank( + '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs) + 
self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + + with mock.patch( + 'swift.common.middleware.crypto.crypto_utils.' + 'Crypto.create_random_key', + lambda *args: body_key): + resp = req.get_response(self.encrypter) + + self.assertEqual('201 Created', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + + # verify metadata items + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual('PUT', self.app.calls[0][0]) + req_hdrs = self.app.headers[0] + + # verify that other middleware's footers made it to app, including any + # container update overrides but nothing Etag-related + other_footers.pop('Etag') + other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag') + for k, v in other_footers.items(): + self.assertEqual(v, req_hdrs[k]) + + # verify encryption footers are ok + encrypted_etag, _junk, etag_meta = \ + req_hdrs['X-Object-Sysmeta-Crypto-Etag'].partition('; swift_meta=') + self.assertTrue(etag_meta) + actual_meta = json.loads(urllib.unquote_plus(etag_meta)) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + + self.assertEqual(ciphertext_etag, req_hdrs['Etag']) + actual = base64.b64decode(encrypted_etag) + etag_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(encrypt(plaintext_etag, object_key, etag_iv), actual) + + # verify encrypted etag for container update + self.assertIn( + 'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs) + parts = req_hdrs[ + 'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1) + self.assertEqual(2, len(parts)) + + # extract crypto_meta from end of etag for container update + param = parts[1].strip() + crypto_meta_tag = 'swift_meta=' + self.assertTrue(param.startswith(crypto_meta_tag), param) + actual_meta = json.loads( + urllib.unquote_plus(param[len(crypto_meta_tag):])) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + + cont_key = fetch_crypto_keys()['container'] + cont_etag_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(FAKE_IV, cont_etag_iv) + self.assertEqual(encrypt('final etag', cont_key, cont_etag_iv), + base64.b64decode(parts[0])) + + # verify body crypto meta + actual = req_hdrs['X-Object-Sysmeta-Crypto-Body-Meta'] + actual = json.loads(urllib.unquote_plus(actual)) + self.assertEqual(Crypto().cipher, actual['cipher']) + self.assertEqual(FAKE_IV, base64.b64decode(actual['iv'])) + + # verify wrapped body key + expected_wrapped_key = encrypt(body_key, object_key, FAKE_IV) + self.assertEqual(expected_wrapped_key, + base64.b64decode(actual['body_key']['key'])) + self.assertEqual(FAKE_IV, + base64.b64decode(actual['body_key']['iv'])) + self.assertEqual(fetch_crypto_keys()['id'], actual['key_id']) + + def test_PUT_with_etag_override_in_headers(self): + # verify handling of another middleware's + # container-update-override-etag in headers + plaintext = 'FAKE APP' + plaintext_etag = md5hex(plaintext) + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(plaintext)), + 'Etag': plaintext_etag, + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'final etag'} + req = Request.blank( + '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + resp = req.get_response(self.encrypter) + + self.assertEqual('201 Created', resp.status) + self.assertEqual(plaintext_etag, resp.headers['Etag']) + + # verify metadata items + self.assertEqual(1, len(self.app.calls), self.app.calls) + 
self.assertEqual(('PUT', '/v1/a/c/o'), self.app.calls[0]) + req_hdrs = self.app.headers[0] + + # verify encrypted etag for container update + self.assertIn( + 'X-Object-Sysmeta-Container-Update-Override-Etag', req_hdrs) + parts = req_hdrs[ + 'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1) + self.assertEqual(2, len(parts)) + cont_key = fetch_crypto_keys()['container'] + + # extract crypto_meta from end of etag for container update + param = parts[1].strip() + crypto_meta_tag = 'swift_meta=' + self.assertTrue(param.startswith(crypto_meta_tag), param) + actual_meta = json.loads( + urllib.unquote_plus(param[len(crypto_meta_tag):])) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id']) + + cont_etag_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(FAKE_IV, cont_etag_iv) + self.assertEqual(encrypt('final etag', cont_key, cont_etag_iv), + base64.b64decode(parts[0])) + + def test_PUT_with_bad_etag_in_other_footers(self): + # verify that etag supplied in footers from other middleware overrides + # header etag when validating inbound plaintext etags + plaintext = 'FAKE APP' + plaintext_etag = md5hex(plaintext) + other_footers = { + 'Etag': 'bad etag', + 'X-Object-Sysmeta-Other': 'other sysmeta', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'other override'} + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'swift.callback.update_footers': + lambda footers: footers.update(other_footers)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(plaintext)), + 'Etag': plaintext_etag} + req = Request.blank( + '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('422 Unprocessable Entity', resp.status) + self.assertNotIn('Etag', resp.headers) + + def test_PUT_with_bad_etag_in_headers_and_other_footers(self): + # verify that etag supplied in headers from other middleware is used if + # none is supplied in footers when validating inbound plaintext etags + plaintext = 'FAKE APP' + other_footers = { + 'X-Object-Sysmeta-Other': 'other sysmeta', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'other override'} + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'swift.callback.update_footers': + lambda footers: footers.update(other_footers)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(plaintext)), + 'Etag': 'bad etag'} + req = Request.blank( + '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('422 Unprocessable Entity', resp.status) + self.assertNotIn('Etag', resp.headers) + + def test_PUT_nothing_read(self): + # simulate an artificial scenario of a downstream filter/app not + # actually reading the input stream from encrypter. 
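+ # If the input is never read then no ciphertext is produced, so the
+ # encrypter should forward the client's etag unchanged and add no
+ # crypto footers (both are asserted below).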
+ class NonReadingApp(object): + def __call__(self, env, start_response): + # note: no read from wsgi.input + req = Request(env) + env['swift.callback.update_footers'](req.headers) + call_headers.append(req.headers) + resp = HTTPCreated(req=req, headers={'Etag': 'response etag'}) + return resp(env, start_response) + + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'content-type': 'text/plain', + 'content-length': 0, + 'etag': 'etag from client'} + req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs) + + call_headers = [] + resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {})) + self.assertEqual('201 Created', resp.status) + self.assertEqual('response etag', resp.headers['Etag']) + self.assertEqual(1, len(call_headers)) + self.assertEqual('etag from client', call_headers[0]['etag']) + # verify no encryption footers + for k in call_headers[0]: + self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-')) + + # check that an upstream footer callback gets called + other_footers = { + 'Etag': 'other etag', + 'X-Object-Sysmeta-Other': 'other sysmeta', + 'X-Backend-Container-Update-Override-Etag': 'other override'} + env.update({'swift.callback.update_footers': + lambda footers: footers.update(other_footers)}) + req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs) + + call_headers = [] + resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {})) + + self.assertEqual('201 Created', resp.status) + self.assertEqual('response etag', resp.headers['Etag']) + self.assertEqual(1, len(call_headers)) + # verify that other middleware's footers made it to app + for k, v in other_footers.items(): + self.assertEqual(v, call_headers[0][k]) + # verify no encryption footers + for k in call_headers[0]: + self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-')) + + def test_POST_req(self): + body = 'FAKE APP' + env = {'REQUEST_METHOD': 'POST', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'x-object-meta-test': 'encrypt me', + 'x-object-sysmeta-test': 'do not encrypt me'} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + key = fetch_crypto_keys()['object'] + self.app.register('POST', '/v1/a/c/o', HTTPAccepted, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('202 Accepted', resp.status) + self.assertNotIn('Etag', resp.headers) + + # verify metadata items + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual('POST', self.app.calls[0][0]) + req_hdrs = self.app.headers[0] + + # user meta is encrypted + self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', key) + + # sysmeta is not encrypted + self.assertEqual('do not encrypt me', + req_hdrs['X-Object-Sysmeta-Test']) + + def _test_no_user_metadata(self, method): + # verify that x-object-transient-sysmeta-crypto-meta is not set when + # there is no user metadata + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank('/v1/a/c/o', environ=env, body='body') + self.app.register(method, '/v1/a/c/o', HTTPAccepted, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('202 Accepted', resp.status) + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual(method, self.app.calls[0][0]) + self.assertNotIn('x-object-transient-sysmeta-crypto-meta', + self.app.headers[0]) + + def test_PUT_no_user_metadata(self): + self._test_no_user_metadata('PUT') + + def test_POST_no_user_metadata(self): + self._test_no_user_metadata('POST') + + 
def _test_if_match(self, method, match_header_name): + def do_test(method, plain_etags, expected_plain_etags=None): + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + match_header_value = ', '.join(plain_etags) + req = Request.blank( + '/v1/a/c/o', environ=env, method=method, + headers={match_header_name: match_header_value}) + app = FakeSwift() + app.register(method, '/v1/a/c/o', HTTPOk, {}) + resp = req.get_response(encrypter.Encrypter(app, {})) + self.assertEqual('200 OK', resp.status) + + self.assertEqual(1, len(app.calls), app.calls) + self.assertEqual(method, app.calls[0][0]) + actual_headers = app.headers[0] + + # verify the alternate etag location has been specified + if match_header_value and match_header_value != '*': + self.assertIn('X-Backend-Etag-Is-At', actual_headers) + self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac', + actual_headers['X-Backend-Etag-Is-At']) + + # verify etags have been supplemented with masked values + self.assertIn(match_header_name, actual_headers) + actual_etags = set(actual_headers[match_header_name].split(', ')) + key = fetch_crypto_keys()['object'] + masked_etags = [ + '"%s"' % base64.b64encode(hmac.new( + key, etag.strip('"'), hashlib.sha256).digest()) + for etag in plain_etags if etag not in ('*', '')] + expected_etags = set((expected_plain_etags or plain_etags) + + masked_etags) + self.assertEqual(expected_etags, actual_etags) + # check that the request environ was returned to original state + self.assertEqual(set(plain_etags), + set(req.headers[match_header_name].split(', '))) + + do_test(method, ['']) + do_test(method, ['"an etag"']) + do_test(method, ['"an etag"', '"another_etag"']) + do_test(method, ['*']) + # rfc2616 does not allow wildcard *and* etag but test it anyway + do_test(method, ['*', '"an etag"']) + # etags should be quoted but check we can cope if they are not + do_test( + method, ['*', 'an etag', 'another_etag'], + expected_plain_etags=['*', '"an etag"', '"another_etag"']) + + def test_GET_if_match(self): + self._test_if_match('GET', 'If-Match') + + def test_HEAD_if_match(self): + self._test_if_match('HEAD', 'If-Match') + + def test_GET_if_none_match(self): + self._test_if_match('GET', 'If-None-Match') + + def test_HEAD_if_none_match(self): + self._test_if_match('HEAD', 'If-None-Match') + + def _test_existing_etag_is_at_header(self, method, match_header_name): + # if another middleware has already set X-Backend-Etag-Is-At then + # encrypter should not override that value + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank( + '/v1/a/c/o', environ=env, method=method, + headers={match_header_name: "an etag", + 'X-Backend-Etag-Is-At': 'X-Object-Sysmeta-Other-Etag'}) + self.app.register(method, '/v1/a/c/o', HTTPOk, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('200 OK', resp.status) + + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual(method, self.app.calls[0][0]) + actual_headers = self.app.headers[0] + self.assertIn('X-Backend-Etag-Is-At', actual_headers) + self.assertEqual( + 'X-Object-Sysmeta-Other-Etag,X-Object-Sysmeta-Crypto-Etag-Mac', + actual_headers['X-Backend-Etag-Is-At']) + actual_etags = set(actual_headers[match_header_name].split(', ')) + self.assertIn('"an etag"', actual_etags) + + def test_GET_if_match_with_existing_etag_is_at_header(self): + self._test_existing_etag_is_at_header('GET', 'If-Match') + + def test_HEAD_if_match_with_existing_etag_is_at_header(self): + self._test_existing_etag_is_at_header('HEAD', 'If-Match') + + def 
test_GET_if_none_match_with_existing_etag_is_at_header(self): + self._test_existing_etag_is_at_header('GET', 'If-None-Match') + + def test_HEAD_if_none_match_with_existing_etag_is_at_header(self): + self._test_existing_etag_is_at_header('HEAD', 'If-None-Match') + + def _test_etag_is_at_not_duplicated(self, method): + # verify only one occurrence of X-Object-Sysmeta-Crypto-Etag-Mac in + # X-Backend-Etag-Is-At + key = fetch_crypto_keys()['object'] + env = {CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + req = Request.blank( + '/v1/a/c/o', environ=env, method=method, + headers={'If-Match': '"an etag"', + 'If-None-Match': '"another etag"'}) + self.app.register(method, '/v1/a/c/o', HTTPOk, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('200 OK', resp.status) + + self.assertEqual(1, len(self.app.calls), self.app.calls) + self.assertEqual(method, self.app.calls[0][0]) + actual_headers = self.app.headers[0] + self.assertIn('X-Backend-Etag-Is-At', actual_headers) + self.assertEqual('X-Object-Sysmeta-Crypto-Etag-Mac', + actual_headers['X-Backend-Etag-Is-At']) + + self.assertIn('"%s"' % base64.b64encode( + hmac.new(key, 'an etag', hashlib.sha256).digest()), + actual_headers['If-Match']) + self.assertIn('"another etag"', actual_headers['If-None-Match']) + self.assertIn('"%s"' % base64.b64encode( + hmac.new(key, 'another etag', hashlib.sha256).digest()), + actual_headers['If-None-Match']) + + def test_GET_etag_is_at_not_duplicated(self): + self._test_etag_is_at_not_duplicated('GET') + + def test_HEAD_etag_is_at_not_duplicated(self): + self._test_etag_is_at_not_duplicated('HEAD') + + def test_PUT_response_inconsistent_etag_is_not_replaced(self): + # if response is success but etag does not match the ciphertext md5 + # then verify that we do *not* replace it with the plaintext etag + body = 'FAKE APP' + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, + {'Etag': 'not the ciphertext etag'}) + resp = req.get_response(self.encrypter) + self.assertEqual('201 Created', resp.status) + self.assertEqual('not the ciphertext etag', resp.headers['Etag']) + + def test_PUT_multiseg_no_client_etag(self): + body_key = os.urandom(32) + chunks = ['some', 'chunks', 'of data'] + body = ''.join(chunks) + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'wsgi.input': FileLikeIter(chunks)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + + with mock.patch( + 'swift.common.middleware.crypto.crypto_utils.' 
+ 'Crypto.create_random_key', + lambda *args: body_key): + resp = req.get_response(self.encrypter) + + self.assertEqual('201 Created', resp.status) + # verify object is encrypted by getting direct from the app + get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) + self.assertEqual(encrypt(body, body_key, FAKE_IV), + get_req.get_response(self.app).body) + + def test_PUT_multiseg_good_client_etag(self): + body_key = os.urandom(32) + chunks = ['some', 'chunks', 'of data'] + body = ''.join(chunks) + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'wsgi.input': FileLikeIter(chunks)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body)), + 'Etag': md5hex(body)} + req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + + with mock.patch( + 'swift.common.middleware.crypto.crypto_utils.' + 'Crypto.create_random_key', + lambda *args: body_key): + resp = req.get_response(self.encrypter) + + self.assertEqual('201 Created', resp.status) + # verify object is encrypted by getting direct from the app + get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) + self.assertEqual(encrypt(body, body_key, FAKE_IV), + get_req.get_response(self.app).body) + + def test_PUT_multiseg_bad_client_etag(self): + chunks = ['some', 'chunks', 'of data'] + body = ''.join(chunks) + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: fetch_crypto_keys, + 'wsgi.input': FileLikeIter(chunks)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body)), + 'Etag': 'badclientetag'} + req = Request.blank('/v1/a/c/o', environ=env, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('422 Unprocessable Entity', resp.status) + + def test_PUT_missing_key_callback(self): + body = 'FAKE APP' + env = {'REQUEST_METHOD': 'PUT'} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + resp = req.get_response(self.encrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertIn('missing callback', + self.encrypter.logger.get_lines_for_level('error')[0]) + self.assertEqual('Unable to retrieve encryption keys.', resp.body) + + def test_PUT_error_in_key_callback(self): + def raise_exc(): + raise Exception('Testing') + + body = 'FAKE APP' + env = {'REQUEST_METHOD': 'PUT', + CRYPTO_KEY_CALLBACK: raise_exc} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + resp = req.get_response(self.encrypter) + self.assertEqual('500 Internal Error', resp.status) + self.assertIn('from callback: Testing', + self.encrypter.logger.get_lines_for_level('error')[0]) + self.assertEqual('Unable to retrieve encryption keys.', resp.body) + + def test_PUT_encryption_override(self): + # set crypto override to disable encryption. 
+ # simulate another middleware wanting to set footers + other_footers = { + 'Etag': 'other etag', + 'X-Object-Sysmeta-Other': 'other sysmeta', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'other override'} + body = 'FAKE APP' + env = {'REQUEST_METHOD': 'PUT', + 'swift.crypto.override': True, + 'swift.callback.update_footers': + lambda footers: footers.update(other_footers)} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {}) + resp = req.get_response(self.encrypter) + self.assertEqual('201 Created', resp.status) + + # verify that other middleware's footers made it to app + req_hdrs = self.app.headers[0] + for k, v in other_footers.items(): + self.assertEqual(v, req_hdrs[k]) + + # verify object is NOT encrypted by getting direct from the app + get_req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) + self.assertEqual(body, get_req.get_response(self.app).body) + + def _test_constraints_checking(self, method): + # verify that the check_metadata function is called on PUT and POST + body = 'FAKE APP' + env = {'REQUEST_METHOD': method, + CRYPTO_KEY_CALLBACK: fetch_crypto_keys} + hdrs = {'content-type': 'text/plain', + 'content-length': str(len(body))} + req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs) + mocked_func = 'swift.common.middleware.crypto.encrypter.check_metadata' + with mock.patch(mocked_func) as mocked: + mocked.side_effect = [HTTPBadRequest('testing')] + resp = req.get_response(self.encrypter) + self.assertEqual('400 Bad Request', resp.status) + self.assertEqual(1, mocked.call_count) + mocked.assert_called_once_with(mock.ANY, 'object') + self.assertEqual(req.headers, + mocked.call_args_list[0][0][0].headers) + + def test_PUT_constraints_checking(self): + self._test_constraints_checking('PUT') + + def test_POST_constraints_checking(self): + self._test_constraints_checking('POST') + + def test_config_true_value_on_disable_encryption(self): + app = FakeSwift() + self.assertFalse(encrypter.Encrypter(app, {}).disable_encryption) + for val in ('true', '1', 'yes', 'on', 't', 'y'): + app = encrypter.Encrypter(app, + {'disable_encryption': val}) + self.assertTrue(app.disable_encryption) + + def test_PUT_app_exception(self): + app = encrypter.Encrypter(FakeAppThatExcepts(HTTPException), {}) + req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'}) + with self.assertRaises(HTTPException) as catcher: + req.get_response(app) + self.assertEqual(FakeAppThatExcepts.MESSAGE, catcher.exception.body) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/common/middleware/crypto/test_encryption.py b/test/unit/common/middleware/crypto/test_encryption.py new file mode 100644 index 0000000000..e984a5f0ae --- /dev/null +++ b/test/unit/common/middleware/crypto/test_encryption.py @@ -0,0 +1,631 @@ +# Copyright (c) 2015-2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import base64
+import hashlib
+import hmac
+import json
+import unittest
+import uuid
+
+from swift.common import storage_policy, constraints
+from swift.common.middleware import copy
+from swift.common.middleware import crypto
+from swift.common.middleware.crypto import keymaster
+from swift.common.middleware.crypto.crypto_utils import (
+    load_crypto_meta, Crypto)
+from swift.common.ring import Ring
+from swift.common.swob import Request
+from swift.obj import diskfile
+
+from test.unit import FakeLogger
+from test.unit.common.middleware.crypto.crypto_helpers import (
+    md5hex, encrypt, TEST_KEYMASTER_CONF)
+from test.unit.helpers import setup_servers, teardown_servers
+
+
+class TestCryptoPipelineChanges(unittest.TestCase):
+    # Tests the consequences of the crypto middleware being in or out of the
+    # pipeline, or having encryption disabled, for PUT/GET requests on the
+    # same object. Uses real backend servers so that the handling of headers
+    # and sysmeta is verified all the way to the diskfile and back.
+    _test_context = None
+
+    @classmethod
+    def setUpClass(cls):
+        cls._test_context = setup_servers()
+        cls.proxy_app = cls._test_context["test_servers"][0]
+
+    @classmethod
+    def tearDownClass(cls):
+        if cls._test_context is not None:
+            teardown_servers(cls._test_context)
+            cls._test_context = None
+
+    def setUp(self):
+        self.plaintext = 'unencrypted body content'
+        self.plaintext_etag = md5hex(self.plaintext)
+        self._setup_crypto_app()
+
+    def _setup_crypto_app(self, disable_encryption=False):
+        # Set up a pipeline of crypto middleware ending in the proxy app so
+        # that tests can make requests to either the proxy server directly or
+        # via the crypto middleware. Make a fresh instance for each test to
+        # avoid any state coupling.
+ conf = {'disable_encryption': disable_encryption} + self.encryption = crypto.filter_factory(conf)(self.proxy_app) + self.km = keymaster.KeyMaster(self.encryption, TEST_KEYMASTER_CONF) + self.crypto_app = self.km # for clarity + + def _create_container(self, app, policy_name='one', container_path=None): + if not container_path: + # choose new container name so that the policy can be specified + self.container_name = uuid.uuid4().hex + self.container_path = 'http://foo:8080/v1/a/' + self.container_name + self.object_name = 'o' + self.object_path = self.container_path + '/' + self.object_name + container_path = self.container_path + req = Request.blank( + container_path, method='PUT', + headers={'X-Storage-Policy': policy_name}) + resp = req.get_response(app) + self.assertEqual('201 Created', resp.status) + # sanity check + req = Request.blank( + container_path, method='HEAD', + headers={'X-Storage-Policy': policy_name}) + resp = req.get_response(app) + self.assertEqual(policy_name, resp.headers['X-Storage-Policy']) + + def _put_object(self, app, body): + req = Request.blank(self.object_path, method='PUT', body=body, + headers={'Content-Type': 'application/test'}) + resp = req.get_response(app) + self.assertEqual('201 Created', resp.status) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + return resp + + def _post_object(self, app): + req = Request.blank(self.object_path, method='POST', + headers={'Content-Type': 'application/test', + 'X-Object-Meta-Fruit': 'Kiwi'}) + resp = req.get_response(app) + self.assertEqual('202 Accepted', resp.status) + return resp + + def _copy_object(self, app, destination): + req = Request.blank(self.object_path, method='COPY', + headers={'Destination': destination}) + resp = req.get_response(app) + self.assertEqual('201 Created', resp.status) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + return resp + + def _check_GET_and_HEAD(self, app, object_path=None): + object_path = object_path or self.object_path + req = Request.blank(object_path, method='GET') + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertEqual(self.plaintext, resp.body) + self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit']) + + req = Request.blank(object_path, method='HEAD') + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertEqual('', resp.body) + self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit']) + + def _check_match_requests(self, method, app, object_path=None): + object_path = object_path or self.object_path + # verify conditional match requests + expected_body = self.plaintext if method == 'GET' else '' + + # If-Match matches + req = Request.blank(object_path, method=method, + headers={'If-Match': '"%s"' % self.plaintext_etag}) + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertEqual(expected_body, resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit']) + + # If-Match wildcard + req = Request.blank(object_path, method=method, + headers={'If-Match': '*'}) + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertEqual(expected_body, resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit']) + + # If-Match does not match + req = Request.blank(object_path, method=method, + headers={'If-Match': '"not the etag"'}) + resp = req.get_response(app) + 
self.assertEqual('412 Precondition Failed', resp.status) + self.assertEqual('', resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + + # If-None-Match matches + req = Request.blank( + object_path, method=method, + headers={'If-None-Match': '"%s"' % self.plaintext_etag}) + resp = req.get_response(app) + self.assertEqual('304 Not Modified', resp.status) + self.assertEqual('', resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + + # If-None-Match wildcard + req = Request.blank(object_path, method=method, + headers={'If-None-Match': '*'}) + resp = req.get_response(app) + self.assertEqual('304 Not Modified', resp.status) + self.assertEqual('', resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + + # If-None-Match does not match + req = Request.blank(object_path, method=method, + headers={'If-None-Match': '"not the etag"'}) + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + self.assertEqual(expected_body, resp.body) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + self.assertEqual('Kiwi', resp.headers['X-Object-Meta-Fruit']) + + def _check_listing(self, app, expect_mismatch=False, container_path=None): + container_path = container_path or self.container_path + req = Request.blank( + container_path, method='GET', query_string='format=json') + resp = req.get_response(app) + self.assertEqual('200 OK', resp.status) + listing = json.loads(resp.body) + self.assertEqual(1, len(listing)) + self.assertEqual(self.object_name, listing[0]['name']) + self.assertEqual(len(self.plaintext), listing[0]['bytes']) + if expect_mismatch: + self.assertNotEqual(self.plaintext_etag, listing[0]['hash']) + else: + self.assertEqual(self.plaintext_etag, listing[0]['hash']) + + def test_write_with_crypto_and_override_headers(self): + self._create_container(self.proxy_app, policy_name='one') + + def verify_overrides(): + # verify object sysmeta + req = Request.blank( + self.object_path, method='GET') + resp = req.get_response(self.crypto_app) + for k, v in overrides.items(): + self.assertIn(k, resp.headers) + self.assertEqual(overrides[k], resp.headers[k]) + + # check container listing + req = Request.blank( + self.container_path, method='GET', query_string='format=json') + resp = req.get_response(self.crypto_app) + self.assertEqual('200 OK', resp.status) + listing = json.loads(resp.body) + self.assertEqual(1, len(listing)) + self.assertEqual('o', listing[0]['name']) + self.assertEqual( + overrides['x-object-sysmeta-container-update-override-size'], + str(listing[0]['bytes'])) + self.assertEqual( + overrides['x-object-sysmeta-container-update-override-etag'], + listing[0]['hash']) + + # include overrides in headers + overrides = {'x-object-sysmeta-container-update-override-etag': 'foo', + 'x-object-sysmeta-container-update-override-size': + str(len(self.plaintext) + 1)} + req = Request.blank(self.object_path, method='PUT', + body=self.plaintext, headers=overrides.copy()) + resp = req.get_response(self.crypto_app) + self.assertEqual('201 Created', resp.status) + self.assertEqual(self.plaintext_etag, resp.headers['Etag']) + verify_overrides() + + # include overrides in footers + overrides = {'x-object-sysmeta-container-update-override-etag': 'bar', + 'x-object-sysmeta-container-update-override-size': + str(len(self.plaintext) + 2)} + + def callback(footers): + footers.update(overrides) + + req = Request.blank( + self.object_path, method='PUT', body=self.plaintext) + req.environ['swift.callback.update_footers'] = callback + 
resp = req.get_response(self.crypto_app)
+        self.assertEqual('201 Created', resp.status)
+        self.assertEqual(self.plaintext_etag, resp.headers['Etag'])
+        verify_overrides()
+
+    def test_write_with_crypto_read_with_crypto(self):
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_listing(self.crypto_app)
+
+    def test_write_with_crypto_read_with_crypto_ec(self):
+        self._create_container(self.proxy_app, policy_name='ec')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_listing(self.crypto_app)
+
+    def test_put_without_crypto_post_with_crypto_read_with_crypto(self):
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.proxy_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_listing(self.crypto_app)
+
+    def test_write_without_crypto_read_with_crypto(self):
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.proxy_app, self.plaintext)
+        self._post_object(self.proxy_app)
+        self._check_GET_and_HEAD(self.proxy_app)  # sanity check
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_match_requests('GET', self.proxy_app)  # sanity check
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.proxy_app)  # sanity check
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_listing(self.crypto_app)
+
+    def test_write_without_crypto_read_with_crypto_ec(self):
+        self._create_container(self.proxy_app, policy_name='ec')
+        self._put_object(self.proxy_app, self.plaintext)
+        self._post_object(self.proxy_app)
+        self._check_GET_and_HEAD(self.proxy_app)  # sanity check
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_match_requests('GET', self.proxy_app)  # sanity check
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.proxy_app)  # sanity check
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_listing(self.crypto_app)
+
+    def _check_GET_and_HEAD_not_decrypted(self, app):
+        req = Request.blank(self.object_path, method='GET')
+        resp = req.get_response(app)
+        self.assertEqual('200 OK', resp.status)
+        self.assertNotEqual(self.plaintext, resp.body)
+        self.assertEqual('%s' % len(self.plaintext),
+                         resp.headers['Content-Length'])
+        self.assertNotEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
+
+        req = Request.blank(self.object_path, method='HEAD')
+        resp = req.get_response(app)
+        self.assertEqual('200 OK', resp.status)
+        self.assertEqual('', resp.body)
+        self.assertNotEqual('Kiwi', resp.headers['X-Object-Meta-Fruit'])
+
+    def test_write_with_crypto_read_without_crypto(self):
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)  # sanity check
+        # without crypto middleware, GET and HEAD return ciphertext
+        self._check_GET_and_HEAD_not_decrypted(self.proxy_app)
+        self._check_listing(self.proxy_app, expect_mismatch=True)
+
+    def test_write_with_crypto_read_without_crypto_ec(self):
+        self._create_container(self.proxy_app, policy_name='ec')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)  # sanity check
+        # without crypto middleware, GET and HEAD return ciphertext
+        self._check_GET_and_HEAD_not_decrypted(self.proxy_app)
+        self._check_listing(self.proxy_app, expect_mismatch=True)
+
+    def test_disable_encryption_config_option(self):
+        # check that when disable_encryption = true, the object is not
+        # encrypted
+        self._setup_crypto_app(disable_encryption=True)
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)
+        # check as if no crypto middleware exists
+        self._check_GET_and_HEAD(self.proxy_app)
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.crypto_app)
+        self._check_match_requests('GET', self.proxy_app)
+        self._check_match_requests('HEAD', self.proxy_app)
+
+    def test_write_with_crypto_read_with_disable_encryption_conf(self):
+        self._create_container(self.proxy_app, policy_name='one')
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+        self._check_GET_and_HEAD(self.crypto_app)  # sanity check
+        # turn on disable_encryption config option
+        self._setup_crypto_app(disable_encryption=True)
+        # GET and HEAD of encrypted objects should still work
+        self._check_GET_and_HEAD(self.crypto_app)
+        self._check_listing(self.crypto_app, expect_mismatch=False)
+        self._check_match_requests('GET', self.crypto_app)
+        self._check_match_requests('HEAD', self.crypto_app)
+
+    def _test_ondisk_data_after_write_with_crypto(self, policy_name):
+        policy = storage_policy.POLICIES.get_by_name(policy_name)
+        self._create_container(self.proxy_app, policy_name=policy_name)
+        self._put_object(self.crypto_app, self.plaintext)
+        self._post_object(self.crypto_app)
+
+        # Verify that the container listing etag is encrypted by making a
+        # direct GET to the container server. We can use any server for all
+        # nodes since they all share the same devices dir.
+        cont_server = self._test_context['test_servers'][3]
+        cont_ring = Ring(self._test_context['testdir'], ring_name='container')
+        part, nodes = cont_ring.get_nodes('a', self.container_name)
+        for node in nodes:
+            req = Request.blank('/%s/%s/a/%s'
+                                % (node['device'], part, self.container_name),
+                                method='GET', query_string='format=json')
+            resp = req.get_response(cont_server)
+            listing = json.loads(resp.body)
+            # sanity checks...
+ self.assertEqual(1, len(listing)) + self.assertEqual('o', listing[0]['name']) + self.assertEqual('application/test', listing[0]['content_type']) + # verify encrypted etag value + parts = listing[0]['hash'].rsplit(';', 1) + crypto_meta_param = parts[1].strip() + crypto_meta = crypto_meta_param[len('swift_meta='):] + listing_etag_iv = load_crypto_meta(crypto_meta)['iv'] + exp_enc_listing_etag = base64.b64encode( + encrypt(self.plaintext_etag, + self.km.create_key('/a/%s' % self.container_name), + listing_etag_iv)) + self.assertEqual(exp_enc_listing_etag, parts[0]) + + # Verify diskfile data and metadata is encrypted + ring_object = self.proxy_app.get_object_ring(int(policy)) + partition, nodes = ring_object.get_nodes('a', self.container_name, 'o') + conf = {'devices': self._test_context["testdir"], + 'mount_check': 'false'} + df_mgr = diskfile.DiskFileRouter(conf, FakeLogger())[policy] + ondisk_data = [] + exp_enc_body = None + for node_index, node in enumerate(nodes): + df = df_mgr.get_diskfile(node['device'], partition, + 'a', self.container_name, 'o', + policy=policy) + with df.open(): + meta = df.get_metadata() + contents = ''.join(df.reader()) + metadata = dict((k.lower(), v) for k, v in meta.items()) + # verify on disk data - body + body_iv = load_crypto_meta( + metadata['x-object-sysmeta-crypto-body-meta'])['iv'] + body_key_meta = load_crypto_meta( + metadata['x-object-sysmeta-crypto-body-meta'])['body_key'] + obj_key = self.km.create_key('/a/%s/o' % self.container_name) + body_key = Crypto().unwrap_key(obj_key, body_key_meta) + exp_enc_body = encrypt(self.plaintext, body_key, body_iv) + ondisk_data.append((node, contents)) + + # verify on disk user metadata + enc_val, meta = metadata[ + 'x-object-transient-sysmeta-crypto-meta-fruit'].split(';') + meta = meta.strip()[len('swift_meta='):] + metadata_iv = load_crypto_meta(meta)['iv'] + exp_enc_meta = base64.b64encode(encrypt('Kiwi', obj_key, + metadata_iv)) + self.assertEqual(exp_enc_meta, enc_val) + self.assertNotIn('x-object-meta-fruit', metadata) + + self.assertIn( + 'x-object-transient-sysmeta-crypto-meta', metadata) + meta = load_crypto_meta( + metadata['x-object-transient-sysmeta-crypto-meta']) + self.assertIn('key_id', meta) + self.assertIn('path', meta['key_id']) + self.assertEqual( + '/a/%s/%s' % (self.container_name, self.object_name), + meta['key_id']['path']) + self.assertIn('v', meta['key_id']) + self.assertEqual('1', meta['key_id']['v']) + self.assertIn('cipher', meta) + self.assertEqual(Crypto.cipher, meta['cipher']) + + # verify etag + actual_enc_etag, _junk, actual_etag_meta = metadata[ + 'x-object-sysmeta-crypto-etag'].partition('; swift_meta=') + etag_iv = load_crypto_meta(actual_etag_meta)['iv'] + exp_enc_etag = base64.b64encode(encrypt(self.plaintext_etag, + obj_key, etag_iv)) + self.assertEqual(exp_enc_etag, actual_enc_etag) + + # verify etag hmac + exp_etag_mac = hmac.new( + obj_key, self.plaintext_etag, digestmod=hashlib.sha256) + exp_etag_mac = base64.b64encode(exp_etag_mac.digest()) + self.assertEqual(exp_etag_mac, + metadata['x-object-sysmeta-crypto-etag-mac']) + + # verify etag override for container updates + override = 'x-object-sysmeta-container-update-override-etag' + parts = metadata[override].rsplit(';', 1) + crypto_meta_param = parts[1].strip() + crypto_meta = crypto_meta_param[len('swift_meta='):] + listing_etag_iv = load_crypto_meta(crypto_meta)['iv'] + cont_key = self.km.create_key('/a/%s' % self.container_name) + exp_enc_listing_etag = base64.b64encode( + encrypt(self.plaintext_etag, cont_key, + 
listing_etag_iv)) + self.assertEqual(exp_enc_listing_etag, parts[0]) + + self._check_GET_and_HEAD(self.crypto_app) + return exp_enc_body, ondisk_data + + def test_ondisk_data_after_write_with_crypto(self): + exp_body, ondisk_data = self._test_ondisk_data_after_write_with_crypto( + policy_name='one') + for node, body in ondisk_data: + self.assertEqual(exp_body, body) + + def test_ondisk_data_after_write_with_crypto_ec(self): + exp_body, ondisk_data = self._test_ondisk_data_after_write_with_crypto( + policy_name='ec') + policy = storage_policy.POLICIES.get_by_name('ec') + for frag_selection in (ondisk_data[:2], ondisk_data[1:]): + frags = [frag for node, frag in frag_selection] + self.assertEqual(exp_body, policy.pyeclib_driver.decode(frags)) + + def _test_copy_encrypted_to_encrypted( + self, src_policy_name, dest_policy_name): + self._create_container(self.proxy_app, policy_name=src_policy_name) + self._put_object(self.crypto_app, self.plaintext) + self._post_object(self.crypto_app) + + copy_crypto_app = copy.ServerSideCopyMiddleware(self.crypto_app, {}) + + dest_container = uuid.uuid4().hex + dest_container_path = 'http://localhost:8080/v1/a/' + dest_container + self._create_container(copy_crypto_app, policy_name=dest_policy_name, + container_path=dest_container_path) + dest_obj_path = dest_container_path + '/o' + dest = '/%s/%s' % (dest_container, 'o') + self._copy_object(copy_crypto_app, dest) + + self._check_GET_and_HEAD(copy_crypto_app, object_path=dest_obj_path) + self._check_listing( + copy_crypto_app, container_path=dest_container_path) + self._check_match_requests( + 'GET', copy_crypto_app, object_path=dest_obj_path) + self._check_match_requests( + 'HEAD', copy_crypto_app, object_path=dest_obj_path) + + def test_copy_encrypted_to_encrypted(self): + self._test_copy_encrypted_to_encrypted('ec', 'ec') + self._test_copy_encrypted_to_encrypted('one', 'ec') + self._test_copy_encrypted_to_encrypted('ec', 'one') + self._test_copy_encrypted_to_encrypted('one', 'one') + + def _test_copy_encrypted_to_unencrypted( + self, src_policy_name, dest_policy_name): + self._create_container(self.proxy_app, policy_name=src_policy_name) + self._put_object(self.crypto_app, self.plaintext) + self._post_object(self.crypto_app) + + # make a pipeline with encryption disabled, use it to copy object + self._setup_crypto_app(disable_encryption=True) + copy_app = copy.ServerSideCopyMiddleware(self.crypto_app, {}) + + dest_container = uuid.uuid4().hex + dest_container_path = 'http://localhost:8080/v1/a/' + dest_container + self._create_container(self.crypto_app, policy_name=dest_policy_name, + container_path=dest_container_path) + dest_obj_path = dest_container_path + '/o' + dest = '/%s/%s' % (dest_container, 'o') + self._copy_object(copy_app, dest) + + self._check_GET_and_HEAD(copy_app, object_path=dest_obj_path) + self._check_GET_and_HEAD(self.proxy_app, object_path=dest_obj_path) + self._check_listing(copy_app, container_path=dest_container_path) + self._check_listing(self.proxy_app, container_path=dest_container_path) + self._check_match_requests( + 'GET', self.proxy_app, object_path=dest_obj_path) + self._check_match_requests( + 'HEAD', self.proxy_app, object_path=dest_obj_path) + + def test_copy_encrypted_to_unencrypted(self): + self._test_copy_encrypted_to_unencrypted('ec', 'ec') + self._test_copy_encrypted_to_unencrypted('one', 'ec') + self._test_copy_encrypted_to_unencrypted('ec', 'one') + self._test_copy_encrypted_to_unencrypted('one', 'one') + + def _test_copy_unencrypted_to_encrypted( + self, 
src_policy_name, dest_policy_name): + self._create_container(self.proxy_app, policy_name=src_policy_name) + self._put_object(self.proxy_app, self.plaintext) + self._post_object(self.proxy_app) + + copy_crypto_app = copy.ServerSideCopyMiddleware(self.crypto_app, {}) + + dest_container = uuid.uuid4().hex + dest_container_path = 'http://localhost:8080/v1/a/' + dest_container + self._create_container(copy_crypto_app, policy_name=dest_policy_name, + container_path=dest_container_path) + dest_obj_path = dest_container_path + '/o' + dest = '/%s/%s' % (dest_container, 'o') + self._copy_object(copy_crypto_app, dest) + + self._check_GET_and_HEAD(copy_crypto_app, object_path=dest_obj_path) + self._check_listing( + copy_crypto_app, container_path=dest_container_path) + self._check_match_requests( + 'GET', copy_crypto_app, object_path=dest_obj_path) + self._check_match_requests( + 'HEAD', copy_crypto_app, object_path=dest_obj_path) + + def test_copy_unencrypted_to_encrypted(self): + self._test_copy_unencrypted_to_encrypted('ec', 'ec') + self._test_copy_unencrypted_to_encrypted('one', 'ec') + self._test_copy_unencrypted_to_encrypted('ec', 'one') + self._test_copy_unencrypted_to_encrypted('one', 'one') + + def test_crypto_max_length_path(self): + # the path is stashed in the key_id in crypto meta; check that a long + # path is ok + self.container_name = 'c' * constraints.MAX_CONTAINER_NAME_LENGTH + self.object_name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH + self.container_path = 'http://foo:8080/v1/a/' + self.container_name + self.object_path = '%s/%s' % (self.container_path, self.object_name) + + self._create_container(self.proxy_app, policy_name='one', + container_path=self.container_path) + + self._put_object(self.crypto_app, self.plaintext) + self._post_object(self.crypto_app) + self._check_GET_and_HEAD(self.crypto_app) + self._check_match_requests('GET', self.crypto_app) + self._check_match_requests('HEAD', self.crypto_app) + self._check_listing(self.crypto_app) + + def test_crypto_UTF8_path(self): + # check that UTF8 path is ok + self.container_name = self.object_name = u'\u010brypto' + self.container_path = 'http://foo:8080/v1/a/' + self.container_name + self.object_path = '%s/%s' % (self.container_path, self.object_name) + + self._create_container(self.proxy_app, policy_name='one', + container_path=self.container_path) + + self._put_object(self.crypto_app, self.plaintext) + self._post_object(self.crypto_app) + self._check_GET_and_HEAD(self.crypto_app) + self._check_match_requests('GET', self.crypto_app) + self._check_match_requests('HEAD', self.crypto_app) + self._check_listing(self.crypto_app) + + +class TestCryptoPipelineChangesFastPost(TestCryptoPipelineChanges): + @classmethod + def setUpClass(cls): + # set proxy config to use fast post + extra_conf = {'object_post_as_copy': 'False'} + cls._test_context = setup_servers(extra_conf=extra_conf) + cls.proxy_app = cls._test_context["test_servers"][0] + + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/common/middleware/crypto/test_keymaster.py b/test/unit/common/middleware/crypto/test_keymaster.py new file mode 100644 index 0000000000..2f8a1db458 --- /dev/null +++ b/test/unit/common/middleware/crypto/test_keymaster.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import os + +import unittest + +from swift.common import swob +from swift.common.middleware.crypto import keymaster +from swift.common.middleware.crypto.crypto_utils import CRYPTO_KEY_CALLBACK +from swift.common.swob import Request +from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts +from test.unit.common.middleware.crypto.crypto_helpers import ( + TEST_KEYMASTER_CONF) + + +def capture_start_response(): + calls = [] + + def start_response(*args): + calls.append(args) + return start_response, calls + + +class TestKeymaster(unittest.TestCase): + + def setUp(self): + super(TestKeymaster, self).setUp() + self.swift = FakeSwift() + self.app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF) + + def test_object_path(self): + self.verify_keys_for_path( + '/a/c/o', expected_keys=('object', 'container')) + + def test_container_path(self): + self.verify_keys_for_path( + '/a/c', expected_keys=('container',)) + + def verify_keys_for_path(self, path, expected_keys): + put_keys = None + for method, resp_class, status in ( + ('PUT', swob.HTTPCreated, '201'), + ('POST', swob.HTTPAccepted, '202'), + ('GET', swob.HTTPOk, '200'), + ('HEAD', swob.HTTPNoContent, '204')): + resp_headers = {} + self.swift.register( + method, '/v1' + path, resp_class, resp_headers, '') + req = Request.blank( + '/v1' + path, environ={'REQUEST_METHOD': method}) + start_response, calls = capture_start_response() + self.app(req.environ, start_response) + self.assertEqual(1, len(calls)) + self.assertTrue(calls[0][0].startswith(status)) + self.assertNotIn('swift.crypto.override', req.environ) + self.assertIn(CRYPTO_KEY_CALLBACK, req.environ, + '%s not set in env' % CRYPTO_KEY_CALLBACK) + keys = req.environ.get(CRYPTO_KEY_CALLBACK)() + self.assertIn('id', keys) + id = keys.pop('id') + self.assertEqual(path, id['path']) + self.assertEqual('1', id['v']) + self.assertListEqual(sorted(expected_keys), sorted(keys.keys()), + '%s %s got keys %r, but expected %r' + % (method, path, keys.keys(), expected_keys)) + if put_keys is not None: + # check all key sets were consistent for this path + self.assertDictEqual(put_keys, keys) + else: + put_keys = keys + return put_keys + + def test_key_uniqueness(self): + # a rudimentary check that different keys are made for different paths + ref_path_parts = ('a1', 'c1', 'o1') + path = '/' + '/'.join(ref_path_parts) + ref_keys = self.verify_keys_for_path( + path, expected_keys=('object', 'container')) + + # for same path and for each differing path check that keys are unique + # when path to object or container is unique and vice-versa + for path_parts in [(a, c, o) for a in ('a1', 'a2') + for c in ('c1', 'c2') + for o in ('o1', 'o2')]: + path = '/' + '/'.join(path_parts) + keys = self.verify_keys_for_path( + path, expected_keys=('object', 'container')) + # object keys should only be equal when complete paths are equal + self.assertEqual(path_parts == ref_path_parts, + keys['object'] == ref_keys['object'], + 'Path %s keys:\n%s\npath %s keys\n%s' % + (ref_path_parts, ref_keys, path_parts, keys)) + # container keys should only be equal when paths to 
container are + # equal + self.assertEqual(path_parts[:2] == ref_path_parts[:2], + keys['container'] == ref_keys['container'], + 'Path %s keys:\n%s\npath %s keys\n%s' % + (ref_path_parts, ref_keys, path_parts, keys)) + + def test_filter(self): + factory = keymaster.filter_factory(TEST_KEYMASTER_CONF) + self.assertTrue(callable(factory)) + self.assertTrue(callable(factory(self.swift))) + + def test_app_exception(self): + app = keymaster.KeyMaster( + FakeAppThatExcepts(), TEST_KEYMASTER_CONF) + req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'}) + start_response, _ = capture_start_response() + self.assertRaises(Exception, app, req.environ, start_response) + + def test_root_secret(self): + for secret in (os.urandom(32), os.urandom(33), os.urandom(50)): + encoded_secret = base64.b64encode(secret) + try: + app = keymaster.KeyMaster( + self.swift, {'encryption_root_secret': + bytes(encoded_secret)}) + self.assertEqual(secret, app.root_secret) + except AssertionError as err: + self.fail(str(err) + ' for secret %s' % secret) + try: + app = keymaster.KeyMaster( + self.swift, {'encryption_root_secret': + unicode(encoded_secret)}) + self.assertEqual(secret, app.root_secret) + except AssertionError as err: + self.fail(str(err) + ' for secret %s' % secret) + + def test_invalid_root_secret(self): + for secret in (bytes(base64.b64encode(os.urandom(31))), # too short + unicode(base64.b64encode(os.urandom(31))), + u'?' * 44, b'?' * 44, # not base64 + u'a' * 45, b'a' * 45, # bad padding + 99, None): + conf = {'encryption_root_secret': secret} + try: + with self.assertRaises(ValueError) as err: + keymaster.KeyMaster(self.swift, conf) + self.assertEqual( + 'encryption_root_secret option in proxy-server.conf ' + 'must be a base64 encoding of at least 32 raw bytes', + err.exception.message) + except AssertionError as err: + self.fail(str(err) + ' for conf %s' % str(conf)) + + +if __name__ == '__main__': + unittest.main() From f36bc513c5e0029b90207d7a2dec81965eed8300 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 7 Jun 2016 15:08:54 +0100 Subject: [PATCH 032/156] Add encryption overview doc Include a note in container-sync docs pointing to specific configuration needed to be compatible with encryption. Also remove the sample encryption root secret from proxy-server.conf-sample and in-process test setup. Remove encryption middleware from the default proxy pipeline. Change-Id: Ibceac485813f3ac819a53e644995749735592a55 --- doc/source/development_middleware.rst | 2 + doc/source/index.rst | 1 + doc/source/overview_container_sync.rst | 6 + doc/source/overview_encryption.rst | 472 +++++++++++++++++++++++++ etc/proxy-server.conf-sample | 8 +- test/functional/__init__.py | 6 - 6 files changed, 483 insertions(+), 12 deletions(-) create mode 100644 doc/source/overview_encryption.rst diff --git a/doc/source/development_middleware.rst b/doc/source/development_middleware.rst index b6dac83289..6fef62e22e 100644 --- a/doc/source/development_middleware.rst +++ b/doc/source/development_middleware.rst @@ -281,6 +281,8 @@ individual items of user metadata is not supported. In cases where middleware needs to store its own metadata with a POST request, it may use Object Transient Sysmeta. +.. 
_transient_sysmeta: + ^^^^^^^^^^^^^^^^^^^^^^^^ Object Transient-Sysmeta ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/index.rst b/doc/source/index.rst index c648d0af4f..4784d91337 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -57,6 +57,7 @@ Overview and Concepts cors crossdomain overview_erasure_code + overview_encryption overview_backing_store ring_background associated_projects diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index e69ec2743e..9947fc3d10 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -18,6 +18,12 @@ synchronization key. your manifest file and your segment files are synced if they happen to be in different containers. +.. note:: + + If you are using encryption middleware in the cluster from which objects + are being synced, then you should follow the instructions to configure + :ref:`container_sync_client_config` to be compatible with encryption. + -------------------------- Configuring Container Sync -------------------------- diff --git a/doc/source/overview_encryption.rst b/doc/source/overview_encryption.rst new file mode 100644 index 0000000000..6aa24636c6 --- /dev/null +++ b/doc/source/overview_encryption.rst @@ -0,0 +1,472 @@ +================= +Object Encryption +================= + +Swift supports the optional encryption of object data at rest on storage nodes. +The encryption of object data is intended to mitigate the risk of users' data +being read if an unauthorised party were to gain physical access to a disk. + +.. note:: + + Swift's data-at-rest encryption accepts plaintext object data from the + client, encrypts it in the cluster, and stores the encrypted data. This + protects object data from inadvertently being exposed if a data drive + leaves the Swift cluster. If a user wishes to ensure that the plaintext + data is always encrypted while in transit and in storage, it is strongly + recommended that the data be encrypted before sending it to the Swift + cluster. Encrypting on the client side is the only way to ensure that the + data is fully encrypted for its entire lifecycle. + +Encryption of data at rest is implemented by middleware that may be included in +the proxy server WSGI pipeline. The feature is internal to a Swift cluster and +not exposed through the API. Clients are unaware that data is encrypted by this +feature internally to the Swift service; internally encrypted data should never +be returned to clients via the Swift API. + +The following data are encrypted while at rest in Swift: + +* Object content i.e. the content of an object PUT request's body +* The entity tag (ETag) of objects that have non-zero content +* All custom user object metadata values i.e. metadata sent using + X-Object-Meta- prefixed headers with PUT or POST requests + +Any data or metadata not included in the list above are not encrypted, +including: + +* Account, container and object names +* Account and container custom user metadata values +* All custom user metadata names +* Object Content-Type values +* Object size +* System metadata + +.. note:: + + This feature is intended to provide `confidentiality` of data that is at + rest i.e. to protect user data from being read by an attacker that gains + access to disks on which object data is stored. + + This feature is not intended to prevent undetectable `modification` + of user data at rest. 
+
+    This feature is not intended to protect against an attacker that gains
+    access to Swift's internal network connections, or gains access to key
+    material or is able to modify the Swift code running on Swift nodes.
+
+.. _encryption_deployment:
+
+------------------------
+Deployment and operation
+------------------------
+
+Encryption is deployed by adding two middleware filters to the proxy
+server WSGI pipeline and including their respective filter configuration
+sections in the `proxy-server.conf` file. :ref:`Additional steps
+<container_sync_client_config>` are required if the container sync feature is
+being used.
+
+The `keymaster` and `encryption` middleware filters must be to the right of all
+other middleware in the pipeline apart from the final proxy-logging middleware,
+and in the order shown in this example::
+
+    <other middleware> keymaster encryption proxy-logging proxy-server
+
+    [filter:keymaster]
+    use = egg:swift#keymaster
+    encryption_root_secret = your_secret
+
+    [filter:encryption]
+    use = egg:swift#encryption
+    # disable_encryption = False
+
+See the `proxy-server.conf-sample` file for further details on the middleware
+configuration options.
+
+The keymaster config option ``encryption_root_secret`` MUST be set to a value
+of at least 44 valid base-64 characters before the middleware is used and
+should be consistent across all proxy servers. The minimum length of 44 has
+been chosen because it is the length of a base-64 encoded 32 byte value.
+
+.. note::
+
+    The ``encryption_root_secret`` option holds the master secret key used for
+    encryption. The security of all encrypted data critically depends on this
+    key and it should therefore be set to a high-entropy value. For example, a
+    suitable ``encryption_root_secret`` may be obtained by base-64 encoding a
+    32 byte (or longer) value generated by a cryptographically secure random
+    number generator.
+
+    The ``encryption_root_secret`` value is necessary to recover any encrypted
+    data from the storage system, and therefore, it must be guarded against
+    accidental loss. Its value (and consequently, the proxy-server.conf file)
+    should not be stored on any disk that is in any account, container or
+    object ring.
+
+One method for generating a suitable value for ``encryption_root_secret`` is to
+use the ``openssl`` command line tool::
+
+    openssl rand -base64 32
+
+Once deployed, the encryption filter will by default encrypt object data and
+metadata when handling PUT and POST requests and decrypt object data and
+metadata when handling GET and HEAD requests. COPY requests are transformed
+into GET and PUT requests by the :ref:`copy` middleware before reaching the
+encryption middleware and as a result object data and metadata is decrypted and
+re-encrypted when copied.
+
+Upgrade Considerations
+----------------------
+
+When upgrading an existing cluster to deploy encryption, the following sequence
+of steps is recommended:
+
+#. Upgrade all object servers.
+#. Upgrade all proxy servers.
+#. Add keymaster and encryption middlewares to every proxy server's middleware
+   pipeline with the encryption ``disable_encryption`` option set to ``True``
+   and the keymaster ``encryption_root_secret`` value set as described above.
+#. If required, follow the steps for :ref:`container_sync_client_config`.
+#. Finally, change the encryption ``disable_encryption`` option to ``False``.
+
+Objects that existed in the cluster prior to the keymaster and encryption
+middlewares being deployed are still readable with GET and HEAD requests.
The
+content of those objects will not be encrypted unless they are written again by
+a PUT or COPY request. Any user metadata of those objects will not be encrypted
+unless it is written again by a PUT, POST or COPY request.
+
+Disabling Encryption
+--------------------
+
+Once deployed, the keymaster and encryption middlewares should not be removed
+from the pipeline. To do so will cause encrypted object data and/or metadata to
+be returned in response to GET or HEAD requests for objects that were
+previously encrypted.
+
+Encryption of inbound object data may be disabled by setting the encryption
+``disable_encryption`` option to ``True``, in which case existing encrypted
+objects will remain encrypted but new data written with PUT, POST or COPY
+requests will not be encrypted. The keymaster and encryption middlewares should
+remain in the pipeline even when encryption of new objects is not required. The
+encryption middleware is needed to handle GET requests for objects that may
+have been previously encrypted. The keymaster is needed to provide keys for
+those requests.
+
+.. _container_sync_client_config:
+
+Container sync configuration
+----------------------------
+
+If container sync is being used then the keymaster and encryption middlewares
+must be added to the container sync internal client pipeline. The following
+configuration steps are required:
+
+#. Create a custom internal client configuration file for container sync (if
+   one is not already in use) based on the sample file
+   `internal-client.conf-sample`. For example, copy
+   `internal-client.conf-sample` to `/etc/swift/container-sync-client.conf`.
+#. Modify this file to include the middlewares in the pipeline in
+   the same way as described above for the proxy server.
+#. Modify the container-sync section of all container server config files to
+   point to this internal client config file using the
+   ``internal_client_conf_path`` option. For example::
+
+       internal_client_conf_path = /etc/swift/container-sync-client.conf
+
+.. note::
+
+    The ``encryption_root_secret`` value is necessary to recover any encrypted
+    data from the storage system, and therefore, it must be guarded against
+    accidental loss. Its value (and consequently, the custom internal client
+    configuration file) should not be stored on any disk that is in any
+    account, container or object ring.
+
+.. note::
+
+    These container sync configuration steps will be necessary for container
+    sync probe tests to pass if the encryption middlewares are included in the
+    proxy pipeline of a test cluster.
+
+--------------
+Implementation
+--------------
+
+Encryption scheme
+-----------------
+
+Plaintext data is encrypted to ciphertext using the AES cipher with 256-bit
+keys implemented by the python `cryptography package
+<https://pypi.python.org/pypi/cryptography>`_. The cipher is used in counter
+(CTR) mode so that any byte or range of bytes in the ciphertext may be
+decrypted independently of any other bytes in the ciphertext. This enables very
+simple handling of ranged GETs.
+
+In general an item of unencrypted data, ``plaintext``, is transformed to an
+item of encrypted data, ``ciphertext``::
+
+    ciphertext = E(plaintext, k, iv)
+
+where ``E`` is the encryption function, ``k`` is an encryption key and ``iv``
+is a unique initialization vector (IV) chosen for each encryption context. For
+example, the object body is one encryption context with a randomly chosen IV.
+The IV is stored as metadata of the encrypted item so that it is available for
+decryption::
+
+    plaintext = D(ciphertext, k, iv)
+
+where ``D`` is the decryption function.
+
+The implementation of CTR mode follows `NIST SP800-38A
+<http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf>`_, and the
+full IV passed to the encryption or decryption function serves as the initial
+counter block.
+
+In general any encrypted item has accompanying crypto-metadata that describes
+the IV and the cipher algorithm used for the encryption::
+
+    crypto_metadata = {"iv": <16 byte value>,
+                       "cipher": "AES_CTR_256"}
+
+This crypto-metadata is stored either with the ciphertext (for user
+metadata and etags) or as a separate header (for object bodies).
+
+Key management
+--------------
+
+A keymaster middleware is responsible for providing the keys required for each
+encryption and decryption operation. Two keys are required when handling object
+requests: a `container key` that is uniquely associated with the container path
+and an `object key` that is uniquely associated with the object path. These
+keys are made available to the encryption middleware via a callback function
+that the keymaster installs in the WSGI request environ.
+
+The current keymaster implementation derives container and object keys from the
+``encryption_root_secret`` in a deterministic way by constructing a SHA256
+HMAC using the ``encryption_root_secret`` as a key and the container or object
+path as a message, for example::
+
+    object_key = HMAC(encryption_root_secret, "/a/c/o")
+
+Other strategies for providing object and container keys may be employed by
+future implementations of alternative keymaster middleware.
+
+During each object PUT, a random key is generated to encrypt the object body.
+This random key is then encrypted using the object key provided by the
+keymaster. This makes it safe to store the encrypted random key alongside the
+encrypted object data and metadata.
+
+This process of `key wrapping` enables more efficient re-keying events when the
+object key may need to be replaced and consequently any data encrypted using
+that key must be re-encrypted. Key wrapping minimizes the amount of data
+encrypted using those keys to just other randomly chosen keys which can be
+re-wrapped efficiently without needing to re-encrypt the larger amounts of data
+that were encrypted using the random keys.
+
+.. note::
+
+    Re-keying is not currently implemented. Key wrapping is implemented
+    in anticipation of future re-keying operations.
+
+
+Encryption middleware
+---------------------
+
+The encryption middleware is composed of an `encrypter` component and a
+`decrypter` component.
+
+Encrypter operation
+^^^^^^^^^^^^^^^^^^^
+
+Custom user metadata
+++++++++++++++++++++
+
+The encrypter encrypts each item of custom user metadata using the object key
+provided by the keymaster and an IV that is randomly chosen for that metadata
+item. The encrypted values are stored as :ref:`transient_sysmeta` with
+associated crypto-metadata appended to the encrypted value. For example::
+
+    X-Object-Meta-Private1: value1
+    X-Object-Meta-Private2: value2
+
+are transformed to::
+
+    X-Object-Transient-Sysmeta-Crypto-Meta-Private1:
+        E(value1, object_key, header_iv_1); swift_meta={"iv": header_iv_1,
+        "cipher": "AES_CTR_256"}
+    X-Object-Transient-Sysmeta-Crypto-Meta-Private2:
+        E(value2, object_key, header_iv_2); swift_meta={"iv": header_iv_2,
+        "cipher": "AES_CTR_256"}
+
+The unencrypted custom user metadata headers are removed.
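+
+As a concrete illustration, a single metadata value could be encrypted in the
+manner described above using the python ``cryptography`` package roughly as
+follows. This is a minimal sketch rather than Swift's actual implementation;
+the helper name ``encrypt_metadata_value`` is hypothetical and the exact
+serialization of the appended crypto-metadata differs in practice::
+
+    import base64
+    import json
+    import os
+
+    from cryptography.hazmat.backends import default_backend
+    from cryptography.hazmat.primitives.ciphers import (
+        Cipher, algorithms, modes)
+
+    def encrypt_metadata_value(plaintext_value, object_key):
+        # a fresh 16 byte IV is randomly chosen for each metadata item
+        header_iv = os.urandom(16)
+        encryptor = Cipher(algorithms.AES(object_key), modes.CTR(header_iv),
+                           backend=default_backend()).encryptor()
+        ciphertext = encryptor.update(plaintext_value) + encryptor.finalize()
+        # crypto-metadata describing the IV and cipher is appended to the
+        # encrypted value so that it is available for decryption
+        swift_meta = json.dumps({'iv': base64.b64encode(header_iv),
+                                 'cipher': 'AES_CTR_256'})
+        return '%s; swift_meta=%s' % (base64.b64encode(ciphertext),
+                                      swift_meta)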
+ +Object body ++++++++++++ + +Encryption of an object body is performed using a randomly chosen body key +and a randomly chosen IV:: + + body_ciphertext = E(body_plaintext, body_key, body_iv) + +The body_key is wrapped using the object key provided by the keymaster and a +randomly chosen IV:: + + wrapped_body_key = E(body_key, object_key, body_key_iv) + +The encrypter stores the associated crypto-metadata in a system metadata +header:: + + X-Object-Sysmeta-Crypto-Body-Meta: + {"iv": body_iv, + "cipher": "AES_CTR_256", + "body_key": {"key": wrapped_body_key, + "iv": body_key_iv}} + +Note that in this case there is an extra item of crypto-metadata which stores +the wrapped body key and its IV. + +Entity tag +++++++++++ + +While encrypting the object body the encrypter also calculates the ETag (md5 +digest) of the plaintext body. This value is encrypted using the object key +provided by the keymaster and a randomly chosen IV, and saved as an item of +system metadata, with associated crypto-metadata appended to the encrypted +value:: + + X-Object-Sysmeta-Crypto-Etag: + E(md5(plaintext), object_key, etag_iv); swift_meta={"iv": etag_iv, + "cipher": "AES_CTR_256"} + +The encrypter also forces an encrypted version of the plaintext ETag to be sent +with container updates by adding an update override header to the PUT request. +The associated crypto-metadata is appended to the encrypted ETag value of this +update override header:: + + X-Object-Sysmeta-Container-Update-Override-Etag: + E(md5(plaintext), container_key, override_etag_iv); + meta={"iv": override_etag_iv, "cipher": "AES_CTR_256"} + +The container key is used for this encryption so that the decrypter is able +to decrypt the ETags in container listings when handling a container request, +since object keys may not be available in that context. + +Since the plaintext ETag value is only known once the encrypter has completed +processing the entire object body, the ``X-Object-Sysmeta-Crypto-Etag`` and +``X-Object-Sysmeta-Container-Update-Override-Etag`` headers are sent after the +encrypted object body using the proxy server's support for request footers. + +.. _conditional_requests: + +Conditional Requests +++++++++++++++++++++ + +In general, an object server evaluates conditional requests with +``If[-None]-Match`` headers by comparing values listed in an +``If[-None]-Match`` header against the ETag that is stored in the object +metadata. This is not possible when the ETag stored in object metadata has been +encrypted. The encrypter therefore calculates an HMAC using the object key and +the ETag while handling object PUT requests, and stores this under the metadata +key ``X-Object-Sysmeta-Crypto-Etag-Mac``:: + + X-Object-Sysmeta-Crypto-Etag-Mac: HMAC(object_key, md5(plaintext)) + +Like other ETag-related metadata, this is sent after the encrypted object body +using the proxy server's support for request footers. + +The encrypter similarly calculates an HMAC for each ETag value included in +``If[-None]-Match`` headers of conditional GET or HEAD requests, and appends +these to the ``If[-None]-Match`` header. The encrypter also sets the +``X-Backend-Etag-Is-At`` header to point to the previously stored +``X-Object-Sysmeta-Crypto-Etag-Mac`` metadata so that the object server +evaluates the conditional request by comparing the HMAC values included in the +``If[-None]-Match`` with the value stored under +``X-Object-Sysmeta-Crypto-Etag-Mac``. 
+For example, given a conditional request
+with header::
+
+    If-Match: match_etag
+
+the encrypter would transform the request headers to include::
+
+    If-Match: match_etag,HMAC(object_key, match_etag)
+    X-Backend-Etag-Is-At: X-Object-Sysmeta-Crypto-Etag-Mac
+
+This enables the object server to perform an encrypted comparison to check
+whether the ETags match, without leaking the ETag itself or leaking information
+about the object body.
+
+Decrypter operation
+^^^^^^^^^^^^^^^^^^^
+
+For each GET or HEAD request to an object, the decrypter inspects the response
+for encrypted items (revealed by crypto-metadata headers), and if any are
+discovered then it will:
+
+#. Fetch the object and container keys from the keymaster via its callback
+#. Decrypt the ``X-Object-Sysmeta-Crypto-Etag`` value
+#. Decrypt the ``X-Object-Sysmeta-Container-Update-Override-Etag`` value
+#. Decrypt metadata header values using the object key
+#. Decrypt the wrapped body key found in ``X-Object-Sysmeta-Crypto-Body-Meta``
+#. Decrypt the body using the body key
+
+For each GET request to a container that would include ETags in its response
+body, the decrypter will:
+
+#. GET the response body with the container listing
+#. Fetch the container key from the keymaster via its callback
+#. Decrypt any encrypted ETag entries in the container listing using the
+   container key
+
+
+Impact on other Swift services and features
+-------------------------------------------
+
+Encryption has no impact on :ref:`versioned_writes` other than that any
+previously unencrypted objects will be encrypted as they are copied to or from
+the versions container. Keymaster and encryption middlewares should be placed
+after ``versioned_writes`` in the proxy server pipeline, as described in
+:ref:`encryption_deployment`.
+
+`Container Sync` uses an internal client to GET objects that are to be sync'd.
+This internal client must be configured to use the keymaster and encryption
+middlewares as described :ref:`above `.
+
+Encryption has no impact on the `object-auditor` service. Because the ETag
+header saved with the object at rest is the md5 sum of the encrypted object
+body, the auditor will verify that encrypted data is valid.
+
+Encryption has no impact on the `object-expirer` service. ``X-Delete-At`` and
+``X-Delete-After`` headers are not encrypted.
+
+Encryption has no impact on the `object-replicator` and `object-reconstructor`
+services. These services are unaware of the object or EC fragment data being
+encrypted.
+
+Encryption has no impact on the `container-reconciler` service. The
+`container-reconciler` uses an internal client to move objects between
+different policy rings. The destination object has the same URL as the source
+object and the object is moved without re-encryption.
+
+
+Considerations for developers
+-----------------------------
+
+Developers should be aware that keymaster and encryption middlewares rely on
+the path of an object remaining unchanged. The included keymaster derives keys
+for containers and objects based on their paths and the
+``encryption_root_secret``. The keymaster does not rely on object metadata to
+inform its generation of keys for GET and HEAD requests because when handling
+:ref:`conditional_requests` it is required to provide the object key before any
+metadata has been read from the object.
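+
+For illustration, the path-based key derivation described above can be
+sketched with just the Python standard library. This is a simplified sketch,
+and the hypothetical helper name ``derive_key`` is not the keymaster's actual
+API::
+
+    import hashlib
+    import hmac
+
+    def derive_key(encryption_root_secret, path):
+        # path is e.g. '/a/c' for a container key or '/a/c/o' for an
+        # object key; the path is the only per-item input
+        return hmac.new(encryption_root_secret, path,
+                        digestmod=hashlib.sha256).digest()
+
+Because the path is the only per-item input, relocating an object to a
+different path changes the keys that would be derived for it.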
+ +Developers should therefore give careful consideration to any new features that +would relocate object data and metadata within a Swift cluster by means that do +not cause the object data and metadata to pass through the encryption +middlewares in the proxy pipeline and be re-encrypted. + +The crypto-metadata associated with each encrypted item does include some +`key_id` metadata that is provided by the keymaster and contains the path used +to derive keys. This `key_id` metadata is persisted in anticipation of future +scenarios when it may be necessary to decrypt an object that has been relocated +without re-encrypting, in which case the metadata could be used to derive the +keys that were used for encryption. However, this alone is not sufficient to +handle conditional requests and to decrypt container listings where objects +have been relocated, and further work will be required to solve those issues. diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index aebb872787..517a9c29ad 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -79,7 +79,7 @@ bind_port = 8080 [pipeline:main] # This sample pipeline uses tempauth and is used for SAIO dev work and # testing. See below for a pipeline using keystone. -pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes keymaster encryption proxy-logging proxy-server +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server # The following pipeline shows keystone integration. Comment out the one # above and uncomment this one. Additional steps for integrating keystone are @@ -781,11 +781,7 @@ use = egg:swift#keymaster # be obtained by base-64 encoding a 32 byte (or longer) value generated by a # cryptographically secure random number generator. Changing the root secret is # likely to result in data loss. -# TODO - STOP SETTING THIS DEFAULT! This is only here while work -# continues on the feature/crypto branch. Later, this will be added -# to the devstack proxy-config so that gate tests can pass. 
-# base64 encoding of "dontEverUseThisIn_PRODUCTION_xxxxxxxxxxxxxxx" -encryption_root_secret = ZG9udEV2ZXJVc2VUaGlzSW5fUFJPRFVDVElPTl94eHh4eHh4eHh4eHh4eHg= +encryption_root_secret = changeme [filter:encryption] use = egg:swift#encryption diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 0bf324f85d..52be849bfa 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -361,12 +361,6 @@ def in_process_setup(the_object_server=object_server): 'allow_account_management': 'true', 'account_autocreate': 'true', 'allow_versions': 'True', - # TODO - Remove encryption_root_secret - this is only necessary while - # encryption middleware is in the default proxy pipeline in - # proxy-server.conf-sample - # base64 encoding of "dontEverUseThisIn_PRODUCTION_xxxxxxxxxxxxxxx" - 'encryption_root_secret': - 'ZG9udEV2ZXJVc2VUaGlzSW5fUFJPRFVDVElPTl94eHh4eHh4eHh4eHh4eHg=', # Below are values used by the functional test framework, as well as # by the various in-process swift servers 'auth_host': '127.0.0.1', From c84a3c4d967a2951da50b395445e4a282b9debc4 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 30 Jun 2016 15:28:24 -0700 Subject: [PATCH 033/156] Stop digging for publicly_accessible ourselves Also, make request method calling in obj.server consistent with change 3944d8 to account & container Change-Id: I893d77a06793a5eeafac203a45971e96425afb96 Related-Change: I2f7586f96b41a97e6ae254efc83218b3b5c6cc9e --- swift/obj/server.py | 3 +-- swift/proxy/server.py | 10 +++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/swift/obj/server.py b/swift/obj/server.py index b9c8616124..8967684b35 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -1034,8 +1034,7 @@ class ObjectController(BaseStorageServer): if req.method not in self.allowed_methods: res = HTTPMethodNotAllowed() else: - method = getattr(self, req.method) - res = method(req) + res = getattr(self, req.method)(req) except DiskFileCollision: res = HTTPForbidden(request=req) except HTTPException as error_response: diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 99b99afd54..9b89498c53 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -383,11 +383,11 @@ class Application(object): controller.trans_id = req.environ['swift.trans_id'] self.logger.client_ip = get_remote_client(req) - handler = getattr(controller, req.method, None) - if not getattr(handler, 'publicly_accessible', False): - allowed_methods = getattr(controller, 'allowed_methods', set()) - return HTTPMethodNotAllowed( - request=req, headers={'Allow': ', '.join(allowed_methods)}) + if req.method not in controller.allowed_methods: + return HTTPMethodNotAllowed(request=req, headers={ + 'Allow': ', '.join(controller.allowed_methods)}) + handler = getattr(controller, req.method) + old_authorize = None if 'swift.authorize' in req.environ: # We call authorize before the handler, always. 
If authorized, From f9d5a8683d0b5204157c5d97aa039056612d4347 Mon Sep 17 00:00:00 2001 From: yuyafei Date: Tue, 5 Jul 2016 16:42:00 +0800 Subject: [PATCH 034/156] Remove white space between print and () TrivialFix Change-Id: I0dca3493d43ee8642ae6d2f55597013eef261026 --- swift/common/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/common/manager.py b/swift/common/manager.py index 2cc764493c..a19ade2215 100644 --- a/swift/common/manager.py +++ b/swift/common/manager.py @@ -556,7 +556,7 @@ class Server(object): pids = {} for pid_file, pid in self.iter_pid_files(**kwargs): if not pid: # Catches None and 0 - print (_('Removing pid file %s with invalid pid') % pid_file) + print(_('Removing pid file %s with invalid pid') % pid_file) remove_file(pid_file) continue try: From d60662b2eb46cdd5d5ec591435d11beafb66ca01 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 30 Jun 2016 14:32:35 -0700 Subject: [PATCH 035/156] Only use Timeout if we don't have a final_resp I'm sure the Timeout context manager is relatively cheap, but it can't be free. Change-Id: I71c0c5944ec372e9b983021dd024de0c5aa1ded2 --- swift/proxy/controllers/obj.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 962cf1bec6..54ee996aa7 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -1471,15 +1471,15 @@ class Putter(object): :returns: HTTPResponse :raises: Timeout if the response took too long """ - with Timeout(timeout): - # don't do this update of self.resp if the Expect response during - # conenct() was actually a final response - if not self.final_resp: + # don't do this update of self.resp if the Expect response during + # connect() was actually a final response + if not self.final_resp: + with Timeout(timeout): if informational: self.resp = self.conn.getexpect() else: self.resp = self.conn.getresponse() - return self.resp + return self.resp def spawn_sender_greenthread(self, pool, queue_depth, write_timeout, exception_handler): From 401311ff6a2ab93aca772e6be027e7098be5a906 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 5 Jul 2016 10:24:02 -0700 Subject: [PATCH 036/156] Have py35 tox env match py34 Very few of our tests can actually be run under py3. The ones that can should still pass on py35, though. Change-Id: Iaf9aaa296e3b21aa0ee513c479a50f3796787f32 --- tox.ini | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tox.ini b/tox.ini index 5becdf2a9e..a8bd6ecf92 100644 --- a/tox.ini +++ b/tox.ini @@ -29,6 +29,10 @@ setenv = VIRTUAL_ENV={envdir} commands = nosetests test/unit/common/test_exceptions.py +[testenv:py35] +commands = + nosetests test/unit/common/test_exceptions.py + [testenv:pep8] basepython = python2.7 commands = From 7568ea5dd9f5a728f482076eeb8e612c31c88ce6 Mon Sep 17 00:00:00 2001 From: Brian Cline Date: Wed, 29 Jun 2016 03:32:09 -0500 Subject: [PATCH 037/156] Prevent down nodes failing PUTs with non-ascii obj names On an object PUT with a non-ascii name, if we hit some kind of exception speaking to only one object-server of the N we try to connect to, we try to log it -- but this causes an exception when interpolating the UTF-8 encoded path iff the message template is unicode. Since this is essentially an exception within an exception handler, this fails the entire request with a 500 error -- even though the other nodes may have been just fine. This occurs before it attempts a handoff node. 
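
For illustration only (this snippet is not part of the patch), the
failure mode under Python 2 is an exception raised while logging the
original exception; with a hypothetical unicode template:

    >>> msg = u'ERROR with Object server re: %(info)s'
    >>> msg % {'info': u'/a/c/\u9409'.encode('utf-8')}
    Traceback (most recent call last):
      ...
    UnicodeDecodeError: 'ascii' codec can't decode byte 0xe9 in position 5:
    ordinal not in range(128)
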
The simplest way to reproduce this is by running func tests against a small cluster where one of the object nodes is not running N.B. The locale of the node does not matter because the message template is interpolated with node/device data from the Ring which is always unicode because of json. This includes an update to the FakeRing used by unittest infrastructure to ensure that the FakeRing devices make a round-trip through json to ensure consistent typing with real Rings. Change-Id: Icb7284eb5abc9869c1620ee6366817112d8e5587 Closes-bug: #1597210 --- swift/proxy/server.py | 2 +- test/unit/__init__.py | 6 ++++-- test/unit/proxy/controllers/test_obj.py | 14 ++++++++++++++ 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 9b89498c53..98072aaf26 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -535,7 +535,7 @@ class Application(object): ' re: %(info)s'), {'type': typ, 'ip': node['ip'], 'port': node['port'], 'device': node['device'], - 'info': additional_info}, + 'info': additional_info.decode('utf-8')}, **kwargs) def modify_wsgi_pipeline(self, pipe): diff --git a/test/unit/__init__.py b/test/unit/__init__.py index acc3c8612f..97df0004b2 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -32,6 +32,7 @@ import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree +import json from swift.common.utils import Timestamp, NOTICE @@ -223,7 +224,8 @@ class FakeRing(Ring): for x in range(self.replicas): ip = '10.0.0.%s' % x port = self._base_port + x - self._devs.append({ + # round trip through json to ensure unicode like real rings + self._devs.append(json.loads(json.dumps({ 'ip': ip, 'replication_ip': ip, 'port': port, @@ -232,7 +234,7 @@ class FakeRing(Ring): 'zone': x % 3, 'region': x % 2, 'id': x, - }) + }))) @property def replica_count(self): diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 4495fb0c68..e24d152d26 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -856,6 +856,20 @@ class TestReplicatedObjController(BaseObjectControllerMixin, node_error_count(self.app, object_ring.devs[1]), self.app.error_suppression_limit + 1) + def test_PUT_connect_exception_with_unicode_path_and_locale(self): + expected = 201 + statuses = ( + Exception('Connection refused: Please insert ten dollars'), + 201, 201) + + req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89', + method='PUT', + body='life is utf-gr8') + with set_http_connect(*statuses): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, expected) + def test_PUT_error_during_transfer_data(self): class FakeReader(object): def read(self, size): From 3781843cb3b32598cf66a5455d9c61c259fe77fa Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 1 Jul 2016 16:13:05 -0700 Subject: [PATCH 038/156] Fix gettext_ calls Change-Id: I80e7d204f78620c6eaf63bfad18588c4096529b8 --- swift/common/middleware/crypto/decrypter.py | 40 +++++++++++---------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py index 46e2dbc484..22a905984d 100644 --- a/swift/common/middleware/crypto/decrypter.py +++ b/swift/common/middleware/crypto/decrypter.py @@ -72,12 +72,15 @@ class BaseDecrypterContext(CryptoWSGIContext): return self.crypto.unwrap_key(wrapping_key, crypto_meta['body_key']) except KeyError as err: - err = 
'Missing %s' % err + self.logger.error( + _('Error decrypting %(resp_type)s: Missing %(key)s'), + {'resp_type': self.server_type, 'key': err}) except ValueError as err: - pass - msg = 'Error decrypting %s' % self.server_type - self.logger.error(_('%(msg)s: %(err)s') % {'msg': msg, 'err': err}) - raise HTTPInternalServerError(body=msg, content_type='text/plain') + self.logger.error(_('Error decrypting %(resp_type)s: %(reason)s'), + {'resp_type': self.server_type, 'reason': err}) + raise HTTPInternalServerError( + body='Error decrypting %s' % self.server_type, + content_type='text/plain') def decrypt_value_with_meta(self, value, key, required=False): """ @@ -162,11 +165,13 @@ class DecrypterObjContext(BaseDecrypterContext): """ try: return self.decrypt_value_with_meta(value, key, required) - except EncryptionException as e: - msg = "Error decrypting header" - self.logger.error(_("%(msg)s %(hdr)s: %(e)s") % - {'msg': msg, 'hdr': header, 'e': e}) - raise HTTPInternalServerError(body=msg, content_type='text/plain') + except EncryptionException as err: + self.logger.error( + _("Error decrypting header %(header)s: %(error)s"), + {'header': header, 'error': err}) + raise HTTPInternalServerError( + body='Error decrypting header', + content_type='text/plain') def decrypt_user_metadata(self, keys): prefix = get_object_transient_sysmeta('crypto-meta-') @@ -286,11 +291,9 @@ class DecrypterObjContext(BaseDecrypterContext): crypto_meta = self.get_crypto_meta( 'X-Object-Sysmeta-Crypto-Body-Meta') except EncryptionException as err: - msg = 'Error decrypting object' - self.logger.error(_('%(msg)s: %(err)s') % - {'msg': msg, 'err': err}) + self.logger.error(_('Error decrypting object: %s'), err) raise HTTPInternalServerError( - body=msg, content_type='text/plain') + body='Error decrypting object', content_type='text/plain') if crypto_meta: # 2xx response and encrypted body @@ -363,11 +366,12 @@ class DecrypterContContext(BaseDecrypterContext): try: app_resp = handler(keys['container'], app_resp) except EncryptionException as err: - msg = "Error decrypting container listing" - self.logger.error(_('%(msg)s: %(err)s') % - {'msg': msg, 'err': err}) + self.logger.error( + _("Error decrypting container listing: %s"), + err) raise HTTPInternalServerError( - body=msg, content_type='text/plain') + body='Error decrypting container listing', + content_type='text/plain') start_response(self._response_status, self._response_headers, From aa12901edadb5da21d96e36cee2e8f4f7b8273be Mon Sep 17 00:00:00 2001 From: Maria Malyarova Date: Tue, 5 Jul 2016 17:14:45 +0300 Subject: [PATCH 039/156] Added missing parenthesis in print calls Upd. Import print_function from __future__ TrivialFix Change-Id: Ibcda2c7e4ddbdff2420502dfd7d17db01f3c8056 --- bin/swift-dispersion-populate | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/bin/swift-dispersion-populate b/bin/swift-dispersion-populate index afdc7cd320..946be0faa0 100755 --- a/bin/swift-dispersion-populate +++ b/bin/swift-dispersion-populate @@ -13,6 +13,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import print_function import traceback from optparse import OptionParser @@ -34,7 +35,6 @@ from swift.common.ring import Ring from swift.common.utils import compute_eta, get_time_units, config_true_value from swift.common.storage_policy import POLICIES - insecure = False @@ -77,9 +77,9 @@ def report(success): return next_report = time() + 5 eta, eta_unit = compute_eta(begun, created, need_to_create) - print ('\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' - % (item_type, created, need_to_create, round(eta), eta_unit, - retries_done)), + print('\r\x1B[KCreating %s: %d of %d, %d%s left, %d retries' + % (item_type, created, need_to_create, round(eta), eta_unit, + retries_done), end='') stdout.flush() @@ -105,9 +105,9 @@ Usage: %%prog [options] [conf_file] help='Allow accessing insecure keystone server. ' 'The keystone\'s certificate will not be verified.') parser.add_option('--no-overlap', action='store_true', default=False, - help='No overlap of partitions if running populate \ + help="No overlap of partitions if running populate \ more than once. Will increase coverage by amount shown \ - in dispersion.conf file') + in dispersion.conf file") parser.add_option('-P', '--policy-name', dest='policy_name', help="Specify storage policy name") @@ -127,7 +127,7 @@ Usage: %%prog [options] [conf_file] policy = POLICIES.get_by_name(options.policy_name) if policy is None: exit('Unable to find policy: %s' % options.policy_name) - print 'Using storage policy: %s ' % policy.name + print('Using storage policy: %s ' % policy.name) swift_dir = conf.get('swift_dir', '/etc/swift') dispersion_coverage = float(conf.get('dispersion_coverage', 1)) @@ -213,15 +213,15 @@ Usage: %%prog [options] [conf_file] suffix += 1 coropool.waitall() elapsed, elapsed_unit = get_time_units(time() - begun) - print '\r\x1B[KCreated %d containers for dispersion reporting, ' \ - '%d%s, %d retries' % \ + print('\r\x1B[KCreated %d containers for dispersion reporting, ' + '%d%s, %d retries' % ((need_to_create - need_to_queue), round(elapsed), elapsed_unit, - retries_done) + retries_done)) if options.no_overlap: con_coverage = container_ring.partition_count - len(parts_left) - print '\r\x1B[KTotal container coverage is now %.2f%%.' % \ + print('\r\x1B[KTotal container coverage is now %.2f%%.' % ((float(con_coverage) / container_ring.partition_count - * 100)) + * 100))) stdout.flush() if object_populate: @@ -269,12 +269,12 @@ Usage: %%prog [options] [conf_file] suffix += 1 coropool.waitall() elapsed, elapsed_unit = get_time_units(time() - begun) - print '\r\x1B[KCreated %d objects for dispersion reporting, ' \ - '%d%s, %d retries' % \ + print('\r\x1B[KCreated %d objects for dispersion reporting, ' + '%d%s, %d retries' % ((need_to_create - need_to_queue), round(elapsed), elapsed_unit, - retries_done) + retries_done)) if options.no_overlap: obj_coverage = object_ring.partition_count - len(parts_left) - print '\r\x1B[KTotal object coverage is now %.2f%%.' % \ - ((float(obj_coverage) / object_ring.partition_count * 100)) + print('\r\x1B[KTotal object coverage is now %.2f%%.' 
% + ((float(obj_coverage) / object_ring.partition_count * 100))) stdout.flush() From a2afabf283811cdfd66b98add0df626008682ff8 Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Fri, 1 Jul 2016 09:54:32 -0700 Subject: [PATCH 040/156] Add basic functests for user object metadata Change-Id: I3c3b7d051a48449400e47e366461674bed9318c5 --- test/functional/test_object.py | 139 +++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) diff --git a/test/functional/test_object.py b/test/functional/test_object.py index e331e220f3..b2e375d879 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -133,6 +133,145 @@ class TestObject(unittest2.TestCase): resp.read() self.assertIn(resp.status, (204, 404)) + def test_metadata(self): + obj = 'test_metadata' + req_metadata = {} + + def put(url, token, parsed, conn): + headers = {'X-Auth-Token': token} + headers.update(req_metadata) + conn.request('PUT', '%s/%s/%s' % ( + parsed.path, self.container, obj + ), '', headers) + return check_response(conn) + + def get(url, token, parsed, conn): + conn.request( + 'GET', + '%s/%s/%s' % (parsed.path, self.container, obj), + '', + {'X-Auth-Token': token}) + return check_response(conn) + + def post(url, token, parsed, conn): + headers = {'X-Auth-Token': token} + headers.update(req_metadata) + conn.request('POST', '%s/%s/%s' % ( + parsed.path, self.container, obj + ), '', headers) + return check_response(conn) + + def metadata(resp): + metadata = {} + for k, v in resp.headers.items(): + if 'meta' in k.lower(): + metadata[k] = v + return metadata + + # empty put + resp = retry(put) + resp.read() + self.assertEqual(resp.status, 201) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), {}) + # empty post + resp = retry(post) + resp.read() + self.assertEqual(resp.status, 202) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), {}) + + # metadata put + req_metadata = { + 'x-object-meta-Color': 'blUe', + 'X-Object-Meta-food': 'PizZa', + } + resp = retry(put) + resp.read() + self.assertEqual(resp.status, 201) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Color': 'blUe', + 'X-Object-Meta-Food': 'PizZa', + }) + # metadata post + req_metadata = {'X-Object-Meta-color': 'oraNge'} + resp = retry(post) + resp.read() + self.assertEqual(resp.status, 202) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Color': 'oraNge' + }) + + # sysmeta put + req_metadata = { + 'X-Object-Meta-Color': 'Red', + 'X-Object-Sysmeta-Color': 'Green', + 'X-Object-Transient-Sysmeta-Color': 'Blue', + } + resp = retry(put) + resp.read() + self.assertEqual(resp.status, 201) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Color': 'Red', + }) + # sysmeta post + req_metadata = { + 'X-Object-Meta-Food': 'Burger', + 'X-Object-Meta-Animal': 'Cat', + 'X-Object-Sysmeta-Animal': 'Cow', + 'X-Object-Transient-Sysmeta-Food': 'Burger', + } + resp = retry(post) + resp.read() + self.assertEqual(resp.status, 202) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Food': 'Burger', + 'X-Object-Meta-Animal': 
'Cat', + }) + + # non-ascii put + req_metadata = { + 'X-Object-Meta-Foo': u'B\u00e2r', + } + resp = retry(put) + resp.read() + self.assertEqual(resp.status, 201) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Foo': 'B\xc3\xa2r', + }) + # non-ascii post + req_metadata = { + 'X-Object-Meta-Foo': u'B\u00e5z', + } + resp = retry(post) + resp.read() + self.assertEqual(resp.status, 202) + resp = retry(get) + self.assertEqual('', resp.read()) + self.assertEqual(resp.status, 200) + self.assertEqual(metadata(resp), { + 'X-Object-Meta-Foo': 'B\xc3\xa5z', + }) + def test_if_none_match(self): def put(url, token, parsed, conn): conn.request('PUT', '%s/%s/%s' % ( From bcd9a58d3c30ce554bfbe7fee5bc851e9feccaa0 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 6 Jul 2016 14:50:56 -0700 Subject: [PATCH 041/156] Fix X-*-Container-Update-Override-* header/footer precedence Previously, all footer overrides (whether from the X-Backend-* or X-Object-Sysmeta-* namespace) would take priority over any header override. However, middleware should be able to set a Sysmeta override without needing to worry about whether it's working with a replicated policy (where setting it in headers will suffice) or an EC policy (where it would need to install a footers callback). This could be mitigated by *always* installing a footer callback, but doing so would incur additional overhead that would otherwise be unnecessary. Change-Id: Idb40361ac72da51e1390dff690723dbc2c653a13 --- swift/obj/server.py | 16 ++++++-- test/unit/obj/test_server.py | 80 ++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+), 3 deletions(-) diff --git a/swift/obj/server.py b/swift/obj/server.py index 47248d952a..55aa3eb9cc 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -447,7 +447,8 @@ class ObjectController(BaseStorageServer): except ValueError: raise HTTPBadRequest("invalid JSON for footer doc") - def _check_container_override(self, update_headers, metadata): + def _check_container_override(self, update_headers, metadata, + footers=None): """ Applies any overrides to the container update headers. 
@@ -463,7 +464,10 @@ class ObjectController(BaseStorageServer):
 
         :param update_headers: a dict of headers used in the container update
         :param metadata: a dict that may container override items
+        :param footers: another dict that may contain override items, at a
+                        higher priority than metadata
         """
+        footers = footers or {}
         # the order of this list is significant:
         # x-object-sysmeta-container-update-override-* headers take precedence
         # over x-backend-container-update-override-* headers
@@ -474,6 +478,12 @@ class ObjectController(BaseStorageServer):
             if key.lower().startswith(override_prefix):
                 override = key.lower().replace(override_prefix, 'x-')
                 update_headers[override] = val
+            # apply x-backend-container-update-override* from footers *before*
+            # x-object-sysmeta-container-update-override-* from headers
+            for key, val in footers.items():
+                if key.lower().startswith(override_prefix):
+                    override = key.lower().replace(override_prefix, 'x-')
+                    update_headers[override] = val
 
     def _preserve_slo_manifest(self, update_metadata, orig_metadata):
         if 'X-Static-Large-Object' in orig_metadata:
@@ -829,8 +839,8 @@ class ObjectController(BaseStorageServer):
                 'x-timestamp': metadata['X-Timestamp'],
                 'x-etag': metadata['ETag']})
         # apply any container update header overrides sent with request
-        self._check_container_override(update_headers, request.headers)
-        self._check_container_override(update_headers, footer_meta)
+        self._check_container_override(update_headers, request.headers,
+                                       footer_meta)
         self.container_update(
             'PUT', account, container, obj, request, update_headers,
diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 79fc1b32f4..536a746037 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -1435,6 +1435,86 @@ class TestObjectController(unittest.TestCase):
             with open(objfile) as fh:
                 self.assertEqual(fh.read(), "obj data")
 
+    def test_PUT_container_override_etag_in_footer(self):
+        ts_iter = make_timestamp_iter()
+
+        def do_test(override_headers, override_footers):
+            def mock_container_update(ctlr, op, account, container, obj, req,
+                                      headers_out, objdevice, policy):
+                calls_made.append((headers_out, policy))
+            calls_made = []
+            ts_put = next(ts_iter)
+
+            headers = {
+                'X-Timestamp': ts_put.internal,
+                'Content-Type': 'text/plain',
+                'Transfer-Encoding': 'chunked',
+                'Etag': 'other-etag',
+                'X-Backend-Obj-Metadata-Footer': 'yes',
+                'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'}
+            headers.update(override_headers)
+            req = Request.blank(
+                '/sda1/p/a/c/o', headers=headers,
+                environ={'REQUEST_METHOD': 'PUT'})
+
+            obj_etag = md5("obj data").hexdigest()
+            footers = {'Etag': obj_etag}
+            footers.update(override_footers)
+            footer_meta = json.dumps(footers)
+            footer_meta_cksum = md5(footer_meta).hexdigest()
+
+            req.body = "\r\n".join((
+                "--boundary",
+                "",
+                "obj data",
+                "--boundary",
+                "Content-MD5: " + footer_meta_cksum,
+                "",
+                footer_meta,
+                "--boundary--",
+            ))
+            req.headers.pop("Content-Length", None)
+
+            with mock.patch(
+                    'swift.obj.server.ObjectController.container_update',
+                    mock_container_update):
+                resp = req.get_response(self.object_controller)
+            self.assertEqual(resp.etag, obj_etag)
+            self.assertEqual(resp.status_int, 201)
+            self.assertEqual(1, len(calls_made))
+            self.assertEqual({
+                'X-Size': str(len('obj data')),
+                'X-Etag': 'update-etag',
+                'X-Content-Type': 'text/plain',
+                'X-Timestamp': ts_put.internal,
+            }, calls_made[0][0])
+            self.assertEqual(POLICIES[0], calls_made[0][1])
+
+        # lone headers/footers work
+        do_test({'X-Backend-Container-Update-Override-Etag': 'update-etag'},
+                {})
+        do_test({},
+                {'X-Backend-Container-Update-Override-Etag': 'update-etag'})
+        do_test({'X-Object-Sysmeta-Container-Update-Override-Etag':
+                 'update-etag'},
+                {})
+        do_test({},
+                {'X-Object-Sysmeta-Container-Update-Override-Etag':
+                 'update-etag'})
+
+        # footer trumps header
+        do_test({'X-Backend-Container-Update-Override-Etag': 'ignored-etag'},
+                {'X-Backend-Container-Update-Override-Etag': 'update-etag'})
+        do_test({'X-Object-Sysmeta-Container-Update-Override-Etag':
+                 'ignored-etag'},
+                {'X-Object-Sysmeta-Container-Update-Override-Etag':
+                 'update-etag'})
+
+        # but sysmeta header trumps backend footer
+        do_test({'X-Object-Sysmeta-Container-Update-Override-Etag':
+                 'update-etag'},
+                {'X-Backend-Container-Update-Override-Etag': 'ignored-etag'})
+
     def test_PUT_etag_in_footer_mismatch(self):
         timestamp = normalize_timestamp(time())
         req = Request.blank(

From de51a6db36bcd965a7fa7505380100497ae3c66a Mon Sep 17 00:00:00 2001
From: yuyafei
Date: Tue, 5 Jul 2016 15:04:24 +0800
Subject: [PATCH 042/156] Add __ne__ built-in function

In Python 3 __ne__ by default delegates to __eq__ and inverts the
result, but in Python 2 they urge you to define __ne__ when you define
__eq__ for it to work properly [1]. There are no implied relationships
among the comparison operators. The truth of x==y does not imply that
x!=y is false. Accordingly, when defining __eq__(), one should also
define __ne__() so that the operators will behave as expected.
[1] https://docs.python.org/2/reference/datamodel.html#object.__ne__

Also remove class SubStringMatcher because this class isn't used
following commit 7035639dfd239b52d4ed46aae50f78d16ec8cbfe.

Change-Id: Ia2131f72a79226b0c2f3662b84661eb870d1d692
---
 swift/common/manager.py               | 3 +++
 test/unit/common/ring/test_builder.py | 7 -------
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/swift/common/manager.py b/swift/common/manager.py
index 2cc764493c..bd499de696 100644
--- a/swift/common/manager.py
+++ b/swift/common/manager.py
@@ -441,6 +441,9 @@ class Server(object):
         except AttributeError:
             return False
 
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
     def get_pid_file_name(self, conf_file):
         """Translate conf_file to a corresponding pid_file
 
diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py
index bdcd96f765..0ced592741 100644
--- a/test/unit/common/ring/test_builder.py
+++ b/test/unit/common/ring/test_builder.py
@@ -2154,13 +2154,6 @@ class TestRingBuilder(unittest.TestCase):
         # now double up a device assignment
         rb._replica2part2dev[1][200] = rb._replica2part2dev[2][200]
 
-        class SubStringMatcher(object):
-            def __init__(self, substr):
-                self.substr = substr
-
-            def __eq__(self, other):
-                return self.substr in other
-
         with self.assertRaises(exceptions.RingValidationError) as e:
             rb.validate()

From cf8b93918cc8a46ebd9035028847d8009d362794 Mon Sep 17 00:00:00 2001
From: Petr Kovar
Date: Wed, 15 Jun 2016 18:35:05 +0200
Subject: [PATCH 043/156] Add install-guide for swift

This adds swift-specific contents from the OpenStack Installation
Guide in the swift repo per [1]. A separate change will remove the
swift contents from the OpenStack Installation Guide for Newton
per [2].

The swift install-guide structure is based on the Install Guide
Cookiecutter [3].

Also adds tox.ini environment for install-guide and adds
openstackdocs-theme to test-requirements.txt.
[1] http://specs.openstack.org/openstack/docs-specs/specs/newton/project-specific-installguides.html [2] http://specs.openstack.org/openstack/docs-specs/specs/newton/installguide.html [3] http://git.openstack.org/cgit/openstack/installguide-cookiecutter/ Change-Id: I59b92eebaf5acc657b97bcf10d9ff2cf2db05885 Partially-Implements: blueprint projectspecificinstallguides Depends-On: Ifebc65b188c4f2ba35b61c0deae5ec24401df7f9 --- install-guide/source/conf.py | 300 ++++++++++++++++++ .../controller-common_prerequisites.txt | 116 +++++++ install-guide/source/controller-include.txt | 84 +++++ .../source/controller-install-debian.rst | 50 +++ .../source/controller-install-obs.rst | 45 +++ .../source/controller-install-rdo.rst | 50 +++ .../source/controller-install-ubuntu.rst | 52 +++ install-guide/source/controller-install.rst | 18 ++ .../source/finalize-installation-obs.rst | 80 +++++ .../source/finalize-installation-rdo.rst | 89 ++++++ .../finalize-installation-ubuntu-debian.rst | 80 +++++ .../source/finalize-installation.rst | 13 + install-guide/source/get_started.rst | 51 +++ install-guide/source/index.rst | 23 ++ install-guide/source/initial-rings.rst | 253 +++++++++++++++ install-guide/source/next-steps.rst | 10 + install-guide/source/storage-include1.txt | 41 +++ install-guide/source/storage-include2.txt | 41 +++ install-guide/source/storage-include3.txt | 42 +++ install-guide/source/storage-install-obs.rst | 139 ++++++++ install-guide/source/storage-install-rdo.rst | 155 +++++++++ .../source/storage-install-ubuntu-debian.rst | 159 ++++++++++ install-guide/source/storage-install.rst | 16 + install-guide/source/verify.rst | 96 ++++++ test-requirements.txt | 1 + tox.ini | 6 + 26 files changed, 2010 insertions(+) create mode 100644 install-guide/source/conf.py create mode 100644 install-guide/source/controller-common_prerequisites.txt create mode 100644 install-guide/source/controller-include.txt create mode 100644 install-guide/source/controller-install-debian.rst create mode 100644 install-guide/source/controller-install-obs.rst create mode 100644 install-guide/source/controller-install-rdo.rst create mode 100644 install-guide/source/controller-install-ubuntu.rst create mode 100644 install-guide/source/controller-install.rst create mode 100644 install-guide/source/finalize-installation-obs.rst create mode 100644 install-guide/source/finalize-installation-rdo.rst create mode 100644 install-guide/source/finalize-installation-ubuntu-debian.rst create mode 100644 install-guide/source/finalize-installation.rst create mode 100644 install-guide/source/get_started.rst create mode 100644 install-guide/source/index.rst create mode 100644 install-guide/source/initial-rings.rst create mode 100644 install-guide/source/next-steps.rst create mode 100644 install-guide/source/storage-include1.txt create mode 100644 install-guide/source/storage-include2.txt create mode 100644 install-guide/source/storage-include3.txt create mode 100644 install-guide/source/storage-install-obs.rst create mode 100644 install-guide/source/storage-install-rdo.rst create mode 100644 install-guide/source/storage-install-ubuntu-debian.rst create mode 100644 install-guide/source/storage-install.rst create mode 100644 install-guide/source/verify.rst diff --git a/install-guide/source/conf.py b/install-guide/source/conf.py new file mode 100644 index 0000000000..bcf546c929 --- /dev/null +++ b/install-guide/source/conf.py @@ -0,0 +1,300 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +# import sys + + +import openstackdocstheme + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder' +# extensions = + +# Add any paths that contain templates here, relative to this directory. +# templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Installation Guide for Object Storage Service' +bug_tag = u'install-guide' +copyright = u'2016, OpenStack contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1' + +# A few variables have to be set for the log-a-bug feature. +# giturl: The location of conf.py on Git. Must be set manually. +# gitsha: The SHA checksum of the bug description. Automatically extracted from git log. +# bug_tag: Tag for categorizing the bug. Must be set manually. +# These variables are passed to the logabug code via html_context. +giturl = u'http://git.openstack.org/cgit/openstack/swift/tree/install-guide/source' +git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '" +gitsha = os.popen(git_cmd).read().strip('\n') +html_context = {"gitsha": gitsha, "bug_tag": bug_tag, + "giturl": giturl, + "bug_project": "swift"} + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. 
+# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = [openstackdocstheme.get_html_theme_path()] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = [] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# So that we can enable "log-a-bug" links from each output HTML page, this +# variable must be set to a format that includes year, month, day, hours and +# minutes. +html_last_updated_fmt = '%Y-%m-%d %H:%M' + + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +html_show_sourcelink = False + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
+# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'install-guide' + +# If true, publish source files +html_copy_source = False + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'InstallGuide.tex', u'Install Guide', + u'OpenStack contributors', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'installguide', u'Install Guide', + [u'OpenStack contributors'], 1) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'InstallGuide', u'Install Guide', + u'OpenStack contributors', 'InstallGuide', + 'This guide shows OpenStack end users how to install ' + 'an OpenStack cloud.', 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. 
+# texinfo_no_detailmenu = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] + +# -- Options for PDF output -------------------------------------------------- + +pdf_documents = [ + ('index', u'InstallGuide', u'Install Guide', + u'OpenStack contributors') +] diff --git a/install-guide/source/controller-common_prerequisites.txt b/install-guide/source/controller-common_prerequisites.txt new file mode 100644 index 0000000000..a87e7aad31 --- /dev/null +++ b/install-guide/source/controller-common_prerequisites.txt @@ -0,0 +1,116 @@ +Prerequisites +------------- + +The proxy service relies on an authentication and authorization mechanism such +as the Identity service. However, unlike other services, it also offers an +internal mechanism that allows it to operate without any other OpenStack +services. Before you configure the Object Storage service, you must +create service credentials and an API endpoint. + +.. note:: + + The Object Storage service does not use an SQL database on the controller + node. Instead, it uses distributed SQLite databases on each storage node. + +#. Source the ``admin`` credentials to gain access to admin-only CLI commands: + + .. code-block:: console + + $ . admin-openrc + +#. To create the Identity service credentials, complete these steps: + + * Create the ``swift`` user: + + .. code-block:: console + + $ openstack user create --domain default --password-prompt swift + User Password: + Repeat User Password: + +-----------+----------------------------------+ + | Field | Value | + +-----------+----------------------------------+ + | domain_id | e0353a670a9e496da891347c589539e9 | + | enabled | True | + | id | d535e5cbd2b74ac7bfb97db9cced3ed6 | + | name | swift | + +-----------+----------------------------------+ + + * Add the ``admin`` role to the ``swift`` user: + + .. code-block:: console + + $ openstack role add --project service --user swift admin + + .. note:: + + This command provides no output. + + * Create the ``swift`` service entity: + + .. code-block:: console + + $ openstack service create --name swift \ + --description "OpenStack Object Storage" object-store + +-------------+----------------------------------+ + | Field | Value | + +-------------+----------------------------------+ + | description | OpenStack Object Storage | + | enabled | True | + | id | 75ef509da2c340499d454ae96a2c5c34 | + | name | swift | + | type | object-store | + +-------------+----------------------------------+ + +#. Create the Object Storage service API endpoints: + + .. 
code-block:: console + + $ openstack endpoint create --region RegionOne \ + object-store public http://controller:8080/v1/AUTH_%\(tenant_id\)s + +--------------+----------------------------------------------+ + | Field | Value | + +--------------+----------------------------------------------+ + | enabled | True | + | id | 12bfd36f26694c97813f665707114e0d | + | interface | public | + | region | RegionOne | + | region_id | RegionOne | + | service_id | 75ef509da2c340499d454ae96a2c5c34 | + | service_name | swift | + | service_type | object-store | + | url | http://controller:8080/v1/AUTH_%(tenant_id)s | + +--------------+----------------------------------------------+ + + $ openstack endpoint create --region RegionOne \ + object-store internal http://controller:8080/v1/AUTH_%\(tenant_id\)s + +--------------+----------------------------------------------+ + | Field | Value | + +--------------+----------------------------------------------+ + | enabled | True | + | id | 7a36bee6733a4b5590d74d3080ee6789 | + | interface | internal | + | region | RegionOne | + | region_id | RegionOne | + | service_id | 75ef509da2c340499d454ae96a2c5c34 | + | service_name | swift | + | service_type | object-store | + | url | http://controller:8080/v1/AUTH_%(tenant_id)s | + +--------------+----------------------------------------------+ + + $ openstack endpoint create --region RegionOne \ + object-store admin http://controller:8080/v1 + +--------------+----------------------------------+ + | Field | Value | + +--------------+----------------------------------+ + | enabled | True | + | id | ebb72cd6851d4defabc0b9d71cdca69b | + | interface | admin | + | region | RegionOne | + | region_id | RegionOne | + | service_id | 75ef509da2c340499d454ae96a2c5c34 | + | service_name | swift | + | service_type | object-store | + | url | http://controller:8080/v1 | + +--------------+----------------------------------+ + diff --git a/install-guide/source/controller-include.txt b/install-guide/source/controller-include.txt new file mode 100644 index 0000000000..3e9b2b4305 --- /dev/null +++ b/install-guide/source/controller-include.txt @@ -0,0 +1,84 @@ +Edit the ``/etc/swift/proxy-server.conf`` file and complete the +following actions: + +* In the ``[DEFAULT]`` section, configure the bind port, user, and + configuration directory: + + .. code-block:: none + + [DEFAULT] + ... + bind_port = 8080 + user = swift + swift_dir = /etc/swift + +* In the ``[pipeline:main]`` section, remove the ``tempurl`` and + ``tempauth`` modules and add the ``authtoken`` and ``keystoneauth`` + modules: + + .. code-block:: none + + [pipeline:main] + pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + + .. note:: + + Do not change the order of the modules. + + .. note:: + + For more information on other modules that enable additional features, + see the `Deployment Guide `__. + +* In the ``[app:proxy-server]`` section, enable automatic account creation: + + .. code-block:: console + + [app:proxy-server] + use = egg:swift#proxy + ... + account_autocreate = True + +* In the ``[filter:keystoneauth]`` section, configure the operator roles: + + .. code-block:: console + + [filter:keystoneauth] + use = egg:swift#keystoneauth + ... + operator_roles = admin,user + +* In the ``[filter:authtoken]`` section, configure Identity service access: + + .. 
code-block:: none + + [filter:authtoken] + paste.filter_factory = keystonemiddleware.auth_token:filter_factory + ... + auth_uri = http://controller:5000 + auth_url = http://controller:35357 + memcached_servers = controller:11211 + auth_type = password + project_domain_name = default + user_domain_name = default + project_name = service + username = swift + password = SWIFT_PASS + delay_auth_decision = True + + Replace ``SWIFT_PASS`` with the password you chose for the ``swift`` user + in the Identity service. + + .. note:: + + Comment out or remove any other options in the ``[filter:authtoken]`` + section. + +* In the ``[filter:cache]`` section, configure the ``memcached`` location: + + .. code-block:: none + + [filter:cache] + use = egg:swift#memcache + ... + memcache_servers = controller:11211 diff --git a/install-guide/source/controller-install-debian.rst b/install-guide/source/controller-install-debian.rst new file mode 100644 index 0000000000..3a9ce2ba01 --- /dev/null +++ b/install-guide/source/controller-install-debian.rst @@ -0,0 +1,50 @@ +.. _controller-debian: + +Install and configure the controller node for Debian +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure the proxy service that +handles requests for the account, container, and object services operating +on the storage nodes. For simplicity, this guide installs and configures +the proxy service on the controller node. However, you can run the proxy +service on any node with network connectivity to the storage nodes. +Additionally, you can install and configure the proxy service on multiple +nodes to increase performance and redundancy. For more information, see the +`Deployment Guide `__. + +This section applies to Debian. + +.. include:: controller-common_prerequisites.txt + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +#. Install the packages: + + .. code-block:: console + + # apt-get install swift swift-proxy python-swiftclient \ + python-keystoneclient python-keystonemiddleware \ + memcached + + .. note:: + + Complete OpenStack environments already include some of these + packages. + + 2. Create the ``/etc/swift`` directory. + + 3. Obtain the proxy service configuration file from the Object Storage + source repository: + + .. code-block:: console + + # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/mitaka diff --git a/install-guide/source/controller-install-obs.rst b/install-guide/source/controller-install-obs.rst new file mode 100644 index 0000000000..7588510d44 --- /dev/null +++ b/install-guide/source/controller-install-obs.rst @@ -0,0 +1,45 @@ +.. _controller-obs: + +Install and configure the controller node for openSUSE and SUSE Linux Enterprise +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure the proxy service that +handles requests for the account, container, and object services operating +on the storage nodes. For simplicity, this guide installs and configures +the proxy service on the controller node. 
However, you can run the proxy +service on any node with network connectivity to the storage nodes. +Additionally, you can install and configure the proxy service on multiple +nodes to increase performance and redundancy. For more information, see the +`Deployment Guide `__. + +This section applies to openSUSE Leap 42.1 and SUSE Linux Enterprise Server +12 SP1. + +.. include:: controller-common_prerequisites.txt + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +#. Install the packages: + + .. code-block:: console + + # zypper install openstack-swift-proxy python-swiftclient \ + python-keystoneclient python-keystonemiddleware \ + python-xml memcached + + .. note:: + + Complete OpenStack environments already include some of these + packages. + + 2. .. include:: controller-include.txt + diff --git a/install-guide/source/controller-install-rdo.rst b/install-guide/source/controller-install-rdo.rst new file mode 100644 index 0000000000..82c9eb657d --- /dev/null +++ b/install-guide/source/controller-install-rdo.rst @@ -0,0 +1,50 @@ +.. _controller-rdo: + +Install and configure the controller node for Red Hat Enterprise Linux and CentOS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure the proxy service that +handles requests for the account, container, and object services operating +on the storage nodes. For simplicity, this guide installs and configures +the proxy service on the controller node. However, you can run the proxy +service on any node with network connectivity to the storage nodes. +Additionally, you can install and configure the proxy service on multiple +nodes to increase performance and redundancy. For more information, see the +`Deployment Guide `__. + +This section applies to Red Hat Enterprise Linux 7 and CentOS 7. + +.. include:: controller-common_prerequisites.txt + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +#. Install the packages: + + .. code-block:: console + + # yum install openstack-swift-proxy python-swiftclient \ + python-keystoneclient python-keystonemiddleware \ + memcached + + .. note:: + + Complete OpenStack environments already include some of these + packages. + + 2. Obtain the proxy service configuration file from the Object Storage + source repository: + + .. code-block:: console + + # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/mitaka + + 3. .. include:: controller-include.txt diff --git a/install-guide/source/controller-install-ubuntu.rst b/install-guide/source/controller-install-ubuntu.rst new file mode 100644 index 0000000000..2b8baed14f --- /dev/null +++ b/install-guide/source/controller-install-ubuntu.rst @@ -0,0 +1,52 @@ +.. 
_controller-ubuntu: + +Install and configure the controller node for Ubuntu +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure the proxy service that +handles requests for the account, container, and object services operating +on the storage nodes. For simplicity, this guide installs and configures +the proxy service on the controller node. However, you can run the proxy +service on any node with network connectivity to the storage nodes. +Additionally, you can install and configure the proxy service on multiple +nodes to increase performance and redundancy. For more information, see the +`Deployment Guide `__. + +This section applies to Ubuntu 14.04 (LTS). + +.. include:: controller-common_prerequisites.txt + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +#. Install the packages: + + .. code-block:: console + + # apt-get install swift swift-proxy python-swiftclient \ + python-keystoneclient python-keystonemiddleware \ + memcached + + .. note:: + + Complete OpenStack environments already include some of these + packages. + + 2. Create the ``/etc/swift`` directory. + + 3. Obtain the proxy service configuration file from the Object Storage + source repository: + + .. code-block:: console + + # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/mitaka + + 4. .. include:: controller-include.txt diff --git a/install-guide/source/controller-install.rst b/install-guide/source/controller-install.rst new file mode 100644 index 0000000000..f33faaf596 --- /dev/null +++ b/install-guide/source/controller-install.rst @@ -0,0 +1,18 @@ +.. _controller: + +Install and configure the controller node +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure the proxy service that +handles requests for the account, container, and object services operating +on the storage nodes. + +Note that installation and configuration vary by distribution. + +.. toctree:: + :maxdepth: 1 + + controller-install-obs.rst + controller-install-rdo.rst + controller-install-ubuntu.rst + controller-install-debian.rst diff --git a/install-guide/source/finalize-installation-obs.rst b/install-guide/source/finalize-installation-obs.rst new file mode 100644 index 0000000000..2d45407217 --- /dev/null +++ b/install-guide/source/finalize-installation-obs.rst @@ -0,0 +1,80 @@ +.. _finalize-obs: + +Finalize installation for openSUSE and SUSE Linux Enterprise +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +This section applies to openSUSE Leap 42.1 and SUSE Linux Enterprise Server +12 SP1. + +#. Edit the ``/etc/swift/swift.conf`` file and complete the following + actions: + + * In the ``[swift-hash]`` section, configure the hash path prefix and + suffix for your environment. + + .. code-block:: none + + [swift-hash] + ... 
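+ # Placeholder values follow. One way (among others) to generate
+ # suitably random values for them is: openssl rand -hex 32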
+ swift_hash_path_suffix = HASH_PATH_SUFFIX + swift_hash_path_prefix = HASH_PATH_PREFIX + + Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values. + + .. warning:: + + Keep these values secret and do not change or lose them. + + * In the ``[storage-policy:0]`` section, configure the default + storage policy: + + .. code-block:: none + + [storage-policy:0] + ... + name = Policy-0 + default = yes + +#. Copy the ``swift.conf`` file to the ``/etc/swift`` directory on + each storage node and any additional nodes running the proxy service. + +3. On all nodes, ensure proper ownership of the configuration directory: + + .. code-block:: console + + # chown -R root:swift /etc/swift + +4. On the controller node and any other nodes running the proxy service, + start the Object Storage proxy service including its dependencies and + configure them to start when the system boots: + + .. code-block:: console + + # systemctl enable openstack-swift-proxy.service memcached.service + # systemctl start openstack-swift-proxy.service memcached.service + +5. On the storage nodes, start the Object Storage services and configure + them to start when the system boots: + + .. code-block:: console + + # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service + # systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service + # systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service + # systemctl start openstack-swift-container.service openstack-swift-container-auditor.service \ + openstack-swift-container-replicator.service openstack-swift-container-updater.service + # systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service + # systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service diff --git a/install-guide/source/finalize-installation-rdo.rst b/install-guide/source/finalize-installation-rdo.rst new file mode 100644 index 0000000000..fd4f3e68bd --- /dev/null +++ b/install-guide/source/finalize-installation-rdo.rst @@ -0,0 +1,89 @@ +.. _finalize-rdo: + +Finalize installation for Red Hat Enterprise Linux and CentOS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +This section applies to Red Hat Enterprise Linux 7 and CentOS 7. + +#. Obtain the ``/etc/swift/swift.conf`` file from the Object + Storage source repository: + + .. code-block:: console + + # curl -o /etc/swift/swift.conf \ + https://git.openstack.org/cgit/openstack/swift/plain/etc/swift.conf-sample?h=stable/mitaka + +#. Edit the ``/etc/swift/swift.conf`` file and complete the following + actions: + + * In the ``[swift-hash]`` section, configure the hash path prefix and + suffix for your environment. + + .. code-block:: none + + [swift-hash] + ... 
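+ # Placeholder values follow. One way (among others) to generate
+ # suitably random values for them is: openssl rand -hex 32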
+ swift_hash_path_suffix = HASH_PATH_SUFFIX + swift_hash_path_prefix = HASH_PATH_PREFIX + + Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values. + + .. warning:: + + Keep these values secret and do not change or lose them. + + * In the ``[storage-policy:0]`` section, configure the default + storage policy: + + .. code-block:: none + + [storage-policy:0] + ... + name = Policy-0 + default = yes + +#. Copy the ``swift.conf`` file to the ``/etc/swift`` directory on + each storage node and any additional nodes running the proxy service. + +4. On all nodes, ensure proper ownership of the configuration directory: + + .. code-block:: console + + # chown -R root:swift /etc/swift + +5. On the controller node and any other nodes running the proxy service, + start the Object Storage proxy service including its dependencies and + configure them to start when the system boots: + + .. code-block:: console + + # systemctl enable openstack-swift-proxy.service memcached.service + # systemctl start openstack-swift-proxy.service memcached.service + +6. On the storage nodes, start the Object Storage services and configure + them to start when the system boots: + + .. code-block:: console + + # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service + # systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \ + openstack-swift-account-reaper.service openstack-swift-account-replicator.service + # systemctl enable openstack-swift-container.service \ + openstack-swift-container-auditor.service openstack-swift-container-replicator.service \ + openstack-swift-container-updater.service + # systemctl start openstack-swift-container.service \ + openstack-swift-container-auditor.service openstack-swift-container-replicator.service \ + openstack-swift-container-updater.service + # systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service + # systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \ + openstack-swift-object-replicator.service openstack-swift-object-updater.service diff --git a/install-guide/source/finalize-installation-ubuntu-debian.rst b/install-guide/source/finalize-installation-ubuntu-debian.rst new file mode 100644 index 0000000000..ca4dde8f26 --- /dev/null +++ b/install-guide/source/finalize-installation-ubuntu-debian.rst @@ -0,0 +1,80 @@ +.. _finalize-ubuntu-debian: + +Finalize installation for Ubuntu and Debian +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +This section applies to Ubuntu 14.04 (LTS) and Debian. + +#. Obtain the ``/etc/swift/swift.conf`` file from the Object + Storage source repository: + + .. code-block:: console + + # curl -o /etc/swift/swift.conf \ + https://git.openstack.org/cgit/openstack/swift/plain/etc/swift.conf-sample?h=stable/mitaka + +#. Edit the ``/etc/swift/swift.conf`` file and complete the following + actions: + + * In the ``[swift-hash]`` section, configure the hash path prefix and + suffix for your environment. + + .. code-block:: none + + [swift-hash] + ... 
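+ # Placeholder values follow. One way (among others) to generate
+ # suitably random values for them is: openssl rand -hex 32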
+ swift_hash_path_suffix = HASH_PATH_SUFFIX
+ swift_hash_path_prefix = HASH_PATH_PREFIX
+
+ Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values.
+
+ .. warning::
+
+ Keep these values secret and do not change or lose them.
+
+ * In the ``[storage-policy:0]`` section, configure the default
+ storage policy:
+
+ .. code-block:: none
+
+ [storage-policy:0]
+ ...
+ name = Policy-0
+ default = yes
+
+#. Copy the ``swift.conf`` file to the ``/etc/swift`` directory on
+ each storage node and any additional nodes running the proxy service.
+
+4. On all nodes, ensure proper ownership of the configuration directory:
+
+ .. code-block:: console
+
+ # chown -R root:swift /etc/swift
+
+5. On the controller node and any other nodes running the proxy service,
+ restart the Object Storage proxy service including its dependencies:
+
+ .. code-block:: console
+
+ # service memcached restart
+ # service swift-proxy restart
+
+6. On the storage nodes, start the Object Storage services:
+
+ .. code-block:: console
+
+ # swift-init all start
+
+ .. note::
+
+ The storage node runs many Object Storage services and the
+ :command:`swift-init` command makes them easier to manage.
+ You can ignore errors from services not running on the storage node.
diff --git a/install-guide/source/finalize-installation.rst b/install-guide/source/finalize-installation.rst
new file mode 100644
index 0000000000..cdd168a4ac
--- /dev/null
+++ b/install-guide/source/finalize-installation.rst
@@ -0,0 +1,13 @@
+.. _finalize:
+
+Finalize installation
+~~~~~~~~~~~~~~~~~~~~~
+
+Finalizing installation varies by distribution.
+
+.. toctree::
+ :maxdepth: 1
+
+ finalize-installation-obs.rst
+ finalize-installation-rdo.rst
+ finalize-installation-ubuntu-debian.rst
diff --git a/install-guide/source/get_started.rst b/install-guide/source/get_started.rst
new file mode 100644
index 0000000000..301f6a2ebd
--- /dev/null
+++ b/install-guide/source/get_started.rst
@@ -0,0 +1,51 @@
+===============================
+Object Storage service overview
+===============================
+
+OpenStack Object Storage is a multi-tenant object storage system. It
+is highly scalable and can manage large amounts of unstructured data at
+low cost through a RESTful HTTP API.
+
+It includes the following components:
+
+Proxy servers (swift-proxy-server)
+ Accepts OpenStack Object Storage API and raw HTTP requests to upload
+ files, modify metadata, and create containers. It also serves file
+ or container listings to web browsers. To improve performance, the
+ proxy server can use an optional cache that is usually deployed with
+ memcache.
+
+Account servers (swift-account-server)
+ Manages accounts defined with Object Storage.
+
+Container servers (swift-container-server)
+ Manages the mapping of containers, or folders, within Object Storage.
+
+Object servers (swift-object-server)
+ Manages actual objects, such as files, on the storage nodes.
+
+Various periodic processes
+ Performs housekeeping tasks on the large data store. The replication
+ services ensure consistency and availability throughout the cluster.
+ Other periodic processes include auditors, updaters, and reapers.
+
+WSGI middleware
+ Handles authentication and is usually OpenStack Identity.
+
+swift client
+ Enables users to submit commands to the REST API through a
+ command-line client authorized as either an admin user, reseller
+ user, or swift user.
+
+swift-init
+ Script that initializes the building of the ring file, takes daemon
+ names as parameters, and offers commands. 
Documented in
+ http://docs.openstack.org/developer/swift/admin_guide.html#managing-services.
+
+swift-recon
+ A CLI tool used to retrieve various metrics and telemetry information
+ about a cluster, as collected by the swift-recon middleware.
+
+swift-ring-builder
+ Storage ring build and rebalance utility. Documented in
+ http://docs.openstack.org/developer/swift/admin_guide.html#managing-the-rings.
diff --git a/install-guide/source/index.rst b/install-guide/source/index.rst
new file mode 100644
index 0000000000..7869e05f0c
--- /dev/null
+++ b/install-guide/source/index.rst
@@ -0,0 +1,23 @@
+======================
+Object Storage service
+======================
+
+.. toctree::
+ :maxdepth: 2
+
+ get_started.rst
+ controller-install.rst
+ storage-install.rst
+ initial-rings.rst
+ finalize-installation.rst
+ verify.rst
+ next-steps.rst
+
+The Object Storage services (swift) work together to provide
+object storage and retrieval through a REST API.
+
+This chapter assumes a working setup of OpenStack following the
+`OpenStack Installation Tutorial `_.
+
+Your environment must include at least the Identity service (keystone)
+before you deploy Object Storage.
diff --git a/install-guide/source/initial-rings.rst b/install-guide/source/initial-rings.rst
new file mode 100644
index 0000000000..37ae3b17fd
--- /dev/null
+++ b/install-guide/source/initial-rings.rst
@@ -0,0 +1,253 @@
+Create and distribute initial rings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before starting the Object Storage services, you must create the initial
+account, container, and object rings. The ring builder creates configuration
+files that each node uses to determine and deploy the storage architecture.
+For simplicity, this guide uses one region and two zones with 2^10 (1024)
+maximum partitions, 3 replicas of each object, and a minimum of 1 hour
+between successive moves of any partition. For Object Storage, a partition
+indicates a directory on a storage device rather than a conventional
+partition table. For more information, see the
+`Deployment Guide `__.
+
+.. note::
+ Perform these steps on the controller node.
+
+Create account ring
+-------------------
+
+The account server uses the account ring to maintain lists of containers.
+
+#. Change to the ``/etc/swift`` directory.
+
+#. Create the base ``account.builder`` file:
+
+ .. code-block:: console
+
+ # swift-ring-builder account.builder create 10 3 1
+
+ .. note::
+
+ This command provides no output.
+
+#. Add each storage node to the ring:
+
+ .. code-block:: console
+
+ # swift-ring-builder account.builder \
+ add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6002 \
+ --device DEVICE_NAME --weight DEVICE_WEIGHT
+
+ Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address
+ of the management network on the storage node. Replace ``DEVICE_NAME`` with a
+ storage device name on the same storage node. For example, using the first
+ storage node in :ref:`storage` with the ``/dev/sdb`` storage
+ device and weight of 100:
+
+ .. code-block:: console
+
+ # swift-ring-builder account.builder add \
+ --region 1 --zone 1 --ip 10.0.0.51 --port 6002 --device sdb --weight 100
+
+ Repeat this command for each storage device on each storage node. In the
+ example architecture, use the command in four variations:
+
+ .. 
code-block:: console + + # swift-ring-builder account.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6002 --device sdb --weight 100 + Device d0r1z1-10.0.0.51:6002R10.0.0.51:6002/sdb_"" with 100.0 weight got id 0 + # swift-ring-builder account.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6002 --device sdc --weight 100 + Device d1r1z2-10.0.0.51:6002R10.0.0.51:6002/sdc_"" with 100.0 weight got id 1 + # swift-ring-builder account.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6002 --device sdb --weight 100 + Device d2r1z3-10.0.0.52:6002R10.0.0.52:6002/sdb_"" with 100.0 weight got id 2 + # swift-ring-builder account.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6002 --device sdc --weight 100 + Device d3r1z4-10.0.0.52:6002R10.0.0.52:6002/sdc_"" with 100.0 weight got id 3 + +#. Verify the ring contents: + + .. code-block:: console + + # swift-ring-builder account.builder + account.builder, build version 4 + 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion + The minimum number of hours before a partition can be reassigned is 1 + The overload factor is 0.00% (0.000000) + Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6002 10.0.0.51 6002 sdb 100.00 0 -100.00 + 1 1 1 10.0.0.51 6002 10.0.0.51 6002 sdc 100.00 0 -100.00 + 2 1 2 10.0.0.52 6002 10.0.0.52 6002 sdb 100.00 0 -100.00 + 3 1 2 10.0.0.52 6002 10.0.0.52 6002 sdc 100.00 0 -100.00 + +#. Rebalance the ring: + + .. code-block:: console + + # swift-ring-builder account.builder rebalance + Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 + +Create container ring +--------------------- + +The container server uses the container ring to maintain lists of objects. +However, it does not track object locations. + +#. Change to the ``/etc/swift`` directory. + +#. Create the base ``container.builder`` file: + + .. code-block:: console + + # swift-ring-builder container.builder create 10 3 1 + + .. note:: + + This command provides no output. + +#. Add each storage node to the ring: + + .. code-block:: console + + # swift-ring-builder container.builder \ + add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6001 \ + --device DEVICE_NAME --weight DEVICE_WEIGHT + + Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address + of the management network on the storage node. Replace ``DEVICE_NAME`` with a + storage device name on the same storage node. For example, using the first + storage node in :ref:`storage` with the ``/dev/sdb`` + storage device and weight of 100: + + .. code-block:: console + + # swift-ring-builder container.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6001 --device sdb --weight 100 + + Repeat this command for each storage device on each storage node. In the + example architecture, use the command in four variations: + + .. 
code-block:: console + + # swift-ring-builder container.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6001 --device sdb --weight 100 + Device d0r1z1-10.0.0.51:6001R10.0.0.51:6001/sdb_"" with 100.0 weight got id 0 + # swift-ring-builder container.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6001 --device sdc --weight 100 + Device d1r1z2-10.0.0.51:6001R10.0.0.51:6001/sdc_"" with 100.0 weight got id 1 + # swift-ring-builder container.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6001 --device sdb --weight 100 + Device d2r1z3-10.0.0.52:6001R10.0.0.52:6001/sdb_"" with 100.0 weight got id 2 + # swift-ring-builder container.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6001 --device sdc --weight 100 + Device d3r1z4-10.0.0.52:6001R10.0.0.52:6001/sdc_"" with 100.0 weight got id 3 + +#. Verify the ring contents: + + .. code-block:: console + + # swift-ring-builder container.builder + container.builder, build version 4 + 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion + The minimum number of hours before a partition can be reassigned is 1 + The overload factor is 0.00% (0.000000) + Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6001 10.0.0.51 6001 sdb 100.00 0 -100.00 + 1 1 1 10.0.0.51 6001 10.0.0.51 6001 sdc 100.00 0 -100.00 + 2 1 2 10.0.0.52 6001 10.0.0.52 6001 sdb 100.00 0 -100.00 + 3 1 2 10.0.0.52 6001 10.0.0.52 6001 sdc 100.00 0 -100.00 + +#. Rebalance the ring: + + .. code-block:: console + + # swift-ring-builder container.builder rebalance + Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 + +Create object ring +------------------ + +The object server uses the object ring to maintain lists of object locations +on local devices. + +#. Change to the ``/etc/swift`` directory. + +#. Create the base ``object.builder`` file: + + .. code-block:: console + + # swift-ring-builder object.builder create 10 3 1 + + .. note:: + + This command provides no output. + +#. Add each storage node to the ring: + + .. code-block:: console + + # swift-ring-builder object.builder \ + add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6000 \ + --device DEVICE_NAME --weight DEVICE_WEIGHT + + Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address + of the management network on the storage node. Replace ``DEVICE_NAME`` with + a storage device name on the same storage node. For example, using the first + storage node in :ref:`storage` with the ``/dev/sdb`` storage + device and weight of 100: + + .. code-block:: console + + # swift-ring-builder object.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6000 --device sdb --weight 100 + + Repeat this command for each storage device on each storage node. In the + example architecture, use the command in four variations: + + .. 
code-block:: console + + # swift-ring-builder object.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6000 --device sdb --weight 100 + Device d0r1z1-10.0.0.51:6000R10.0.0.51:6000/sdb_"" with 100.0 weight got id 0 + # swift-ring-builder object.builder add \ + --region 1 --zone 1 --ip 10.0.0.51 --port 6000 --device sdc --weight 100 + Device d1r1z2-10.0.0.51:6000R10.0.0.51:6000/sdc_"" with 100.0 weight got id 1 + # swift-ring-builder object.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6000 --device sdb --weight 100 + Device d2r1z3-10.0.0.52:6000R10.0.0.52:6000/sdb_"" with 100.0 weight got id 2 + # swift-ring-builder object.builder add \ + --region 1 --zone 2 --ip 10.0.0.52 --port 6000 --device sdc --weight 100 + Device d3r1z4-10.0.0.52:6000R10.0.0.52:6000/sdc_"" with 100.0 weight got id 3 + +#. Verify the ring contents: + + .. code-block:: console + + # swift-ring-builder object.builder + object.builder, build version 4 + 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion + The minimum number of hours before a partition can be reassigned is 1 + The overload factor is 0.00% (0.000000) + Devices: id region zone ip address port replication ip replication port name weight partitions balance meta + 0 1 1 10.0.0.51 6000 10.0.0.51 6000 sdb 100.00 0 -100.00 + 1 1 1 10.0.0.51 6000 10.0.0.51 6000 sdc 100.00 0 -100.00 + 2 1 2 10.0.0.52 6000 10.0.0.52 6000 sdb 100.00 0 -100.00 + 3 1 2 10.0.0.52 6000 10.0.0.52 6000 sdc 100.00 0 -100.00 + +#. Rebalance the ring: + + .. code-block:: console + + # swift-ring-builder object.builder rebalance + Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 + +Distribute ring configuration files +----------------------------------- + +* Copy the ``account.ring.gz``, ``container.ring.gz``, and + ``object.ring.gz`` files to the ``/etc/swift`` directory + on each storage node and any additional nodes running the + proxy service. diff --git a/install-guide/source/next-steps.rst b/install-guide/source/next-steps.rst new file mode 100644 index 0000000000..de8f150460 --- /dev/null +++ b/install-guide/source/next-steps.rst @@ -0,0 +1,10 @@ +.. _next-steps: + +========== +Next steps +========== + +Your OpenStack environment now includes Object Storage. + +To add more services, see the +`additional documentation on installing OpenStack `_ . diff --git a/install-guide/source/storage-include1.txt b/install-guide/source/storage-include1.txt new file mode 100644 index 0000000000..3f25351856 --- /dev/null +++ b/install-guide/source/storage-include1.txt @@ -0,0 +1,41 @@ +Edit the ``/etc/swift/account-server.conf`` file and complete the +following actions: + +* In the ``[DEFAULT]`` section, configure the bind IP address, bind port, + user, configuration directory, and mount point directory: + + .. code-block:: none + + [DEFAULT] + ... + bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS + bind_port = 6002 + user = swift + swift_dir = /etc/swift + devices = /srv/node + mount_check = True + + Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the + management network on the storage node. + +* In the ``[pipeline:main]`` section, enable the appropriate modules: + + .. code-block:: none + + [pipeline:main] + pipeline = healthcheck recon account-server + + .. note:: + + For more information on other modules that enable additional features, + see the `Deployment Guide `__. + +* In the ``[filter:recon]`` section, configure the recon (meters) cache + directory: + + .. 
code-block:: none + + [filter:recon] + use = egg:swift#recon + ... + recon_cache_path = /var/cache/swift diff --git a/install-guide/source/storage-include2.txt b/install-guide/source/storage-include2.txt new file mode 100644 index 0000000000..835e9b2878 --- /dev/null +++ b/install-guide/source/storage-include2.txt @@ -0,0 +1,41 @@ +Edit the ``/etc/swift/container-server.conf`` file and complete the +following actions: + +* In the ``[DEFAULT]`` section, configure the bind IP address, bind port, + user, configuration directory, and mount point directory: + + .. code-block:: none + + [DEFAULT] + ... + bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS + bind_port = 6001 + user = swift + swift_dir = /etc/swift + devices = /srv/node + mount_check = True + + Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the + management network on the storage node. + +* In the ``[pipeline:main]`` section, enable the appropriate modules: + + .. code-block:: none + + [pipeline:main] + pipeline = healthcheck recon container-server + + .. note:: + + For more information on other modules that enable additional features, + see the `Deployment Guide `__. + +* In the ``[filter:recon]`` section, configure the recon (meters) cache + directory: + + .. code-block:: none + + [filter:recon] + use = egg:swift#recon + ... + recon_cache_path = /var/cache/swift diff --git a/install-guide/source/storage-include3.txt b/install-guide/source/storage-include3.txt new file mode 100644 index 0000000000..cd89acd392 --- /dev/null +++ b/install-guide/source/storage-include3.txt @@ -0,0 +1,42 @@ +Edit the ``/etc/swift/object-server.conf`` file and complete the +following actions: + +* In the ``[DEFAULT]`` section, configure the bind IP address, bind port, + user, configuration directory, and mount point directory: + + .. code-block:: none + + [DEFAULT] + ... + bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS + bind_port = 6000 + user = swift + swift_dir = /etc/swift + devices = /srv/node + mount_check = True + + Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the + management network on the storage node. + +* In the ``[pipeline:main]`` section, enable the appropriate modules: + + .. code-block:: none + + [pipeline:main] + pipeline = healthcheck recon object-server + + .. note:: + + For more information on other modules that enable additional features, + see the `Deployment Guide `__. + +* In the ``[filter:recon]`` section, configure the recon (meters) cache + and lock directories: + + .. code-block:: none + + [filter:recon] + use = egg:swift#recon + ... + recon_cache_path = /var/cache/swift + recon_lock_path = /var/lock diff --git a/install-guide/source/storage-install-obs.rst b/install-guide/source/storage-install-obs.rst new file mode 100644 index 0000000000..df88b83afb --- /dev/null +++ b/install-guide/source/storage-install-obs.rst @@ -0,0 +1,139 @@ +.. _storage-obs: + +Install and configure the storage nodes for openSUSE and SUSE Linux Enterprise +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure storage nodes +that operate the account, container, and object services. For +simplicity, this configuration references two storage nodes, each +containing two empty local block storage devices. The instructions +use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different +values for your particular nodes. 
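+
+For example, you can list each node's block devices to confirm the
+device names before formatting anything (the device names and sizes in
+this output are illustrative):
+
+.. code-block:: console
+
+ # lsblk
+ NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
+ sda 8:0 0 40G 0 disk /
+ sdb 8:16 0 100G 0 disk
+ sdc 8:32 0 100G 0 disk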
+ +Although Object Storage supports any file system with +extended attributes (xattr), testing and benchmarking +indicate the best performance and reliability on XFS. For +more information on horizontally scaling your environment, see the +`Deployment Guide `_. + +This section applies to openSUSE Leap 42.1 and SUSE Linux Enterprise Server +12 SP1. + +Prerequisites +------------- + +Before you install and configure the Object Storage service on the +storage nodes, you must prepare the storage devices. + +.. note:: + + Perform these steps on each storage node. + +#. Install the supporting utility packages: + + .. code-block:: console + + # zypper install xfsprogs rsync + +#. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS: + + .. code-block:: console + + # mkfs.xfs /dev/sdb + # mkfs.xfs /dev/sdc + +#. Create the mount point directory structure: + + .. code-block:: console + + # mkdir -p /srv/node/sdb + # mkdir -p /srv/node/sdc + +#. Edit the ``/etc/fstab`` file and add the following to it: + + .. code-block:: none + + /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 + /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 + +#. Mount the devices: + + .. code-block:: console + + # mount /srv/node/sdb + # mount /srv/node/sdc + +#. Create or edit the ``/etc/rsyncd.conf`` file to contain the following: + + .. code-block:: none + + uid = swift + gid = swift + log file = /var/log/rsyncd.log + pid file = /var/run/rsyncd.pid + address = MANAGEMENT_INTERFACE_IP_ADDRESS + + [account] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/account.lock + + [container] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/container.lock + + [object] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/object.lock + + Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the + management network on the storage node. + + .. note:: + + The ``rsync`` service requires no authentication, so consider running + it on a private network in production environments. + +7. Start the ``rsyncd`` service and configure it to start when the + system boots: + + .. code-block:: console + + # systemctl enable rsyncd.service + # systemctl start rsyncd.service + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +.. note:: + + Perform these steps on each storage node. + +#. Install the packages: + + .. code-block:: console + + # zypper install openstack-swift-account \ + openstack-swift-container openstack-swift-object python-xml + +2. .. include:: storage-include1.txt +3. .. include:: storage-include2.txt +4. .. include:: storage-include3.txt +5. Ensure proper ownership of the mount point directory structure: + + .. code-block:: console + + # chown -R swift:swift /srv/node diff --git a/install-guide/source/storage-install-rdo.rst b/install-guide/source/storage-install-rdo.rst new file mode 100644 index 0000000000..3e2e379efb --- /dev/null +++ b/install-guide/source/storage-install-rdo.rst @@ -0,0 +1,155 @@ +.. 
_storage-rdo: + +Install and configure the storage nodes for Red Hat Enterprise Linux and CentOS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section describes how to install and configure storage nodes +that operate the account, container, and object services. For +simplicity, this configuration references two storage nodes, each +containing two empty local block storage devices. The instructions +use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different +values for your particular nodes. + +Although Object Storage supports any file system with +extended attributes (xattr), testing and benchmarking +indicate the best performance and reliability on XFS. For +more information on horizontally scaling your environment, see the +`Deployment Guide `_. + +This section applies to Red Hat Enterprise Linux 7 and CentOS 7. + +Prerequisites +------------- + +Before you install and configure the Object Storage service on the +storage nodes, you must prepare the storage devices. + +.. note:: + + Perform these steps on each storage node. + +#. Install the supporting utility packages: + + .. code-block:: console + + # yum install xfsprogs rsync + +#. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS: + + .. code-block:: console + + # mkfs.xfs /dev/sdb + # mkfs.xfs /dev/sdc + +#. Create the mount point directory structure: + + .. code-block:: console + + # mkdir -p /srv/node/sdb + # mkdir -p /srv/node/sdc + +#. Edit the ``/etc/fstab`` file and add the following to it: + + .. code-block:: none + + /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 + /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 + +#. Mount the devices: + + .. code-block:: console + + # mount /srv/node/sdb + # mount /srv/node/sdc + +#. Create or edit the ``/etc/rsyncd.conf`` file to contain the following: + + .. code-block:: none + + uid = swift + gid = swift + log file = /var/log/rsyncd.log + pid file = /var/run/rsyncd.pid + address = MANAGEMENT_INTERFACE_IP_ADDRESS + + [account] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/account.lock + + [container] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/container.lock + + [object] + max connections = 2 + path = /srv/node/ + read only = False + lock file = /var/lock/object.lock + + Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the + management network on the storage node. + + .. note:: + + The ``rsync`` service requires no authentication, so consider running + it on a private network in production environments. + +7. Start the ``rsyncd`` service and configure it to start when the + system boots: + + .. code-block:: console + + # systemctl enable rsyncd.service + # systemctl start rsyncd.service + +Install and configure components +-------------------------------- + +.. note:: + + Default configuration files vary by distribution. You might need + to add these sections and options rather than modifying existing + sections and options. Also, an ellipsis (``...``) in the configuration + snippets indicates potential default configuration options that you + should retain. + +.. note:: + + Perform these steps on each storage node. + +#. Install the packages: + + .. code-block:: console + + # yum install openstack-swift-account openstack-swift-container \ + openstack-swift-object + +2. 
Obtain the account, container, and object service configuration
+ files from the Object Storage source repository:
+
+ .. code-block:: console
+
+ # curl -o /etc/swift/account-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/account-server.conf-sample?h=stable/mitaka
+ # curl -o /etc/swift/container-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/container-server.conf-sample?h=stable/mitaka
+ # curl -o /etc/swift/object-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/object-server.conf-sample?h=stable/mitaka
+
+3. .. include:: storage-include1.txt
+4. .. include:: storage-include2.txt
+5. .. include:: storage-include3.txt
+6. Ensure proper ownership of the mount point directory structure:
+
+ .. code-block:: console
+
+ # chown -R swift:swift /srv/node
+
+7. Create the ``recon`` directory and ensure proper ownership of it:
+
+ .. code-block:: console
+
+ # mkdir -p /var/cache/swift
+ # chown -R root:swift /var/cache/swift
+ # chmod -R 775 /var/cache/swift
diff --git a/install-guide/source/storage-install-ubuntu-debian.rst b/install-guide/source/storage-install-ubuntu-debian.rst
new file mode 100644
index 0000000000..a429989dd2
--- /dev/null
+++ b/install-guide/source/storage-install-ubuntu-debian.rst
@@ -0,0 +1,159 @@
+.. _storage-ubuntu-debian:
+
+Install and configure the storage nodes for Ubuntu and Debian
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+that operate the account, container, and object services. For
+simplicity, this configuration references two storage nodes, each
+containing two empty local block storage devices. The instructions
+use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different
+values for your particular nodes.
+
+Although Object Storage supports any file system with
+extended attributes (xattr), testing and benchmarking
+indicate the best performance and reliability on XFS. For
+more information on horizontally scaling your environment, see the
+`Deployment Guide `_.
+
+This section applies to Ubuntu 14.04 (LTS) and Debian.
+
+Prerequisites
+-------------
+
+Before you install and configure the Object Storage service on the
+storage nodes, you must prepare the storage devices.
+
+.. note::
+
+ Perform these steps on each storage node.
+
+#. Install the supporting utility packages:
+
+ .. code-block:: console
+
+ # apt-get install xfsprogs rsync
+
+#. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS:
+
+ .. code-block:: console
+
+ # mkfs.xfs /dev/sdb
+ # mkfs.xfs /dev/sdc
+
+#. Create the mount point directory structure:
+
+ .. code-block:: console
+
+ # mkdir -p /srv/node/sdb
+ # mkdir -p /srv/node/sdc
+
+#. Edit the ``/etc/fstab`` file and add the following to it:
+
+ .. code-block:: none
+
+ /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2
+ /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2
+
+#. Mount the devices:
+
+ .. code-block:: console
+
+ # mount /srv/node/sdb
+ # mount /srv/node/sdc
+
+#. Create or edit the ``/etc/rsyncd.conf`` file to contain the following:
+
+ .. 
code-block:: none
+
+ uid = swift
+ gid = swift
+ log file = /var/log/rsyncd.log
+ pid file = /var/run/rsyncd.pid
+ address = MANAGEMENT_INTERFACE_IP_ADDRESS
+
+ [account]
+ max connections = 2
+ path = /srv/node/
+ read only = False
+ lock file = /var/lock/account.lock
+
+ [container]
+ max connections = 2
+ path = /srv/node/
+ read only = False
+ lock file = /var/lock/container.lock
+
+ [object]
+ max connections = 2
+ path = /srv/node/
+ read only = False
+ lock file = /var/lock/object.lock
+
+ Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the
+ management network on the storage node.
+
+ .. note::
+
+ The ``rsync`` service requires no authentication, so consider running
+ it on a private network in production environments.
+
+7. Edit the ``/etc/default/rsync`` file and enable the ``rsync``
+ service:
+
+ .. code-block:: none
+
+ RSYNC_ENABLE=true
+
+8. Start the ``rsync`` service:
+
+ .. code-block:: console
+
+ # service rsync start
+
+Install and configure components
+--------------------------------
+
+.. note::
+
+ Default configuration files vary by distribution. You might need
+ to add these sections and options rather than modifying existing
+ sections and options. Also, an ellipsis (``...``) in the configuration
+ snippets indicates potential default configuration options that you
+ should retain.
+
+.. note::
+
+ Perform these steps on each storage node.
+
+#. Install the packages:
+
+ .. code-block:: console
+
+ # apt-get install swift swift-account swift-container swift-object
+
+2. Obtain the account, container, and object service configuration
+ files from the Object Storage source repository:
+
+ .. code-block:: console
+
+ # curl -o /etc/swift/account-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/account-server.conf-sample?h=stable/mitaka
+ # curl -o /etc/swift/container-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/container-server.conf-sample?h=stable/mitaka
+ # curl -o /etc/swift/object-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/object-server.conf-sample?h=stable/mitaka
+
+3. .. include:: storage-include1.txt
+4. .. include:: storage-include2.txt
+5. .. include:: storage-include3.txt
+6. Ensure proper ownership of the mount point directory structure:
+
+ .. code-block:: console
+
+ # chown -R swift:swift /srv/node
+
+7. Create the ``recon`` directory and ensure proper ownership of it:
+
+ .. code-block:: console
+
+ # mkdir -p /var/cache/swift
+ # chown -R root:swift /var/cache/swift
+ # chmod -R 775 /var/cache/swift
diff --git a/install-guide/source/storage-install.rst b/install-guide/source/storage-install.rst
new file mode 100644
index 0000000000..38f2b6875e
--- /dev/null
+++ b/install-guide/source/storage-install.rst
@@ -0,0 +1,16 @@
+.. _storage:
+
+Install and configure the storage nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section describes how to install and configure storage nodes
+that operate the account, container, and object services.
+
+Note that installation and configuration vary by distribution.
+
+.. toctree::
+ :maxdepth: 1
+
+ storage-install-obs.rst
+ storage-install-rdo.rst
+ storage-install-ubuntu-debian.rst
diff --git a/install-guide/source/verify.rst b/install-guide/source/verify.rst
new file mode 100644
index 0000000000..7b29e6d1db
--- /dev/null
+++ b/install-guide/source/verify.rst
@@ -0,0 +1,96 @@
+.. _verify:
+
+Verify operation
+~~~~~~~~~~~~~~~~
+
+Verify operation of the Object Storage service.
+
+.. 
note:: + + Perform these steps on the controller node. + +.. warning:: + + If you are using Red Hat Enterprise Linux 7 or CentOS 7 and one or more of + these steps do not work, check the ``/var/log/audit/audit.log`` file for + SELinux messages indicating denial of actions for the ``swift`` processes. + If present, change the security context of the ``/srv/node`` directory to + the lowest security level (s0) for the ``swift_data_t`` type, ``object_r`` + role and the ``system_u`` user: + + .. code-block:: console + + # chcon -R system_u:object_r:swift_data_t:s0 /srv/node + +#. Source the ``demo`` credentials: + + .. code-block:: console + + $ . demo-openrc + +#. Show the service status: + + .. code-block:: console + + $ swift stat + Account: AUTH_ed0b60bf607743088218b0a533d5943f + Containers: 0 + Objects: 0 + Bytes: 0 + Containers in policy "policy-0": 0 + Objects in policy "policy-0": 0 + Bytes in policy "policy-0": 0 + X-Account-Project-Domain-Id: default + X-Timestamp: 1444143887.71539 + X-Trans-Id: tx1396aeaf17254e94beb34-0056143bde + Content-Type: text/plain; charset=utf-8 + Accept-Ranges: bytes + +#. Create ``container1`` container: + + .. code-block:: console + + $ openstack container create container1 + +---------------------------------------+------------+------------------------------------+ + | account | container | x-trans-id | + +---------------------------------------+------------+------------------------------------+ + | AUTH_ed0b60bf607743088218b0a533d5943f | container1 | tx8c4034dc306c44dd8cd68-0056f00a4a | + +---------------------------------------+------------+------------------------------------+ + +#. Upload a test file to the ``container1`` container: + + .. code-block:: console + + $ openstack object create container1 FILE + +--------+------------+----------------------------------+ + | object | container | etag | + +--------+------------+----------------------------------+ + | FILE | container1 | ee1eca47dc88f4879d8a229cc70a07c6 | + +--------+------------+----------------------------------+ + + Replace ``FILE`` with the name of a local file to upload to the + ``container1`` container. + +#. List files in the ``container1`` container: + + .. code-block:: console + + $ openstack object list container1 + +------+ + | Name | + +------+ + | FILE | + +------+ + +#. Download a test file from the ``container1`` container: + + .. code-block:: console + + $ openstack object save container1 FILE + + Replace ``FILE`` with the name of the file uploaded to the + ``container1`` container. + + .. note:: + + This command provides no output. diff --git a/test-requirements.txt b/test-requirements.txt index 6f237978d3..933bc68cae 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,6 +8,7 @@ coverage nose nosexcover nosehtmloutput +openstackdocstheme>=1.0.3 # Apache-2.0 oslosphinx sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 # BSD os-api-ref>=0.1.0 # Apache-2.0 diff --git a/tox.ini b/tox.ini index 5becdf2a9e..54cbf1c0b9 100644 --- a/tox.ini +++ b/tox.ini @@ -74,6 +74,12 @@ commands = rm -rf api-ref/build sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html +[testenv:install-guide] +# NOTE(jaegerandi): this target does not use constraints because +# upstream infra does not yet support it. Once that's fixed, we can +# drop the install_command. 
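+# (The -U and --force-reinstall flags make pip upgrade and reinstall
+# the documentation dependencies even if they are already installed.)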
+install_command = pip install -U --force-reinstall {opts} {packages}
+commands = sphinx-build -a -E -W -d install-guide/build/doctrees -b html install-guide/source install-guide/build/html

[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt

From b53214e30b1fb19336b77a19982c84c2dc5dce7f Mon Sep 17 00:00:00 2001
From: yuyafei
Date: Thu, 7 Jul 2016 18:27:21 +0800
Subject: [PATCH 044/156] Correct reraising of exception

When an exception was caught and rethrown, it should call 'raise'
without any arguments because that shows the place where the exception
occurred initially instead of the place where it was re-raised.

Change-Id: I326dd8eaf221cbf3729beedaff81b416c59ae2e6
---
swift/common/manager.py | 2 +-
swift/common/utils.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/swift/common/manager.py b/swift/common/manager.py
index a19ade2215..f04f2a3e4b 100644
--- a/swift/common/manager.py
+++ b/swift/common/manager.py
@@ -316,7 +316,7 @@ class Manager(object):
 except OSError as e:
 # PID died before kill_group can take action?
 if e.errno != errno.ESRCH:
- raise e
+ raise
 else:
 print(_('Waited %(kill_wait)s seconds for %(server)s '
 'to die; giving up') %
diff --git a/swift/common/utils.py b/swift/common/utils.py
index d3ef5a7dcb..60036489b0 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -1727,7 +1727,7 @@ def get_logger(conf, name=None, log_to_console=False, log_route=None,
 except socket.error as e:
 # Either /dev/log isn't a UNIX socket or it does not exist at all
 if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
- raise e
+ raise
 handler = SysLogHandler(facility=facility)
 handler.setFormatter(formatter)
 logger.addHandler(handler)

From 2be1d6a77e9fcd4c92d487ba7116407ec5389723 Mon Sep 17 00:00:00 2001
From: Kazuhiro MIYAHARA
Date: Thu, 7 Jul 2016 21:28:07 +0900
Subject: [PATCH 045/156] Remove an unused variable from tests.py

The variable 'size' in TestFile.testMetadataNumberLimit is not used.
This patch removes the variable from the test.

Change-Id: I255a1dcee12bb6b8dec6ff26ed7edf93ab2acf64
---
test/functional/tests.py | 2 --
1 file changed, 2 deletions(-)

diff --git a/test/functional/tests.py b/test/functional/tests.py
index d083aa10c2..84b501a78f 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -1582,7 +1582,6 @@ class TestFile(Base):
 j = size_limit / (i * 2)

- size = 0
 metadata = {}
 while len(metadata.keys()) < i:
 key = Utils.create_ascii_name()
@@ -1592,7 +1591,6 @@ class TestFile(Base):
 key = key[:j]
 val = val[:j]

- size += len(key) + len(val)
 metadata[key] = val

 file_item = self.env.container.file(Utils.create_name())

From e5a6d458829b367ecc16af54a44f629cb1fdcd68 Mon Sep 17 00:00:00 2001
From: Christian Schwede
Date: Tue, 7 Jun 2016 10:35:18 +0000
Subject: [PATCH 046/156] Add ringbuilder tests for --yes option

Also added a Timeout class to test.unit to wrap possible long-running
functions. For example, if there is some regression and the "--yes"
argument is no longer evaluated correctly and the test expects some
keyboard input, it will be terminated after a few seconds to ensure
there is no long-running blocker on the gate. 
Change-Id: I07b17d21d5af7fcc594ce5319ae2b6f7f58df4bb --- test/unit/__init__.py | 18 ++++++++++++++++++ test/unit/cli/test_ringbuilder.py | 25 +++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 97df0004b2..104977b793 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -32,6 +32,7 @@ import eventlet from eventlet.green import socket from tempfile import mkdtemp from shutil import rmtree +import signal import json @@ -1061,3 +1062,20 @@ def mocked_http_conn(*args, **kwargs): def make_timestamp_iter(): return iter(Timestamp(t) for t in itertools.count(int(time.time()))) + + +class Timeout(object): + def __init__(self, seconds): + self.seconds = seconds + + def __enter__(self): + signal.signal(signal.SIGALRM, self._exit) + signal.alarm(self.seconds) + + def __exit__(self, type, value, traceback): + signal.alarm(0) + + def _exit(self, signum, frame): + class TimeoutException(Exception): + pass + raise TimeoutException diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 1f2b9494a5..b61907c967 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -29,6 +29,8 @@ from swift.cli.ringbuilder import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR from swift.common import exceptions from swift.common.ring import RingBuilder +from test.unit import Timeout + class RunSwiftRingBuilderMixin(object): @@ -1955,6 +1957,29 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["-safe", self.tmpfile] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + def test_remove_all_devices(self): + # Would block without the 'yes' argument + self.create_sample_ring() + argv = ["", self.tmpfile, "remove", "--weight", "100", "--yes"] + with Timeout(5): + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + def test_set_info_all_devices(self): + # Would block without the 'yes' argument + self.create_sample_ring() + argv = ["", self.tmpfile, "set_info", "--weight", "100", + "--change-meta", "something", "--yes"] + with Timeout(5): + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + + def test_set_weight_all_devices(self): + # Would block without the 'yes' argument + self.create_sample_ring() + argv = ["", self.tmpfile, "set_weight", + "--weight", "100", "200", "--yes"] + with Timeout(5): + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): From ca2f6d13b6aa79d5e3c184955280b4a3ea17cd7d Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 7 Jul 2016 11:31:31 +0100 Subject: [PATCH 047/156] Fix unicode errors in object controller logging Change swift.proxy.server.Application.error_occurred() to decode message as utf-8 in same way that the exception_occurred() method was changed in [1]. This prevents a unicode error when logging error responses in swift.proxy.controllers.base.Controller._make_request() for paths that have non-ascii characters. Although the unicode error is currently caught by a surrounding except clause, the logging and error limiting treatment is different for ascii vs non-ascii paths. This patch makes them consistent. Fix the server type reported in _make_request() to be the correct server type, not always 'Container Server'. Fix path arg passed to _get_conn_response in swift.proxy.controllers.obj.BaseObjectController to be req.path rather than req. 
Add unit tests for error_occurred() being called with non-ascii paths and extend tests for exception_occurred() (see Related-Bug). [1] Change-Id: Icb7284eb5abc9869c1620ee6366817112d8e5587 Related-Bug: #1597210 Change-Id: I285499d164bff94835bdddb25d2af6d73114c281 --- swift/proxy/controllers/base.py | 5 +- swift/proxy/controllers/obj.py | 2 +- swift/proxy/server.py | 2 +- test/unit/__init__.py | 4 +- test/unit/proxy/controllers/test_obj.py | 137 +++++++++++++++++++++++- test/unit/proxy/test_server.py | 54 ++++++++++ 6 files changed, 196 insertions(+), 8 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index c1a909dad5..f9b1175e6d 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -1545,10 +1545,11 @@ class Controller(object): self.app.error_occurred( node, _('ERROR %(status)d ' 'Trying to %(method)s %(path)s' - 'From Container Server') % { + ' From %(type)s Server') % { 'status': resp.status, 'method': method, - 'path': path}) + 'path': path, + 'type': self.server_type}) except (Exception, Timeout): self.app.exception_occurred( node, self.server_type, diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index 54ee996aa7..a948bbb6d2 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -349,7 +349,7 @@ class BaseObjectController(Controller): for putter in putters: if putter.failed: continue - pile.spawn(self._get_conn_response, putter, req, + pile.spawn(self._get_conn_response, putter, req.path, self.app.logger.thread_locals, final_phase=final_phase) def _handle_response(putter, response): diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 98072aaf26..c35a3e1a97 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -509,7 +509,7 @@ class Application(object): """ self._incr_node_errors(node) self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'), - {'msg': msg, 'ip': node['ip'], + {'msg': msg.decode('utf-8'), 'ip': node['ip'], 'port': node['port'], 'device': node['device']}) def iter_nodes(self, ring, partition, node_iter=None): diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 97df0004b2..7afc5a946e 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -959,9 +959,9 @@ def fake_http_connect(*code_iter, **kwargs): self.body = self.body[amt:] return rv - def send(self, amt=None): + def send(self, data=None): if self.give_send: - self.give_send(self, amt) + self.give_send(self, data) am_slow, value = self.get_slow() if am_slow: if self.received < 4: diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index e24d152d26..741be84b20 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -31,6 +31,7 @@ from six.moves import range import swift from swift.common import utils, swob, exceptions +from swift.common.exceptions import ChunkWriteTimeout from swift.common.header_key_dict import HeaderKeyDict from swift.proxy import server as proxy_server from swift.proxy.controllers import obj @@ -856,19 +857,151 @@ class TestReplicatedObjController(BaseObjectControllerMixin, node_error_count(self.app, object_ring.devs[1]), self.app.error_suppression_limit + 1) - def test_PUT_connect_exception_with_unicode_path_and_locale(self): + def test_PUT_connect_exception_with_unicode_path(self): expected = 201 statuses = ( Exception('Connection refused: Please insert ten dollars'), - 201, 201) + 201, 201, 201) req = 
swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89', method='PUT', body='life is utf-gr8') + self.app.logger.clear() with set_http_connect(*statuses): resp = req.get_response(self.app) self.assertEqual(resp.status_int, expected) + log_lines = self.app.logger.get_lines_for_level('error') + self.assertFalse(log_lines[1:]) + self.assertIn('ERROR with Object server', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn('re: Expect: 100-continue', log_lines[0]) + + def test_PUT_get_expect_errors_with_unicode_path(self): + def do_test(statuses): + req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89', + method='PUT', + body='life is utf-gr8') + self.app.logger.clear() + with set_http_connect(*statuses): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + log_lines = self.app.logger.get_lines_for_level('error') + self.assertFalse(log_lines[1:]) + return log_lines + + log_lines = do_test((201, (507, None), 201, 201)) + self.assertIn('ERROR Insufficient Storage', log_lines[0]) + + log_lines = do_test((201, (503, None), 201, 201)) + self.assertIn('ERROR 503 Expect: 100-continue From Object Server', + log_lines[0]) + + def test_PUT_send_exception_with_unicode_path(self): + def do_test(exc): + conns = set() + + def capture_send(conn, data): + conns.add(conn) + if len(conns) == 2: + raise exc + + req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89', + method='PUT', + body='life is utf-gr8') + self.app.logger.clear() + with set_http_connect(201, 201, 201, give_send=capture_send): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + log_lines = self.app.logger.get_lines_for_level('error') + self.assertFalse(log_lines[1:]) + self.assertIn('ERROR with Object server', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn('Trying to write to', log_lines[0]) + + do_test(Exception('Exception while sending data on connection')) + do_test(ChunkWriteTimeout()) + + def test_PUT_final_response_errors_with_unicode_path(self): + def do_test(statuses): + req = swob.Request.blank('/v1/AUTH_kilroy/%ED%88%8E/%E9%90%89', + method='PUT', + body='life is utf-gr8') + self.app.logger.clear() + with set_http_connect(*statuses): + resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + log_lines = self.app.logger.get_lines_for_level('error') + self.assertFalse(log_lines[1:]) + return req, log_lines + + req, log_lines = do_test((201, (100, Exception('boom')), 201)) + self.assertIn('ERROR with Object server', log_lines[0]) + self.assertIn(req.path.decode('utf-8'), log_lines[0]) + self.assertIn('Trying to get final status of PUT', log_lines[0]) + + req, log_lines = do_test((201, (100, Timeout()), 201)) + self.assertIn('ERROR with Object server', log_lines[0]) + self.assertIn(req.path.decode('utf-8'), log_lines[0]) + self.assertIn('Trying to get final status of PUT', log_lines[0]) + + req, log_lines = do_test((201, (100, 507), 201)) + self.assertIn('ERROR Insufficient Storage', log_lines[0]) + + req, log_lines = do_test((201, (100, 500), 201)) + self.assertIn('ERROR 500 From Object Server', log_lines[0]) + self.assertIn(req.path.decode('utf-8'), log_lines[0]) + + def test_DELETE_errors(self): + # verify logged errors with and without non-ascii characters in path + def do_test(path, statuses): + + req = swob.Request.blank('/v1' + path, + method='DELETE', + body='life is utf-gr8') + self.app.logger.clear() + with set_http_connect(*statuses): 
+ resp = req.get_response(self.app) + + self.assertEqual(resp.status_int, 201) + log_lines = self.app.logger.get_lines_for_level('error') + self.assertFalse(log_lines[1:]) + return req, log_lines + + req, log_lines = do_test('/AUTH_kilroy/ascii/ascii', + (201, 500, 201, 201)) + self.assertIn('Trying to DELETE', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn(' From Object Server', log_lines[0]) + + req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89', + (201, 500, 201, 201)) + self.assertIn('Trying to DELETE', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn(' From Object Server', log_lines[0]) + + req, log_lines = do_test('/AUTH_kilroy/ascii/ascii', + (201, 507, 201, 201)) + self.assertIn('ERROR Insufficient Storage', log_lines[0]) + + req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89', + (201, 507, 201, 201)) + self.assertIn('ERROR Insufficient Storage', log_lines[0]) + + req, log_lines = do_test('/AUTH_kilroy/ascii/ascii', + (201, Exception(), 201, 201)) + self.assertIn('Trying to DELETE', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn('ERROR with Object server', log_lines[0]) + + req, log_lines = do_test('/AUTH_kilroy/%ED%88%8E/%E9%90%89', + (201, Exception(), 201, 201)) + self.assertIn('Trying to DELETE', log_lines[0]) + self.assertIn(req.swift_entity_path.decode('utf-8'), log_lines[0]) + self.assertIn('ERROR with Object server', log_lines[0]) def test_PUT_error_during_transfer_data(self): class FakeReader(object): diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 6452fb5b0c..44a23ef6f4 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -874,6 +874,60 @@ class TestProxyServer(unittest.TestCase): self.assertEqual(controller.__name__, 'InfoController') + def test_exception_occurred(self): + def do_test(additional_info): + logger = debug_logger('test') + app = proxy_server.Application({}, FakeMemcache(), + account_ring=FakeRing(), + container_ring=FakeRing(), + logger=logger) + node = app.container_ring.get_part_nodes(0)[0] + node_key = app._error_limit_node_key(node) + self.assertNotIn(node_key, app._error_limiting) # sanity + try: + raise Exception('kaboom1!') + except Exception as err: + app.exception_occurred(node, 'server-type', additional_info) + + self.assertEqual(1, app._error_limiting[node_key]['errors']) + line = logger.get_lines_for_level('error')[-1] + self.assertIn('server-type server', line) + self.assertIn(additional_info.decode('utf8'), line) + self.assertIn(node['ip'], line) + self.assertIn(str(node['port']), line) + self.assertIn(node['device'], line) + log_args, log_kwargs = logger.log_dict['error'][-1] + self.assertTrue(log_kwargs['exc_info']) + self.assertEqual(err, log_kwargs['exc_info'][1]) + + do_test('success') + do_test('succès') + do_test(u'success') + + def test_error_occurred(self): + def do_test(msg): + logger = debug_logger('test') + app = proxy_server.Application({}, FakeMemcache(), + account_ring=FakeRing(), + container_ring=FakeRing(), + logger=logger) + node = app.container_ring.get_part_nodes(0)[0] + node_key = app._error_limit_node_key(node) + self.assertNotIn(node_key, app._error_limiting) # sanity + + app.error_occurred(node, msg) + + self.assertEqual(1, app._error_limiting[node_key]['errors']) + line = logger.get_lines_for_level('error')[-1] + self.assertIn(msg.decode('utf8'), line) + self.assertIn(node['ip'], line) + 
self.assertIn(str(node['port']), line) + self.assertIn(node['device'], line) + + do_test('success') + do_test('succès') + do_test(u'success') + def test_error_limit_methods(self): logger = debug_logger('test') app = proxy_server.Application({}, FakeMemcache(), From 4c0a1481f147b51cf67bdaaa9fc0101ad1b63620 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Thu, 7 Jul 2016 14:56:10 +0000 Subject: [PATCH 048/156] Fix typo in the account-server.conf manpage Change-Id: I4e7bb85ce746fcb1ec3a4cbf534761e4e47634c9 Closes-Bug: #1599888 --- doc/manpages/account-server.conf.5 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index 7123d8c876..1ad115f53a 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -77,7 +77,7 @@ The system user that the account server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR -Parent directory or where devices are mounted. Default is /srv/node. +Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not check if the devices are mounted to prevent accidentally writing to the root device. The default is set to true. From 9e82891f08da4fc933deef7839b9c205b7fc51b7 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Thu, 7 Jul 2016 15:21:01 +0000 Subject: [PATCH 049/156] Fix typo in object-server.conf and container-server.conf manpage Change-Id: Iffad70b2fd901b305dc66d363039b7df44d619da --- doc/manpages/container-server.conf.5 | 2 +- doc/manpages/object-server.conf.5 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index cf5c06295c..1954ead453 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -83,7 +83,7 @@ The system user that the container server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR -Parent directory or where devices are mounted. Default is /srv/node. +Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not check if the devices are mounted to prevent accidentally writing to the root device. The default is set to true. diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 29a07c5584..24156a47c6 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -77,7 +77,7 @@ The system user that the object server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR -Parent directory or where devices are mounted. Default is /srv/node. +Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not check if the devices are mounted to prevent accidentally writing to the root device. The default is set to true. 
From a53b12a62c0da2a654d13a26f75c7dc214c13e6a Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Wed, 8 Jun 2016 15:33:22 +0100
Subject: [PATCH 050/156] Don't encrypt update override etags for empty object

Fix an anomaly where object metadata for an empty object has no
encrypted etag, but if the encrypter received a container update
override etag in footers or headers then it would encrypt that, so
we'd have encrypted metadata in the container listing but not in the
object metadata. (Empty object etags are not encrypted because the
object content is revealed by its size anyway.)

This patch changes the override handling to not encrypt override etags
that correspond to an empty object, with one exception: if for some
reason the received override etag value is that of an empty string but
there *was* an object body, then we'll encrypt the override etag,
because its value is not obvious from the object size.

Change-Id: I8d7da34d6d98f351f59174883bc4d5ed0416c101
---
 swift/common/middleware/crypto/encrypter.py | 12 ++-
 swift/common/utils.py                       |  2 +
 swift/obj/diskfile.py                       |  3 +-
 .../middleware/crypto/test_encrypter.py     | 79 +++++++++++++++++--
 4 files changed, 83 insertions(+), 13 deletions(-)

diff --git a/swift/common/middleware/crypto/encrypter.py b/swift/common/middleware/crypto/encrypter.py
index 2719d47700..b6c651fb84 100644
--- a/swift/common/middleware/crypto/encrypter.py
+++ b/swift/common/middleware/crypto/encrypter.py
@@ -25,7 +25,8 @@ from swift.common.request_helpers import get_object_transient_sysmeta, \
     strip_user_meta_prefix, is_user_meta, update_etag_is_at_header
 from swift.common.swob import Request, Match, HTTPException, \
     HTTPUnprocessableEntity
-from swift.common.utils import get_logger, config_true_value
+from swift.common.utils import get_logger, config_true_value, \
+    MD5_OF_EMPTY_STRING


 def encrypt_header_val(crypto, value, key):
@@ -149,13 +150,18 @@ class EncInputWrapper(object):
                     'X-Object-Sysmeta-Container-Update-Override-Etag',
                     container_listing_etag_header) or plaintext_etag

-                if container_listing_etag is not None:
+                if (container_listing_etag is not None and
+                        (container_listing_etag != MD5_OF_EMPTY_STRING or
+                         plaintext_etag)):
                     # Encrypt the container-listing etag using the container key
                     # and a random IV, and use it to override the container update
                     # value, with the crypto parameters appended. We use the
                     # container key here so that only that key is required to
                     # decrypt all etag values in a container listing when handling
-                    # a container GET request.
+                    # a container GET request. Don't encrypt an EMPTY_ETAG
+                    # unless there actually was some body content, in which case
+                    # the container-listing etag is possibly conveying some
+                    # non-obvious information.
                     val, crypto_meta = encrypt_header_val(
                         self.crypto, container_listing_etag,
                         self.keys['container'])

diff --git a/swift/common/utils.py b/swift/common/utils.py
index d3ef5a7dcb..69a4727166 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -118,6 +118,8 @@ F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
 # Used by the parse_socket_string() function to validate IPv6 addresses
 IPV6_RE = re.compile("^\[(?P<host>.*)\](:(?P<port>[0-9]+))?$")
+MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'
+

 class InvalidHashPathConfigError(ValueError):

diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 7f9135f229..9e69e954f2 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -58,7 +58,7 @@ from swift.common.utils import mkdirs, Timestamp, \
     fsync_dir, drop_buffer_cache, lock_path, write_pickle, \
     config_true_value, listdir, split_path, ismount, remove_file, \
     get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
-    tpool_reraise
+    tpool_reraise, MD5_OF_EMPTY_STRING
 from swift.common.splice import splice, tee
 from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
     DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
@@ -86,7 +86,6 @@ TMP_BASE = 'tmp'
 get_data_dir = partial(get_policy_string, DATADIR_BASE)
 get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
 get_tmp_dir = partial(get_policy_string, TMP_BASE)
-MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'


 def _get_filename(fd):

diff --git a/test/unit/common/middleware/crypto/test_encrypter.py b/test/unit/common/middleware/crypto/test_encrypter.py
index 0f9553cad7..1fa765c66b 100644
--- a/test/unit/common/middleware/crypto/test_encrypter.py
+++ b/test/unit/common/middleware/crypto/test_encrypter.py
@@ -225,7 +225,7 @@ class TestEncrypter(unittest.TestCase):
         self.assertEqual('', resp.body)
         self.assertEqual(EMPTY_ETAG, resp.headers['Etag'])

-    def test_PUT_with_other_footers(self):
+    def _test_PUT_with_other_footers(self, override_etag):
         # verify handling of another middleware's footer callback
         cont_key = fetch_crypto_keys()['container']
         body_key = os.urandom(32)
@@ -240,7 +240,7 @@ class TestEncrypter(unittest.TestCase):
             'X-Object-Sysmeta-Container-Update-Override-Size':
                 'other override',
             'X-Object-Sysmeta-Container-Update-Override-Etag':
-                'final etag'}
+                override_etag}

         env = {'REQUEST_METHOD': 'PUT',
                CRYPTO_KEY_CALLBACK: fetch_crypto_keys,
@@ -304,7 +304,7 @@ class TestEncrypter(unittest.TestCase):
         cont_key = fetch_crypto_keys()['container']
         cont_etag_iv = base64.b64decode(actual_meta['iv'])
         self.assertEqual(FAKE_IV, cont_etag_iv)
-        self.assertEqual(encrypt('final etag', cont_key, cont_etag_iv),
+        self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv),
                          base64.b64decode(parts[0]))

         # verify body crypto meta
@@ -321,7 +321,15 @@ class TestEncrypter(unittest.TestCase):
                          base64.b64decode(actual['body_key']['iv']))
         self.assertEqual(fetch_crypto_keys()['id'], actual['key_id'])

-    def test_PUT_with_etag_override_in_headers(self):
+    def test_PUT_with_other_footers(self):
+        self._test_PUT_with_other_footers('override etag')
+
+    def test_PUT_with_other_footers_and_empty_etag(self):
+        # verify that an override etag value of EMPTY_ETAG will be encrypted
+        # when there was a non-zero body length
+        self._test_PUT_with_other_footers(EMPTY_ETAG)
+
+    def _test_PUT_with_etag_override_in_headers(self, override_etag):
         # verify handling of another middleware's
         # container-update-override-etag in headers
         plaintext = 'FAKE APP'
@@ -333,7 +341,7 @@ class TestEncrypter(unittest.TestCase):
                 'content-length': str(len(plaintext)),
                 'Etag': plaintext_etag,
                 'X-Object-Sysmeta-Container-Update-Override-Etag':
-                    'final etag'}
+                    override_etag}
         req = Request.blank(
             '/v1/a/c/o', environ=env, body=plaintext, headers=hdrs)
         self.app.register('PUT', '/v1/a/c/o', HTTPCreated, {})
@@ -366,9 +374,17 @@ class TestEncrypter(unittest.TestCase):
         cont_key = fetch_crypto_keys()['container']
         cont_etag_iv = base64.b64decode(actual_meta['iv'])
self.assertEqual(FAKE_IV, cont_etag_iv) - self.assertEqual(encrypt('final etag', cont_key, cont_etag_iv), + self.assertEqual(encrypt(override_etag, cont_key, cont_etag_iv), base64.b64decode(parts[0])) + def test_PUT_with_etag_override_in_headers(self): + self._test_PUT_with_etag_override_in_headers('override_etag') + + def test_PUT_with_etag_override_in_headers_and_empty_etag(self): + # verify that an override etag value of EMPTY_ETAG will be encrypted + # when there was a non-zero body length + self._test_PUT_with_etag_override_in_headers(EMPTY_ETAG) + def test_PUT_with_bad_etag_in_other_footers(self): # verify that etag supplied in footers from other middleware overrides # header etag when validating inbound plaintext etags @@ -448,9 +464,10 @@ class TestEncrypter(unittest.TestCase): # check that an upstream footer callback gets called other_footers = { - 'Etag': 'other etag', + 'Etag': EMPTY_ETAG, 'X-Object-Sysmeta-Other': 'other sysmeta', - 'X-Backend-Container-Update-Override-Etag': 'other override'} + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'other override'} env.update({'swift.callback.update_footers': lambda footers: footers.update(other_footers)}) req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs) @@ -461,6 +478,52 @@ class TestEncrypter(unittest.TestCase): self.assertEqual('201 Created', resp.status) self.assertEqual('response etag', resp.headers['Etag']) self.assertEqual(1, len(call_headers)) + + # verify encrypted override etag for container update. + self.assertIn( + 'X-Object-Sysmeta-Container-Update-Override-Etag', call_headers[0]) + parts = call_headers[0][ + 'X-Object-Sysmeta-Container-Update-Override-Etag'].rsplit(';', 1) + self.assertEqual(2, len(parts)) + cont_key = fetch_crypto_keys()['container'] + + param = parts[1].strip() + crypto_meta_tag = 'swift_meta=' + self.assertTrue(param.startswith(crypto_meta_tag), param) + actual_meta = json.loads( + urllib.unquote_plus(param[len(crypto_meta_tag):])) + self.assertEqual(Crypto().cipher, actual_meta['cipher']) + self.assertEqual(fetch_crypto_keys()['id'], actual_meta['key_id']) + + cont_etag_iv = base64.b64decode(actual_meta['iv']) + self.assertEqual(FAKE_IV, cont_etag_iv) + self.assertEqual(encrypt('other override', cont_key, cont_etag_iv), + base64.b64decode(parts[0])) + + # verify that other middleware's footers made it to app + other_footers.pop('X-Object-Sysmeta-Container-Update-Override-Etag') + for k, v in other_footers.items(): + self.assertEqual(v, call_headers[0][k]) + # verify no encryption footers + for k in call_headers[0]: + self.assertFalse(k.lower().startswith('x-object-sysmeta-crypto-')) + + # if upstream footer override etag is for an empty body then check that + # it is not encrypted + other_footers = { + 'Etag': EMPTY_ETAG, + 'X-Object-Sysmeta-Container-Update-Override-Etag': EMPTY_ETAG} + env.update({'swift.callback.update_footers': + lambda footers: footers.update(other_footers)}) + req = Request.blank('/v1/a/c/o', environ=env, body='', headers=hdrs) + + call_headers = [] + resp = req.get_response(encrypter.Encrypter(NonReadingApp(), {})) + + self.assertEqual('201 Created', resp.status) + self.assertEqual('response etag', resp.headers['Etag']) + self.assertEqual(1, len(call_headers)) + # verify that other middleware's footers made it to app for k, v in other_footers.items(): self.assertEqual(v, call_headers[0][k]) From 008a037a3611734c2ba43c9aa0fe413bf5a2fa42 Mon Sep 17 00:00:00 2001 From: Maria Malyarova Date: Thu, 7 Jul 2016 20:13:03 +0300 Subject: [PATCH 051/156] 
Another amendment with missing parenthesis TrivialFix Change-Id: Id44ae27bc39ea97be9eb092f8a99a06056b86392 --- bin/swift-reconciler-enqueue | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/swift-reconciler-enqueue b/bin/swift-reconciler-enqueue index d707571d2e..2a9dcc3a55 100755 --- a/bin/swift-reconciler-enqueue +++ b/bin/swift-reconciler-enqueue @@ -11,6 +11,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import sys from optparse import OptionParser @@ -67,7 +68,7 @@ def main(): policy.idx, timestamp, options.op, force=options.force) if not container_name: return 'ERROR: unable to enqueue!' - print container_name + print(container_name) if __name__ == "__main__": From bfc8c59a08954f57d8f64ca66db9a1ba72207710 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 7 Jul 2016 18:35:48 +0100 Subject: [PATCH 052/156] Add encryption package requirements to SAIO instructions libssl-dev/openssl-devel are already listed in other-requirements.txt; add them to installation instructions in the SAIO docs. Change-Id: I3dc07213ff8dac1299d3eb68d3448a77e15c79af --- doc/source/development_saio.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index 1d497a526f..f258ee9659 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -38,7 +38,7 @@ Installing dependencies sudo apt-get update sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \ git-core libffi-dev python-setuptools \ - liberasurecode-dev + liberasurecode-dev libssl-dev sudo apt-get install python-coverage python-dev python-nose \ python-xattr python-eventlet \ python-greenlet python-pastedeploy \ @@ -50,7 +50,7 @@ Installing dependencies sudo yum update sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \ libffi-devel xinetd liberasurecode-devel \ - python-setuptools \ + openssl-devel python-setuptools \ python-coverage python-devel python-nose \ pyxattr python-eventlet \ python-greenlet python-paste-deploy \ From b6be925cf59e0cf719307cbcdda876c8d135eda5 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 23 Jun 2016 12:22:02 -0700 Subject: [PATCH 053/156] Tighten memcached tests Check flags set and key used; stop relying on dict.values() Change-Id: Ibf9228dabd66ae98fb3b64050ccd46f5032d0df9 --- swift/common/memcached.py | 2 +- test/unit/common/test_memcached.py | 61 ++++++++++++++++++++---------- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/swift/common/memcached.py b/swift/common/memcached.py index 9640ac6f8f..5f47684c93 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -388,7 +388,7 @@ class MemcacheRing(object): Sets multiple key/value pairs in memcache. 
:param mapping: dictionary of keys and values to be set in memcache - :param servery_key: key to use in determining which server in the ring + :param server_key: key to use in determining which server in the ring is used :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use diff --git a/test/unit/common/test_memcached.py b/test/unit/common/test_memcached.py index 0ac4878c75..86d704c503 100644 --- a/test/unit/common/test_memcached.py +++ b/test/unit/common/test_memcached.py @@ -17,6 +17,7 @@ """Tests for swift.common.utils""" from collections import defaultdict +from hashlib import md5 import logging import socket import time @@ -293,30 +294,38 @@ class TestMemcached(unittest.TestCase): finally: sock.close() - def test_set_get(self): + def test_set_get_json(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) mock = MockMemcached() memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool( [(mock, mock)] * 2) + cache_key = md5('some_key').hexdigest() + memcache_client.set('some_key', [1, 2, 3]) self.assertEqual(memcache_client.get('some_key'), [1, 2, 3]) - self.assertEqual(mock.cache.values()[0][1], '0') + # See JSON_FLAG + self.assertEqual(mock.cache, {cache_key: ('2', '0', '[1, 2, 3]')}) + memcache_client.set('some_key', [4, 5, 6]) self.assertEqual(memcache_client.get('some_key'), [4, 5, 6]) + self.assertEqual(mock.cache, {cache_key: ('2', '0', '[4, 5, 6]')}) + memcache_client.set('some_key', ['simple str', 'utf8 str éà']) # As per http://wiki.openstack.org/encoding, # we should expect to have unicode self.assertEqual( memcache_client.get('some_key'), ['simple str', u'utf8 str éà']) - self.assertTrue(float(mock.cache.values()[0][1]) == 0) + self.assertEqual(mock.cache, {cache_key: ( + '2', '0', '["simple str", "utf8 str \\u00e9\\u00e0"]')}) + memcache_client.set('some_key', [1, 2, 3], time=20) - self.assertEqual(mock.cache.values()[0][1], '20') + self.assertEqual(mock.cache, {cache_key: ('2', '20', '[1, 2, 3]')}) sixtydays = 60 * 24 * 60 * 60 esttimeout = time.time() + sixtydays memcache_client.set('some_key', [1, 2, 3], time=sixtydays) - self.assertTrue( - -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + _junk, cache_timeout, _junk = mock.cache[cache_key] + self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1) def test_incr(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) @@ -343,25 +352,32 @@ class TestMemcached(unittest.TestCase): mock = MockMemcached() memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool( [(mock, mock)] * 2) + cache_key = md5('some_key').hexdigest() + memcache_client.incr('some_key', delta=5, time=55) self.assertEqual(memcache_client.get('some_key'), '5') - self.assertEqual(mock.cache.values()[0][1], '55') + self.assertEqual(mock.cache, {cache_key: ('0', '55', '5')}) + memcache_client.delete('some_key') self.assertEqual(memcache_client.get('some_key'), None) + fiftydays = 50 * 24 * 60 * 60 esttimeout = time.time() + fiftydays memcache_client.incr('some_key', delta=5, time=fiftydays) self.assertEqual(memcache_client.get('some_key'), '5') - self.assertTrue( - -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) + _junk, cache_timeout, _junk = mock.cache[cache_key] + self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1) + memcache_client.delete('some_key') self.assertEqual(memcache_client.get('some_key'), None) + memcache_client.incr('some_key', delta=5) self.assertEqual(memcache_client.get('some_key'), '5') - 
self.assertEqual(mock.cache.values()[0][1], '0') + self.assertEqual(mock.cache, {cache_key: ('0', '0', '5')}) + memcache_client.incr('some_key', delta=5, time=55) self.assertEqual(memcache_client.get('some_key'), '10') - self.assertEqual(mock.cache.values()[0][1], '0') + self.assertEqual(mock.cache, {cache_key: ('0', '0', '10')}) def test_decr(self): memcache_client = memcached.MemcacheRing(['1.2.3.4:11211']) @@ -409,28 +425,35 @@ class TestMemcached(unittest.TestCase): mock = MockMemcached() memcache_client._client_cache['1.2.3.4:11211'] = MockedMemcachePool( [(mock, mock)] * 2) + memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key') self.assertEqual( memcache_client.get_multi(('some_key2', 'some_key1'), 'multi_key'), [[4, 5, 6], [1, 2, 3]]) - self.assertEqual(mock.cache.values()[0][1], '0') - self.assertEqual(mock.cache.values()[1][1], '0') + for key in ('some_key1', 'some_key2'): + key = md5(key).hexdigest() + self.assertIn(key, mock.cache) + _junk, cache_timeout, _junk = mock.cache[key] + self.assertEqual(cache_timeout, '0') + memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', time=20) - self.assertEqual(mock.cache.values()[0][1], '20') - self.assertEqual(mock.cache.values()[1][1], '20') + for key in ('some_key1', 'some_key2'): + key = md5(key).hexdigest() + _junk, cache_timeout, _junk = mock.cache[key] + self.assertEqual(cache_timeout, '20') fortydays = 50 * 24 * 60 * 60 esttimeout = time.time() + fortydays memcache_client.set_multi( {'some_key1': [1, 2, 3], 'some_key2': [4, 5, 6]}, 'multi_key', time=fortydays) - self.assertTrue( - -1 <= float(mock.cache.values()[0][1]) - esttimeout <= 1) - self.assertTrue( - -1 <= float(mock.cache.values()[1][1]) - esttimeout <= 1) + for key in ('some_key1', 'some_key2'): + key = md5(key).hexdigest() + _junk, cache_timeout, _junk = mock.cache[key] + self.assertAlmostEqual(float(cache_timeout), esttimeout, delta=1) self.assertEqual(memcache_client.get_multi( ('some_key2', 'some_key1', 'not_exists'), 'multi_key'), [[4, 5, 6], [1, 2, 3], None]) From 6f230c7ea0ffd3b0b85c4bf5701e57e0f9fb1570 Mon Sep 17 00:00:00 2001 From: Nandini Tata Date: Thu, 7 Jul 2016 21:24:52 +0000 Subject: [PATCH 054/156] Fixed inconsistent naming conventions Fixed naming conventions of Keystone, Swift and proxy servers in the docs. Change-Id: I294afd8d7bffa8c1fc299f5812effacb9ad08910 --- doc/source/deployment_guide.rst | 8 ++++---- doc/source/development_middleware.rst | 2 +- doc/source/overview_auth.rst | 6 +++--- doc/source/overview_expiring_objects.rst | 2 +- doc/source/overview_large_objects.rst | 6 +++--- doc/source/overview_policies.rst | 2 +- doc/source/overview_reaper.rst | 4 ++-- doc/source/overview_replication.rst | 2 +- doc/source/ratelimit.rst | 2 +- 9 files changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 1935df7cd0..c1173fd970 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -17,8 +17,8 @@ or 6. Deployment Options ------------------ -The swift services run completely autonomously, which provides for a lot of -flexibility when architecting the hardware deployment for swift. The 4 main +The Swift services run completely autonomously, which provides for a lot of +flexibility when architecting the hardware deployment for Swift. The 4 main services are: #. Proxy Services @@ -265,7 +265,7 @@ lexicographical order. Filenames starting with '.' are ignored. 
A mixture of file and directory configuration paths is not supported - if the configuration path is a file only that file will be parsed. -The swift service management tool ``swift-init`` has adopted the convention of +The Swift service management tool ``swift-init`` has adopted the convention of looking for ``/etc/swift/{type}-server.conf.d/`` if the file ``/etc/swift/{type}-server.conf`` file does not exist. @@ -1581,7 +1581,7 @@ We do not recommend running Swift on RAID, but if you are using RAID it is also important to make sure that the proper sunit and swidth settings get set so that XFS can make most efficient use of the RAID array. -For a standard swift install, all data drives are mounted directly under +For a standard Swift install, all data drives are mounted directly under ``/srv/node`` (as can be seen in the above example of mounting ``/dev/sda1`` as ``/srv/node/sda``). If you choose to mount the drives in another directory, be sure to set the `devices` config option in all of the server configs to diff --git a/doc/source/development_middleware.rst b/doc/source/development_middleware.rst index 6fef62e22e..9612be9822 100644 --- a/doc/source/development_middleware.rst +++ b/doc/source/development_middleware.rst @@ -196,7 +196,7 @@ headers) All user resources in Swift (i.e. account, container, objects) can have user metadata associated with them. Middleware may also persist custom metadata to accounts and containers safely using System Metadata. Some -core swift features which predate sysmeta have added exceptions for +core Swift features which predate sysmeta have added exceptions for custom non-user metadata headers (e.g. :ref:`acls`, :ref:`large-objects`) diff --git a/doc/source/overview_auth.rst b/doc/source/overview_auth.rst index aa5a0c61e5..f0bba2ddb7 100644 --- a/doc/source/overview_auth.rst +++ b/doc/source/overview_auth.rst @@ -102,7 +102,7 @@ reseller_request to True. This can be used by other middlewares. TempAuth will now allow OPTIONS requests to go through without a token. -The user starts a session by sending a ReST request to the auth system to +The user starts a session by sending a REST request to the auth system to receive the auth token and a URL to the Swift system. ------------- @@ -143,7 +143,7 @@ having this in your ``/etc/keystone/default_catalog.templates`` :: catalog.RegionOne.object_store.adminURL = http://swiftproxy:8080/ catalog.RegionOne.object_store.internalURL = http://swiftproxy:8080/v1/AUTH_$(tenant_id)s -On your Swift Proxy server you will want to adjust your main pipeline +On your Swift proxy server you will want to adjust your main pipeline and add auth_token and keystoneauth in your ``/etc/swift/proxy-server.conf`` like this :: @@ -326,7 +326,7 @@ Extending Auth TempAuth is written as wsgi middleware, so implementing your own auth is as easy as writing new wsgi middleware, and plugging it in to the proxy server. -The KeyStone project and the Swauth project are examples of additional auth +The Keystone project and the Swauth project are examples of additional auth services. Also, see :doc:`development_auth`. diff --git a/doc/source/overview_expiring_objects.rst b/doc/source/overview_expiring_objects.rst index 90cab8f862..b889ce7758 100644 --- a/doc/source/overview_expiring_objects.rst +++ b/doc/source/overview_expiring_objects.rst @@ -12,7 +12,7 @@ As expiring objects are added to the system, the object servers will record the Usually, just one instance of the ``swift-object-expirer`` daemon needs to run for a cluster. 
This isn't exactly automatic failover high availability, but if this daemon doesn't run for a few hours it should not be any real issue. The expired-but-not-yet-deleted objects will still ``404 Not Found`` if someone tries to ``GET`` or ``HEAD`` them and they'll just be deleted a bit later when the daemon is restarted.

-By default, the ``swift-object-expirer`` daemon will run with a concurrency of 1. Increase this value to get more concurrency. A concurrency of 1 may not be enough to delete expiring objects in a timely fashion for a particular swift cluster.
+By default, the ``swift-object-expirer`` daemon will run with a concurrency of 1. Increase this value to get more concurrency. A concurrency of 1 may not be enough to delete expiring objects in a timely fashion for a particular Swift cluster.

 It is possible to run multiple daemons to do different parts of the work if a single process with a concurrency of more than 1 is not enough (see the sample config file for details).

diff --git a/doc/source/overview_large_objects.rst b/doc/source/overview_large_objects.rst
index 85a972120c..425bb1e8c7 100644
--- a/doc/source/overview_large_objects.rst
+++ b/doc/source/overview_large_objects.rst
@@ -90,7 +90,7 @@ History

 Dynamic large object support has gone through various iterations before
 settling on this implementation.

-The primary factor driving the limitation of object size in swift is
+The primary factor driving the limitation of object size in Swift is
 maintaining balance among the partitions of the ring. To maintain an even
 dispersion of disk usage throughout the cluster the obvious storage pattern
 was to simply split larger objects into smaller segments, which could then be
@@ -121,7 +121,7 @@ The current "user manifest" design was chosen in order to provide a
 transparent download of large objects to the client and still provide the
 uploading client a clean API to support segmented uploads.

-To meet an many use cases as possible swift supports two types of large
+To meet as many use cases as possible Swift supports two types of large
 object manifests. Dynamic and static large object manifests both support
 the same idea of allowing the user to upload many segments to be later
 downloaded as a single file.
@@ -143,7 +143,7 @@ also improves concurrent upload speed. It has the disadvantage that the
 manifest is finalized once PUT. Any changes to it means it has to be
 replaced.

 Between these two methods the user has great flexibility in how (s)he chooses
-to upload and retrieve large objects to swift. Swift does not, however, stop
+to upload and retrieve large objects to Swift. Swift does not, however, stop
 the user from harming themselves. In both cases the segments are deletable by
 the user at any time. If a segment was deleted by mistake, a dynamic large
 object, having no way of knowing it was ever there, would happily ignore the

diff --git a/doc/source/overview_policies.rst b/doc/source/overview_policies.rst
index 560320ae3e..6df467d676 100644
--- a/doc/source/overview_policies.rst
+++ b/doc/source/overview_policies.rst
@@ -49,7 +49,7 @@ Containers and Policies
 Policies are implemented at the container level. There are many advantages
 to this approach, not the least of which is how easy it makes life on
 applications that want to take advantage of them. It also ensures that
-Storage Policies remain a core feature of swift independent of the auth
+Storage Policies remain a core feature of Swift independent of the auth
 implementation.
Policies were not implemented at the account/auth layer because it would require changes to all auth systems in use by Swift deployers. Each container has a new special immutable metadata element called diff --git a/doc/source/overview_reaper.rst b/doc/source/overview_reaper.rst index d42539c6d6..7e791c31f0 100644 --- a/doc/source/overview_reaper.rst +++ b/doc/source/overview_reaper.rst @@ -18,7 +18,7 @@ account-server.conf to delay the actual deletion of data. At this time, there is no utility to undelete an account; one would have to update the account database replicas directly, setting the status column to an empty string and updating the put_timestamp to be greater than the delete_timestamp. (On the -TODO list is writing a utility to perform this task, preferably through a ReST +TODO list is writing a utility to perform this task, preferably through a REST call.) The account reaper runs on each account server and scans the server @@ -53,7 +53,7 @@ History At first, a simple approach of deleting an account through completely external calls was considered as it required no changes to the system. All data would simply be deleted in the same way the actual user would, through the public -ReST API. However, the downside was that it would use proxy resources and log +REST API. However, the downside was that it would use proxy resources and log everything when it didn't really need to. Also, it would likely need a dedicated server or two, just for issuing the delete requests. diff --git a/doc/source/overview_replication.rst b/doc/source/overview_replication.rst index 56aeeacd7d..ad9d78fc01 100644 --- a/doc/source/overview_replication.rst +++ b/doc/source/overview_replication.rst @@ -2,7 +2,7 @@ Replication =========== -Because each replica in swift functions independently, and clients generally +Because each replica in Swift functions independently, and clients generally require only a simple majority of nodes responding to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by diff --git a/doc/source/ratelimit.rst b/doc/source/ratelimit.rst index 91b5953ea2..81832f2bf2 100644 --- a/doc/source/ratelimit.rst +++ b/doc/source/ratelimit.rst @@ -4,7 +4,7 @@ Rate Limiting ============= -Rate limiting in swift is implemented as a pluggable middleware. Rate +Rate limiting in Swift is implemented as a pluggable middleware. Rate limiting is performed on requests that result in database writes to the account and container sqlite dbs. It uses memcached and is dependent on the proxy servers having highly synchronized time. The rate limits are From da317f01c6aeece5c4c4bccb49972a84a3b5580f Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Thu, 7 Jul 2016 17:58:58 +0000 Subject: [PATCH 055/156] Run flake8 against scripts in bin Just having a `flake8 --filename=swift* bin` command is insufficient. 
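(With the `filename` setting added to tox.ini below, the normal flake8
run picks up the extensionless scripts under bin as well; assuming
flake8's usual comma-separated pattern handling, a roughly equivalent
one-off invocation would be `flake8 --filename='*.py,bin/*' bin`.)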
Change-Id: Ia2a5c364e52d9972d31e6b5e22366503894b720d --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 54cbf1c0b9..200e7a24ed 100644 --- a/tox.ini +++ b/tox.ini @@ -100,6 +100,7 @@ commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate # H501: Do not use self.__dict__ for string formatting ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501 exclude = .venv,.tox,dist,*egg +filename = *.py,bin/* show-source = True [testenv:bindep] From dcee7028019b89703fb5e25dd59fdb6fd882129a Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Thu, 23 Jun 2016 13:40:42 +0200 Subject: [PATCH 056/156] Python 3: fix urllib import Replace urllib.quote import with six.moves.urllib.parse.quote, so the code works on Python 2 and Python 3. Change-Id: I17e9cd9668661b6a67f33db83e0cbfc8ea6e3ca6 --- swift/common/middleware/copy.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py index b446b1b7b3..6f7cd4d17d 100644 --- a/swift/common/middleware/copy.py +++ b/swift/common/middleware/copy.py @@ -132,9 +132,8 @@ backwards compatibility. At first chance, set ``object_post_as_copy`` to """ import os -from urllib import quote from ConfigParser import ConfigParser, NoSectionError, NoOptionError -from six.moves.urllib.parse import unquote +from six.moves.urllib.parse import quote, unquote from swift.common import utils from swift.common.utils import get_logger, \ From c21375227165077d48bb7ef4e64a11f46d68b696 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Wed, 29 Jun 2016 09:30:34 +0200 Subject: [PATCH 057/156] Update dnspython to 1.14 dnspython 1.14 adds Python 3 support and so can now be used on Python 2 and Python 3. Drop dnspython3 dependency. Change-Id: I0a860b03800aeeed4375f528e6bf9cca57129db7 --- requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3c17288b9b..eed45fa8f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,8 +2,7 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. -dnspython>=1.12.0;python_version<'3.0' -dnspython3>=1.12.0;python_version>='3.0' +dnspython>=1.14.0 # http://www.dnspython.org/LICENSE eventlet>=0.17.4 # MIT greenlet>=0.3.1 netifaces>=0.5,!=0.10.0,!=0.10.1 From ffaef489c6d1420ba6f4baf50b8fe84aa3d86319 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Fri, 1 Jul 2016 14:39:35 +0100 Subject: [PATCH 058/156] Add encrypter and decrypter links to middleware.rst Drive-by fix for crypto filter_factory test. Add note to encryption doc to highlight that root secret should not be changed (follow up on earlier review comment). Co-Authored-By: Tim Burke Change-Id: I9776cddd4d045408325342983e285a00c992bfae --- doc/source/middleware.rst | 16 +++++ doc/source/overview_encryption.rst | 5 ++ swift/common/middleware/crypto/__init__.py | 3 +- .../common/middleware/crypto/test_crypto.py | 63 ++++++++++++++----- 4 files changed, 70 insertions(+), 17 deletions(-) diff --git a/doc/source/middleware.rst b/doc/source/middleware.rst index f636c11f91..91f4c05c34 100644 --- a/doc/source/middleware.rst +++ b/doc/source/middleware.rst @@ -101,10 +101,21 @@ the DLO docs for :ref:`dlo-doc` further details. Encryption ========== +Encryption middleware should be deployed in conjunction with the +:ref:`keymaster` middleware. + .. automodule:: swift.common.middleware.crypto :members: :show-inheritance: +.. 
automodule:: swift.common.middleware.crypto.encrypter + :members: + :show-inheritance: + +.. automodule:: swift.common.middleware.crypto.decrypter + :members: + :show-inheritance: + .. _formpost: FormPost @@ -132,9 +143,14 @@ Healthcheck :members: :show-inheritance: +.. _keymaster: + Keymaster ========= +Keymaster middleware should be deployed in conjunction with the +:ref:`encryption` middleware. + .. automodule:: swift.common.middleware.crypto.keymaster :members: :show-inheritance: diff --git a/doc/source/overview_encryption.rst b/doc/source/overview_encryption.rst index 6aa24636c6..8cbee8132b 100644 --- a/doc/source/overview_encryption.rst +++ b/doc/source/overview_encryption.rst @@ -102,6 +102,11 @@ been chosen because it is the length of a base-64 encoded 32 byte value. should not be stored on any disk that is in any account, container or object ring. + The ``encryption_root_secret`` value should not be changed once deployed. + Doing so would prevent Swift from properly decrypting data that was + encrypted using the former value, and would therefore result in the loss of + that data. + One method for generating a suitable value for ``encryption_root_secret`` is to use the ``openssl`` command line tool:: diff --git a/swift/common/middleware/crypto/__init__.py b/swift/common/middleware/crypto/__init__.py index 55fd93a046..b526fcbaa7 100644 --- a/swift/common/middleware/crypto/__init__.py +++ b/swift/common/middleware/crypto/__init__.py @@ -14,7 +14,8 @@ # limitations under the License. """ Implements middleware for object encryption which comprises an instance of a -Decrypter combined with an instance of an Encrypter. +:class:`~swift.common.middleware.crypto.decrypter.Decrypter` combined with an +instance of an :class:`~swift.common.middleware.crypto.encrypter.Encrypter`. """ from swift.common.middleware.crypto.decrypter import Decrypter from swift.common.middleware.crypto.encrypter import Encrypter diff --git a/test/unit/common/middleware/crypto/test_crypto.py b/test/unit/common/middleware/crypto/test_crypto.py index c5f6cd0cd7..882e959de9 100644 --- a/test/unit/common/middleware/crypto/test_crypto.py +++ b/test/unit/common/middleware/crypto/test_crypto.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import unittest +import mock from swift.common import utils from swift.common.middleware import crypto @@ -20,20 +21,50 @@ from swift.common.middleware import crypto class TestCrypto(unittest.TestCase): def test_filter_factory(self): - factory = crypto.filter_factory({}) - self.assertTrue(callable(factory)) - self.assertIsInstance(factory({}), crypto.decrypter.Decrypter) - self.assertIsInstance(factory({}).app, crypto.encrypter.Encrypter) - self.assertIn('encryption', utils._swift_admin_info) - self.assertDictEqual( - {'enabled': True}, utils._swift_admin_info['encryption']) - self.assertNotIn('encryption', utils._swift_info) + def do_test(conf, expect_enabled): + fake_app = object() - factory = crypto.filter_factory({'disable_encryption': True}) - self.assertTrue(callable(factory)) - self.assertIsInstance(factory({}), crypto.decrypter.Decrypter) - self.assertIsInstance(factory({}).app, crypto.encrypter.Encrypter) - self.assertIn('encryption', utils._swift_admin_info) - self.assertDictEqual( - {'enabled': False}, utils._swift_admin_info['encryption']) - self.assertNotIn('encryption', utils._swift_info) + with mock.patch.dict('swift.common.utils._swift_admin_info', + clear=True): + # we're not expecting utils._swift_info to be modified but mock + # it anyway just in case it is + with mock.patch.dict('swift.common.utils._swift_info', + clear=True): + # Sanity checks... + self.assertNotIn('encryption', utils._swift_admin_info) + self.assertNotIn('encryption', + utils.get_swift_info(admin=True)) + self.assertNotIn('encryption', + utils.get_swift_info(admin=True)['admin']) + + factory = crypto.filter_factory(conf) + self.assertTrue(callable(factory)) + filtered_app = factory(fake_app) + + self.assertNotIn('encryption', utils._swift_info) + self.assertNotIn('encryption', utils.get_swift_info()) + self.assertNotIn('encryption', + utils.get_swift_info(admin=True)) + + self.assertIn('encryption', utils._swift_admin_info) + self.assertDictEqual({'enabled': expect_enabled}, + utils._swift_admin_info['encryption']) + self.assertIn('encryption', + utils.get_swift_info(admin=True)['admin']) + self.assertDictEqual( + {'enabled': expect_enabled}, + utils.get_swift_info( + admin=True)['admin']['encryption']) + + self.assertIsInstance(filtered_app, crypto.decrypter.Decrypter) + self.assertIsInstance(filtered_app.app, crypto.encrypter.Encrypter) + self.assertIs(filtered_app.app.app, fake_app) + + # default enabled + do_test({}, True) + + # explicitly enabled + do_test({'disable_encryption': False}, True) + + # explicitly disabled + do_test({'disable_encryption': True}, False) From 54ed0842344b6b06d5d874a297754fefcd993585 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Fri, 20 May 2016 16:26:15 +0000 Subject: [PATCH 059/156] Add region in swift-ring-builder add In the swift deployment guide, region is missing from the syntax of adding a new device to the swift-ring-builder. This patch adds region in the syntax. Change-Id: I43e247c92d461efd530c0f82ca3daddcb9e2ba5b Closes-Bug: #1584127 --- doc/source/deployment_guide.rst | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 1935df7cd0..b07a3fce18 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -114,17 +114,18 @@ specific partition can be moved in succession (24 is a good value for this). 
Devices can be added to the ring with::

-    swift-ring-builder <builder_file> add z<zone>-<ip>:<port>/<device_name>_<meta> <weight>
+    swift-ring-builder <builder_file> add r<region>z<zone>-<ip>:<port>/<device_name>_<meta> <weight>

 This will add a device to the ring where <builder_file> is the name of the
-builder file that was created previously, <zone> is the number of the zone
-this device is in, <ip> is the ip address of the server the device is in,
-<port> is the port number that the server is running on, <device_name> is
-the name of the device on the server (for example: sdb1), <meta> is a string
-of metadata for the device (optional), and <weight> is a float weight that
-determines how many partitions are put on the device relative to the rest of
-the devices in the cluster (a good starting point is 100.0 x TB on the drive).
-Add each device that will be initially in the cluster.
+builder file that was created previously, <region> is the number of the region
+the zone is in, <zone> is the number of the zone this device is in, <ip> is
+the ip address of the server the device is in, <port> is the port number that
+the server is running on, <device_name> is the name of the device on the server
+(for example: sdb1), <meta> is a string of metadata for the device (optional),
+and <weight> is a float weight that determines how many partitions are put on
+the device relative to the rest of the devices in the cluster (a good starting
+point is 100.0 x TB on the drive). Add each device that will be initially in the
+cluster.

 Once all of the devices are added to the ring, run::

From 9890184ea9378fbba1cb76e861f5a20bdd36b7c9 Mon Sep 17 00:00:00 2001
From: Tim Burke
Date: Thu, 7 Jul 2016 18:00:05 +0000
Subject: [PATCH 060/156] Turn on H233 and start using print function

As much as anything, I'm just tired of seeing a bunch of piecemeal
fixes.

Note that we *need* to include

    from __future__ import print_function

in order to support things like

    print()  # Would print "()" (the repr of an empty tuple) otherwise
    print(foo, end='')  # Would SyntaxError
    print(bar, file=sys.stderr)  # Would SyntaxError

Change-Id: I8fdf0740e292eb1ee785512d02e8c552781dcae1
---
 bin/swift-account-audit     | 74 ++++++++++++++++---------------
 bin/swift-config            | 13 +++---
 bin/swift-dispersion-report | 87 +++++++++++++++++++------------------
 bin/swift-drive-audit       |  4 +-
 bin/swift-get-nodes         |  2 +-
 bin/swift-init              |  4 +-
 bin/swift-orphans           | 14 +++---
 bin/swift-recon-cron        |  8 ++--
 bin/swift-temp-url          | 49 +++++++++++----------
 tox.ini                     |  2 +-
 10 files changed, 135 insertions(+), 122 deletions(-)

diff --git a/bin/swift-account-audit b/bin/swift-account-audit
index 703eec9953..c0d82c70ed 100755
--- a/bin/swift-account-audit
+++ b/bin/swift-account-audit
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
 import os
 import sys
 from hashlib import md5
@@ -78,7 +79,7 @@ class Auditor(object):
         container_listing = self.audit_container(account, container)
         consistent = True
         if name not in container_listing:
-            print " Object %s missing in container listing!" % path
+            print(" Object %s missing in container listing!"
+                  % path)
             consistent = False
             hash = None
         else:
@@ -99,14 +100,14 @@ class Auditor(object):
                     if resp.status // 100 != 2:
                         self.object_not_found += 1
                         consistent = False
-                        print ' Bad status GETting object "%s" on %s/%s' \
-                            % (path, node['ip'], node['device'])
+                        print(' Bad status GETting object "%s" on %s/%s'
+                              % (path, node['ip'], node['device']))
                         continue
                     if resp.getheader('ETag').strip('"') != calc_hash:
                         self.object_checksum_mismatch += 1
                         consistent = False
-                        print ' MD5 does not match etag for "%s" on %s/%s' \
-                            % (path, node['ip'], node['device'])
+                        print(' MD5 does not match etag for "%s" on %s/%s'
+                              % (path, node['ip'], node['device']))
                     etags.append(resp.getheader('ETag'))
                 else:
                     conn = http_connect(node['ip'], node['port'],
@@ -116,28 +117,29 @@ class Auditor(object):
                     if resp.status // 100 != 2:
                         self.object_not_found += 1
                         consistent = False
-                        print ' Bad status HEADing object "%s" on %s/%s' \
-                            % (path, node['ip'], node['device'])
+                        print(' Bad status HEADing object "%s" on %s/%s'
+                              % (path, node['ip'], node['device']))
                         continue
                     etags.append(resp.getheader('ETag'))
             except Exception:
                 self.object_exceptions += 1
                 consistent = False
-                print ' Exception fetching object "%s" on %s/%s' \
-                    % (path, node['ip'], node['device'])
+                print(' Exception fetching object "%s" on %s/%s'
+                      % (path, node['ip'], node['device']))
                 continue
         if not etags:
             consistent = False
-            print " Failed fo fetch object %s at all!" % path
+            print(" Failed to fetch object %s at all!" % path)
         elif hash:
             for etag in etags:
                 if resp.getheader('ETag').strip('"') != hash:
                     consistent = False
                     self.object_checksum_mismatch += 1
-                    print ' ETag mismatch for "%s" on %s/%s' \
-                        % (path, node['ip'], node['device'])
+                    print(' ETag mismatch for "%s" on %s/%s'
+                          % (path, node['ip'], node['device']))
         if not consistent and self.error_file:
-            print >>open(self.error_file, 'a'), path
+            with open(self.error_file, 'a') as err_file:
+                print(path, file=err_file)
         self.objects_checked += 1

     def audit_container(self, account, name, recurse=False):
@@ -146,13 +148,13 @@ class Auditor(object):
         if (account, name) in self.list_cache:
             return self.list_cache[(account, name)]
         self.in_progress[(account, name)] = Event()
-        print 'Auditing container "%s"' % name
+        print('Auditing container "%s"' % name)
         path = '/%s/%s' % (account, name)
         account_listing = self.audit_account(account)
         consistent = True
         if name not in account_listing:
            consistent = False
-            print " Container %s not in account listing!" % path
+            print(" Container %s not in account listing!" % path)
         part, nodes = \
             self.container_ring.get_nodes(account, name.encode('utf-8'))
         rec_d = {}
@@ -180,8 +182,8 @@ class Auditor(object):
             except Exception:
                 self.container_exceptions += 1
                 consistent = False
-                print ' Exception GETting container "%s" on %s/%s' % \
-                    (path, node['ip'], node['device'])
+                print(' Exception GETting container "%s" on %s/%s' %
+                      (path, node['ip'], node['device']))
                 break
         if results:
             marker = results[-1]['name']
@@ -202,13 +204,15 @@ class Auditor(object):
                       for header in responses.values()]
         if not obj_counts:
             consistent = False
-            print " Failed to fetch container %s at all!" % path
+            print(" Failed to fetch container %s at all!" % path)
         else:
             if len(set(obj_counts)) != 1:
                 self.container_count_mismatch += 1
                 consistent = False
-                print " Container databases don't agree on number of objects."
- print " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts)) + print( + " Container databases don't agree on number of objects.") + print( + " Max: %s, Min: %s" % (max(obj_counts), min(obj_counts))) self.containers_checked += 1 self.list_cache[(account, name)] = rec_d self.in_progress[(account, name)].send(True) @@ -217,7 +221,8 @@ class Auditor(object): for obj in rec_d.keys(): self.pool.spawn_n(self.audit_object, account, name, obj) if not consistent and self.error_file: - print >>open(self.error_file, 'a'), path + with open(self.error_file, 'a') as error_file: + print(path, file=error_file) return rec_d def audit_account(self, account, recurse=False): @@ -226,7 +231,7 @@ class Auditor(object): if account in self.list_cache: return self.list_cache[account] self.in_progress[account] = Event() - print 'Auditing account "%s"' % account + print('Auditing account "%s"' % account) consistent = True path = '/%s' % account part, nodes = self.account_ring.get_nodes(account) @@ -270,8 +275,8 @@ class Auditor(object): print(" Account databases for '%s' don't agree on" " number of containers." % account) if cont_counts: - print " Max: %s, Min: %s" % (max(cont_counts), - min(cont_counts)) + print(" Max: %s, Min: %s" % (max(cont_counts), + min(cont_counts))) obj_counts = [int(header['x-account-object-count']) for header in headers] if len(set(obj_counts)) != 1: @@ -280,8 +285,8 @@ class Auditor(object): print(" Account databases for '%s' don't agree on" " number of objects." % account) if obj_counts: - print " Max: %s, Min: %s" % (max(obj_counts), - min(obj_counts)) + print(" Max: %s, Min: %s" % (max(obj_counts), + min(obj_counts))) containers = set() for resp in responses.values(): containers.update(container['name'] for container in resp[1]) @@ -294,7 +299,8 @@ class Auditor(object): self.pool.spawn_n(self.audit_container, account, container, True) if not consistent and self.error_file: - print >>open(self.error_file, 'a'), path + with open(self.error_file, 'a') as error_file: + print(path, error_file) return containers def audit(self, account, container=None, obj=None): @@ -312,9 +318,9 @@ class Auditor(object): def _print_stat(name, stat): # Right align stat name in a field of 18 characters - print "{0:>18}: {1}".format(name, stat) + print("{0:>18}: {1}".format(name, stat)) - print + print() _print_stat("Accounts checked", self.accounts_checked) if self.account_not_found: _print_stat("Missing Replicas", self.account_not_found) @@ -324,7 +330,7 @@ class Auditor(object): _print_stat("Container mismatch", self.account_container_mismatch) if self.account_object_mismatch: _print_stat("Object mismatch", self.account_object_mismatch) - print + print() _print_stat("Containers checked", self.containers_checked) if self.container_not_found: _print_stat("Missing Replicas", self.container_not_found) @@ -334,7 +340,7 @@ class Auditor(object): _print_stat("Count mismatch", self.container_count_mismatch) if self.container_obj_mismatch: _print_stat("Object mismatch", self.container_obj_mismatch) - print + print() _print_stat("Objects checked", self.objects_checked) if self.object_not_found: _print_stat("Missing Replicas", self.object_not_found) @@ -348,11 +354,11 @@ if __name__ == '__main__': try: optlist, args = getopt.getopt(sys.argv[1:], 'c:r:e:d') except getopt.GetoptError as err: - print str(err) - print usage + print(str(err)) + print(usage) sys.exit(2) if not args and os.isatty(sys.stdin.fileno()): - print usage + print(usage) sys.exit() opts = dict(optlist) options = { diff --git a/bin/swift-config 
b/bin/swift-config index c79a2a08ff..12bcede09c 100755 --- a/bin/swift-config +++ b/bin/swift-config @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import optparse import os import sys @@ -64,7 +65,7 @@ def main(): else: conf_files += Server(arg).conf_files(**options) for conf_file in conf_files: - print '# %s' % conf_file + print('# %s' % conf_file) if options['wsgi']: app_config = appconfig(conf_file) conf = inspect_app_config(app_config) @@ -77,13 +78,13 @@ def main(): if not isinstance(v, dict): flat_vars[k] = v continue - print '[%s]' % k + print('[%s]' % k) for opt, value in v.items(): - print '%s = %s' % (opt, value) - print + print('%s = %s' % (opt, value)) + print() for k, v in flat_vars.items(): - print '# %s = %s' % (k, v) - print + print('# %s = %s' % (k, v)) + print() if __name__ == "__main__": sys.exit(main()) diff --git a/bin/swift-dispersion-report b/bin/swift-dispersion-report index 813156cf5b..3a72703142 100755 --- a/bin/swift-dispersion-report +++ b/bin/swift-dispersion-report @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import json from collections import defaultdict from six.moves.configparser import ConfigParser @@ -54,18 +55,18 @@ def get_error_log(prefix): if msg_or_exc.http_status == 507: if identifier not in unmounted: unmounted.append(identifier) - print >>stderr, 'ERROR: %s is unmounted -- This will ' \ - 'cause replicas designated for that device to be ' \ - 'considered missing until resolved or the ring is ' \ - 'updated.' % (identifier) + print('ERROR: %s is unmounted -- This will ' + 'cause replicas designated for that device to be ' + 'considered missing until resolved or the ring is ' + 'updated.' % (identifier), file=stderr) stderr.flush() if debug and identifier not in notfound: notfound.append(identifier) - print >>stderr, 'ERROR: %s returned a 404' % (identifier) + print('ERROR: %s returned a 404' % (identifier), file=stderr) stderr.flush() if not hasattr(msg_or_exc, 'http_status') or \ msg_or_exc.http_status not in (404, 507): - print >>stderr, 'ERROR: %s: %s' % (prefix, msg_or_exc) + print('ERROR: %s: %s' % (prefix, msg_or_exc), file=stderr) stderr.flush() return error_log @@ -77,8 +78,8 @@ def container_dispersion_report(coropool, connpool, account, container_ring, prefix='dispersion_%d' % policy.idx, full_listing=True)[1]] containers_listed = len(containers) if not containers_listed: - print >>stderr, 'No containers to query. Has ' \ - 'swift-dispersion-populate been run?' + print('No containers to query. 
Has ' + 'swift-dispersion-populate been run?', file=stderr) stderr.flush() return retries_done = [0] @@ -109,10 +110,10 @@ def container_dispersion_report(coropool, connpool, account, container_ring, if output_missing_partitions and \ found_count < len(nodes): missing = len(nodes) - found_count - print '\r\x1B[K', + print('\r\x1B[K', end='') stdout.flush() - print >>stderr, '# Container partition %s missing %s cop%s' % ( - part, missing, 'y' if missing == 1 else 'ies') + print('# Container partition %s missing %s cop%s' % ( + part, missing, 'y' if missing == 1 else 'ies'), file=stderr) container_copies_found[0] += found_count containers_queried[0] += 1 container_copies_missing[len(nodes) - found_count] += 1 @@ -121,9 +122,10 @@ def container_dispersion_report(coropool, connpool, account, container_ring, eta, eta_unit = compute_eta(begun, containers_queried[0], containers_listed) if not json_output: - print '\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' \ + print('\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' 'retries' % (containers_queried[0], containers_listed, round(eta), eta_unit, retries_done[0]), + end='') stdout.flush() container_parts = {} for container in containers: @@ -140,19 +142,19 @@ def container_dispersion_report(coropool, connpool, account, container_ring, elapsed, elapsed_unit = get_time_units(time() - begun) container_copies_missing.pop(0, None) if not json_output: - print '\r\x1B[KQueried %d containers for dispersion reporting, ' \ + print('\r\x1B[KQueried %d containers for dispersion reporting, ' '%d%s, %d retries' % (containers_listed, round(elapsed), - elapsed_unit, retries_done[0]) + elapsed_unit, retries_done[0])) if containers_listed - distinct_partitions: - print 'There were %d overlapping partitions' % ( - containers_listed - distinct_partitions) + print('There were %d overlapping partitions' % ( + containers_listed - distinct_partitions)) for missing_copies, num_parts in container_copies_missing.items(): - print missing_string(num_parts, missing_copies, - container_ring.replica_count) - print '%.02f%% of container copies found (%d of %d)' % ( - value, copies_found, copies_expected) - print 'Sample represents %.02f%% of the container partition space' % ( - 100.0 * distinct_partitions / container_ring.partition_count) + print(missing_string(num_parts, missing_copies, + container_ring.replica_count)) + print('%.02f%% of container copies found (%d of %d)' % ( + value, copies_found, copies_expected)) + print('Sample represents %.02f%% of the container partition space' % ( + 100.0 * distinct_partitions / container_ring.partition_count)) stdout.flush() return None else: @@ -177,14 +179,14 @@ def object_dispersion_report(coropool, connpool, account, object_ring, if err.http_status != 404: raise - print >>stderr, 'No objects to query. Has ' \ - 'swift-dispersion-populate been run?' + print('No objects to query. Has ' + 'swift-dispersion-populate been run?', file=stderr) stderr.flush() return objects_listed = len(objects) if not objects_listed: - print >>stderr, 'No objects to query. Has swift-dispersion-populate ' \ - 'been run?' + print('No objects to query. 
Has swift-dispersion-populate ' + 'been run?', file=stderr) stderr.flush() return retries_done = [0] @@ -221,10 +223,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring, if output_missing_partitions and \ found_count < len(nodes): missing = len(nodes) - found_count - print '\r\x1B[K', + print('\r\x1B[K', end='') stdout.flush() - print >>stderr, '# Object partition %s missing %s cop%s' % ( - part, missing, 'y' if missing == 1 else 'ies') + print('# Object partition %s missing %s cop%s' % ( + part, missing, 'y' if missing == 1 else 'ies'), file=stderr) object_copies_found[0] += found_count object_copies_missing[len(nodes) - found_count] += 1 objects_queried[0] += 1 @@ -233,9 +235,10 @@ def object_dispersion_report(coropool, connpool, account, object_ring, eta, eta_unit = compute_eta(begun, objects_queried[0], objects_listed) if not json_output: - print '\r\x1B[KQuerying objects: %d of %d, %d%s left, %d ' \ + print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d ' 'retries' % (objects_queried[0], objects_listed, round(eta), eta_unit, retries_done[0]), + end='') stdout.flush() object_parts = {} for obj in objects: @@ -251,21 +254,21 @@ def object_dispersion_report(coropool, connpool, account, object_ring, value = 100.0 * copies_found / copies_expected elapsed, elapsed_unit = get_time_units(time() - begun) if not json_output: - print '\r\x1B[KQueried %d objects for dispersion reporting, ' \ + print('\r\x1B[KQueried %d objects for dispersion reporting, ' '%d%s, %d retries' % (objects_listed, round(elapsed), - elapsed_unit, retries_done[0]) + elapsed_unit, retries_done[0])) if objects_listed - distinct_partitions: - print 'There were %d overlapping partitions' % ( - objects_listed - distinct_partitions) + print('There were %d overlapping partitions' % ( + objects_listed - distinct_partitions)) for missing_copies, num_parts in object_copies_missing.items(): - print missing_string(num_parts, missing_copies, - object_ring.replica_count) + print(missing_string(num_parts, missing_copies, + object_ring.replica_count)) - print '%.02f%% of object copies found (%d of %d)' % \ - (value, copies_found, copies_expected) - print 'Sample represents %.02f%% of the object partition space' % ( - 100.0 * distinct_partitions / object_ring.partition_count) + print('%.02f%% of object copies found (%d of %d)' % + (value, copies_found, copies_expected)) + print('Sample represents %.02f%% of the object partition space' % ( + 100.0 * distinct_partitions / object_ring.partition_count)) stdout.flush() return None else: @@ -347,7 +350,7 @@ Usage: %%prog [options] [conf_file] policy = POLICIES.get_by_name(options.policy_name) if policy is None: exit('Unable to find policy: %s' % options.policy_name) - print 'Using storage policy: %s ' % policy.name + print('Using storage policy: %s ' % policy.name) swift_dir = conf.get('swift_dir', '/etc/swift') retries = int(conf.get('retries', 5)) @@ -405,4 +408,4 @@ Usage: %%prog [options] [conf_file] coropool, connpool, account, object_ring, retries, options.partitions, policy) if json_output: - print json.dumps(output) + print(json.dumps(output)) diff --git a/bin/swift-drive-audit b/bin/swift-drive-audit index 013bc20226..010fa10a4f 100755 --- a/bin/swift-drive-audit +++ b/bin/swift-drive-audit @@ -142,10 +142,10 @@ if __name__ == '__main__': try: conf_path = sys.argv[1] except Exception: - print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1] + print("Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]) sys.exit(1) if not c.read(conf_path): - print "Unable to 
read config file %s" % conf_path + print("Unable to read config file %s" % conf_path) sys.exit(1) conf = dict(c.items('drive-audit')) device_dir = conf.get('device_dir', '/srv/node') diff --git a/bin/swift-get-nodes b/bin/swift-get-nodes index b8d068bc37..b25002adab 100755 --- a/bin/swift-get-nodes +++ b/bin/swift-get-nodes @@ -74,7 +74,7 @@ if __name__ == '__main__': ring_name = args[0].rsplit('/', 1)[-1].split('.', 1)[0] ring = Ring(args[0]) else: - print 'Ring file does not exist' + print('Ring file does not exist') args.pop(0) try: diff --git a/bin/swift-init b/bin/swift-init index c8e0aef5c6..b31a205c68 100755 --- a/bin/swift-init +++ b/bin/swift-init @@ -84,7 +84,7 @@ def main(): if len(args) < 2: parser.print_help() - print 'ERROR: specify server(s) and command' + print('ERROR: specify server(s) and command') return 1 command = args[-1] @@ -101,7 +101,7 @@ def main(): status = manager.run_command(command, **options.__dict__) except UnknownCommandError: parser.print_help() - print 'ERROR: unknown command, %s' % command + print('ERROR: unknown command, %s' % command) status = 1 return 1 if status else 0 diff --git a/bin/swift-orphans b/bin/swift-orphans index 90311c9816..d2a3889ad7 100755 --- a/bin/swift-orphans +++ b/bin/swift-orphans @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import optparse import os import signal @@ -104,11 +105,11 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours): args_len = max(args_len, len(args)) args_len = min(args_len, 78 - hours_len - pid_len) - print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \ - ('Hours', 'PID', 'Command') + print(('%%%ds %%%ds %%s' % (hours_len, pid_len)) % + ('Hours', 'PID', 'Command')) for hours, pid, args in listing: - print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \ - (hours, pid, args[:args_len]) + print(('%%%ds %%%ds %%s' % (hours_len, pid_len)) % + (hours, pid, args[:args_len])) if options.signal: try: @@ -120,7 +121,8 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours): if not signum: sys.exit('Could not translate %r to a signal number.' % options.signal) - print 'Sending processes %s (%d) signal...' % (options.signal, signum), + print('Sending processes %s (%d) signal...' % (options.signal, signum), + end='') for hours, pid, args in listing: os.kill(int(pid), signum) - print 'Done.' 
+ print('Done.') diff --git a/bin/swift-recon-cron b/bin/swift-recon-cron index 0b4650cd86..8b3450b1a2 100755 --- a/bin/swift-recon-cron +++ b/bin/swift-recon-cron @@ -50,11 +50,11 @@ def main(): try: conf_path = sys.argv[1] except Exception: - print "Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1] - print "ex: swift-recon-cron /etc/swift/object-server.conf" + print("Usage: %s CONF_FILE" % sys.argv[0].split('/')[-1]) + print("ex: swift-recon-cron /etc/swift/object-server.conf") sys.exit(1) if not c.read(conf_path): - print "Unable to read config file %s" % conf_path + print("Unable to read config file %s" % conf_path) sys.exit(1) conf = dict(c.items('filter:recon')) device_dir = conf.get('devices', '/srv/node') @@ -68,7 +68,7 @@ def main(): os.mkdir(lock_dir) except OSError as e: logger.critical(str(e)) - print str(e) + print(str(e)) sys.exit(1) try: asyncs = get_async_count(device_dir, logger) diff --git a/bin/swift-temp-url b/bin/swift-temp-url index 0d8012e375..34445e8be8 100755 --- a/bin/swift-temp-url +++ b/bin/swift-temp-url @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import hmac from hashlib import sha1 from os.path import basename @@ -24,28 +25,28 @@ from six.moves import urllib if __name__ == '__main__': if len(argv) < 5: prog = basename(argv[0]) - print 'Syntax: %s <method> <seconds> <path> <key>' % prog - print - print 'Where:' - print ' <method> The method to allow; GET for example.' - print ' <seconds> The number of seconds from now to allow requests.' - print ' <path> The full path to the resource.' - print ' Example: /v1/AUTH_account/c/o' - print ' <key> The X-Account-Meta-Temp-URL-Key for the account.' - print - print 'Example output:' - print ' /v1/AUTH_account/c/o?temp_url_sig=34d49efc32fe6e3082e411e' \ 'eeb85bd8a&temp_url_expires=1323482948' - print - print 'This can be used to form a URL to give out for the access ' - print 'allowed. For example:' - print ' echo https://swift-cluster.example.com`%s GET 60 ' \ '/v1/AUTH_account/c/o mykey`' % prog - print - print 'Might output:' - print ' https://swift-cluster.example.com/v1/AUTH_account/c/o?' \ 'temp_url_sig=34d49efc32fe6e3082e411eeeb85bd8a&' \ 'temp_url_expires=1323482948' + print('Syntax: %s <method> <seconds> <path> <key>' % prog) + print() + print('Where:') + print(' <method> The method to allow; GET for example.') + print(' <seconds> The number of seconds from now to allow requests.') + print(' <path> The full path to the resource.') + print(' Example: /v1/AUTH_account/c/o') + print(' <key> The X-Account-Meta-Temp-URL-Key for the account.') + print() + print('Example output:') + print(' /v1/AUTH_account/c/o?temp_url_sig=34d49efc32fe6e3082e411e' + 'eeb85bd8a&temp_url_expires=1323482948') + print() + print('This can be used to form a URL to give out for the access ') + print('allowed. For example:') + print(' echo https://swift-cluster.example.com`%s GET 60 ' + '/v1/AUTH_account/c/o mykey`' % prog) + print() + print('Might output:') + print(' https://swift-cluster.example.com/v1/AUTH_account/c/o?' + 'temp_url_sig=34d49efc32fe6e3082e411eeeb85bd8a&' + 'temp_url_expires=1323482948') exit(1) method, seconds, path, key = argv[1:5] try: @@ -53,7 +54,7 @@ if __name__ == '__main__': except ValueError: expires = 0 if expires < 1: - print 'Please use a positive value.'
+ print('Please use a positive value.') exit(1) parts = path.split('/', 4) # Must be five parts, ['', 'v1', 'a', 'c', 'o'], must be a v1 request, have @@ -72,4 +73,4 @@ if __name__ == '__main__': real_path = path sig = hmac.new(key, '%s\n%s\n%s' % (method, expires, real_path), sha1).hexdigest() - print '%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires) + print('%s?temp_url_sig=%s&temp_url_expires=%s' % (path, sig, expires)) diff --git a/tox.ini b/tox.ini index 200e7a24ed..24df9c6320 100644 --- a/tox.ini +++ b/tox.ini @@ -98,7 +98,7 @@ commands = bandit -c bandit.yaml -r swift bin -n 5 -p gate # H404: multi line docstring should start without a leading new line # H405: multi line docstring summary not separated with an empty line # H501: Do not use self.__dict__ for string formatting -ignore = F812,H101,H202,H233,H301,H306,H401,H403,H404,H405,H501 +ignore = F812,H101,H202,H301,H306,H401,H403,H404,H405,H501 exclude = .venv,.tox,dist,*egg filename = *.py,bin/* show-source = True From 6b6fa693499827b6bddf5a17d0787315ab3c5011 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Sun, 15 May 2016 14:36:57 +0000 Subject: [PATCH 061/156] Add description of server options (all, main, rest) for swift-init help Running swift-init with -h, --help, or no arguments displays help for the command. The help does not document the 'main', 'all', and 'rest' options. These are documented in the man page. This patch adds all of these server options to the help of swift-init. Change-Id: I8e27589912ae72ace14c955e66b86942bc23d9f7 Closes-Bug: #1580722 --- bin/swift-init | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bin/swift-init b/bin/swift-init index c8e0aef5c6..a5304e7f3f 100755 --- a/bin/swift-init +++ b/bin/swift-init @@ -26,6 +26,14 @@ USAGE = \ where: <server> is the name of a swift service e.g. proxy-server. The '-server' part of the name may be omitted. + 'all', 'main' and 'rest' are reserved words that represent a + group of services. + all: Expands to all swift daemons. + main: Expands to main swift daemons. + (proxy, container, account, object) + rest: Expands to all remaining background daemons (beyond + "main"). + (updater, replicator, auditor, etc) <config> is an explicit configuration filename without the .conf extension. If <config> is specified then <server> should refer to a directory containing the configuration file, e.g.: From 5817f00005d2c1761499c3d6e5e0d428ef238c77 Mon Sep 17 00:00:00 2001 From: Cheng Li Date: Sun, 10 Jul 2016 20:33:05 +0800 Subject: [PATCH 062/156] make 0 an available value for options The value 0 is regarded as not available by swift-ring-builder: $ swift-ring-builder testring add --region 0 --zone 1 --ip 127.0.0.2 --port 6000 --device sdb --weight 100 Required argument -r/--region not specified. The on-disk ring builder is unchanged. This patch makes the value 0 available. Change-Id: Id941d44d8dbfe438bf921ed905908b838c88a644 Closes-bug: #1547137 --- swift/common/ring/utils.py | 2 +- test/unit/common/ring/test_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/swift/common/ring/utils.py b/swift/common/ring/utils.py index 830dbfef22..8dd2484fe6 100644 --- a/swift/common/ring/utils.py +++ b/swift/common/ring/utils.py @@ -616,7 +616,7 @@ def build_dev_from_opts(opts): ['port', '-p', '--port'], ['device', '-d', '--device'], ['weight', '-w', '--weight']): - if not getattr(opts, attribute, None): + if getattr(opts, attribute, None) is None: raise ValueError('Required argument %s/%s not specified.'
% (shortopt, longopt)) diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index fda4cdfdcd..7daa7fb93a 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -579,7 +579,7 @@ class TestUtils(unittest.TestCase): def test_build_dev_from_opts(self): argv = \ - ["--region", "2", "--zone", "3", + ["--region", "0", "--zone", "3", "--ip", "test.test.com", "--port", "6200", "--replication-ip", "r.test.com", @@ -588,7 +588,7 @@ class TestUtils(unittest.TestCase): "--meta", "some meta data", "--weight", "3.14159265359"] expected = { - 'region': 2, + 'region': 0, 'zone': 3, 'ip': "test.test.com", 'port': 6200, From 5cd57dc3572bccd443d74d0e18ea64a221073dac Mon Sep 17 00:00:00 2001 From: Hisashi Osanai Date: Tue, 14 Jul 2015 15:13:06 +0900 Subject: [PATCH 063/156] Improve Keystone v3 token support Currently, keystoneauth uses only HTTP_X_TENANT_NAME/ID for backward compatibility, even when a v3 token is used. The current behavior causes no problems because keystonemiddleware sets the same values on both sets of headers, but the tenant headers are marked as deprecated, so this patch adds support for HTTP_X_PROJECT_NAME/ID in addition to HTTP_X_TENANT_NAME/ID. Change-Id: Ie5e02067a59e18f1ac215f51429863bdd42f729f --- swift/common/middleware/keystoneauth.py | 6 +- .../common/middleware/test_keystoneauth.py | 105 +++++++++++++++--- 2 files changed, 95 insertions(+), 16 deletions(-) diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py index ccdd2a8ba9..781708ab30 100644 --- a/swift/common/middleware/keystoneauth.py +++ b/swift/common/middleware/keystoneauth.py @@ -243,8 +243,10 @@ class KeystoneAuth(object): service_roles = list_from_csv(environ.get('HTTP_X_SERVICE_ROLES', '')) identity = {'user': (environ.get('HTTP_X_USER_ID'), environ.get('HTTP_X_USER_NAME')), - 'tenant': (environ.get('HTTP_X_TENANT_ID'), - environ.get('HTTP_X_TENANT_NAME')), + 'tenant': (environ.get('HTTP_X_PROJECT_ID', + environ.get('HTTP_X_TENANT_ID')), + environ.get('HTTP_X_PROJECT_NAME', + environ.get('HTTP_X_TENANT_NAME'))), 'roles': roles, 'service_roles': service_roles} token_info = environ.get('keystone.token_info', {}) diff --git a/test/unit/common/middleware/test_keystoneauth.py b/test/unit/common/middleware/test_keystoneauth.py index 96d0ed4902..e8f29b5719 100644 --- a/test/unit/common/middleware/test_keystoneauth.py +++ b/test/unit/common/middleware/test_keystoneauth.py @@ -550,7 +550,8 @@ class BaseTestAuthorize(unittest.TestCase): if not identity: identity = self._get_identity() return get_account_for_tenant(self.test_auth, - identity['HTTP_X_TENANT_ID']) + identity.get('HTTP_X_PROJECT_ID') or + identity.get('HTTP_X_TENANT_ID')) def _get_identity(self, tenant_id='tenant_id', tenant_name='tenant_name', user_id='user_id', user_name='user_name', roles=None, @@ -564,13 +565,20 @@ class BaseTestAuthorize(unittest.TestCase): 'HTTP_X_USER_NAME': user_name, 'HTTP_X_USER_DOMAIN_NAME': user_domain_name, 'HTTP_X_USER_DOMAIN_ID': user_domain_id, - 'HTTP_X_TENANT_ID': tenant_id, - 'HTTP_X_TENANT_NAME': tenant_name, + 'HTTP_X_PROJECT_ID': tenant_id, + 'HTTP_X_PROJECT_NAME': tenant_name, 'HTTP_X_PROJECT_DOMAIN_ID': project_domain_id, 'HTTP_X_PROJECT_DOMAIN_NAME': project_domain_name, 'HTTP_X_ROLES': roles, 'HTTP_X_IDENTITY_STATUS': 'Confirmed'} + def _get_identity_for_v2(self, **kwargs): + identity = self._get_identity(**kwargs) + for suffix in ['ID', 'NAME']: + identity['HTTP_X_TENANT_{0}'.format(suffix)] = identity.pop(
'HTTP_X_PROJECT_{0}'.format(suffix)) + return identity + def _get_env_id(self, tenant_id='tenant_id', tenant_name='tenant_name', user_id='user_id', user_name='user_name', roles=[], project_domain_name='domA', project_domain_id='99', @@ -597,7 +605,8 @@ class TestAuthorize(BaseTestAuthorize): # fake cached account info info_key = get_cache_key(account) default_env = { - 'REMOTE_USER': identity['HTTP_X_TENANT_ID'], + 'REMOTE_USER': (identity.get('HTTP_X_PROJECT_ID') or + identity.get('HTTP_X_TENANT_ID')), 'swift.infocache': {info_key: {'status': 200, 'sysmeta': {}}}} default_env.update(identity) if env: @@ -689,7 +698,7 @@ class TestAuthorize(BaseTestAuthorize): self._check_authenticate(identity=identity, acl=acl) def test_authorize_succeeds_for_tenant_name_user_in_roles(self): - identity = self._get_identity() + identity = self._get_identity_for_v2() user_name = identity['HTTP_X_USER_NAME'] user_id = identity['HTTP_X_USER_ID'] tenant_name = identity['HTTP_X_TENANT_NAME'] @@ -697,15 +706,33 @@ class TestAuthorize(BaseTestAuthorize): acl = '%s:%s' % (tenant_name, user) self._check_authenticate(identity=identity, acl=acl) - def test_authorize_succeeds_for_tenant_id_user_in_roles(self): + def test_authorize_succeeds_for_project_name_user_in_roles(self): identity = self._get_identity() user_name = identity['HTTP_X_USER_NAME'] user_id = identity['HTTP_X_USER_ID'] + project_name = identity['HTTP_X_PROJECT_NAME'] + for user in [user_id, user_name, '*']: + acl = '%s:%s' % (project_name, user) + self._check_authenticate(identity=identity, acl=acl) + + def test_authorize_succeeds_for_tenant_id_user_in_roles(self): + identity = self._get_identity_for_v2() + user_name = identity['HTTP_X_USER_NAME'] + user_id = identity['HTTP_X_USER_ID'] tenant_id = identity['HTTP_X_TENANT_ID'] for user in [user_id, user_name, '*']: acl = '%s:%s' % (tenant_id, user) self._check_authenticate(identity=identity, acl=acl) + def test_authorize_succeeds_for_project_id_user_in_roles(self): + identity = self._get_identity() + user_name = identity['HTTP_X_USER_NAME'] + user_id = identity['HTTP_X_USER_ID'] + project_id = identity['HTTP_X_PROJECT_ID'] + for user in [user_id, user_name, '*']: + acl = '%s:%s' % (project_id, user) + self._check_authenticate(identity=identity, acl=acl) + def test_authorize_succeeds_for_wildcard_tenant_user_in_roles(self): identity = self._get_identity() user_name = identity['HTTP_X_USER_NAME'] @@ -844,8 +871,8 @@ class TestAuthorize(BaseTestAuthorize): self.assertEqual(authorize_resp, None) def test_names_disallowed_in_acls_outside_default_domain(self): - id = self._get_identity(user_domain_id='non-default', - project_domain_id='non-default') + id = self._get_identity_for_v2(user_domain_id='non-default', + project_domain_id='non-default') env = {'keystone.token_info': _fake_token_info(version='3')} acl = '%s:%s' % (id['HTTP_X_TENANT_NAME'], id['HTTP_X_USER_NAME']) self._check_authenticate(acl=acl, identity=id, env=env, @@ -859,9 +886,23 @@ class TestAuthorize(BaseTestAuthorize): acl = '%s:%s' % (id['HTTP_X_TENANT_ID'], id['HTTP_X_USER_ID']) self._check_authenticate(acl=acl, identity=id, env=env) + id = self._get_identity(user_domain_id='non-default', + project_domain_id='non-default') + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = 
'%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + def test_names_allowed_in_acls_inside_default_domain(self): - id = self._get_identity(user_domain_id='default', - project_domain_id='default') + id = self._get_identity_for_v2(user_domain_id='default', + project_domain_id='default') env = {'keystone.token_info': _fake_token_info(version='3')} acl = '%s:%s' % (id['HTTP_X_TENANT_NAME'], id['HTTP_X_USER_NAME']) self._check_authenticate(acl=acl, identity=id, env=env) @@ -872,12 +913,23 @@ class TestAuthorize(BaseTestAuthorize): acl = '%s:%s' % (id['HTTP_X_TENANT_ID'], id['HTTP_X_USER_ID']) self._check_authenticate(acl=acl, identity=id, env=env) + id = self._get_identity(user_domain_id='default', + project_domain_id='default') + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + def test_names_allowed_in_acls_inside_default_domain_with_config(self): conf = {'allow_names_in_acls': 'yes'} self.test_auth = keystoneauth.filter_factory(conf)(FakeApp()) self.test_auth.logger = FakeLogger() - id = self._get_identity(user_domain_id='default', - project_domain_id='default') + id = self._get_identity_for_v2(user_domain_id='default', + project_domain_id='default') env = {'keystone.token_info': _fake_token_info(version='3')} acl = '%s:%s' % (id['HTTP_X_TENANT_NAME'], id['HTTP_X_USER_NAME']) self._check_authenticate(acl=acl, identity=id, env=env) @@ -888,12 +940,23 @@ class TestAuthorize(BaseTestAuthorize): acl = '%s:%s' % (id['HTTP_X_TENANT_ID'], id['HTTP_X_USER_ID']) self._check_authenticate(acl=acl, identity=id, env=env) + id = self._get_identity(user_domain_id='default', + project_domain_id='default') + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + def test_names_disallowed_in_acls_inside_default_domain(self): conf = {'allow_names_in_acls': 'false'} self.test_auth = keystoneauth.filter_factory(conf)(FakeApp()) self.test_auth.logger = FakeLogger() - id = self._get_identity(user_domain_id='default', - project_domain_id='default') + id = self._get_identity_for_v2(user_domain_id='default', + project_domain_id='default') env = {'keystone.token_info': _fake_token_info(version='3')} acl = '%s:%s' % (id['HTTP_X_TENANT_NAME'], id['HTTP_X_USER_NAME']) self._check_authenticate(acl=acl, identity=id, env=env, @@ -907,6 +970,20 @@ class TestAuthorize(BaseTestAuthorize): acl = '%s:%s' % (id['HTTP_X_TENANT_ID'], id['HTTP_X_USER_ID']) self._check_authenticate(acl=acl, identity=id, env=env) + id = 
self._get_identity(user_domain_id='default', + project_domain_id='default') + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = '%s:%s' % (id['HTTP_X_PROJECT_NAME'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_NAME']) + self._check_authenticate(acl=acl, identity=id, env=env, + exception=HTTP_FORBIDDEN) + acl = '%s:%s' % (id['HTTP_X_PROJECT_ID'], id['HTTP_X_USER_ID']) + self._check_authenticate(acl=acl, identity=id, env=env) + def test_keystone_identity(self): user = ('U_ID', 'U_NAME') roles = ('ROLE1', 'ROLE2') From 6740a7badd795d2ccc477472581b56b628b96818 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Fri, 1 Jul 2016 14:42:43 -0700 Subject: [PATCH 064/156] Add keymaster_config_path option to keymaster Also, tighten up the format checks on root secrets. Change-Id: I1cd9a97c4e8d87d7c065866e7ad3a9e748ff19ab --- etc/proxy-server.conf-sample | 9 +++ swift/common/middleware/crypto/keymaster.py | 23 +++++- .../middleware/crypto/test_keymaster.py | 80 +++++++++++++++---- 3 files changed, 95 insertions(+), 17 deletions(-) diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index 517a9c29ad..a74320b1b0 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -783,6 +783,15 @@ use = egg:swift#keymaster # likely to result in data loss. encryption_root_secret = changeme +# Sets the path from which the keymaster config options should be read. This +# allows multiple processes which need to be encryption-aware (for example, +# proxy-server and container-sync) to share the same config file, ensuring +# that the encryption keys used are the same. The format expected is similar +# to other config files, with a single [keymaster] section and a single +# encryption_root_secret option. If this option is set, the root secret +# MUST NOT be set in proxy-server.conf. 
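For illustration, a minimal shared file in that format might look like the following sketch (the placeholder value is invented for this example, not taken from the sample config):

    [keymaster]
    encryption_root_secret = <base64 encoding of at least 32 raw bytes>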
+# keymaster_config_path = + [filter:encryption] use = egg:swift#encryption diff --git a/swift/common/middleware/crypto/keymaster.py b/swift/common/middleware/crypto/keymaster.py index 4b6ac71f2c..f7a7dd6391 100644 --- a/swift/common/middleware/crypto/keymaster.py +++ b/swift/common/middleware/crypto/keymaster.py @@ -16,9 +16,13 @@ import base64 import hashlib import hmac import os +import string + +import six from swift.common.middleware.crypto.crypto_utils import CRYPTO_KEY_CALLBACK from swift.common.swob import Request, HTTPException +from swift.common.utils import readconf from swift.common.wsgi import WSGIContext @@ -109,15 +113,30 @@ class KeyMaster(object): def __init__(self, app, conf): self.app = app + + keymaster_config_path = conf.get('keymaster_config_path') + if keymaster_config_path: + if any(opt in conf for opt in ('encryption_root_secret',)): + raise ValueError('keymaster_config_path is set, but there ' + 'are other config options specified!') + conf = readconf(keymaster_config_path, 'keymaster') + self.root_secret = conf.get('encryption_root_secret') try: + # b64decode will silently discard bad characters, but we should + # treat them as an error + if not isinstance(self.root_secret, six.string_types) or any( + c not in string.digits + string.ascii_letters + '/+\r\n' + for c in self.root_secret.strip('\r\n=')): + raise ValueError self.root_secret = base64.b64decode(self.root_secret) if len(self.root_secret) < 32: raise ValueError except (TypeError, ValueError): raise ValueError( - 'encryption_root_secret option in proxy-server.conf must be ' - 'a base64 encoding of at least 32 raw bytes') + 'encryption_root_secret option in %s must be a base64 ' + 'encoding of at least 32 raw bytes' % ( + keymaster_config_path or 'proxy-server.conf')) def __call__(self, env, start_response): req = Request(env) diff --git a/test/unit/common/middleware/crypto/test_keymaster.py b/test/unit/common/middleware/crypto/test_keymaster.py index 2f8a1db458..40ed32d7ab 100644 --- a/test/unit/common/middleware/crypto/test_keymaster.py +++ b/test/unit/common/middleware/crypto/test_keymaster.py @@ -16,6 +16,7 @@ import base64 import os +import mock import unittest from swift.common import swob @@ -126,25 +127,42 @@ class TestKeymaster(unittest.TestCase): def test_root_secret(self): for secret in (os.urandom(32), os.urandom(33), os.urandom(50)): encoded_secret = base64.b64encode(secret) - try: - app = keymaster.KeyMaster( - self.swift, {'encryption_root_secret': - bytes(encoded_secret)}) - self.assertEqual(secret, app.root_secret) - except AssertionError as err: - self.fail(str(err) + ' for secret %s' % secret) - try: - app = keymaster.KeyMaster( - self.swift, {'encryption_root_secret': - unicode(encoded_secret)}) - self.assertEqual(secret, app.root_secret) - except AssertionError as err: - self.fail(str(err) + ' for secret %s' % secret) + for conf_val in (bytes(encoded_secret), unicode(encoded_secret), + encoded_secret[:30] + '\n' + encoded_secret[30:]): + try: + app = keymaster.KeyMaster( + self.swift, {'encryption_root_secret': conf_val, + 'encryption_root_secret_path': ''}) + self.assertEqual(secret, app.root_secret) + except AssertionError as err: + self.fail(str(err) + ' for secret %r' % conf_val) + + @mock.patch('swift.common.middleware.crypto.keymaster.readconf') + def test_keymaster_config_path(self, mock_readconf): + for secret in (os.urandom(32), os.urandom(33), os.urandom(50)): + enc_secret = base64.b64encode(secret) + for conf_val in (bytes(enc_secret), unicode(enc_secret), + enc_secret[:30] 
+ '\n' + enc_secret[30:], + enc_secret[:30] + '\r\n' + enc_secret[30:]): + for ignored_secret in ('invalid! but ignored!', + 'xValidButIgnored' * 10): + mock_readconf.reset_mock() + mock_readconf.return_value = { + 'encryption_root_secret': conf_val} + + app = keymaster.KeyMaster(self.swift, { + 'keymaster_config_path': '/some/path'}) + try: + self.assertEqual(secret, app.root_secret) + self.assertEqual(mock_readconf.mock_calls, [ + mock.call('/some/path', 'keymaster')]) + except AssertionError as err: + self.fail(str(err) + ' for secret %r' % secret) def test_invalid_root_secret(self): for secret in (bytes(base64.b64encode(os.urandom(31))), # too short unicode(base64.b64encode(os.urandom(31))), - u'?' * 44, b'?' * 44, # not base64 + u'a' * 44 + u'????', b'a' * 44 + b'????', # not base64 u'a' * 45, b'a' * 45, # bad padding 99, None): conf = {'encryption_root_secret': secret} @@ -158,6 +176,38 @@ class TestKeymaster(unittest.TestCase): except AssertionError as err: self.fail(str(err) + ' for conf %s' % str(conf)) + @mock.patch('swift.common.middleware.crypto.keymaster.readconf') + def test_root_secret_path_invalid_secret(self, mock_readconf): + for secret in (bytes(base64.b64encode(os.urandom(31))), # too short + unicode(base64.b64encode(os.urandom(31))), + u'a' * 44 + u'????', b'a' * 44 + b'????', # not base64 + u'a' * 45, b'a' * 45, # bad padding + 99, None): + mock_readconf.reset_mock() + mock_readconf.return_value = {'encryption_root_secret': secret} + + try: + with self.assertRaises(ValueError) as err: + keymaster.KeyMaster(self.swift, { + 'keymaster_config_path': '/some/other/path'}) + self.assertEqual( + 'encryption_root_secret option in /some/other/path ' + 'must be a base64 encoding of at least 32 raw bytes', + err.exception.message) + self.assertEqual(mock_readconf.mock_calls, [ + mock.call('/some/other/path', 'keymaster')]) + except AssertionError as err: + self.fail(str(err) + ' for secret %r' % secret) + + def test_can_only_configure_secret_in_one_place(self): + conf = {'encryption_root_secret': 'a' * 44, + 'keymaster_config_path': '/etc/swift/keymaster.conf'} + with self.assertRaises(ValueError) as err: + keymaster.KeyMaster(self.swift, conf) + self.assertEqual('keymaster_config_path is set, but there are ' + 'other config options specified!', + err.exception.message) + if __name__ == '__main__': unittest.main() From 90627f903adaecede63b90a3be2270178b3685e3 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Mon, 23 May 2016 21:10:08 +0000 Subject: [PATCH 065/156] Add region in ring structure & deployment guide The deployment guide does not talk about regions. Also, it does not specify that regions and zones need to be ints. This patch adds a brief description of regions and changes the numbers to ints. It also adds the region to the document that describes the ring data structure. Change-Id: I04ce42fb3e5c1f08e7f7ff6be23482cee8bdeb71 Partial-Bug: #1583551 --- doc/source/deployment_guide.rst | 8 ++++++-- doc/source/overview_ring.rst | 3 ++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 7458c4f2c1..c8aae9e555 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -101,8 +101,12 @@ into consideration can include physical location, power availability, and network connectivity. For example, in a small cluster you might decide to split the zones up by cabinet, with each cabinet having its own power and network connectivity.
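As a concrete illustration of that cabinet-per-zone idea (the builder name, address, and weight below are invented for this sketch, not taken from the patch), a first device in region 1, zone 1 might later be added with:

    swift-ring-builder account.builder add r1z1-192.168.1.10:6202/sdb1 100

Here r1 and z1 carry exactly the integer region and zone numbers this documentation describes.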
The zone concept is very abstract, so feel free to use -it in whatever way best isolates your data from failure. Zones are referenced -by number, beginning with 1. +it in whatever way best isolates your data from failure. Each zone exists +in a region. + +A region is also an abstract concept that may be used to distinguish between +geographically separated areas as well as areas within the same datacenter. +Regions and zones are referenced by a positive integer. You can now start building the ring with:: diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst index 321b5ac8bc..c6aa882381 100644 --- a/doc/source/overview_ring.rst +++ b/doc/source/overview_ring.rst @@ -80,7 +80,8 @@ the list of devices is a dictionary with the following keys: ====== ======= ============================================================== id integer The index into the list devices. -zone integer The zone the devices resides in. +zone integer The zone the device resides in. +region integer The region the zone resides in. weight float The relative weight of the device in comparison to other devices. This usually corresponds directly to the amount of disk space the device has compared to other devices. For From 88238108f824e14fca8ca6adb3ec0e72df87fdf9 Mon Sep 17 00:00:00 2001 From: John Dickinson Date: Sun, 10 Jul 2016 10:00:09 -0700 Subject: [PATCH 066/156] authors and changelog updates for 2.9.0 release Change-Id: I3c3e779227aad1df6abb517817355c6732e4a2af --- .mailmap | 3 +++ AUTHORS | 13 +++++++++++-- CHANGELOG | 31 +++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/.mailmap b/.mailmap index 37cbd5b123..db5940161d 100644 --- a/.mailmap +++ b/.mailmap @@ -106,3 +106,6 @@ Brian Cline Dharmendra Kushwaha Zhang Guoqing Kato Tomoyuki +Liang Jingtao +Yu Yafei +Zheng Yao diff --git a/AUTHORS b/AUTHORS index 8af597a2c3..54e88c8df5 100644 --- a/AUTHORS +++ b/AUTHORS @@ -123,6 +123,7 @@ Andreas Jaeger (aj@suse.de) Shri Javadekar (shrinand@maginatics.com) Iryoung Jeong (iryoung@gmail.com) Paul Jimenez (pj@place.org) +Liang Jingtao (liang.jingtao@zte.com.cn) Zhang Jinnan (ben.os@99cloud.net) Jason Johnson (jajohnson@softlayer.com) Brian K. Jones (bkjones@gmail.com) @@ -133,19 +134,21 @@ Takashi Kajinami (kajinamit@nttdata.co.jp) Matt Kassawara (mkassawara@gmail.com) Morita Kazutaka (morita.kazutaka@gmail.com) Josh Kearney (josh@jk0.org) +Ben Keller (bjkeller@us.ibm.com) +Bryan Keller (kellerbr@us.ibm.com) Ilya Kharin (ikharin@mirantis.com) Dae S.
Kim (dae@velatum.com) Nathan Kinder (nkinder@redhat.com) Eugene Kirpichov (ekirpichov@gmail.com) -Ben Keller (bjkeller@us.ibm.com) -Bryan Keller (kellerbr@us.ibm.com) Leah Klearman (lklrmn@gmail.com) Martin Kletzander (mkletzan@redhat.com) Jaivish Kothari (jaivish.kothari@nectechnologies.in) +Petr Kovar (pkovar@redhat.com) Steve Kowalik (steven@wedontsleep.org) Sergey Kraynev (skraynev@mirantis.com) Sushil Kumar (sushil.kumar2@globallogic.com) Madhuri Kumari (madhuri.rai07@gmail.com) +Yatin Kumbhare (yatinkumbhare@gmail.com) Dharmendra Kushwaha (dharmendra.kushwaha@nectechnologies.in) Hugo Kuo (tonytkdk@gmail.com) Tin Lam (tl3438@att.com) @@ -172,6 +175,7 @@ Zhongyue Luo (zhongyue.nah@intel.com) Paul Luse (paul.e.luse@intel.com) Christopher MacGown (chris@pistoncloud.com) Ganesh Maharaj Mahalingam (ganesh.mahalingam@intel.com) +Maria Malyarova (savoreux69@gmail.com) Dragos Manolescu (dragosm@hp.com) Ben Martin (blmartin@us.ibm.com) Steve Martinelli (stevemar@ca.ibm.com) @@ -193,6 +197,7 @@ Jola Mirecka (jola.mirecka@hp.com) Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp) Alfredo Moralejo (amoralej@redhat.com) Daisuke Morita (morita.daisuke@ntti3.com) +Mohit Motiani (mohit.motiani@intel.com) Dirk Mueller (dirk@dmllr.de) Takashi Natsume (natsume.takashi@lab.ntt.co.jp) Russ Nelson (russ@crynwr.com) @@ -207,6 +212,7 @@ Timothy Okwii (tokwii@cisco.com) Matthew Oliver (matt@oliver.net.au) Hisashi Osanai (osanai.hisashi@jp.fujitsu.com) Eamonn O'Toole (eamonn.otoole@hpe.com) +Or Ozeri (oro@il.ibm.com) James Page (james.page@ubuntu.com) Prashanth Pai (ppai@redhat.com) Venkateswarlu Pallamala (p.venkatesh551@gmail.com) @@ -263,6 +269,7 @@ Tobias Stevenson (tstevenson@vbridges.com) Victor Stinner (vstinner@redhat.com) Akihito Takai (takaiak@nttdata.co.jp) Pearl Yajing Tan (pearl.y.tan@seagate.com) +Nandini Tata (nandini.tata.15@gmail.com) Yuriy Taraday (yorik.sar@gmail.com) Monty Taylor (mordred@inaugust.com) Caleb Tennis (caleb.tennis@gmail.com) @@ -294,6 +301,8 @@ Andrew Welleck (awellec@us.ibm.com) Wu Wenxiang (wu.wenxiang@99cloud.net) Cory Wright (cory.wright@rackspace.com) Ye Jia Xu (xyj.asmy@gmail.com) +Yu Yafei (yu.yafei@zte.com.cn) +Zheng Yao (zheng.yao1@zte.com.cn) Alex Yang (alex890714@gmail.com) Lin Yang (lin.a.yang@intel.com) Yee (mail.zhang.yee@gmail.com) diff --git a/CHANGELOG b/CHANGELOG index 629ba88a7a..92fef37071 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,3 +1,34 @@ +swift (2.9.0) + + * Swift now supports at-rest encryption. This feature encrypts all + object data and user-set object metadata as it is sent to the cluster. + This feature is designed to prevent information leaks if a hard drive + leaves the cluster. The encryption is transparent to the end-user. + + At-rest encryption in Swift is enabled on the proxy server by + adding two middlewares to the pipeline. The `keymaster` middleware + is responsible for managing the encryption keys and the `encryption` + middleware does the actual encryption and decryption. + + Existing clusters will continue to work without enabling + encryption. Although enabling this feature on existing clusters + is supported, best practice is to enable this feature on new + clusters when the cluster is created. + + For more information on the details of the at-rest encryption + feature, please see the docs at + http://docs.openstack.org/developer/swift/overview_encryption.html. + + * `swift-recon` can now be called with more than one server type. 
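To make the encryption entry above concrete: enabling the feature amounts to naming the two middlewares in the proxy pipeline. An illustrative fragment (the surrounding middleware is elided; this sketch is not taken from the changelog itself):

    pipeline = <other middleware> keymaster encryption proxy-logging proxy-server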
+ + * Fixed a bug where non-ascii names could cause an error in logging + and cause a 5xx response to the client. + + * The install guide and API reference have been moved into Swift's + source code repository. + + * Various other minor bug fixes and improvements. + swift (2.8.0) * Allow concurrent bulk deletes for server-side deletes of static From 5d02b9578e48a2eacc5111a73a399642eb28fe15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9la=20Vancsics?= Date: Fri, 15 Jul 2016 12:40:29 +0200 Subject: [PATCH 067/156] Reduce code duplication Reduced source code by extracting duplicated code (swift/cli/ringbuilder.py) Change-Id: Ibd000df1dc9042e31b65b000199dff4a645e63b4 --- swift/cli/ringbuilder.py | 82 +++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 48 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 8f52849a93..0c35553185 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -276,6 +276,33 @@ def _set_info_values(devs, change, opts): format_device(dev))) +def calculate_change_value(change_value, change, v_name, v_name_port): + ip = '' + if change_value and change_value[0].isdigit(): + i = 1 + while (i < len(change_value) and + change_value[i] in '0123456789.'): + i += 1 + ip = change_value[:i] + change_value = change_value[i:] + elif change_value and change_value.startswith('['): + i = 1 + while i < len(change_value) and change_value[i] != ']': + i += 1 + i += 1 + ip = change_value[:i].lstrip('[').rstrip(']') + change_value = change_value[i:] + if ip: + change[v_name] = validate_and_normalize_ip(ip) + if change_value.startswith(':'): + i = 1 + while i < len(change_value) and change_value[i].isdigit(): + i += 1 + change[v_name_port] = int(change_value[1:i]) + change_value = change_value[i:] + return change_value + + def _parse_set_info_values(argvish): new_cmd_format, opts, args = validate_args(argvish) @@ -294,56 +321,15 @@ def _parse_set_info_values(argvish): for search_value, change_value in searches_and_changes: devs = builder.search_devs(parse_search_value(search_value)) change = {} - ip = '' - if change_value and change_value[0].isdigit(): - i = 1 - while (i < len(change_value) and - change_value[i] in '0123456789.'): - i += 1 - ip = change_value[:i] - change_value = change_value[i:] - elif change_value and change_value.startswith('['): - i = 1 - while i < len(change_value) and change_value[i] != ']': - i += 1 - i += 1 - ip = change_value[:i].lstrip('[').rstrip(']') - change_value = change_value[i:] - if ip: - change['ip'] = validate_and_normalize_ip(ip) - if change_value.startswith(':'): - i = 1 - while i < len(change_value) and change_value[i].isdigit(): - i += 1 - change['port'] = int(change_value[1:i]) - change_value = change_value[i:] + + change_value = calculate_change_value(change_value, change, + 'ip', 'port') + if change_value.startswith('R'): change_value = change_value[1:] - replication_ip = '' - if change_value and change_value[0].isdigit(): - i = 1 - while (i < len(change_value) and - change_value[i] in '0123456789.'): - i += 1 - replication_ip = change_value[:i] - change_value = change_value[i:] - elif change_value and change_value.startswith('['): - i = 1 - while i < len(change_value) and change_value[i] != ']': - i += 1 - i += 1 - replication_ip = \ - change_value[:i].lstrip('[').rstrip(']') - change_value = change_value[i:] - if replication_ip: - change['replication_ip'] = \ - validate_and_normalize_ip(replication_ip) - if change_value.startswith(':'): - i = 1 - while i < len(change_value) 
and change_value[i].isdigit(): - i += 1 - change['replication_port'] = int(change_value[1:i]) - change_value = change_value[i:] + change_value = calculate_change_value(change_value, change, + 'replication_ip', + 'replication_port') if change_value.startswith('/'): i = 1 while i < len(change_value) and change_value[i] != '_': From 77d6d015f694eb4fdafc3b1ae6ff1caa9d0cbdb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Antal?= Date: Fri, 15 Jul 2016 14:46:41 +0200 Subject: [PATCH 068/156] Use more specific asserts in test/unit/common/ring I changed asserts with more specific assert methods. e.g.: from assertTrue(sth == None) to assertIsNone(*) or assertTrue(isinstance(inst, type)) to assertIsInstance(inst, type) or assertTrue(not sth) to assertFalse(sth). The code gets more readable, and a better description will be shown on failure. Change-Id: I9531c9939aa7c2dac127b5dc865b8d396dab318f --- test/unit/common/ring/test_builder.py | 34 +++++++++++++-------------- test/unit/common/ring/test_ring.py | 4 ++-- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index 0ced592741..c8d649d43a 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -69,7 +69,7 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb.min_part_hours, 1) self.assertEqual(rb.parts, 2 ** 8) self.assertEqual(rb.devs, []) - self.assertEqual(rb.devs_changed, False) + self.assertFalse(rb.devs_changed) self.assertEqual(rb.version, 0) def test_overlarge_part_powers(self): @@ -110,11 +110,11 @@ class TestRingBuilder(unittest.TestCase): rb_copy = copy.deepcopy(rb) self.assertEqual(rb.to_dict(), rb_copy.to_dict()) - self.assertTrue(rb.devs is not rb_copy.devs) - self.assertTrue(rb._replica2part2dev is not rb_copy._replica2part2dev) - self.assertTrue(rb._last_part_moves is not rb_copy._last_part_moves) - self.assertTrue(rb._remove_devs is not rb_copy._remove_devs) - self.assertTrue(rb._dispersion_graph is not rb_copy._dispersion_graph) + self.assertIsNot(rb.devs, rb_copy.devs) + self.assertIsNot(rb._replica2part2dev, rb_copy._replica2part2dev) + self.assertIsNot(rb._last_part_moves, rb_copy._last_part_moves) + self.assertIsNot(rb._remove_devs, rb_copy._remove_devs) + self.assertIsNot(rb._dispersion_graph, rb_copy._dispersion_graph) def test_get_ring(self): rb = ring.RingBuilder(8, 3, 1) @@ -129,14 +129,14 @@ class TestRingBuilder(unittest.TestCase): rb.remove_dev(1) rb.rebalance() r = rb.get_ring() - self.assertTrue(isinstance(r, ring.RingData)) + self.assertIsInstance(r, ring.RingData) r2 = rb.get_ring() - self.assertTrue(r is r2) + self.assertIs(r, r2) rb.rebalance() r3 = rb.get_ring() - self.assertTrue(r3 is not r2) + self.assertIsNot(r3, r2) r4 = rb.get_ring() - self.assertTrue(r3 is r4) + self.assertIs(r3, r4) def test_rebalance_with_seed(self): devs = [(0, 10000), (1, 10001), (2, 10002), (1, 10003)] @@ -157,7 +157,7 @@ class TestRingBuilder(unittest.TestCase): rb2 = ring_builders[2] r0 = rb0.get_ring() - self.assertTrue(rb0.get_ring() is r0) + self.assertIs(rb0.get_ring(), r0) rb0.rebalance() # NO SEED rb1.rebalance(seed=10) @@ -166,7 +166,7 @@ class TestRingBuilder(unittest.TestCase): r1 = rb1.get_ring() r2 = rb2.get_ring() - self.assertFalse(rb0.get_ring() is r0) + self.assertIsNot(rb0.get_ring(), r0) self.assertNotEqual(r0.to_dict(), r1.to_dict()) self.assertEqual(r1.to_dict(), r2.to_dict()) @@ -391,7 +391,7 @@ class TestRingBuilder(unittest.TestCase): _, balance, _ =
rb.rebalance(seed=2) # maybe not *perfect*, but should be close - self.assertTrue(balance <= 1) + self.assertLessEqual(balance, 1) def test_multitier_partial(self): # Multitier test, nothing full @@ -2063,8 +2063,8 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(counts, {0: 128, 1: 128, 2: 256, 3: 256}) dev_usage, worst = rb.validate() - self.assertTrue(dev_usage is None) - self.assertTrue(worst is None) + self.assertIsNone(dev_usage) + self.assertIsNone(worst) dev_usage, worst = rb.validate(stats=True) self.assertEqual(list(dev_usage), [32, 32, 64, 64, @@ -2394,7 +2394,7 @@ class TestRingBuilder(unittest.TestCase): new_dev_id = rb.add_dev({'region': 0, 'zone': 0, 'ip': '127.0.0.1', 'port': 6200, 'weight': 1.0, 'device': 'sda'}) - self.assertTrue(new_dev_id < add_dev_count) + self.assertLess(new_dev_id, add_dev_count) # try with non-contiguous holes # [0, 1, None, 3, 4, None] @@ -2494,7 +2494,7 @@ class TestRingBuilder(unittest.TestCase): # Due to the increased partition power, the partition each object # is assigned to has changed. If the old partition was X, it will # now be either located in 2*X or 2*X+1 - self.assertTrue(new_part in [old_part * 2, old_part * 2 + 1]) + self.assertIn(new_part, [old_part * 2, old_part * 2 + 1]) # Importantly, we expect the objects to be placed on the same # nodes after increasing the partition power diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index 7adceae17e..7df5b57fab 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -198,9 +198,9 @@ class TestRing(TestRingBase): utils.SWIFT_CONF_FILE = _orig_swift_conf_file def test_has_changed(self): - self.assertEqual(self.ring.has_changed(), False) + self.assertFalse(self.ring.has_changed()) os.utime(self.testgz, (time() + 60, time() + 60)) - self.assertEqual(self.ring.has_changed(), True) + self.assertTrue(self.ring.has_changed()) def test_reload(self): os.utime(self.testgz, (time() - 300, time() - 300)) From 75a58a6dd8ef439c613226b562eb3ab766d16c5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Antal?= Date: Fri, 15 Jul 2016 15:02:00 +0200 Subject: [PATCH 069/156] Use more specific asserts in test/unit/proxy tests I changed asserts with more specific assert methods. e.g.: from assertTrue(sth == None) to assertIsNone(*) or assertTrue(isinstance(inst, type)) to assertIsInstance(inst, type) or assertTrue(not sth) to assertFalse(sth). The code gets more readable, and a better description will be shown on failure.
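The improved failure output is easy to demonstrate with a stand-alone sketch (illustrative only, not part of the patch):

    import unittest

    class Demo(unittest.TestCase):
        def test_generic(self):
            # Fails with only: AssertionError: False is not true
            self.assertTrue('x' in {})

        def test_specific(self):
            # Fails with: AssertionError: 'x' not found in {}
            self.assertIn('x', {})

    if __name__ == '__main__':
        unittest.main()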
Change-Id: If6aad8681aab7c9a41d65a4f449d8abbe3e64616 --- test/unit/proxy/controllers/test_account.py | 14 +- test/unit/proxy/controllers/test_base.py | 44 ++-- test/unit/proxy/controllers/test_container.py | 12 +- test/unit/proxy/controllers/test_info.py | 74 +++--- test/unit/proxy/controllers/test_obj.py | 64 ++--- test/unit/proxy/test_server.py | 227 +++++++++--------- test/unit/proxy/test_sysmeta.py | 6 +- 7 files changed, 218 insertions(+), 223 deletions(-) diff --git a/test/unit/proxy/controllers/test_account.py b/test/unit/proxy/controllers/test_account.py index 86206f02a1..a46dcc90f1 100644 --- a/test/unit/proxy/controllers/test_account.py +++ b/test/unit/proxy/controllers/test_account.py @@ -86,7 +86,7 @@ class TestAccountController(unittest.TestCase): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: - self.assertTrue(key not in resp.headers) + self.assertNotIn(key, resp.headers) req = Request.blank('/v1/a', environ={'swift_owner': True}) with mock.patch('swift.proxy.controllers.base.http_connect', @@ -94,7 +94,7 @@ class TestAccountController(unittest.TestCase): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: - self.assertTrue(key in resp.headers) + self.assertIn(key, resp.headers) def test_get_deleted_account(self): resp_headers = { @@ -148,9 +148,9 @@ class TestAccountController(unittest.TestCase): fake_http_connect(200, 200, give_connect=callback)): controller.PUT(req) self.assertEqual(context['method'], 'PUT') - self.assertTrue(sys_meta_key in context['headers']) + self.assertIn(sys_meta_key, context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') - self.assertTrue(user_meta_key in context['headers']) + self.assertIn(user_meta_key, context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotEqual(context['headers']['x-timestamp'], '1.0') @@ -171,9 +171,9 @@ class TestAccountController(unittest.TestCase): fake_http_connect(200, 200, give_connect=callback)): controller.POST(req) self.assertEqual(context['method'], 'POST') - self.assertTrue(sys_meta_key in context['headers']) + self.assertIn(sys_meta_key, context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') - self.assertTrue(user_meta_key in context['headers']) + self.assertIn(user_meta_key, context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotEqual(context['headers']['x-timestamp'], '1.0') @@ -212,7 +212,7 @@ class TestAccountController(unittest.TestCase): self.assertEqual(resp.headers.get(header), value) else: # blank ACLs should result in no header - self.assertTrue(header not in resp.headers) + self.assertNotIn(header, resp.headers) def test_add_acls_impossible_cases(self): # For test coverage: verify that defensive coding does defend, in cases diff --git a/test/unit/proxy/controllers/test_base.py b/test/unit/proxy/controllers/test_base.py index 55214f6d03..a4a85479c5 100644 --- a/test/unit/proxy/controllers/test_base.py +++ b/test/unit/proxy/controllers/test_base.py @@ -432,8 +432,8 @@ class TestFuncs(unittest.TestCase): def test_headers_to_container_info_missing(self): resp = headers_to_container_info({}, 404) self.assertEqual(resp['status'], 404) - self.assertEqual(resp['read_acl'], None) - self.assertEqual(resp['write_acl'], None) + self.assertIsNone(resp['read_acl']) + self.assertIsNone(resp['write_acl']) def test_headers_to_container_info_meta(self): headers = {'X-Container-Meta-Whatevs': 14, @@ -482,8 
+482,8 @@ class TestFuncs(unittest.TestCase): def test_headers_to_account_info_missing(self): resp = headers_to_account_info({}, 404) self.assertEqual(resp['status'], 404) - self.assertEqual(resp['bytes'], None) - self.assertEqual(resp['container_count'], None) + self.assertIsNone(resp['bytes']) + self.assertIsNone(resp['container_count']) def test_headers_to_account_info_meta(self): headers = {'X-Account-Meta-Whatevs': 14, @@ -519,8 +519,8 @@ class TestFuncs(unittest.TestCase): def test_headers_to_object_info_missing(self): resp = headers_to_object_info({}, 404) self.assertEqual(resp['status'], 404) - self.assertEqual(resp['length'], None) - self.assertEqual(resp['etag'], None) + self.assertIsNone(resp['length']) + self.assertIsNone(resp['etag']) def test_headers_to_object_info_meta(self): headers = {'X-Object-Meta-Whatevs': 14, @@ -564,18 +564,18 @@ class TestFuncs(unittest.TestCase): def test_base_have_quorum(self): base = Controller(self.app) # just throw a bunch of test cases at it - self.assertEqual(base.have_quorum([201, 404], 3), False) - self.assertEqual(base.have_quorum([201, 201], 4), True) - self.assertEqual(base.have_quorum([201], 4), False) - self.assertEqual(base.have_quorum([201, 201, 404, 404], 4), True) - self.assertEqual(base.have_quorum([201, 302, 418, 503], 4), False) - self.assertEqual(base.have_quorum([201, 503, 503, 201], 4), True) - self.assertEqual(base.have_quorum([201, 201], 3), True) - self.assertEqual(base.have_quorum([404, 404], 3), True) - self.assertEqual(base.have_quorum([201, 201], 2), True) - self.assertEqual(base.have_quorum([201, 404], 2), True) - self.assertEqual(base.have_quorum([404, 404], 2), True) - self.assertEqual(base.have_quorum([201, 404, 201, 201], 4), True) + self.assertFalse(base.have_quorum([201, 404], 3)) + self.assertTrue(base.have_quorum([201, 201], 4)) + self.assertFalse(base.have_quorum([201], 4)) + self.assertTrue(base.have_quorum([201, 201, 404, 404], 4)) + self.assertFalse(base.have_quorum([201, 302, 418, 503], 4)) + self.assertTrue(base.have_quorum([201, 503, 503, 201], 4)) + self.assertTrue(base.have_quorum([201, 201], 3)) + self.assertTrue(base.have_quorum([404, 404], 3)) + self.assertTrue(base.have_quorum([201, 201], 2)) + self.assertTrue(base.have_quorum([201, 404], 2)) + self.assertTrue(base.have_quorum([404, 404], 2)) + self.assertTrue(base.have_quorum([201, 404, 201, 201], 4)) def test_best_response_overrides(self): base = Controller(self.app) @@ -685,9 +685,9 @@ class TestFuncs(unittest.TestCase): 'x-base-meta-size': '151M', 'connection': 'close'} for k, v in expected_headers.items(): - self.assertTrue(k in dst_headers) + self.assertIn(k, dst_headers) self.assertEqual(v, dst_headers[k]) - self.assertFalse('new-owner' in dst_headers) + self.assertNotIn('new-owner', dst_headers) def test_generate_request_headers_with_sysmeta(self): base = Controller(self.app) @@ -699,10 +699,10 @@ class TestFuncs(unittest.TestCase): req = Request.blank('/v1/a/c/o', headers=hdrs) dst_headers = base.generate_request_headers(req, transfer=True) for k, v in good_hdrs.items(): - self.assertTrue(k.lower() in dst_headers) + self.assertIn(k.lower(), dst_headers) self.assertEqual(v, dst_headers[k.lower()]) for k, v in bad_hdrs.items(): - self.assertFalse(k.lower() in dst_headers) + self.assertNotIn(k.lower(), dst_headers) def test_generate_request_headers_with_no_orig_req(self): base = Controller(self.app) diff --git a/test/unit/proxy/controllers/test_container.py b/test/unit/proxy/controllers/test_container.py index f08a90dfb1..98176b53c0 100644 
--- a/test/unit/proxy/controllers/test_container.py +++ b/test/unit/proxy/controllers/test_container.py @@ -122,7 +122,7 @@ class TestContainerController(TestRingBase): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: - self.assertTrue(key not in resp.headers) + self.assertNotIn(key, resp.headers) req = Request.blank('/v1/a/c', environ={'swift_owner': True}) with mock.patch('swift.proxy.controllers.base.http_connect', @@ -130,7 +130,7 @@ class TestContainerController(TestRingBase): resp = controller.HEAD(req) self.assertEqual(2, resp.status_int // 100) for key in owner_headers: - self.assertTrue(key in resp.headers) + self.assertIn(key, resp.headers) def test_sys_meta_headers_PUT(self): # check that headers in sys meta namespace make it through @@ -150,9 +150,9 @@ class TestContainerController(TestRingBase): fake_http_connect(200, 200, give_connect=callback)): controller.PUT(req) self.assertEqual(context['method'], 'PUT') - self.assertTrue(sys_meta_key in context['headers']) + self.assertIn(sys_meta_key, context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') - self.assertTrue(user_meta_key in context['headers']) + self.assertIn(user_meta_key, context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotEqual(context['headers']['x-timestamp'], '1.0') @@ -173,9 +173,9 @@ class TestContainerController(TestRingBase): fake_http_connect(200, 200, give_connect=callback)): controller.POST(req) self.assertEqual(context['method'], 'POST') - self.assertTrue(sys_meta_key in context['headers']) + self.assertIn(sys_meta_key, context['headers']) self.assertEqual(context['headers'][sys_meta_key], 'foo') - self.assertTrue(user_meta_key in context['headers']) + self.assertIn(user_meta_key, context['headers']) self.assertEqual(context['headers'][user_meta_key], 'bar') self.assertNotEqual(context['headers']['x-timestamp'], '1.0') diff --git a/test/unit/proxy/controllers/test_info.py b/test/unit/proxy/controllers/test_info.py index adf3329683..2317acfbe1 100644 --- a/test/unit/proxy/controllers/test_info.py +++ b/test/unit/proxy/controllers/test_info.py @@ -49,7 +49,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( '/info', environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('403 Forbidden', str(resp)) def test_get_info(self): @@ -60,12 +60,12 @@ class TestInfoController(unittest.TestCase): req = Request.blank( '/info', environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) info = json.loads(resp.body) - self.assertTrue('admin' not in info) - self.assertTrue('foo' in info) - self.assertTrue('bar' in info['foo']) + self.assertNotIn('admin', info) + self.assertIn('foo', info) + self.assertIn('bar', info['foo']) self.assertEqual(info['foo']['bar'], 'baz') def test_options_info(self): @@ -74,9 +74,9 @@ class TestInfoController(unittest.TestCase): req = Request.blank( '/info', environ={'REQUEST_METHOD': 'GET'}) resp = controller.OPTIONS(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) - self.assertTrue('Allow' in resp.headers) + self.assertIn('Allow', resp.headers) def test_get_info_cors(self): controller = self.get_controller(expose_info=True) 
@@ -87,15 +87,15 @@ class TestInfoController(unittest.TestCase): '/info', environ={'REQUEST_METHOD': 'GET'}, headers={'Origin': 'http://example.com'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) info = json.loads(resp.body) - self.assertTrue('admin' not in info) - self.assertTrue('foo' in info) - self.assertTrue('bar' in info['foo']) + self.assertNotIn('admin', info) + self.assertIn('foo', info) + self.assertIn('bar', info['foo']) self.assertEqual(info['foo']['bar'], 'baz') - self.assertTrue('Access-Control-Allow-Origin' in resp.headers) - self.assertTrue('Access-Control-Expose-Headers' in resp.headers) + self.assertIn('Access-Control-Allow-Origin', resp.headers) + self.assertIn('Access-Control-Expose-Headers', resp.headers) def test_head_info(self): controller = self.get_controller(expose_info=True) @@ -105,7 +105,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( '/info', environ={'REQUEST_METHOD': 'HEAD'}) resp = controller.HEAD(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) def test_disallow_info(self): @@ -118,13 +118,13 @@ class TestInfoController(unittest.TestCase): req = Request.blank( '/info', environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) info = json.loads(resp.body) - self.assertTrue('foo' in info) - self.assertTrue('bar' in info['foo']) + self.assertIn('foo', info) + self.assertIn('bar', info['foo']) self.assertEqual(info['foo']['bar'], 'baz') - self.assertTrue('foo2' not in info) + self.assertNotIn('foo2', info) def test_disabled_admin_info(self): controller = self.get_controller(expose_info=True, admin_key='') @@ -138,7 +138,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('403 Forbidden', str(resp)) def test_get_admin_info(self): @@ -154,12 +154,12 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) info = json.loads(resp.body) - self.assertTrue('admin' in info) - self.assertTrue('qux' in info['admin']) - self.assertTrue('quux' in info['admin']['qux']) + self.assertIn('admin', info) + self.assertIn('qux', info['admin']) + self.assertIn('quux', info['admin']['qux']) self.assertEqual(info['admin']['qux']['quux'], 'corge') def test_head_admin_info(self): @@ -175,7 +175,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'HEAD'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) expires = int(time.time() + 86400) @@ -185,7 +185,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'HEAD'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) def 
test_get_admin_info_invalid_method(self): @@ -201,7 +201,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('401 Unauthorized', str(resp)) def test_get_admin_info_invalid_expires(self): @@ -217,7 +217,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('401 Unauthorized', str(resp)) expires = 'abc' @@ -227,7 +227,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('401 Unauthorized', str(resp)) def test_get_admin_info_invalid_path(self): @@ -243,7 +243,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('401 Unauthorized', str(resp)) def test_get_admin_info_invalid_key(self): @@ -259,7 +259,7 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('401 Unauthorized', str(resp)) def test_admin_disallow_info(self): @@ -277,15 +277,15 @@ class TestInfoController(unittest.TestCase): req = Request.blank( path, environ={'REQUEST_METHOD': 'GET'}) resp = controller.GET(req) - self.assertTrue(isinstance(resp, HTTPException)) + self.assertIsInstance(resp, HTTPException) self.assertEqual('200 OK', str(resp)) info = json.loads(resp.body) - self.assertTrue('foo2' not in info) - self.assertTrue('admin' in info) - self.assertTrue('disallowed_sections' in info['admin']) - self.assertTrue('foo2' in info['admin']['disallowed_sections']) - self.assertTrue('qux' in info['admin']) - self.assertTrue('quux' in info['admin']['qux']) + self.assertNotIn('foo2', info) + self.assertIn('admin', info) + self.assertIn('disallowed_sections', info['admin']) + self.assertIn('foo2', info['admin']['disallowed_sections']) + self.assertIn('qux', info['admin']) + self.assertIn('quux', info['admin']['qux']) self.assertEqual(info['admin']['qux']['quux'], 'corge') diff --git a/test/unit/proxy/controllers/test_obj.py b/test/unit/proxy/controllers/test_obj.py index 741be84b20..d30525a985 100755 --- a/test/unit/proxy/controllers/test_obj.py +++ b/test/unit/proxy/controllers/test_obj.py @@ -231,7 +231,7 @@ class BaseObjectControllerMixin(object): # make sure we have enough local nodes (sanity) all_local_nodes = [n for n in all_nodes if self.app.write_affinity_is_local_fn(n)] - self.assertTrue(len(all_local_nodes) >= self.replicas() + 1) + self.assertGreaterEqual(len(all_local_nodes), self.replicas() + 1) # finally, create the local_first_nodes iter and flatten it out local_first_nodes = list(controller.iter_nodes_local_first( @@ -280,7 +280,7 @@ class BaseObjectControllerMixin(object): with set_http_connect(slow_connect=True): nodes = [dict(ip='', port='', device='')] res = controller._connect_put_node(nodes, '', req, {}, ('', '')) - self.assertTrue(res is 
None) + self.assertIsNone(res) def test_DELETE_simple(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE') @@ -1143,10 +1143,10 @@ class TestReplicatedObjController(BaseObjectControllerMixin, self.assertEqual(resp.status_int, 201) for given_headers in put_headers: self.assertEqual(given_headers.get('X-Delete-At'), t) - self.assertTrue('X-Delete-At-Host' in given_headers) - self.assertTrue('X-Delete-At-Device' in given_headers) - self.assertTrue('X-Delete-At-Partition' in given_headers) - self.assertTrue('X-Delete-At-Container' in given_headers) + self.assertIn('X-Delete-At-Host', given_headers) + self.assertIn('X-Delete-At-Device', given_headers) + self.assertIn('X-Delete-At-Partition', given_headers) + self.assertIn('X-Delete-At-Container', given_headers) def test_PUT_converts_delete_after_to_delete_at(self): req = swob.Request.blank('/v1/a/c/o', method='PUT', body='', @@ -1168,10 +1168,10 @@ class TestReplicatedObjController(BaseObjectControllerMixin, for given_headers in put_headers: self.assertEqual(given_headers.get('X-Delete-At'), expected_delete_at) - self.assertTrue('X-Delete-At-Host' in given_headers) - self.assertTrue('X-Delete-At-Device' in given_headers) - self.assertTrue('X-Delete-At-Partition' in given_headers) - self.assertTrue('X-Delete-At-Container' in given_headers) + self.assertIn('X-Delete-At-Host', given_headers) + self.assertIn('X-Delete-At-Device', given_headers) + self.assertIn('X-Delete-At-Partition', given_headers) + self.assertIn('X-Delete-At-Container', given_headers) def test_container_sync_put_x_timestamp_not_found(self): test_indexes = [None] + [int(p) for p in POLICIES] @@ -1915,9 +1915,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): frag_archives = [] for connection_id, info in put_requests.items(): body = unchunk_body(''.join(info['chunks'])) - self.assertTrue(info['boundary'] is not None, - "didn't get boundary for conn %r" % ( - connection_id,)) + self.assertIsNotNone(info['boundary'], + "didn't get boundary for conn %r" % ( + connection_id,)) self.assertTrue(size > int(info['backend-content-length']) > 0, "invalid backend-content-length for conn %r" % ( connection_id,)) @@ -2306,9 +2306,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): # ... regardless we should never need to fetch more than ec_ndata # frags for any given etag for etag, frags in collected_responses.items(): - self.assertTrue(len(frags) <= self.policy.ec_ndata, - 'collected %s frags for etag %s' % ( - len(frags), etag)) + self.assertLessEqual(len(frags), self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) def test_GET_with_many_missed_overwrite_will_need_handoff(self): obj1 = self._make_ec_object_stub() @@ -2357,9 +2357,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): # ... regardless we should never need to fetch more than ec_ndata # frags for any given etag for etag, frags in collected_responses.items(): - self.assertTrue(len(frags) <= self.policy.ec_ndata, - 'collected %s frags for etag %s' % ( - len(frags), etag)) + self.assertLessEqual(len(frags), self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self): obj1 = self._make_ec_object_stub() @@ -2420,9 +2420,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): # ... 
regardless we should never need to fetch more than ec_ndata # frags for any given etag for etag, frags in collected_responses.items(): - self.assertTrue(len(frags) <= self.policy.ec_ndata, - 'collected %s frags for etag %s' % ( - len(frags), etag)) + self.assertLessEqual(len(frags), self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self): obj1 = self._make_ec_object_stub() @@ -2480,9 +2480,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): # ... regardless we should never need to fetch more than ec_ndata # frags for any given etag for etag, frags in collected_responses.items(): - self.assertTrue(len(frags) <= self.policy.ec_ndata, - 'collected %s frags for etag %s' % ( - len(frags), etag)) + self.assertLessEqual(len(frags), self.policy.ec_ndata, + 'collected %s frags for etag %s' % ( + len(frags), etag)) def test_GET_mixed_success_with_range(self): fragment_size = self.policy.fragment_size @@ -2682,8 +2682,8 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): error_lines = self.logger.get_lines_for_level('error') self.assertEqual(1, len(error_lines)) msg = error_lines[0] - self.assertTrue('Error decoding fragments' in msg) - self.assertTrue('/a/c/o' in msg) + self.assertIn('Error decoding fragments', msg) + self.assertIn('/a/c/o', msg) log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0] self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError) @@ -2713,9 +2713,9 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertEqual(self.replicas(), len(error_lines)) nparity = self.policy.ec_nparity for line in error_lines[:nparity]: - self.assertTrue('retrying' in line) + self.assertIn('retrying', line) for line in error_lines[nparity:]: - self.assertTrue('ChunkReadTimeout (0.01s)' in line) + self.assertIn('ChunkReadTimeout (0.01s)', line) def test_GET_read_timeout_resume(self): segment_size = self.policy.ec_segment_size @@ -2741,7 +2741,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): self.assertTrue(md5(resp.body).hexdigest(), etag) error_lines = self.logger.get_lines_for_level('error') self.assertEqual(1, len(error_lines)) - self.assertTrue('retrying' in error_lines[0]) + self.assertIn('retrying', error_lines[0]) def test_fix_response_HEAD(self): headers = {'X-Object-Sysmeta-Ec-Content-Length': '10', @@ -2799,7 +2799,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): resp = req.get_response(self.app) response_time = time.time() - start self.assertEqual(resp.status_int, 201) - self.assertTrue(response_time < response_sleep) + self.assertLess(response_time, response_sleep) def test_PUT_with_just_enough_durable_responses(self): req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT', @@ -2861,7 +2861,7 @@ class TestECObjController(BaseObjectControllerMixin, unittest.TestCase): headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)), 'X-Object-Sysmeta-Ec-Etag': body_etag} start = int(req_range.split('-')[0]) - self.assertTrue(start >= 0) # sanity + self.assertGreaterEqual(start, 0) # sanity title, exp = swob.RESPONSE_REASONS[416] range_not_satisfiable_body = \ '
<html><h1>%s</h1><p>%s</p></html>
' % (title, exp) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 44a23ef6f4..fc34c8c9d0 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -350,7 +350,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, True) - self.assertEqual(count, None) + self.assertIsNone(count) # Test the internal representation in memcache # 'container_count' changed from 0 to None @@ -368,7 +368,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, True) - self.assertEqual(count, None) + self.assertIsNone(count) # tests if some http status codes are not cached def test_account_info_no_cache(self): @@ -378,7 +378,7 @@ class TestController(unittest.TestCase): self.controller.account_info(self.account, self.request) self.assertEqual(len(self.memcache.keys()), 0) self.check_account_info_return(partition, nodes, True) - self.assertEqual(count, None) + self.assertIsNone(count) with save_globals(): # We cache if we have two 404 responses - fail if only one @@ -394,7 +394,7 @@ class TestController(unittest.TestCase): partition, nodes, count = \ self.controller.account_info(self.account, self.request) self.check_account_info_return(partition, nodes, is_none=True) - self.assertEqual(count, None) + self.assertIsNone(count) def check_container_info_return(self, ret, is_none=False): if is_none: @@ -433,7 +433,7 @@ class TestController(unittest.TestCase): cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) - self.assertTrue(isinstance(cache_value, dict)) + self.assertIsInstance(cache_value, dict) self.assertEqual(200, cache_value.get('status')) set_http_connect() @@ -455,7 +455,7 @@ class TestController(unittest.TestCase): cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) - self.assertTrue(isinstance(cache_value, dict)) + self.assertIsInstance(cache_value, dict) self.assertEqual(404, cache_value.get('status')) set_http_connect() @@ -470,7 +470,7 @@ class TestController(unittest.TestCase): cache_key = get_cache_key(self.account, self.container) cache_value = self.memcache.get(cache_key) - self.assertTrue(isinstance(cache_value, dict)) + self.assertIsInstance(cache_value, dict) self.assertEqual(404, cache_value.get('status')) set_http_connect() @@ -507,17 +507,17 @@ class TestController(unittest.TestCase): # Test info is returned as strings self.assertEqual(ai.get('foo'), '\xe2\x98\x83') - self.assertTrue(isinstance(ai.get('foo'), str)) + self.assertIsInstance(ai.get('foo'), str) # Test info['meta'] is returned as strings m = ai.get('meta', {}) self.assertEqual(m.get('bar'), '\xe2\x98\x83') - self.assertTrue(isinstance(m.get('bar'), str)) + self.assertIsInstance(m.get('bar'), str) # Test info['sysmeta'] is returned as strings m = ai.get('sysmeta', {}) self.assertEqual(m.get('baz'), '\xe2\x98\x83') - self.assertTrue(isinstance(m.get('baz'), str)) + self.assertIsInstance(m.get('baz'), str) def test_get_container_info_returns_values_as_strings(self): app = mock.MagicMock() @@ -533,22 +533,22 @@ class TestController(unittest.TestCase): # Test info is returned as strings self.assertEqual(ci.get('foo'), '\xe2\x98\x83') - self.assertTrue(isinstance(ci.get('foo'), str)) + self.assertIsInstance(ci.get('foo'), str) # Test 
info['meta'] is returned as strings m = ci.get('meta', {}) self.assertEqual(m.get('bar'), '\xe2\x98\x83') - self.assertTrue(isinstance(m.get('bar'), str)) + self.assertIsInstance(m.get('bar'), str) # Test info['sysmeta'] is returned as strings m = ci.get('sysmeta', {}) self.assertEqual(m.get('baz'), '\xe2\x98\x83') - self.assertTrue(isinstance(m.get('baz'), str)) + self.assertIsInstance(m.get('baz'), str) # Test info['cors'] is returned as strings m = ci.get('cors', {}) self.assertEqual(m.get('expose_headers'), '\xe2\x98\x83') - self.assertTrue(isinstance(m.get('expose_headers'), str)) + self.assertIsInstance(m.get('expose_headers'), str) @patch_policies([StoragePolicy(0, 'zero', True, object_ring=FakeRing())]) @@ -814,7 +814,7 @@ class TestProxyServer(unittest.TestCase): FakeMemcache(), container_ring=FakeRing(), account_ring=FakeRing()) - self.assertEqual(baseapp.concurrent_gets, True) + self.assertTrue(baseapp.concurrent_gets) self.assertEqual(baseapp.concurrency_timeout, 0) baseapp.update_request(req) resp = baseapp.handle_request(req) @@ -852,11 +852,11 @@ class TestProxyServer(unittest.TestCase): container_ring=FakeRing()) self.assertTrue(app.expose_info) - self.assertTrue(isinstance(app.disallowed_sections, list)) + self.assertIsInstance(app.disallowed_sections, list) self.assertEqual(1, len(app.disallowed_sections)) self.assertEqual(['swift.valid_api_versions'], app.disallowed_sections) - self.assertTrue(app.admin_key is None) + self.assertIsNone(app.admin_key) def test_get_info_controller(self): req = Request.blank('/info') @@ -866,11 +866,11 @@ class TestProxyServer(unittest.TestCase): controller, path_parts = app.get_controller(req) - self.assertTrue('version' in path_parts) - self.assertTrue(path_parts['version'] is None) - self.assertTrue('disallowed_sections' in path_parts) - self.assertTrue('expose_info' in path_parts) - self.assertTrue('admin_key' in path_parts) + self.assertIn('version', path_parts) + self.assertIsNone(path_parts['version']) + self.assertIn('disallowed_sections', path_parts) + self.assertIn('expose_info', path_parts) + self.assertIn('admin_key', path_parts) self.assertEqual(controller.__name__, 'InfoController') @@ -947,8 +947,8 @@ class TestProxyServer(unittest.TestCase): except Exception as e1: app.exception_occurred(node, 'test1', 'test1 msg') line = logger.get_lines_for_level('error')[-1] - self.assertTrue('test1 server' in line) - self.assertTrue('test1 msg' in line) + self.assertIn('test1 server', line) + self.assertIn('test1 msg', line) log_args, log_kwargs = logger.log_dict['error'][-1] self.assertTrue(log_kwargs['exc_info']) self.assertEqual(log_kwargs['exc_info'][1], e1) @@ -961,8 +961,8 @@ class TestProxyServer(unittest.TestCase): app.exception_occurred(node, 'test2', 'test2 msg', level=logging.WARNING) line = logger.get_lines_for_level('warning')[-1] - self.assertTrue('test2 server' in line) - self.assertTrue('test2 msg' in line) + self.assertIn('test2 server', line) + self.assertIn('test2 msg', line) log_args, log_kwargs = logger.log_dict['warning'][-1] self.assertTrue(log_kwargs['exc_info']) self.assertEqual(log_kwargs['exc_info'][1], e2) @@ -980,8 +980,8 @@ class TestProxyServer(unittest.TestCase): app.exception_occurred(node, 'test3', 'test3 msg', level=logging.WARNING, exc_info=e3_info) line = logger.get_lines_for_level('warning')[-1] - self.assertTrue('test3 server' in line) - self.assertTrue('test3 msg' in line) + self.assertIn('test3 server', line) + self.assertIn('test3 msg', line) log_args, log_kwargs = logger.log_dict['warning'][-1] 
self.assertTrue(log_kwargs['exc_info']) self.assertEqual(log_kwargs['exc_info'][1], e3) @@ -1011,7 +1011,7 @@ class TestProxyServer(unittest.TestCase): '/v1.0/a/c/o']: req = Request.blank(path) controller, path_parts = app.get_controller(req) - self.assertTrue(controller is not None) + self.assertIsNotNone(controller) # Ensure settings valid API version constraint works for version in ["42", 42]: @@ -1025,7 +1025,7 @@ class TestProxyServer(unittest.TestCase): req = Request.blank('/%s/a' % version) controller, _ = app.get_controller(req) - self.assertTrue(controller is not None) + self.assertIsNotNone(controller) # In this case v1 is invalid req = Request.blank('/v1/a') @@ -1379,7 +1379,7 @@ class TestObjectController(unittest.TestCase): try: df.open() except DiskFileNotExist as e: - self.assertTrue(float(e.timestamp) > 0) + self.assertGreater(float(e.timestamp), 0) else: self.fail('did not raise DiskFileNotExist') @@ -1456,7 +1456,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(ct, 'multipart/byteranges') boundary = dict(params).get('boundary') - self.assertTrue(boundary is not None) + self.assertIsNotNone(boundary) got_mime_docs = [] for mime_doc_fh in iter_multipart_mime_documents(StringIO(res.body), @@ -1652,12 +1652,12 @@ class TestObjectController(unittest.TestCase): pass self.assertEqual(res.status_int, 206) - self.assertTrue(kaboomed[0] > 0) # sanity check + self.assertGreater(kaboomed[0], 0) # sanity check ct, params = parse_content_type(res.headers['Content-Type']) self.assertEqual(ct, 'multipart/byteranges') # sanity check boundary = dict(params).get('boundary') - self.assertTrue(boundary is not None) # sanity check + self.assertIsNotNone(boundary) # sanity check got_byteranges = [] for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), boundary): @@ -1723,12 +1723,12 @@ class TestObjectController(unittest.TestCase): body = ''.join(res.app_iter) self.assertEqual(res.status_int, 206) - self.assertTrue(kaboomed[0] >= 1) # sanity check + self.assertGreaterEqual(kaboomed[0], 1) # sanity check ct, params = parse_content_type(res.headers['Content-Type']) self.assertEqual(ct, 'multipart/byteranges') # sanity check boundary = dict(params).get('boundary') - self.assertTrue(boundary is not None) # sanity check + self.assertIsNotNone(boundary) # sanity check got_byteranges = [] for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), boundary): @@ -1761,12 +1761,12 @@ class TestObjectController(unittest.TestCase): body = ''.join(res.app_iter) self.assertEqual(res.status_int, 206) - self.assertTrue(kaboomed[0] >= 1) # sanity check + self.assertGreaterEqual(kaboomed[0], 1) # sanity check ct, params = parse_content_type(res.headers['Content-Type']) self.assertEqual(ct, 'multipart/byteranges') # sanity check boundary = dict(params).get('boundary') - self.assertTrue(boundary is not None) # sanity check + self.assertIsNotNone(boundary) # sanity check got_byteranges = [] for mime_doc_fh in iter_multipart_mime_documents(StringIO(body), boundary): @@ -1860,7 +1860,7 @@ class TestObjectController(unittest.TestCase): # verify at least 2 puts made it all the way to the end of 2nd # phase, ie at least 2 .durable statuses were written num_durable_puts = sum(d is True for d in got_durable) - self.assertTrue(num_durable_puts >= 2) + self.assertGreaterEqual(num_durable_puts, 2) @unpatch_policies def test_PUT_ec_multiple_segments(self): @@ -1948,7 +1948,7 @@ class TestObjectController(unittest.TestCase): # verify at least 2 puts made it all the way to the end of 2nd # 
phase, ie at least 2 .durable statuses were written num_durable_puts = sum(d is True for d in got_durable) - self.assertTrue(num_durable_puts >= 2) + self.assertGreaterEqual(num_durable_puts, 2) @unpatch_policies def test_PUT_ec_object_etag_mismatch(self): @@ -2353,8 +2353,8 @@ class TestObjectController(unittest.TestCase): # our EC segment size is 4 KiB, so this is multiple (3) segments; # we'll verify that with a sanity check obj = 'a moose once bit my sister' * 400 - self.assertTrue( - len(obj) > POLICIES.get_by_name("ec").ec_segment_size * 2, + self.assertGreater( + len(obj), POLICIES.get_by_name("ec").ec_segment_size * 2, "object is too small for proper testing") prolis = _test_sockets[0] @@ -3300,9 +3300,9 @@ class TestObjectController(unittest.TestCase): self.assertEqual(res.status[:len(str(expected))], str(expected)) if expected < 400: - self.assertTrue('x-works' in res.headers) + self.assertIn('x-works', res.headers) self.assertEqual(res.headers['x-works'], 'yes') - self.assertTrue('accept-ranges' in res.headers) + self.assertIn('accept-ranges', res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') test_status_map((200, 200, 200, 404, 404), 200) @@ -3621,7 +3621,7 @@ class TestObjectController(unittest.TestCase): resp.body except ChunkReadTimeout: got_exc = True - self.assertTrue(not got_exc) + self.assertFalse(got_exc) self.app.recoverable_node_timeout = 0.1 set_http_connect(200, 200, 200, slow=1.0) resp = req.get_response(self.app) @@ -3668,7 +3668,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assertTrue(not got_exc) + self.assertFalse(got_exc) set_http_connect(200, 200, 200, body='lalala', slow=[1.0, 1.0], etags=['a', 'a', 'a']) @@ -3678,7 +3678,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assertTrue(not got_exc) + self.assertFalse(got_exc) set_http_connect(200, 200, 200, body='lalala', slow=[1.0, 1.0], etags=['a', 'b', 'a']) @@ -3688,7 +3688,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.body, 'lalala') except ChunkReadTimeout: got_exc = True - self.assertTrue(not got_exc) + self.assertFalse(got_exc) req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) set_http_connect(200, 200, 200, body='lalala', @@ -3863,11 +3863,11 @@ class TestObjectController(unittest.TestCase): object_ring = self.app.get_object_ring(None) first_nodes = list(self.app.iter_nodes(object_ring, 0)) second_nodes = list(self.app.iter_nodes(object_ring, 0)) - self.assertTrue(first_nodes[0] in second_nodes) + self.assertIn(first_nodes[0], second_nodes) self.app.error_limit(first_nodes[0], 'test') second_nodes = list(self.app.iter_nodes(object_ring, 0)) - self.assertTrue(first_nodes[0] not in second_nodes) + self.assertNotIn(first_nodes[0], second_nodes) def test_iter_nodes_gives_extra_if_error_limited_inline(self): object_ring = self.app.get_object_ring(None) @@ -3918,7 +3918,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'GET'}) resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3, 'Object') - self.assertEqual(resp.etag, None) + self.assertIsNone(resp.etag) resp = controller.best_response(req, [200] * 3, ['OK'] * 3, [''] * 3, 'Object', etag='68b329da9893e34099c7d8ad5cb9c940' @@ -4414,7 +4414,7 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 204' 
self.assertEqual(headers[:len(exp)], exp) - self.assertTrue('\r\nContent-Length: 0\r\n' in headers) + self.assertIn('\r\nContent-Length: 0\r\n', headers) @unpatch_policies def test_chunked_put_utf8_all_the_way_down(self): @@ -4449,7 +4449,7 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) containers = fd.read().split('\n') - self.assertTrue(ustr in containers) + self.assertIn(ustr, containers) # List account with ustr container (test json) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -4461,7 +4461,7 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) listing = json.loads(fd.read()) - self.assertTrue(ustr.decode('utf8') in [l['name'] for l in listing]) + self.assertIn(ustr.decode('utf8'), [l['name'] for l in listing]) # List account with ustr container (test xml) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -4472,7 +4472,7 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) - self.assertTrue('%s' % ustr in fd.read()) + self.assertIn('%s' % ustr, fd.read()) # Create ustr object with ustr metadata in ustr container sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -4496,7 +4496,7 @@ class TestObjectController(unittest.TestCase): exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) objects = fd.read().split('\n') - self.assertTrue(ustr in objects) + self.assertIn(ustr, objects) # List ustr container with ustr object (test json) sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -4521,7 +4521,7 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) - self.assertTrue('%s' % ustr in fd.read()) + self.assertIn('%s' % ustr, fd.read()) # Retrieve ustr object with ustr metadata sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -4533,8 +4533,8 @@ class TestObjectController(unittest.TestCase): headers = readuntil2crlfs(fd) exp = 'HTTP/1.1 200' self.assertEqual(headers[:len(exp)], exp) - self.assertTrue('\r\nX-Object-Meta-%s: %s\r\n' % - (quote(ustr_short).lower(), quote(ustr)) in headers) + self.assertIn('\r\nX-Object-Meta-%s: %s\r\n' % + (quote(ustr_short).lower(), quote(ustr)), headers) @unpatch_policies def test_chunked_put_chunked_put(self): @@ -4652,7 +4652,7 @@ class TestObjectController(unittest.TestCase): self.app, 'account', 'container', 'object') set_http_connect(200, 200, 200) resp = controller.GET(req) - self.assertTrue('accept-ranges' in resp.headers) + self.assertIn('accept-ranges', resp.headers) self.assertEqual(resp.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): @@ -4664,7 +4664,7 @@ class TestObjectController(unittest.TestCase): self.app, 'account', 'container', 'object') set_http_connect(200, 200, 200) resp = controller.HEAD(req) - self.assertTrue('accept-ranges' in resp.headers) + self.assertIn('accept-ranges', resp.headers) self.assertEqual(resp.headers['accept-ranges'], 'bytes') def test_GET_calls_authorize(self): @@ -4984,8 +4984,8 @@ class TestObjectController(unittest.TestCase): 'https://foo.bar', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in 
resp.headers['access-control-allow-methods']) + self.assertIn(verb, + resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) @@ -5002,8 +5002,7 @@ class TestObjectController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['Allow']) + self.assertIn(verb, resp.headers['Allow']) self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) req = Request.blank( '/v1/a/c/o.jpg', @@ -5038,8 +5037,8 @@ class TestObjectController(unittest.TestCase): self.assertEqual(200, resp.status_int) self.assertEqual('*', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['access-control-allow-methods']) + self.assertIn(verb, + resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) @@ -5412,7 +5411,7 @@ class TestECMismatchedFA(unittest.TestCase): environ={"REQUEST_METHOD": "PUT"}, headers={"X-Storage-Policy": "ec", "X-Auth-Token": "t"}) resp = ensure_container.get_response(prosrv) - self.assertTrue(resp.status_int in (201, 202)) + self.assertIn(resp.status_int, (201, 202)) obj1 = "first version..." put_req1 = Request.blank( @@ -5920,7 +5919,7 @@ class TestObjectECRangedGET(unittest.TestCase): self.assertEqual(content_type, 'multipart/byteranges') boundary = content_type_params.get('boundary') - self.assertTrue(boundary is not None) + self.assertIsNotNone(boundary) got_byteranges = self._parse_multipart(headers['Content-Type'], gotten_obj) @@ -5999,7 +5998,7 @@ class TestObjectECRangedGET(unittest.TestCase): self.assertEqual(content_type, 'multipart/byteranges') boundary = content_type_params.get('boundary') - self.assertTrue(boundary is not None) + self.assertIsNotNone(boundary) got_byteranges = self._parse_multipart(headers['Content-Type'], gotten_obj) @@ -6055,7 +6054,7 @@ class TestObjectECRangedGET(unittest.TestCase): self.assertEqual(content_type, 'multipart/byteranges') boundary = content_type_params.get('boundary') - self.assertTrue(boundary is not None) + self.assertIsNotNone(boundary) got_byteranges = self._parse_multipart(headers['Content-Type'], gotten_obj) @@ -6103,7 +6102,7 @@ class TestContainerController(unittest.TestCase): # default test req = Request.blank('/a/c', headers={'Content-Length': '0', 'Content-Type': 'text/plain'}) - self.assertEqual(controller._convert_policy_to_index(req), None) + self.assertIsNone(controller._convert_policy_to_index(req)) # negative test req = Request.blank('/a/c', headers={'Content-Length': '0', @@ -6140,7 +6139,7 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.app) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 404) - self.assertEqual(resp.headers['X-Storage-Policy'], None) + self.assertIsNone(resp.headers['X-Storage-Policy']) def test_error_convert_index_to_name(self): req = Request.blank('/v1/a/c') @@ -6150,13 +6149,13 @@ class TestContainerController(unittest.TestCase): resp = req.get_response(self.app) self.assertRaises(StopIteration, fake_conn.code_iter.next) self.assertEqual(resp.status_int, 200) - self.assertEqual(resp.headers['X-Storage-Policy'], None) + self.assertIsNone(resp.headers['X-Storage-Policy']) error_lines = self.app.logger.get_lines_for_level('error') self.assertEqual(2, len(error_lines)) 
for msg in error_lines: expected = "Could not translate " \ "X-Backend-Storage-Policy-Index ('-1')" - self.assertTrue(expected in msg) + self.assertIn(expected, msg) def test_transfer_headers(self): src_headers = {'x-remove-versions-location': 'x', @@ -6233,7 +6232,7 @@ class TestContainerController(unittest.TestCase): str(expected)) infocache = res.environ.get('swift.infocache', {}) if expected < 400: - self.assertTrue('x-works' in res.headers) + self.assertIn('x-works', res.headers) self.assertEqual(res.headers['x-works'], 'yes') if c_expected: self.assertIn('container/a/c', infocache) @@ -6295,9 +6294,9 @@ class TestContainerController(unittest.TestCase): self.assertEqual(res.status_int, 400) self.assertEqual(0, len(backend_requests)) expected = 'is deprecated' - self.assertTrue(expected in res.body, - '%r did not include %r' % ( - res.body, expected)) + self.assertIn(expected, res.body, + '%r did not include %r' % ( + res.body, expected)) return self.assertEqual(res.status_int, 201) self.assertEqual( @@ -6305,16 +6304,16 @@ class TestContainerController(unittest.TestCase): len(backend_requests)) for headers in backend_requests: if not requested_policy: - self.assertFalse('X-Backend-Storage-Policy-Index' in + self.assertNotIn('X-Backend-Storage-Policy-Index', headers) - self.assertTrue( - 'X-Backend-Storage-Policy-Default' in headers) + self.assertIn('X-Backend-Storage-Policy-Default', + headers) self.assertEqual( int(expected_policy), int(headers['X-Backend-Storage-Policy-Default'])) else: - self.assertTrue('X-Backend-Storage-Policy-Index' in - headers) + self.assertIn('X-Backend-Storage-Policy-Index', + headers) self.assertEqual(int(headers ['X-Backend-Storage-Policy-Index']), int(policy)) @@ -6420,9 +6419,9 @@ class TestContainerController(unittest.TestCase): self.assertEqual(10, len(calls)) for call in calls[3:6]: self.assertEqual('/account', call['path']) - self.assertTrue(key in call['headers'], - '%s call, key %s missing in headers %s' % - (call['method'], key, call['headers'])) + self.assertIn(key, call['headers'], + '%s call, key %s missing in headers %s' % ( + call['method'], key, call['headers'])) self.assertEqual(value, call['headers'][key]) def test_POST(self): @@ -6644,7 +6643,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c?format=json') self.app.update_request(req) res = controller.GET(req) - self.assertTrue('accept-ranges' in res.headers) + self.assertIn('accept-ranges', res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): @@ -6655,7 +6654,7 @@ class TestContainerController(unittest.TestCase): req = Request.blank('/v1/a/c?format=json') self.app.update_request(req) res = controller.HEAD(req) - self.assertTrue('accept-ranges' in res.headers) + self.assertIn('accept-ranges', res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_PUT_metadata(self): @@ -6862,7 +6861,7 @@ class TestContainerController(unittest.TestCase): ic = res.environ['swift.infocache'] self.assertEqual(ic['container/a/c']['status'], 204) self.assertEqual(res.content_length, 0) - self.assertTrue('transfer-encoding' not in res.headers) + self.assertNotIn('transfer-encoding', res.headers) def test_GET_calls_authorize(self): called = [False] @@ -6994,7 +6993,7 @@ class TestContainerController(unittest.TestCase): headers={'Origin': 'http://foo.com', 'Access-Control-Request-Method': 'GET'}) controller.OPTIONS(req) - self.assertTrue(count[0] < 11) + self.assertLess(count[0], 11) def 
test_OPTIONS(self): with save_globals(): @@ -7042,8 +7041,8 @@ class TestContainerController(unittest.TestCase): 'https://foo.bar', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['access-control-allow-methods']) + self.assertIn(verb, + resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) @@ -7060,8 +7059,7 @@ class TestContainerController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['Allow']) + self.assertIn(verb, resp.headers['Allow']) self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) req = Request.blank( '/v1/a/c', @@ -7097,8 +7095,8 @@ class TestContainerController(unittest.TestCase): self.assertEqual(200, resp.status_int) self.assertEqual('*', resp.headers['access-control-allow-origin']) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['access-control-allow-methods']) + self.assertIn(verb, + resp.headers['access-control-allow-methods']) self.assertEqual( len(resp.headers['access-control-allow-methods'].split(', ')), 6) @@ -7150,7 +7148,7 @@ class TestContainerController(unittest.TestCase): self.assertEqual('red', resp.headers['x-container-meta-color']) # X-Super-Secret is in the response, but not "exposed" self.assertEqual('hush', resp.headers['x-super-secret']) - self.assertTrue('access-control-expose-headers' in resp.headers) + self.assertIn('access-control-expose-headers', resp.headers) exposed = set( h.strip() for h in resp.headers['access-control-expose-headers'].split(',')) @@ -7369,8 +7367,7 @@ class TestAccountController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST HEAD'.split(): - self.assertTrue( - verb in resp.headers['Allow']) + self.assertIn(verb, resp.headers['Allow']) self.assertEqual(len(resp.headers['Allow'].split(', ')), 4) # Test a CORS OPTIONS request (i.e. 
including Origin and @@ -7385,8 +7382,7 @@ class TestAccountController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST HEAD'.split(): - self.assertTrue( - verb in resp.headers['Allow']) + self.assertIn(verb, resp.headers['Allow']) self.assertEqual(len(resp.headers['Allow'].split(', ')), 4) self.app.allow_account_management = True @@ -7396,8 +7392,7 @@ class TestAccountController(unittest.TestCase): resp = controller.OPTIONS(req) self.assertEqual(200, resp.status_int) for verb in 'OPTIONS GET POST PUT DELETE HEAD'.split(): - self.assertTrue( - verb in resp.headers['Allow']) + self.assertIn(verb, resp.headers['Allow']) self.assertEqual(len(resp.headers['Allow'].split(', ')), 6) def test_GET(self): @@ -7525,9 +7520,9 @@ class TestAccountController(unittest.TestCase): give_connect=callback) self.assertEqual(9, len(calls)) for call in calls: - self.assertTrue(key in call['headers'], - '%s call, key %s missing in headers %s' % - (call['method'], key, call['headers'])) + self.assertIn(key, call['headers'], + '%s call, key %s missing in headers %s' % + (call['method'], key, call['headers'])) self.assertEqual(value, call['headers'][key]) def test_connection_refused(self): @@ -7559,7 +7554,7 @@ class TestAccountController(unittest.TestCase): req = Request.blank('/v1/a?format=json') self.app.update_request(req) res = controller.GET(req) - self.assertTrue('accept-ranges' in res.headers) + self.assertIn('accept-ranges', res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_response_head_accept_ranges_header(self): @@ -7570,7 +7565,7 @@ class TestAccountController(unittest.TestCase): self.app.update_request(req) res = controller.HEAD(req) res.body - self.assertTrue('accept-ranges' in res.headers) + self.assertIn('accept-ranges', res.headers) self.assertEqual(res.headers['accept-ranges'], 'bytes') def test_PUT(self): @@ -7907,7 +7902,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase): # Not a swift_owner -- ACLs should NOT be in response header = 'X-Account-Access-Control' - self.assertTrue(header not in resp.headers, '%r was in %r' % ( + self.assertNotIn(header, resp.headers, '%r was in %r' % ( header, resp.headers)) # Same setup -- mock acct server will provide ACLs @@ -7917,7 +7912,7 @@ class TestAccountControllerFakeGetResponse(unittest.TestCase): resp = app.handle_request(req) # For a swift_owner, the ACLs *should* be in response - self.assertTrue(header in resp.headers, '%r not in %r' % ( + self.assertIn(header, resp.headers, '%r not in %r' % ( header, resp.headers)) def test_account_acls_through_delegation(self): @@ -8143,7 +8138,7 @@ class TestSwiftInfo(unittest.TestCase): container_ring=FakeRing()) si = utils.get_swift_info()['swift'] - self.assertTrue('version' in si) + self.assertIn('version', si) self.assertEqual(si['max_file_size'], constraints.MAX_FILE_SIZE) self.assertEqual(si['max_meta_name_length'], constraints.MAX_META_NAME_LENGTH) @@ -8163,9 +8158,9 @@ class TestSwiftInfo(unittest.TestCase): constraints.MAX_CONTAINER_NAME_LENGTH) self.assertEqual(si['max_object_name_length'], constraints.MAX_OBJECT_NAME_LENGTH) - self.assertTrue('strict_cors_mode' in si) - self.assertEqual(si['allow_account_management'], False) - self.assertEqual(si['account_autocreate'], False) + self.assertIn('strict_cors_mode', si) + self.assertFalse(si['allow_account_management']) + self.assertFalse(si['account_autocreate']) # This setting is by default excluded by disallowed_sections 
self.assertEqual(si['valid_api_versions'], constraints.VALID_API_VERSIONS) @@ -8173,7 +8168,7 @@ class TestSwiftInfo(unittest.TestCase): # other items are added to swift info self.assertEqual(len(si), 18) - self.assertTrue('policies' in si) + self.assertIn('policies', si) sorted_pols = sorted(si['policies'], key=operator.itemgetter('name')) self.assertEqual(len(sorted_pols), 3) for policy in sorted_pols: diff --git a/test/unit/proxy/test_sysmeta.py b/test/unit/proxy/test_sysmeta.py index eb58523e39..70757c63f1 100644 --- a/test/unit/proxy/test_sysmeta.py +++ b/test/unit/proxy/test_sysmeta.py @@ -119,15 +119,15 @@ class TestObjectSysmeta(unittest.TestCase): def _assertInHeaders(self, resp, expected): for key, val in expected.items(): - self.assertTrue(key in resp.headers, - 'Header %s missing from %s' % (key, resp.headers)) + self.assertIn(key, resp.headers, + 'Header %s missing from %s' % (key, resp.headers)) self.assertEqual(val, resp.headers[key], 'Expected header %s:%s, got %s:%s' % (key, val, key, resp.headers[key])) def _assertNotInHeaders(self, resp, unexpected): for key, val in unexpected.items(): - self.assertFalse(key in resp.headers, + self.assertNotIn(key, resp.headers, 'Header %s not expected in %s' % (key, resp.headers)) From e278179b082336fb0870ebccecf16c5d02e5ae0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Antal?= Date: Fri, 15 Jul 2016 14:02:38 +0200 Subject: [PATCH 070/156] Use more specific asserts in functional tests I changed asserts with more specific assert methods. e.g.: from assertTrue(sth == None) to assertIsNone(*) or assertTrue(isinstance(inst, type)) to assertIsInstace(inst, type) or assertTrue(not sth) to assertFalse(sth). The code gets more readable, and a better description will be shown on fail. Change-Id: I80ec96e0b729bef38213a6be4ff4b6eb65c7612d --- test/functional/test_account.py | 52 +++++++++--------- test/functional/test_container.py | 30 +++++------ test/functional/test_object.py | 2 +- test/functional/tests.py | 87 ++++++++++++++++--------------- 4 files changed, 84 insertions(+), 87 deletions(-) diff --git a/test/functional/test_account.py b/test/functional/test_account.py index cb04a2ae0c..57bbe6b815 100755 --- a/test/functional/test_account.py +++ b/test/functional/test_account.py @@ -98,11 +98,11 @@ class TestAccount(unittest2.TestCase): resp = retry(head) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-account-meta-test'), None) + self.assertIsNone(resp.getheader('x-account-meta-test')) resp = retry(get) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-account-meta-test'), None) + self.assertIsNone(resp.getheader('x-account-meta-test')) resp = retry(post, 'Value') resp.read() self.assertEqual(resp.status, 204) @@ -164,7 +164,7 @@ class TestAccount(unittest2.TestCase): resp = retry(post, headers, use_account=1) resp.read() self.assertEqual(resp.status, 400) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) @requires_acls def test_invalid_acl_values(self): @@ -180,7 +180,7 @@ class TestAccount(unittest2.TestCase): resp = retry(post, headers=headers, use_account=1) resp.read() self.assertEqual(resp.status, 400) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) @requires_acls def test_read_only_acl(self): @@ -215,7 +215,7 @@ class TestAccount(unittest2.TestCase): resp.read() 
self.assertIn(resp.status, (200, 204)) # but not acls - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # read-only can not write metadata headers = {'x-account-meta-test': 'value'} @@ -266,7 +266,7 @@ class TestAccount(unittest2.TestCase): resp.read() self.assertIn(resp.status, (200, 204)) # but not acls - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # read-write can not write account metadata headers = {'x-account-meta-test': 'value'} @@ -368,12 +368,11 @@ class TestAccount(unittest2.TestCase): # read-only tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assertTrue( - resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertIn(resp.status, (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # but not temp-url-key - self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None) + self.assertIsNone(resp.getheader('X-Account-Meta-Temp-Url-Key')) # grant read-write access to tester3 acl_user = tf.swift_test_user[2] @@ -387,12 +386,11 @@ class TestAccount(unittest2.TestCase): # read-write tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assertTrue( - resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertIn(resp.status, (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # but not temp-url-key - self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None) + self.assertIsNone(resp.getheader('X-Account-Meta-Temp-Url-Key')) # grant admin access to tester3 acl_user = tf.swift_test_user[2] @@ -406,9 +404,8 @@ class TestAccount(unittest2.TestCase): # admin tester3 can read account metadata resp = retry(get, use_account=3) resp.read() - self.assertTrue( - resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertIn(resp.status, (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Test'), value) # including temp-url-key self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), @@ -424,9 +421,8 @@ class TestAccount(unittest2.TestCase): self.assertEqual(resp.status, 204) resp = retry(get, use_account=3) resp.read() - self.assertTrue( - resp.status in (200, 204), - 'Expected status in (200, 204), got %s' % resp.status) + self.assertIn(resp.status, (200, 204), + 'Expected status in (200, 204), got %s' % resp.status) self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), secret) @@ -464,13 +460,13 @@ class TestAccount(unittest2.TestCase): use_account=1) resp.read() self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User1 can GET their own empty account resp = retry(get, use_account=1) resp.read() self.assertEqual(resp.status // 100, 2) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User2 can't GET User1's account resp = retry(get, use_account=2, url_account=1) @@ -514,7 +510,7 @@ class TestAccount(unittest2.TestCase): resp = retry(head, use_account=2, url_account=1) resp.read() 
self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('x-account-access-control'), None) + self.assertIsNone(resp.getheader('x-account-access-control')) # User2 can PUT and DELETE a container resp = retry(put, use_account=2, url_account=1, @@ -539,7 +535,7 @@ class TestAccount(unittest2.TestCase): resp = retry(head, use_account=2, url_account=1) resp.read() self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('x-account-access-control'), None) + self.assertIsNone(resp.getheader('x-account-access-control')) # User2 can't PUT a container resp = retry(put, use_account=2, url_account=1, @@ -577,13 +573,13 @@ class TestAccount(unittest2.TestCase): resp = retry(post, headers={'X-Account-Access-Control': '{}'}) resp.read() self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User1 can GET their own empty account resp = retry(get) resp.read() self.assertEqual(resp.status // 100, 2) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User1 can POST non-empty data acl_json = '{"admin":["bob"]}' @@ -636,13 +632,13 @@ class TestAccount(unittest2.TestCase): resp = retry(post, headers={'X-Account-Access-Control': '{}'}) resp.read() self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User1 can GET their own empty account resp = retry(get) resp.read() self.assertEqual(resp.status // 100, 2) - self.assertEqual(resp.getheader('X-Account-Access-Control'), None) + self.assertIsNone(resp.getheader('X-Account-Access-Control')) # User1 can POST non-empty data acl_json = '{"admin":["bob"]}' diff --git a/test/functional/test_container.py b/test/functional/test_container.py index 8b85c9c38c..458a186523 100755 --- a/test/functional/test_container.py +++ b/test/functional/test_container.py @@ -80,7 +80,7 @@ class TestContainer(unittest2.TestCase): body = resp.read() if resp.status == 404: break - self.assertTrue(resp.status // 100 == 2, resp.status) + self.assertEqual(resp.status // 100, 2, resp.status) objs = json.loads(body) if not objs: break @@ -223,11 +223,11 @@ class TestContainer(unittest2.TestCase): resp = retry(head, name) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-container-meta-test'), None) + self.assertIsNone(resp.getheader('x-container-meta-test')) resp = retry(get, name) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-container-meta-test'), None) + self.assertIsNone(resp.getheader('x-container-meta-test')) resp = retry(delete, name) resp.read() self.assertEqual(resp.status, 204) @@ -255,11 +255,11 @@ class TestContainer(unittest2.TestCase): resp = retry(head) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-container-meta-test'), None) + self.assertIsNone(resp.getheader('x-container-meta-test')) resp = retry(get) resp.read() self.assertIn(resp.status, (200, 204)) - self.assertEqual(resp.getheader('x-container-meta-test'), None) + self.assertIsNone(resp.getheader('x-container-meta-test')) resp = retry(post, 'Value') resp.read() self.assertEqual(resp.status, 204) @@ -965,7 +965,7 @@ class TestContainer(unittest2.TestCase): resp = retry(get, self.name, use_account=3) resp.read() self.assertEqual(resp.status, 204) - 
self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) + self.assertIsNone(resp.getheader('X-Container-Meta-Test')) @requires_acls def test_admin_acl_listing(self): @@ -1107,7 +1107,7 @@ class TestContainer(unittest2.TestCase): resp = retry(get, self.name, use_account=3) resp.read() self.assertEqual(resp.status, 204) - self.assertEqual(resp.getheader('X-Container-Meta-Test'), None) + self.assertIsNone(resp.getheader('X-Container-Meta-Test')) @requires_acls def test_protected_container_sync(self): @@ -1158,7 +1158,7 @@ class TestContainer(unittest2.TestCase): self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not sync-key - self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) + self.assertIsNone(resp.getheader('X-Container-Sync-Key')) # and can not write headers = {'x-container-sync-key': str(uuid4())} @@ -1180,7 +1180,7 @@ class TestContainer(unittest2.TestCase): self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not sync-key - self.assertEqual(resp.getheader('X-Container-Sync-Key'), None) + self.assertIsNone(resp.getheader('X-Container-Sync-Key')) # sanity check sync-key w/ account1 resp = retry(get, self.name, use_account=1) @@ -1282,8 +1282,8 @@ class TestContainer(unittest2.TestCase): self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not container acl - self.assertEqual(resp.getheader('X-Container-Read'), None) - self.assertEqual(resp.getheader('X-Container-Write'), None) + self.assertIsNone(resp.getheader('X-Container-Read')) + self.assertIsNone(resp.getheader('X-Container-Write')) # and can not write headers = { @@ -1308,8 +1308,8 @@ class TestContainer(unittest2.TestCase): self.assertEqual(resp.status, 204) self.assertEqual(resp.getheader('X-Container-Meta-Test'), value) # but not container acl - self.assertEqual(resp.getheader('X-Container-Read'), None) - self.assertEqual(resp.getheader('X-Container-Write'), None) + self.assertIsNone(resp.getheader('X-Container-Read')) + self.assertIsNone(resp.getheader('X-Container-Write')) # sanity check container acls with account1 resp = retry(get, self.name, use_account=1) @@ -1488,7 +1488,7 @@ class TestContainer(unittest2.TestCase): resp = retry(head) resp.read() headers = dict((k.lower(), v) for k, v in resp.getheaders()) - self.assertEqual(headers.get('x-storage-policy'), None) + self.assertIsNone(headers.get('x-storage-policy')) @requires_policies def test_conflict_change_storage_policy_with_put(self): @@ -1653,7 +1653,7 @@ class BaseTestContainerACLs(unittest2.TestCase): while True: resp = retry(get, use_account=self.account) body = resp.read() - self.assertTrue(resp.status // 100 == 2, resp.status) + self.assertEqual(resp.status // 100, 2, resp.status) objs = json.loads(body) if not objs: break diff --git a/test/functional/test_object.py b/test/functional/test_object.py index b2e375d879..f23ccbc78e 100755 --- a/test/functional/test_object.py +++ b/test/functional/test_object.py @@ -113,7 +113,7 @@ class TestObject(unittest2.TestCase): body = resp.read() if resp.status == 404: break - self.assertTrue(resp.status // 100 == 2, resp.status) + self.assertEqual(resp.status // 100, 2, resp.status) objs = json.loads(body) if not objs: break diff --git a/test/functional/tests.py b/test/functional/tests.py index 6f70bc0e6e..156094ac4b 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -79,8 +79,8 @@ class Base(unittest2.TestCase): def 
assert_body(self, body): response_body = self.env.conn.response.read() - self.assertTrue(response_body == body, - 'Body returned: %s' % (response_body)) + self.assertEqual(response_body, body, + 'Body returned: %s' % (response_body)) def assert_status(self, status_or_statuses): self.assertTrue( @@ -186,7 +186,7 @@ class TestAccount(Base): info = self.env.account.info() for field in ['object_count', 'container_count', 'bytes_used']: - self.assertTrue(info[field] >= 0) + self.assertGreaterEqual(info[field], 0) if info['container_count'] == len(self.env.containers): break @@ -213,8 +213,8 @@ class TestAccount(Base): for format_type in ['json', 'xml']: for a in self.env.account.containers( parms={'format': format_type}): - self.assertTrue(a['count'] >= 0) - self.assertTrue(a['bytes'] >= 0) + self.assertGreaterEqual(a['count'], 0) + self.assertGreaterEqual(a['bytes'], 0) headers = dict(self.env.conn.response.getheaders()) if format_type == 'json': @@ -230,7 +230,8 @@ class TestAccount(Base): p = {'limit': l} if l <= limit: - self.assertTrue(len(self.env.account.containers(parms=p)) <= l) + self.assertLessEqual(len(self.env.account.containers(parms=p)), + l) self.assert_status(200) else: self.assertRaises(ResponseError, @@ -316,11 +317,12 @@ class TestAccount(Base): parms={'format': format_type, 'marker': marker, 'limit': limit}) - self.assertTrue(len(containers) <= limit) + self.assertLessEqual(len(containers), limit) if containers: if isinstance(containers[0], dict): containers = [x['name'] for x in containers] - self.assertTrue(locale.strcoll(containers[0], marker) > 0) + self.assertGreater(locale.strcoll(containers[0], marker), + 0) def testContainersOrderedByName(self): for format_type in [None, 'json', 'xml']: @@ -545,12 +547,11 @@ class TestContainer(Base): for i in range(len(files)): f = files[i] for j in range(1, len(files) - i): - self.assertTrue( - cont.files(parms={'limit': j, 'marker': f}) == - files[i + 1: i + j + 1]) - self.assertTrue(cont.files(parms={'marker': f}) == files[i + 1:]) - self.assertTrue(cont.files(parms={'marker': f, 'prefix': f}) == []) - self.assertTrue(cont.files(parms={'prefix': f}) == [f]) + self.assertEqual(cont.files(parms={'limit': j, 'marker': f}), + files[i + 1: i + j + 1]) + self.assertEqual(cont.files(parms={'marker': f}), files[i + 1:]) + self.assertEqual(cont.files(parms={'marker': f, 'prefix': f}), []) + self.assertEqual(cont.files(parms={'prefix': f}), [f]) def testPrefixAndLimit(self): load_constraint('container_listing_limit') @@ -783,11 +784,11 @@ class TestContainer(Base): if isinstance(files[0], dict): files = [x['name'] for x in files] - self.assertTrue(len(files) <= limit) + self.assertLessEqual(len(files), limit) if files: if isinstance(files[0], dict): files = [x['name'] for x in files] - self.assertTrue(locale.strcoll(files[0], marker) > 0) + self.assertGreater(locale.strcoll(files[0], marker), 0) def testFileOrder(self): for format_type in [None, 'json', 'xml']: @@ -1083,7 +1084,7 @@ class TestContainerPaths(Base): for format_type in ('json', 'xml'): for file_item in self.env.container.files(parms={'format': format_type}): - self.assertTrue(int(file_item['bytes']) >= 0) + self.assertGreaterEqual(int(file_item['bytes']), 0) self.assertIn('last_modified', file_item) if file_item['name'].endswith('/'): self.assertEqual(file_item['content_type'], @@ -1207,9 +1208,9 @@ class TestFile(Base): file_item = cont.file(dest_filename) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) 
self.assertTrue(file_item.initialize()) - self.assertTrue(metadata == file_item.metadata) + self.assertEqual(metadata, file_item.metadata) def testCopyAccount(self): # makes sure to test encoded characters @@ -1240,9 +1241,9 @@ class TestFile(Base): file_item = cont.file(dest_filename) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) self.assertTrue(file_item.initialize()) - self.assertTrue(metadata == file_item.metadata) + self.assertEqual(metadata, file_item.metadata) dest_cont = self.env.account2.container(Utils.create_name()) self.assertTrue(dest_cont.create(hdrs={ @@ -1263,9 +1264,9 @@ class TestFile(Base): file_item = dest_cont.file(dest_filename) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) self.assertTrue(file_item.initialize()) - self.assertTrue(metadata == file_item.metadata) + self.assertEqual(metadata, file_item.metadata) def testCopy404s(self): source_filename = Utils.create_name() @@ -1423,9 +1424,9 @@ class TestFile(Base): file_item = cont.file(dest_filename) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) self.assertTrue(file_item.initialize()) - self.assertTrue(metadata == file_item.metadata) + self.assertEqual(metadata, file_item.metadata) def testCopyFromAccountHeader(self): acct = self.env.conn.account_name @@ -1466,9 +1467,9 @@ class TestFile(Base): file_item = cont.file(dest_filename) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) self.assertTrue(file_item.initialize()) - self.assertTrue(metadata == file_item.metadata) + self.assertEqual(metadata, file_item.metadata) def testCopyFromHeader404s(self): source_filename = Utils.create_name() @@ -1666,8 +1667,8 @@ class TestFile(Base): for i in range(0, file_length, range_size): range_string = 'bytes=%d-%d' % (i, i + range_size - 1) hdrs = {'Range': range_string} - self.assertTrue( - data[i: i + range_size] == file_item.read(hdrs=hdrs), + self.assertEqual( + data[i: i + range_size], file_item.read(hdrs=hdrs), range_string) range_string = 'bytes=-%d' % (i) @@ -1868,7 +1869,7 @@ class TestFile(Base): for r in ('BYTES=0-999', 'bytes = 0-999', 'BYTES = 0 - 999', 'bytes = 0 - 999', 'bytes=0 - 999', 'bytes=0-999 '): - self.assertTrue(file_item.read(hdrs={'Range': r}) == data[0:1000]) + self.assertEqual(file_item.read(hdrs={'Range': r}), data[0:1000]) def testFileSizeLimit(self): limit = load_constraint('max_file_size') @@ -1952,8 +1953,8 @@ class TestFile(Base): self.assert_status(405) # bad range headers - self.assertTrue( - len(file_item.read(hdrs={'Range': 'parsecs=8-12'})) == + self.assertEqual( + len(file_item.read(hdrs={'Range': 'parsecs=8-12'})), file_length) self.assert_status(200) @@ -1995,7 +1996,7 @@ class TestFile(Base): file_item = self.env.container.file(Utils.create_name()) data = file_item.write_random() self.assert_status(201) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) self.assert_status(200) def testHead(self): @@ -2171,9 +2172,9 @@ class TestFile(Base): lm_diff = max([f['last_modified'] for f in files]) -\ min([f['last_modified'] for f in files]) - self.assertTrue( - lm_diff < write_time + 1, 'Diff in last ' - 'modified times should be less than time to write files') + self.assertLess(lm_diff, write_time + 1, + 'Diff in last modified times ' + 'should be less than time to write files') for f in files: for format_type in ['json', 'xml']: @@ -2188,7 +2189,7 @@ class TestFile(Base): data = 
file_item.write_random(512) file_item.write(data) - self.assertTrue(file_item.read() == data) + self.assertEqual(file_item.read(), data) def testTooLongName(self): file_item = self.env.container.file('x' * 1025) @@ -2200,7 +2201,7 @@ class TestFile(Base): self.assertTrue(file_item.write('')) self.assertIn(file_item.name, self.env.container.files()) - self.assertTrue(file_item.read() == '') + self.assertEqual(file_item.read(), '') def testEtagResponse(self): file_item = self.env.container.file(Utils.create_name()) @@ -2235,7 +2236,7 @@ class TestFile(Base): file_item.chunked_write(j) self.assertTrue(file_item.chunked_write()) - self.assertTrue(data == file_item.read()) + self.assertEqual(data, file_item.read()) info = file_item.info() self.assertEqual(etag, info['etag']) @@ -3763,7 +3764,7 @@ class TestObjectVersioning(Base): self.env.versions_container.name) self.env.container.update_metadata( hdrs={'X-Versions-Location': ''}) - self.assertEqual(self.env.container.info().get('versions'), None) + self.assertIsNone(self.env.container.info().get('versions')) # set location back to the way it was self.env.container.update_metadata( @@ -3818,7 +3819,7 @@ class TestObjectVersioning(Base): self.assertEqual(v, resp_headers[k.lower()]) # make sure the new obj metadata did not leak to the prev. version - self.assertTrue('foo' not in prev_version.metadata) + self.assertNotIn('foo', prev_version.metadata) # check that POST does not create a new version versioned_obj.sync_metadata(metadata={'fu': 'baz'}) @@ -3832,8 +3833,8 @@ class TestObjectVersioning(Base): prev_version.initialize() self.assertEqual("bbbbb", prev_version.read()) self.assertEqual(prev_version.content_type, 'text/jibberish02') - self.assertTrue('foo' in prev_version.metadata) - self.assertTrue('fu' in prev_version.metadata) + self.assertIn('foo', prev_version.metadata) + self.assertIn('fu', prev_version.metadata) # as we delete things, the old contents return self.assertEqual("ccccc", versioned_obj.read()) From 45bde710a92823cb96b79755a86fe6076cb06b32 Mon Sep 17 00:00:00 2001 From: Maria Malyarova Date: Fri, 15 Jul 2016 21:38:14 +0300 Subject: [PATCH 071/156] Simplify chained comparison For example: a < b and b <= c is equal to a < b <= c Change-Id: I91ceb194bce60f6160ebdf0aadf0e8f0d7a35975 --- swift/common/db_replicator.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 7115a8afe4..1be17c9fc3 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -117,7 +117,6 @@ class ReplConnection(BufferedHTTPConnection): """ def __init__(self, node, partition, hash_, logger): - "" self.logger = logger self.node = node host = "%s:%s" % (node['replication_ip'], node['replication_port']) @@ -296,7 +295,7 @@ class Replicator(Daemon): return False with Timeout(replicate_timeout or self.node_timeout): response = http.replicate(replicate_method, local_id) - return response and response.status >= 200 and response.status < 300 + return response and 200 <= response.status < 300 def _usync_db(self, point, broker, http, remote_id, local_id): """ @@ -342,7 +341,7 @@ class Replicator(Daemon): else: with Timeout(self.node_timeout): response = http.replicate('merge_syncs', sync_table) - if response and response.status >= 200 and response.status < 300: + if response and 200 <= response.status < 300: broker.merge_syncs([{'remote_id': remote_id, 'sync_point': point}], incoming=False) @@ -429,7 +428,7 @@ class Replicator(Daemon): 
different_region=different_region) elif response.status == HTTP_INSUFFICIENT_STORAGE: raise DriveNotMounted() - elif response.status >= 200 and response.status < 300: + elif 200 <= response.status < 300: rinfo = json.loads(response.data) local_sync = broker.get_sync(rinfo['id'], incoming=False) if self._in_sync(rinfo, info, broker, local_sync): @@ -507,8 +506,7 @@ class Replicator(Daemon): # than the put_timestamp, and there are no objects. delete_timestamp = Timestamp(info.get('delete_timestamp') or 0) put_timestamp = Timestamp(info.get('put_timestamp') or 0) - if delete_timestamp < (now - self.reclaim_age) and \ - delete_timestamp > put_timestamp and \ + if (now - self.reclaim_age) > delete_timestamp > put_timestamp and \ info['count'] in (None, '', 0, '0'): if self.report_up_to_date(info): self.delete_db(broker) From ecce7947dda5b72757f88610ca83f2c0e78cf82a Mon Sep 17 00:00:00 2001 From: zheng yin Date: Sun, 17 Jul 2016 18:02:05 +0800 Subject: [PATCH 072/156] Add log format to i18n Change-Id: I3aff0de418b52b1f16f5863c95ad2700678143ae --- swift/proxy/server.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index c35a3e1a97..142f67801e 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -237,8 +237,8 @@ class Application(object): """ if self._read_affinity and self.sorting_method != 'affinity': self.logger.warning( - "sorting_method is set to '%s', not 'affinity'; " - "read_affinity setting will have no effect." % + _("sorting_method is set to '%s', not 'affinity'; " + "read_affinity setting will have no effect."), self.sorting_method) def get_object_ring(self, policy_idx): @@ -557,17 +557,18 @@ class Application(object): except ValueError: # not in pipeline; ignore it pass self.logger.info( - 'Adding required filter %s to pipeline at position %d' % - (filter_name, insert_at)) + _('Adding required filter %(filter_name)s to pipeline at ' + 'position %(insert_at)d'), + {'filter_name': filter_name, 'insert_at': insert_at}) ctx = pipe.create_filter(filter_name) pipe.insert_filter(ctx, index=insert_at) pipeline_was_modified = True if pipeline_was_modified: - self.logger.info("Pipeline was modified. New pipeline is \"%s\".", - pipe) + self.logger.info(_("Pipeline was modified. 
" + "New pipeline is \"%s\"."), pipe) else: - self.logger.debug("Pipeline is \"%s\"", pipe) + self.logger.debug(_("Pipeline is \"%s\""), pipe) def app_factory(global_conf, **local_conf): From bc09be4375b45cd98fd14f91f25b71bebc8bd08c Mon Sep 17 00:00:00 2001 From: zheng yin Date: Mon, 18 Jul 2016 15:01:21 +0800 Subject: [PATCH 073/156] Make comparision simplely For example: a>b and a<=c is equal to b 0 and \ - container_count >= self.app.max_containers_per_account and \ + if 0 < self.app.max_containers_per_account <= container_count and \ self.account_name not in self.app.max_containers_whitelist: container_info = \ self.container_info(self.account_name, self.container_name, From c7e5afb9c375b260c65782ce72b10a7739ad3bbb Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Mon, 18 Jul 2016 19:49:44 +0000 Subject: [PATCH 074/156] Fixed typo in reaper.py Change-Id: I9b98da30e5f934164e490beb4d6cde840f08832a --- swift/account/reaper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 93e1608ae6..050db4e189 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -109,7 +109,7 @@ class AccountReaper(Daemon): def run_forever(self, *args, **kwargs): """Main entry point when running the reaper in normal daemon mode. - This repeatedly calls :func:`reap_once` no quicker than the + This repeatedly calls :func:`run_once` no quicker than the configuration interval. """ self.logger.debug('Daemon started.') From cc0016399d78426dd4819fb97a508849194a7f4e Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 18 Jul 2016 14:01:57 -0700 Subject: [PATCH 075/156] Remove red herring from logs The object replicator can log some junk about the cluster ip instead of the replication ip in some specific error log lines that can make you think either you're crazy or your rings are crazy. ... in this case it was just the logging was crazy - so fix that. 
Change-Id: Ie5cbb2d1b30feb2529c17fc3d72af7df1aa3ffdd --- swift/obj/replicator.py | 5 +- test/unit/obj/test_replicator.py | 103 ++++++++++++++++++------------- 2 files changed, 62 insertions(+), 46 deletions(-) diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 052213aade..d5b3580a54 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -434,8 +434,9 @@ class ObjectReplicator(Daemon): node['device'], job['partition'], 'REPLICATE', '', headers=headers).getresponse() if resp.status == HTTP_INSUFFICIENT_STORAGE: - self.logger.error(_('%(ip)s/%(device)s responded' - ' as unmounted'), node) + self.logger.error( + _('%(replication_ip)s/%(device)s ' + 'responded as unmounted'), node) attempts_left += 1 failure_devs_info.add((node['replication_ip'], node['device'])) diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py index 2609e2a60a..d069857c0f 100644 --- a/test/unit/obj/test_replicator.py +++ b/test/unit/obj/test_replicator.py @@ -148,7 +148,8 @@ def _create_test_rings(path, devs=None): {'id': 3, 'device': 'sda', 'zone': 4, 'region': 2, 'ip': '127.0.0.3', 'port': 6200}, {'id': 4, 'device': 'sda', 'zone': 5, - 'region': 1, 'ip': '127.0.0.4', 'port': 6200}, + 'region': 1, 'ip': '127.0.0.4', 'port': 6200, + 'replication_ip': '127.0.1.4'}, {'id': 5, 'device': 'sda', 'zone': 6, 'region': 3, 'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6200}, {'id': 6, 'device': 'sda', 'zone': 7, 'region': 1, @@ -1674,7 +1675,6 @@ class TestObjectReplicator(unittest.TestCase): self.headers = {'Content-Length': '0', 'user-agent': 'object-replicator %s' % os.getpid()} - self.replicator.logger = mock_logger = mock.MagicMock() mock_tpool_reraise.return_value = (0, {}) all_jobs = self.replicator.collect_jobs() @@ -1685,15 +1685,25 @@ class TestObjectReplicator(unittest.TestCase): # Check uncorrect http_connect with status 507 and # count of attempts and call args resp.status = 507 - error = '%(ip)s/%(device)s responded as unmounted' - expect = 'Error syncing partition' + error = '%(replication_ip)s/%(device)s responded as unmounted' + expect = 'Error syncing partition: ' for job in jobs: set_default(self) ring = job['policy'].object_ring self.headers['X-Backend-Storage-Policy-Index'] = int(job['policy']) self.replicator.update(job) - self.assertTrue(error in mock_logger.error.call_args[0][0]) - self.assertTrue(expect in mock_logger.exception.call_args[0][0]) + error_lines = self.logger.get_lines_for_level('error') + expected = [] + # ... first the primaries + for node in job['nodes']: + expected.append(error % node) + # ... then it will get handoffs + for node in job['policy'].object_ring.get_more_nodes( + int(job['partition'])): + expected.append(error % node) + # ... and finally it will exception out + expected.append(expect) + self.assertEqual(expected, error_lines) self.assertEqual(len(self.replicator.partition_times), 1) self.assertEqual(mock_http.call_count, len(ring._devs) - 1) reqs = [] @@ -1705,7 +1715,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertEqual(self.replicator.suffix_hash, 0) mock_http.assert_has_calls(reqs, any_order=True) mock_http.reset_mock() - mock_logger.reset_mock() + self.logger.clear() # Check uncorrect http_connect with status 400 != HTTP_OK resp.status = 400 @@ -1713,20 +1723,27 @@ class TestObjectReplicator(unittest.TestCase): for job in jobs: set_default(self) self.replicator.update(job) - self.assertTrue(error in mock_logger.error.call_args[0][0]) + # ... 
only the primaries + expected = [error % {'resp': 400, 'ip': node['replication_ip']} + for node in job['nodes']] + self.assertEqual(expected, + self.logger.get_lines_for_level('error')) self.assertEqual(len(self.replicator.partition_times), 1) - mock_logger.reset_mock() + self.logger.clear() # Check successful http_connection and exception with # uncorrect pickle.loads(resp.read()) resp.status = 200 - expect = 'Error syncing with node:' + expect = 'Error syncing with node: %r: ' for job in jobs: set_default(self) self.replicator.update(job) - self.assertTrue(expect in mock_logger.exception.call_args[0][0]) + # ... only the primaries + expected = [expect % node for node in job['nodes']] + error_lines = self.logger.get_lines_for_level('error') + self.assertEqual(expected, error_lines) self.assertEqual(len(self.replicator.partition_times), 1) - mock_logger.reset_mock() + self.logger.clear() # Check successful http_connection and correct # pickle.loads(resp.read()) for non local node @@ -1740,13 +1757,12 @@ class TestObjectReplicator(unittest.TestCase): local_job = job.copy() continue self.replicator.update(job) - self.assertEqual(mock_logger.exception.call_count, 0) - self.assertEqual(mock_logger.error.call_count, 0) + self.assertEqual([], self.logger.get_lines_for_level('error')) self.assertEqual(len(self.replicator.partition_times), 1) self.assertEqual(self.replicator.suffix_hash, 0) self.assertEqual(self.replicator.suffix_sync, 0) self.assertEqual(self.replicator.suffix_count, 0) - mock_logger.reset_mock() + self.logger.clear() # Check successful http_connect and sync for local node mock_tpool_reraise.return_value = (1, {'a83': 'ba47fd314242ec8c' @@ -1787,7 +1803,7 @@ class TestObjectReplicator(unittest.TestCase): self.assertEqual(self.replicator.suffix_count, 1) mock_http.reset_mock() - mock_logger.reset_mock() + self.logger.clear() # test for replication params on policy 0 only repl_job = local_job.copy() @@ -1816,37 +1832,36 @@ class TestObjectReplicator(unittest.TestCase): jobs = self.replicator.collect_jobs() _m_rsync = mock.Mock(return_value=0) _m_os_path_exists = mock.Mock(return_value=True) - with mock.patch.object(self.replicator, '_rsync', _m_rsync): - with mock.patch('os.path.exists', _m_os_path_exists): - for job in jobs: - self.assertTrue('region' in job) - for node in job['nodes']: - for rsync_compress in (True, False): - self.replicator.rsync_compress = rsync_compress - ret = \ - self.replicator.sync(node, job, - ['fake_suffix']) - self.assertTrue(ret) - if node['region'] != job['region']: - if rsync_compress: - # --compress arg should be passed to rsync - # binary only when rsync_compress option is - # enabled AND destination node is in a - # different region - self.assertTrue('--compress' in - _m_rsync.call_args[0][0]) - else: - self.assertFalse('--compress' in - _m_rsync.call_args[0][0]) + with mock.patch.object(self.replicator, '_rsync', _m_rsync), \ + mock.patch('os.path.exists', _m_os_path_exists): + for job in jobs: + self.assertTrue('region' in job) + for node in job['nodes']: + for rsync_compress in (True, False): + self.replicator.rsync_compress = rsync_compress + ret = self.replicator.sync(node, job, + ['fake_suffix']) + self.assertTrue(ret) + if node['region'] != job['region']: + if rsync_compress: + # --compress arg should be passed to rsync + # binary only when rsync_compress option is + # enabled AND destination node is in a + # different region + self.assertTrue('--compress' in + _m_rsync.call_args[0][0]) else: self.assertFalse('--compress' in 
_m_rsync.call_args[0][0]) - self.assertEqual( - _m_os_path_exists.call_args_list[-1][0][0], - os.path.join(job['path'], 'fake_suffix')) - self.assertEqual( - _m_os_path_exists.call_args_list[-2][0][0], - os.path.join(job['path'])) + else: + self.assertFalse('--compress' in + _m_rsync.call_args[0][0]) + self.assertEqual( + _m_os_path_exists.call_args_list[-1][0][0], + os.path.join(job['path'], 'fake_suffix')) + self.assertEqual( + _m_os_path_exists.call_args_list[-2][0][0], + os.path.join(job['path'])) if __name__ == '__main__': From aaa631558785e447c9655b9ada7f894035999425 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 19 Jul 2016 08:27:04 +0000 Subject: [PATCH 076/156] Imported Translations from Zanata For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Ica92e45dbc3f83ec806e092fd3ea5ada636ab7e9 --- swift/locale/de/LC_MESSAGES/swift.po | 12 +- swift/locale/es/LC_MESSAGES/swift.po | 252 ++++++++++++++++++++++-- swift/locale/fr/LC_MESSAGES/swift.po | 14 +- swift/locale/it/LC_MESSAGES/swift.po | 13 +- swift/locale/ja/LC_MESSAGES/swift.po | 12 +- swift/locale/ko_KR/LC_MESSAGES/swift.po | 12 +- swift/locale/pt_BR/LC_MESSAGES/swift.po | 12 +- swift/locale/ru/LC_MESSAGES/swift.po | 14 +- swift/locale/tr_TR/LC_MESSAGES/swift.po | 12 +- swift/locale/zh_CN/LC_MESSAGES/swift.po | 12 +- swift/locale/zh_TW/LC_MESSAGES/swift.po | 12 +- 11 files changed, 259 insertions(+), 118 deletions(-) diff --git a/swift/locale/de/LC_MESSAGES/swift.po b/swift/locale/de/LC_MESSAGES/swift.po index 162d3b9009..d99366153d 100644 --- a/swift/locale/de/LC_MESSAGES/swift.po +++ b/swift/locale/de/LC_MESSAGES/swift.po @@ -9,9 +9,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev175\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-03 04:47+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -337,10 +337,6 @@ msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s" msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "FEHLER %(status)d Versuch, %(method)s %(path)sAus Container-Server" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -1006,10 +1002,6 @@ msgstr "Versuch, %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Versuch, %(full_path)s mit GET abzurufen" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Versuch, den finalen Status von PUT für %s abzurufen" - msgid "Trying to read during GET" msgstr "Versuch, während des GET-Vorgangs zu lesen" diff --git a/swift/locale/es/LC_MESSAGES/swift.po b/swift/locale/es/LC_MESSAGES/swift.po index 34aca4ae0f..d94e4b5884 100644 --- a/swift/locale/es/LC_MESSAGES/swift.po +++ b/swift/locale/es/LC_MESSAGES/swift.po @@ -4,16 +4,17 @@ # # Translators: # Andreas Jaeger , 2016. #zanata +# Pablo Caruana , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-29 11:20+0000\n" -"Last-Translator: Eugènia Torrella \n" +"PO-Revision-Date: 2016-07-18 10:57+0000\n" +"Last-Translator: Pablo Caruana \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" @@ -46,10 +47,18 @@ msgstr "" msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s han respondido como desmontados" +#, python-format +msgid "%(msg)s %(hdr)s: %(e)s" +msgstr "%(msg)s %(hdr)s: %(e)s" + #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" +#, python-format +msgid "%(msg)s: %(err)s" +msgstr "%(msg)s: %(err)s" + #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" @@ -68,6 +77,22 @@ msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) particiones replicadas en " "%(time).2fs (%(rate).2f/segundo, %(remaining)s restantes)" +#, python-format +msgid "%(server)s #%(number)d not running (%(conf)s)" +msgstr "%(server)s #%(number)d not running (%(conf)s)" + +#, python-format +msgid "%(server)s (%(pid)s) appears to have stopped" +msgstr "%(server)s (%(pid)s) parece haberse detenido" + +#, python-format +msgid "%(server)s running (%(pid)s - %(conf)s)" +msgstr "%(server)s running (%(pid)s - %(conf)s)" + +#, python-format +msgid "%(server)s running (%(pid)s - %(pid_file)s)" +msgstr "%(server)s corriendo (%(pid)s - %(pid_file)s)" + #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s éxitos, %(failure)s fallos" @@ -76,6 +101,10 @@ msgstr "%(success)s éxitos, %(failure)s fallos" msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s devuelve 503 para %(statuses)s" +#, python-format +msgid "%(type)s: %(value)s" +msgstr "%(type)s: %(value)s" + #, python-format msgid "%s already started..." msgstr "%s ya está iniciado..." 
@@ -104,11 +133,11 @@ msgstr ", %s contenedores suprimidos" msgid ", %s containers possibly remaining" msgstr ", %s contenedores posiblemente restantes" -#, fuzzy, python-format +#, python-format msgid ", %s containers remaining" msgstr ", %s contenedores restantes" -#, fuzzy, python-format +#, python-format msgid ", %s objects deleted" msgstr ", %s objetos suprimidos" @@ -116,7 +145,7 @@ msgstr ", %s objetos suprimidos" msgid ", %s objects possibly remaining" msgstr ", %s objetos posiblemente restantes" -#, fuzzy, python-format +#, python-format msgid ", %s objects remaining" msgstr ", %s objectos restantes" @@ -124,13 +153,16 @@ msgstr ", %s objectos restantes" msgid ", elapsed: %.02fs" msgstr ", transcurrido: %.02fs" -#, fuzzy msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Cuenta" +#, python-format +msgid "Account %(account)s has not been reaped since %(time)s" +msgstr "La cuenta %(account)s no se ha cosechado desde %(time)s" + #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs" @@ -146,6 +178,14 @@ msgstr "" "Se han intentado replicar %(count)d bases de datos en %(time).5f segundos " "(%(rate).5f/s)" +#, python-format +msgid "Audit Failed for %(path)s: %(err)s" +msgstr "Ha fallado la auditoría para %(path)s: %(err)s" + +#, python-format +msgid "Bad key for %(name)r: %(err)s" +msgstr "Clave errónea para %(name)r: %(err)s" + #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s" @@ -171,6 +211,12 @@ msgstr "Comenzar el barrido de hebra única de actualización del contenedor" msgid "Begin container update sweep" msgstr "Comenzar el barrido de actualización del contenedor" +#, python-format +msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" +msgstr "" +"Comenzar auditoría de objetos en modalidad \"%(mode)s\" mode (%(audi_type)s" +"%(description)s)" + msgid "Begin object update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del objeto" @@ -195,6 +241,14 @@ msgstr "No se puede acceder al archivo %s." msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." 
+#, python-format +msgid "Cannot read %(auditor_status)s (%(err)s)" +msgstr "No se puede leer %(auditor_status)s (%(err)s)" + +#, python-format +msgid "Cannot write %(auditor_status)s (%(err)s)" +msgstr "No se puede escribir %(auditor_status)s (%(err)s)" + #, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" @@ -245,6 +299,19 @@ msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Sincronización de contenedor en modalidad de \"una vez\" finalizada: %.02fs" +#, python-format +msgid "" +"Container sync report: %(container)s, time window start: %(start)s, time " +"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " +"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " +"total_rows: %(total)s" +msgstr "" +"Informe de sincronización de contenedores: %(container)s, inicio de la " +"ventana de tiempo: %(start)s, extremo ventana de tiempo: %(end)s, " +"colocaciones: %(puts)s, publicaciones:: %(posts)s, eliminados: %(deletes)s, " +"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " +"total_filas: %(total)s" + #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " @@ -267,6 +334,17 @@ msgstr "" "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" +#, python-format +msgid "" +"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" +msgstr "" +"No se ha podido enlazar a %(addr)s:%(port)s después de intentarlo durante " +"%(timeout)ssegundos" + +#, python-format +msgid "Could not load %(conf)r: %(error)s" +msgstr "No se ha podido cargar %(conf)r: %(error)s" + #, python-format msgid "Data download error: %s" msgstr "Error de descarga de datos: %s" @@ -275,6 +353,15 @@ msgstr "Error de descarga de datos: %s" msgid "Devices pass completed: %.02fs" msgstr "Paso de dispositivos finalizado: %.02fs" +msgid "Did not get a keys dict" +msgstr "No tuvimos un diccionario de claves" + +#, python-format +msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" +msgstr "" +"El directorio %(directory)r no está correlacionado con una política válida " +"(%(error)s)" + #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" @@ -292,10 +379,10 @@ msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Esperado: 100-continuo Desde el servidor de objeto" #, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" +msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" msgstr "" -"ERROR %(status)d Intentando %(method)s %(path)sDesde el servidor de " -"contenedor" +"ERROR %(status)d Intentando %(method)s %(path)s Desde %(type)s de " +"Servidor" #, python-format msgid "" @@ -305,6 +392,14 @@ msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s" +#, python-format +msgid "" +"ERROR Account update failed: different numbers of hosts and devices in " +"request: \"%(hosts)s\" vs \"%(devices)s\"" +msgstr "" +"ERROR La actualización de la cuenta ha fallado: hay números distintos de " +"hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" + #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Respuesta errónea %(status)s desde %(host)s" @@ -322,6 +417,14 @@ 
msgstr "" "actualización asíncrona posterior): %(status)d respuesta desde %(ip)s:" "%(port)s/%(dev)s" +#, python-format +msgid "" +"ERROR Container update failed: different numbers of hosts and devices in " +"request: \"%(hosts)s\" vs \"%(devices)s\"" +msgstr "" +"ERROR La actualización de la cuenta ha fallado: hay números distintos de " +"hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" + #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR No se ha podido obtener la información de cuenta %s" @@ -417,6 +520,13 @@ msgstr "" "ERROR La actualización del contenedor ha fallado con %(ip)s:%(port)s/%(dev)s " "(guardando para una actualización asíncrona posterior)" +msgid "ERROR get_keys() missing callback" +msgstr "ERROR get_keys() No se proporciona devolución de llamada " + +#, python-format +msgid "ERROR get_keys(): from callback: %s" +msgstr "ERROR get_keys() No se proporciona devolución de llamada: %s" + #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR al leer la respuesta HTTP desde %s" @@ -480,6 +590,10 @@ msgstr "Error en la codificación a UTF-8: %s" msgid "Error hashing suffix" msgstr "Error en el hash del sufijo" +#, python-format +msgid "Error in %(conf)r with mtime_check_interval: %(error)s" +msgstr "Error en %(conf)r con mtime_check_interval: %(error)s" + #, python-format msgid "Error limiting server %s" msgstr "Error al limitar el servidor %s" @@ -543,6 +657,14 @@ msgstr "Excepción en el bucle de réplica de nivel superior" msgid "Exception in top-levelreconstruction loop" msgstr "Excepción en el bucle de reconstrucción de nivel superior" +#, python-format +msgid "Exception while deleting container %(container)s %(err)s" +msgstr "Excepción al suprimir el contenedor %(container)s %(err)s" + +#, python-format +msgid "Exception while deleting object %(container)s %(obj)s %(err)s" +msgstr "Excepción al suprimir el objeto %(container)s %(obj)s %(err)s" + #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción con %(ip)s:%(port)s/%(device)s" @@ -619,6 +741,10 @@ msgstr "" msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" +#, python-format +msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" +msgstr "Error al cargar JSON desde %(auditor_status)s falla (%(err)s)" + msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." @@ -626,10 +752,18 @@ msgstr "Bloqueo detectado. Interrumpiendo coros activos." 
msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Se ha correlacionado %(given_domain)s con %(found_domain)s" +#, python-format +msgid "Missing key for %r" +msgstr "Falta una clave en %r" + #, python-format msgid "No %s running" msgstr "Ningún %s en ejecución" +#, python-format +msgid "No cluster endpoint for %(realm)r %(cluster)r" +msgstr "No hay ningún punto final %(realm)r %(cluster)r" + #, python-format msgid "No permission to signal PID %d" msgstr "No hay permiso para señalar el PID %d" @@ -642,6 +776,10 @@ msgstr "No hay ninguna política que tenga el índice %s" msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" +#, python-format +msgid "No space left on device for %(file)s (%(err)s)" +msgstr "No queda espacio libre en el dispositivo para %(file)s (%(err)s)" + #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" @@ -672,6 +810,21 @@ msgstr "Objeto" msgid "Object PUT" msgstr "Objeto PUT" +#, python-format +msgid "" +"Object PUT exceptions after last send, %(conns)s/%(nodes)s required " +"connections" +msgstr "" +"excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " +"conexiones requeridas" + +#, python-format +msgid "" +"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" +msgstr "" +"excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " +"conexiones requeridas" + #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" @@ -681,6 +834,10 @@ msgstr "" msgid "Object PUT returning 412, %(statuses)r" msgstr "El objeto PUT devuelve 412, %(statuses)r" +#, python-format +msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" +msgstr "Retorno de objecto PUT 503, %(conns)s/%(nodes)s conexiones requeridas" + #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. 
Total " @@ -760,17 +917,47 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs" +#, python-format +msgid "" +"Pass beginning; %(containers)s possible containers; %(objects)s possible " +"objects" +msgstr "" +"Inicio del paso; %(containers)s posibles contenedores; %(objects)s posibles " +"objetos" + +#, python-format +msgid "Pass completed in %(time)ds; %(objects)d objects expired" +msgstr "Paso completado en %(time)ds; %(objects)d objetos caducados" + +#, python-format +msgid "Pass so far %(time)ds; %(objects)d objects expired" +msgstr "Paso hasta ahora%(time)ds; %(objects)d objetos caducados" + msgid "Path required in X-Container-Sync-To" msgstr "Vía de acceso necesaria en X-Container-Sync-To" +#, python-format +msgid "Problem cleaning up %(datadir)s (%(err)s)" +msgstr "Problema al limpiar %(datadir)s (%(err)s)" + #, python-format msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" +#, python-format +msgid "Problem writing durable state file %(file)s (%(err)s)" +msgstr "" +"Problema al escribir en el archivo de estado durable %(file)s (%(err)s)" + #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" +#, python-format +msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database" +msgstr "" +"En cuarentena%(db_dir)s hasta %(quar_path)s debido a %(exc_hint)s database" + #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" @@ -851,6 +1038,10 @@ msgstr "Ejecutando reconstructor de objeto en modo script." msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." +#, python-format +msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" +msgstr "Señal %(server)s pid: %(pid)s Señal : %(signal)s" + #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " @@ -879,6 +1070,10 @@ msgstr "" msgid "Skipping %(device)s as it is not mounted" msgstr "Omitiendo %(device)s, ya que no está montado" +#, python-format +msgid "Skipping %(dir)s: %(err)s" +msgstr "Omitiendo %(dir)s: %(err)s" + #, python-format msgid "Skipping %s as it is not mounted" msgstr "Omitiendo %s, ya que no está montado" @@ -932,8 +1127,8 @@ msgid "Trying to GET %(full_path)s" msgstr "Intentando hacer un GET de %(full_path)s" #, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Intentando obtener el estado final de PUT en %s" +msgid "Trying to get %(status_type)s status of PUT to %(path)s" +msgstr "Intentando obtener %(status_type)s el estado de PUT a %(path)s" msgid "Trying to read during GET" msgstr "Intentado leer durante GET" @@ -955,6 +1150,16 @@ msgstr "Intentando escribir en %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" +#, python-format +msgid "Unable to find %(section)s config section in %(conf)s" +msgstr "No se ha podido encontrar %(section)s de la configuración en %(conf)s" + +#, python-format +msgid "Unable to load internal client from config: %(conf)r (%(error)s)" +msgstr "" +"No se puede cargar el cliente interno a partir de la configuración: %(conf)r " +"(%(error)s)" + #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." @@ -963,12 +1168,21 @@ msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." 
msgid "Unable to locate config for %s" msgstr "No se ha podido encontrar el número de configuración de %s" +#, python-format +msgid "Unable to locate config number %(number)s for %(server)s" +msgstr "" +"No se ha podido encontrar el número de configuración %(number)s de %(server)s" + msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "No se ha podido localizar fallocate, posix_fallocate en libc. Se dejará como " "no operativo." +#, python-format +msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" +msgstr "No se puede realizar fsync() en el directorio %(dir)s: %(err)s" + #, python-format msgid "Unable to read config from %s" msgstr "No se ha podido leer la configuración de %s" @@ -1020,6 +1234,18 @@ msgstr "" "AVISO: no se ha podido modificar el límite de memoria. ¿Está en ejecución " "como no root?" +#, python-format +msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" +msgstr "" +"Se han esperado %(kill_wait)s segundos a que terminara %(server)s; " +"abandonando" + +#, python-format +msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" +msgstr "" +"Se han esperado %(kill_wait)s segundos a que terminara %(server)s ; " +"terminando" + msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " diff --git a/swift/locale/fr/LC_MESSAGES/swift.po b/swift/locale/fr/LC_MESSAGES/swift.po index 8c93eff9e4..b15b7810d1 100644 --- a/swift/locale/fr/LC_MESSAGES/swift.po +++ b/swift/locale/fr/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -295,12 +295,6 @@ msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" "ERREUR %(status)d Attendu(s) : 100 - poursuivre depuis le serveur d'objets" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "" -"ERREUR %(status)d Tentative d'exécution de %(method)s %(path)s à partir du " -"serveur de conteneur" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -948,10 +942,6 @@ msgstr "Tentative d'exécution de %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentative de lecture de %(full_path)s" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Tentative d'obtention du statut final de l'opération PUT sur %s" - msgid "Trying to read during GET" msgstr "Tentative de lecture pendant une opération GET" diff --git a/swift/locale/it/LC_MESSAGES/swift.po b/swift/locale/it/LC_MESSAGES/swift.po index e69a5a1b82..c42f9b9823 100644 --- a/swift/locale/it/LC_MESSAGES/swift.po +++ b/swift/locale/it/LC_MESSAGES/swift.po @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -296,11 +296,6 @@ msgstr "ERRORE %(status)d %(body)s Dal server degli oggetti re: %(path)s" msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRORE %(status)d Previsto: 100-continue dal server degli oggetti" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "" -"ERRORE %(status)d Tentativo di %(method)s %(path)s dal server contenitore" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -935,10 +930,6 @@ msgstr "Tentativo di %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentativo di eseguire GET %(full_path)s" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Tentativo di acquisire lo stato finale di PUT su %s" - msgid "Trying to read during GET" msgstr "Tentativo di lettura durante GET" diff --git a/swift/locale/ja/LC_MESSAGES/swift.po b/swift/locale/ja/LC_MESSAGES/swift.po index bfcd353e3b..fe36a7496f 100644 --- a/swift/locale/ja/LC_MESSAGES/swift.po +++ b/swift/locale/ja/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -286,10 +286,6 @@ msgstr "エラー %(status)d: オブジェクトサーバーからの %(body)s msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "エラー %(status)d: 予期: オブジェクトサーバーからの 100-continue" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "エラー %(status)d: コンテナーサーバーから %(method)s %(path)s を試行中" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -911,10 +907,6 @@ msgstr "%(method)s %(path)s を試行中" msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s を試行中" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "%s への PUT の最終状況の取得を試行中" - msgid "Trying to read during GET" msgstr "GET 時に読み取りを試行中" diff --git a/swift/locale/ko_KR/LC_MESSAGES/swift.po b/swift/locale/ko_KR/LC_MESSAGES/swift.po index 760bde07ee..d5a5231659 100644 --- a/swift/locale/ko_KR/LC_MESSAGES/swift.po +++ b/swift/locale/ko_KR/LC_MESSAGES/swift.po @@ -8,9 +8,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -287,10 +287,6 @@ msgstr "오류 %(status)d %(body)s, 오브젝트 서버 발신, 회신: %(path)s msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "오류 %(status)d. 
예상: 100-continue, 오브젝트 서버 발신" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "오류 %(status)d, 컨테이너 서버에서 %(method)s %(path)s 시도 중" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -898,10 +894,6 @@ msgstr "%(method)s %(path)s 시도 중" msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s 시도 중" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "PUT의 최종 상태를 %s(으)로 가져오는 중" - msgid "Trying to read during GET" msgstr "가져오기 중 읽기를 시도함" diff --git a/swift/locale/pt_BR/LC_MESSAGES/swift.po b/swift/locale/pt_BR/LC_MESSAGES/swift.po index 11ed050976..63ac1f071b 100644 --- a/swift/locale/pt_BR/LC_MESSAGES/swift.po +++ b/swift/locale/pt_BR/LC_MESSAGES/swift.po @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -291,10 +291,6 @@ msgstr "ERRO %(status)d %(body)s No Servidor de Objetos re: %(path)s" msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRO %(status)d Expectativa: 100-continuar Do Servidor de Objeto" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "ERRO %(status)d Tentando %(method)s %(path)s Do Servidor de Contêiner" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -919,10 +915,6 @@ msgstr "Tentando %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Tentando GET %(full_path)s" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Tentando obter o status final do PUT para o %s" - msgid "Trying to read during GET" msgstr "Tentando ler durante GET" diff --git a/swift/locale/ru/LC_MESSAGES/swift.po b/swift/locale/ru/LC_MESSAGES/swift.po index 0bb2e8f3c5..303c19824a 100644 --- a/swift/locale/ru/LC_MESSAGES/swift.po +++ b/swift/locale/ru/LC_MESSAGES/swift.po @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -289,12 +289,6 @@ msgstr "Ошибка %(status)d %(body)s, ответ от сервера объ msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "Ошибка %(status)d. Ожидаемое значение от сервера объекта: 100-continue" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "" -"Ошибка %(status)d. 
попытка выполнить метод %(method)s %(path)s из сервера " -"контейнера" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -930,10 +924,6 @@ msgstr "Попытка выполнения метода %(method)s %(path)s" msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроса %(full_path)s" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "Попытка получения конечного состояния PUT в %s" - msgid "Trying to read during GET" msgstr "Попытка чтения во время операции GET" diff --git a/swift/locale/tr_TR/LC_MESSAGES/swift.po b/swift/locale/tr_TR/LC_MESSAGES/swift.po index 3b4b8abe1c..3bfe5d4698 100644 --- a/swift/locale/tr_TR/LC_MESSAGES/swift.po +++ b/swift/locale/tr_TR/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: swift 2.7.1.dev169\n" +"Project-Id-Version: swift 2.9.1.dev6\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-02 04:58+0000\n" +"POT-Creation-Date: 2016-07-19 04:02+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -283,10 +283,6 @@ msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s" msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et" -#, python-format -msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" -msgstr "HATA %(status)d Kap Sunucusundan %(method)s %(path)s denenirken" - #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " @@ -875,10 +871,6 @@ msgstr "%(method)s %(path)s deneniyor" msgid "Trying to GET %(full_path)s" msgstr "%(full_path)s GET deneniyor" -#, python-format -msgid "Trying to get final status of PUT to %s" -msgstr "%s'e PUT için son durum alınmaya çalışılıyor" - msgid "Trying to read during GET" msgstr "GET sırasında okuma deneniyor" diff --git a/swift/locale/zh_CN/LC_MESSAGES/swift.po b/swift/locale/zh_CN/LC_MESSAGES/swift.po index b47427299c..a5dc565f6d 100644 --- a/swift/locale/zh_CN/LC_MESSAGES/swift.po +++ b/swift/locale/zh_CN/LC_MESSAGES/swift.po @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. 
#zanata
msgid ""
msgstr ""
-"Project-Id-Version: swift 2.7.1.dev169\n"
+"Project-Id-Version: swift 2.9.1.dev6\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-02 04:58+0000\n"
+"POT-Creation-Date: 2016-07-19 04:02+0000\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -281,10 +281,6 @@ msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s"
 msgid "ERROR %(status)d Expect: 100-continue From Object Server"
 msgstr "发生 %(status)d 错误,需要 100 - 从对象服务器继续"

-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
-msgstr "尝试从容器服务器执行 %(method)s %(path)s 时发生 %(status)d 错误"
-
 #, python-format
 msgid ""
 "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
@@ -877,10 +873,6 @@ msgstr "尝试执行%(method)s %(path)s"
 msgid "Trying to GET %(full_path)s"
 msgstr "正尝试获取 %(full_path)s"

-#, python-format
-msgid "Trying to get final status of PUT to %s"
-msgstr "尝试执行获取最后的PUT状态%s"
-
 msgid "Trying to read during GET"
 msgstr "执行GET时尝试读取"

diff --git a/swift/locale/zh_TW/LC_MESSAGES/swift.po b/swift/locale/zh_TW/LC_MESSAGES/swift.po
index 64e38b13b3..91591b1593 100644
--- a/swift/locale/zh_TW/LC_MESSAGES/swift.po
+++ b/swift/locale/zh_TW/LC_MESSAGES/swift.po
@@ -6,9 +6,9 @@
 # Andreas Jaeger , 2016. #zanata
 msgid ""
 msgstr ""
-"Project-Id-Version: swift 2.7.1.dev169\n"
+"Project-Id-Version: swift 2.9.1.dev6\n"
 "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-02 04:58+0000\n"
+"POT-Creation-Date: 2016-07-19 04:02+0000\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
@@ -281,10 +281,6 @@ msgstr "錯誤:%(status)d %(body)s 來自物件伺服器 re:%(path)s"
 msgid "ERROR %(status)d Expect: 100-continue From Object Server"
 msgstr "錯誤:%(status)d 預期:100 繼續自物件伺服器"

-#, python-format
-msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server"
-msgstr "錯誤:%(status)d 正在嘗試從儲存器伺服器 %(method)s %(path)s"
-
 #, python-format
 msgid ""
 "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
@@ -882,10 +878,6 @@ msgstr "正在嘗試 %(method)s %(path)s"
 msgid "Trying to GET %(full_path)s"
 msgstr "正在嘗試對 %(full_path)s 執行 GET 動作"

-#, python-format
-msgid "Trying to get final status of PUT to %s"
-msgstr "正在嘗試使 PUT 的最終狀態為 %s"
-
 msgid "Trying to read during GET"
 msgstr "正在嘗試於 GET 期間讀取"

From f33742127339a39c1b7bd9bf26f31c2cd9fbb341 Mon Sep 17 00:00:00 2001
From: Cheng Li
Date: Tue, 19 Jul 2016 17:25:22 +0800
Subject: [PATCH 077/156] Change assertTrue to assertEqual

In test_ringbuilder.py, there is one assertTrue that should be
replaced with assertEqual.
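For illustration, a minimal self-contained sketch (not part of this
patch) of why the two-argument assertTrue form is a no-op check:
unittest treats the second argument as the failure message, so the two
values are never compared, whereas assertEqual really compares them.

    import unittest

    class Demo(unittest.TestCase):
        def test_assert_true_never_compares(self):
            # 3600 is taken as the msg argument, not an expected value,
            # so this passes for any truthy first argument.
            self.assertTrue(1, 3600)

        def test_assert_equal_really_compares(self):
            # assertEqual verifies that the two values actually match.
            self.assertEqual(3600, 3600)

    if __name__ == '__main__':
        unittest.main()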
Change-Id: I9a0e4a7363a5e16cc9b6df045953dfbb4f9dbd07 Closes-bug: #1604320 --- test/unit/cli/test_ringbuilder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index b61907c967..5e86b6fea1 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -1783,7 +1783,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertEqual(ring.min_part_seconds_left, 0) self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) - self.assertTrue(ring.min_part_seconds_left, 3600) + self.assertEqual(ring.min_part_seconds_left, 3600) def test_rebalance_failure_does_not_reset_last_moves_epoch(self): ring = RingBuilder(8, 3, 1) From d9b765320d58571b6b123839f8555f95c20a6bbf Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Tue, 19 Jul 2016 21:37:54 +0000 Subject: [PATCH 078/156] Fixed Typo in updater.py Change-Id: Ic0b5445e313924c683e9889d94569c2554fd0b8b --- swift/container/updater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/container/updater.py b/swift/container/updater.py index fb6d741e60..42204e4ca6 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -114,7 +114,7 @@ class ContainerUpdater(Daemon): def run_forever(self, *args, **kwargs): """ - Run the updator continuously. + Run the updater continuously. """ time.sleep(random() * self.interval) while True: From cdf505a50c107c14e626f308bd26eee9b4b9f305 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 20 Jul 2016 17:59:25 +0000 Subject: [PATCH 079/156] Make swift-oldies py3-compatible Change-Id: I0388f4738966bc453e922e9598ff9df60ecda4eb --- bin/swift-oldies | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/bin/swift-oldies b/bin/swift-oldies index 74854d78d8..53845c07e5 100755 --- a/bin/swift-oldies +++ b/bin/swift-oldies @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function import optparse import subprocess import sys @@ -30,16 +31,20 @@ Lists old Swift processes. listing = [] for line in subprocess.Popen( ['ps', '-eo', 'etime,pid,args', '--no-headers'], - stdout=subprocess.PIPE).communicate()[0].split('\n'): + stdout=subprocess.PIPE).communicate()[0].split(b'\n'): if not line: continue hours = 0 try: - etime, pid, args = line.split(None, 2) + etime, pid, args = line.decode('ascii').split(None, 2) except ValueError: + # This covers both decoding and not-enough-values-to-unpack errors sys.exit('Could not process ps line %r' % line) - if not args.startswith('/usr/bin/python /usr/bin/swift-') and \ - not args.startswith('/usr/bin/python /usr/local/bin/swift-'): + if not args.startswith(( + '/usr/bin/python /usr/bin/swift-', + '/usr/bin/python /usr/local/bin/swift-', + '/usr/bin/python3 /usr/bin/swift-', + '/usr/bin/python3 /usr/local/bin/swift-')): continue args = args.split('-', 1)[1] etime = etime.split('-') @@ -70,8 +75,6 @@ Lists old Swift processes. 
args_len = max(args_len, len(args))
     args_len = min(args_len, 78 - hours_len - pid_len)

-    print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \
-        ('Hours', 'PID', 'Command')
+    print('%*s %*s %s' % (hours_len, 'Hours', pid_len, 'PID', 'Command'))
     for hours, pid, args in listing:
-        print ('%%%ds %%%ds %%s' % (hours_len, pid_len)) % \
-            (hours, pid, args[:args_len])
+        print('%*s %*s %s' % (hours_len, hours, pid_len, pid, args[:args_len]))

From 49f250736d0d9fa0a0ff166caffcacc58d24fc1d Mon Sep 17 00:00:00 2001
From: Lokesh S
Date: Wed, 20 Jul 2016 19:00:00 +0000
Subject: [PATCH 080/156] Python3 fixes generator object issue

Fixes "'generator' object has no attribute 'next'" errors.

Change-Id: I1f21eaed0ae7062073438503d3f6860d8b4f36c8
---
 test/unit/container/test_backend.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/test/unit/container/test_backend.py b/test/unit/container/test_backend.py
index 45b0f15423..4300b9bd28 100644
--- a/test/unit/container/test_backend.py
+++ b/test/unit/container/test_backend.py
@@ -560,8 +560,8 @@ class TestContainerBroker(unittest.TestCase):

     def _test_put_object_multiple_encoded_timestamps(self, broker):
         ts = (Timestamp(t) for t in itertools.count(int(time())))
-        broker.initialize(ts.next().internal, 0)
-        t = [ts.next() for _ in range(9)]
+        broker.initialize(next(ts).internal, 0)
+        t = [next(ts) for _ in range(9)]

         # Create initial object
         broker.put_object('obj_name', t[0].internal, 123,
@@ -630,8 +630,8 @@ class TestContainerBroker(unittest.TestCase):

     def _test_put_object_multiple_explicit_timestamps(self, broker):
         ts = (Timestamp(t) for t in itertools.count(int(time())))
-        broker.initialize(ts.next().internal, 0)
-        t = [ts.next() for _ in range(11)]
+        broker.initialize(next(ts).internal, 0)
+        t = [next(ts) for _ in range(11)]

         # Create initial object
         broker.put_object('obj_name', t[0].internal, 123,
@@ -735,10 +735,10 @@ class TestContainerBroker(unittest.TestCase):
         # timestamp as last-modified time
         ts = (Timestamp(t) for t in itertools.count(int(time())))
         broker = ContainerBroker(':memory:', account='a', container='c')
-        broker.initialize(ts.next().internal, 0)
+        broker.initialize(next(ts).internal, 0)

         # simple 'single' timestamp case
-        t0 = ts.next()
+        t0 = next(ts)
         broker.put_object('obj1', t0.internal, 0, 'text/plain', 'hash1')
         listing = broker.list_objects_iter(100, '', None, None, '')
         self.assertEqual(len(listing), 1)
         self.assertEqual(listing[0][0], 'obj1')
         self.assertEqual(listing[0][1], t0.internal)

         # content-type and metadata are updated at t1
-        t1 = ts.next()
+        t1 = next(ts)
         t_encoded = encode_timestamps(t0, t1, t1)
         broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
         listing = broker.list_objects_iter(100, '', None, None, '')
@@ -755,10 +755,10 @@
         self.assertEqual(listing[0][1], t1.internal)

         # used later
-        t2 = ts.next()
+        t2 = next(ts)

         # metadata is updated at t3
-        t3 = ts.next()
+        t3 = next(ts)
         t_encoded = encode_timestamps(t0, t1, t3)
         broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
         listing = broker.list_objects_iter(100, '', None, None, '')
@@ -775,7 +775,7 @@
         self.assertEqual(listing[0][1], t3.internal)

         # all parts updated at t4, last-modified should be t4
-        t4 = ts.next()
+        t4 = next(ts)
         t_encoded = encode_timestamps(t4, t4, t4)
         broker.put_object('obj1', t_encoded, 0, 'text/plain', 'hash1')
         listing = broker.list_objects_iter(100, '', None, None, '')
From 
873331185713b2e2466456e8cfcdb02ec37ff955 Mon Sep 17 00:00:00 2001
From: John Dickinson
Date: Wed, 20 Jul 2016 11:09:35 -0700
Subject: [PATCH 081/156] Prevent CPU spinning when there are no children

If you deploy an object server but have no rings at all (and are using
servers-per-port), then the CPU will spin as it checks for child
processes since there are actually no child processes to check.

This patch adds a sleep so that the CPU doesn't spin.

Change-Id: Iece62367aa2481a21752144b1f4477a3713282fe
---
 swift/common/wsgi.py | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py
index 8ee12c8b82..88e61f2293 100644
--- a/swift/common/wsgi.py
+++ b/swift/common/wsgi.py
@@ -952,12 +952,21 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):

             with Timeout(loop_timeout, exception=False):
                 try:
-                    pid, status = green_os.wait()
-                    if os.WIFEXITED(status) or os.WIFSIGNALED(status):
-                        strategy.register_worker_exit(pid)
-                except OSError as err:
-                    if err.errno not in (errno.EINTR, errno.ECHILD):
-                        raise
+                    try:
+                        pid, status = green_os.wait()
+                        if os.WIFEXITED(status) or os.WIFSIGNALED(status):
+                            strategy.register_worker_exit(pid)
+                    except OSError as err:
+                        if err.errno not in (errno.EINTR, errno.ECHILD):
+                            raise
+                        if err.errno == errno.ECHILD:
+                            # If there are no children at all (ECHILD), then
+                            # there's nothing to actually wait on. We sleep
+                            # for a little bit to avoid a tight CPU spin
+                            # and still are able to catch any KeyboardInterrupt
+                            # events that happen. The value of 0.01 matches the
+                            # value in eventlet's waitpid().
+                            sleep(0.01)
                 except KeyboardInterrupt:
                     logger.notice('User quit')
                     running[0] = False
From 26d91f2b1006a3affff963d6aa8d0d0ced134723 Mon Sep 17 00:00:00 2001
From: Lokesh S
Date: Wed, 20 Jul 2016 13:49:29 +0000
Subject: [PATCH 082/156] Python3 eventlet patched httplib _MAXHEADERS

This change patches _MAXHEADERS on the correct eventlet monkey-patched
httplib module, as required by Swift.
It also makes the configparser import inside the copy middleware py3
compatible.

Change-Id: I2f7dbcecbbecd7cb69b7031faa39f35bcfadcfc8
---
 swift/common/bufferedhttp.py    | 5 ++++-
 swift/common/middleware/copy.py | 2 +-
 test/probe/test_wsgi_servers.py | 4 +++-
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py
index 2db36b155f..1294609133 100644
--- a/swift/common/bufferedhttp.py
+++ b/swift/common/bufferedhttp.py
@@ -38,7 +38,10 @@ from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \
 from six.moves.urllib.parse import quote
 import six

-httplib = eventlet.import_patched('httplib')
+if six.PY2:
+    httplib = eventlet.import_patched('httplib')
+else:
+    httplib = eventlet.import_patched('http.client')
 httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT

diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py
index 1bb2234a05..9ef44a2243 100644
--- a/swift/common/middleware/copy.py
+++ b/swift/common/middleware/copy.py
@@ -132,7 +132,7 @@ backwards compatibility. 
At first chance, set ``object_post_as_copy`` to """ import os -from ConfigParser import ConfigParser, NoSectionError, NoOptionError +from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from six.moves.urllib.parse import quote, unquote from swift.common import utils diff --git a/test/probe/test_wsgi_servers.py b/test/probe/test_wsgi_servers.py index 437912dcf8..46175cf458 100644 --- a/test/probe/test_wsgi_servers.py +++ b/test/probe/test_wsgi_servers.py @@ -15,9 +15,11 @@ # limitations under the License. import unittest -import httplib + import random +from six.moves import http_client as httplib + from swift.common.storage_policy import POLICIES from swift.common.ring import Ring from swift.common.manager import Manager From afe3968dd158d829c667764a3acd223b52aa5756 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 6 Jul 2016 11:27:58 +0100 Subject: [PATCH 083/156] Copy headers correctly when copying object Fix copy middleware so that all client-defined object headers that object servers allow to be persisted are copied. For example, content-encoding and content-disposition will now be copied. Fix treatment of x-fresh-metadata header so that, when it is used, new object sysmeta is applied to the object copy in the same way as a copy without x-fresh-metadata. Remove unnecessary passing of original request headers to sink PUT request constructor: passing the environ is sufficient to have the new request inherit the original's headers. Add tests for this change and to verify that content-type gets either copied or updated if supplied with the copy request. Add tests for x-fresh-metadata treatment. Closes-Bug: #1391826 Closes-Bug: #1600247 Co-Authored-By: Thiago da Silva Change-Id: I917fb0b4e831c13e04ade1c5e0b9821802dec967 --- swift/common/middleware/copy.py | 48 +++-- test/functional/tests.py | 88 ++++++++-- test/unit/common/middleware/test_copy.py | 212 +++++++++++++++++++++++ 3 files changed, 319 insertions(+), 29 deletions(-) diff --git a/swift/common/middleware/copy.py b/swift/common/middleware/copy.py index 1bb2234a05..4bc6c234af 100644 --- a/swift/common/middleware/copy.py +++ b/swift/common/middleware/copy.py @@ -455,10 +455,33 @@ class ServerSideCopyMiddleware(object): close_if_possible(source_resp.app_iter) return source_resp(source_resp.environ, start_response) - # Create a new Request object based on the original req instance. - # This will preserve env and headers. - sink_req = Request.blank(req.path_info, - environ=req.environ, headers=req.headers) + # Create a new Request object based on the original request instance. + # This will preserve original request environ including headers. + sink_req = Request.blank(req.path_info, environ=req.environ) + + def is_object_sysmeta(k): + return is_sys_meta('object', k) + + if 'swift.post_as_copy' in sink_req.environ: + # Post-as-copy: ignore new sysmeta, copy existing sysmeta + remove_items(sink_req.headers, is_object_sysmeta) + copy_header_subset(source_resp, sink_req, is_object_sysmeta) + elif config_true_value(req.headers.get('x-fresh-metadata', 'false')): + # x-fresh-metadata only applies to copy, not post-as-copy: ignore + # existing user metadata, update existing sysmeta with new + copy_header_subset(source_resp, sink_req, is_object_sysmeta) + copy_header_subset(req, sink_req, is_object_sysmeta) + else: + # First copy existing sysmeta, user meta and other headers from the + # source to the sink, apart from headers that are conditionally + # copied below and timestamps. 
+ exclude_headers = ('x-static-large-object', 'x-object-manifest', + 'etag', 'content-type', 'x-timestamp', + 'x-backend-timestamp') + copy_header_subset(source_resp, sink_req, + lambda k: k.lower() not in exclude_headers) + # now update with original req headers + sink_req.headers.update(req.headers) params = sink_req.params if params.get('multipart-manifest') == 'get': @@ -489,32 +512,19 @@ class ServerSideCopyMiddleware(object): else: # since we're not copying the source etag, make sure that any # container update override values are not copied. - remove_items(source_resp.headers, lambda k: k.startswith( + remove_items(sink_req.headers, lambda k: k.startswith( 'X-Object-Sysmeta-Container-Update-Override-')) # We no longer need these headers sink_req.headers.pop('X-Copy-From', None) sink_req.headers.pop('X-Copy-From-Account', None) + # If the copy request does not explicitly override content-type, # use the one present in the source object. if not req.headers.get('content-type'): sink_req.headers['Content-Type'] = \ source_resp.headers['Content-Type'] - fresh_meta_flag = config_true_value( - sink_req.headers.get('x-fresh-metadata', 'false')) - - if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ: - # Post-as-copy: ignore new sysmeta, copy existing sysmeta - condition = lambda k: is_sys_meta('object', k) - remove_items(sink_req.headers, condition) - copy_header_subset(source_resp, sink_req, condition) - else: - # Copy/update existing sysmeta, transient-sysmeta and user meta - _copy_headers(source_resp.headers, sink_req.headers) - # Copy/update new metadata provided in request if any - _copy_headers(req.headers, sink_req.headers) - # Create response headers for PUT response resp_headers = self._create_response_headers(source_path, source_resp, sink_req) diff --git a/test/functional/tests.py b/test/functional/tests.py index 156094ac4b..29194964d1 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -1186,11 +1186,24 @@ class TestFile(Base): file_item = self.env.container.file(source_filename) metadata = {} - for i in range(1): - metadata[Utils.create_ascii_name()] = Utils.create_name() + metadata[Utils.create_ascii_name()] = Utils.create_name() + put_headers = {'Content-Type': 'application/test', + 'Content-Encoding': 'gzip', + 'Content-Disposition': 'attachment; filename=myfile'} + file_item.metadata = metadata + data = file_item.write_random(hdrs=put_headers) - data = file_item.write_random() - file_item.sync_metadata(metadata) + # the allowed headers are configurable in object server, so we cannot + # assert that content-encoding and content-disposition get *copied* + # unless they were successfully set on the original PUT, so populate + # expected_headers by making a HEAD on the original object + file_item.initialize() + self.assertEqual('application/test', file_item.content_type) + resp_headers = dict(file_item.conn.response.getheaders()) + expected_headers = {} + for k, v in put_headers.items(): + if k.lower() in resp_headers: + expected_headers[k] = v dest_cont = self.env.account.container(Utils.create_name()) self.assertTrue(dest_cont.create()) @@ -1201,16 +1214,71 @@ class TestFile(Base): for prefix in ('', '/'): dest_filename = Utils.create_name() - file_item = self.env.container.file(source_filename) - file_item.copy('%s%s' % (prefix, cont), dest_filename) + extra_hdrs = {'X-Object-Meta-Extra': 'fresh'} + self.assertTrue(file_item.copy( + '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs)) self.assertIn(dest_filename, cont.files()) - file_item 
= cont.file(dest_filename) + file_copy = cont.file(dest_filename) - self.assertEqual(data, file_item.read()) - self.assertTrue(file_item.initialize()) - self.assertEqual(metadata, file_item.metadata) + self.assertEqual(data, file_copy.read()) + self.assertTrue(file_copy.initialize()) + expected_metadata = dict(metadata) + # new metadata should be merged with existing + expected_metadata['extra'] = 'fresh' + self.assertDictEqual(expected_metadata, file_copy.metadata) + resp_headers = dict(file_copy.conn.response.getheaders()) + for k, v in expected_headers.items(): + self.assertIn(k.lower(), resp_headers) + self.assertEqual(v, resp_headers[k.lower()]) + + # repeat copy with updated content-type, content-encoding and + # content-disposition, which should get updated + extra_hdrs = { + 'X-Object-Meta-Extra': 'fresher', + 'Content-Type': 'application/test-changed', + 'Content-Encoding': 'not_gzip', + 'Content-Disposition': 'attachment; filename=notmyfile'} + self.assertTrue(file_item.copy( + '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs)) + + self.assertIn(dest_filename, cont.files()) + + file_copy = cont.file(dest_filename) + + self.assertEqual(data, file_copy.read()) + self.assertTrue(file_copy.initialize()) + expected_metadata['extra'] = 'fresher' + self.assertDictEqual(expected_metadata, file_copy.metadata) + resp_headers = dict(file_copy.conn.response.getheaders()) + # if k is in expected_headers then we can assert its new value + for k, v in expected_headers.items(): + v = extra_hdrs.get(k, v) + self.assertIn(k.lower(), resp_headers) + self.assertEqual(v, resp_headers[k.lower()]) + + # repeat copy with X-Fresh-Metadata header - existing user + # metadata should not be copied, new completely replaces it. + extra_hdrs = {'Content-Type': 'application/test-updated', + 'X-Object-Meta-Extra': 'fresher', + 'X-Fresh-Metadata': 'true'} + self.assertTrue(file_item.copy( + '%s%s' % (prefix, cont), dest_filename, hdrs=extra_hdrs)) + + self.assertIn(dest_filename, cont.files()) + + file_copy = cont.file(dest_filename) + + self.assertEqual(data, file_copy.read()) + self.assertTrue(file_copy.initialize()) + self.assertEqual('application/test-updated', + file_copy.content_type) + expected_metadata = {'extra': 'fresher'} + self.assertDictEqual(expected_metadata, file_copy.metadata) + resp_headers = dict(file_copy.conn.response.getheaders()) + for k in ('Content-Disposition', 'Content-Encoding'): + self.assertNotIn(k.lower(), resp_headers) def testCopyAccount(self): # makes sure to test encoded characters diff --git a/test/unit/common/middleware/test_copy.py b/test/unit/common/middleware/test_copy.py index 3a6663db00..4c2643dd92 100644 --- a/test/unit/common/middleware/test_copy.py +++ b/test/unit/common/middleware/test_copy.py @@ -1198,6 +1198,218 @@ class TestServerSideCopyMiddleware(unittest.TestCase): self.assertEqual('OPTIONS', self.authorized[0].method) self.assertEqual('/v1/a/c/o', self.authorized[0].path) + def _test_COPY_source_headers(self, extra_put_headers): + # helper method to perform a COPY with some metadata headers that + # should always be sent to the destination + put_headers = {'Destination': '/c1/o', + 'X-Object-Meta-Test2': 'added', + 'X-Object-Sysmeta-Test2': 'added', + 'X-Object-Transient-Sysmeta-Test2': 'added'} + put_headers.update(extra_put_headers) + get_resp_headers = { + 'X-Timestamp': '1234567890.12345', + 'X-Backend-Timestamp': '1234567890.12345', + 'Content-Type': 'text/original', + 'Content-Encoding': 'gzip', + 'Content-Disposition': 'attachment; 
filename=myfile', + 'X-Object-Meta-Test': 'original', + 'X-Object-Sysmeta-Test': 'original', + 'X-Object-Transient-Sysmeta-Test': 'original', + 'X-Foo': 'Bar'} + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers) + self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='COPY', headers=put_headers) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '201 Created') + calls = self.app.calls_with_headers + self.assertEqual(2, len(calls)) + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + # these headers should always be applied to the destination + self.assertEqual('added', req_headers.get('X-Object-Meta-Test2')) + self.assertEqual('added', req_headers.get('X-Object-Sysmeta-Test2')) + self.assertEqual('added', + req_headers.get('X-Object-Transient-Sysmeta-Test2')) + return req_headers + + def test_COPY_source_headers_no_updates(self): + # copy should preserve existing metadata if not updated + req_headers = self._test_COPY_source_headers({}) + self.assertEqual('text/original', req_headers.get('Content-Type')) + self.assertEqual('gzip', req_headers.get('Content-Encoding')) + self.assertEqual('attachment; filename=myfile', + req_headers.get('Content-Disposition')) + self.assertEqual('original', req_headers.get('X-Object-Meta-Test')) + self.assertEqual('original', req_headers.get('X-Object-Sysmeta-Test')) + self.assertEqual('original', + req_headers.get('X-Object-Transient-Sysmeta-Test')) + self.assertEqual('Bar', req_headers.get('X-Foo')) + self.assertNotIn('X-Timestamp', req_headers) + self.assertNotIn('X-Backend-Timestamp', req_headers) + + def test_COPY_source_headers_with_updates(self): + # copy should apply any updated values to existing metadata + put_headers = { + 'Content-Type': 'text/not_original', + 'Content-Encoding': 'not_gzip', + 'Content-Disposition': 'attachment; filename=notmyfile', + 'X-Object-Meta-Test': 'not_original', + 'X-Object-Sysmeta-Test': 'not_original', + 'X-Object-Transient-Sysmeta-Test': 'not_original', + 'X-Foo': 'Not Bar'} + req_headers = self._test_COPY_source_headers(put_headers) + self.assertEqual('text/not_original', req_headers.get('Content-Type')) + self.assertEqual('not_gzip', req_headers.get('Content-Encoding')) + self.assertEqual('attachment; filename=notmyfile', + req_headers.get('Content-Disposition')) + self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test')) + self.assertEqual('not_original', + req_headers.get('X-Object-Sysmeta-Test')) + self.assertEqual('not_original', + req_headers.get('X-Object-Transient-Sysmeta-Test')) + self.assertEqual('Not Bar', req_headers.get('X-Foo')) + self.assertNotIn('X-Timestamp', req_headers) + self.assertNotIn('X-Backend-Timestamp', req_headers) + + def test_COPY_x_fresh_metadata_no_updates(self): + # existing user metadata should not be copied, sysmeta is copied + put_headers = { + 'X-Fresh-Metadata': 'true', + 'X-Extra': 'Fresh'} + req_headers = self._test_COPY_source_headers(put_headers) + self.assertEqual('text/original', req_headers.get('Content-Type')) + self.assertEqual('Fresh', req_headers.get('X-Extra')) + self.assertEqual('original', + req_headers.get('X-Object-Sysmeta-Test')) + self.assertIn('X-Fresh-Metadata', req_headers) + self.assertNotIn('X-Object-Meta-Test', req_headers) + self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers) + self.assertNotIn('X-Timestamp', req_headers) + self.assertNotIn('X-Backend-Timestamp', req_headers) + 
self.assertNotIn('Content-Encoding', req_headers) + self.assertNotIn('Content-Disposition', req_headers) + self.assertNotIn('X-Foo', req_headers) + + def test_COPY_x_fresh_metadata_with_updates(self): + # existing user metadata should not be copied, new metadata replaces it + put_headers = { + 'X-Fresh-Metadata': 'true', + 'Content-Type': 'text/not_original', + 'Content-Encoding': 'not_gzip', + 'Content-Disposition': 'attachment; filename=notmyfile', + 'X-Object-Meta-Test': 'not_original', + 'X-Object-Sysmeta-Test': 'not_original', + 'X-Object-Transient-Sysmeta-Test': 'not_original', + 'X-Foo': 'Not Bar', + 'X-Extra': 'Fresh'} + req_headers = self._test_COPY_source_headers(put_headers) + self.assertEqual('Fresh', req_headers.get('X-Extra')) + self.assertEqual('text/not_original', req_headers.get('Content-Type')) + self.assertEqual('not_gzip', req_headers.get('Content-Encoding')) + self.assertEqual('attachment; filename=notmyfile', + req_headers.get('Content-Disposition')) + self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test')) + self.assertEqual('not_original', + req_headers.get('X-Object-Sysmeta-Test')) + self.assertEqual('not_original', + req_headers.get('X-Object-Transient-Sysmeta-Test')) + self.assertEqual('Not Bar', req_headers.get('X-Foo')) + + def _test_POST_source_headers(self, extra_post_headers): + # helper method to perform a POST with metadata headers that should + # always be sent to the destination + post_headers = {'X-Object-Meta-Test2': 'added', + 'X-Object-Sysmeta-Test2': 'added', + 'X-Object-Transient-Sysmeta-Test2': 'added'} + post_headers.update(extra_post_headers) + get_resp_headers = { + 'X-Timestamp': '1234567890.12345', + 'X-Backend-Timestamp': '1234567890.12345', + 'Content-Type': 'text/original', + 'Content-Encoding': 'gzip', + 'Content-Disposition': 'attachment; filename=myfile', + 'X-Object-Meta-Test': 'original', + 'X-Object-Sysmeta-Test': 'original', + 'X-Object-Transient-Sysmeta-Test': 'original', + 'X-Foo': 'Bar'} + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers) + self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {}) + req = Request.blank('/v1/a/c/o', method='POST', headers=post_headers) + status, headers, body = self.call_ssc(req) + self.assertEqual(status, '202 Accepted') + calls = self.app.calls_with_headers + self.assertEqual(2, len(calls)) + method, path, req_headers = calls[1] + self.assertEqual('PUT', method) + # these headers should always be applied to the destination + self.assertEqual('added', req_headers.get('X-Object-Meta-Test2')) + self.assertEqual('added', + req_headers.get('X-Object-Transient-Sysmeta-Test2')) + # POSTed sysmeta should never be applied to the destination + self.assertNotIn('X-Object-Sysmeta-Test2', req_headers) + # existing sysmeta should always be preserved + self.assertEqual('original', + req_headers.get('X-Object-Sysmeta-Test')) + return req_headers + + def test_POST_no_updates(self): + post_headers = {} + req_headers = self._test_POST_source_headers(post_headers) + self.assertEqual('text/original', req_headers.get('Content-Type')) + self.assertNotIn('X-Object-Meta-Test', req_headers) + self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers) + self.assertNotIn('X-Timestamp', req_headers) + self.assertNotIn('X-Backend-Timestamp', req_headers) + self.assertNotIn('Content-Encoding', req_headers) + self.assertNotIn('Content-Disposition', req_headers) + self.assertNotIn('X-Foo', req_headers) + + def test_POST_with_updates(self): + post_headers = { + 
'Content-Type': 'text/not_original',
+            'Content-Encoding': 'not_gzip',
+            'Content-Disposition': 'attachment; filename=notmyfile',
+            'X-Object-Meta-Test': 'not_original',
+            'X-Object-Sysmeta-Test': 'not_original',
+            'X-Object-Transient-Sysmeta-Test': 'not_original',
+            'X-Foo': 'Not Bar',
+        }
+        req_headers = self._test_POST_source_headers(post_headers)
+        self.assertEqual('text/not_original', req_headers.get('Content-Type'))
+        self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
+        self.assertEqual('attachment; filename=notmyfile',
+                         req_headers.get('Content-Disposition'))
+        self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
+        self.assertEqual('not_original',
+                         req_headers.get('X-Object-Transient-Sysmeta-Test'))
+        self.assertEqual('Not Bar', req_headers.get('X-Foo'))
+
+    def test_POST_x_fresh_metadata_with_updates(self):
+        # post-as-copy trumps x-fresh-metadata i.e. existing user metadata
+        # should not be copied, sysmeta is copied *and not updated with new*
+        post_headers = {
+            'X-Fresh-Metadata': 'true',
+            'Content-Type': 'text/not_original',
+            'Content-Encoding': 'not_gzip',
+            'Content-Disposition': 'attachment; filename=notmyfile',
+            'X-Object-Meta-Test': 'not_original',
+            'X-Object-Sysmeta-Test': 'not_original',
+            'X-Object-Transient-Sysmeta-Test': 'not_original',
+            'X-Foo': 'Not Bar',
+        }
+        req_headers = self._test_POST_source_headers(post_headers)
+        self.assertEqual('text/not_original', req_headers.get('Content-Type'))
+        self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
+        self.assertEqual('attachment; filename=notmyfile',
+                         req_headers.get('Content-Disposition'))
+        self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
+        self.assertEqual('not_original',
+                         req_headers.get('X-Object-Transient-Sysmeta-Test'))
+        self.assertEqual('Not Bar', req_headers.get('X-Foo'))
+        self.assertIn('X-Fresh-Metadata', req_headers)
+

 class TestServerSideCopyConfiguration(unittest.TestCase):

From 2876f59d4cdbafc297d79f4e6295f05e3448ae47 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Wed, 20 Jul 2016 18:16:27 -0700
Subject: [PATCH 084/156] Cache fragment size for EC policy

ECStoragePolicy.fragment_size never changes while Swift is running,
because it is derived from ec_segment_size and ec_type, which are
defined statically in swift.conf, so let's cache the value after first
retrieving it from the pyeclib driver.

Moreover, pyeclib <= 1.2.1 (the current newest) has a bug [1] that
leaks the reference count of the items in the returned dict (i.e.
causes a memory leak), so this caching also mitigates the leak by
keeping the number of get_segment_info calls as low as possible.

Note that the complete fix for the memory leak for pyeclib is proposed
at https://review.openstack.org/#/c/344066/

1: https://bugs.launchpad.net/pyeclib/+bug/1604335

Related-Bug: #1604335
Change-Id: I6bbaa4063dc462383c949764b6567b2bee233689
---
 swift/common/storage_policy.py          |  8 ++++++--
 test/unit/common/test_storage_policy.py | 24 ++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py
index 19b9f26e77..fd0b54dfa8 100644
--- a/swift/common/storage_policy.py
+++ b/swift/common/storage_policy.py
@@ -475,6 +475,7 @@ class ECStoragePolicy(BaseStoragePolicy):
         # quorum size in the EC case depends on the choice of EC scheme.
self._ec_quorum_size = \
             self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
+        self._fragment_size = None

     @property
     def ec_type(self):
@@ -511,8 +512,11 @@ class ECStoragePolicy(BaseStoragePolicy):
         # segment_size we'll still only read *the whole one and only last
         # fragment* and pass than into pyeclib who will know what to do with
         # it just as it always does when the last fragment is < fragment_size.
-        return self.pyeclib_driver.get_segment_info(
-            self.ec_segment_size, self.ec_segment_size)['fragment_size']
+        if self._fragment_size is None:
+            self._fragment_size = self.pyeclib_driver.get_segment_info(
+                self.ec_segment_size, self.ec_segment_size)['fragment_size']
+
+        return self._fragment_size

     @property
     def ec_scheme_description(self):
diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py
index 3fd721b732..14f4ac7ff3 100755
--- a/test/unit/common/test_storage_policy.py
+++ b/test/unit/common/test_storage_policy.py
@@ -27,6 +27,7 @@ from swift.common.storage_policy import (
     VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
 from swift.common.ring import RingData
 from swift.common.exceptions import RingValidationError
+from pyeclib.ec_iface import ECDriver


 @BaseStoragePolicy.register('fake')
@@ -1244,6 +1245,29 @@ class TestStoragePolicies(unittest.TestCase):
             expected_info = expected[(int(policy), False)]
             self.assertEqual(policy.get_info(config=False), expected_info)

+    def test_ec_fragment_size_cached(self):
+        policy = ECStoragePolicy(
+            0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
+            ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
+            ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
+
+        ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
+                             k=2, m=1)
+        expected_fragment_size = ec_driver.get_segment_info(
+            DEFAULT_EC_OBJECT_SEGMENT_SIZE,
+            DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
+
+        with mock.patch.object(
+                policy.pyeclib_driver, 'get_segment_info') as fake:
+            fake.return_value = {
+                'fragment_size': expected_fragment_size}
+
+            for x in range(10):
+                self.assertEqual(expected_fragment_size,
+                                 policy.fragment_size)
+                # pyeclib_driver.get_segment_info is called only once
+                self.assertEqual(1, fake.call_count)
+

 if __name__ == '__main__':
     unittest.main()
From 3a1a198780433e6ebf2bcc3862bd63c7d033930d Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Sun, 3 Jul 2016 22:03:20 -0700
Subject: [PATCH 085/156] Raise ValueError if empty value coming into
 encrypt_header_val

encrypt_header_val is used to translate a raw header value into an
encrypted value. Semantically, a header with an empty value won't be
stored, and all callers remove such headers before calling
encrypt_header_val. Since there is no reason to return ('', None), it
is better to raise ValueError than to cause a later error for users of
the return value (e.g. dump_crypto_meta).

This patch also adds a few unit tests for the cases above.
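For illustration only, a minimal self-contained sketch (simplified
stand-ins, not the real middleware code) of why failing fast beats the
old ('', None) sentinel, whose None crypto_meta would only blow up
later in a consumer such as dump_crypto_meta:

    import json

    def dump_crypto_meta(crypto_meta):
        # stand-in for the real serializer; crashes on a None input
        return json.dumps({'iv': crypto_meta['iv']})

    def encrypt_header_val(value):
        # sketch of the new contract: reject empty values up front
        if not value:
            raise ValueError('empty value is not acceptable')
        return 'enc(%s)' % value, {'iv': 'This is an IV123'}

    val, meta = encrypt_header_val('encrypt me')
    print(dump_crypto_meta(meta))  # fine
    try:
        encrypt_header_val('')
    except ValueError as err:
        print('rejected at the source: %s' % err)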
Change-Id: Ic1237f4afb8c0e466be5ce59fe31b667c39242b0
---
 swift/common/middleware/crypto/encrypter.py   |  3 +-
 .../middleware/crypto/test_encrypter.py       | 33 +++++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/swift/common/middleware/crypto/encrypter.py b/swift/common/middleware/crypto/encrypter.py
index b6c651fb84..9af1a7e646 100644
--- a/swift/common/middleware/crypto/encrypter.py
+++ b/swift/common/middleware/crypto/encrypter.py
@@ -39,9 +39,10 @@ def encrypt_header_val(crypto, value, key):
     :returns: a tuple of (encrypted value, crypto_meta) where crypto_meta is a
         dict of form returned by
         :py:func:`~swift.common.middleware.crypto.Crypto.get_crypto_meta`
+    :raises ValueError: if value is empty
     """
     if not value:
-        return '', None
+        raise ValueError('empty value is not acceptable')

     crypto_meta = crypto.create_crypto_meta()
     crypto_ctxt = crypto.create_encryption_ctxt(key, crypto_meta['iv'])
diff --git a/test/unit/common/middleware/crypto/test_encrypter.py b/test/unit/common/middleware/crypto/test_encrypter.py
index 1fa765c66b..c269e5d007 100644
--- a/test/unit/common/middleware/crypto/test_encrypter.py
+++ b/test/unit/common/middleware/crypto/test_encrypter.py
@@ -536,6 +536,7 @@ class TestEncrypter(unittest.TestCase):
         env = {'REQUEST_METHOD': 'POST',
                CRYPTO_KEY_CALLBACK: fetch_crypto_keys}
         hdrs = {'x-object-meta-test': 'encrypt me',
+                'x-object-meta-test2': '',
                 'x-object-sysmeta-test': 'do not encrypt me'}
         req = Request.blank('/v1/a/c/o', environ=env, body=body, headers=hdrs)
         key = fetch_crypto_keys()['object']
@@ -551,6 +552,8 @@ class TestEncrypter(unittest.TestCase):

         # user meta is encrypted
         self._verify_user_metadata(req_hdrs, 'Test', 'encrypt me', key)
+        # unless it had no value
+        self.assertEqual('', req_hdrs['X-Object-Meta-Test2'])

         # sysmeta is not encrypted
         self.assertEqual('do not encrypt me',
@@ -878,6 +881,36 @@ class TestEncrypter(unittest.TestCase):
             req.get_response(app)
         self.assertEqual(FakeAppThatExcepts.MESSAGE, catcher.exception.body)

+    def test_encrypt_header_val(self):
+        # Prepare key and Crypto instance
+        object_key = fetch_crypto_keys()['object']
+
+        # - Normal string can be crypted
+        encrypted = encrypter.encrypt_header_val(Crypto(), 'aaa', object_key)
+        # sanity: return value is 2 item tuple
+        self.assertEqual(2, len(encrypted))
+        crypted_val, crypt_info = encrypted
+        expected_crypt_val = base64.b64encode(
+            encrypt('aaa', object_key, FAKE_IV))
+        expected_crypt_info = {
+            'cipher': 'AES_CTR_256', 'iv': 'This is an IV123'}
+        self.assertEqual(expected_crypt_val, crypted_val)
+        self.assertEqual(expected_crypt_info, crypt_info)
+
+        # - Empty string raises a ValueError for safety
+        with self.assertRaises(ValueError) as cm:
+            encrypter.encrypt_header_val(Crypto(), '', object_key)
+
+        self.assertEqual('empty value is not acceptable',
+                         cm.exception.message)
+
+        # - None also raises a ValueError for safety
+        with self.assertRaises(ValueError) as cm:
+            encrypter.encrypt_header_val(Crypto(), None, object_key)
+
+        self.assertEqual('empty value is not acceptable',
+                         cm.exception.message)
+

 if __name__ == '__main__':
     unittest.main()
From 1eb96397e7a6f477ba31df85eda892769a8a182e Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Fri, 15 Jan 2016 03:53:01 -0800
Subject: [PATCH 086/156] Fix EC ring validation at ring reload

Swift EC has a strong constraint that the ring must have a number of
replicas which fits ec_k + ec_m. That is validated when servers
start up.
However, Swift is more likely to load such an invalid ring when a
request comes in and triggers node iteration calls like get_nodes or
get_part_nodes, where no ring validation happens.

This patch moves ring validation from the policy's validate_ring into
the ring instance as a validation_hook that runs at ring reload. With
this patch, the ring instance is allowed to keep using the old ring if
the reload is not forced.

Note that the exception raised when an invalid ring is found was
changed from RingValidationError to RingLoadError, because
RingValidationError is a child of RingBuilderError but the ring reload
is obviously outside of the "builder".

Closes-Bug: #1534572

Change-Id: I6428fbfb04e0c79679b917d5e57bd2a34f2a0875
---
 swift/common/exceptions.py              |  4 ++
 swift/common/ring/ring.py               | 20 +++++-
 swift/common/storage_policy.py          | 62 +++++++++--------
 test/unit/common/test_storage_policy.py | 33 +++++----
 test/unit/proxy/test_mem_server.py      |  3 +
 test/unit/proxy/test_server.py          | 92 ++++++++++++++++++++++++-
 6 files changed, 170 insertions(+), 44 deletions(-)

diff --git a/swift/common/exceptions.py b/swift/common/exceptions.py
index 05f972f972..f3e633707d 100644
--- a/swift/common/exceptions.py
+++ b/swift/common/exceptions.py
@@ -145,6 +145,10 @@ class LockTimeout(MessageTimeout):
     pass


+class RingLoadError(SwiftException):
+    pass
+
+
 class RingBuilderError(SwiftException):
     pass

diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py
index 4a509ccffb..9f3d4f7b1b 100644
--- a/swift/common/ring/ring.py
+++ b/swift/common/ring/ring.py
@@ -29,6 +29,7 @@ from tempfile import NamedTemporaryFile

 from six.moves import range

+from swift.common.exceptions import RingLoadError
 from swift.common.utils import hash_path, validate_configuration
 from swift.common.ring.utils import tiers_for_dev

@@ -156,9 +157,14 @@ class Ring(object):

     :param serialized_path: path to serialized RingData instance
     :param reload_time: time interval in seconds to check for a ring change
+    :param ring_name: ring name string (typically supplied by the policy)
+    :param validation_hook: hook called to validate ring data at load time
+
+    :raises RingLoadError: if the loaded ring data violates its constraints
     """

-    def __init__(self, serialized_path, reload_time=15, ring_name=None):
+    def __init__(self, serialized_path, reload_time=15, ring_name=None,
+                 validation_hook=lambda ring_data: None):
         # can't use the ring unless HASH_PATH_SUFFIX is set
         validate_configuration()
         if ring_name:
@@ -167,12 +173,24 @@ class Ring(object):
         else:
             self.serialized_path = os.path.join(serialized_path)
         self.reload_time = reload_time
+        self._validation_hook = validation_hook
         self._reload(force=True)

     def _reload(self, force=False):
         self._rtime = time() + self.reload_time
         if force or self.has_changed():
             ring_data = RingData.load(self.serialized_path)
+
+            try:
+                self._validation_hook(ring_data)
+            except RingLoadError:
+                if force:
+                    raise
+                else:
+                    # In a runtime reload on a working server, it's ok to use
+                    # the old ring data if the new ring data is invalid.
+ return + self._mtime = getmtime(self.serialized_path) self._devs = ring_data.devs # NOTE(akscram): Replication parameters like replication_ip diff --git a/swift/common/storage_policy.py b/swift/common/storage_policy.py index fd0b54dfa8..0714e51dab 100644 --- a/swift/common/storage_policy.py +++ b/swift/common/storage_policy.py @@ -21,7 +21,7 @@ from swift.common.utils import ( config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv) from swift.common.ring import Ring, RingData from swift.common.utils import quorum_size -from swift.common.exceptions import RingValidationError +from swift.common.exceptions import RingLoadError from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES LEGACY_POLICY_NAME = 'Policy-0' @@ -350,13 +350,6 @@ class BaseStoragePolicy(object): self._validate_policy_name(name) self.alias_list.insert(0, name) - def _validate_ring(self): - """ - Hook, called when the ring is loaded. Can be used to - validate the ring against the StoragePolicy configuration. - """ - pass - def load_ring(self, swift_dir): """ Load the ring for this policy immediately. @@ -367,9 +360,6 @@ class BaseStoragePolicy(object): return self.object_ring = Ring(swift_dir, ring_name=self.ring_name) - # Validate ring to make sure it conforms to policy requirements - self._validate_ring() - @property def quorum(self): """ @@ -552,25 +542,6 @@ class ECStoragePolicy(BaseStoragePolicy): info.pop('ec_type') return info - def _validate_ring(self): - """ - EC specific validation - - Replica count check - we need _at_least_ (#data + #parity) replicas - configured. Also if the replica count is larger than exactly that - number there's a non-zero risk of error for code that is considering - the number of nodes in the primary list from the ring. - """ - if not self.object_ring: - raise PolicyError('Ring is not loaded') - nodes_configured = self.object_ring.replica_count - if nodes_configured != (self.ec_ndata + self.ec_nparity): - raise RingValidationError( - 'EC ring for policy %s needs to be configured with ' - 'exactly %d nodes. Got %d.' % ( - self.name, self.ec_ndata + self.ec_nparity, - nodes_configured)) - @property def quorum(self): """ @@ -593,6 +564,37 @@ class ECStoragePolicy(BaseStoragePolicy): """ return self._ec_quorum_size + def load_ring(self, swift_dir): + """ + Load the ring for this policy immediately. + + :param swift_dir: path to rings + """ + if self.object_ring: + return + + def validate_ring_data(ring_data): + """ + EC specific validation + + Replica count check - we need _at_least_ (#data + #parity) replicas + configured. Also if the replica count is larger than exactly that + number there's a non-zero risk of error for code that is + considering the number of nodes in the primary list from the ring. + """ + + nodes_configured = len(ring_data._replica2part2dev_id) + if nodes_configured != (self.ec_ndata + self.ec_nparity): + raise RingLoadError( + 'EC ring for policy %s needs to be configured with ' + 'exactly %d replicas. Got %d.' 
% ( + self.name, self.ec_ndata + self.ec_nparity, + nodes_configured)) + + self.object_ring = Ring( + swift_dir, ring_name=self.ring_name, + validation_hook=validate_ring_data) + class StoragePolicyCollection(object): """ diff --git a/test/unit/common/test_storage_policy.py b/test/unit/common/test_storage_policy.py index 14f4ac7ff3..5b9c38b913 100755 --- a/test/unit/common/test_storage_policy.py +++ b/test/unit/common/test_storage_policy.py @@ -26,7 +26,7 @@ from swift.common.storage_policy import ( BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY, VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache) from swift.common.ring import RingData -from swift.common.exceptions import RingValidationError +from swift.common.exceptions import RingLoadError from pyeclib.ec_iface import ECDriver @@ -1146,23 +1146,32 @@ class TestStoragePolicies(unittest.TestCase): test_policies = [ ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=8, ec_nparity=2, - object_ring=FakeRing(replicas=8), is_default=True), ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE, - ec_ndata=10, ec_nparity=4, - object_ring=FakeRing(replicas=10)), + ec_ndata=10, ec_nparity=4), ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE, - ec_ndata=4, ec_nparity=2, - object_ring=FakeRing(replicas=7)), + ec_ndata=4, ec_nparity=2), ] + actual_load_ring_replicas = [8, 10, 7] policies = StoragePolicyCollection(test_policies) - for policy in policies: - msg = 'EC ring for policy %s needs to be configured with ' \ - 'exactly %d nodes.' % \ - (policy.name, policy.ec_ndata + policy.ec_nparity) - self.assertRaisesWithMessage(RingValidationError, msg, - policy._validate_ring) + def create_mock_ring_data(num_replica): + class mock_ring_data_klass(object): + def __init__(self): + self._replica2part2dev_id = [0] * num_replica + + return mock_ring_data_klass() + + for policy, ring_replicas in zip(policies, actual_load_ring_replicas): + with mock.patch('swift.common.ring.ring.RingData.load', + return_value=create_mock_ring_data(ring_replicas)): + with mock.patch( + 'swift.common.ring.ring.validate_configuration'): + msg = 'EC ring for policy %s needs to be configured with ' \ + 'exactly %d replicas.' 
% \ + (policy.name, policy.ec_ndata + policy.ec_nparity) + self.assertRaisesWithMessage(RingLoadError, msg, + policy.load_ring, 'mock') def test_storage_policy_get_info(self): test_policies = [ diff --git a/test/unit/proxy/test_mem_server.py b/test/unit/proxy/test_mem_server.py index f8bc2e3215..2221ee926e 100644 --- a/test/unit/proxy/test_mem_server.py +++ b/test/unit/proxy/test_mem_server.py @@ -51,6 +51,9 @@ class TestObjectController(test_server.TestObjectController): def test_PUT_ec_fragment_archive_etag_mismatch(self): pass + def test_reload_ring_ec(self): + pass + class TestContainerController(test_server.TestContainerController): pass diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index 44a23ef6f4..9e1582ef00 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -24,7 +24,7 @@ import sys import traceback import unittest from contextlib import contextmanager -from shutil import rmtree +from shutil import rmtree, copyfile import gc import time from textwrap import dedent @@ -3018,6 +3018,96 @@ class TestObjectController(unittest.TestCase): test_content_type('test.css', iter(['', '', 'text/css', 'text/css', 'text/css'])) + @unpatch_policies + def test_reload_ring_ec(self): + policy = POLICIES[3] + self.put_container("ec", "ec-con") + + orig_rtime = policy.object_ring._rtime + # save original file as back up + copyfile(policy.object_ring.serialized_path, + policy.object_ring.serialized_path + '.bak') + + try: + # overwrite with 2 replica, 2 devices ring + obj_devs = [] + obj_devs.append( + {'port': _test_sockets[-3].getsockname()[1], + 'device': 'sdg1'}) + obj_devs.append( + {'port': _test_sockets[-2].getsockname()[1], + 'device': 'sdh1'}) + write_fake_ring(policy.object_ring.serialized_path, + *obj_devs) + + def get_ring_reloaded_response(method): + # force to reload at the request + policy.object_ring._rtime = 0 + + trans_data = ['%s /v1/a/ec-con/o2 HTTP/1.1\r\n' % method, + 'Host: localhost\r\n', + 'Connection: close\r\n', + 'X-Storage-Token: t\r\n'] + + if method == 'PUT': + # small, so we don't get multiple EC stripes + obj = 'abCD' * 10 + + extra_trans_data = [ + 'Etag: "%s"\r\n' % md5(obj).hexdigest(), + 'Content-Length: %d\r\n' % len(obj), + 'Content-Type: application/octet-stream\r\n', + '\r\n%s' % obj + ] + trans_data.extend(extra_trans_data) + else: + trans_data.append('\r\n') + + prolis = _test_sockets[0] + sock = connect_tcp(('localhost', prolis.getsockname()[1])) + fd = sock.makefile() + fd.write(''.join(trans_data)) + fd.flush() + headers = readuntil2crlfs(fd) + + # use older ring with rollbacking + return headers + + for method in ('PUT', 'HEAD', 'GET', 'POST', 'DELETE'): + headers = get_ring_reloaded_response(method) + exp = 'HTTP/1.1 20' + self.assertEqual(headers[:len(exp)], exp) + + # proxy didn't load newest ring, use older one + self.assertEqual(3, policy.object_ring.replica_count) + + if method == 'POST': + # Take care fast post here! 
+ orig_post_as_copy = getattr( + _test_servers[0], 'object_post_as_copy', None) + try: + _test_servers[0].object_post_as_copy = False + with mock.patch.object( + _test_servers[0], + 'object_post_as_copy', False): + headers = get_ring_reloaded_response(method) + finally: + if orig_post_as_copy is None: + del _test_servers[0].object_post_as_copy + else: + _test_servers[0].object_post_as_copy = \ + orig_post_as_copy + + exp = 'HTTP/1.1 20' + self.assertEqual(headers[:len(exp)], exp) + # sanity + self.assertEqual(3, policy.object_ring.replica_count) + + finally: + policy.object_ring._rtime = orig_rtime + os.rename(policy.object_ring.serialized_path + '.bak', + policy.object_ring.serialized_path) + def test_custom_mime_types_files(self): swift_dir = mkdtemp() try: From e6776306b7d486ebd35c8f388b0ff6db51b0752b Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Thu, 23 Jun 2016 13:42:01 +0200 Subject: [PATCH 087/156] Python 3: fix usage of reload() Replace reload() builtin function with six.moves.reload_module() to make the code compatible with Python 2 and Python 3. Change-Id: I7572d613fef700b392d412501facc3bd5ee72a66 --- test/unit/common/test_daemon.py | 3 ++- test/unit/common/test_db_replicator.py | 8 +++++--- test/unit/common/test_manager.py | 4 +++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py index b81e231571..bae3a146ad 100644 --- a/test/unit/common/test_daemon.py +++ b/test/unit/common/test_daemon.py @@ -17,6 +17,7 @@ import os from six import StringIO +from six.moves import reload_module import unittest from getpass import getuser import logging @@ -69,7 +70,7 @@ class TestRunDaemon(unittest.TestCase): utils.capture_stdio = lambda *args: None def tearDown(self): - reload(utils) + reload_module(utils) def test_run(self): d = MyDaemon({}) diff --git a/test/unit/common/test_db_replicator.py b/test/unit/common/test_db_replicator.py index 29d66df99d..5552c7405a 100644 --- a/test/unit/common/test_db_replicator.py +++ b/test/unit/common/test_db_replicator.py @@ -21,12 +21,14 @@ import logging import errno import math import time -from mock import patch, call from shutil import rmtree, copy from tempfile import mkdtemp, NamedTemporaryFile -import mock import json +import mock +from mock import patch, call +from six.moves import reload_module + from swift.container.backend import DATADIR from swift.common import db_replicator from swift.common.utils import (normalize_timestamp, hash_path, @@ -44,7 +46,7 @@ TEST_CONTAINER_NAME = 'c o n' def teardown_module(): "clean up my monkey patching" - reload(db_replicator) + reload_module(db_replicator) @contextmanager diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py index c973e4afa0..dc8bc58f1b 100644 --- a/test/unit/common/test_manager.py +++ b/test/unit/common/test_manager.py @@ -24,6 +24,8 @@ import errno from collections import defaultdict from time import sleep, time +from six.moves import reload_module + from swift.common import manager from swift.common.exceptions import InvalidPidFileException @@ -283,7 +285,7 @@ class TestManagerModule(unittest.TestCase): class TestServer(unittest.TestCase): def tearDown(self): - reload(manager) + reload_module(manager) def join_swift_dir(self, path): return os.path.join(manager.SWIFT_DIR, path) From 5677a04c8f5d46da80ac4cf8be135549b422772c Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Thu, 23 Jun 2016 13:53:49 +0200 Subject: [PATCH 088/156] Python 3: Fix usage of dict methods * Replace "c = 
dict(a.items() + b.items())" with "c = dict(a); c.update(b)". It works on Python 2 and Python 3, and it may be a little bit more efficient on Python 2 (no need to create a temporary list of items). * Replace "dict.values() + dict.values()" with "list(dict.values()) + list(dict.values())": on Python 3, dict.values() is a view which doesn't support a+b operator. Change-Id: Id5a65628fe2fb7a02c713b758fcaa81154db28a0 --- .../unit/common/middleware/test_gatekeeper.py | 20 +++++++++---------- test/unit/obj/test_ssync.py | 8 ++++---- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/test/unit/common/middleware/test_gatekeeper.py b/test/unit/common/middleware/test_gatekeeper.py index 5f4e87b5a2..d07c4c007a 100644 --- a/test/unit/common/middleware/test_gatekeeper.py +++ b/test/unit/common/middleware/test_gatekeeper.py @@ -79,13 +79,11 @@ class TestGatekeeper(unittest.TestCase): 'x-object-transient-sysmeta-foo': 'value'} x_timestamp_headers = {'X-Timestamp': '1455952805.719739'} - forbidden_headers_out = dict(sysmeta_headers.items() + - x_backend_headers.items() + - object_transient_sysmeta_headers.items()) - forbidden_headers_in = dict(sysmeta_headers.items() + - x_backend_headers.items() + - object_transient_sysmeta_headers.items()) - shunted_headers_in = dict(x_timestamp_headers.items()) + forbidden_headers_out = dict(sysmeta_headers) + forbidden_headers_out.update(x_backend_headers) + forbidden_headers_out.update(object_transient_sysmeta_headers) + forbidden_headers_in = dict(forbidden_headers_out) + shunted_headers_in = dict(x_timestamp_headers) def _assertHeadersEqual(self, expected, actual): for key in expected: @@ -126,8 +124,8 @@ class TestGatekeeper(unittest.TestCase): expected_headers.update({'X-Backend-Inbound-' + k: v for k, v in self.shunted_headers_in.items()}) self._assertHeadersEqual(expected_headers, fake_app.req.headers) - unexpected_headers = dict(self.forbidden_headers_in.items() + - self.shunted_headers_in.items()) + unexpected_headers = dict(self.forbidden_headers_in) + unexpected_headers.update(self.shunted_headers_in) self._assertHeadersAbsent(unexpected_headers, fake_app.req.headers) def test_reserved_header_removed_inbound(self): @@ -163,8 +161,8 @@ class TestGatekeeper(unittest.TestCase): app = self.get_app(fake_app, {}, shunt_inbound_x_timestamp='false') resp = req.get_response(app) self.assertEqual('200 OK', resp.status) - expected_headers = dict(self.allowed_headers.items() + - self.shunted_headers_in.items()) + expected_headers = dict(self.allowed_headers) + expected_headers.update(self.shunted_headers_in) self._assertHeadersEqual(expected_headers, fake_app.req.headers) def test_reserved_header_shunt_bypassed_inbound(self): diff --git a/test/unit/obj/test_ssync.py b/test/unit/obj/test_ssync.py index e51a7c4455..21c09b59f5 100644 --- a/test/unit/obj/test_ssync.py +++ b/test/unit/obj/test_ssync.py @@ -346,7 +346,7 @@ class TestSsyncEC(TestBaseSsync): tx_tombstones['o5'][0].delete(t5) suffixes = set() - for diskfiles in (tx_objs.values() + tx_tombstones.values()): + for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()): for df in diskfiles: suffixes.add(os.path.basename(os.path.dirname(df._datadir))) @@ -536,7 +536,7 @@ class TestSsyncEC(TestBaseSsync): tx_tombstones['o5'][0].delete(t5) suffixes = set() - for diskfiles in (tx_objs.values() + tx_tombstones.values()): + for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()): for df in diskfiles: suffixes.add(os.path.basename(os.path.dirname(df._datadir))) @@ -706,7 +706,7 @@ 
class TestSsyncReplication(TestBaseSsync):
         tx_tombstones['o7'][0].delete(t7b)
 
         suffixes = set()
-        for diskfiles in (tx_objs.values() + tx_tombstones.values()):
+        for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
             for df in diskfiles:
                 suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
 
@@ -879,7 +879,7 @@ class TestSsyncReplication(TestBaseSsync):
         rx_tombstones['o7'][0].delete(next(self.ts_iter))
 
         suffixes = set()
-        for diskfiles in (tx_objs.values() + tx_tombstones.values()):
+        for diskfiles in list(tx_objs.values()) + list(tx_tombstones.values()):
             for df in diskfiles:
                 suffixes.add(os.path.basename(os.path.dirname(df._datadir)))
 
From bf17d968008addadfb06fe35e83ac9de38ba2af4 Mon Sep 17 00:00:00 2001
From: Tim Burke
Date: Mon, 25 Jul 2016 15:02:03 -0700
Subject: [PATCH 089/156] Use extract_swift_bytes in
 override_bytes_from_content_type

About half the logic was the same, and it seems better to just
implement that once.

Change-Id: I350da34ef7a3cd0cb74f585f4691992ae64c7eab
---
 swift/common/utils.py | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/swift/common/utils.py b/swift/common/utils.py
index 7822363e29..cedafecef4 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -3429,17 +3429,14 @@ def override_bytes_from_content_type(listing_dict, logger=None):
     Takes a dict from a container listing and overrides the content_type,
     bytes fields if swift_bytes is set.
     """
-    content_type, params = parse_content_type(listing_dict['content_type'])
-    for key, value in params:
-        if key == 'swift_bytes':
-            try:
-                listing_dict['bytes'] = int(value)
-            except ValueError:
-                if logger:
-                    logger.exception("Invalid swift_bytes")
-        else:
-            content_type += ';%s=%s' % (key, value)
-    listing_dict['content_type'] = content_type
+    listing_dict['content_type'], swift_bytes = extract_swift_bytes(
+        listing_dict['content_type'])
+    if swift_bytes is not None:
+        try:
+            listing_dict['bytes'] = int(swift_bytes)
+        except ValueError:
+            if logger:
+                logger.exception("Invalid swift_bytes")
 
 
 def clean_content_type(value):
From c1c18da82c8e8d03c4e5b52910661632f8acfe46 Mon Sep 17 00:00:00 2001
From: cheng
Date: Mon, 25 Jul 2016 13:23:04 +0000
Subject: [PATCH 090/156] check _last_part_moves when
 pretend_min_part_hours_passed

pretend_min_part_hours_passed does things like this:

    self._last_part_moves[part] = 0xff

This will throw an exception if self._last_part_moves is None.
This patch checks self._last_part_moves first to prevent that
exception.

Closes-bug: #1578835

Change-Id: Ic83c7a338b45bfcf61f5ab6100e6db335c3fa81a
---
 swift/common/ring/builder.py      |  4 +++-
 test/unit/cli/test_ringbuilder.py | 17 +++++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py
index ee25ad7caa..028983966b 100644
--- a/swift/common/ring/builder.py
+++ b/swift/common/ring/builder.py
@@ -741,9 +741,11 @@ class RingBuilder(object):
         255 hours ago and last move epoch to 'the beginning of time'. This
         can be used to force a full rebalance on the next call to
         rebalance.
""" + self._last_part_moves_epoch = 0 + if not self._last_part_moves: + return for part in range(self.parts): self._last_part_moves[part] = 0xff - self._last_part_moves_epoch = 0 def get_part_devices(self, part): """ diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 5e86b6fea1..49be6f26ec 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -1718,6 +1718,23 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): ring_invalid_re = re.compile("Ring file .*\.ring\.gz is invalid") self.assertTrue(ring_invalid_re.findall(mock_stdout.getvalue())) + def test_pretend_min_part_hours_passed(self): + self.run_srb("create", 8, 3, 1) + argv_pretend = ["", self.tmpfile, "pretend_min_part_hours_passed"] + # pretend_min_part_hours_passed should success, even not rebalanced + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_pretend) + self.run_srb("add", + "r1z1-10.1.1.1:2345/sda", 100.0, + "r1z1-10.1.1.1:2345/sdb", 100.0, + "r1z1-10.1.1.1:2345/sdc", 100.0) + argv_rebalance = ["", self.tmpfile, "rebalance"] + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_rebalance) + self.run_srb("add", "r1z1-10.1.1.1:2345/sdd", 100.0) + # rebalance fail without pretend_min_part_hours_passed + self.assertSystemExit(EXIT_WARNING, ringbuilder.main, argv_rebalance) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_pretend) + self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv_rebalance) + def test_rebalance(self): self.create_sample_ring() argv = ["", self.tmpfile, "rebalance", "3"] From 699953508ad1fd82c221e57bccfb1de8bf7a7e31 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Thu, 9 Jun 2016 06:17:22 +0000 Subject: [PATCH 091/156] Add doc entry to check partition count An high or increasing partition count due to storing handoffs can have some severe side-effects, and replication might never be able to catch up. This patch adds a note to the admin_guide how to check this. Change-Id: Ib4e161d68f1a82236dbf5fac13ef9a13ac4bbf18 --- doc/source/admin_guide.rst | 81 +++++++++++++++++++++++++++++++++++++- 1 file changed, 80 insertions(+), 1 deletion(-) diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst index 4a5c2db3a4..91ee2d00c3 100644 --- a/doc/source/admin_guide.rst +++ b/doc/source/admin_guide.rst @@ -617,13 +617,90 @@ have 6 replicas in region 1. You should be aware that, if you have data coming into SF faster than -your link to NY can transfer it, then your cluster's data distribution +your replicators are transferring it to NY, then your cluster's data distribution will get worse and worse over time as objects pile up in SF. If this happens, it is recommended to disable write_affinity and simply let object PUTs traverse the WAN link, as that will naturally limit the object growth rate to what your WAN link can handle. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Checking handoff partition distribution +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can check if handoff partitions are piling up on a server by +comparing the expected number of partitions with the actual number on +your disks. 
First get the number of partitions that are currently
+assigned to a server using the ``dispersion`` command from
+``swift-ring-builder``::
+
+    swift-ring-builder sample.builder dispersion --verbose
+    Dispersion is 0.000000, Balance is 0.000000, Overload is 0.00%
+    Required overload is 0.000000%
+    --------------------------------------------------------------------------
+    Tier                    Parts      %    Max    0     1     2     3
+    --------------------------------------------------------------------------
+    r1                       8192   0.00      2    0     0  8192     0
+    r1z1                     4096   0.00      1 4096  4096     0     0
+    r1z1-172.16.10.1         4096   0.00      1 4096  4096     0     0
+    r1z1-172.16.10.1/sda1    4096   0.00      1 4096  4096     0     0
+    r1z2                     4096   0.00      1 4096  4096     0     0
+    r1z2-172.16.10.2         4096   0.00      1 4096  4096     0     0
+    r1z2-172.16.10.2/sda1    4096   0.00      1 4096  4096     0     0
+    r1z3                     4096   0.00      1 4096  4096     0     0
+    r1z3-172.16.10.3         4096   0.00      1 4096  4096     0     0
+    r1z3-172.16.10.3/sda1    4096   0.00      1 4096  4096     0     0
+    r1z4                     4096   0.00      1 4096  4096     0     0
+    r1z4-172.16.20.4         4096   0.00      1 4096  4096     0     0
+    r1z4-172.16.20.4/sda1    4096   0.00      1 4096  4096     0     0
+    r2                       8192   0.00      2    0  8192     0     0
+    r2z1                     4096   0.00      1 4096  4096     0     0
+    r2z1-172.16.20.1         4096   0.00      1 4096  4096     0     0
+    r2z1-172.16.20.1/sda1    4096   0.00      1 4096  4096     0     0
+    r2z2                     4096   0.00      1 4096  4096     0     0
+    r2z2-172.16.20.2         4096   0.00      1 4096  4096     0     0
+    r2z2-172.16.20.2/sda1    4096   0.00      1 4096  4096     0     0
+
+As you can see from the output, each server should store 4096 partitions, and
+each region should store 8192 partitions. This example used a partition power
+of 13 and 3 replicas.
+
+With write_affinity enabled you should expect a higher number of
+partitions on disk compared to the value reported by the
+swift-ring-builder dispersion command. The number of additional (handoff)
+partitions in region r1 depends on your cluster size, the amount
+of incoming data, as well as the replication speed.
+
+Let's use the example from above with 6 nodes in 2 regions, and write_affinity
+configured to write to region r1 first. `swift-ring-builder` reported that
+each node should store 4096 partitions::
+
+    Expected partitions for region r2: 8192
+    Handoffs stored across 4 nodes in region r1: 8192 / 4 = 2048
+    Maximum number of partitions on each server in region r1: 2048 + 4096 = 6144
+
+The worst case is that handoff partitions in region 1 are populated with
+new object replicas faster than replication is able to move them to
+region 2. In that case you will see ~6144 partitions per server in
+region r1. Your actual number should be lower, between 4096 and 6144
+partitions (preferably on the lower side).
+
+Now count the number of object partitions on a given server in region 1,
+for example on 172.16.10.1. Note that the pathnames might be
+different; `/srv/node/` is the default mount location, and `objects`
+applies only to storage policy 0 (storage policy 1 would use
+`objects-1` and so on)::
+
+    find -L /srv/node/ -maxdepth 3 -type d -wholename "*objects/*" | wc -l
+
+If this number is always on the upper end of the expected partition
+number range (4096 to 6144), or increasing, you should check your
+replication speed and maybe even disable write_affinity.
+Please refer to the next section on how to collect metrics from Swift,
+and especially :ref:`swift-recon -r <recon-replication>` on how to
+check replication stats.
+
+
 --------------------------------
 Cluster Telemetry and Monitoring
 --------------------------------
@@ -748,6 +825,8 @@ This information can also be queried via the swift-recon command line utility::
         Time to wait for a response from a server
   --swiftdir=SWIFTDIR   Default = /etc/swift
 
+.. 
_recon-replication: + For example, to obtain container replication info from all hosts in zone "3":: fhines@ubuntu:~$ swift-recon container -r --zone 3 From a81d60472fc237234bcd4798d9f37d554ac0ba3f Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Tue, 26 Jul 2016 12:36:50 +0200 Subject: [PATCH 092/156] Fix Python 3 issues in diskfile * Fix bytes vs Unicode issues * On Python 3, encode JSON to UTF-8 and decode it from UTF-8 * Open files in binary mode to avoid Unicode issues * test_auditor: use bytes for content, open files in binary mode Change-Id: Ifa84001493cfb57975d3b140b0d7e09020504bca --- swift/obj/diskfile.py | 15 +++++++++++---- test/unit/obj/test_auditor.py | 36 +++++++++++++++++------------------ 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py index 9e69e954f2..620e641086 100644 --- a/swift/obj/diskfile.py +++ b/swift/obj/diskfile.py @@ -49,6 +49,7 @@ from collections import defaultdict from eventlet import Timeout from eventlet.hubs import trampoline +import six from swift import gettext_ as _ from swift.common.constraints import check_mount, check_dir @@ -112,7 +113,7 @@ def read_metadata(fd): :returns: dictionary of metadata """ - metadata = '' + metadata = b'' key = 0 try: while True: @@ -281,7 +282,7 @@ def consolidate_hashes(partition_dir): # Now that all the invalidations are reflected in hashes.pkl, it's # safe to clear out the invalidations file. try: - with open(invalidations_file, 'w') as inv_fh: + with open(invalidations_file, 'wb') as inv_fh: pass except OSError as e: if e.errno != errno.ENOENT: @@ -416,7 +417,11 @@ def get_auditor_status(datadir_path, logger, auditor_type): datadir_path, "auditor_status_%s.json" % auditor_type) status = {} try: - with open(auditor_status) as statusfile: + if six.PY3: + statusfile = open(auditor_status, encoding='utf8') + else: + statusfile = open(auditor_status, 'rb') + with statusfile: status = statusfile.read() except (OSError, IOError) as e: if e.errno != errno.ENOENT and logger: @@ -435,6 +440,8 @@ def get_auditor_status(datadir_path, logger, auditor_type): def update_auditor_status(datadir_path, logger, partitions, auditor_type): status = json.dumps({'partitions': partitions}) + if six.PY3: + status = status.encode('utf8') auditor_status = os.path.join( datadir_path, "auditor_status_%s.json" % auditor_type) try: @@ -2516,7 +2523,7 @@ class ECDiskFileWriter(BaseDiskFileWriter): exc = None try: try: - with open(durable_file_path, 'w') as _fp: + with open(durable_file_path, 'wb') as _fp: fsync(_fp.fileno()) fsync_dir(self._datadir) except (OSError, IOError) as err: diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 78aa08a246..afc3657431 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -153,7 +153,7 @@ class TestAuditor(unittest.TestCase): def run_tests(disk_file): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices) - data = '0' * 1024 + data = b'0' * 1024 etag = md5() with disk_file.create() as writer: writer.write(data) @@ -174,7 +174,7 @@ class TestAuditor(unittest.TestCase): policy=disk_file.policy)) self.assertEqual(auditor_worker.quarantines, pre_quarantines) - os.write(writer._fd, 'extra_data') + os.write(writer._fd, b'extra_data') auditor_worker.object_audit( AuditLocation(disk_file._datadir, 'sda', '0', @@ -188,7 +188,7 @@ class TestAuditor(unittest.TestCase): def test_object_audit_diff_data(self): auditor_worker = auditor.AuditorWorker(self.conf, self.logger, 
self.rcache, self.devices) - data = '0' * 1024 + data = b'0' * 1024 etag = md5() timestamp = str(normalize_timestamp(time.time())) with self.disk_file.create() as writer: @@ -212,9 +212,7 @@ class TestAuditor(unittest.TestCase): AuditLocation(self.disk_file._datadir, 'sda', '0', policy=POLICIES.legacy)) self.assertEqual(auditor_worker.quarantines, pre_quarantines) - etag = md5() - etag.update('1' + '0' * 1023) - etag = etag.hexdigest() + etag = md5(b'1' + b'0' * 1023).hexdigest() metadata['ETag'] = etag with self.disk_file.create() as writer: @@ -231,8 +229,8 @@ class TestAuditor(unittest.TestCase): timestamp = str(normalize_timestamp(time.time())) path = os.path.join(self.disk_file._datadir, timestamp + '.data') mkdirs(self.disk_file._datadir) - fp = open(path, 'w') - fp.write('0' * 1024) + fp = open(path, 'wb') + fp.write(b'0' * 1024) fp.close() invalidate_hash(os.path.dirname(self.disk_file._datadir)) auditor_worker = auditor.AuditorWorker(self.conf, self.logger, @@ -362,7 +360,7 @@ class TestAuditor(unittest.TestCase): location = AuditLocation(self.disk_file._datadir, 'sda', '0', policy=self.disk_file.policy) - data = 'VERIFY' + data = b'VERIFY' etag = md5() timestamp = str(normalize_timestamp(time.time())) with self.disk_file.create() as writer: @@ -440,7 +438,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.last_logged = time.time() timestamp = str(normalize_timestamp(time.time())) pre_errors = auditor_worker.errors - data = '0' * 1024 + data = b'0' * 1024 etag = md5() with self.disk_file.create() as writer: writer.write(data) @@ -464,7 +462,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.log_time = 0 timestamp = str(normalize_timestamp(time.time())) pre_quarantines = auditor_worker.quarantines - data = '0' * 1024 + data = b'0' * 1024 def write_file(df): with df.create() as writer: @@ -491,7 +489,7 @@ class TestAuditor(unittest.TestCase): self.assertEqual(auditor_worker.stats_buckets[10240], 0) # pick up some additional code coverage, large file - data = '0' * 1024 * 1024 + data = b'0' * 1024 * 1024 for df in (self.disk_file, self.disk_file_ec): with df.create() as writer: writer.write(data) @@ -545,7 +543,7 @@ class TestAuditor(unittest.TestCase): pre_quarantines = auditor_worker.quarantines # pretend that we logged (and reset counters) just now auditor_worker.last_logged = time.time() - data = '0' * 1024 + data = b'0' * 1024 etag = md5() with self.disk_file.create() as writer: writer.write(data) @@ -557,7 +555,7 @@ class TestAuditor(unittest.TestCase): 'Content-Length': str(os.fstat(writer._fd).st_size), } writer.put(metadata) - os.write(writer._fd, 'extra_data') + os.write(writer._fd, b'extra_data') writer.commit(Timestamp(timestamp)) auditor_worker.audit_all_objects() self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) @@ -569,7 +567,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.last_logged = time.time() timestamp = str(normalize_timestamp(time.time())) pre_quarantines = auditor_worker.quarantines - data = '0' * 10 + data = b'0' * 10 etag = md5() with self.disk_file.create() as writer: writer.write(data) @@ -585,7 +583,7 @@ class TestAuditor(unittest.TestCase): auditor_worker.audit_all_objects() self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'ob', policy=POLICIES.legacy) - data = '1' * 10 + data = b'1' * 10 etag = md5() with self.disk_file.create() as writer: writer.write(data) @@ -598,14 +596,14 @@ class TestAuditor(unittest.TestCase): } writer.put(metadata) writer.commit(Timestamp(timestamp)) - os.write(writer._fd, 
'extra_data') + os.write(writer._fd, b'extra_data') auditor_worker.audit_all_objects() self.assertEqual(auditor_worker.quarantines, pre_quarantines + 1) def test_object_run_fast_track_non_zero(self): self.auditor = auditor.ObjectAuditor(self.conf) self.auditor.log_time = 0 - data = '0' * 1024 + data = b'0' * 1024 etag = md5() with self.disk_file.create() as writer: writer.write(data) @@ -620,7 +618,7 @@ class TestAuditor(unittest.TestCase): writer.put(metadata) writer.commit(Timestamp(timestamp)) etag = md5() - etag.update('1' + '0' * 1023) + etag.update(b'1' + b'0' * 1023) etag = etag.hexdigest() metadata['ETag'] = etag write_metadata(writer._fd, metadata) From 7958638e8f7800813fe8ec5bb860c14e3b81c6c0 Mon Sep 17 00:00:00 2001 From: Ellen Leahy Date: Thu, 21 Jul 2016 11:27:59 +0100 Subject: [PATCH 093/156] Added quotes to example echo in swift-temp-url If the curl command is used exactly as in the help, the ampersand in the signature is interpreted as an operator and the curl command breaks. I am aware of developers who have wasted a lot of time because of this. Change-Id: I6468c9a098b56db8242a2cf2c23b7a4857bd8574 --- bin/swift-temp-url | 8 ++++---- doc/source/api/temporary_url_middleware.rst | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/bin/swift-temp-url b/bin/swift-temp-url index 34445e8be8..09b042fede 100755 --- a/bin/swift-temp-url +++ b/bin/swift-temp-url @@ -40,13 +40,13 @@ if __name__ == '__main__': print() print('This can be used to form a URL to give out for the access ') print('allowed. For example:') - print(' echo https://swift-cluster.example.com`%s GET 60 ' - '/v1/AUTH_account/c/o mykey`' % prog) + print(' echo \\"https://swift-cluster.example.com`%s GET 60 ' + '/v1/AUTH_account/c/o mykey`\\"' % prog) print() print('Might output:') - print(' https://swift-cluster.example.com/v1/AUTH_account/c/o?' + print(' "https://swift-cluster.example.com/v1/AUTH_account/c/o?' 'temp_url_sig=34d49efc32fe6e3082e411eeeb85bd8a&' - 'temp_url_expires=1323482948') + 'temp_url_expires=1323482948"') exit(1) method, seconds, path, key = argv[1:5] try: diff --git a/doc/source/api/temporary_url_middleware.rst b/doc/source/api/temporary_url_middleware.rst index 093bd528ec..1debf7d542 100644 --- a/doc/source/api/temporary_url_middleware.rst +++ b/doc/source/api/temporary_url_middleware.rst @@ -178,3 +178,8 @@ storage host name. For example, prefix the path with https://swift-cluster.example.com/v1/my_account/container/object ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91 &temp_url_expires=1374497657 + +Note that if the above example is copied exactly, and used in a command +shell, then the ampersand is interpreted as an operator and the URL +will be truncated. Enclose the URL in quotation marks to avoid this. + From 77e476376c954d041dfb3f0cca8c0557f8482964 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 26 Jul 2016 17:44:56 +0100 Subject: [PATCH 094/156] Mention SWIFT_TEST_DEBUG_LOGS in development guide Change-Id: If764de0a28f5afb858b3e892b35fe5fa147a0650 --- doc/source/development_guidelines.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 6f0012c35f..401e31922e 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -109,6 +109,8 @@ set using environment variables: - the proxy-server ``object_post_as_copy`` option may be set using the environment variable ``SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY``. 
+- logging to stdout may be enabled by setting ``SWIFT_TEST_DEBUG_LOGS``. + For example, this command would run the in-process mode functional tests with the proxy-server using object_post_as_copy=False (the 'fast-POST' mode):: From a97537e158a78b7c49f11aef2756fa65093a05dd Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Mon, 25 Jul 2016 12:22:20 -0700 Subject: [PATCH 095/156] Split up backend/sysmeta header/footer preference tests The change in preference to the Related-Change is simply to prefer sysmeta overrides in the headers to backend overrides in the footers: Before: sysmeta footers > backend footers > sysmeta headers > backend headers After: sysmeta footers > sysmeta headers > backend footers > backend headers This change just breaks up the tests to try to make it more obvious what already worked and what has changed. The justification seems to be that overrides in sysmeta headers only work on policies that don't send backend footers, but sysmeta overrides should always have a higher preference than backend overrides. Related-Change: Idb40361ac72da51e1390dff690723dbc2c653a13 Change-Id: I074fbecb6440fb1d04279cd892d38d2acc44b47d --- test/unit/obj/test_server.py | 155 +++++++++++++++++++---------------- 1 file changed, 84 insertions(+), 71 deletions(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 536a746037..540bd0e2fa 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -121,6 +121,7 @@ class TestObjectController(unittest.TestCase): self.object_controller.logger) self.logger = debug_logger('test-object-controller') + self.ts = make_timestamp_iter() def tearDown(self): """Tear down for testing swift.object.server.ObjectController""" @@ -1435,85 +1436,97 @@ class TestObjectController(unittest.TestCase): with open(objfile) as fh: self.assertEqual(fh.read(), "obj data") - def test_PUT_container_override_etag_in_footer(self): - ts_iter = make_timestamp_iter() + def _check_container_override_etag_preference(self, override_headers, + override_footers): + def mock_container_update(ctlr, op, account, container, obj, req, + headers_out, objdevice, policy): + calls_made.append((headers_out, policy)) + calls_made = [] + ts_put = next(self.ts) - def do_test(override_headers, override_footers): - def mock_container_update(ctlr, op, account, container, obj, req, - headers_out, objdevice, policy): - calls_made.append((headers_out, policy)) - calls_made = [] - ts_put = next(ts_iter) + headers = { + 'X-Timestamp': ts_put.internal, + 'Content-Type': 'text/plain', + 'Transfer-Encoding': 'chunked', + 'Etag': 'other-etag', + 'X-Backend-Obj-Metadata-Footer': 'yes', + 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'} + headers.update(override_headers) + req = Request.blank( + '/sda1/p/a/c/o', headers=headers, + environ={'REQUEST_METHOD': 'PUT'}) - headers = { - 'X-Timestamp': ts_put.internal, - 'Content-Type': 'text/plain', - 'Transfer-Encoding': 'chunked', - 'Etag': 'other-etag', - 'X-Backend-Obj-Metadata-Footer': 'yes', - 'X-Backend-Obj-Multipart-Mime-Boundary': 'boundary'} - headers.update(override_headers) - req = Request.blank( - '/sda1/p/a/c/o', headers=headers, - environ={'REQUEST_METHOD': 'PUT'}) + obj_etag = md5("obj data").hexdigest() + footers = {'Etag': obj_etag} + footers.update(override_footers) + footer_meta = json.dumps(footers) + footer_meta_cksum = md5(footer_meta).hexdigest() - obj_etag = md5("obj data").hexdigest() - footers = {'Etag': obj_etag} - footers.update(override_footers) - footer_meta = json.dumps(footers) - 
footer_meta_cksum = md5(footer_meta).hexdigest() + req.body = "\r\n".join(( + "--boundary", + "", + "obj data", + "--boundary", + "Content-MD5: " + footer_meta_cksum, + "", + footer_meta, + "--boundary--", + )) + req.headers.pop("Content-Length", None) - req.body = "\r\n".join(( - "--boundary", - "", - "obj data", - "--boundary", - "Content-MD5: " + footer_meta_cksum, - "", - footer_meta, - "--boundary--", - )) - req.headers.pop("Content-Length", None) + with mock.patch( + 'swift.obj.server.ObjectController.container_update', + mock_container_update): + resp = req.get_response(self.object_controller) + self.assertEqual(resp.etag, obj_etag) + self.assertEqual(resp.status_int, 201) + self.assertEqual(1, len(calls_made)) + self.assertEqual({ + 'X-Size': str(len('obj data')), + 'X-Etag': 'update-etag', + 'X-Content-Type': 'text/plain', + 'X-Timestamp': ts_put.internal, + }, calls_made[0][0]) + self.assertEqual(POLICIES[0], calls_made[0][1]) - with mock.patch( - 'swift.obj.server.ObjectController.container_update', - mock_container_update): - resp = req.get_response(self.object_controller) - self.assertEqual(resp.etag, obj_etag) - self.assertEqual(resp.status_int, 201) - self.assertEqual(1, len(calls_made)) - self.assertEqual({ - 'X-Size': str(len('obj data')), - 'X-Etag': 'update-etag', - 'X-Content-Type': 'text/plain', - 'X-Timestamp': ts_put.internal, - }, calls_made[0][0]) - self.assertEqual(POLICIES[0], calls_made[0][1]) + def test_lone_header_footer_override_preference(self): + self._check_container_override_etag_preference( + {'X-Backend-Container-Update-Override-Etag': 'update-etag'}, {}) + self._check_container_override_etag_preference( + {}, {'X-Backend-Container-Update-Override-Etag': 'update-etag'}) + self._check_container_override_etag_preference( + {'X-Object-Sysmeta-Container-Update-Override-Etag': + 'update-etag'}, {}) + self._check_container_override_etag_preference( + {}, {'X-Object-Sysmeta-Container-Update-Override-Etag': + 'update-etag'}), - # lone headers/footers work - do_test({'X-Backend-Container-Update-Override-Etag': 'update-etag'}, - {}) - do_test({}, - {'X-Backend-Container-Update-Override-Etag': 'update-etag'}) - do_test({'X-Object-Sysmeta-Container-Update-Override-Etag': - 'update-etag'}, - {}) - do_test({}, - {'X-Object-Sysmeta-Container-Update-Override-Etag': + def test_footer_trumps_header(self): + self._check_container_override_etag_preference( + {'X-Backend-Container-Update-Override-Etag': 'ignored-etag'}, + {'X-Backend-Container-Update-Override-Etag': 'update-etag'}) + self._check_container_override_etag_preference( + {'X-Object-Sysmeta-Container-Update-Override-Etag': + 'ignored-etag'}, + {'X-Object-Sysmeta-Container-Update-Override-Etag': + 'update-etag'}) + + def test_sysmeta_trumps_backend(self): + self._check_container_override_etag_preference( + {'X-Backend-Container-Update-Override-Etag': 'ignored-etag', + 'X-Object-Sysmeta-Container-Update-Override-Etag': + 'update-etag'}, {}) + self._check_container_override_etag_preference( + {}, {'X-Backend-Container-Update-Override-Etag': 'ignored-etag', + 'X-Object-Sysmeta-Container-Update-Override-Etag': 'update-etag'}) - # footer trumps header - do_test({'X-Backend-Container-Update-Override-Etag': 'ignored-etag'}, - {'X-Backend-Container-Update-Override-Etag': 'update-etag'}) - do_test({'X-Object-Sysmeta-Container-Update-Override-Etag': - 'ignored-etag'}, - {'X-Object-Sysmeta-Container-Update-Override-Etag': - 'update-etag'}) - - # but sysmeta header trumps backend footer - 
do_test({'X-Object-Sysmeta-Container-Update-Override-Etag':
-                 'update-etag'},
-                {'X-Backend-Container-Update-Override-Etag': 'ignored-etag'})
+    def test_sysmeta_header_trumps_backend_footer(self):
+        headers = {'X-Object-Sysmeta-Container-Update-Override-Etag':
+                   'update-etag'}
+        footers = {'X-Backend-Container-Update-Override-Etag':
+                   'ignored-etag'}
+        self._check_container_override_etag_preference(headers, footers)
 
     def test_PUT_etag_in_footer_mismatch(self):
         timestamp = normalize_timestamp(time())
         req = Request.blank(
From cf1f7af38751f280906954b7a01ee4f273336264 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Mon, 25 Jul 2016 12:30:34 -0700
Subject: [PATCH 096/156] Use self.ts more consistently in obj.test_server

A make_timestamp_iter was added into setUp in the Related-Change.
There were a couple of different tests using timestamp iters in
different ways; here we consistently apply the use of next(self.ts),
which I believe is becoming more common/standardized in unit tests.

Related-Change: I074fbecb6440fb1d04279cd892d38d2acc44b47d
Change-Id: Ib6b883afec242355ae08c50c1e685a20e5efadc7
---
 test/unit/obj/test_server.py | 68 ++++++++++++++----------------------
 1 file changed, 26 insertions(+), 42 deletions(-)

diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py
index 540bd0e2fa..ce1796f2fd 100755
--- a/test/unit/obj/test_server.py
+++ b/test/unit/obj/test_server.py
@@ -32,7 +32,6 @@ from shutil import rmtree
 from time import gmtime, strftime, time, struct_time
 from tempfile import mkdtemp
 from hashlib import md5
-import itertools
 import tempfile
 from collections import defaultdict
 from contextlib import contextmanager
@@ -345,8 +344,7 @@ class TestObjectController(unittest.TestCase):
         self.assertEqual(resp.headers['X-Backend-Timestamp'], orig_timestamp)
 
     def test_POST_conflicts_with_later_POST(self):
-        ts_iter = make_timestamp_iter()
-        t_put = next(ts_iter).internal
+        t_put = next(self.ts).internal
         req = Request.blank('/sda1/p/a/c/o',
                             environ={'REQUEST_METHOD': 'PUT'},
                             headers={'X-Timestamp': t_put,
@@ -355,8 +353,8 @@
         resp = req.get_response(self.object_controller)
         self.assertEqual(resp.status_int, 201)
 
-        t_post1 = next(ts_iter).internal
-        t_post2 = next(ts_iter).internal
+        t_post1 = next(self.ts).internal
+        t_post2 = next(self.ts).internal
         req = Request.blank('/sda1/p/a/c/o',
                             environ={'REQUEST_METHOD': 'POST'},
                             headers={'X-Timestamp': t_post2})
@@ -499,8 +497,7 @@
 
     def _test_POST_container_updates(self, policy, update_etag=None):
         # Test that POST requests result in correct calls to container_update
-        ts_iter = (Timestamp(t) for t in itertools.count(int(time())))
-        t = [ts_iter.next() for _ in range(0, 5)]
+        t = [next(self.ts) for _ in range(0, 5)]
         calls_made = []
         update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6'
 
@@ -714,14 +711,13 @@
     def test_POST_container_updates_precedence(self):
         # Verify correct etag and size being sent with container updates for a
- ts_iter = make_timestamp_iter() def do_test(body, headers, policy): def mock_container_update(ctlr, op, account, container, obj, req, headers_out, objdevice, policy): calls_made.append((headers_out, policy)) calls_made = [] - ts_put = next(ts_iter) + ts_put = next(self.ts) # make PUT with given headers and verify correct etag is sent in # container update @@ -755,7 +751,7 @@ class TestObjectController(unittest.TestCase): # make a POST and verify container update has the same etag calls_made = [] - ts_post = next(ts_iter) + ts_post = next(self.ts) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': ts_post.internal, @@ -814,8 +810,7 @@ class TestObjectController(unittest.TestCase): raise Exception('test') device_dir = os.path.join(self.testdir, 'sda1') - ts_iter = make_timestamp_iter() - t_put = ts_iter.next() + t_put = next(self.ts) update_etag = update_etag or '098f6bcd4621d373cade4e832627b4f6' put_headers = { @@ -868,7 +863,7 @@ class TestObjectController(unittest.TestCase): # POST with newer metadata returns success and container update # is expected - t_post = ts_iter.next() + t_post = next(self.ts) post_headers = { 'X-Trans-Id': 'post_trans_id', 'X-Timestamp': t_post.internal, @@ -1163,9 +1158,8 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) def test_PUT_overwrite_to_older_ts_succcess(self): - ts_iter = make_timestamp_iter() - old_timestamp = next(ts_iter) - new_timestamp = next(ts_iter) + old_timestamp = next(self.ts) + new_timestamp = next(self.ts) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, @@ -1201,9 +1195,8 @@ class TestObjectController(unittest.TestCase): 'Content-Encoding': 'gzip'}) def test_PUT_overwrite_to_newer_ts_failed(self): - ts_iter = make_timestamp_iter() - old_timestamp = next(ts_iter) - new_timestamp = next(ts_iter) + old_timestamp = next(self.ts) + new_timestamp = next(self.ts) req = Request.blank( '/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'DELETE'}, @@ -1800,8 +1793,7 @@ class TestObjectController(unittest.TestCase): 'X-Object-Transient-Sysmeta-Foo': 'Bar'}) def test_PUT_succeeds_with_later_POST(self): - ts_iter = make_timestamp_iter() - t_put = next(ts_iter).internal + t_put = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': t_put, @@ -1810,8 +1802,8 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) - t_put2 = next(ts_iter).internal - t_post = next(ts_iter).internal + t_put2 = next(self.ts).internal + t_post = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, headers={'X-Timestamp': t_post}) @@ -3332,8 +3324,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(len(os.listdir(os.path.dirname(ts_1003_file))), 1) def test_DELETE_succeeds_with_later_POST(self): - ts_iter = make_timestamp_iter() - t_put = next(ts_iter).internal + t_put = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'PUT'}, headers={'X-Timestamp': t_put, @@ -3342,8 +3333,8 @@ class TestObjectController(unittest.TestCase): resp = req.get_response(self.object_controller) self.assertEqual(resp.status_int, 201) - t_delete = next(ts_iter).internal - t_post = next(ts_iter).internal + t_delete = next(self.ts).internal + t_post = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'POST'}, 
headers={'X-Timestamp': t_post}) @@ -3492,14 +3483,12 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 507) def test_object_update_with_offset(self): - ts = (utils.Timestamp(t).internal for t in - itertools.count(int(time()))) container_updates = [] def capture_updates(ip, port, method, path, headers, *args, **kwargs): container_updates.append((ip, port, method, path, headers)) # create a new object - create_timestamp = next(ts) + create_timestamp = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test1', headers={'X-Timestamp': create_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -3576,7 +3565,7 @@ class TestObjectController(unittest.TestCase): offset_timestamp) self.assertEqual(resp.body, 'test2') # now overwrite with a newer time - overwrite_timestamp = next(ts) + overwrite_timestamp = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', method='PUT', body='test3', headers={'X-Timestamp': overwrite_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -3646,7 +3635,7 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.headers['X-Timestamp'], None) self.assertEqual(resp.headers['X-Backend-Timestamp'], offset_delete) # and one more delete with a newer timestamp - delete_timestamp = next(ts) + delete_timestamp = next(self.ts).internal req = Request.blank('/sda1/p/a/c/o', method='DELETE', headers={'X-Timestamp': delete_timestamp, 'X-Container-Host': '10.0.0.1:8080', @@ -4281,17 +4270,15 @@ class TestObjectController(unittest.TestCase): def test_object_delete_at_async_update(self): policy = random.choice(list(POLICIES)) - ts = (utils.Timestamp(t) for t in - itertools.count(int(time()))) container_updates = [] def capture_updates(ip, port, method, path, headers, *args, **kwargs): container_updates.append((ip, port, method, path, headers)) - put_timestamp = next(ts).internal + put_timestamp = next(self.ts).internal delete_at_timestamp = utils.normalize_delete_at_timestamp( - next(ts).normal) + next(self.ts).normal) delete_at_container = ( int(delete_at_timestamp) / self.object_controller.expiring_objects_container_divisor * @@ -4571,7 +4558,6 @@ class TestObjectController(unittest.TestCase): 'referer': 'PUT http://localhost/sda1/0/a/c/o'})) def test_PUT_container_update_overrides(self): - ts_iter = make_timestamp_iter() def do_test(override_headers): container_updates = [] @@ -4580,7 +4566,7 @@ class TestObjectController(unittest.TestCase): ip, port, method, path, headers, *args, **kwargs): container_updates.append((ip, port, method, path, headers)) - ts_put = next(ts_iter) + ts_put = next(self.ts) headers = { 'X-Timestamp': ts_put.internal, 'X-Trans-Id': '123', @@ -6105,8 +6091,6 @@ class TestObjectController(unittest.TestCase): def test_storage_policy_index_is_validated(self): # sanity check that index for existing policy is ok - ts = (utils.Timestamp(t).internal for t in - itertools.count(int(time()))) methods = ('PUT', 'POST', 'GET', 'HEAD', 'REPLICATE', 'DELETE') valid_indices = sorted([int(policy) for policy in POLICIES]) for index in valid_indices: @@ -6116,7 +6100,7 @@ class TestObjectController(unittest.TestCase): self.assertFalse(os.path.isdir(object_dir)) for method in methods: headers = { - 'X-Timestamp': next(ts), + 'X-Timestamp': next(self.ts).internal, 'Content-Type': 'application/x-test', 'X-Backend-Storage-Policy-Index': index} if POLICIES[index].policy_type == EC_POLICY: @@ -6136,7 +6120,7 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o', 
environ={'REQUEST_METHOD': method}, headers={ - 'X-Timestamp': next(ts), + 'X-Timestamp': next(self.ts).internal, 'Content-Type': 'application/x-test', 'X-Backend-Storage-Policy-Index': index}) req.body = 'VERIFY' From 7cc2392611cccb48100cafc1b4ec5fdf03855b3d Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 26 Jul 2016 18:12:26 +0100 Subject: [PATCH 097/156] Document how to run a single functional test Change-Id: Icabc5a8316f5e8fd887bb42358ad03e9c43d0765 --- doc/source/development_guidelines.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 6f0012c35f..2e9a6e2f23 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -90,6 +90,11 @@ For example, this command would run the functional tests using policy SWIFT_TEST_POLICY=silver tox -e func +To run a single functional test, use the ``--no-discover`` option together with +a path to a specific test method, for example:: + + tox -e func -- --no-discover test.functional.tests.TestFile.testCopy + In-process functional testing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From fbf0e499171d0880d0dc745767ac3a531d08c0c6 Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Tue, 26 Jul 2016 19:42:46 +0200 Subject: [PATCH 098/156] monkey_patch_mimetools() now does nothing on py3 The mimetools module has been removed from Python 3: modify monkey_patch_mimetools() to do nothing on Python 3. Skip test_monkey_patch_mimetools() on Python 3. Change-Id: I50f01ec159efedbb4df759ddd1e13928ac28fba6 --- swift/common/wsgi.py | 6 +++++- test/unit/common/test_wsgi.py | 14 +++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 88e61f2293..ccd4287f2c 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -22,7 +22,6 @@ import inspect import os import signal import time -import mimetools from swift import gettext_ as _ from textwrap import dedent @@ -35,6 +34,8 @@ import six from six import BytesIO from six import StringIO from six.moves.urllib.parse import unquote +if six.PY2: + import mimetools from swift.common import utils, constraints from swift.common.storage_policy import BindPortsCache @@ -147,6 +148,9 @@ def monkey_patch_mimetools(): mimetools.Message defaults content-type to "text/plain" This changes it to default to None, so we can detect missing headers. 
""" + if six.PY3: + # The mimetools has been removed from Python 3 + return orig_parsetype = mimetools.Message.parsetype diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index cc33833714..4917dbeaa7 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -17,7 +17,6 @@ import errno import logging -import mimetools import socket import unittest import os @@ -25,11 +24,15 @@ from textwrap import dedent from collections import defaultdict from eventlet import listen +import six from six import BytesIO from six import StringIO from six.moves.urllib.parse import quote +if six.PY2: + import mimetools import mock +import nose import swift.common.middleware.catch_errors import swift.common.middleware.gatekeeper @@ -65,12 +68,17 @@ class TestWSGI(unittest.TestCase): def setUp(self): utils.HASH_PATH_PREFIX = 'startcap' - self._orig_parsetype = mimetools.Message.parsetype + if six.PY2: + self._orig_parsetype = mimetools.Message.parsetype def tearDown(self): - mimetools.Message.parsetype = self._orig_parsetype + if six.PY2: + mimetools.Message.parsetype = self._orig_parsetype def test_monkey_patch_mimetools(self): + if six.PY3: + raise nose.SkipTest('test specific to Python 2') + sio = StringIO('blah') self.assertEqual(mimetools.Message(sio).type, 'text/plain') sio = StringIO('blah') From 149a331f1cd2e2488bb0aab3c06c4c70cf74ece6 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Wed, 27 Jul 2016 09:08:11 +0100 Subject: [PATCH 099/156] Additional test for container override etag preferences Adds one more scenario to the tests for preference of container etag override values in sysmeta or backend headers and footers. Change-Id: Iacdaec8c98c7001029163a9d50321a13dc8d5a19 Related-Change: I074fbecb6440fb1d04279cd892d38d2acc44b47d --- test/unit/obj/test_server.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 540bd0e2fa..1ee466ff74 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1489,7 +1489,7 @@ class TestObjectController(unittest.TestCase): }, calls_made[0][0]) self.assertEqual(POLICIES[0], calls_made[0][1]) - def test_lone_header_footer_override_preference(self): + def test_override_etag_lone_header_footer(self): self._check_container_override_etag_preference( {'X-Backend-Container-Update-Override-Etag': 'update-etag'}, {}) self._check_container_override_etag_preference( @@ -1501,7 +1501,7 @@ class TestObjectController(unittest.TestCase): {}, {'X-Object-Sysmeta-Container-Update-Override-Etag': 'update-etag'}), - def test_footer_trumps_header(self): + def test_override_etag_footer_trumps_header(self): self._check_container_override_etag_preference( {'X-Backend-Container-Update-Override-Etag': 'ignored-etag'}, {'X-Backend-Container-Update-Override-Etag': 'update-etag'}) @@ -1511,7 +1511,7 @@ class TestObjectController(unittest.TestCase): {'X-Object-Sysmeta-Container-Update-Override-Etag': 'update-etag'}) - def test_sysmeta_trumps_backend(self): + def test_override_etag_sysmeta_trumps_backend(self): self._check_container_override_etag_preference( {'X-Backend-Container-Update-Override-Etag': 'ignored-etag', 'X-Object-Sysmeta-Container-Update-Override-Etag': @@ -1521,13 +1521,20 @@ class TestObjectController(unittest.TestCase): 'X-Object-Sysmeta-Container-Update-Override-Etag': 'update-etag'}) - def test_sysmeta_header_trumps_backend_footer(self): + def test_override_etag_sysmeta_header_trumps_backend_footer(self): headers = 
{'X-Object-Sysmeta-Container-Update-Override-Etag': 'update-etag'} footers = {'X-Backend-Container-Update-Override-Etag': 'ignored-etag'} self._check_container_override_etag_preference(headers, footers) + def test_override_etag_sysmeta_footer_trumps_backend_header(self): + headers = {'X-Backend-Container-Update-Override-Etag': + 'ignored-etag'} + footers = {'X-Object-Sysmeta-Container-Update-Override-Etag': + 'update-etag'} + self._check_container_override_etag_preference(headers, footers) + def test_PUT_etag_in_footer_mismatch(self): timestamp = normalize_timestamp(time()) req = Request.blank( From 6575ac0ea232f46bdea2a0ae1ad847b8d9062ac2 Mon Sep 17 00:00:00 2001 From: zheng yin Date: Thu, 28 Jul 2016 14:44:56 +0800 Subject: [PATCH 100/156] Simple code and improve the readability of the code In a function,when a variable is defined at the front,it is unnecessary to define the variable again. Modify the variable name to improve the readability of the code. For example: container_nodes=container_info["nodes"] instead of containers=container_info["nodes"] Change-Id: I30b887fed08ff650076ce8ce33b1502bc1795b3f --- swift/proxy/controllers/obj.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index a948bbb6d2..574d16c8b1 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -218,13 +218,13 @@ class BaseObjectController(Controller): container_info = self.container_info( self.account_name, self.container_name, req) container_partition = container_info['partition'] - containers = container_info['nodes'] + container_nodes = container_info['nodes'] req.acl = container_info['write_acl'] if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp - if not containers: + if not container_nodes: return HTTPNotFound(request=req) req, delete_at_container, delete_at_part, \ @@ -241,7 +241,7 @@ class BaseObjectController(Controller): req.headers['X-Timestamp'] = Timestamp(time.time()).internal headers = self._backend_requests( - req, len(nodes), container_partition, containers, + req, len(nodes), container_partition, container_nodes, delete_at_container, delete_at_part, delete_at_nodes) return self._post_object(req, obj_ring, partition, headers) @@ -652,7 +652,7 @@ class BaseObjectController(Controller): if aresp: return aresp - if not container_info['nodes']: + if not container_nodes: return HTTPNotFound(request=req) # update content type in case it is missing @@ -702,14 +702,14 @@ class BaseObjectController(Controller): # pass the policy index to storage nodes via req header req.headers['X-Backend-Storage-Policy-Index'] = policy_index container_partition = container_info['partition'] - containers = container_info['nodes'] + container_nodes = container_info['nodes'] req.acl = container_info['write_acl'] req.environ['swift_sync_key'] = container_info['sync_key'] if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp - if not containers: + if not container_nodes: return HTTPNotFound(request=req) partition, nodes = obj_ring.get_nodes( self.account_name, self.container_name, self.object_name) @@ -727,7 +727,7 @@ class BaseObjectController(Controller): req.headers['X-Timestamp'] = Timestamp(time.time()).internal headers = self._backend_requests( - req, len(nodes), container_partition, containers) + req, len(nodes), container_partition, container_nodes) return self._delete_object(req, obj_ring, 
partition, headers) From 8bf28c869d814c33f232d982420f713bbe629003 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Thu, 28 Jul 2016 11:09:48 +0100 Subject: [PATCH 101/156] Fix broken link in associated projects doc Change-Id: I6f0cc1004a40e77345c641c0e5076f2f5dadb891 --- doc/source/associated_projects.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/associated_projects.rst b/doc/source/associated_projects.rst index 12ef46800e..ff192367ae 100644 --- a/doc/source/associated_projects.rst +++ b/doc/source/associated_projects.rst @@ -114,4 +114,4 @@ Other * `Swift Browser `_ - JavaScript interface for Swift * `swift-ui `_ - OpenStack Swift web browser * `Swift Durability Calculator `_ - Data Durability Calculation Tool for Swift -* `swiftbackmeup ` - Utility that allows one to create backups and upload them to OpenStack Swift +* `swiftbackmeup `_ - Utility that allows one to create backups and upload them to OpenStack Swift From 457cea864c4b49bcf79327369c0bc5ebc24668e2 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Fri, 11 Mar 2016 16:15:17 -0800 Subject: [PATCH 102/156] Handle IPv6 addresses in swift-get-nodes. The curl commands needed a little tweaking. Change-Id: I6551d65241950c65e7160587cc414deb4a2122f5 Closes-Bug: 1555860 --- swift/cli/info.py | 44 ++++++++++++------ test/unit/cli/test_info.py | 92 +++++++++++++++++++++++++++++++++++++- 2 files changed, 122 insertions(+), 14 deletions(-) diff --git a/swift/cli/info.py b/swift/cli/info.py index ba02cfd25a..23dbdea401 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -20,7 +20,7 @@ from six.moves import urllib from swift.common.utils import hash_path, storage_directory, \ Timestamp -from swift.common.ring import Ring +from swift.common.ring import Ring, utils as ring_utils from swift.common.request_helpers import is_sys_meta, is_user_meta, \ strip_sys_meta_prefix, strip_user_meta_prefix from swift.account.backend import AccountBroker, DATADIR as ABDATADIR @@ -37,6 +37,32 @@ class InfoSystemExit(Exception): pass +def curl_head_command(ip, port, device, part, target, policy_index): + """ + Provide a string that is a well formatted curl command to HEAD an object + on a storage node. 
+ + :param ip: the ip of the node + :param port: the port of the node + :param device: the device of the node + :param target: the path of the target resource + :param policy_index: the policy_index of the target resource (can be None) + + :returns: a string, a well formatted curl command + """ + if ring_utils.is_valid_ipv6(ip): + formatted_ip = '[%s]' % ip + else: + formatted_ip = ip + + cmd = 'curl -g -I -XHEAD "http://%s:%s/%s/%s/%s"' % ( + formatted_ip, port, device, part, urllib.parse.quote(target)) + if policy_index is not None: + cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', + policy_index) + return cmd + + def print_ring_locations(ring, datadir, account, container=None, obj=None, tpart=None, all_nodes=False, policy_index=None): """ @@ -99,20 +125,12 @@ def print_ring_locations(ring, datadir, account, container=None, obj=None, print("\n") for node in primary_nodes: - cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \ - % (node['ip'], node['port'], node['device'], part, - urllib.parse.quote(target)) - if policy_index is not None: - cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', - policy_index) + cmd = curl_head_command(node['ip'], node['port'], node['device'], + part, target, policy_index) print(cmd) for node in handoff_nodes: - cmd = 'curl -I -XHEAD "http://%s:%s/%s/%s/%s"' \ - % (node['ip'], node['port'], node['device'], part, - urllib.parse.quote(target)) - if policy_index is not None: - cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', - policy_index) + cmd = curl_head_command(node['ip'], node['port'], node['device'], + part, target, policy_index) cmd += ' # [Handoff]' print(cmd) diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py index a97362dcbc..ee5922d3d0 100644 --- a/test/unit/cli/test_info.py +++ b/test/unit/cli/test_info.py @@ -34,7 +34,8 @@ from swift.obj.diskfile import write_metadata @patch_policies([StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False), - StoragePolicy(2, 'two', False)]) + StoragePolicy(2, 'two', False), + StoragePolicy(3, 'three', False)]) class TestCliInfoBase(unittest.TestCase): def setUp(self): self.orig_hp = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX @@ -72,6 +73,13 @@ class TestCliInfoBase(unittest.TestCase): # ... and another for policy 2 self.two_ring_path = os.path.join(self.testdir, 'object-2.ring.gz') write_fake_ring(self.two_ring_path, *object_devs) + # ... 
and one for policy 3 with some v6 IPs in it + object_devs_ipv6 = [ + {'ip': 'feed:face::dead:beef', 'port': 42}, + {'ip': 'deca:fc0f:feeb:ad11::1', 'port': 43} + ] + self.three_ring_path = os.path.join(self.testdir, 'object-3.ring.gz') + write_fake_ring(self.three_ring_path, *object_devs_ipv6) def tearDown(self): utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = self.orig_hp @@ -569,6 +577,88 @@ class TestPrintObjFullMeta(TestCliInfoBase): os.chdir(cwd) self.assertTrue('X-Backend-Storage-Policy-Index: 1' in out.getvalue()) + def test_print_obj_curl_command_ipv4(self): + # Note: policy 2 has IPv4 addresses in its ring + datafile2 = os.path.join( + self.testdir, + 'sda', 'objects-2', '1', 'ea8', + 'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data') + utils.mkdirs(os.path.dirname(datafile2)) + with open(datafile2, 'wb') as fp: + md = {'name': '/AUTH_admin/c/obj', + 'Content-Type': 'application/octet-stream', + 'ETag': 'd41d8cd98f00b204e9800998ecf8427e', + 'Content-Length': 0} + write_metadata(fp, md) + + object_ring = ring.Ring(self.testdir, ring_name='object-2') + part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj') + node = nodes[0] + + out = StringIO() + hash_dir = os.path.dirname(datafile2) + file_name = os.path.basename(datafile2) + + # Change working directory to object hash dir + cwd = os.getcwd() + try: + os.chdir(hash_dir) + with mock.patch('sys.stdout', out): + print_obj(file_name, swift_dir=self.testdir) + finally: + os.chdir(cwd) + + exp_curl = ( + 'curl -g -I -XHEAD ' + '"http://{host}:{port}/{device}/{part}/AUTH_admin/c/obj" ' + '-H "X-Backend-Storage-Policy-Index: 2"').format( + host=node['ip'], + port=node['port'], + device=node['device'], + part=part) + self.assertIn(exp_curl, out.getvalue()) + + def test_print_obj_curl_command_ipv6(self): + # Note: policy 3 has IPv6 addresses in its ring + datafile3 = os.path.join( + self.testdir, + 'sda', 'objects-3', '1', 'ea8', + 'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data') + utils.mkdirs(os.path.dirname(datafile3)) + with open(datafile3, 'wb') as fp: + md = {'name': '/AUTH_admin/c/obj', + 'Content-Type': 'application/octet-stream', + 'ETag': 'd41d8cd98f00b204e9800998ecf8427e', + 'Content-Length': 0} + write_metadata(fp, md) + + object_ring = ring.Ring(self.testdir, ring_name='object-3') + part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj') + node = nodes[0] + + out = StringIO() + hash_dir = os.path.dirname(datafile3) + file_name = os.path.basename(datafile3) + + # Change working directory to object hash dir + cwd = os.getcwd() + try: + os.chdir(hash_dir) + with mock.patch('sys.stdout', out): + print_obj(file_name, swift_dir=self.testdir) + finally: + os.chdir(cwd) + + exp_curl = ( + 'curl -g -I -XHEAD ' + '"http://[{host}]:{port}' + '/{device}/{part}/AUTH_admin/c/obj" ').format( + host=node['ip'], + port=node['port'], + device=node['device'], + part=part) + self.assertIn(exp_curl, out.getvalue()) + def test_print_obj_meta_and_ts_files(self): # verify that print_obj will also read from meta and ts files base = os.path.splitext(self.datafile)[0] From aa2a84ba8a8d6ba08141492502b3872ad0d81c2d Mon Sep 17 00:00:00 2001 From: Rebecca Finn Date: Wed, 13 Jul 2016 19:09:39 +0000 Subject: [PATCH 103/156] Check object metadata constraints after authorizing In the object proxy controller, the POST method checked the metadata of an object before calling swift.authorize. This could allow an auth middleware to set metadata that violates constraints. 
Instead, checking the metadata should take place after authorization.
Change-Id: I5f05039498c406473952e78c6a40ec11e8b53f8e
Closes-Bug: #1596944
---
swift/proxy/controllers/obj.py | 6 +++---
test/unit/proxy/test_server.py | 19 +++++++++++++++++++
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index a948bbb6d2..be9c2bf472 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -212,9 +212,6 @@ class BaseObjectController(Controller):
@delay_denial
def POST(self, req):
"""HTTP POST request handler."""
- error_response = check_metadata(req, 'object')
- if error_response:
- return error_response
container_info = self.container_info(
self.account_name, self.container_name, req)
container_partition = container_info['partition']
@@ -226,6 +223,9 @@ class BaseObjectController(Controller):
return aresp
if not containers:
return HTTPNotFound(request=req)
+ error_response = check_metadata(req, 'object')
+ if error_response:
+ return error_response
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py
index 44a23ef6f4..bea71a4c12 100644
--- a/test/unit/proxy/test_server.py
+++ b/test/unit/proxy/test_server.py
@@ -3418,6 +3418,25 @@ class TestObjectController(unittest.TestCase):
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
+ def test_POST_meta_authorize(self):
+ def authorize(req):
+ req.headers['X-Object-Meta-Foo'] = 'x' * (limit + 1)
+ return
+ with save_globals():
+ limit = constraints.MAX_META_VALUE_LENGTH
+ self.app.object_post_as_copy = False
+ controller = ReplicatedObjectController(
+ self.app, 'account', 'container', 'object')
+ set_http_connect(200, 200, 202, 202, 202)
+ # acct cont obj obj obj
+ req = Request.blank('/v1/a/c/o', {'REQUEST_METHOD': 'POST'},
+ headers={'Content-Type': 'foo/bar',
+ 'X-Object-Meta-Foo': 'x'})
+ req.environ['swift.authorize'] = authorize
+ self.app.update_request(req)
+ res = controller.POST(req)
+ self.assertEqual(res.status_int, 400)
+
def test_POST_meta_key_len(self):
with save_globals():
limit = constraints.MAX_META_NAME_LENGTH
From 5c9732ac8e797e9235ccd6df56253a802e517ab6 Mon Sep 17 00:00:00 2001
From: Nandini Tata
Date: Tue, 26 Jul 2016 20:20:38 +0000
Subject: [PATCH 104/156] Moved ipv4 & ipv6 validations to the common utils
Validating ip addresses for ipv4 and ipv6 formats has more generic use cases outside of rings. swift-get-nodes and other utilities that need to handle ipv6 addresses often require importing ip validation methods from swift/common/ring/utils (see Related-Change). Also, the expand_ipv6 method already exists in swift/common/utils. Hence, validation of ips also moves into swift/common/utils from swift/common/ring/utils.
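A minimal usage sketch of the relocated helpers, assuming they are imported from their new home in swift.common.utils exactly as the diff below arranges (the sample addresses are arbitrary):

    from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6

    # All three checks delegate to socket.inet_pton, so only syntactically
    # valid addresses pass.
    assert is_valid_ipv4('127.0.0.1')
    assert is_valid_ipv6('fe80::204:61ff:fe9d:f156')
    assert is_valid_ip('::1')            # true for either address family
    assert not is_valid_ip('not-an-ip')

Callers such as swift-get-nodes can then perform these checks without importing anything from the ring package.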
Related-Change: I6551d65241950c65e7160587cc414deb4a2122f5 Change-Id: I720a9586469cf55acab74b4b005907ce106b3da4 --- swift/cli/info.py | 6 +-- swift/common/ring/utils.py | 32 +----------- swift/common/utils.py | 36 +++++++++++--- test/unit/common/ring/test_utils.py | 75 +---------------------------- test/unit/common/test_utils.py | 73 ++++++++++++++++++++++++++++ 5 files changed, 109 insertions(+), 113 deletions(-) diff --git a/swift/cli/info.py b/swift/cli/info.py index 23dbdea401..867a36237b 100644 --- a/swift/cli/info.py +++ b/swift/cli/info.py @@ -19,8 +19,8 @@ from hashlib import md5 from six.moves import urllib from swift.common.utils import hash_path, storage_directory, \ - Timestamp -from swift.common.ring import Ring, utils as ring_utils + Timestamp, is_valid_ipv6 +from swift.common.ring import Ring from swift.common.request_helpers import is_sys_meta, is_user_meta, \ strip_sys_meta_prefix, strip_user_meta_prefix from swift.account.backend import AccountBroker, DATADIR as ABDATADIR @@ -50,7 +50,7 @@ def curl_head_command(ip, port, device, part, target, policy_index): :returns: a string, a well formatted curl command """ - if ring_utils.is_valid_ipv6(ip): + if is_valid_ipv6(ip): formatted_ip = '[%s]' % ip else: formatted_ip = ip diff --git a/swift/common/ring/utils.py b/swift/common/ring/utils.py index 1b48acc3c8..bac85a81ee 100644 --- a/swift/common/ring/utils.py +++ b/swift/common/ring/utils.py @@ -17,7 +17,8 @@ import optparse import re import socket -from swift.common.utils import expand_ipv6 +from swift.common.utils import expand_ipv6, is_valid_ip, is_valid_ipv4, \ + is_valid_ipv6 def tiers_for_dev(dev): @@ -188,35 +189,6 @@ def validate_and_normalize_address(address): raise ValueError('Invalid address %s' % address) -def is_valid_ip(ip): - """ - Return True if the provided ip is a valid IP-address - """ - return is_valid_ipv4(ip) or is_valid_ipv6(ip) - - -def is_valid_ipv4(ip): - """ - Return True if the provided ip is a valid IPv4-address - """ - try: - socket.inet_pton(socket.AF_INET, ip) - except socket.error: - return False - return True - - -def is_valid_ipv6(ip): - """ - Return True if the provided ip is a valid IPv6-address - """ - try: - socket.inet_pton(socket.AF_INET6, ip) - except socket.error: # not a valid address - return False - return True - - def is_valid_hostname(hostname): """ Return True if the provided hostname is a valid hostname diff --git a/swift/common/utils.py b/swift/common/utils.py index 9547bf8f6a..66a0e20583 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1853,6 +1853,35 @@ def parse_options(parser=None, once=False, test_args=None): return config, options +def is_valid_ip(ip): + """ + Return True if the provided ip is a valid IP-address + """ + return is_valid_ipv4(ip) or is_valid_ipv6(ip) + + +def is_valid_ipv4(ip): + """ + Return True if the provided ip is a valid IPv4-address + """ + try: + socket.inet_pton(socket.AF_INET, ip) + except socket.error: # not a valid IPv4 address + return False + return True + + +def is_valid_ipv6(ip): + """ + Returns True if the provided ip is a valid IPv6-address + """ + try: + socket.inet_pton(socket.AF_INET6, ip) + except socket.error: # not a valid IPv6 address + return False + return True + + def expand_ipv6(address): """ Expand ipv6 address. 
@@ -2889,12 +2918,7 @@ def rsync_ip(ip): :returns: a string ip address """ - try: - socket.inet_pton(socket.AF_INET6, ip) - except socket.error: # it's IPv4 - return ip - else: - return '[%s]' % ip + return '[%s]' % ip if is_valid_ipv6(ip) else ip def rsync_module_interpolation(template, device): diff --git a/test/unit/common/ring/test_utils.py b/test/unit/common/ring/test_utils.py index 705d619b9b..cccaa50ed4 100644 --- a/test/unit/common/ring/test_utils.py +++ b/test/unit/common/ring/test_utils.py @@ -19,8 +19,7 @@ from swift.common import ring from swift.common.ring.utils import (tiers_for_dev, build_tier_tree, validate_and_normalize_ip, validate_and_normalize_address, - is_valid_ip, is_valid_ipv4, - is_valid_ipv6, is_valid_hostname, + is_valid_hostname, is_local_device, parse_search_value, parse_search_values_from_opts, parse_change_values_from_opts, @@ -102,78 +101,6 @@ class TestUtils(unittest.TestCase): (1, 2, '192.168.2.2', 10), (1, 2, '192.168.2.2', 11)])) - def test_is_valid_ip(self): - self.assertTrue(is_valid_ip("127.0.0.1")) - self.assertTrue(is_valid_ip("10.0.0.1")) - ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80::204:61ff:fe9d:f156" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80::204:61ff:254.157.241.86" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "fe80::" - self.assertTrue(is_valid_ip(ipv6)) - ipv6 = "::1" - self.assertTrue(is_valid_ip(ipv6)) - not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" - self.assertFalse(is_valid_ip(not_ipv6)) - not_ipv6 = "1:2:3:4:5:6::7:8" - self.assertFalse(is_valid_ip(not_ipv6)) - - def test_is_valid_ipv4(self): - self.assertTrue(is_valid_ipv4("127.0.0.1")) - self.assertTrue(is_valid_ipv4("10.0.0.1")) - ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80::204:61ff:fe9d:f156" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80::204:61ff:254.157.241.86" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "fe80::" - self.assertFalse(is_valid_ipv4(ipv6)) - ipv6 = "::1" - self.assertFalse(is_valid_ipv4(ipv6)) - not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" - self.assertFalse(is_valid_ipv4(not_ipv6)) - not_ipv6 = "1:2:3:4:5:6::7:8" - self.assertFalse(is_valid_ipv4(not_ipv6)) - - def test_is_valid_ipv6(self): - self.assertFalse(is_valid_ipv6("127.0.0.1")) - self.assertFalse(is_valid_ipv6("10.0.0.1")) - ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80::204:61ff:fe9d:f156" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80::204:61ff:254.157.241.86" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "fe80::" - self.assertTrue(is_valid_ipv6(ipv6)) - ipv6 = "::1" - self.assertTrue(is_valid_ipv6(ipv6)) - not_ipv6 = 
"3ffe:0b00:0000:0001:0000:0000:000a" - self.assertFalse(is_valid_ipv6(not_ipv6)) - not_ipv6 = "1:2:3:4:5:6::7:8" - self.assertFalse(is_valid_ipv6(not_ipv6)) - def test_is_valid_hostname(self): self.assertTrue(is_valid_hostname("local")) self.assertTrue(is_valid_hostname("test.test.com")) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 3ebc8f6dc4..647938e54f 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -59,6 +59,7 @@ from swift.common.exceptions import Timeout, MessageTimeout, \ ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \ MimeInvalid, ThreadPoolDead from swift.common import utils +from swift.common.utils import is_valid_ip, is_valid_ipv4, is_valid_ipv6 from swift.common.container_sync_realms import ContainerSyncRealms from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import Request, Response @@ -1667,6 +1668,78 @@ class TestUtils(unittest.TestCase): self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'), 'objects/1/DEF/ABCDEF') + def test_is_valid_ip(self): + self.assertTrue(is_valid_ip("127.0.0.1")) + self.assertTrue(is_valid_ip("10.0.0.1")) + ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80::204:61ff:fe9d:f156" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80::204:61ff:254.157.241.86" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "fe80::" + self.assertTrue(is_valid_ip(ipv6)) + ipv6 = "::1" + self.assertTrue(is_valid_ip(ipv6)) + not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" + self.assertFalse(is_valid_ip(not_ipv6)) + not_ipv6 = "1:2:3:4:5:6::7:8" + self.assertFalse(is_valid_ip(not_ipv6)) + + def test_is_valid_ipv4(self): + self.assertTrue(is_valid_ipv4("127.0.0.1")) + self.assertTrue(is_valid_ipv4("10.0.0.1")) + ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80::204:61ff:fe9d:f156" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80::204:61ff:254.157.241.86" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "fe80::" + self.assertFalse(is_valid_ipv4(ipv6)) + ipv6 = "::1" + self.assertFalse(is_valid_ipv4(ipv6)) + not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" + self.assertFalse(is_valid_ipv4(not_ipv6)) + not_ipv6 = "1:2:3:4:5:6::7:8" + self.assertFalse(is_valid_ipv4(not_ipv6)) + + def test_is_valid_ipv6(self): + self.assertFalse(is_valid_ipv6("127.0.0.1")) + self.assertFalse(is_valid_ipv6("10.0.0.1")) + ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80::204:61ff:fe9d:f156" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80::204:61ff:254.157.241.86" + self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "fe80::" + 
self.assertTrue(is_valid_ipv6(ipv6)) + ipv6 = "::1" + self.assertTrue(is_valid_ipv6(ipv6)) + not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a" + self.assertFalse(is_valid_ipv6(not_ipv6)) + not_ipv6 = "1:2:3:4:5:6::7:8" + self.assertFalse(is_valid_ipv6(not_ipv6)) + def test_expand_ipv6(self): expanded_ipv6 = "fe80::204:61ff:fe9d:f156" upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156" From 66520146cfea2db77cbc4a1eaf5648a5122654ec Mon Sep 17 00:00:00 2001 From: Janie Richling Date: Mon, 25 Jul 2016 22:07:15 -0500 Subject: [PATCH 105/156] Enable in-process func tests to optionally use encryption Running functional tests in the in-process mode uses the default value for the pipeline. This patch adds support to specify the SWIFT_TEST_IN_PROCESS_CONF_LOADER variable to point to a labeled function that changes the proxy configuration for the functional test. The patch also adds a new tox environment func-in-process-encryption which runs with the environment variable SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption The motivation for this change is to put support in place for an upstream CI job that will functionally test using encryption middleware in the pipeline. The gate job is proposed at: https://review.openstack.org/#/c/348292/ Change-Id: I15c4b20f1d2be57ae21c69c614f6a9579145bee9 --- doc/source/development_guidelines.rst | 5 ++ test/functional/__init__.py | 70 +++++++++++++++++++++++++++ tox.ini | 5 ++ 3 files changed, 80 insertions(+) diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 6f0012c35f..20a75d6275 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -106,6 +106,11 @@ set using environment variables: - the optional in-memory object server may be selected by setting the environment variable ``SWIFT_TEST_IN_MEMORY_OBJ`` to a true value. +- encryption may be added to the proxy pipeline by setting the + environment variable ``SWIFT_TEST_IN_PROCESS_CONF_LOADER`` to + ``encryption``. Or when using tox, specify the tox environment + ``func-in-process-encryption`` + - the proxy-server ``object_post_as_copy`` option may be set using the environment variable ``SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY``. diff --git a/test/functional/__init__.py b/test/functional/__init__.py index 52be849bfa..4d0b71f293 100644 --- a/test/functional/__init__.py +++ b/test/functional/__init__.py @@ -287,6 +287,56 @@ def _in_process_setup_ring(swift_conf, conf_src_dir, testdir): return obj_sockets +def _load_encryption(proxy_conf_file, **kwargs): + """ + Load encryption configuration and override proxy-server.conf contents. + + :param proxy_conf_file: Source proxy conf filename + :returns: Path to the test proxy conf file to use + :raises InProcessException: raised if proxy conf contents are invalid + """ + _debug('Setting configuration for encryption') + + # The global conf dict cannot be used to modify the pipeline. + # The pipeline loader requires the pipeline to be set in the local_conf. + # If pipeline is set in the global conf dict (which in turn populates the + # DEFAULTS options) then it prevents pipeline being loaded into the local + # conf during wsgi load_app. + # Therefore we must modify the [pipeline:main] section. 
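+    # Illustrative example (this pipeline value is hypothetical, not read
+    # from a real conf): given
+    #     pipeline = catch_errors cache proxy-logging proxy-server
+    # the replace() below yields
+    #     pipeline = catch_errors cache keymaster encryption proxy-logging proxy-server
+    # i.e. the two new filters are spliced in just ahead of the trailing
+    # proxy-logging proxy-server pair.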
+ + conf = ConfigParser() + conf.read(proxy_conf_file) + try: + section = 'pipeline:main' + pipeline = conf.get(section, 'pipeline') + pipeline = pipeline.replace( + "proxy-logging proxy-server", + "keymaster encryption proxy-logging proxy-server") + conf.set(section, 'pipeline', pipeline) + root_secret = os.urandom(32).encode("base64") + conf.set('filter:keymaster', 'encryption_root_secret', root_secret) + except NoSectionError as err: + msg = 'Error problem with proxy conf file %s: %s' % \ + (proxy_conf_file, err) + raise InProcessException(msg) + + test_conf_file = os.path.join(_testdir, 'proxy-server.conf') + with open(test_conf_file, 'w') as fp: + conf.write(fp) + + return test_conf_file + + +# Mapping from possible values of the variable +# SWIFT_TEST_IN_PROCESS_CONF_LOADER +# to the method to call for loading the associated configuration +# The expected signature for these methods is: +# conf_filename_to_use loader(input_conf_filename, **kwargs) +conf_loaders = { + 'encryption': _load_encryption +} + + def in_process_setup(the_object_server=object_server): _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS') _info('Using object_server class: %s' % the_object_server.__name__) @@ -318,6 +368,26 @@ def in_process_setup(the_object_server=object_server): utils.mkdirs(os.path.join(_testdir, 'sdb1')) utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp')) + # Call the associated method for the value of + # 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists + conf_loader_label = os.environ.get( + 'SWIFT_TEST_IN_PROCESS_CONF_LOADER') + if conf_loader_label is not None: + try: + conf_loader = conf_loaders[conf_loader_label] + _debug('Calling method %s mapped to conf loader %s' % + (conf_loader.__name__, conf_loader_label)) + except KeyError as missing_key: + raise InProcessException('No function mapped for conf loader %s' % + missing_key) + + try: + # Pass-in proxy_conf + proxy_conf = conf_loader(proxy_conf) + _debug('Now using proxy conf %s' % proxy_conf) + except Exception as err: # noqa + raise InProcessException(err) + swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir) obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir) diff --git a/tox.ini b/tox.ini index 24df9c6320..d4f0863d34 100644 --- a/tox.ini +++ b/tox.ini @@ -53,6 +53,11 @@ commands = ./.functests {posargs} setenv = SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY=False +[testenv:func-in-process-encryption] +commands = ./.functests {posargs} +setenv = SWIFT_TEST_IN_PROCESS=1 + SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption + [testenv:venv] commands = {posargs} From 0d41b2326009c470f41f365c508e473ebdacb11c Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Tue, 7 Jun 2016 14:52:28 +1000 Subject: [PATCH 106/156] Add end_marker and reverse options to direct_client Currently the direct_get_container and direct_get_account methods of the direct client don't support passing in the 'end_marker' and 'reverse' params. This change adds support for these params in direct client. 
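As a rough usage sketch of the new keyword arguments (the node dict and partition below are placeholders, not values looked up from a real ring):

    from swift.common import direct_client

    node = {'ip': '127.0.0.1', 'port': 6001, 'device': 'sdb1'}  # placeholder
    headers, containers = direct_client.direct_get_account(
        node, 1, 'AUTH_test', end_marker='m', reverse='on')
    # The listing stops before 'm' and comes back in reverse order; both
    # values are forwarded as query-string parameters by the diff below.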
Change-Id: I846fc70ff3abdb1674152a8d9e0521c709f254c4 --- swift/common/direct_client.py | 22 ++++++++++++++++++---- test/unit/common/test_direct_client.py | 9 +++++++-- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/swift/common/direct_client.py b/swift/common/direct_client.py index 94be486d74..cfc25d6eb8 100644 --- a/swift/common/direct_client.py +++ b/swift/common/direct_client.py @@ -79,8 +79,9 @@ def _make_req(node, part, method, path, _headers, stype, def _get_direct_account_container(path, stype, node, part, marker=None, limit=None, - prefix=None, delimiter=None, conn_timeout=5, - response_timeout=15): + prefix=None, delimiter=None, + conn_timeout=5, response_timeout=15, + end_marker=None, reverse=None): """Base class for get direct account and container. Do not use directly use the get_direct_account or @@ -95,6 +96,10 @@ def _get_direct_account_container(path, stype, node, part, qs += '&prefix=%s' % quote(prefix) if delimiter: qs += '&delimiter=%s' % quote(delimiter) + if end_marker: + qs += '&end_marker=%s' % quote(end_marker) + if reverse: + qs += '&reverse=%s' % quote(reverse) with Timeout(conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, 'GET', path, query_string=qs, @@ -124,7 +129,7 @@ def gen_headers(hdrs_in=None, add_ts=False): def direct_get_account(node, part, account, marker=None, limit=None, prefix=None, delimiter=None, conn_timeout=5, - response_timeout=15): + response_timeout=15, end_marker=None, reverse=None): """ Get listings directly from the account server. @@ -137,6 +142,8 @@ def direct_get_account(node, part, account, marker=None, limit=None, :param delimiter: delimiter for the query :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response + :param end_marker: end_marker query + :param reverse: reverse the returned listing :returns: a tuple of (response headers, a list of containers) The response headers will HeaderKeyDict. """ @@ -145,6 +152,8 @@ def direct_get_account(node, part, account, marker=None, limit=None, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, + end_marker=end_marker, + reverse=reverse, conn_timeout=conn_timeout, response_timeout=response_timeout) @@ -185,7 +194,8 @@ def direct_head_container(node, part, account, container, conn_timeout=5, def direct_get_container(node, part, account, container, marker=None, limit=None, prefix=None, delimiter=None, - conn_timeout=5, response_timeout=15): + conn_timeout=5, response_timeout=15, end_marker=None, + reverse=None): """ Get container listings directly from the container server. @@ -199,6 +209,8 @@ def direct_get_container(node, part, account, container, marker=None, :param delimiter: delimiter for the query :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response + :param end_marker: end_marker query + :param reverse: reverse the returned listing :returns: a tuple of (response headers, a list of objects) The response headers will be a HeaderKeyDict. 
""" @@ -207,6 +219,8 @@ def direct_get_container(node, part, account, container, marker=None, part, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, + end_marker=end_marker, + reverse=reverse, conn_timeout=conn_timeout, response_timeout=response_timeout) diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index aae8129340..1426f28406 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -172,7 +172,8 @@ class TestDirectClient(unittest.TestCase): with mocked_http_conn(200, stub_headers, body) as conn: resp_headers, resp = direct_client.direct_get_account( self.node, self.part, self.account, marker='marker', - prefix='prefix', delimiter='delimiter', limit=1000) + prefix='prefix', delimiter='delimiter', limit=1000, + end_marker='endmarker', reverse='on') self.assertEqual(conn.method, 'GET') self.assertEqual(conn.path, self.account_path) @@ -184,6 +185,8 @@ class TestDirectClient(unittest.TestCase): self.assertTrue('limit=1000' in conn.query_string) self.assertTrue('prefix=prefix' in conn.query_string) self.assertTrue('format=json' in conn.query_string) + self.assertTrue('end_marker=endmarker' in conn.query_string) + self.assertTrue('reverse=on' in conn.query_string) def test_direct_client_exception(self): stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'} @@ -335,7 +338,7 @@ class TestDirectClient(unittest.TestCase): resp_headers, resp = direct_client.direct_get_container( self.node, self.part, self.account, self.container, marker='marker', prefix='prefix', delimiter='delimiter', - limit=1000) + limit=1000, end_marker='endmarker', reverse='on') self.assertEqual(conn.req_headers['user-agent'], 'direct-client %s' % os.getpid()) @@ -346,6 +349,8 @@ class TestDirectClient(unittest.TestCase): self.assertTrue('limit=1000' in conn.query_string) self.assertTrue('prefix=prefix' in conn.query_string) self.assertTrue('format=json' in conn.query_string) + self.assertTrue('end_marker=endmarker' in conn.query_string) + self.assertTrue('reverse=on' in conn.query_string) def test_direct_get_container_no_content_does_not_decode_body(self): headers = {} From eb535904bac7c9f8a73b672471dd01f5971716a1 Mon Sep 17 00:00:00 2001 From: gengchc2 Date: Fri, 29 Jul 2016 11:43:32 +0800 Subject: [PATCH 107/156] modify the home-page info with the developer documentation update home-page info Change-Id: I625c25a8a5698d98174603c6fa2b42391471c03d --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index cb4cda4419..ea73b2c785 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://www.openstack.org/ +home-page = http://docs.openstack.org/developer/swift/ classifier = Development Status :: 5 - Production/Stable Environment :: OpenStack From 1533eb3f3fb358eea92a9f23c4558dd8a0577d1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A9la=20Vancsics?= Date: Fri, 15 Jul 2016 12:22:30 +0200 Subject: [PATCH 108/156] Reduce code duplication Reduced source code by extracting duplicated code (swift/cli/ringbuilder.py) http://openqa.sed.hu/dashboard/index?did=1&id=OpenStack%3Aswift, in 127~CloneClass Change-Id: Id1081363610075f306eff7cf003c3355f283f1d1 Closes-Bug: 1536127 --- swift/cli/ringbuilder.py | 53 +++++++++++++++------------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py 
index 8f52849a93..4991c45d74 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -181,7 +181,8 @@ def _parse_add_values(argvish): return parsed_devs -def _set_weight_values(devs, weight, opts): +def check_devs(devs, input_question, opts, abort_msg): + if not devs: print('Search value matched 0 devices.\n' 'The on-disk ring builder is unchanged.') @@ -191,12 +192,18 @@ def _set_weight_values(devs, weight, opts): print('Matched more than one device:') for dev in devs: print(' %s' % format_device(dev)) - if not opts.yes and \ - input('Are you sure you want to update the weight for ' - 'these %s devices? (y/N) ' % len(devs)) != 'y': - print('Aborting device modifications') + if not opts.yes and input(input_question) != 'y': + print(abort_msg) exit(EXIT_ERROR) + +def _set_weight_values(devs, weight, opts): + + input_question = 'Are you sure you want to update the weight for these ' \ + '%s devices? (y/N) ' % len(devs) + abort_msg = 'Aborting device modifications' + check_devs(devs, input_question, opts, abort_msg) + for dev in devs: builder.set_dev_weight(dev['id'], weight) print('%s weight set to %s' % (format_device(dev), @@ -240,20 +247,10 @@ def _parse_set_weight_values(argvish): def _set_info_values(devs, change, opts): - if not devs: - print("Search value matched 0 devices.\n" - "The on-disk ring builder is unchanged.") - exit(EXIT_ERROR) - - if len(devs) > 1: - print('Matched more than one device:') - for dev in devs: - print(' %s' % format_device(dev)) - if not opts.yes and \ - input('Are you sure you want to update the info for ' - 'these %s devices? (y/N) ' % len(devs)) != 'y': - print('Aborting device modifications') - exit(EXIT_ERROR) + input_question = 'Are you sure you want to update the info for these ' \ + '%s devices? (y/N) ' % len(devs) + abort_msg = 'Aborting device modifications' + check_devs(devs, input_question, opts, abort_msg) for dev in devs: orig_dev_string = format_device(dev) @@ -760,20 +757,10 @@ swift-ring-builder remove devs, opts = _parse_remove_values(argv[3:]) - if not devs: - print('Search value matched 0 devices.\n' - 'The on-disk ring builder is unchanged.') - exit(EXIT_ERROR) - - if len(devs) > 1: - print('Matched more than one device:') - for dev in devs: - print(' %s' % format_device(dev)) - if not opts.yes and \ - input('Are you sure you want to remove these %s ' - 'devices? (y/N) ' % len(devs)) != 'y': - print('Aborting device removals') - exit(EXIT_ERROR) + input_question = 'Are you sure you want to remove these ' \ + '%s devices? (y/N) ' % len(devs) + abort_msg = 'Aborting device removals' + check_devs(devs, input_question, opts, abort_msg) for dev in devs: try: From 99a6f915ffc3aba2086e22dfba1f33b4fee46e81 Mon Sep 17 00:00:00 2001 From: Nandini Tata Date: Fri, 20 May 2016 17:41:29 +0000 Subject: [PATCH 109/156] swift-ring-builder output corrected for ipv6 Adjusted width of ip and port columns in swift-ring-builder command output to dynamically span to the longest ip or the longest port in the devices list. Also combined the port and ip address columns for better visual clarity. Took care of ipv6 format [ipv6]:port Modified the corresponding test case with expected output. 
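For illustration, a minimal sketch of the bracketing rule the new table code relies on (the helper name here is invented, not part of the patch):

    def format_node_address(ip, port):
        # IPv6 literals get wrapped in brackets so the colon before the
        # port remains unambiguous, yielding the [ipv6]:port form.
        host = '[%s]' % ip if ':' in ip else ip
        return '%s:%s' % (host, port)

    # format_node_address('2001:db8::1', 6200) -> '[2001:db8::1]:6200'
    # format_node_address('127.0.0.1', 6200)   -> '127.0.0.1:6200'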
Change-Id: I65837f8fed70be60b53d5a817a4ce529ad0f070e Closes-Bug: #1567105 --- swift/cli/ringbuilder.py | 69 ++++++++++++++--- test/unit/cli/test_default_output.stub | 11 +++ test/unit/cli/test_ipv6_output.stub | 10 +++ test/unit/cli/test_ringbuilder.py | 103 ++++++++++++++++++++++--- 4 files changed, 171 insertions(+), 22 deletions(-) create mode 100644 test/unit/cli/test_default_output.stub create mode 100644 test/unit/cli/test_ipv6_output.stub diff --git a/swift/cli/ringbuilder.py b/swift/cli/ringbuilder.py index 0c35553185..5b7f8108d5 100644 --- a/swift/cli/ringbuilder.py +++ b/swift/cli/ringbuilder.py @@ -39,7 +39,7 @@ from swift.common.ring.utils import validate_args, \ parse_builder_ring_filename_args, parse_search_value, \ parse_search_values_from_opts, parse_change_values_from_opts, \ dispersion_report, parse_add_value -from swift.common.utils import lock_parent_directory +from swift.common.utils import lock_parent_directory, is_valid_ipv6 MAJOR_VERSION = 1 MINOR_VERSION = 3 @@ -376,6 +376,58 @@ def _parse_remove_values(argvish): exit(EXIT_ERROR) +def _make_display_device_table(builder): + ip_width = 10 + port_width = 4 + rep_ip_width = 14 + rep_port_width = 4 + ip_ipv6 = rep_ipv6 = False + for dev in builder._iter_devs(): + if is_valid_ipv6(dev['ip']): + ip_ipv6 = True + if is_valid_ipv6(dev['replication_ip']): + rep_ipv6 = True + ip_width = max(len(dev['ip']), ip_width) + rep_ip_width = max(len(dev['replication_ip']), rep_ip_width) + port_width = max(len(str(dev['port'])), port_width) + rep_port_width = max(len(str(dev['replication_port'])), + rep_port_width) + if ip_ipv6: + ip_width += 2 + if rep_ipv6: + rep_ip_width += 2 + header_line = ('Devices:%5s %6s %4s %' + str(ip_width) + + 's:%-' + str(port_width) + 's %' + + str(rep_ip_width) + 's:%-' + str(rep_port_width) + + 's %5s %6s %10s %7s %5s %s') % ( + 'id', 'region', 'zone', 'ip address', + 'port', 'replication ip', 'port', 'name', + 'weight', 'partitions', 'balance', 'flags', + 'meta') + + def print_dev_f(dev, balance_per_dev=0.00, flags=''): + def get_formated_ip(key): + value = dev[key] + if ':' in value: + value = '[%s]' % value + return value + dev_ip = get_formated_ip('ip') + dev_replication_ip = get_formated_ip('replication_ip') + format_string = ''.join(['%13d %6d %4d ', + '%', str(ip_width), 's:%-', + str(port_width), 'd ', '%', + str(rep_ip_width), 's', ':%-', + str(rep_port_width), 'd %5s %6.02f' + ' %10s %7.02f %5s %s']) + args = (dev['id'], dev['region'], dev['zone'], dev_ip, dev['port'], + dev_replication_ip, dev['replication_port'], dev['device'], + dev['weight'], dev['parts'], balance_per_dev, flags, + dev['meta']) + print(format_string % args) + + return header_line, print_dev_f + + class Commands(object): @staticmethod def unknown(): @@ -458,18 +510,11 @@ swift-ring-builder if builder.devs: balance_per_dev = builder._build_balance_per_dev() - print('Devices: id region zone ip address port ' - 'replication ip replication port name ' - 'weight partitions balance flags meta') + header_line, print_dev_f = _make_display_device_table(builder) + print(header_line) for dev in builder._iter_devs(): flags = 'DEL' if dev in builder._remove_devs else '' - print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f ' - '%10s %7.02f %5s %s' % - (dev['id'], dev['region'], dev['zone'], dev['ip'], - dev['port'], dev['replication_ip'], - dev['replication_port'], dev['device'], dev['weight'], - dev['parts'], balance_per_dev[dev['id']], flags, - dev['meta'])) + print_dev_f(dev, balance_per_dev[dev['id']], flags) exit(EXIT_SUCCESS) 
@staticmethod
@@ -905,7 +950,7 @@ swift-ring-builder dispersion [options]
--verbose option will display dispersion graph broken down by tier
You can filter which tiers are evaluated to drill down using a regex
- in the optional search_filter arguemnt. i.e.
+ in the optional search_filter argument. i.e.
swift-ring-builder dispersion "r\d+z\d+$" -v
diff --git a/test/unit/cli/test_default_output.stub b/test/unit/cli/test_default_output.stub
new file mode 100644
index 0000000000..3b4786b479
--- /dev/null
+++ b/test/unit/cli/test_default_output.stub
@@ -0,0 +1,11 @@
+__RINGFILE__, build version 4
+64 partitions, 3.000000 replicas, 4 regions, 4 zones, 4 devices, 100.00 balance, 0.00 dispersion
+The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
+The overload factor is 0.00% (0.000000)
+Ring file __RINGFILE__.ring.gz not found, probably it hasn't been written yet
+Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
+ 0 0 0 127.0.0.1:6200 127.0.0.1:6200 sda1 100.00 0 -100.00 some meta data
+ 1 1 1 127.0.0.2:6201 127.0.0.2:6201 sda2 100.00 0 -100.00
+ 2 2 2 127.0.0.3:6202 127.0.0.3:6202 sdc3 100.00 0 -100.00
+ 3 3 3 127.0.0.4:6203 127.0.0.4:6203 sdd4 100.00 0 -100.00
+
diff --git a/test/unit/cli/test_ipv6_output.stub b/test/unit/cli/test_ipv6_output.stub
new file mode 100644
index 0000000000..1691bb43af
--- /dev/null
+++ b/test/unit/cli/test_ipv6_output.stub
@@ -0,0 +1,10 @@
+__RINGFILE__, build version 4
+256 partitions, 3.000000 replicas, 4 regions, 4 zones, 4 devices, 100.00 balance, 0.00 dispersion
+The minimum number of hours before a partition can be reassigned is 1 (0:00:00 remaining)
+The overload factor is 0.00% (0.000000)
+Ring file __RINGFILE__.ring.gz not found, probably it hasn't been written yet
+Devices: id region zone ip address:port replication ip:port name weight partitions balance flags meta
+ 0 0 0 [2001:db8:85a3::8a2e:370:7334]:6200 [2001:db8:85a3::8a2e:370:7334]:6200 sda1 100.00 0 -100.00 some meta data
+ 1 1 1 127.0.0.1:66201 127.0.0.1:66201 sda2 100.00 0 -100.00
+ 2 2 2 [2001:db8:85a3::8a2e:370:7336]:6202 127.0.10.127:7070 sdc3 100.00 0 -100.00
+ 3 3 3 [2001:db8:85a3::8a2e:370:7337]:6203 [7001:db8:85a3::8a2e:370:7337]:11664 sdd4 100.00 0 -100.00
diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py
index 5e86b6fea1..bf994012a1 100644
--- a/test/unit/cli/test_ringbuilder.py
+++ b/test/unit/cli/test_ringbuilder.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import errno
+import itertools
import logging
import mock
import os
@@ -92,6 +94,43 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
except OSError:
pass
+ def assertOutputStub(self, output, ext='stub'):
+ """
+ assert that the given output string is equal to an in-tree stub file;
+ if a test needs to check multiple outputs it can use custom ext's
+ """
+ filepath = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), self.id().split('.')[-1]))
+ print(filepath)
+ filepath = '%s.%s' % (filepath, ext)
+ try:
+ with open(filepath, 'r') as f:
+ stub = f.read()
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ self.fail('%r does not exist' % filepath)
+ else:
+ self.fail('%r could not be read (%s)' % (filepath, e))
+ output = output.replace(self.tempfile, '__RINGFILE__')
+ for i, (value, expected) in enumerate(
+ itertools.izip_longest(
+ output.splitlines(), stub.splitlines())):
+ # N.B.
differences in trailing whitespace are ignored! + value = (value or '').rstrip() + expected = (expected or '').rstrip() + try: + self.assertEqual(value, expected) + except AssertionError: + msg = 'Line #%s value is not like expected:\n%r\n%r' % ( + i, value, expected) + msg += '\n\nFull output was:\n' + for i, line in enumerate(output.splitlines()): + msg += '%3d: %s\n' % (i, line) + msg += '\n\nCompared to stub:\n' + for i, line in enumerate(stub.splitlines()): + msg += '%3d: %s\n' % (i, line) + self.fail(msg) + def create_sample_ring(self, part_power=6): """ Create a sample ring with four devices @@ -1614,6 +1653,50 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): argv = ["", self.tmpfile] self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) + def test_default_output(self): + self.create_sample_ring() + out, err = self.run_srb('') + self.assertOutputStub(out) + + def test_ipv6_output(self): + ring = RingBuilder(8, 3, 1) + ring.add_dev({'weight': 100.0, + 'region': 0, + 'zone': 0, + 'ip': '2001:db8:85a3::8a2e:370:7334', + 'port': 6200, + 'device': 'sda1', + 'meta': 'some meta data', + }) + ring.add_dev({'weight': 100.0, + 'region': 1, + 'zone': 1, + 'ip': '127.0.0.1', + 'port': 66201, + 'device': 'sda2', + }) + ring.add_dev({'weight': 100.0, + 'region': 2, + 'zone': 2, + 'ip': '2001:db8:85a3::8a2e:370:7336', + 'port': 6202, + 'device': 'sdc3', + 'replication_ip': '127.0.10.127', + 'replication_port': 7070, + }) + ring.add_dev({'weight': 100.0, + 'region': 3, + 'zone': 3, + 'ip': '2001:db8:85a3::8a2e:370:7337', + 'port': 6203, + 'device': 'sdd4', + 'replication_ip': '7001:db8:85a3::8a2e:370:7337', + 'replication_port': 11664, + }) + ring.save(self.tmpfile) + out, err = self.run_srb('') + self.assertOutputStub(out) + def test_default_show_removed(self): mock_stdout = six.StringIO() mock_stderr = six.StringIO() @@ -1642,20 +1725,20 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): "The overload factor is 0.00%% (0.000000)\n" \ "Ring file %s.ring.gz not found, probably " \ "it hasn't been written yet\n" \ - "Devices: id region zone ip address port " \ - "replication ip replication port name weight " \ + "Devices: id region zone ip address:port " \ + "replication ip:port name weight " \ "partitions balance flags meta\n" \ - " 0 0 0 127.0.0.1 6200 " \ - "127.0.0.1 6200 sda1 100.00" \ + " 0 0 0 127.0.0.1:6200 " \ + " 127.0.0.1:6200 sda1 100.00" \ " 0 -100.00 some meta data\n" \ - " 1 1 1 127.0.0.2 6201 " \ - "127.0.0.2 6201 sda2 0.00" \ + " 1 1 1 127.0.0.2:6201 " \ + " 127.0.0.2:6201 sda2 0.00" \ " 0 0.00 DEL \n" \ - " 2 2 2 127.0.0.3 6202 " \ - "127.0.0.3 6202 sdc3 100.00" \ + " 2 2 2 127.0.0.3:6202 " \ + " 127.0.0.3:6202 sdc3 100.00" \ " 0 -100.00 \n" \ - " 3 3 3 127.0.0.4 6203 " \ - "127.0.0.4 6203 sdd4 0.00" \ + " 3 3 3 127.0.0.4:6203 " \ + " 127.0.0.4:6203 sdd4 0.00" \ " 0 0.00 \n" % (self.tmpfile, self.tmpfile) self.assertEqual(expected, mock_stdout.getvalue()) From e7f025f7fa56056ddad82aa5c601894b25c8927b Mon Sep 17 00:00:00 2001 From: maoshuai Date: Mon, 1 Aug 2016 21:39:50 +0800 Subject: [PATCH 110/156] made link in README.rst more clear Escaping the underscore is not necessary in this case. 
See http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules Change-Id: I21d95d6baaf471246eb8a931c7df366634529512 --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 2c2831dede..b3f3ac250e 100644 --- a/README.rst +++ b/README.rst @@ -111,7 +111,7 @@ For Deployers Deployer docs are also available at http://docs.openstack.org/developer/swift/. A good starting point is at -http://docs.openstack.org/developer/swift/deployment\_guide.html +http://docs.openstack.org/developer/swift/deployment_guide.html There is an `ops runbook `__ that gives information about how to diagnose and troubleshoot common issues From d819ae00a5f589fc7cd11ddf95cfce7323d926e8 Mon Sep 17 00:00:00 2001 From: Shashirekha Gundur Date: Mon, 1 Aug 2016 22:13:58 +0000 Subject: [PATCH 111/156] update swift-dispersion manpages to add policy-name This change adds -P / --policy-name option to the swift-dispersion manpages. Also tidied up a little removing some extra whitespace at the end of lines. Change-Id: Ic3372379994964e96258939580452f94fb575a06 Closes-Bug: #1605686 --- doc/manpages/swift-dispersion-populate.1 | 40 ++++++++++---------- doc/manpages/swift-dispersion-report.1 | 48 +++++++++++++----------- 2 files changed, 47 insertions(+), 41 deletions(-) diff --git a/doc/manpages/swift-dispersion-populate.1 b/doc/manpages/swift-dispersion-populate.1 index 5ce1404569..71047bef05 100644 --- a/doc/manpages/swift-dispersion-populate.1 +++ b/doc/manpages/swift-dispersion-populate.1 @@ -14,26 +14,26 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-dispersion-populate 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-dispersion-populate -\- Openstack-swift dispersion populate +\- Openstack-swift dispersion populate .SH SYNOPSIS .LP .B swift-dispersion-populate [--container-suffix-start] [--object-suffix-start] [--container-only|--object-only] [--insecure] [conf_file] -.SH DESCRIPTION +.SH DESCRIPTION .PP This is one of the swift-dispersion utilities that is used to evaluate the -overall cluster health. This is accomplished by checking if a set of +overall cluster health. This is accomplished by checking if a set of deliberately distributed containers and objects are currently in their proper places within the cluster. -.PP +.PP For instance, a common deployment has three replicas of each object. The health of that object can be measured by checking if each replica is in its proper place. If only 2 of the 3 is in place the object's health @@ -48,13 +48,13 @@ we need to run the \fBswift-dispersion-report\fR tool to check the health of eac of these containers and objects. .PP -These tools need direct access to the entire cluster and to the ring files. -Installing them on a proxy server will probably do or a box used for swift -administration purposes that also contains the common swift packages and ring. -Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the +These tools need direct access to the entire cluster and to the ring files. +Installing them on a proxy server will probably do or a box used for swift +administration purposes that also contains the common swift packages and ring. +Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the same configuration file, /etc/swift/dispersion.conf . 
The account used by these tool should be a dedicated account for the dispersion stats and also have admin -privileges. +privileges. .SH OPTIONS .RS 0 @@ -70,14 +70,14 @@ Start object suffix at NUMBER and resume population at this point; default: 0 Only run object population .IP "\fB--container-only\fR" Only run container population -.IP "\fB--object-only\fR" -Only run object population .IP "\fB--no-overlap\fR" Increase coverage by amount in dispersion_coverage option with no overlap of existing partitions (if run more than once) +.IP "\fB-P, --policy-name\fR" +Specify storage policy name .SH CONFIGURATION -.PD 0 -Example \fI/etc/swift/dispersion.conf\fR: +.PD 0 +Example \fI/etc/swift/dispersion.conf\fR: .RS 3 .IP "[dispersion]" @@ -93,10 +93,10 @@ Example \fI/etc/swift/dispersion.conf\fR: .IP "# concurrency = 25" .IP "# endpoint_type = publicURL" .RE -.PD +.PD .SH EXAMPLE -.PP +.PP .PD 0 $ swift-dispersion-populate .RS 1 @@ -105,14 +105,14 @@ $ swift-dispersion-populate .RE .PD - + .SH DOCUMENTATION .LP More in depth documentation about the swift-dispersion utilities and -also Openstack-Swift as a whole can be found at +also Openstack-Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html#cluster-health -and +and .BI http://swift.openstack.org diff --git a/doc/manpages/swift-dispersion-report.1 b/doc/manpages/swift-dispersion-report.1 index c66eba1af6..ff27280e7a 100644 --- a/doc/manpages/swift-dispersion-report.1 +++ b/doc/manpages/swift-dispersion-report.1 @@ -14,45 +14,45 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-dispersion-report 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-dispersion-report -\- Openstack-swift dispersion report +\- Openstack-swift dispersion report .SH SYNOPSIS .LP .B swift-dispersion-report [-d|--debug] [-j|--dump-json] [-p|--partitions] [--container-only|--object-only] [--insecure] [conf_file] -.SH DESCRIPTION +.SH DESCRIPTION .PP This is one of the swift-dispersion utilities that is used to evaluate the -overall cluster health. This is accomplished by checking if a set of +overall cluster health. This is accomplished by checking if a set of deliberately distributed containers and objects are currently in their proper places within the cluster. -.PP +.PP For instance, a common deployment has three replicas of each object. The health of that object can be measured by checking if each replica is in its proper place. If only 2 of the 3 is in place the object's health can be said to be at 66.66%, where 100% would be perfect. .PP -Once the \fBswift-dispersion-populate\fR has been used to populate the -dispersion account, one should run the \fBswift-dispersion-report\fR tool +Once the \fBswift-dispersion-populate\fR has been used to populate the +dispersion account, one should run the \fBswift-dispersion-report\fR tool repeatedly for the life of the cluster, in order to check the health of each of these containers and objects. .PP -These tools need direct access to the entire cluster and to the ring files. -Installing them on a proxy server will probably do or a box used for swift -administration purposes that also contains the common swift packages and ring. -Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the +These tools need direct access to the entire cluster and to the ring files. 
+Installing them on a proxy server will probably do or a box used for swift +administration purposes that also contains the common swift packages and ring. +Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the same configuration file, /etc/swift/dispersion.conf . The account used by these tool should be a dedicated account for the dispersion stats and also have admin -privileges. +privileges. .SH OPTIONS .RS 0 @@ -91,9 +91,15 @@ Only run the object report Allow accessing insecure keystone server. The keystone's certificate will not be verified. +.SH OPTIONS +.RS 0 +.PD 1 +.IP "\fB-P, --policy-name\fR" +Specify storage policy name + .SH CONFIGURATION -.PD 0 -Example \fI/etc/swift/dispersion.conf\fR: +.PD 0 +Example \fI/etc/swift/dispersion.conf\fR: .RS 3 .IP "[dispersion]" @@ -110,12 +116,12 @@ Example \fI/etc/swift/dispersion.conf\fR: .IP "# dump_json = no" .IP "# endpoint_type = publicURL" .RE -.PD +.PD .SH EXAMPLE -.PP +.PP .PD 0 -$ swift-dispersion-report +$ swift-dispersion-report .RS 1 @@ -129,14 +135,14 @@ $ swift-dispersion-report .RE .PD - + .SH DOCUMENTATION .LP More in depth documentation about the swift-dispersion utilities and -also Openstack-Swift as a whole can be found at +also Openstack-Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html#cluster-health -and +and .BI http://swift.openstack.org From e0c7e6bf78c4bb0f2ad047a9d6f6926a2ae02d9a Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 2 Aug 2016 10:38:45 +0100 Subject: [PATCH 112/156] Fix repeated OPTIONS in swift-dispersion-report manpage Remove the repeated OPTIONS section headers, which looks odd and is inconsistent with swift-dispersion-populate. Change-Id: I6d894e3b61002ddf7c0ea8a78cde226617eb11a6 --- doc/manpages/swift-dispersion-report.1 | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/doc/manpages/swift-dispersion-report.1 b/doc/manpages/swift-dispersion-report.1 index ff27280e7a..4d095d248a 100644 --- a/doc/manpages/swift-dispersion-report.1 +++ b/doc/manpages/swift-dispersion-report.1 @@ -60,40 +60,22 @@ privileges. .IP "\fB-d, --debug\fR" output any 404 responses to standard error -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB-j, --dump-json\fR" output dispersion report in json format -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB-p, --partitions\fR" output the partition numbers that have any missing replicas -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB--container-only\fR" Only run the container report -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB--object-only\fR" Only run the object report -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB--insecure\fR" Allow accessing insecure keystone server. The keystone's certificate will not be verified. -.SH OPTIONS -.RS 0 -.PD 1 .IP "\fB-P, --policy-name\fR" Specify storage policy name From 5cf24ecf165e82076db5c9e113b2127de7782a90 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Tue, 2 Aug 2016 10:57:59 +0100 Subject: [PATCH 113/156] Fix link to docs from swift-dispersion manpages Change the link to the admin docs to point to #dispersion-report rather than non-existent #cluster-health. 
Change-Id: Ia2f4262c266201d4d555e7bedb8c5c2eb9fb1264
---
doc/manpages/swift-dispersion-populate.1 | 6 +++---
doc/manpages/swift-dispersion-report.1 | 6 +++---
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/doc/manpages/swift-dispersion-populate.1 b/doc/manpages/swift-dispersion-populate.1
index 71047bef05..33eccd4f5b 100644
--- a/doc/manpages/swift-dispersion-populate.1
+++ b/doc/manpages/swift-dispersion-populate.1
@@ -110,12 +110,12 @@ $ swift-dispersion-populate
.SH DOCUMENTATION
.LP
More in depth documentation about the swift-dispersion utilities and
-also Openstack-Swift as a whole can be found at
-.BI http://swift.openstack.org/admin_guide.html#cluster-health
+also OpenStack Swift as a whole can be found at
+.BI http://swift.openstack.org/admin_guide.html#dispersion-report
and
.BI http://swift.openstack.org
.SH "SEE ALSO"
.BR swift-dispersion-report(1),
-.BR dispersion.conf (5)
+.BR dispersion.conf(5)
diff --git a/doc/manpages/swift-dispersion-report.1 b/doc/manpages/swift-dispersion-report.1
index ff27280e7a..6ccf88b611 100644
--- a/doc/manpages/swift-dispersion-report.1
+++ b/doc/manpages/swift-dispersion-report.1
@@ -140,12 +140,12 @@ $ swift-dispersion-report
.SH DOCUMENTATION
.LP
More in depth documentation about the swift-dispersion utilities and
-also Openstack-Swift as a whole can be found at
-.BI http://swift.openstack.org/admin_guide.html#cluster-health
+also OpenStack Swift as a whole can be found at
+.BI http://swift.openstack.org/admin_guide.html#dispersion-report
and
.BI http://swift.openstack.org
.SH "SEE ALSO"
.BR swift-dispersion-populate(1),
-.BR dispersion.conf (5)
+.BR dispersion.conf(5)
From 488f88e30abe2b9d67ce43697e3abc63c75699b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?G=C3=A1bor=20Antal?=
Date: Fri, 15 Jul 2016 14:35:54 +0200
Subject: [PATCH 114/156] Use more specific asserts in test/unit/cli tests
I replaced asserts with more specific assert methods, e.g.: from assertTrue(sth == None) to assertIsNone(*) or assertTrue(isinstance(inst, type)) to assertIsInstance(inst, type) or assertTrue(not sth) to assertFalse(sth). The code gets more readable, and a better description will be shown on failure.
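For reference, a small sketch of the conversion pattern using standard unittest methods (the test case itself is invented for illustration):

    import unittest

    class ExampleAsserts(unittest.TestCase):
        def test_specific_asserts(self):
            value = None
            # Before: self.assertTrue(value == None)
            self.assertIsNone(value)
            # Before: self.assertTrue(isinstance(5, int))
            self.assertIsInstance(5, int)
            # Before: self.assertTrue('err' in 'error')
            self.assertIn('err', 'error')

    if __name__ == '__main__':
        unittest.main()

On failure the specific methods report the offending values directly instead of a bare 'False is not true'.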
Change-Id: I39305808ad2349dc11a42261b41dbb347ac0618a --- test/unit/cli/test_form_signature.py | 12 +- test/unit/cli/test_info.py | 127 ++++++++++---------- test/unit/cli/test_recon.py | 40 +++--- test/unit/cli/test_ring_builder_analyzer.py | 2 +- test/unit/cli/test_ringbuilder.py | 42 +++---- 5 files changed, 111 insertions(+), 112 deletions(-) diff --git a/test/unit/cli/test_form_signature.py b/test/unit/cli/test_form_signature.py index bf2f8505a2..427da89d2b 100644 --- a/test/unit/cli/test_form_signature.py +++ b/test/unit/cli/test_form_signature.py @@ -49,14 +49,14 @@ class TestFormSignature(unittest.TestCase): max_file_count, str(expires), key]) self.assertEqual(exitcode, 0) - self.assertTrue("Signature: %s" % expected_signature - in out.getvalue()) - self.assertTrue("Expires: %d" % (the_time + expires,) - in out.getvalue()) + self.assertIn("Signature: %s" % expected_signature, + out.getvalue()) + self.assertIn("Expires: %d" % (the_time + expires,), + out.getvalue()) sig_input = ('' % expected_signature) - self.assertTrue(sig_input in out.getvalue()) + self.assertIn(sig_input, out.getvalue()) def test_too_few_args(self): out = StringIO() @@ -67,7 +67,7 @@ class TestFormSignature(unittest.TestCase): self.assertNotEqual(exitcode, 0) usage = 'Syntax: swift-form-signature ' - self.assertTrue(usage in out.getvalue()) + self.assertIn(usage, out.getvalue()) def test_invalid_filesize_arg(self): out = StringIO() diff --git a/test/unit/cli/test_info.py b/test/unit/cli/test_info.py index ee5922d3d0..1327c79afc 100644 --- a/test/unit/cli/test_info.py +++ b/test/unit/cli/test_info.py @@ -89,10 +89,9 @@ class TestCliInfoBase(unittest.TestCase): try: func(*args, **kwargs) except Exception as e: - self.assertTrue(msg in str(e), - "Expected %r in %r" % (msg, str(e))) - self.assertTrue(isinstance(e, exc), - "Expected %s, got %s" % (exc, type(e))) + self.assertIn(msg, str(e), "Expected %r in %r" % (msg, str(e))) + self.assertIsInstance(e, exc, + "Expected %s, got %s" % (exc, type(e))) class TestCliInfo(TestCliInfoBase): @@ -206,9 +205,9 @@ No user metadata found in db file''' % POLICIES[0].name print_ring_locations(acctring, 'dir', 'acct') exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '3', 'b47', 'dc5be2aa4347a22a0fee6bc7de505b47') - self.assertTrue(exp_db in out.getvalue()) - self.assertTrue('127.0.0.1' in out.getvalue()) - self.assertTrue('127.0.0.2' in out.getvalue()) + self.assertIn(exp_db, out.getvalue()) + self.assertIn('127.0.0.1', out.getvalue()) + self.assertIn('127.0.0.2', out.getvalue()) def test_print_ring_locations_container(self): out = StringIO() @@ -217,7 +216,7 @@ No user metadata found in db file''' % POLICIES[0].name print_ring_locations(contring, 'dir', 'acct', 'con') exp_db = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'dir', '1', 'fe6', '63e70955d78dfc62821edc07d6ec1fe6') - self.assertTrue(exp_db in out.getvalue()) + self.assertIn(exp_db, out.getvalue()) def test_print_ring_locations_obj(self): out = StringIO() @@ -226,7 +225,7 @@ No user metadata found in db file''' % POLICIES[0].name print_ring_locations(objring, 'dir', 'acct', 'con', 'obj') exp_obj = os.path.join('${DEVICE:-/srv/node*}', 'sda1', 'dir', '1', '117', '4a16154fc15c75e26ba6afadf5b1c117') - self.assertTrue(exp_obj in out.getvalue()) + self.assertIn(exp_obj, out.getvalue()) def test_print_ring_locations_partition_number(self): out = StringIO() @@ -237,8 +236,8 @@ No user metadata found in db file''' % POLICIES[0].name 'objects', '1') exp_obj2 = os.path.join('${DEVICE:-/srv/node*}', 'sdb1', 'objects', 
'1') - self.assertTrue(exp_obj1 in out.getvalue()) - self.assertTrue(exp_obj2 in out.getvalue()) + self.assertIn(exp_obj1, out.getvalue()) + self.assertIn(exp_obj2, out.getvalue()) def test_print_item_locations_invalid_args(self): # No target specified @@ -265,9 +264,9 @@ No user metadata found in db file''' % POLICIES[0].name # Test mismatch of ring and policy name (valid policy) self.assertRaises(InfoSystemExit, print_item_locations, objring, policy_name='zero') - self.assertTrue('Warning: mismatch between ring and policy name!' - in out.getvalue()) - self.assertTrue('No target specified' in out.getvalue()) + self.assertIn('Warning: mismatch between ring and policy name!', + out.getvalue()) + self.assertIn('No target specified', out.getvalue()) def test_print_item_locations_invalid_policy_no_target(self): out = StringIO() @@ -277,8 +276,8 @@ No user metadata found in db file''' % POLICIES[0].name self.assertRaises(InfoSystemExit, print_item_locations, objring, policy_name=policy_name) exp_msg = 'Warning: Policy %s is not valid' % policy_name - self.assertTrue(exp_msg in out.getvalue()) - self.assertTrue('No target specified' in out.getvalue()) + self.assertIn(exp_msg, out.getvalue()) + self.assertIn('No target specified', out.getvalue()) def test_print_item_locations_policy_object(self): out = StringIO() @@ -290,10 +289,10 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \tNone' exp_cont_msg = 'Container\tNone' exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_part_msg in out.getvalue()) - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_part_msg, out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_dashed_ring_name_partition(self): out = StringIO() @@ -306,10 +305,10 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \tNone' exp_cont_msg = 'Container\tNone' exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_part_msg in out.getvalue()) - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_part_msg, out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_with_ring(self): out = StringIO() @@ -318,16 +317,16 @@ No user metadata found in db file''' % POLICIES[0].name account_ring = ring.Ring(self.testdir, ring_name=account) print_item_locations(account_ring, account=account) exp_msg = 'Account \t%s' % account - self.assertTrue(exp_msg in out.getvalue()) + self.assertIn(exp_msg, out.getvalue()) exp_warning = 'Warning: account specified ' + \ 'but ring not named "account"' - self.assertTrue(exp_warning in out.getvalue()) + self.assertIn(exp_warning, out.getvalue()) exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\tNone' exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_no_ring(self): out = StringIO() @@ -338,9 +337,9 @@ No 
user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\tNone' exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_container_ring(self): out = StringIO() @@ -353,9 +352,9 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\t%s' % container exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_container_no_ring(self): out = StringIO() @@ -367,9 +366,9 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\t%s' % container exp_obj_msg = 'Object \tNone' - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_container_object_ring(self): out = StringIO() @@ -384,9 +383,9 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\t%s' % container exp_obj_msg = 'Object \t%s' % obj - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_item_locations_account_container_object_dashed_ring(self): out = StringIO() @@ -401,9 +400,9 @@ No user metadata found in db file''' % POLICIES[0].name exp_acct_msg = 'Account \t%s' % account exp_cont_msg = 'Container\t%s' % container exp_obj_msg = 'Object \t%s' % obj - self.assertTrue(exp_acct_msg in out.getvalue()) - self.assertTrue(exp_cont_msg in out.getvalue()) - self.assertTrue(exp_obj_msg in out.getvalue()) + self.assertIn(exp_acct_msg, out.getvalue()) + self.assertIn(exp_cont_msg, out.getvalue()) + self.assertIn(exp_obj_msg, out.getvalue()) def test_print_info(self): db_file = 'foo' @@ -431,7 +430,7 @@ No user metadata found in db file''' % POLICIES[0].name if exp_raised: self.fail("Unexpected exception raised") else: - self.assertTrue(len(out.getvalue().strip()) > 800) + self.assertGreater(len(out.getvalue().strip()), 800) controller = ContainerController( {'devices': self.testdir, 'mount_check': 'false'}) @@ -459,7 +458,7 @@ No user metadata found in db file''' % POLICIES[0].name if exp_raised: self.fail("Unexpected exception raised") else: - self.assertTrue(len(out.getvalue().strip()) > 600) + self.assertGreater(len(out.getvalue().strip()), 600) out = StringIO() exp_raised = False @@ -517,8 +516,8 @@ class TestPrintObj(TestCliInfoBase): print_obj(self.datafile, swift_dir=self.testdir) etag_msg = 'ETag: Not found in metadata' length_msg = 'Content-Length: Not found in metadata' - 
self.assertTrue(etag_msg in out.getvalue()) - self.assertTrue(length_msg in out.getvalue()) + self.assertIn(etag_msg, out.getvalue()) + self.assertIn(length_msg, out.getvalue()) def test_print_obj_with_policy(self): out = StringIO() @@ -527,15 +526,15 @@ class TestPrintObj(TestCliInfoBase): etag_msg = 'ETag: Not found in metadata' length_msg = 'Content-Length: Not found in metadata' ring_loc_msg = 'ls -lah' - self.assertTrue(etag_msg in out.getvalue()) - self.assertTrue(length_msg in out.getvalue()) - self.assertTrue(ring_loc_msg in out.getvalue()) + self.assertIn(etag_msg, out.getvalue()) + self.assertIn(length_msg, out.getvalue()) + self.assertIn(ring_loc_msg, out.getvalue()) def test_missing_etag(self): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile) - self.assertTrue('ETag: Not found in metadata' in out.getvalue()) + self.assertIn('ETag: Not found in metadata', out.getvalue()) class TestPrintObjFullMeta(TestCliInfoBase): @@ -558,7 +557,7 @@ class TestPrintObjFullMeta(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile, swift_dir=self.testdir) - self.assertTrue('/objects-1/' in out.getvalue()) + self.assertIn('/objects-1/', out.getvalue()) def test_print_obj_policy_index(self): # Check an output of policy index when current directory is in @@ -575,7 +574,7 @@ class TestPrintObjFullMeta(TestCliInfoBase): print_obj(file_name, swift_dir=self.testdir) finally: os.chdir(cwd) - self.assertTrue('X-Backend-Storage-Policy-Index: 1' in out.getvalue()) + self.assertIn('X-Backend-Storage-Policy-Index: 1', out.getvalue()) def test_print_obj_curl_command_ipv4(self): # Note: policy 2 has IPv4 addresses in its ring @@ -668,7 +667,7 @@ class TestPrintObjFullMeta(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): print_obj(test_file, swift_dir=self.testdir) - self.assertTrue('/objects-1/' in out.getvalue()) + self.assertIn('/objects-1/', out.getvalue()) def test_print_obj_no_ring(self): no_rings_dir = os.path.join(self.testdir, 'no_rings_here') @@ -677,22 +676,22 @@ class TestPrintObjFullMeta(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile, swift_dir=no_rings_dir) - self.assertTrue('d41d8cd98f00b204e9800998ecf8427e' in out.getvalue()) - self.assertTrue('Partition' not in out.getvalue()) + self.assertIn('d41d8cd98f00b204e9800998ecf8427e', out.getvalue()) + self.assertNotIn('Partition', out.getvalue()) def test_print_obj_policy_name_mismatch(self): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile, policy_name='two', swift_dir=self.testdir) ring_alert_msg = 'Warning: Ring does not match policy!' 
- self.assertTrue(ring_alert_msg in out.getvalue()) + self.assertIn(ring_alert_msg, out.getvalue()) def test_valid_etag(self): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile) - self.assertTrue('ETag: d41d8cd98f00b204e9800998ecf8427e (valid)' - in out.getvalue()) + self.assertIn('ETag: d41d8cd98f00b204e9800998ecf8427e (valid)', + out.getvalue()) def test_invalid_etag(self): with open(self.datafile, 'wb') as fp: @@ -705,15 +704,15 @@ class TestPrintObjFullMeta(TestCliInfoBase): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile) - self.assertTrue('ETag: badetag doesn\'t match file hash' - in out.getvalue()) + self.assertIn('ETag: badetag doesn\'t match file hash', + out.getvalue()) def test_unchecked_etag(self): out = StringIO() with mock.patch('sys.stdout', out): print_obj(self.datafile, check_etag=False) - self.assertTrue('ETag: d41d8cd98f00b204e9800998ecf8427e (not checked)' - in out.getvalue()) + self.assertIn('ETag: d41d8cd98f00b204e9800998ecf8427e (not checked)', + out.getvalue()) def test_print_obj_metadata(self): self.assertRaisesMessage(ValueError, 'Metadata is None', diff --git a/test/unit/cli/test_recon.py b/test/unit/cli/test_recon.py index 4532c680ba..ec056ecbb7 100644 --- a/test/unit/cli/test_recon.py +++ b/test/unit/cli/test_recon.py @@ -74,7 +74,7 @@ class TestScout(unittest.TestCase): mock_urlopen.side_effect = urllib2.URLError("") url, content, status, ts_start, ts_end = self.scout_instance.scout( ("127.0.0.1", "8080")) - self.assertTrue(isinstance(content, urllib2.URLError)) + self.assertIsInstance(content, urllib2.URLError) self.assertEqual(url, self.url) self.assertEqual(status, -1) @@ -85,7 +85,7 @@ class TestScout(unittest.TestCase): url, content, status, ts_start, ts_end = self.scout_instance.scout( ("127.0.0.1", "8080")) self.assertEqual(url, self.url) - self.assertTrue(isinstance(content, urllib2.HTTPError)) + self.assertIsInstance(content, urllib2.HTTPError) self.assertEqual(status, 404) @mock.patch('eventlet.green.urllib2.urlopen') @@ -93,7 +93,7 @@ class TestScout(unittest.TestCase): mock_urlopen.side_effect = socket.timeout("timeout") url, content, status, ts_start, ts_end = self.scout_instance.scout( ("127.0.0.1", "8080")) - self.assertTrue(isinstance(content, socket.timeout)) + self.assertIsInstance(content, socket.timeout) self.assertEqual(url, self.url) self.assertEqual(status, -1) @@ -114,7 +114,7 @@ class TestScout(unittest.TestCase): mock_urlopen.side_effect = urllib2.URLError("") url, content, status = self.scout_instance.scout_server_type( ("127.0.0.1", "8080")) - self.assertTrue(isinstance(content, urllib2.URLError)) + self.assertIsInstance(content, urllib2.URLError) self.assertEqual(url, self.server_type_url) self.assertEqual(status, -1) @@ -125,7 +125,7 @@ class TestScout(unittest.TestCase): url, content, status = self.scout_instance.scout_server_type( ("127.0.0.1", "8080")) self.assertEqual(url, self.server_type_url) - self.assertTrue(isinstance(content, urllib2.HTTPError)) + self.assertIsInstance(content, urllib2.HTTPError) self.assertEqual(status, 404) @mock.patch('eventlet.green.urllib2.urlopen') @@ -133,7 +133,7 @@ class TestScout(unittest.TestCase): mock_urlopen.side_effect = socket.timeout("timeout") url, content, status = self.scout_instance.scout_server_type( ("127.0.0.1", "8080")) - self.assertTrue(isinstance(content, socket.timeout)) + self.assertIsInstance(content, socket.timeout) self.assertEqual(url, self.server_type_url) self.assertEqual(status, -1) @@ -596,8 +596,8 @@ class 
TestReconCommands(unittest.TestCase): self.recon.server_type_check(hosts) output = stdout.getvalue() - self.assertTrue(res_container in output.splitlines()) - self.assertTrue(res_account in output.splitlines()) + self.assertIn(res_container, output.splitlines()) + self.assertIn(res_account, output.splitlines()) stdout.truncate(0) # Test ok for object server type - default @@ -607,7 +607,7 @@ class TestReconCommands(unittest.TestCase): self.recon.server_type_check([hosts[0]]) output = stdout.getvalue() - self.assertTrue(valid in output.splitlines()) + self.assertIn(valid, output.splitlines()) stdout.truncate(0) # Test for account server type @@ -618,8 +618,8 @@ class TestReconCommands(unittest.TestCase): self.recon.server_type_check(hosts) output = stdout.getvalue() - self.assertTrue(res_container in output.splitlines()) - self.assertTrue(res_object in output.splitlines()) + self.assertIn(res_container, output.splitlines()) + self.assertIn(res_object, output.splitlines()) stdout.truncate(0) # Test ok for account server type @@ -630,7 +630,7 @@ class TestReconCommands(unittest.TestCase): self.recon.server_type_check([hosts[2]]) output = stdout.getvalue() - self.assertTrue(valid in output.splitlines()) + self.assertIn(valid, output.splitlines()) stdout.truncate(0) # Test for container server type @@ -641,8 +641,8 @@ class TestReconCommands(unittest.TestCase): self.recon.server_type_check(hosts) output = stdout.getvalue() - self.assertTrue(res_account in output.splitlines()) - self.assertTrue(res_object in output.splitlines()) + self.assertIn(res_account, output.splitlines()) + self.assertIn(res_object, output.splitlines()) stdout.truncate(0) # Test ok for container server type @@ -653,7 +653,7 @@ class TestReconCommands(unittest.TestCase): self.recon.server_type_check([hosts[1]]) output = stdout.getvalue() - self.assertTrue(valid in output.splitlines()) + self.assertIn(valid, output.splitlines()) def test_get_swiftconfmd5(self): hosts = set([('10.1.1.1', 10000), @@ -672,7 +672,7 @@ class TestReconCommands(unittest.TestCase): self.recon.get_swiftconfmd5(hosts, printfn=printed.append) output = '\n'.join(printed) + '\n' - self.assertTrue("2/2 hosts matched" in output) + self.assertIn("2/2 hosts matched", output) def test_get_swiftconfmd5_mismatch(self): hosts = set([('10.1.1.1', 10000), @@ -691,9 +691,9 @@ class TestReconCommands(unittest.TestCase): self.recon.get_swiftconfmd5(hosts, printfn=printed.append) output = '\n'.join(printed) + '\n' - self.assertTrue("1/2 hosts matched" in output) - self.assertTrue("http://10.2.2.2:10000/recon/swiftconfmd5 (bogus) " - "doesn't match on disk md5sum" in output) + self.assertIn("1/2 hosts matched", output) + self.assertIn("http://10.2.2.2:10000/recon/swiftconfmd5 (bogus) " + "doesn't match on disk md5sum", output) def test_object_auditor_check(self): # Recon middleware response from an object server @@ -738,7 +738,7 @@ class TestReconCommands(unittest.TestCase): computed = response.get(name) self.assertTrue(computed) for key in keys: - self.assertTrue(key in computed) + self.assertIn(key, computed) def test_disk_usage(self): def dummy_request(*args, **kwargs): diff --git a/test/unit/cli/test_ring_builder_analyzer.py b/test/unit/cli/test_ring_builder_analyzer.py index 980a016c51..3b7ca8030d 100644 --- a/test/unit/cli/test_ring_builder_analyzer.py +++ b/test/unit/cli/test_ring_builder_analyzer.py @@ -46,7 +46,7 @@ class TestRunScenario(unittest.TestCase): # Just test that it produced some output as it ran; the fact that # this doesn't crash and produces 
output that resembles something # useful is good enough. - self.assertTrue('Rebalance' in fake_stdout.getvalue()) + self.assertIn('Rebalance', fake_stdout.getvalue()) self.assertTrue(os.path.exists(builder_path)) diff --git a/test/unit/cli/test_ringbuilder.py b/test/unit/cli/test_ringbuilder.py index 5e86b6fea1..5c38a3fbcb 100644 --- a/test/unit/cli/test_ringbuilder.py +++ b/test/unit/cli/test_ringbuilder.py @@ -443,7 +443,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): # Check that ring was created with sane value for region ring = RingBuilder.load(self.tmpfile) dev = ring.devs[-1] - self.assertTrue(dev['region'] > 0) + self.assertGreater(dev['region'], 0) def test_remove_device(self): for search_value in self.search_values: @@ -1266,8 +1266,8 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): out, err = self.run_srb(*argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 0.1) - self.assertTrue('10.00%' in out) - self.assertTrue('0.100000' in out) + self.assertIn('10.00%', out) + self.assertIn('0.100000', out) def test_set_overload_percent_strange_input(self): self.create_sample_ring() @@ -1275,8 +1275,8 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): out, err = self.run_srb(*argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 0.26) - self.assertTrue('26.00%' in out) - self.assertTrue('0.260000' in out) + self.assertIn('26.00%', out) + self.assertIn('0.260000', out) def test_server_overload_crazy_high(self): self.create_sample_ring() @@ -1284,17 +1284,17 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): out, err = self.run_srb(*argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 10.0) - self.assertTrue('Warning overload is greater than 100%' in out) - self.assertTrue('1000.00%' in out) - self.assertTrue('10.000000' in out) + self.assertIn('Warning overload is greater than 100%', out) + self.assertIn('1000.00%', out) + self.assertIn('10.000000', out) # but it's cool if you do it on purpose argv[-1] = '1000%' out, err = self.run_srb(*argv) ring = RingBuilder.load(self.tmpfile) self.assertEqual(ring.overload, 10.0) - self.assertTrue('Warning overload is greater than 100%' not in out) - self.assertTrue('1000.00%' in out) - self.assertTrue('10.000000' in out) + self.assertNotIn('Warning overload is greater than 100%', out) + self.assertIn('1000.00%', out) + self.assertIn('10.000000', out) def test_set_overload_number_of_arguments(self): self.create_sample_ring() @@ -1755,7 +1755,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv) ring = RingBuilder.load(self.tmpfile) self.assertTrue(ring.validate()) - self.assertEqual(ring.devs[3], None) + self.assertIsNone(ring.devs[3]) def test_rebalance_resets_time_remaining(self): self.create_sample_ring() @@ -1860,7 +1860,7 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin): os.remove(self.tmpfile) # loses file... 
argv = ["", backup_file, "write_builder", "24"] - self.assertEqual(ringbuilder.main(argv), None) + self.assertIsNone(ringbuilder.main(argv)) def test_warn_at_risk(self): # when the number of total part replicas (3 * 2 ** 4 = 48 in @@ -2047,7 +2047,7 @@ class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): "r1z1-10.1.1.1:2345/sdc", 100.0, "r1z1-10.1.1.1:2345/sdd", 100.0) out, err = self.run_srb("rebalance") - self.assertTrue("rebalance/repush" not in out) + self.assertNotIn("rebalance/repush", out) # 2 machines of equal size: balanceable, but not in one pass due to # min_part_hours > 0 @@ -2058,12 +2058,12 @@ class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): "r1z1-10.1.1.2:2345/sdd", 100.0) self.run_srb("pretend_min_part_hours_passed") out, err = self.run_srb("rebalance") - self.assertTrue("rebalance/repush" in out) + self.assertIn("rebalance/repush", out) # after two passes, it's all balanced out self.run_srb("pretend_min_part_hours_passed") out, err = self.run_srb("rebalance") - self.assertTrue("rebalance/repush" not in out) + self.assertNotIn("rebalance/repush", out) def test_rebalance_warning_with_overload(self): self.run_srb("create", 8, 3, 24) @@ -2075,14 +2075,14 @@ class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): "r1z1-10.1.1.1:2345/sdb", 100.0, "r1z1-10.1.1.1:2345/sdc", 120.0) out, err = self.run_srb("rebalance") - self.assertTrue("rebalance/repush" not in out) + self.assertNotIn("rebalance/repush", out) # Now we add in a really big device, but not enough partitions move # to fill it in one pass, so we see the rebalance warning. self.run_srb("add", "r1z1-10.1.1.1:2345/sdd", 99999.0) self.run_srb("pretend_min_part_hours_passed") out, err = self.run_srb("rebalance") - self.assertTrue("rebalance/repush" in out) + self.assertIn("rebalance/repush", out) def test_cached_dispersion_value(self): self.run_srb("create", 8, 3, 24) @@ -2093,18 +2093,18 @@ class TestRebalanceCommand(unittest.TestCase, RunSwiftRingBuilderMixin): "r1z1-10.1.1.1:2345/sdd", 100.0) self.run_srb('rebalance') out, err = self.run_srb() # list devices - self.assertTrue('dispersion' in out) + self.assertIn('dispersion', out) # remove cached dispersion value builder = RingBuilder.load(self.tempfile) builder.dispersion = None builder.save(self.tempfile) # now dispersion output is suppressed out, err = self.run_srb() # list devices - self.assertFalse('dispersion' in out) + self.assertNotIn('dispersion', out) # but will show up after rebalance self.run_srb('rebalance', '-f') out, err = self.run_srb() # list devices - self.assertTrue('dispersion' in out) + self.assertIn('dispersion', out) if __name__ == '__main__': From 425bb0d8b0f78feb362fac69cd75bfe8baa3f841 Mon Sep 17 00:00:00 2001 From: Samuel Merritt Date: Wed, 3 Aug 2016 16:46:48 -0700 Subject: [PATCH 115/156] Fix stale docstring in SegmentedIterable. At some point, we added stuff to listing_iter, but didn't update the docstring. I noticed this while trying to write code using a SegmentedIterable when it wouldn't take my 3-tuples like the docstring claimed it would. 
Change-Id: I8f6667e97b1277f5b403a5f6fa7f9d708bb19249 --- swift/common/request_helpers.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/swift/common/request_helpers.py b/swift/common/request_helpers.py index 65f21bebce..9d231900ca 100644 --- a/swift/common/request_helpers.py +++ b/swift/common/request_helpers.py @@ -304,10 +304,18 @@ class SegmentedIterable(object): :param req: original request object :param app: WSGI application from which segments will come + :param listing_iter: iterable yielding the object segments to fetch, - along with the byte subranges to fetch, in the - form of a tuple (object-path, first-byte, last-byte) - or (object-path, None, None) to fetch the whole thing. + along with the byte subranges to fetch, in the form of a 5-tuple + (object-path, object-etag, object-size, first-byte, last-byte). + + If object-etag is None, no MD5 verification will be done. + + If object-size is None, no length verification will be done. + + If first-byte and last-byte are None, then the entire object will be + fetched. + :param max_get_time: maximum permitted duration of a GET request (seconds) :param logger: logger object :param swift_source: value of swift.source in subrequest environ From 303e4a1d83bc94793a47756b9d0b3f169c1eeb23 Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Thu, 4 Aug 2016 03:05:40 +0000 Subject: [PATCH 116/156] missing parens in functional test I was watching the logs and noticed it creates an object named: ">" Change-Id: I8dcbb40125b84a914e3c01566ae9c3f08dc9ea0f --- test/functional/tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/tests.py b/test/functional/tests.py index 29194964d1..82b91d9a26 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -2976,7 +2976,7 @@ class TestSlo(Base): def test_slo_container_listing(self): # the listing object size should equal the sum of the size of the # segments, not the size of the manifest body - file_item = self.env.container.file(Utils.create_name) + file_item = self.env.container.file(Utils.create_name()) file_item.write( json.dumps([self.env.seg_info['seg_a']]), parms={'multipart-manifest': 'put'}) From 4f94e71d554f885233da1a055bf7972353f3c6ec Mon Sep 17 00:00:00 2001 From: Paul Dardeau Date: Sun, 31 Jul 2016 18:31:37 -0400 Subject: [PATCH 117/156] add swift-oldies man page Closes-bug: #1607017 Change-Id: I365edf77c5bf34e2e51d7f10c9ea1012c1bda8ed --- doc/manpages/swift-oldies.1 | 69 +++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 doc/manpages/swift-oldies.1 diff --git a/doc/manpages/swift-oldies.1 b/doc/manpages/swift-oldies.1 new file mode 100644 index 0000000000..3e74b5d55f --- /dev/null +++ b/doc/manpages/swift-oldies.1 @@ -0,0 +1,69 @@ +.\" +.\" Author: Paul Dardeau +.\" Copyright (c) 2016 OpenStack Foundation. +.\" +.\" Licensed under the Apache License, Version 2.0 (the "License"); +.\" you may not use this file except in compliance with the License. +.\" You may obtain a copy of the License at +.\" +.\" http://www.apache.org/licenses/LICENSE-2.0 +.\" +.\" Unless required by applicable law or agreed to in writing, software +.\" distributed under the License is distributed on an "AS IS" BASIS, +.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +.\" implied. +.\" See the License for the specific language governing permissions and +.\" limitations under the License. 
+.\" +.TH swift-oldies 1 "8/04/2016" "Linux" "OpenStack Swift" + +.SH NAME +.LP +.B swift-oldies +\- OpenStack Swift oldies tool + +.SH SYNOPSIS +.LP +.B swift-oldies +[-h|--help] [-a|--age] + + +.SH DESCRIPTION +.PP +Lists Swift processes that have been running more than a specific length of +time (in hours). This is done by scanning the list of currently executing +processes (via ps command) and examining the execution time of those python +processes whose program names begin with 'swift-'. + +Example (see all Swift processes older than two days): +swift-oldies \-a 48 + +The options are as follows: + +.RS 4 +.PD 0 +.IP "-a HOURS" +.IP "--age=HOURS" +.RS 4 +.IP "Look for processes at least HOURS old; default: 720 (30 days)" +.RE +.PD 0 + +.IP "-h" +.IP "--help" +.RS 4 +.IP "Display program help and exit" +.PD +.RE + + +.SH DOCUMENTATION +.LP +More documentation about OpenStack Swift can be found at +.BI http://swift.openstack.org/index.html + + +.SH "SEE ALSO" + +.BR swift-orphans(1) + From fb5fcb189e32746f8c884cd53cb5239a384bc070 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 2 Aug 2016 21:50:45 -0700 Subject: [PATCH 118/156] Fix encryption-delimiter interaction Previously, if a container listing produced `subdir` elements the decrypter would raise a KeyError. Additionally, update the functests so this sort of thing would have been caught at the gate. Closes-Bug: 1609904 Change-Id: Idc1907d19f90af7a086f45f8faecee9fbc3c69c2 --- swift/common/middleware/crypto/decrypter.py | 5 ++- test/functional/swift_test_client.py | 37 ++++++++++++------- test/functional/tests.py | 32 ++++++++++++---- .../middleware/crypto/test_decrypter.py | 26 +++++++++---- 4 files changed, 70 insertions(+), 30 deletions(-) diff --git a/swift/common/middleware/crypto/decrypter.py b/swift/common/middleware/crypto/decrypter.py index 46e2dbc484..e797ddde33 100644 --- a/swift/common/middleware/crypto/decrypter.py +++ b/swift/common/middleware/crypto/decrypter.py @@ -395,8 +395,9 @@ class DecrypterContContext(BaseDecrypterContext): return [new_body] def decrypt_obj_dict(self, obj_dict, key): - ciphertext = obj_dict['hash'] - obj_dict['hash'] = self.decrypt_value_with_meta(ciphertext, key) + if 'hash' in obj_dict: + ciphertext = obj_dict['hash'] + obj_dict['hash'] = self.decrypt_value_with_meta(ciphertext, key) return obj_dict def process_xml_resp(self, key, resp_iter): diff --git a/test/functional/swift_test_client.py b/test/functional/swift_test_client.py index 98262f5892..67660400a8 100644 --- a/test/functional/swift_test_client.py +++ b/test/functional/swift_test_client.py @@ -565,27 +565,38 @@ class Container(Base): files = json.loads(self.conn.response.read()) for file_item in files: - file_item['name'] = file_item['name'].encode('utf-8') - file_item['content_type'] = file_item['content_type'].\ - encode('utf-8') + for key in ('name', 'subdir', 'content_type'): + if key in file_item: + file_item[key] = file_item[key].encode('utf-8') return files elif format_type == 'xml': files = [] tree = minidom.parseString(self.conn.response.read()) - for x in tree.getElementsByTagName('object'): + container = tree.getElementsByTagName('container')[0] + for x in container.childNodes: file_item = {} - for key in ['name', 'hash', 'bytes', 'content_type', - 'last_modified']: - - file_item[key] = x.getElementsByTagName(key)[0].\ - childNodes[0].nodeValue + if x.tagName == 'object': + for key in ['name', 'hash', 'bytes', 'content_type', + 'last_modified']: + file_item[key] = x.getElementsByTagName(key)[0].\ + childNodes[0].nodeValue 
+ elif x.tagName == 'subdir': + file_item['subdir'] = x.getElementsByTagName( + 'name')[0].childNodes[0].nodeValue + else: + raise ValueError('Found unexpected element %s' + % x.tagName) files.append(file_item) for file_item in files: - file_item['name'] = file_item['name'].encode('utf-8') - file_item['content_type'] = file_item['content_type'].\ - encode('utf-8') - file_item['bytes'] = int(file_item['bytes']) + if 'subdir' in file_item: + file_item['subdir'] = file_item['subdir'].\ + encode('utf-8') + else: + file_item['name'] = file_item['name'].encode('utf-8') + file_item['content_type'] = file_item['content_type'].\ + encode('utf-8') + file_item['bytes'] = int(file_item['bytes']) return files else: content = self.conn.response.read() diff --git a/test/functional/tests.py b/test/functional/tests.py index 29194964d1..693144b59e 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -573,13 +573,19 @@ class TestContainer(Base): for format_type in [None, 'json', 'xml']: for prefix in prefixs: - files = cont.files(parms={'prefix': prefix}) + files = cont.files(parms={'prefix': prefix, + 'format': format_type}) + if isinstance(files[0], dict): + files = [x.get('name', x.get('subdir')) for x in files] self.assertEqual(files, sorted(prefix_files[prefix])) for format_type in [None, 'json', 'xml']: for prefix in prefixs: files = cont.files(parms={'limit': limit_count, - 'prefix': prefix}) + 'prefix': prefix, + 'format': format_type}) + if isinstance(files[0], dict): + files = [x.get('name', x.get('subdir')) for x in files] self.assertEqual(len(files), limit_count) for file_item in files: @@ -596,12 +602,24 @@ class TestContainer(Base): file_item = cont.file(f) self.assertTrue(file_item.write_random()) - results = cont.files() - results = cont.files(parms={'delimiter': delimiter}) - self.assertEqual(results, ['test', 'test-']) + for format_type in [None, 'json', 'xml']: + results = cont.files(parms={'format': format_type}) + if isinstance(results[0], dict): + results = [x.get('name', x.get('subdir')) for x in results] + self.assertEqual(results, ['test', 'test-bar', 'test-foo']) - results = cont.files(parms={'delimiter': delimiter, 'reverse': 'yes'}) - self.assertEqual(results, ['test-', 'test']) + results = cont.files(parms={'delimiter': delimiter, + 'format': format_type}) + if isinstance(results[0], dict): + results = [x.get('name', x.get('subdir')) for x in results] + self.assertEqual(results, ['test', 'test-']) + + results = cont.files(parms={'delimiter': delimiter, + 'format': format_type, + 'reverse': 'yes'}) + if isinstance(results[0], dict): + results = [x.get('name', x.get('subdir')) for x in results] + self.assertEqual(results, ['test-', 'test']) def testListDelimiterAndPrefix(self): cont = self.env.account.container(Utils.create_name()) diff --git a/test/unit/common/middleware/crypto/test_decrypter.py b/test/unit/common/middleware/crypto/test_decrypter.py index b70d65029b..d38cdb0950 100644 --- a/test/unit/common/middleware/crypto/test_decrypter.py +++ b/test/unit/common/middleware/crypto/test_decrypter.py @@ -874,6 +874,8 @@ class TestDecrypterContainerRequests(unittest.TestCase): pt_etag2 = 'ac0374ed4d43635f803c82469d0b5a10' key = fetch_crypto_keys()['container'] + subdir = {"subdir": "pseudo-dir/"} + obj_dict_1 = {"bytes": 16, "last_modified": "2015-04-14T23:33:06.439040", "hash": encrypt_and_append_meta( @@ -888,7 +890,7 @@ class TestDecrypterContainerRequests(unittest.TestCase): "name": "testfile2", "content_type": content_type_2} - listing = [obj_dict_1, 
obj_dict_2] + listing = [subdir, obj_dict_1, obj_dict_2] fake_body = json.dumps(listing) resp = self._make_cont_get_req(fake_body, 'json') @@ -897,11 +899,12 @@ body = resp.body self.assertEqual(len(body), int(resp.headers['Content-Length'])) body_json = json.loads(body) - self.assertEqual(2, len(body_json)) + self.assertEqual(3, len(body_json)) + self.assertDictEqual(subdir, body_json[0]) obj_dict_1['hash'] = pt_etag1 - self.assertDictEqual(obj_dict_1, body_json[0]) + self.assertDictEqual(obj_dict_1, body_json[1]) obj_dict_2['hash'] = pt_etag2 - self.assertDictEqual(obj_dict_2, body_json[1]) + self.assertDictEqual(obj_dict_2, body_json[2]) def test_GET_container_json_with_crypto_override(self): content_type_1 = 'image/jpeg' @@ -958,6 +961,10 @@ class TestDecrypterContainerRequests(unittest.TestCase): self.assertIn("Cipher must be AES_CTR_256", self.decrypter.logger.get_lines_for_level('error')[0]) + def _assert_element(self, name, expected, element): + self.assertEqual(element.tagName, name) + self._assert_element_contains_dict(expected, element) + def _assert_element_contains_dict(self, expected, element): for k, v in expected.items(): entry = element.getElementsByTagName(k) @@ -976,6 +983,7 @@ class TestDecrypterContainerRequests(unittest.TestCase): fake_body = ''' \ +test-subdir\ \ ''' + encrypt_and_append_meta(pt_etag1.encode('utf8'), key) + '''\ \ ''' resp = self._make_cont_get_req(fake_body, 'xml') self.assertEqual('200 OK', resp.status) @@ -1001,21 +1009,23 @@ class TestDecrypterContainerRequests(unittest.TestCase): self.assertEqual('testc', containers[0].attributes.getNamedItem("name").value) - objs = tree.getElementsByTagName('object') - self.assertEqual(2, len(objs)) + results = containers[0].childNodes + self.assertEqual(3, len(results)) + + self._assert_element('subdir', {"name": "test-subdir"}, results[0]) obj_dict_1 = {"bytes": "16", "last_modified": "2015-04-19T02:37:39.601660", "hash": pt_etag1, "name": "testfile", "content_type": content_type_1} - self._assert_element_contains_dict(obj_dict_1, objs[0]) + self._assert_element('object', obj_dict_1, results[1]) obj_dict_2 = {"bytes": "24", "last_modified": "2015-04-19T02:37:39.684740", "hash": pt_etag2, "name": "testfile2", "content_type": content_type_2} - self._assert_element_contains_dict(obj_dict_2, objs[1]) + self._assert_element('object', obj_dict_2, results[2]) def test_GET_container_xml_with_crypto_override(self): content_type_1 = 'image/jpeg' From 3096dc6b1e299d2e0a372f7979158d1a4dd38144 Mon Sep 17 00:00:00 2001 From: houweichao Date: Thu, 4 Aug 2016 19:31:18 +0800 Subject: [PATCH 119/156] Make logger messages translatable Log messages are normally passed through the _() function so that they can be internationalized. This commit applies that convention to the logger calls in the proxy package.
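As a rough sketch of the pattern being applied here (assuming _ is bound to a gettext translation function, as in Swift's i18n setup; the logger and path are stand-ins):

    import gettext
    import logging

    _ = gettext.gettext  # assumption: stand-in for Swift's own _() helper
    logger = logging.getLogger(__name__)
    path = '/AUTH_test'  # hypothetical value

    # Not translatable: the template is interpolated before the logger
    # (or gettext) ever sees it.
    logger.info('autocreate account %r' % path)

    # Translatable: _() receives the bare template, and the logger
    # performs the deferred %-substitution itself.
    logger.info(_('autocreate account %r'), path)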
Change-Id: I9a8e118dc6ba55e462f326aebff280289e8ab4fe --- swift/proxy/controllers/base.py | 5 +++-- swift/proxy/controllers/obj.py | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py index f9b1175e6d..ace3832c1d 100644 --- a/swift/proxy/controllers/base.py +++ b/swift/proxy/controllers/base.py @@ -1747,10 +1747,11 @@ class Controller(object): self.app.account_ring, partition, 'PUT', path, [headers] * len(nodes)) if is_success(resp.status_int): - self.app.logger.info('autocreate account %r' % path) + self.app.logger.info(_('autocreate account %r'), path) clear_info_cache(self.app, req.environ, account) else: - self.app.logger.warning('Could not autocreate account %r' % path) + self.app.logger.warning(_('Could not autocreate account %r'), + path) def GETorHEAD_base(self, req, server_type, node_iter, partition, path, concurrency=1, client_chunk_size=None): diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py index e1e8fa4266..afc3c63c83 100644 --- a/swift/proxy/controllers/obj.py +++ b/swift/proxy/controllers/obj.py @@ -1258,11 +1258,11 @@ class ECAppIter(object): pass except ChunkReadTimeout: # unable to resume in GetOrHeadHandler - self.logger.exception("Timeout fetching fragments for %r" % + self.logger.exception(_("Timeout fetching fragments for %r"), self.path) except: # noqa - self.logger.exception("Exception fetching fragments for %r" % - self.path) + self.logger.exception(_("Exception fetching fragments for" + " %r"), self.path) finally: queue.resize(2) # ensure there's room queue.put(None) @@ -1290,8 +1290,8 @@ class ECAppIter(object): try: segment = self.policy.pyeclib_driver.decode(fragments) except ECDriverError: - self.logger.exception("Error decoding fragments for %r" % - self.path) + self.logger.exception(_("Error decoding fragments for" + " %r"), self.path) raise yield segment From ce44ec45a4722681184e9a68f00b473abf723aa3 Mon Sep 17 00:00:00 2001 From: zheng yin Date: Thu, 4 Aug 2016 13:05:32 +0800 Subject: [PATCH 120/156] Make log information format Change-Id: I4a7699ce17c136490327776c0a3c3be450526814 --- swift/account/auditor.py | 2 +- swift/common/utils.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/swift/account/auditor.py b/swift/account/auditor.py index dddc3d1d91..2d3588bb8e 100644 --- a/swift/account/auditor.py +++ b/swift/account/auditor.py @@ -140,7 +140,7 @@ class AccountAuditor(Daemon): self.validate_per_policy_counts(broker) self.logger.increment('passes') self.account_passes += 1 - self.logger.debug('Audit passed for %s' % broker) + self.logger.debug(_('Audit passed for %s'), broker) except InvalidAccountInfo as e: self.logger.increment('failures') self.account_failures += 1 diff --git a/swift/common/utils.py b/swift/common/utils.py index 57e0e9be3f..0a0f3189f8 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -1420,8 +1420,8 @@ class StatsdClient(object): except IOError as err: if self.logger: self.logger.warning( - 'Error sending UDP message to %r: %s', - self._target, err) + _('Error sending UDP message to %(target)r: %(err)s'), + {'target': self._target, 'err': err}) def _open_socket(self): return socket.socket(self._sock_family, socket.SOCK_DGRAM) @@ -2453,7 +2453,8 @@ def audit_location_generator(devices, datadir, suffix='', partitions = listdir(datadir_path) except OSError as e: if logger: - logger.warning('Skipping %s because %s', datadir_path, e) + logger.warning(_('Skipping %(datadir)s 
because %(err)s'), + {'datadir': datadir_path, 'err': e}) continue for partition in partitions: part_path = os.path.join(datadir_path, partition) @@ -3460,7 +3461,7 @@ def override_bytes_from_content_type(listing_dict, logger=None): listing_dict['bytes'] = int(swift_bytes) except ValueError: if logger: - logger.exception("Invalid swift_bytes") + logger.exception(_("Invalid swift_bytes")) def clean_content_type(value): @@ -3766,7 +3767,7 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart, pass else: logger.warning( - "More than one part in a single-part response?") + _("More than one part in a single-part response?")) return string_along(response_body_iter, ranges_iter, logger) From a9fa5abdc37fe9ce40da9b6c310cef93dae1a781 Mon Sep 17 00:00:00 2001 From: Alistair Coles Date: Fri, 5 Aug 2016 14:28:22 +0100 Subject: [PATCH 121/156] Tighten up direct client unit tests Make the tests for direct_get_container and direct_get_account verify all combinations of request parameters. Change-Id: I3b929ca83b37c32927b9bf619f445d698b9bdab9 Related-Change: I846fc70ff3abdb1674152a8d9e0521c709f254c4 --- test/unit/common/test_direct_client.py | 115 ++++++++++++++++--------- 1 file changed, 73 insertions(+), 42 deletions(-) diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py index 1426f28406..cda259dec4 100644 --- a/test/unit/common/test_direct_client.py +++ b/test/unit/common/test_direct_client.py @@ -160,33 +160,47 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(expected_header_count, len(headers)) def test_direct_get_account(self): - stub_headers = HeaderKeyDict({ - 'X-Account-Container-Count': '1', - 'X-Account-Object-Count': '1', - 'X-Account-Bytes-Used': '1', - 'X-Timestamp': '1234567890', - 'X-PUT-Timestamp': '1234567890'}) + def do_test(req_params): + stub_headers = HeaderKeyDict({ + 'X-Account-Container-Count': '1', + 'X-Account-Object-Count': '1', + 'X-Account-Bytes-Used': '1', + 'X-Timestamp': '1234567890', + 'X-PUT-Timestamp': '1234567890'}) - body = '[{"count": 1, "bytes": 20971520, "name": "c1"}]' + body = '[{"count": 1, "bytes": 20971520, "name": "c1"}]' - with mocked_http_conn(200, stub_headers, body) as conn: - resp_headers, resp = direct_client.direct_get_account( - self.node, self.part, self.account, marker='marker', - prefix='prefix', delimiter='delimiter', limit=1000, - end_marker='endmarker', reverse='on') - self.assertEqual(conn.method, 'GET') - self.assertEqual(conn.path, self.account_path) + with mocked_http_conn(200, stub_headers, body) as conn: + resp_headers, resp = direct_client.direct_get_account( + self.node, self.part, self.account, **req_params) + try: + self.assertEqual(conn.method, 'GET') + self.assertEqual(conn.path, self.account_path) + self.assertEqual(conn.req_headers['user-agent'], + self.user_agent) + self.assertEqual(resp_headers, stub_headers) + self.assertEqual(json.loads(body), resp) + self.assertIn('format=json', conn.query_string) + for k, v in req_params.items(): + if v is None: + self.assertNotIn('&%s' % k, conn.query_string) + else: + self.assertIn('&%s=%s' % (k, v), conn.query_string) - self.assertEqual(conn.req_headers['user-agent'], self.user_agent) - self.assertEqual(resp_headers, stub_headers) - self.assertEqual(json.loads(body), resp) - self.assertTrue('marker=marker' in conn.query_string) - self.assertTrue('delimiter=delimiter' in conn.query_string) - self.assertTrue('limit=1000' in conn.query_string) - self.assertTrue('prefix=prefix' in conn.query_string) - 
self.assertTrue('format=json' in conn.query_string) - self.assertTrue('end_marker=endmarker' in conn.query_string) - self.assertTrue('reverse=on' in conn.query_string) + except AssertionError as err: + self.fail('Failed with params %s: %s' % (req_params, err)) + + test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter, + limit=limit, end_marker=end_marker, reverse=reverse) + for marker in (None, 'my-marker') + for prefix in (None, 'my-prefix') + for delimiter in (None, 'my-delimiter') + for limit in (None, 1000) + for end_marker in (None, 'my-endmarker') + for reverse in (None, 'on')) + + for params in test_params: + do_test(params) def test_direct_client_exception(self): stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'} @@ -331,26 +345,43 @@ class TestDirectClient(unittest.TestCase): self.assertEqual(err.http_headers, headers) def test_direct_get_container(self): - headers = HeaderKeyDict({'key': 'value'}) - body = '[{"hash": "8f4e3", "last_modified": "317260", "bytes": 209}]' + def do_test(req_params): + headers = HeaderKeyDict({'key': 'value'}) + body = ('[{"hash": "8f4e3", "last_modified": "317260", ' + '"bytes": 209}]') - with mocked_http_conn(200, headers, body) as conn: - resp_headers, resp = direct_client.direct_get_container( - self.node, self.part, self.account, self.container, - marker='marker', prefix='prefix', delimiter='delimiter', - limit=1000, end_marker='endmarker', reverse='on') + with mocked_http_conn(200, headers, body) as conn: + resp_headers, resp = direct_client.direct_get_container( + self.node, self.part, self.account, self.container, + **req_params) - self.assertEqual(conn.req_headers['user-agent'], - 'direct-client %s' % os.getpid()) - self.assertEqual(headers, resp_headers) - self.assertEqual(json.loads(body), resp) - self.assertTrue('marker=marker' in conn.query_string) - self.assertTrue('delimiter=delimiter' in conn.query_string) - self.assertTrue('limit=1000' in conn.query_string) - self.assertTrue('prefix=prefix' in conn.query_string) - self.assertTrue('format=json' in conn.query_string) - self.assertTrue('end_marker=endmarker' in conn.query_string) - self.assertTrue('reverse=on' in conn.query_string) + try: + self.assertEqual(conn.method, 'GET') + self.assertEqual(conn.path, self.container_path) + self.assertEqual(conn.req_headers['user-agent'], + self.user_agent) + self.assertEqual(headers, resp_headers) + self.assertEqual(json.loads(body), resp) + self.assertIn('format=json', conn.query_string) + for k, v in req_params.items(): + if v is None: + self.assertNotIn('&%s' % k, conn.query_string) + else: + self.assertIn('&%s=%s' % (k, v), conn.query_string) + except AssertionError as err: + self.fail('Failed with params %s: %s' % (req_params, err)) + + test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter, + limit=limit, end_marker=end_marker, reverse=reverse) + for marker in (None, 'my-marker') + for prefix in (None, 'my-prefix') + for delimiter in (None, 'my-delimiter') + for limit in (None, 1000) + for end_marker in (None, 'my-endmarker') + for reverse in (None, 'on')) + + for params in test_params: + do_test(params) def test_direct_get_container_no_content_does_not_decode_body(self): headers = {} From c5ff9932a45cccadf89d9a99759a81eec2dd337f Mon Sep 17 00:00:00 2001 From: Shashirekha Gundur Date: Thu, 4 Aug 2016 20:35:19 +0000 Subject: [PATCH 122/156] NIT: fix inconsistent naming of OpenStack Swift Maintain consistent references to OpenStack Swift throughout the manpages.
Change-Id: I2a0c2658e10a92671bfc092c0a3abaddfd8cd7d9 Closes-Bug: #1609687 --- doc/manpages/account-server.conf.5 | 4 +- doc/manpages/container-server.conf.5 | 4 +- doc/manpages/dispersion.conf.5 | 30 +++++------ doc/manpages/object-expirer.conf.5 | 64 +++++++++++------------ doc/manpages/object-server.conf.5 | 4 +- doc/manpages/proxy-server.conf.5 | 4 +- doc/manpages/swift-account-auditor.1 | 26 ++++----- doc/manpages/swift-account-info.1 | 28 +++++----- doc/manpages/swift-account-reaper.1 | 22 ++++---- doc/manpages/swift-account-replicator.1 | 38 +++++++------- doc/manpages/swift-account-server.1 | 16 +++--- doc/manpages/swift-container-auditor.1 | 28 +++++----- doc/manpages/swift-container-info.1 | 30 +++++------ doc/manpages/swift-container-replicator.1 | 36 ++++++------- doc/manpages/swift-container-server.1 | 24 ++++----- doc/manpages/swift-container-sync.1 | 18 +++---- doc/manpages/swift-container-updater.1 | 32 ++++++------ doc/manpages/swift-dispersion-populate.1 | 2 +- doc/manpages/swift-dispersion-report.1 | 2 +- doc/manpages/swift-get-nodes.1 | 26 ++++----- doc/manpages/swift-init.1 | 28 +++++----- doc/manpages/swift-object-auditor.1 | 28 +++++----- doc/manpages/swift-object-expirer.1 | 4 +- doc/manpages/swift-object-info.1 | 26 ++++----- doc/manpages/swift-object-replicator.1 | 36 ++++++------- doc/manpages/swift-object-server.1 | 22 ++++---- doc/manpages/swift-object-updater.1 | 42 +++++++-------- doc/manpages/swift-orphans.1 | 4 +- doc/manpages/swift-proxy-server.1 | 24 ++++----- doc/manpages/swift-recon.1 | 4 +- doc/manpages/swift-ring-builder.1 | 36 ++++++------- 31 files changed, 346 insertions(+), 346 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index 1ad115f53a..ef52ad05a9 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -20,7 +20,7 @@ .SH NAME .LP .B account-server.conf -\- configuration file for the openstack-swift account server +\- configuration file for the OpenStack Swift account server @@ -348,7 +348,7 @@ requested by delay_reaping. .SH DOCUMENTATION .LP More in depth documentation about the swift-account-server and -also Openstack-Swift as a whole can be found at +also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html and .BI http://swift.openstack.org diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index 1954ead453..11eb109874 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -20,7 +20,7 @@ .SH NAME .LP .B container-server.conf -\- configuration file for the openstack-swift container server +\- configuration file for the OpenStack Swift container server @@ -381,7 +381,7 @@ Internal client config file path. .SH DOCUMENTATION .LP More in depth documentation about the swift-container-server and -also Openstack-Swift as a whole can be found at +also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html and .BI http://swift.openstack.org diff --git a/doc/manpages/dispersion.conf.5 b/doc/manpages/dispersion.conf.5 index 723954747b..084bbec3de 100644 --- a/doc/manpages/dispersion.conf.5 +++ b/doc/manpages/dispersion.conf.5 @@ -14,33 +14,33 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. 
-.\" +.\" .TH dispersion.conf 5 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B dispersion.conf -\- configuration file for the openstack-swift dispersion tools +\- configuration file for the OpenStack Swift dispersion tools .SH SYNOPSIS .LP .B dispersion.conf -.SH DESCRIPTION +.SH DESCRIPTION .PP This is the configuration file used by the dispersion populate and report tools. -The file format consists of the '[dispersion]' module as the header and available parameters. -Any line that begins with a '#' symbol is ignored. +The file format consists of the '[dispersion]' module as the header and available parameters. +Any line that begins with a '#' symbol is ignored. .SH PARAMETERS -.PD 1 +.PD 1 .RS 0 .IP "\fBauth_version\fR" Authentication system API version. The default is 1.0. .IP "\fBauth_url\fR" -Authentication system URL -.IP "\fBauth_user\fR" +Authentication system URL +.IP "\fBauth_user\fR" Authentication system account/user name .IP "\fBauth_key\fR" Authentication system account/user password @@ -55,7 +55,7 @@ The default is 'publicURL'. .IP "\fBkeystone_api_insecure\fR" The default is false. .IP "\fBswift_dir\fR" -Location of openstack-swift configuration and ring files +Location of OpenStack Swift configuration and ring files .IP "\fBdispersion_coverage\fR" Percentage of partition coverage to use. The default is 1.0. .IP "\fBretries\fR" @@ -76,7 +76,7 @@ Whether to run the object report. The default is yes. .PD .SH SAMPLE -.PD 0 +.PD 0 .RS 0 .IP "[dispersion]" .IP "auth_url = https://127.0.0.1:443/auth/v1.0" @@ -94,15 +94,15 @@ Whether to run the object report. The default is yes. .IP "# container_report = yes" .IP "# object_report = yes" .RE -.PD +.PD + - .SH DOCUMENTATION .LP More in depth documentation about the swift-dispersion utilities and -also Openstack-Swift as a whole can be found at +also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html#cluster-health -and +and .BI http://swift.openstack.org diff --git a/doc/manpages/object-expirer.conf.5 b/doc/manpages/object-expirer.conf.5 index c8a47a45c1..2e5ea46a93 100644 --- a/doc/manpages/object-expirer.conf.5 +++ b/doc/manpages/object-expirer.conf.5 @@ -14,13 +14,13 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH object-expirer.conf 5 "03/15/2012" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B object-expirer.conf -\- configuration file for the openstack-swift object exprier daemon +\- configuration file for the OpenStack Swift object expirer daemon @@ -30,38 +30,38 @@ -.SH DESCRIPTION +.SH DESCRIPTION .PP -This is the configuration file used by the object expirer daemon. The daemon's -function is to query the internal hidden expiring_objects_account to discover +This is the configuration file used by the object expirer daemon. The daemon's +function is to query the internal hidden expiring_objects_account to discover objects that need to be deleted and to then delete them. The configuration file follows the python-pastedeploy syntax. The file is divided -into sections, which are enclosed by square brackets. Each section will contain a -certain number of key/value parameters which are described later. +into sections, which are enclosed by square brackets. Each section will contain a +certain number of key/value parameters which are described later. -Any line that begins with a '#' symbol is ignored. +Any line that begins with a '#' symbol is ignored. 
-You can find more information about python-pastedeploy configuration format at +You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION -.PD 1 +.PD 1 .RS 0 -This is indicated by section named [DEFAULT]. Below are the parameters that -are acceptable within this section. +This is indicated by section named [DEFAULT]. Below are the parameters that +are acceptable within this section. -.IP \fBswift_dir\fR +.IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. -.IP \fBuser\fR -The system user that the object server will run as. The default is swift. -.IP \fBlog_name\fR +.IP \fBuser\fR +The system user that the object server will run as. The default is swift. +.IP \fBlog_name\fR Label used when logging. The default is swift. -.IP \fBlog_facility\fR +.IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. -.IP \fBlog_level\fR +.IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. @@ -94,13 +94,13 @@ The default is empty. .SH PIPELINE SECTION -.PD 1 +.PD 1 .RS 0 This is indicated by section name [pipeline:main]. Below are the parameters that -are acceptable within this section. +are acceptable within this section. .IP "\fBpipeline\fR" -It is used when you need to apply a number of filters. It is a list of filters +It is used when you need to apply a number of filters. It is a list of filters ended by an application. The default should be \fB"catch_errors cache proxy-server"\fR .RE .PD @@ -108,24 +108,24 @@ ended by an application. The default should be \fB"catch_errors cache proxy-serv .SH APP SECTION -.PD 1 +.PD 1 .RS 0 This is indicated by section name [app:object-server]. Below are the parameters that are acceptable within this section. .IP "\fBuse\fR" -Entry point for paste.deploy for the object server. This is the reference to the installed python egg. -The default is \fBegg:swift#proxy\fR. See proxy-server.conf-sample for options or See proxy-server.conf manpage. +Entry point for paste.deploy for the object server. This is the reference to the installed python egg. +The default is \fBegg:swift#proxy\fR. See proxy-server.conf-sample for options or See proxy-server.conf manpage. .RE .PD .SH FILTER SECTION -.PD 1 +.PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. -Below are the filters available and respective acceptable parameters. +Below are the filters available and respective acceptable parameters. .RS 0 .IP "\fB[filter:cache]\fR" @@ -140,8 +140,8 @@ The default is \fBegg:swift#memcache\fR. See proxy-server.conf-sample for option .RE -.RS 0 -.IP "\fB[filter:catch_errors]\fR" +.RS 0 +.IP "\fB[filter:catch_errors]\fR" .RE .RS 3 .IP \fBuse\fR @@ -206,9 +206,9 @@ Path to recon cache directory. The default is /var/cache/swift. 
.SH DOCUMENTATION .LP More in depth documentation about the swift-object-expirer and -also Openstack-Swift as a whole can be found at -.BI http://swift.openstack.org/admin_guide.html -and +also OpenStack Swift as a whole can be found at +.BI http://swift.openstack.org/admin_guide.html +and .BI http://swift.openstack.org diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 24156a47c6..0649b86fb8 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -20,7 +20,7 @@ .SH NAME .LP .B object-server.conf -\- configuration file for the openstack-swift object server +\- configuration file for the OpenStack Swift object server @@ -511,7 +511,7 @@ will try to use object-replicator's rsync_timeout + 900 or fall-back to 86400 (1 .SH DOCUMENTATION .LP More in depth documentation about the swift-object-server and -also Openstack-Swift as a whole can be found at +also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html and .BI http://swift.openstack.org diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 6fd9d16ea3..22880db60d 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -20,7 +20,7 @@ .SH NAME .LP .B proxy-server.conf -\- configuration file for the openstack-swift proxy server +\- configuration file for the OpenStack Swift proxy server @@ -1036,7 +1036,7 @@ The default is 'x-container-read, x-container-write, x-container-sync-key, x-con .SH DOCUMENTATION .LP More in depth documentation about the swift-proxy-server and -also Openstack-Swift as a whole can be found at +also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/admin_guide.html and .BI http://swift.openstack.org diff --git a/doc/manpages/swift-account-auditor.1 b/doc/manpages/swift-account-auditor.1 index 14c8e1374c..258d2eb18a 100644 --- a/doc/manpages/swift-account-auditor.1 +++ b/doc/manpages/swift-account-auditor.1 @@ -14,24 +14,24 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-account-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-account-auditor -\- Openstack-swift account auditor +.B swift-account-auditor +\- OpenStack Swift account auditor .SH SYNOPSIS .LP -.B swift-account-auditor +.B swift-account-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The account auditor crawls the local account system checking the integrity of accounts -objects. If corruption is found (in the case of bit rot, for example), the file is +The account auditor crawls the local account system checking the integrity of accounts +objects. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. 
The options are as follows: @@ -46,16 +46,16 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - + .SH DOCUMENTATION .LP -More in depth documentation in regards to -.BI swift-account-auditor -and also about Openstack-Swift as a whole can be found at +More in depth documentation in regards to +.BI swift-account-auditor +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html .SH "SEE ALSO" diff --git a/doc/manpages/swift-account-info.1 b/doc/manpages/swift-account-info.1 index e34b285417..62a924d624 100644 --- a/doc/manpages/swift-account-info.1 +++ b/doc/manpages/swift-account-info.1 @@ -1,5 +1,5 @@ .\" -.\" Author: Madhuri Kumari +.\" Author: Madhuri Kumari .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. @@ -13,28 +13,28 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-account-info 1 "3/22/2014" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-account-info -\- Openstack-swift account-info tool +\- OpenStack Swift account-info tool .SH SYNOPSIS .LP .B swift-account-info -[ACCOUNT_DB_FILE] [SWIFT_DIR] +[ACCOUNT_DB_FILE] [SWIFT_DIR] -.SH DESCRIPTION +.SH DESCRIPTION .PP -This is a very simple swift tool that allows a swiftop engineer to retrieve -information about an account that is located on the storage node. One calls -the tool with a given db file as it is stored on the storage node system. -It will then return several information about that account such as; +This is a very simple swift tool that allows a swiftop engineer to retrieve +information about an account that is located on the storage node. One calls +the tool with a given db file as it is stored on the storage node system. +It will then return several information about that account such as; .PD 0 -.IP "- Account" +.IP "- Account" .IP "- Account hash " .IP "- Created timestamp " .IP "- Put timestamp " @@ -46,11 +46,11 @@ It will then return several information about that account such as; .IP "- ID" .IP "- User Metadata " .IP "- Ring Location" -.PD - +.PD + .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html .SH "SEE ALSO" diff --git a/doc/manpages/swift-account-reaper.1 b/doc/manpages/swift-account-reaper.1 index 66003a6c57..f60831361e 100644 --- a/doc/manpages/swift-account-reaper.1 +++ b/doc/manpages/swift-account-reaper.1 @@ -14,24 +14,24 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-account-reaper 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-account-reaper -\- Openstack-swift account reaper +\- OpenStack Swift account reaper .SH SYNOPSIS .LP -.B swift-account-reaper +.B swift-account-reaper [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP Removes data from status=DELETED accounts. These are accounts that have been asked to be removed by the reseller via services remove_storage_account -XMLRPC call. +XMLRPC call. 
.PP The account is not deleted immediately by the services call, but instead the account is simply marked for deletion by setting the status column in @@ -51,17 +51,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - + .SH DOCUMENTATION .LP -More in depth documentation in regards to -.BI swift-object-auditor -and also about Openstack-Swift as a whole can be found at +More in depth documentation in regards to +.BI swift-object-auditor +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-account-replicator.1 b/doc/manpages/swift-account-replicator.1 index f0b9bbbd6a..f1f86cbb7f 100644 --- a/doc/manpages/swift-account-replicator.1 +++ b/doc/manpages/swift-account-replicator.1 @@ -14,31 +14,31 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-account-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-account-replicator -\- Openstack-swift account replicator +.B swift-account-replicator +\- OpenStack Swift account replicator .SH SYNOPSIS .LP -.B swift-account-replicator +.B swift-account-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -Replication is designed to keep the system in a consistent state in the face of -temporary error conditions like network outages or drive failures. The replication -processes compare local data with each remote copy to ensure they all contain the -latest version. Account replication uses a combination of hashes and shared high +Replication is designed to keep the system in a consistent state in the face of +temporary error conditions like network outages or drive failures. The replication +processes compare local data with each remote copy to ensure they all contain the +latest version. Account replication uses a combination of hashes and shared high water marks to quickly compare subsections of each partition. .PP -Replication updates are push based. Account replication push missing records over +Replication updates are push based. Account replication push missing records over HTTP or rsync whole database files. The replicator also ensures that data is removed -from the system. When an account item is deleted a tombstone is set as the latest -version of the item. The replicator will see the tombstone and ensure that the item +from the system. When an account item is deleted a tombstone is set as the latest +version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. The options are as follows: @@ -53,17 +53,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE -.PD +.PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-account-replicator -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-account-server.1 b/doc/manpages/swift-account-server.1 index a59ec25ebe..c710cb3bdb 100644 --- a/doc/manpages/swift-account-server.1 +++ b/doc/manpages/swift-account-server.1 @@ -14,32 +14,32 @@ .\" implied. 
.\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-account-server 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-account-server -\- Openstack-swift account server +\- OpenStack Swift account server .SH SYNOPSIS .LP .B swift-account-server [CONFIG] [-h|--help] [-v|--verbose] -.SH DESCRIPTION +.SH DESCRIPTION .PP The Account Server's primary job is to handle listings of containers. The listings are stored as sqlite database files, and replicated across the cluster similar to how -objects are. +objects are. .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-account-server -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html -and +and .BI http://docs.openstack.org diff --git a/doc/manpages/swift-container-auditor.1 b/doc/manpages/swift-container-auditor.1 index 462870d114..f780b1a121 100644 --- a/doc/manpages/swift-container-auditor.1 +++ b/doc/manpages/swift-container-auditor.1 @@ -14,24 +14,24 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-container-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-container-auditor -\- Openstack-swift container auditor +.B swift-container-auditor +\- OpenStack Swift container auditor .SH SYNOPSIS .LP -.B swift-container-auditor +.B swift-container-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The container auditor crawls the local container system checking the integrity of container -objects. If corruption is found (in the case of bit rot, for example), the file is +The container auditor crawls the local container system checking the integrity of container +objects. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. The options are as follows: @@ -46,17 +46,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to -.BI swift-container-auditor -and also about Openstack-Swift as a whole can be found at +More in depth documentation in regards to +.BI swift-container-auditor +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-container-info.1 b/doc/manpages/swift-container-info.1 index 8872a63ab4..0f1ff49612 100644 --- a/doc/manpages/swift-container-info.1 +++ b/doc/manpages/swift-container-info.1 @@ -14,29 +14,29 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. 
-.\" +.\" .TH swift-container-info 1 "3/20/2013" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-container-info -\- Openstack-swift container-info tool +\- OpenStack Swift container-info tool .SH SYNOPSIS .LP .B swift-container-info -[CONTAINER_DB_FILE] [SWIFT_DIR] +[CONTAINER_DB_FILE] [SWIFT_DIR] -.SH DESCRIPTION +.SH DESCRIPTION .PP -This is a very simple swift tool that allows a swiftop engineer to retrieve +This is a very simple swift tool that allows a swiftop engineer to retrieve information about a container that is located on the storage node. -One calls the tool with a given container db file as -it is stored on the storage node system. -It will then return several information about that container such as; +One calls the tool with a given container db file as +it is stored on the storage node system. +It will then return several information about that container such as; .PD 0 -.IP "- Account it belongs to" +.IP "- Account it belongs to" .IP "- Container " .IP "- Created timestamp " .IP "- Put timestamp " @@ -50,14 +50,14 @@ It will then return several information about that container such as; .IP "- Hash " .IP "- ID " .IP "- User metadata " -.IP "- X-Container-Sync-Point 1 " -.IP "- X-Container-Sync-Point 2 " +.IP "- X-Container-Sync-Point 1 " +.IP "- X-Container-Sync-Point 2 " .IP "- Location on the ring " -.PD - +.PD + .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html .SH "SEE ALSO" diff --git a/doc/manpages/swift-container-replicator.1 b/doc/manpages/swift-container-replicator.1 index 96fb8fbd57..3b597084a8 100644 --- a/doc/manpages/swift-container-replicator.1 +++ b/doc/manpages/swift-container-replicator.1 @@ -14,31 +14,31 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-container-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-container-replicator -\- Openstack-swift container replicator +.B swift-container-replicator +\- OpenStack Swift container replicator .SH SYNOPSIS .LP -.B swift-container-replicator +.B swift-container-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -Replication is designed to keep the system in a consistent state in the face of -temporary error conditions like network outages or drive failures. The replication -processes compare local data with each remote copy to ensure they all contain the -latest version. Container replication uses a combination of hashes and shared high +Replication is designed to keep the system in a consistent state in the face of +temporary error conditions like network outages or drive failures. The replication +processes compare local data with each remote copy to ensure they all contain the +latest version. Container replication uses a combination of hashes and shared high water marks to quickly compare subsections of each partition. .PP -Replication updates are push based. Container replication push missing records over +Replication updates are push based. Container replication push missing records over HTTP or rsync whole database files. The replicator also ensures that data is removed -from the system. When an container item is deleted a tombstone is set as the latest -version of the item. The replicator will see the tombstone and ensure that the item +from the system. 
When an container item is deleted a tombstone is set as the latest +version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. The options are as follows: @@ -53,17 +53,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-container-replicator -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-container-server.1 b/doc/manpages/swift-container-server.1 index 9a478968a7..53cd1208dd 100644 --- a/doc/manpages/swift-container-server.1 +++ b/doc/manpages/swift-container-server.1 @@ -14,37 +14,37 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-container-server 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-container-server -\- Openstack-swift container server +\- OpenStack Swift container server .SH SYNOPSIS .LP .B swift-container-server [CONFIG] [-h|--help] [-v|--verbose] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The Container Server's primary job is to handle listings of objects. It doesn't know -where those objects are, just what objects are in a specific container. The listings -are stored as sqlite database files, and replicated across the cluster similar to how -objects are. Statistics are also tracked that include the total number of objects, and +The Container Server's primary job is to handle listings of objects. It doesn't know +where those objects are, just what objects are in a specific container. The listings +are stored as sqlite database files, and replicated across the cluster similar to how +objects are. Statistics are also tracked that include the total number of objects, and total storage usage for that container. .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-container-server -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html -and +and .BI http://docs.openstack.org -.LP +.LP .SH "SEE ALSO" .BR container-server.conf(5) diff --git a/doc/manpages/swift-container-sync.1 b/doc/manpages/swift-container-sync.1 index e35e559af7..6f1c2b50f0 100644 --- a/doc/manpages/swift-container-sync.1 +++ b/doc/manpages/swift-container-sync.1 @@ -14,25 +14,25 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-container-sync 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-container-sync -\- Openstack-swift container sync +\- OpenStack Swift container sync .SH SYNOPSIS .LP .B swift-container-sync [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP Swift has a feature where all the contents of a container can be mirrored to another container through background synchronization. Swift cluster operators configure their cluster to allow/accept sync requests to/from other clusters, -and the user specifies where to sync their container to along with a secret +and the user specifies where to sync their container to along with a secret synchronization key. 
.PP The swift-container-sync does the job of sending updates to the remote container. @@ -42,14 +42,14 @@ newer rows since the last sync will trigger PUTs or DELETEs to the other contain .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-container-sync -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/overview_container_sync.html -and +and .BI http://docs.openstack.org -.LP +.LP .SH "SEE ALSO" .BR container-server.conf(5) diff --git a/doc/manpages/swift-container-updater.1 b/doc/manpages/swift-container-updater.1 index a90802d985..910bbe7fa0 100644 --- a/doc/manpages/swift-container-updater.1 +++ b/doc/manpages/swift-container-updater.1 @@ -14,31 +14,31 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-container-updater 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-container-updater -\- Openstack-swift container updater +\- OpenStack Swift container updater .SH SYNOPSIS .LP -.B swift-container-updater +.B swift-container-updater [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The container updater is responsible for updating container information in the account database. +The container updater is responsible for updating container information in the account database. It will walk the container path in the system looking for container DBs and sending updates -to the account server as needed as it goes along. +to the account server as needed as it goes along. -There are times when account data can not be immediately updated. This usually occurs -during failure scenarios or periods of high load. This is where an eventual consistency -window will most likely come in to play. +There are times when account data can not be immediately updated. This usually occurs +during failure scenarios or periods of high load. This is where an eventual consistency +window will most likely come in to play. -In practice, the consistency window is only as large as the frequency at which -the updater runs and may not even be noticed as the proxy server will route +In practice, the consistency window is only as large as the frequency at which +the updater runs and may not even be noticed as the proxy server will route listing requests to the first account server which responds. The server under load may not be the one that serves subsequent listing requests – one of the other two replicas may handle the listing. 
@@ -55,16 +55,16 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-container-updater -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-dispersion-populate.1 b/doc/manpages/swift-dispersion-populate.1 index 33eccd4f5b..ef12b31c7f 100644 --- a/doc/manpages/swift-dispersion-populate.1 +++ b/doc/manpages/swift-dispersion-populate.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-dispersion-populate -\- Openstack-swift dispersion populate +\- OpenStack Swift dispersion populate .SH SYNOPSIS .LP diff --git a/doc/manpages/swift-dispersion-report.1 b/doc/manpages/swift-dispersion-report.1 index 6ccf88b611..1979e0027d 100644 --- a/doc/manpages/swift-dispersion-report.1 +++ b/doc/manpages/swift-dispersion-report.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-dispersion-report -\- Openstack-swift dispersion report +\- OpenStack Swift dispersion report .SH SYNOPSIS .LP diff --git a/doc/manpages/swift-get-nodes.1 b/doc/manpages/swift-get-nodes.1 index a9fbc65ee1..4231788de5 100644 --- a/doc/manpages/swift-get-nodes.1 +++ b/doc/manpages/swift-get-nodes.1 @@ -14,25 +14,25 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-get-nodes 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-get-nodes -\- Openstack-swift get-nodes tool +\- OpenStack Swift get-nodes tool .SH SYNOPSIS .LP -.B swift-get-nodes +.B swift-get-nodes \ [ []] - -.SH DESCRIPTION + +.SH DESCRIPTION .PP The swift-get-nodes tool can be used to find out the location where -a particular account, container or object item is located within the -swift cluster nodes. For example, if you have the account hash and a container -name that belongs to that account, you can use swift-get-nodes to lookup +a particular account, container or object item is located within the +swift cluster nodes. For example, if you have the account hash and a container +name that belongs to that account, you can use swift-get-nodes to lookup where the container resides by using the container ring. .RS 0 @@ -40,7 +40,7 @@ where the container resides by using the container ring. .RE .RS 4 -.PD 0 +.PD 0 .IP "$ swift-get-nodes /etc/swift/account.ring.gz MyAccount-12ac01446be2" .PD 0 @@ -67,12 +67,12 @@ where the container resides by using the container ring. .IP "ssh 172.24.24.32 ls -lah /srv/node/sde/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/" .IP "ssh 172.24.24.26 ls -lah /srv/node/sdv/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/ # [Handoff] " -.PD -.RE +.PD +.RE .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-init.1 b/doc/manpages/swift-init.1 index de700bfb0a..552e097349 100644 --- a/doc/manpages/swift-init.1 +++ b/doc/manpages/swift-init.1 @@ -14,25 +14,25 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. 
-.\" +.\" .TH swift-init 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-init -\- Openstack-swift swift-init tool +\- OpenStack Swift swift-init tool .SH SYNOPSIS .LP .B swift-init [ ...] [options] - -.SH DESCRIPTION + +.SH DESCRIPTION .PP The swift-init tool can be used to initialize all swift daemons available as part of -openstack-swift. Instead of calling individual init scripts for each -swift daemon, one can just use swift-init. With swift-init you can initialize -just one swift service, such as the "proxy", or a combination of them. The tool also +OpenStack Swift. Instead of calling individual init scripts for each +swift daemon, one can just use swift-init. With swift-init you can initialize +just one swift service, such as the "proxy", or a combination of them. The tool also allows one to use the keywords such as "all", "main" and "rest" for the argument. @@ -41,7 +41,7 @@ allows one to use the keywords such as "all", "main" and "rest" for the .PD 0 .RS 4 .IP "\fIproxy\fR" "4" -.IP " - Initializes the swift proxy daemon" +.IP " - Initializes the swift proxy daemon" .RE .RS 4 @@ -75,7 +75,7 @@ allows one to use the keywords such as "all", "main" and "rest" for the .IP " - Initializes all the other \fBswift background daemons\fR" .IP " (updater, replicator, auditor, reaper, etc)" .RE -.PD +.PD \fBCommands:\fR @@ -92,14 +92,14 @@ allows one to use the keywords such as "all", "main" and "rest" for the .IP "\fIstart\fR: \t\t\t starts a server" .IP "\fIstatus\fR: \t\t\t display status of tracked pids for server" .IP "\fIstop\fR: \t\t\t stops a server" -.PD +.PD .RE \fBOptions:\fR .RS 4 -.PD 0 +.PD 0 .IP "-h, --help \t\t\t show this help message and exit" .IP "-v, --verbose \t\t\t display verbose output" .IP "-w, --no-wait \t\t\t won't wait for server to start before returning @@ -112,14 +112,14 @@ allows one to use the keywords such as "all", "main" and "rest" for the .IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named." .IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`." .IP "--kill-after-timeout kill daemon and all children after kill-wait period." -.PD +.PD .RE .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-object-auditor.1 b/doc/manpages/swift-object-auditor.1 index 34d12a44f1..0772b568da 100644 --- a/doc/manpages/swift-object-auditor.1 +++ b/doc/manpages/swift-object-auditor.1 @@ -14,23 +14,23 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-object-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-object-auditor -\- Openstack-swift object auditor +.B swift-object-auditor +\- OpenStack Swift object auditor .SH SYNOPSIS .LP -.B swift-object-auditor +.B swift-object-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] [-z|--zero_byte_fps] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The object auditor crawls the local object system checking the integrity of objects. -If corruption is found (in the case of bit rot, for example), the file is +The object auditor crawls the local object system checking the integrity of objects. 
+If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. The options are as follows: @@ -46,7 +46,7 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .IP "-z ZERO_BYTE_FPS" @@ -56,13 +56,13 @@ The options are as follows: .RE .PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to -.BI swift-object-auditor -and also about Openstack-Swift as a whole can be found at +More in depth documentation in regards to +.BI swift-object-auditor +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-object-expirer.1 b/doc/manpages/swift-object-expirer.1 index 3b5b1b10ba..bac99d23a8 100644 --- a/doc/manpages/swift-object-expirer.1 +++ b/doc/manpages/swift-object-expirer.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-object-expirer -\- Openstack-swift object expirer +\- OpenStack Swift object expirer .SH SYNOPSIS .LP @@ -65,7 +65,7 @@ More in depth documentation in regards to .BI swift-object-expirer can be found at .BI http://swift.openstack.org/overview_expiring_objects.html -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-object-info.1 b/doc/manpages/swift-object-info.1 index be9c91d776..67860d59e4 100644 --- a/doc/manpages/swift-object-info.1 +++ b/doc/manpages/swift-object-info.1 @@ -14,28 +14,28 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-object-info 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-object-info -\- Openstack-swift object-info tool +\- OpenStack Swift object-info tool .SH SYNOPSIS .LP .B swift-object-info -[OBJECT_FILE] [SWIFT_DIR] +[OBJECT_FILE] [SWIFT_DIR] -.SH DESCRIPTION +.SH DESCRIPTION .PP -This is a very simple swift tool that allows a swiftop engineer to retrieve -information about an object that is located on the storage node. One calls -the tool with a given object file as it is stored on the storage node system. -It will then return several information about that object such as; +This is a very simple swift tool that allows a swiftop engineer to retrieve +information about an object that is located on the storage node. One calls +the tool with a given object file as it is stored on the storage node system. +It will then return several information about that object such as; .PD 0 -.IP "- Account it belongs to" +.IP "- Account it belongs to" .IP "- Container " .IP "- Object hash " .IP "- Content Type " @@ -44,11 +44,11 @@ It will then return several information about that object such as; .IP "- Content Length " .IP "- User Metadata " .IP "- Location on the ring " -.PD - +.PD + .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html .SH "SEE ALSO" diff --git a/doc/manpages/swift-object-replicator.1 b/doc/manpages/swift-object-replicator.1 index bbc33fee00..29c5b1b082 100644 --- a/doc/manpages/swift-object-replicator.1 +++ b/doc/manpages/swift-object-replicator.1 @@ -14,31 +14,31 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. 
-.\" +.\" .TH swift-object-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-object-replicator -\- Openstack-swift object replicator +.B swift-object-replicator +\- OpenStack Swift object replicator .SH SYNOPSIS .LP -.B swift-object-replicator +.B swift-object-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -Replication is designed to keep the system in a consistent state in the face of -temporary error conditions like network outages or drive failures. The replication -processes compare local data with each remote copy to ensure they all contain the -latest version. Object replication uses a hash list to quickly compare subsections +Replication is designed to keep the system in a consistent state in the face of +temporary error conditions like network outages or drive failures. The replication +processes compare local data with each remote copy to ensure they all contain the +latest version. Object replication uses a hash list to quickly compare subsections of each partition. .PP -Replication updates are push based. For object replication, updating is just a matter +Replication updates are push based. For object replication, updating is just a matter of rsyncing files to the peer. The replicator also ensures that data is removed -from the system. When an object item is deleted a tombstone is set as the latest -version of the item. The replicator will see the tombstone and ensure that the item +from the system. When an object item is deleted a tombstone is set as the latest +version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. The options are as follows: @@ -53,17 +53,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE .PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-object-replicator -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-object-server.1 b/doc/manpages/swift-object-server.1 index d4ae197aa2..b832598783 100644 --- a/doc/manpages/swift-object-server.1 +++ b/doc/manpages/swift-object-server.1 @@ -14,39 +14,39 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-object-server 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-object-server -\- Openstack-swift object server. +\- OpenStack Swift object server. .SH SYNOPSIS .LP .B swift-object-server [CONFIG] [-h|--help] [-v|--verbose] -.SH DESCRIPTION +.SH DESCRIPTION .PP The Object Server is a very simple blob storage server that can store, retrieve -and delete objects stored on local devices. Objects are stored as binary files +and delete objects stored on local devices. Objects are stored as binary files on the filesystem with metadata stored in the file's extended attributes (xattrs). -This requires that the underlying filesystem choice for object servers support -xattrs on files. Some filesystems, like ext3, have xattrs turned off by default. +This requires that the underlying filesystem choice for object servers support +xattrs on files. Some filesystems, like ext3, have xattrs turned off by default. 
Each object is stored using a path derived from the object name's hash and the operation's timestamp. Last write always wins, and ensures that the latest object version will be served. A deletion is also treated as a version of the file (a 0 byte file ending with -".ts", which stands for tombstone). This ensures that deleted files are replicated +".ts", which stands for tombstone). This ensures that deleted files are replicated correctly and older versions don't magically reappear due to failure scenarios. .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-object-server -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html -and +and .BI http://docs.openstack.org diff --git a/doc/manpages/swift-object-updater.1 b/doc/manpages/swift-object-updater.1 index 63bc173cd8..9ade768af4 100644 --- a/doc/manpages/swift-object-updater.1 +++ b/doc/manpages/swift-object-updater.1 @@ -14,36 +14,36 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-object-updater 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP .B swift-object-updater -\- Openstack-swift object updater +\- OpenStack Swift object updater .SH SYNOPSIS .LP .B swift-object-updater [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The object updater is responsible for updating object information in container listings. -It will check to see if there are any locally queued updates on the filesystem of each -devices, what is also known as async pending file(s), walk each one and update the +The object updater is responsible for updating object information in container listings. +It will check to see if there are any locally queued updates on the filesystem of each +devices, what is also known as async pending file(s), walk each one and update the container listing. -For example, suppose a container server is under load and a new object is put -into the system. The object will be immediately available for reads as soon as -the proxy server responds to the client with success. However, the object -server has not been able to update the object listing in the container server. -Therefore, the update would be queued locally for a later update. Container listings, +For example, suppose a container server is under load and a new object is put +into the system. The object will be immediately available for reads as soon as +the proxy server responds to the client with success. However, the object +server has not been able to update the object listing in the container server. +Therefore, the update would be queued locally for a later update. Container listings, therefore, may not immediately contain the object. This is where an eventual consistency -window will most likely come in to play. +window will most likely come in to play. -In practice, the consistency window is only as large as the frequency at which -the updater runs and may not even be noticed as the proxy server will route +In practice, the consistency window is only as large as the frequency at which +the updater runs and may not even be noticed as the proxy server will route listing requests to the first container server which responds. The server under load may not be the one that serves subsequent listing requests – one of the other two replicas may handle the listing. 
@@ -60,17 +60,17 @@ The options are as follows: .IP "-o" .IP "--once" .RS 4 -.IP "only run one pass of daemon" +.IP "only run one pass of daemon" .RE -.PD +.PD .RE - - + + .SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-object-updater -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-orphans.1 b/doc/manpages/swift-orphans.1 index 1ef3488f1d..28ffa7bf12 100644 --- a/doc/manpages/swift-orphans.1 +++ b/doc/manpages/swift-orphans.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-orphans -\- Openstack-swift orphans tool +\- OpenStack Swift orphans tool .SH SYNOPSIS .LP @@ -65,6 +65,6 @@ The options are as follows: .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-proxy-server.1 b/doc/manpages/swift-proxy-server.1 index 7352837956..a93a8cedcb 100644 --- a/doc/manpages/swift-proxy-server.1 +++ b/doc/manpages/swift-proxy-server.1 @@ -14,35 +14,35 @@ .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. -.\" +.\" .TH swift-proxy-server 1 "8/26/2011" "Linux" "OpenStack Swift" -.SH NAME +.SH NAME .LP -.B swift-proxy-server -\- Openstack-swift proxy server. +.B swift-proxy-server +\- OpenStack Swift proxy server. .SH SYNOPSIS .LP .B swift-proxy-server [CONFIG] [-h|--help] [-v|--verbose] -.SH DESCRIPTION +.SH DESCRIPTION .PP -The Swift Proxy Server is responsible for tying together the rest of the Swift architecture. -For each request, it will look up the location of the account, container, or object in the -ring and route the request accordingly. The public API is also exposed through the Proxy -Server. A large number of failures are also handled in the Proxy Server. For example, +The Swift Proxy Server is responsible for tying together the rest of the Swift architecture. +For each request, it will look up the location of the account, container, or object in the +ring and route the request accordingly. The public API is also exposed through the Proxy +Server. A large number of failures are also handled in the Proxy Server. For example, if a server is unavailable for an object PUT, it will ask the ring for a handoff server and route there instead. When objects are streamed to or from an object server, they are -streamed directly through the proxy server to or from the user the proxy server does +streamed directly through the proxy server to or from the user the proxy server does not spool them. 
.SH DOCUMENTATION .LP -More in depth documentation in regards to +More in depth documentation in regards to .BI swift-proxy-server -and also about Openstack-Swift as a whole can be found at +and also about OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/index.html diff --git a/doc/manpages/swift-recon.1 b/doc/manpages/swift-recon.1 index 15865a2500..929f0b12eb 100644 --- a/doc/manpages/swift-recon.1 +++ b/doc/manpages/swift-recon.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-recon -\- Openstack-swift recon middleware cli tool +\- OpenStack Swift recon middleware cli tool .SH SYNOPSIS .LP @@ -124,7 +124,7 @@ cronjob to run the swift-recon-cron script periodically: .SH DOCUMENTATION .LP -More documentation about Openstack-Swift can be found at +More documentation about OpenStack Swift can be found at .BI http://swift.openstack.org/index.html Also more specific documentation about swift-recon can be found at .BI http://swift.openstack.org/admin_guide.html#cluster-telemetry-and-monitoring diff --git a/doc/manpages/swift-ring-builder.1 b/doc/manpages/swift-ring-builder.1 index 12b15bb3a8..f8690fcdb7 100644 --- a/doc/manpages/swift-ring-builder.1 +++ b/doc/manpages/swift-ring-builder.1 @@ -20,7 +20,7 @@ .SH NAME .LP .B swift-ring-builder -\- Openstack-swift ring builder +\- OpenStack Swift ring builder .SH SYNOPSIS .LP @@ -124,15 +124,15 @@ Shows information about matching devices. .IP "\fBadd\fR rz-:/_ " .IP "\fBadd\fR -r -z -i -p -d -m -w " .RS 5 -Adds a device to the ring with the given information. No partitions will be -assigned to the new device until after running 'rebalance'. This is so you +Adds a device to the ring with the given information. No partitions will be +assigned to the new device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE .IP "\fBcreate\fR " .RS 5 -Creates with 2^ partitions and . +Creates with 2^ partitions and . is number of hours to restrict moving a partition more than once. .RE @@ -156,37 +156,37 @@ Attempts to rebalance the ring by reassigning partitions that haven't been recen .IP "\fBremove\fR " .RS 5 -Removes the device(s) from the ring. This should normally just be used for -a device that has failed. For a device you wish to decommission, it's best -to set its weight to 0, wait for it to drain all its data, then use this -remove command. This will not take effect until after running 'rebalance'. +Removes the device(s) from the ring. This should normally just be used for +a device that has failed. For a device you wish to decommission, it's best +to set its weight to 0, wait for it to drain all its data, then use this +remove command. This will not take effect until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE .IP "\fBset_info\fR :/_" .RS 5 -Resets the device's information. This information isn't used to assign -partitions, so you can use 'write_ring' afterward to rewrite the current -ring with the newer device information. Any of the parts are optional -in the final :/_ parameter; just give what you -want to change. For instance set_info d74 _"snet: 5.6.7.8" would just +Resets the device's information. This information isn't used to assign +partitions, so you can use 'write_ring' afterward to rewrite the current +ring with the newer device information. Any of the parts are optional +in the final :/_ parameter; just give what you +want to change. 
For instance set_info d74 _"snet: 5.6.7.8" would just update the meta data for device id 74. .RE .IP "\fBset_min_part_hours\fR " .RS 5 -Changes the to the given . This should be set to -however long a full replication/update cycle takes. We're working on a way +Changes the to the given . This should be set to +however long a full replication/update cycle takes. We're working on a way to determine this more easily than scanning logs. .RE .IP "\fBset_weight\fR " .RS 5 -Resets the device's weight. No partitions will be reassigned to or from the -device until after running 'rebalance'. This is so you can make multiple +Resets the device's weight. No partitions will be reassigned to or from the +device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE @@ -216,7 +216,7 @@ calls when no rebalance is needed but you want to send out the new device inform .SH DOCUMENTATION .LP -More in depth documentation about the swift ring and also Openstack-Swift as a +More in depth documentation about the swift ring and also OpenStack Swift as a whole can be found at .BI http://swift.openstack.org/overview_ring.html, .BI http://swift.openstack.org/admin_guide.html#managing-the-rings From 4aa1ae61cb62d9f0e8ca6ceecabb0b9b40fedf96 Mon Sep 17 00:00:00 2001 From: Or Ozeri Date: Wed, 15 Jun 2016 19:56:03 +0300 Subject: [PATCH 123/156] Raise 412 response on expirer Currently, the expirer daemon treats a 412 (Precondition Failed) response as a successful DELETE. On the other hand, it treats a 404 as a failure until reclaim_age (usually a week) has passed. This patch unifies both cases under the same handling: wait for reclaim_age to pass, then delete the entry. The expirer should not delete a 412 entry right away because the 412 may be the result of a split brain, where the updated object servers are currently down. The same reasoning holds for a 404 response.
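A minimal sketch of the unified handling, assuming only the response status and the queue entry's timestamp are at hand (an illustration of the behaviour described above, not the exact code in swift/obj/expirer.py):

    import time

    HTTP_NOT_FOUND = 404
    HTTP_PRECONDITION_FAILED = 412
    RECLAIM_AGE = 7 * 24 * 60 * 60  # one week, the usual default

    def can_pop_queue_entry(status_int, timestamp):
        # Any status other than 404/412 remains an unexpected error.
        if status_int not in (HTTP_NOT_FOUND, HTTP_PRECONDITION_FAILED):
            raise ValueError('unexpected response; re-raise and retry')
        # A young 404/412 may come from a stale replica (split brain),
        # so keep the queue entry and retry the DELETE later.
        if float(timestamp) > time.time() - RECLAIM_AGE:
            return False
        # Once reclaim_age has passed, the entry can safely be removed.
        return True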
Change-Id: Icabbdd72746a211b68f266a49231881f0f4ace94 --- swift/obj/expirer.py | 5 +- test/probe/test_object_expirer.py | 90 +++++++++++++++++++++++++++---- test/unit/obj/test_expirer.py | 90 ++++++++++++++++--------------- 3 files changed, 130 insertions(+), 55 deletions(-) diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py index 115920dd6d..68d2cdd44b 100644 --- a/swift/obj/expirer.py +++ b/swift/obj/expirer.py @@ -260,7 +260,8 @@ class ObjectExpirer(Daemon): try: self.delete_actual_object(actual_obj, timestamp) except UnexpectedResponse as err: - if err.resp.status_int != HTTP_NOT_FOUND: + if err.resp.status_int not in {HTTP_NOT_FOUND, + HTTP_PRECONDITION_FAILED}: raise if float(timestamp) > time() - self.reclaim_age: # we'll have to retry the DELETE later @@ -301,4 +302,4 @@ class ObjectExpirer(Daemon): self.swift.make_request('DELETE', path, {'X-If-Delete-At': str(timestamp), 'X-Timestamp': str(timestamp)}, - (2, HTTP_PRECONDITION_FAILED)) + (2,)) diff --git a/test/probe/test_object_expirer.py b/test/probe/test_object_expirer.py index 97351c746e..b9e78d10b6 100644 --- a/test/probe/test_object_expirer.py +++ b/test/probe/test_object_expirer.py @@ -50,6 +50,15 @@ class TestObjectExpirer(ReplProbeTest): self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name) + def _check_obj_in_container_listing(self): + for obj in self.client.iter_objects(self.account, + self.container_name): + + if self.object_name == obj['name']: + return True + + return False + def test_expirer_object_split_brain(self): if len(ENABLED_POLICIES) < 2: raise SkipTest('Need more than one policy') @@ -93,12 +102,8 @@ class TestObjectExpirer(ReplProbeTest): create_timestamp) # but it is still in the listing - for obj in self.client.iter_objects(self.account, - self.container_name): - if self.object_name == obj['name']: - break - else: - self.fail('Did not find listing for %s' % self.object_name) + self.assertTrue(self._check_obj_in_container_listing(), + msg='Did not find listing for %s' % self.object_name) # clear proxy cache client.post_container(self.url, self.token, self.container_name, {}) @@ -106,10 +111,8 @@ class TestObjectExpirer(ReplProbeTest): self.expirer.once() # object is not in the listing - for obj in self.client.iter_objects(self.account, - self.container_name): - if self.object_name == obj['name']: - self.fail('Found listing for %s' % self.object_name) + self.assertFalse(self._check_obj_in_container_listing(), + msg='Found listing for %s' % self.object_name) # and validate object is tombstoned found_in_policy = None @@ -226,6 +229,73 @@ class TestObjectExpirer(ReplProbeTest): self.assertIn('x-object-meta-expired', metadata) + def _test_expirer_delete_outdated_object_version(self, object_exists): + # This test simulates a case where the expirer tries to delete + # an outdated version of an object. + # One case is where the expirer gets a 404, whereas the newest version + # of the object is offline. + # Another case is where the expirer gets a 412, since the old version + # of the object mismatches the expiration time sent by the expirer. + # In any of these cases, the expirer should retry deleting the object + # later, for as long as a reclaim age has not passed. 
+ obj_brain = BrainSplitter(self.url, self.token, self.container_name, + self.object_name, 'object', self.policy) + + obj_brain.put_container() + + if object_exists: + obj_brain.put_object() + + # currently, the object either doesn't exist, or does not have + # an expiration + + # stop primary servers and put a newer version of the object, this + # time with an expiration. only the handoff servers will have + # the new version + obj_brain.stop_primary_half() + now = time.time() + delete_at = int(now + 2.0) + obj_brain.put_object({'X-Delete-At': delete_at}) + + # make sure auto-created containers get in the account listing + Manager(['container-updater']).once() + + # update object record in the container listing + Manager(['container-replicator']).once() + + # take handoff servers down, and bring up the outdated primary servers + obj_brain.start_primary_half() + obj_brain.stop_handoff_half() + + # wait until object expiration time + while time.time() <= delete_at: + time.sleep(0.1) + + # run expirer against the outdated servers. it should fail since + # the outdated version does not match the expiration time + self.expirer.once() + + # bring all servers up, and run replicator to update servers + obj_brain.start_handoff_half() + Manager(['object-replicator']).once() + + # verify the deletion has failed by checking the container listing + self.assertTrue(self._check_obj_in_container_listing(), + msg='Did not find listing for %s' % self.object_name) + + # run expirer again, delete should now succeed + self.expirer.once() + + # verify the deletion by checking the container listing + self.assertFalse(self._check_obj_in_container_listing(), + msg='Found listing for %s' % self.object_name) + + def test_expirer_delete_returns_outdated_404(self): + self._test_expirer_delete_outdated_object_version(object_exists=False) + + def test_expirer_delete_returns_outdated_412(self): + self._test_expirer_delete_outdated_object_version(object_exists=True) + if __name__ == "__main__": unittest.main() diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py index 02a04dda01..355295082b 100644 --- a/test/unit/obj/test_expirer.py +++ b/test/unit/obj/test_expirer.py @@ -24,7 +24,7 @@ import mock import six from six.moves import urllib -from swift.common import internal_client, utils +from swift.common import internal_client, utils, swob from swift.obj import expirer @@ -55,7 +55,7 @@ class TestObjectExpirer(TestCase): self.rcache = mkdtemp() self.conf = {'recon_cache_path': self.rcache} - self.logger = debug_logger('test-recon') + self.logger = debug_logger('test-expirer') def tearDown(self): rmtree(self.rcache) @@ -185,52 +185,55 @@ class TestObjectExpirer(TestCase): self.assertEqual(len(set(x.obj_containers_in_order[:4])), 4) def test_delete_object(self): - class InternalClient(object): - - container_ring = None - - def __init__(self, test, account, container, obj): - self.test = test - self.account = account - self.container = container - self.obj = obj - self.delete_object_called = False - - class DeleteActualObject(object): - def __init__(self, test, actual_obj, timestamp): - self.test = test - self.actual_obj = actual_obj - self.timestamp = timestamp - self.called = False - - def __call__(self, actual_obj, timestamp): - self.test.assertEqual(self.actual_obj, actual_obj) - self.test.assertEqual(self.timestamp, timestamp) - self.called = True - + x = expirer.ObjectExpirer({}, logger=self.logger) + actual_obj = 'actual_obj' + timestamp = int(time()) + reclaim_ts = timestamp - x.reclaim_age 
container = 'container' obj = 'obj' - actual_obj = 'actual_obj' - timestamp = 'timestamp' - x = expirer.ObjectExpirer({}, logger=self.logger) - x.swift = \ - InternalClient(self, x.expiring_objects_account, container, obj) - x.delete_actual_object = \ - DeleteActualObject(self, actual_obj, timestamp) + http_exc = { + resp_code: + internal_client.UnexpectedResponse( + str(resp_code), swob.HTTPException(status=resp_code)) + for resp_code in {404, 412, 500} + } + exc_other = Exception() - delete_object_called = [] + def check_call_to_delete_object(exc, ts, should_pop): + x.logger.clear() + start_reports = x.report_objects + with mock.patch.object(x, 'delete_actual_object', + side_effect=exc) as delete_actual: + with mock.patch.object(x, 'pop_queue') as pop_queue: + x.delete_object(actual_obj, ts, container, obj) - def pop_queue(c, o): - self.assertEqual(container, c) - self.assertEqual(obj, o) - delete_object_called[:] = [True] + delete_actual.assert_called_once_with(actual_obj, ts) + log_lines = x.logger.get_lines_for_level('error') + if should_pop: + pop_queue.assert_called_once_with(container, obj) + self.assertEqual(start_reports + 1, x.report_objects) + self.assertFalse(log_lines) + else: + self.assertFalse(pop_queue.called) + self.assertEqual(start_reports, x.report_objects) + self.assertEqual(1, len(log_lines)) + self.assertIn('Exception while deleting object container obj', + log_lines[0]) - x.pop_queue = pop_queue + # verify pop_queue logic on exceptions + for exc, ts, should_pop in [(None, timestamp, True), + (http_exc[404], timestamp, False), + (http_exc[412], timestamp, False), + (http_exc[500], reclaim_ts, False), + (exc_other, reclaim_ts, False), + (http_exc[404], reclaim_ts, True), + (http_exc[412], reclaim_ts, True)]: - x.delete_object(actual_obj, timestamp, container, obj) - self.assertTrue(delete_object_called) - self.assertTrue(x.delete_actual_object.called) + try: + check_call_to_delete_object(exc, ts, should_pop) + except AssertionError as err: + self.fail("Failed on %r at %f: %s" % (exc, ts, err)) def test_report(self): x = expirer.ObjectExpirer({}, logger=self.logger) @@ -710,7 +713,7 @@ self.assertRaises(internal_client.UnexpectedResponse, x.delete_actual_object, '/path/to/object', '1234') - def test_delete_actual_object_handles_412(self): + def test_delete_actual_object_raises_412(self): def fake_app(env, start_response): start_response('412 Precondition Failed', @@ -720,7 +723,8 @@ internal_client.loadapp = lambda *a, **kw: fake_app x = expirer.ObjectExpirer({}) - x.delete_actual_object('/path/to/object', '1234') + self.assertRaises(internal_client.UnexpectedResponse, + x.delete_actual_object, '/path/to/object', '1234') def test_delete_actual_object_does_not_handle_odd_stuff(self): From 8b5578c362125d0ec7efdc5742d185b344f892f0 Mon Sep 17 00:00:00 2001 From: Petr Kovar Date: Mon, 25 Jul 2016 18:34:21 +0200 Subject: [PATCH 124/156] [install-guide] Include environment-networking environment-networking.rst and edit_hosts_file.txt were left out when swift-specific content was moved from the OpenStack Installation Guide to the swift install-guide.
Change-Id: I334cca0634e3071a7ea285e6ad49ed9baaf7dca8 Partially-Implements: blueprint projectspecificinstallguides Closes-Bug: 1605021 --- install-guide/source/edit_hosts_file.txt | 19 ++++++ .../source/environment-networking.rst | 66 +++++++++++++++++++ install-guide/source/index.rst | 1 + 3 files changed, 86 insertions(+) create mode 100644 install-guide/source/edit_hosts_file.txt create mode 100644 install-guide/source/environment-networking.rst diff --git a/install-guide/source/edit_hosts_file.txt b/install-guide/source/edit_hosts_file.txt new file mode 100644 index 0000000000..2e9bc9659f --- /dev/null +++ b/install-guide/source/edit_hosts_file.txt @@ -0,0 +1,19 @@ +Edit the ``/etc/hosts`` file to contain the following: + +.. code-block:: none + + # controller + 10.0.0.11 controller + + # compute1 + 10.0.0.31 compute1 + + # block1 + 10.0.0.41 block1 + + # object1 + 10.0.0.51 object1 + + # object2 + 10.0.0.52 object2 + diff --git a/install-guide/source/environment-networking.rst b/install-guide/source/environment-networking.rst new file mode 100644 index 0000000000..befb309fa7 --- /dev/null +++ b/install-guide/source/environment-networking.rst @@ -0,0 +1,66 @@ +.. _networking: + +Configure networking +~~~~~~~~~~~~~~~~~~~~ + +Before you start deploying the Object Storage service in your OpenStack +environment, configure networking for two additional storage nodes. + +First node +---------- + +Configure network interfaces +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Configure the management interface: + + * IP address: ``10.0.0.51`` + + * Network mask: ``255.255.255.0`` (or ``/24``) + + * Default gateway: ``10.0.0.1`` + +Configure name resolution +^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Set the hostname of the node to ``object1``. + +#. .. include:: edit_hosts_file.txt + +#. Reboot the system to activate the changes. + +Second node +----------- + +Configure network interfaces +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* Configure the management interface: + + * IP address: ``10.0.0.52`` + + * Network mask: ``255.255.255.0`` (or ``/24``) + + * Default gateway: ``10.0.0.1`` + +Configure name resolution +^^^^^^^^^^^^^^^^^^^^^^^^^ + +#. Set the hostname of the node to ``object2``. + +#. .. include:: edit_hosts_file.txt + +#. Reboot the system to activate the changes. + +.. warning:: + + Some distributions add an extraneous entry in the ``/etc/hosts`` + file that resolves the actual hostname to another loopback IP + address such as ``127.0.1.1``. You must comment out or remove this + entry to prevent name resolution problems. **Do not remove the + 127.0.0.1 entry.** + +.. note:: + + To reduce complexity of this guide, we add host entries for optional + services regardless of whether you choose to deploy them. 
diff --git a/install-guide/source/index.rst b/install-guide/source/index.rst index 7869e05f0c..c6aee024e7 100644 --- a/install-guide/source/index.rst +++ b/install-guide/source/index.rst @@ -6,6 +6,7 @@ Object Storage service :maxdepth: 2 get_started.rst + environment-networking.rst controller-install.rst storage-install.rst initial-rings.rst From bd29a3e3c7e49b1d7749c605bc150de0af168fd4 Mon Sep 17 00:00:00 2001 From: KATO Tomoyuki Date: Tue, 9 Aug 2016 13:17:38 +0900 Subject: [PATCH 125/156] Remove the duplicated word 'be' Change-Id: I3ff4e7135d8d10c62dfcde90f34befe328ac39b2 --- etc/account-server.conf-sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 3a4a163a16..7ef875e777 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -158,7 +158,7 @@ use = egg:swift#recon # seconds; 2592000 = 30 days for example. # delay_reaping = 0 # -# If the account fails to be be reaped due to a persistent error, the +# If the account fails to be reaped due to a persistent error, the # account reaper will log a message such as: # Account has not been reaped since # You can search logs for this message if space is not being reclaimed From ddbab0509442e55ca6a68b9ef9a879e49e0e4f67 Mon Sep 17 00:00:00 2001 From: Thiago da Silva Date: Fri, 5 Aug 2016 14:22:28 -0400 Subject: [PATCH 126/156] add reminder how to run debug func tests Added comments on how to run the in-process mode and specific test cases. Change-Id: I485755996b15753323d30de09914d35e262fcedc Signed-off-by: Thiago da Silva --- .functests | 3 +++ doc/source/development_guidelines.rst | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.functests b/.functests index af989f50ff..5e3b177176 100755 --- a/.functests +++ b/.functests @@ -1,5 +1,8 @@ #!/bin/bash +# How-To debug functional tests: +# SWIFT_TEST_IN_PROCESS=1 tox -e func -- --pdb test.functional.tests.TestFile.testCopy + SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))") set -e diff --git a/doc/source/development_guidelines.rst b/doc/source/development_guidelines.rst index 8097abf8b2..17e4aed984 100644 --- a/doc/source/development_guidelines.rst +++ b/doc/source/development_guidelines.rst @@ -132,6 +132,12 @@ tox environment:: tox -e func-in-process-fast-post +To debug the functional tests, use the 'in-process test' mode and pass the +``--pdb`` flag to tox:: + + SWIFT_TEST_IN_PROCESS=1 tox -e func -- --pdb \ + test.functional.tests.TestFile.testCopy + The 'in-process test' mode searches for ``proxy-server.conf`` and ``swift.conf`` config files from which it copies config options and overrides some options to suit in process testing. The search will first look for config @@ -236,4 +242,3 @@ another year added, and date ranges are not needed.:: # implied. # See the License for the specific language governing permissions and # limitations under the License. - From 44ba3c310a90f6db49417220ca979fd945ca1799 Mon Sep 17 00:00:00 2001 From: Doron Chen Date: Wed, 10 Aug 2016 09:29:44 +0300 Subject: [PATCH 127/156] Added a test for delayed reap. The test schedules a 3-second delayed account reaping. It checks that nothing is reaped by an immediate reaper run, and that full reaping occurs 3 seconds later.
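The delay comes from the reaper's existing delay_reaping option (in seconds; see the account-server.conf-sample hunk in the previous patch). A rough sketch of the check the test relies on, with the helper name and exact comparison assumed for illustration:

    import time

    def ready_to_reap(delete_timestamp, delay_reaping=3.0):
        # An account marked deleted at delete_timestamp is skipped until
        # delay_reaping seconds have elapsed; only then is it reaped.
        return time.time() - float(delete_timestamp) > delay_reaping

With delay_reaping set to '3' as in the test below, a reaper pass run immediately after the account DELETE should reap nothing, while a pass run 3 seconds later should reap everything.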
Change-Id: I0ab954ed3c59d808f32d84dc53fd512fd0a651be --- test/probe/test_account_reaper.py | 75 +++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 13 deletions(-) diff --git a/test/probe/test_account_reaper.py b/test/probe/test_account_reaper.py index f5d2efee87..bc83d7716b 100644 --- a/test/probe/test_account_reaper.py +++ b/test/probe/test_account_reaper.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from time import sleep import uuid import unittest from swiftclient import client -from swift.common.storage_policy import POLICIES +from swift.account import reaper +from swift.common import utils from swift.common.manager import Manager from swift.common.direct_client import direct_delete_account, \ direct_get_object, direct_head_container, ClientException @@ -25,18 +27,19 @@ from test.probe.common import ReplProbeTest, ENABLED_POLICIES class TestAccountReaper(ReplProbeTest): - - def test_sync(self): - all_objects = [] + def setUp(self): + super(TestAccountReaper, self).setUp() + self.all_objects = [] # upload some containers + body = 'test-body' for policy in ENABLED_POLICIES: container = 'container-%s-%s' % (policy.name, uuid.uuid4()) client.put_container(self.url, self.token, container, headers={'X-Storage-Policy': policy.name}) obj = 'object-%s' % uuid.uuid4() - body = 'test-body' client.put_object(self.url, self.token, container, obj, body) - all_objects.append((policy, container, obj)) + self.all_objects.append((policy, container, obj)) + policy.load_ring('/etc/swift') Manager(['container-updater']).once() @@ -50,13 +53,18 @@ class TestAccountReaper(ReplProbeTest): len(ENABLED_POLICIES) * len(body)) part, nodes = self.account_ring.get_nodes(self.account) + for node in nodes: direct_delete_account(node, part, self.account) + def test_sync(self): # run the reaper Manager(['account-reaper']).once() - for policy, container, obj in all_objects: + self._verify_account_reaped() + + def _verify_account_reaped(self): + for policy, container, obj in self.all_objects: # verify that any container deletes were at same timestamp cpart, cnodes = self.container_ring.get_nodes( self.account, container) @@ -72,7 +80,6 @@ class TestAccountReaper(ReplProbeTest): # 'X-Backend-DELETE-Timestamp' confirms it was deleted self.assertTrue(delete_time) delete_times.add(delete_time) - else: # Container replicas may not yet be deleted if we have a # policy with object replicas < container replicas, so @@ -82,8 +89,8 @@ class TestAccountReaper(ReplProbeTest): self.assertEqual(1, len(delete_times), delete_times) # verify that all object deletes were at same timestamp - object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/') - part, nodes = object_ring.get_nodes(self.account, container, obj) + part, nodes = policy.object_ring.get_nodes(self.account, + container, obj) headers = {'X-Backend-Storage-Policy-Index': int(policy)} delete_times = set() for node in nodes: @@ -104,7 +111,7 @@ class TestAccountReaper(ReplProbeTest): # run replicators and updaters self.get_to_final_state() - for policy, container, obj in all_objects: + for policy, container, obj in self.all_objects: # verify that ALL container replicas are now deleted cpart, cnodes = self.container_ring.get_nodes( self.account, container) @@ -123,10 +130,11 @@ class TestAccountReaper(ReplProbeTest): else: self.fail('Found un-reaped /%s/%s on %r' % (self.account, container, cnode)) + self.assertEqual(1, len(delete_times)) # sanity check that object state is 
still consistent...
-            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
-            part, nodes = object_ring.get_nodes(self.account, container, obj)
+            part, nodes = policy.object_ring.get_nodes(self.account,
+                                                       container, obj)
             headers = {'X-Backend-Storage-Policy-Index': int(policy)}
             delete_times = set()
             for node in nodes:
@@ -144,6 +152,47 @@
                           (self.account, container, obj, node, policy))
             self.assertEqual(1, len(delete_times))

+    def test_delayed_reap(self):
+        # define reapers which are supposed to operate 3 seconds later
+        account_reapers = []
+        for conf_file in self.configs['account-server'].values():
+            conf = utils.readconf(conf_file, 'account-reaper')
+            conf['delay_reaping'] = '3'
+            account_reapers.append(reaper.AccountReaper(conf))
+
+        self.assertTrue(account_reapers)
+
+        # run reaper, and make sure that nothing is reaped
+        for account_reaper in account_reapers:
+            account_reaper.run_once()
+
+        for policy, container, obj in self.all_objects:
+            cpart, cnodes = self.container_ring.get_nodes(
+                self.account, container)
+            for cnode in cnodes:
+                try:
+                    direct_head_container(cnode, cpart, self.account,
+                                          container)
+                except ClientException:
+                    self.fail("Nothing should be reaped. Object should exist")
+
+            part, nodes = policy.object_ring.get_nodes(self.account,
+                                                       container, obj)
+            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
+            for node in nodes:
+                try:
+                    direct_get_object(node, part, self.account,
+                                      container, obj, headers=headers)
+                except ClientException:
+                    self.fail("Nothing should be reaped. Object should exist")
+
+        # wait 3 seconds, run reaper, and make sure that all is reaped
+        sleep(3)
+        for account_reaper in account_reapers:
+            account_reaper.run_once()
+
+        self._verify_account_reaped()
+

 if __name__ == "__main__":
     unittest.main()

From ed772236c7289d178561ce014beb4879f726fc48 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20Lis=C3=A1k?=
Date: Thu, 22 Oct 2015 10:19:49 +0200
Subject: [PATCH 128/156] Change schedule priority of daemon/server in config

The goal is to modify the schedule priority and the I/O scheduling
class and priority of a daemon/server via configuration. The setting is
optional; the default keeps the current behaviour.

Use case: prioritize the object-server over the object-auditor, because
all user requests need to be served during peak hours while auditing
can wait.
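As a sketch of the intended usage (the option names are those introduced by
this patch; the values are only examples, not recommendations), an operator
could leave the object-server at its current priority and demote the auditor
in object-server.conf:

    [app:object-server]
    # nice_priority / ionice_class / ionice_priority left unset:
    # keep current behaviour

    [object-auditor]
    nice_priority = 10
    ionice_class = IOPRIO_CLASS_BE
    ionice_priority = 7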
Co-Authored-By: Clay Gerrard DocImpact Change-Id: I1018a18f4706daabdb84574ffd9a58d831e68396 --- doc/manpages/account-server.conf.5 | 60 ++++++++++++ doc/manpages/container-server.conf.5 | 72 +++++++++++++++ doc/manpages/object-expirer.conf.5 | 24 +++++ doc/manpages/object-server.conf.5 | 66 +++++++++++++ doc/manpages/proxy-server.conf.5 | 30 ++++++ doc/source/deployment_guide.rst | 89 +++++++++++++++++- etc/account-server.conf-sample | 60 ++++++++++++ etc/container-reconciler.conf-sample | 24 +++++ etc/container-server.conf-sample | 72 +++++++++++++++ etc/object-expirer.conf-sample | 20 ++++ etc/object-server.conf-sample | 72 +++++++++++++++ etc/proxy-server.conf-sample | 24 +++++ swift/common/daemon.py | 3 + swift/common/utils.py | 133 ++++++++++++++++++++++++++- swift/common/wsgi.py | 3 + test/unit/common/test_utils.py | 68 +++++++++++++- 16 files changed, 816 insertions(+), 4 deletions(-) diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5 index ef52ad05a9..b9f8b9395c 100644 --- a/doc/manpages/account-server.conf.5 +++ b/doc/manpages/account-server.conf.5 @@ -125,6 +125,18 @@ You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. The default is 1%. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -172,6 +184,18 @@ To handle all verbs, including replication verbs, do not specify set to a true value (e.g. "true" or "1"). To handle only non-replication verbs, set to "false". Unless you have a separate replication network, you should not specify any value for "replication_server". The default is empty. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -281,6 +305,18 @@ Format of the rysnc module where the replicator will send data. See etc/rsyncd.conf-sample for some usage examples. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. 
+.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE @@ -303,6 +339,18 @@ Will audit, at most, 1 account per device per interval. The default is 1800 seco Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE @@ -339,6 +387,18 @@ You can search logs for this message if space is not being reclaimed after you delete account(s). Default is 2592000 seconds (30 days). This is in addition to any time requested by delay_reaping. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index 11eb109874..b396398d4b 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -131,6 +131,18 @@ You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. The default is 1%. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. 
Work only with ionice_class. .RE .PD @@ -184,6 +196,18 @@ To handle all verbs, including replication verbs, do not specify set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server". +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -293,6 +317,18 @@ Format of the rysnc module where the replicator will send data. See etc/rsyncd.conf-sample for some usage examples. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE @@ -322,6 +358,18 @@ Slowdown will sleep that amount between containers. The default is 0.01 seconds. Seconds to suppress updating an account that has generated an error. The default is 60 seconds. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -344,6 +392,18 @@ Will audit, at most, 1 container per device per interval. The default is 1800 se Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. 
I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE @@ -372,6 +432,18 @@ Connection timeout to external services. The default is 5 seconds. Server errors from requests will be retried by default. The default is 3. .IP \fBinternal_client_conf_path\fR Internal client config file path. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD diff --git a/doc/manpages/object-expirer.conf.5 b/doc/manpages/object-expirer.conf.5 index 2e5ea46a93..1e98216c10 100644 --- a/doc/manpages/object-expirer.conf.5 +++ b/doc/manpages/object-expirer.conf.5 @@ -88,6 +88,18 @@ The default is 1. The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -115,6 +127,18 @@ that are acceptable within this section. .IP "\fBuse\fR" Entry point for paste.deploy for the object server. This is the reference to the installed python egg. The default is \fBegg:swift#proxy\fR. See proxy-server.conf-sample for options or See proxy-server.conf manpage. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. 
.RE .PD diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 0649b86fb8..51e5bccf7b 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -142,6 +142,18 @@ backend node. The default is 60. The default is 65536. .IP \fBdisk_chunk_size\fR The default is 65536. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -233,6 +245,24 @@ version 3.0 or greater. If you set "splice = yes" but the kernel does not support it, error messages will appear in the object server logs at startup, but your object servers should continue to function. The default is false. +.IP \fBnode_timeout\fR +Request timeout to external services. The default is 3 seconds. +.IP \fBconn_timeout\fR +Connection timeout to external services. The default is 0.5 seconds. +.IP \fBcontainer_update_timeout\fR +Time to wait while sending a container update on object update. The default is 1 second. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -386,6 +416,18 @@ The handoffs_first and handoff_delete are options for a special case such as disk full in the cluster. These two options SHOULD NOT BE CHANGED, except for such an extreme situations. (e.g. disks filled up or are about to fill up. Anyway, DO NOT let your drives fill up). +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE @@ -461,6 +503,18 @@ Slowdown will sleep that amount between objects. The default is 0.01 seconds. The recon_cache_path simply sets the directory where stats for a few items will be stored. 
Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write. The default is /var/cache/swift. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -503,6 +557,18 @@ points and report the result after a full scan. .IP \fBrsync_tempfile_timeout\fR Time elapsed in seconds before rsync tempfiles will be unlinked. Config value of "auto" will try to use object-replicator's rsync_timeout + 900 or fall-back to 86400 (1 day). +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 22880db60d..6c11368fe8 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -143,6 +143,18 @@ This is very useful when one is managing more than one swift cluster. Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar) .IP \fBstrict_cors_mode\fR The default is true. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD @@ -1030,6 +1042,24 @@ These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but usually indicates administrative responsibilities. The default is 'x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control'. +.IP \fBrate_limit_after_segment\fR +Start rate-limiting object segments after the Nth segment of a segmented +object. 
The default is 10 segments. +.IP \fBrate_limit_segments_per_sec\fR +Once segment rate-limiting kicks in for an object, limit segments served to N +per second. The default is 1. +.IP \fBnice_priority\fR +Modify scheduling priority of server processes. Niceness values range from -20 +(most favorable to the process) to 19 (least favorable to the process). +The default does not modify priority. +.IP \fBionice_class\fR +Modify I/O scheduling class of server processes. I/O niceness class values +are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +Work only with ionice_priority. +.IP \fBionice_priority\fR +Modify I/O scheduling priority of server processes. I/O niceness priority +is a number which goes from 0 to 7. The higher the value, the lower +the I/O priority of the process. Work only with ionice_class. .RE .PD diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index c8aae9e555..524bab8c5c 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -1,4 +1,4 @@ -================ + Deployment Guide ================ @@ -515,6 +515,25 @@ network_chunk_size 65536 Size of chunks to read/write over t disk_chunk_size 65536 Size of chunks to read/write to disk container_update_timeout 1 Time to wait while sending a container update on object update. +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + The default does not modify class and + priority. Linux supports io scheduling + priorities and classes since 2.6.13 with + the CFQ io scheduler. + Work only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Work only with + ionice_class. ================================ ========== ========================================== .. _object-server-options: @@ -821,6 +840,24 @@ db_preallocation off If you don't mind the extra disk sp in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation. +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + The default does not modify class and + priority. Linux supports io scheduling + priorities and classes since 2.6.13 + with the CFQ io scheduler. + Work only with ionice_priority. +ionice_priority None I/O scheduling priority of server processes. + I/O niceness priority is a number which + goes from 0 to 7. The higher the value, + the lower the I/O priority of the process. + Work only with ionice_class. =============================== ========== ============================================ [container-server] @@ -1035,6 +1072,24 @@ fallocate_reserve 1% You can set fallocate_reserve to th they completely run out of space; you can make the services pretend they're out of space early. 
+nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + The default does not modify class and + priority. Linux supports io scheduling + priorities and classes since 2.6.13 with + the CFQ io scheduler. + Work only with ionice_priority. +ionice_priority None I/O scheduling priority of server processes. + I/O niceness priority is a number which + goes from 0 to 7. The higher the value, + the lower the I/O priority of the process. + Work only with ionice_class. =============================== ========== ============================================= [account-server] @@ -1276,6 +1331,28 @@ disallowed_sections swift.valid_api_versions Allows the abili the dict level with a ".". expiring_objects_container_divisor 86400 expiring_objects_account_name expiring_objects +nice_priority None Scheduling priority of server + processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class values + are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and + IOPRIO_CLASS_IDLE. + The default does not + modify class and priority. Linux + supports io scheduling priorities + and classes since 2.6.13 with + the CFQ io scheduler. + Work only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower + the I/O priority of the process. + Work only with ionice_class. ==================================== ======================== ======================================== [proxy-server] @@ -1542,6 +1619,16 @@ more workers, raising the number of workers and lowering the maximum number of clients serviced per worker can lessen the impact of CPU intensive or stalled requests. +The `nice_priority` parameter can be used to set program scheduling priority. +The `ionice_class` and `ionice_priority` parameters can be used to set I/O scheduling +class and priority on the systems that use an I/O scheduler that supports +I/O priorities. As at kernel 2.6.17 the only such scheduler is the Completely +Fair Queuing (CFQ) I/O scheduler. If you run your Storage servers all together +on the same servers, you can slow down the auditors or prioritize +object-server I/O via these parameters (but probably do not need to change +it on the proxy). It is a new feature and the best practices are still +being developed. + The above configuration setting should be taken as suggestions and testing of configuration settings should be done to ensure best utilization of CPU, network connectivity, and disk I/O. diff --git a/etc/account-server.conf-sample b/etc/account-server.conf-sample index 7ef875e777..63584851e2 100644 --- a/etc/account-server.conf-sample +++ b/etc/account-server.conf-sample @@ -51,6 +51,18 @@ bind_port = 6202 # space you'd like fallocate to reserve, whether there is space for the given # file size or not. Percentage will be used if the value ends with a '%'. # fallocate_reserve = 1% +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). 
+# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] pipeline = healthcheck recon account-server @@ -73,6 +85,18 @@ use = egg:swift#account # verbs, set to "False". Unless you have a separate replication network, you # should not specify any value for "replication_server". Default is empty. # replication_server = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [filter:healthcheck] use = egg:swift#healthcheck @@ -127,6 +151,18 @@ use = egg:swift#recon # rsync_module = {replication_ip}::account # # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [account-auditor] # You can override the default log routing for this app here (don't use set!): @@ -140,6 +176,18 @@ use = egg:swift#recon # # accounts_per_second = 200 # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [account-reaper] # You can override the default log routing for this app here (don't use set!): @@ -166,6 +214,18 @@ use = egg:swift#recon # Default is 2592000 seconds (30 days). This is in addition to any time # requested by delay_reaping. # reap_warn_after = 2592000 +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. 
+# Work only with ionice_class. +# ionice_class = +# ionice_priority = # Note: Put it at the beginning of the pipeline to profile all middleware. But # it is safer to put this after healthcheck. diff --git a/etc/container-reconciler.conf-sample b/etc/container-reconciler.conf-sample index 6e8f109f5d..ea8bc53a19 100644 --- a/etc/container-reconciler.conf-sample +++ b/etc/container-reconciler.conf-sample @@ -22,6 +22,18 @@ # log_statsd_default_sample_rate = 1.0 # log_statsd_sample_rate_factor = 1.0 # log_statsd_metric_prefix = +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [container-reconciler] # The reconciler will re-attempt reconciliation if the source object is not @@ -32,6 +44,18 @@ # interval = 30 # Server errors from requests will be retried by default # request_tries = 3 +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] pipeline = catch_errors proxy-logging cache proxy-server diff --git a/etc/container-server.conf-sample b/etc/container-server.conf-sample index c5452a4c1e..89ac04817d 100644 --- a/etc/container-server.conf-sample +++ b/etc/container-server.conf-sample @@ -57,6 +57,18 @@ bind_port = 6201 # space you'd like fallocate to reserve, whether there is space for the given # file size or not. Percentage will be used if the value ends with a '%'. # fallocate_reserve = 1% +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] pipeline = healthcheck recon container-server @@ -82,6 +94,18 @@ use = egg:swift#container # verbs, set to "False". Unless you have a separate replication network, you # should not specify any value for "replication_server". # replication_server = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). 
I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [filter:healthcheck] use = egg:swift#healthcheck @@ -136,6 +160,18 @@ use = egg:swift#recon # rsync_module = {replication_ip}::container # # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [container-updater] # You can override the default log routing for this app here (don't use set!): @@ -156,6 +192,18 @@ use = egg:swift#recon # account_suppression_time = 60 # # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [container-auditor] # You can override the default log routing for this app here (don't use set!): @@ -169,6 +217,18 @@ use = egg:swift#recon # # containers_per_second = 200 # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [container-sync] # You can override the default log routing for this app here (don't use set!): @@ -195,6 +255,18 @@ use = egg:swift#recon # # Internal client config file path # internal_client_conf_path = /etc/swift/internal-client.conf +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = # Note: Put it at the beginning of the pipeline to profile all middleware. But # it is safer to put this after healthcheck. 
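As an aside, not part of this patch: once a daemon has been restarted with
these options set, the effective values can be checked with standard tools
(assuming procps and util-linux are available):

    ps -o pid,ni,comm -p <pid>   # the NI column shows the nice value
    ionice -p <pid>              # prints the I/O scheduling class and priority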
diff --git a/etc/object-expirer.conf-sample b/etc/object-expirer.conf-sample index 4b161de1d5..b60c204b4b 100644 --- a/etc/object-expirer.conf-sample +++ b/etc/object-expirer.conf-sample @@ -25,6 +25,16 @@ # log_statsd_default_sample_rate = 1.0 # log_statsd_sample_rate_factor = 1.0 # log_statsd_metric_prefix = +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are realtime, best-effort and idle. I/O niceness +# priority is a number which goes from 0 to 7. The higher the value, the lower +# the I/O priority of the process. Work only with ionice_class. +# ionice_class = +# ionice_priority = [object-expirer] # interval = 300 @@ -51,6 +61,16 @@ # queue. # reclaim_age = 604800 # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are realtime, best-effort and idle. I/O niceness +# priority is a number which goes from 0 to 7. The higher the value, the lower +# the I/O priority of the process. Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] pipeline = catch_errors proxy-logging cache proxy-server diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample index b0361c4d65..a1dea066c4 100644 --- a/etc/object-server.conf-sample +++ b/etc/object-server.conf-sample @@ -68,6 +68,18 @@ bind_port = 6200 # # network_chunk_size = 65536 # disk_chunk_size = 65536 +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] pipeline = healthcheck recon object-server @@ -145,6 +157,18 @@ use = egg:swift#object # logs at startup, but your object servers should continue to function. # # splice = no +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [filter:healthcheck] use = egg:swift#healthcheck @@ -233,6 +257,18 @@ use = egg:swift#recon # than or equal to this number. By default(auto), handoff partitions will be # removed when it has successfully replicated to all the canonical nodes. # handoff_delete = auto +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). 
+# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [object-reconstructor] # You can override the default log routing for this app here (don't use set!): @@ -261,6 +297,18 @@ use = egg:swift#recon # ring_check_interval = 15 # recon_cache_path = /var/cache/swift # handoffs_first = False +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [object-updater] # You can override the default log routing for this app here (don't use set!): @@ -276,6 +324,18 @@ use = egg:swift#recon # slowdown = 0.01 # # recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [object-auditor] # You can override the default log routing for this app here (don't use set!): @@ -301,6 +361,18 @@ use = egg:swift#recon # increment a counter for every object whose size is <= to the given break # points and report the result after a full scan. # object_size_stats = +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = # The auditor will cleanup old rsync tempfiles after they are "old # enough" to delete. You can configure the time elapsed in seconds diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index a74320b1b0..3bcdc4b508 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -75,6 +75,18 @@ bind_port = 8080 # # client_timeout = 60 # eventlet_debug = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. 
I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [pipeline:main] # This sample pipeline uses tempauth and is used for SAIO dev work and @@ -218,6 +230,18 @@ use = egg:swift#proxy # exact definition of a swift_owner is up to the auth system in use, but # usually indicates administrative responsibilities. # swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = [filter:tempauth] use = egg:swift#tempauth diff --git a/swift/common/daemon.py b/swift/common/daemon.py index a5d415638f..9a14024310 100644 --- a/swift/common/daemon.py +++ b/swift/common/daemon.py @@ -88,6 +88,9 @@ def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): log_to_console=kwargs.pop('verbose', False), log_route=section_name) + # optional nice/ionice priority scheduling + utils.modify_priority(conf, logger) + # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() diff --git a/swift/common/utils.py b/swift/common/utils.py index 57e0e9be3f..08c1fd254a 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -31,6 +31,7 @@ import sys import time import uuid import functools +import platform import email.parser from hashlib import md5, sha1 from random import random, shuffle @@ -92,6 +93,10 @@ _posix_fadvise = None _libc_socket = None _libc_bind = None _libc_accept = None +# see man -s 2 setpriority +_libc_setpriority = None +# see man -s 2 syscall +_posix_syscall = None # If set to non-zero, fallocate routines will fail based on free space # available being at or below this amount, in bytes. @@ -100,6 +105,53 @@ FALLOCATE_RESERVE = 0 # the number of bytes (False). 
FALLOCATE_IS_PERCENT = False
+
+# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
+PRIO_PROCESS = 0
+
+
+# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there
+# are many like it, but this one is mine, see man -s 2 ioprio_set
+def NR_ioprio_set():
+    """Give __NR_ioprio_set value for your system."""
+    architecture = os.uname()[4]
+    arch_bits = platform.architecture()[0]
+    # check if supported system, now support only x86_64
+    if architecture == 'x86_64' and arch_bits == '64bit':
+        return 251
+    raise OSError("Swift doesn't support ionice priority for %s %s" %
+                  (architecture, arch_bits))
+
+# this syscall integer probably only works on x86_64 linux systems, you
+# can check if it's correct on yours with something like this:
+"""
+#include <stdio.h>
+#include <sys/syscall.h>
+
+int main(int argc, const char* argv[]) {
+    printf("%d\n", __NR_ioprio_set);
+    return 0;
+}
+"""
+
+# this is the value for "which" that says our who value will be a pid
+# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_WHO_PROCESS = 1
+
+
+IO_CLASS_ENUM = {
+    'IOPRIO_CLASS_RT': 1,
+    'IOPRIO_CLASS_BE': 2,
+    'IOPRIO_CLASS_IDLE': 3,
+}
+
+# the IOPRIO_PRIO_VALUE "macro" is also pulled from
+# /usr/src/linux-headers-*/include/linux/ioprio.h
+IOPRIO_CLASS_SHIFT = 13
+
+
+def IOPRIO_PRIO_VALUE(class_, data):
+    return (((class_) << IOPRIO_CLASS_SHIFT) | data)
+
 # Used by hash_path to offer a bit more security when generating hashes for
 # paths. It simply appends this value to all paths; guessing the hash a path
 # will end up with would also require knowing this suffix.
@@ -382,7 +434,7 @@ def validate_configuration():


 def load_libc_function(func_name, log_error=True,
-                       fail_if_missing=False):
+                       fail_if_missing=False, errcheck=False):
     """
     Attempt to find the function in libc, otherwise return a no-op func.

@@ -390,10 +442,13 @@ def load_libc_function(func_name, log_error=True,
     :param log_error: log an error when a function can't be found
     :param fail_if_missing: raise an exception when a function can't be
                             found. Default behavior is to return a no-op
                             function.
+    :param errcheck: boolean, if true install a wrapper on the function
+                     to check for a return value of -1, call
+                     ctypes.get_errno and raise an OSError
     """
     try:
         libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
-        return getattr(libc, func_name)
+        func = getattr(libc, func_name)
     except AttributeError:
         if fail_if_missing:
             raise
@@ -401,6 +456,14 @@
         logging.warning(_("Unable to locate %s in libc. Leaving as a "
                           "no-op."), func_name)
         return noop_libc_function
+    if errcheck:
+        def _errcheck(result, f, args):
+            if result == -1:
+                errcode = ctypes.get_errno()
+                raise OSError(errcode, os.strerror(errcode))
+            return result
+        func.errcheck = _errcheck
+    return func


 def generate_trans_id(trans_id_suffix):
@@ -3897,3 +3960,69 @@ def get_md5_socket():
         raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")

     return md5_sockfd
+
+
+def modify_priority(conf, logger):
+    """
+    Modify priority by nice and ionice.
+    """
+
+    global _libc_setpriority
+    if _libc_setpriority is None:
+        _libc_setpriority = load_libc_function('setpriority',
+                                               errcheck=True)
+
+    def _setpriority(nice_priority):
+        """
+        setpriority for this pid
+
+        :param nice_priority: valid values are -20 to 19
+        """
+        try:
+            _libc_setpriority(PRIO_PROCESS, os.getpid(),
+                              int(nice_priority))
+        except (ValueError, OSError):
+            print(_("WARNING: Unable to modify scheduling priority of process."
+                    " Keeping unchanged!
Check logs for more info. ")) + logger.exception('Unable to modify nice priority') + else: + logger.debug('set nice priority to %s' % nice_priority) + + nice_priority = conf.get('nice_priority') + if nice_priority is not None: + _setpriority(nice_priority) + + global _posix_syscall + if _posix_syscall is None: + _posix_syscall = load_libc_function('syscall', errcheck=True) + + def _ioprio_set(io_class, io_priority): + """ + ioprio_set for this process + + :param io_class: the I/O class component, can be + IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, + or IOPRIO_CLASS_IDLE + :param io_priority: priority value in the I/O class + """ + try: + io_class = IO_CLASS_ENUM[io_class] + io_priority = int(io_priority) + _posix_syscall(NR_ioprio_set(), + IOPRIO_WHO_PROCESS, + os.getpid(), + IOPRIO_PRIO_VALUE(io_class, io_priority)) + except (KeyError, ValueError, OSError): + print(_("WARNING: Unable to modify I/O scheduling class " + "and priority of process. Keeping unchanged! " + "Check logs for more info.")) + logger.exception("Unable to modify ionice priority") + else: + logger.debug('set ionice class %s priority %s', + io_class, io_priority) + + io_class = conf.get("ionice_class") + if io_class is None: + return + io_priority = conf.get("ionice_priority", 0) + _ioprio_set(io_class, io_priority) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 88e61f2293..79c3961246 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -877,6 +877,9 @@ def run_wsgi(conf_path, app_section, *args, **kwargs): print(e) return 1 + # optional nice/ionice priority scheduling + utils.modify_priority(conf, logger) + servers_per_port = int(conf.get('servers_per_port', '0') or 0) # NOTE: for now servers_per_port is object-server-only; future work could diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 707a293d6e..bfa7ddb072 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -15,7 +15,7 @@ """Tests for swift.common.utils""" from __future__ import print_function -from test.unit import temptree +from test.unit import temptree, debug_logger import ctypes import contextlib @@ -3524,6 +3524,72 @@ cluster_dfw1 = http://dfw1.host/v1/ if tempdir: shutil.rmtree(tempdir) + def test_modify_priority(self): + pid = os.getpid() + logger = debug_logger() + called = {} + + def _fake_setpriority(*args): + called['setpriority'] = args + + def _fake_syscall(*args): + called['syscall'] = args + + with patch('swift.common.utils._libc_setpriority', + _fake_setpriority), \ + patch('swift.common.utils._posix_syscall', _fake_syscall): + called = {} + # not set / default + utils.modify_priority({}, logger) + self.assertEqual(called, {}) + called = {} + # just nice + utils.modify_priority({'nice_priority': '1'}, logger) + self.assertEqual(called, {'setpriority': (0, pid, 1)}) + called = {} + # just ionice class uses default priority 0 + utils.modify_priority({'ionice_class': 'IOPRIO_CLASS_RT'}, logger) + self.assertEqual(called, {'syscall': (251, 1, pid, 1 << 13)}) + called = {} + # just ionice priority is ignored + utils.modify_priority({'ionice_priority': '4'}, logger) + self.assertEqual(called, {}) + called = {} + # bad ionice class + utils.modify_priority({'ionice_class': 'class_foo'}, logger) + self.assertEqual(called, {}) + called = {} + # ionice class & priority + utils.modify_priority({ + 'ionice_class': 'IOPRIO_CLASS_BE', + 'ionice_priority': '4', + }, logger) + self.assertEqual(called, {'syscall': (251, 1, pid, 2 << 13 | 4)}) + called = {} + # all + 
utils.modify_priority({
+            'nice_priority': '-15',
+            'ionice_class': 'IOPRIO_CLASS_IDLE',
+            'ionice_priority': '6',
+        }, logger)
+        self.assertEqual(called, {
+            'setpriority': (0, pid, -15),
+            'syscall': (251, 1, pid, 3 << 13 | 6),
+        })
+
+    def test__NR_ioprio_set(self):
+        with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+                patch('platform.architecture', return_value=('64bit', '')):
+            self.assertEqual(251, utils.NR_ioprio_set())
+
+        with patch('os.uname', return_value=('', '', '', '', 'x86_64')), \
+                patch('platform.architecture', return_value=('32bit', '')):
+            self.assertRaises(OSError, utils.NR_ioprio_set)
+
+        with patch('os.uname', return_value=('', '', '', '', 'alpha')), \
+                patch('platform.architecture', return_value=('64bit', '')):
+            self.assertRaises(OSError, utils.NR_ioprio_set)
+
 
 class ResellerConfReader(unittest.TestCase):

From 4638171ece4e70408f5b897a9bd1c12a70696aa4 Mon Sep 17 00:00:00 2001
From: zheng yin
Date: Wed, 10 Aug 2016 16:48:45 +0800
Subject: [PATCH 129/156] Fix assertEqual error

I think assertEqual(a, a) is an error caused by carelessness in the
test case, so I have corrected it.

Change-Id: I8767c35e8a1a47f1b64241f67959277074a37b21
---
 test/unit/common/test_direct_client.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/unit/common/test_direct_client.py b/test/unit/common/test_direct_client.py
index 1426f28406..31909388c3 100644
--- a/test/unit/common/test_direct_client.py
+++ b/test/unit/common/test_direct_client.py
@@ -221,7 +221,7 @@ class TestDirectClient(unittest.TestCase):
             'X-Account-Object-Count': '1',
             'X-Account-Bytes-Used': '1',
             'X-Timestamp': '1234567890',
-            'X-PUT-Timestamp': '1234567890'}
+            'X-Put-Timestamp': '1234567890'}
         with mocked_http_conn(204, headers) as conn:
             resp_headers, resp = direct_client.direct_get_account(
                 self.node, self.part, self.account)
@@ -229,7 +229,7 @@ class TestDirectClient(unittest.TestCase):
         self.assertEqual(conn.path, self.account_path)
         self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
-        self.assertEqual(resp_headers, resp_headers)
+        self.assertDictEqual(resp_headers, headers)
         self.assertEqual([], resp)

     def test_direct_get_account_error(self):

From 844d0c965df1fcec53f118ed71c2a560cfe88a2b Mon Sep 17 00:00:00 2001
From: Shashank Kumar Shankar
Date: Thu, 11 Aug 2016 05:21:54 +0000
Subject: [PATCH 130/156] Corrects spelling error in
 swift/common/middleware/slo.py

Fixes 'perfomed' to 'performed'

Change-Id: I54adf45494cd4c6edae7bb5b404d377527c6c5a0
Closes-Bug: 1612051
---
 swift/common/middleware/slo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/swift/common/middleware/slo.py b/swift/common/middleware/slo.py
index 47e9a660ff..ccf6684549 100644
--- a/swift/common/middleware/slo.py
+++ b/swift/common/middleware/slo.py
@@ -134,7 +134,7 @@ objects from the manifest much like DLO. If any of the segments from the
 manifest are not found or their Etag/Content Length have changed since upload,
 the connection will drop. In this case a 409 Conflict will be logged in the
 proxy logs and the user will receive incomplete results. Note that this will be
-enforced regardless of whether the user perfomed per-segment validation during
+enforced regardless of whether the user performed per-segment validation during
 upload.
 
 The headers from this GET or HEAD request will return the metadata attached

From 7e2cb23f887cf0092e0bd9f2baa42d3ddbb257ce Mon Sep 17 00:00:00 2001
From: Nakul Dahiwade
Date: Thu, 11 Aug 2016 16:10:35 +0000
Subject: [PATCH 131/156] Grammar error : swift/doc/source/overview_ring.rst

Changed sentence:
"Regions can be used to describe geo-graphically systems characterized
by lower-bandwidth"
To:
"Regions can be used to describe geographical systems characterized by
lower-bandwidth"

Change-Id: I0f614a4c53dd31459f1b6297dd32a8c0f609d9ce
Closes-Bug: 1612302
---
 doc/source/overview_ring.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/overview_ring.rst b/doc/source/overview_ring.rst
index c6aa882381..1723220317 100644
--- a/doc/source/overview_ring.rst
+++ b/doc/source/overview_ring.rst
@@ -22,7 +22,7 @@ number, each replica will be assigned to a different device in the ring.

 Devices are added to the ring to describe the capacity available for
 part-replica assignment. Devices are placed into failure domains consisting
-of region, zone, and server. Regions can be used to describe geo-graphically
+of region, zone, and server. Regions can be used to describe geographical
 systems characterized by lower-bandwidth or higher latency between machines in
 different regions. Many rings will consist of only a single region. Zones can
 be used to group devices based on physical locations, power separations,

From 05b8d9d7fc4e5d1c64385c157431215acda38852 Mon Sep 17 00:00:00 2001
From: Thiago da Silva
Date: Thu, 11 Aug 2016 16:24:57 -0400
Subject: [PATCH 132/156] fix swift_oldies on RH based systems

Change-Id: Icbb4c6d461ded4fab2afade09e718b3a74917717
Signed-off-by: Thiago da Silva
---
 bin/swift-oldies | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/bin/swift-oldies b/bin/swift-oldies
index 53845c07e5..46de955dec 100755
--- a/bin/swift-oldies
+++ b/bin/swift-oldies
@@ -43,8 +43,10 @@ Lists old Swift processes.
         if not args.startswith((
                 '/usr/bin/python /usr/bin/swift-',
                 '/usr/bin/python /usr/local/bin/swift-',
+                '/bin/python /usr/bin/swift-',
                 '/usr/bin/python3 /usr/bin/swift-',
-                '/usr/bin/python3 /usr/local/bin/swift-')):
+                '/usr/bin/python3 /usr/local/bin/swift-',
+                '/bin/python3 /usr/bin/swift-')):
             continue
         args = args.split('-', 1)[1]
         etime = etime.split('-')

From f7a820ed3a72992948e22e7366a9a1780ad40388 Mon Sep 17 00:00:00 2001
From: Tim Burke
Date: Fri, 12 Aug 2016 05:46:33 +0000
Subject: [PATCH 133/156] Wait for a non-empty chunk in WSGIContext._app_call

We're functioning as a WSGI server here, so this bit from PEP-3333 seems
to apply:

> The start_response callable must not actually transmit the response
> headers. Instead, it must store them for the server or gateway to
> transmit only after the first iteration of the application return
> value that yields a non-empty bytestring ... . In other words, response
> headers must not be sent until there is actual body data available, or
> until the application's returned iterable is exhausted.

Plus, it mirrors what swob.Request.call_application does.
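
To sketch the buffering idea (illustrative only; the change itself just
reuses the existing reiterate() helper from swift.common.utils):

    import itertools

    def wait_for_first_nonempty_chunk(app_iter):
        # Advance the app's returned iterable until it yields actual
        # body data; only then is it safe to send the stored headers.
        iterator = iter(app_iter)
        for chunk in iterator:
            if chunk:
                return itertools.chain([chunk], iterator)
        # Exhausted without any body data; headers may be sent now.
        return iter([])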
Change-Id: I1e8501f8ce91ea912780db64fee1c56bef809a98 --- swift/common/wsgi.py | 17 ++++++----------- test/unit/common/test_wsgi.py | 2 ++ 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 88e61f2293..7749f1562c 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -42,7 +42,7 @@ from swift.common.swob import Request from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ validate_configuration, get_hub, config_auto_int_value, \ - CloseableChain + reiterate # Set maximum line size of message headers to be accepted. wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE @@ -1053,16 +1053,11 @@ class WSGIContext(object): self._response_headers = None self._response_exc_info = None resp = self.app(env, self._start_response) - # if start_response has been called, just return the iter - if self._response_status is not None: - return resp - resp = iter(resp) - try: - first_chunk = next(resp) - except StopIteration: - return iter([]) - else: # We got a first_chunk - return CloseableChain([first_chunk], resp) + # if start_response has not been called, iterate until we've got a + # non-empty chunk, by which time the app *should* have called it + if self._response_status is None: + resp = reiterate(resp) + return resp def _get_status_int(self): """ diff --git a/test/unit/common/test_wsgi.py b/test/unit/common/test_wsgi.py index cc33833714..51ac12599d 100644 --- a/test/unit/common/test_wsgi.py +++ b/test/unit/common/test_wsgi.py @@ -1242,6 +1242,8 @@ class TestWSGIContext(unittest.TestCase): def test_app_iter_is_closable(self): def app(env, start_response): + yield '' + yield '' start_response('200 OK', [('Content-Length', '25')]) yield 'aaaaa' yield 'bbbbb' From aab2cee827b18fe1c48f121d2c9066af2dd0c366 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Fri, 12 Aug 2016 21:18:07 +0200 Subject: [PATCH 134/156] Move other-requirements.txt to bindep.txt The default filename for documenting binary dependencies has been changed from "other-requirements.txt" to "bindep.txt" with the release of bindep 2.1.0. While the previous name is still supported, it will be deprecated. Move the file around to follow this change. Note that this change is self-testing, the OpenStack CI infrastructure will use a "bindep.txt" file to setup nodes for testing. 
For more information about bindep, see also: http://docs.openstack.org/infra/manual/drivers.html#package-requirements http://docs.openstack.org/infra/bindep/ As well as this announcement: http://lists.openstack.org/pipermail/openstack-dev/2016-August/101590.html Change-Id: I000a4e708006263acc6d9731a6677a6e62e285b6 --- other-requirements.txt => bindep.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename other-requirements.txt => bindep.txt (100%) diff --git a/other-requirements.txt b/bindep.txt similarity index 100% rename from other-requirements.txt rename to bindep.txt From bb87fcefced099a56537edd0db7ca965e9902f5a Mon Sep 17 00:00:00 2001 From: liujiong Date: Sun, 14 Aug 2016 00:43:47 +0800 Subject: [PATCH 135/156] Fix typo in the file Change-Id: I6539e9b9fb7918e387e8ae802be7b4efbcb07f4d --- test/unit/obj/test_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index 33a2ffa4ae..fa7be18a76 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -1157,7 +1157,7 @@ class TestObjectController(unittest.TestCase): 'name': '/a/c/o', 'Content-Encoding': 'gzip'}) - def test_PUT_overwrite_to_older_ts_succcess(self): + def test_PUT_overwrite_to_older_ts_success(self): old_timestamp = next(self.ts) new_timestamp = next(self.ts) From c7283be4fe9ebaf0e8feba695a1c664eec6fb355 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Wed, 19 Aug 2015 12:17:47 -0700 Subject: [PATCH 136/156] Add "history" mode to versioned_writes middleware This change introduces the concept of a "versioning mode" for versioned_writes. The following modes are supported: * stack When deleting, check whether any previous versions exist in the versions container. If none is found, the object is deleted. If the most-recent version in the versions container is not a delete marker, it is copied into the versioned container (overwriting the current version if one exists) and then deleted from the versions container. This preserves the previous behavior. If the most-recent version in the versions container is a delete marker and a current version exists in the versioned container, the current version is deleted. If the most-recent version in the versions container is a delete marker and no current version exists in the versioned container, we copy the next-most-recent version from the versions container into the versioned container (assuming it exists and is not a delete marker) and delete both the most-recent version (i.e., the delete marker) and the just-copied next-most-recent version from the versions container. With this mode, DELETEs to versioned containers "undo" operations on containers. Previously this was limited to undoing PUTs, but now it will also undo DELETEs performed while in "history" mode. * history When deleting, check whether a current version exists in the versioned container. If one is found, it is copied to the versions container. Then an empty "delete marker" object is also put into the versions container; this records when the object was deleted. Finally, the original current version is deleted from the versioned container. As a result, subsequent GETs or HEADs will return a 404, and container listings for the versioned container do not include the object. With this mode, DELETEs to versioned containers behave like DELETEs to other containers, but with a history of what has happened. Clients may specify (via a new X-Versions-Mode header) which mode a container should use. 
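
For illustration, opting a container into the new behavior is a single
header on a PUT or POST (the token, storage URL, and container name below
are placeholders):

    curl -i -XPOST -H "X-Auth-Token: <token>" \
        -H "X-Versions-Location: versions" \
        -H "X-Versions-Mode: history" \
        http://<storage_url>/container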
By default, the existing "stack" mode is used. Upgrade consideration: ====================== Clients should not use the "history" mode until all proxies in the cluster have been upgraded. Attempting to use the "history" mode during a rolling upgrade may result in some requests being served by proxies running old code (which necessarily uses the "stack" mode), leading to data loss. Change-Id: I555dc17fefd0aa9ade681aa156da24e018ebe74b --- api-ref/source/parameters.yaml | 23 +- api-ref/source/storage-container-services.inc | 3 + doc/source/api/object_versioning.rst | 206 +++++++-- swift/common/middleware/versioned_writes.py | 430 ++++++++++++++---- test/unit/common/middleware/helpers.py | 8 +- .../middleware/test_versioned_writes.py | 422 ++++++++++++++++- 6 files changed, 941 insertions(+), 151 deletions(-) diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml index 9213ada101..ebb2a20989 100644 --- a/api-ref/source/parameters.yaml +++ b/api-ref/source/parameters.yaml @@ -720,21 +720,24 @@ X-Trans-Id-Extra: type: string X-Versions-Location: description: | - Enables versioning on this container. The value - is the name of another container. You must UTF-8-encode and then - URL-encode the name before you include it in the header. To - disable versioning, set the header to an empty string. + The URL-encoded UTF-8 representation of the container that stores + previous versions of objects. If not set, versioning is disabled + for this container. For more information about object versioning, + see `Object versioning `_. in: header required: false type: string -X-Versions-Location_1: +X-Versions-Mode: description: | - Enables versioning on this container. The value - is the name of another container. You must UTF-8-encode and then - URL-encode the name before you include it in the header. To - disable versioning, set the header to an empty string. + The versioning mode for this container. The value must be either + ``stack`` or ``history``. If not set, ``stack`` mode will be used. + This setting has no impact unless ``X-Versions-Location`` is set + for the container. For more information about object versioning, + see `Object versioning `_. 
   in: header
-  required: true
+  required: false
   type: string
 
 # variables in path
diff --git a/api-ref/source/storage-container-services.inc b/api-ref/source/storage-container-services.inc
index 6b69ef9d00..2213a78840 100644
--- a/api-ref/source/storage-container-services.inc
+++ b/api-ref/source/storage-container-services.inc
@@ -172,6 +172,7 @@ Request
    - X-Container-Sync-To: X-Container-Sync-To
    - X-Container-Sync-Key: X-Container-Sync-Key
    - X-Versions-Location: X-Versions-Location
+   - X-Versions-Mode: X-Versions-Mode
    - X-Container-Meta-name: X-Container-Meta-name
    - X-Container-Meta-Access-Control-Allow-Origin: X-Container-Meta-Access-Control-Allow-Origin
    - X-Container-Meta-Access-Control-Max-Age: X-Container-Meta-Access-Control-Max-Age
@@ -302,6 +303,7 @@ Request
    - X-Container-Sync-To: X-Container-Sync-To
    - X-Container-Sync-Key: X-Container-Sync-Key
    - X-Versions-Location: X-Versions-Location
+   - X-Versions-Mode: X-Versions-Mode
    - X-Remove-Versions-Location: X-Remove-Versions-Location
    - X-Container-Meta-name: X-Container-Meta-name
    - X-Container-Meta-Access-Control-Allow-Origin: X-Container-Meta-Access-Control-Allow-Origin
@@ -409,6 +411,7 @@ Response Parameters
    - Content-Type: Content-Type
    - X-Container-Meta-Quota-Bytes: X-Container-Meta-Quota-Bytes
    - X-Versions-Location: X-Versions-Location
+   - X-Versions-Mode: X-Versions-Mode
 
 
diff --git a/doc/source/api/object_versioning.rst b/doc/source/api/object_versioning.rst
index 0b3cdbe8c4..23b8f9406a 100644
--- a/doc/source/api/object_versioning.rst
+++ b/doc/source/api/object_versioning.rst
@@ -6,19 +6,19 @@ You can store multiple versions of your content so that you can recover
 from unintended overwrites. Object versioning is an easy way to
 implement version control, which you can use with any type of content.
 
-Note
-~~~~
+.. note::
+    You cannot version a large-object manifest file, but the large-object
+    manifest file can point to versioned segments.
 
-You cannot version a large-object manifest file, but the large-object
-manifest file can point to versioned segments.
+.. note::
+    It is strongly recommended that you put non-current objects in a
+    different container than the container where current object versions
+    reside.
 
-It is strongly recommended that you put non-current objects in a
-different container than the container where current object versions
-reside.
-
-To enable object versioning, the cloud provider sets the
-``allow_versions`` option to ``TRUE`` in the container configuration
-file.
+To allow object versioning within a cluster, the cloud provider should add the
+``versioned_writes`` filter to the pipeline and set the
+``allow_versioned_writes`` option to ``true`` in the
+``[filter:versioned_writes]`` section of the proxy-server configuration file.
 
 The ``X-Versions-Location`` header defines the
 container that holds the non-current versions of your objects. You
@@ -29,13 +29,21 @@ object versioning for all objects in the container. With a comparable
 container automatically create non-current versions in the ``archive``
 container.
 
-Here's an example:
+The ``X-Versions-Mode`` header defines the behavior of ``DELETE`` requests to
+objects in the versioned container. In the default ``stack`` mode, deleting an
+object will restore the most-recent version from the ``archive`` container,
+overwriting the current version. Alternatively, you may specify ``history``
+mode, where deleting an object will copy the current version to the
+``archive`` then remove it from the ``current`` container.
+
+Example Using ``stack`` Mode
+----------------------------
 
 #. Create the ``current`` container:
 
    .. code::
 
-      # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive"
+      # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive" -H "X-Versions-Mode: stack"
 
    .. code::
 
@@ -70,7 +78,7 @@ Here's an example:
 
    .. code::
 
-      <length><object_name>
+      <length><object_name>/<timestamp>
 
    Where ``length`` is the 3-character, zero-padded hexadecimal
    character length of the object, ``<object_name>`` is the object name,
@@ -117,12 +125,10 @@ Here's an example:
 
       009my_object/1390512682.92052
 
-Note
-~~~~
-
-   A **POST** request to a versioned object updates only the metadata
-   for the object and does not create a new version of the object. New
-   versions are created only when the content of the object changes.
+   .. note::
+       A **POST** request to a versioned object updates only the metadata
+       for the object and does not create a new version of the object. New
+       versions are created only when the content of the object changes.
 
 #. Issue a **DELETE** request to a versioned object to remove the
    current version of the object and replace it with the next-most
@@ -163,21 +169,163 @@ Note
    on it. If want to completely remove an object and you have five
    versions of it, you must **DELETE** it five times.
 
-#. To disable object versioning for the ``current`` container, remove
-   its ``X-Versions-Location`` metadata header by sending an empty key
-   value.
+Example Using ``history`` Mode
+------------------------------
+
+#. Create the ``current`` container:
 
    .. code::
 
-      # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: "
+      # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive" -H "X-Versions-Mode: history"
 
    .. code::
 
-      HTTP/1.1 202 Accepted
-      Content-Length: 76
+      HTTP/1.1 201 Created
+      Content-Length: 0
       Content-Type: text/html; charset=UTF-8
-      X-Trans-Id: txe2476de217134549996d0-0052e19038
-      Date: Thu, 23 Jan 2014 21:57:12 GMT
+      X-Trans-Id: txb91810fb717347d09eec8-0052e18997
+      Date: Thu, 23 Jan 2014 21:28:55 GMT
 
-
-       <html>
-        <h1>Accepted
-        </h1>
-          <p>The request is accepted for processing.
-          </p>
-       </html>
+#. Create the first version of an object in the ``current`` container:
+
+   .. code::
+
+      # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+
+   .. code::
+
+      HTTP/1.1 201 Created
+      Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
+      Content-Length: 0
+      Etag: d41d8cd98f00b204e9800998ecf8427e
+      Content-Type: text/html; charset=UTF-8
+      X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
+      Date: Thu, 23 Jan 2014 21:31:22 GMT
+
+   Nothing is written to the non-current version container when you
+   initially **PUT** an object in the ``current`` container. However,
+   subsequent **PUT** requests that edit an object trigger the creation
+   of a version of that object in the ``archive`` container.
+
+   These non-current versions are named as follows:
+
+   .. code::
+
+      <length><object_name>/<timestamp>
+
+   Where ``length`` is the 3-character, zero-padded hexadecimal
+   character length of the object, ``<object_name>`` is the object name,
+   and ``<timestamp>`` is the time when the object was initially created
+   as a current version.
+
+#. Create a second version of the object in the ``current`` container:
+
+   .. code::
+
+      # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"
+
+   .. code::
+
+      HTTP/1.1 201 Created
+      Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
+      Content-Length: 0
+      Etag: d41d8cd98f00b204e9800998ecf8427e
+      Content-Type: text/html; charset=UTF-8
+      X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
+      Date: Thu, 23 Jan 2014 21:41:32 GMT
+
+#. Issue a **GET** request to a versioned object to get the current
+   version of the object. You do not have to do any request redirects or
+   metadata lookups.
+
+   List older versions of the object in the ``archive`` container:
+
+   .. code::
+
+      # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"
+
+   .. code::
+
+      HTTP/1.1 200 OK
+      Content-Length: 30
+      X-Container-Object-Count: 1
+      Accept-Ranges: bytes
+      X-Timestamp: 1390513280.79684
+      X-Container-Bytes-Used: 0
+      Content-Type: text/plain; charset=utf-8
+      X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
+      Date: Thu, 23 Jan 2014 21:45:50 GMT
+
+      009my_object/1390512682.92052
+
+   .. note::
+       A **POST** request to a versioned object updates only the metadata
+       for the object and does not create a new version of the object. New
+       versions are created only when the content of the object changes.
+
+#. Issue a **DELETE** request to a versioned object to copy the
+   current version of the object to the archive container then delete it from
+   the current container. Subsequent **GET** requests to the object in the
+   current container will return 404 Not Found.
+
+   .. code::
+
+      # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"
+
+   .. code::
+
+      HTTP/1.1 204 No Content
+      Content-Length: 0
+      Content-Type: text/html; charset=UTF-8
+      X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
+      Date: Thu, 23 Jan 2014 21:51:25 GMT
+
+   List older versions of the object in the ``archive`` container::
+
+   .. 
code:: + + HTTP/1.1 200 OK + Content-Length: 90 + X-Container-Object-Count: 3 + Accept-Ranges: bytes + X-Timestamp: 1390513280.79684 + X-Container-Bytes-Used: 0 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed + Date: Thu, 23 Jan 2014 21:51:41 GMT + + 009my_object/1390512682.92052 + 009my_object/1390512692.23062 + 009my_object/1390513885.67732 + + In addition to the two previous versions of the object, the archive + container has a "delete marker" to record when the object was deleted. + + To permanently delete a previous version, issue a **DELETE** to the version + in the archive container. + +Disabling Object Versioning +--------------------------- + +To disable object versioning for the ``current`` container, remove +its ``X-Versions-Location`` metadata header by sending an empty key +value. + +.. code:: + + # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: " + +.. code:: + + HTTP/1.1 202 Accepted + Content-Length: 76 + Content-Type: text/html; charset=UTF-8 + X-Trans-Id: txe2476de217134549996d0-0052e19038 + Date: Thu, 23 Jan 2014 21:57:12 GMT + +
+   <html>
+    <h1>Accepted
+    </h1>
+      <p>The request is accepted for processing.
+      </p>
+   </html>
diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index ae091cff20..ebe3a825f5 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -17,14 +17,17 @@ Object versioning in swift is implemented by setting a flag on the container to tell swift to version all objects in the container. The flag is the ``X-Versions-Location`` header on the container, and its value is the -container where the versions are stored. It is recommended to use a different -``X-Versions-Location`` container for each container that is being versioned. +container where the versions are stored. + +.. note:: + It is recommended to use a different ``X-Versions-Location`` container for + each container that is being versioned. When data is ``PUT`` into a versioned container (a container with the versioning flag turned on), the existing data in the file is redirected to a new object and the data in the ``PUT`` request is saved as the data for the versioned object. The new object name (for the previous version) is -``//``, where ``length`` +``//``, where ``length`` is the 3-character zero-padded hexadecimal length of the ```` and ```` is the timestamp of when the previous version was created. @@ -35,9 +38,39 @@ A ``POST`` to a versioned object will update the object metadata as normal, but will not create a new version of the object. In other words, new versions are only created when the content of the object changes. -A ``DELETE`` to a versioned object will only remove the current version of the -object. If you have 5 total versions of the object, you must delete the -object 5 times to completely remove the object. +A ``DELETE`` to a versioned object will be handled in one of two ways, +depending on the value of a ``X-Versions-Mode`` header set on the container. +The available modes are: + + * ``stack`` + + Only remove the current version of the object. If any previous versions + exist in the archive container, the most recent one is copied over the + current version, and the copy in the archive container is deleted. As a + result, if you have 5 total versions of the object, you must delete the + object 5 times to completely remove the object. This is the default + behavior if ``X-Versions-Mode`` has not been set for the container. + + * ``history`` + + Copy the current version of the object to the archive container, write + a zero-byte "delete marker" object that notes when the delete took place, + and delete the object from the versioned container. The object will no + longer appear in container listings for the versioned container and future + requests there will return 404 Not Found. However, the content will still + be recoverable from the archive container. + +.. note:: + While it is possible to switch between 'stack' and 'history' mode on a + container, it is not recommended. + +To restore a previous version of an object, find the desired version in the +archive container then issue a ``COPY`` with a ``Destination`` header +indicating the original location. This will retain a copy of the current +version similar to a ``PUT`` over the versioned object. Additionally, if the +container is in ``stack`` mode and the client wishes to permanently delete the +current version, it may issue a ``DELETE`` to the versioned object as +described above. 
--------------------------------------------------
 How to Enable Object Versioning in a Swift Cluster
 --------------------------------------------------
@@ -57,23 +90,31 @@ set ``allow_versioned_writes`` to ``True`` in the middleware options
 to enable the information about this middleware to be returned in a
 /info request.
 
-Upgrade considerations: If ``allow_versioned_writes`` is set in the filter
-configuration, you can leave the ``allow_versions`` flag in the container
-server configuration files untouched. If you decide to disable or remove the
-``allow_versions`` flag, you must re-set any existing containers that had
-the 'X-Versions-Location' flag configured so that it can now be tracked by the
-versioned_writes middleware.
+Upgrade considerations:
++++++++++++++++++++++++
 
------------------------
-Examples Using ``curl``
------------------------
+If ``allow_versioned_writes`` is set in the filter configuration, you can leave
+the ``allow_versions`` flag in the container server configuration files
+untouched. If you decide to disable or remove the ``allow_versions`` flag, you
+must re-set any existing containers that had the 'X-Versions-Location' flag
+configured so that it can now be tracked by the versioned_writes middleware.
+
+Clients should not use the 'history' mode until all proxies in the cluster
+have been upgraded to a version of Swift that supports it. Attempting to use
+the 'history' mode during a rolling upgrade may result in some requests being
+served by proxies running old code (which necessarily uses the 'stack' mode),
+leading to data loss.
+
+-------------------------------------------
+Examples Using ``curl`` with ``stack`` Mode
+-------------------------------------------
 
 First, create a container with the ``X-Versions-Location`` header or add the
 header to an existing container. Also make sure the container referenced by
 the ``X-Versions-Location`` exists. In this example, the name of that
 container is "versions"::
 
-    curl -i -XPUT -H "X-Auth-Token: <token>" \
+    curl -i -XPUT -H "X-Auth-Token: <token>" -H "X-Versions-Mode: stack" \
 -H "X-Versions-Location: versions" http://<storage_url>/container
     curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
 
@@ -102,6 +143,59 @@ http://<storage_url>/versions?prefix=008myobject/
     curl -i -XGET -H "X-Auth-Token: <token>" \
 http://<storage_url>/container/myobject
 
+---------------------------------------------
+Examples Using ``curl`` with ``history`` Mode
+---------------------------------------------
+
+As above, create a container with the ``X-Versions-Location`` header and ensure
+that the container referenced by the ``X-Versions-Location`` exists. In this
+example, the name of that container is "versions"::
+
+    curl -i -XPUT -H "X-Auth-Token: <token>" -H "X-Versions-Mode: history" \
+-H "X-Versions-Location: versions" http://<storage_url>/container
+    curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
+
+Create an object (the first version)::
+
+    curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
+http://<storage_url>/container/myobject
+
+Now create a new version of that object::
+
+    curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
+http://<storage_url>/container/myobject
+
+Now delete the current version of the object. Subsequent requests will 404::
+
+    curl -i -XDELETE -H "X-Auth-Token: <token>" \
+http://<storage_url>/container/myobject
+    curl -i -H "X-Auth-Token: <token>" \
+http://<storage_url>/container/myobject
+
+A listing of the older versions of the object will include both the first and
+second versions of the object, as well as a "delete marker" object::
+
+    curl -i -H "X-Auth-Token: <token>" \
+http://<storage_url>/versions?prefix=008myobject/
+
+To restore a previous version, simply ``COPY`` it from the archive container::
+
+    curl -i -XCOPY -H "X-Auth-Token: <token>" \
+http://<storage_url>/versions/008myobject/<timestamp> \
+-H "Destination: container/myobject"
+
+Note that the archive container still has all previous versions of the object,
+including the source for the restore::
+
+    curl -i -H "X-Auth-Token: <token>" \
+http://<storage_url>/versions?prefix=008myobject/
+
+To permanently delete a previous version, ``DELETE`` it from the archive
+container::
+
+    curl -i -XDELETE -H "X-Auth-Token: <token>" \
+http://<storage_url>/versions/008myobject/<timestamp>
+
 ---------------------------------------------------
 How to Disable Object Versioning in a Swift Cluster
 ---------------------------------------------------
@@ -132,11 +226,19 @@ from swift.proxy.controllers.base import get_container_info
 from swift.common.http import (
     is_success, is_client_error, HTTP_NOT_FOUND)
 from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \
-    HTTPServerError
+    HTTPServerError, HTTPBadRequest
 from swift.common.exceptions import (
     ListingIterNotFound, ListingIterError)
 
 
+VERSIONING_MODES = ('stack', 'history')
+DELETE_MARKER_CONTENT_TYPE = 'application/x-deleted;swift_versions_deleted=1'
+VERSIONS_LOC_CLIENT = 'x-versions-location'
+VERSIONS_LOC_SYSMETA = get_sys_meta_prefix('container') + 'versions-location'
+VERSIONS_MODE_CLIENT = 'x-versions-mode'
+VERSIONS_MODE_SYSMETA = get_sys_meta_prefix('container') + 'versions-mode'
+
+
 class VersionedWritesContext(WSGIContext):
 
     def __init__(self, wsgi_app, logger):
@@ -298,6 +400,48 @@ class VersionedWritesContext(WSGIContext):
             # could not version the data, bail
             raise HTTPServiceUnavailable(request=req)
 
+    def _build_versions_object_prefix(self, object_name):
+        return '%03x%s/' % (
+            len(object_name),
+            object_name)
+
+    def _build_versions_object_name(self, object_name, ts):
+        return ''.join((
+            self._build_versions_object_prefix(object_name),
+            Timestamp(ts).internal))
+
+    def _copy_current(self, req, versions_cont, api_version, account_name,
+                      object_name):
+        get_resp = self._get_source_object(req, req.path_info)
+
+        if 'X-Object-Manifest' in get_resp.headers:
+            # do not version DLO manifest, proceed with original request
+            close_if_possible(get_resp.app_iter)
+            return
+        if get_resp.status_int == HTTP_NOT_FOUND:
+            # nothing to version, proceed with original request
+            close_if_possible(get_resp.app_iter)
+            return
+
+        # check for any other errors
+        self._check_response_error(req, get_resp)
+
+        # if there's an existing object, then copy it to
+        # X-Versions-Location
+        ts_source = get_resp.headers.get(
+            'x-timestamp',
+            calendar.timegm(time.strptime(
+                get_resp.headers['last-modified'],
+                '%a, %d %b %Y %H:%M:%S GMT')))
+        vers_obj_name = self._build_versions_object_name(
+            object_name, ts_source)
+
+        put_path_info = "/%s/%s/%s/%s" % (
+            api_version, account_name, versions_cont, vers_obj_name)
+        put_resp = self._put_versioned_obj(req, put_path_info, get_resp)
+
+        self._check_response_error(req, put_resp)
+
     def handle_obj_versions_put(self, req, versions_cont, api_version,
                                 account_name, object_name):
         """
@@ -315,41 +459,77 @@ class VersionedWritesContext(WSGIContext):
             # do not version DLO manifest, proceed with original request
             return self.app
 
-        get_resp = self._get_source_object(req, req.path_info)
-
-        if 'X-Object-Manifest' in get_resp.headers:
-            # do not version DLO manifest, proceed with original request
-            close_if_possible(get_resp.app_iter)
-            return self.app
-        if get_resp.status_int == HTTP_NOT_FOUND:
-            # nothing to version, proceed with original request
-            close_if_possible(get_resp.app_iter)
-            return self.app
-
-        # check for any other errors
-        self._check_response_error(req, get_resp)
-
-        # if there's an existing object, then copy it to
-        # X-Versions-Location
-        prefix_len = '%03x' % len(object_name)
-        lprefix = prefix_len + object_name + '/'
-        ts_source = get_resp.headers.get(
-            'x-timestamp',
-            calendar.timegm(time.strptime(
-                get_resp.headers['last-modified'],
-                '%a, %d %b %Y %H:%M:%S GMT')))
-        vers_obj_name = lprefix + Timestamp(ts_source).internal
-
-        put_path_info = "/%s/%s/%s/%s" % (
-            api_version, account_name, versions_cont, vers_obj_name)
-        put_resp = self._put_versioned_obj(req, put_path_info, get_resp)
-
-        self._check_response_error(req, put_resp)
+        self._copy_current(req, versions_cont, api_version, account_name,
+                           object_name)
 
         return self.app
 
-    def handle_obj_versions_delete(self, req, versions_cont, api_version,
-                                   account_name, container_name, object_name):
+    def handle_obj_versions_delete_push(self, req, versions_cont, api_version,
+                                        account_name, container_name,
+                                        object_name):
         """
+        Handle DELETE requests when in history mode.
+
+        Copy current version of object to versions_container and write a
+        delete marker before proceeding with original request.
+
+        :param req: original request.
+        :param versions_cont: container where previous versions of the object
+                              are stored.
+        :param api_version: api version.
+        :param account_name: account name.
+        :param object_name: name of object of original request
+        """
+        self._copy_current(req, versions_cont, api_version, account_name,
+                           object_name)
+
+        marker_path = "/%s/%s/%s/%s" % (
+            api_version, account_name, versions_cont,
+            self._build_versions_object_name(object_name, time.time()))
+        marker_headers = {
+            # Definitive source of truth is Content-Type, and since we add
+            # a swift_* param, we know users haven't set it themselves.
+            # This is still open to users POSTing to update the content-type
+            # but they're just shooting themselves in the foot then.
+ 'content-type': DELETE_MARKER_CONTENT_TYPE, + 'content-length': '0', + 'x-auth-token': req.headers.get('x-auth-token')} + marker_req = make_pre_authed_request( + req.environ, path=marker_path, + headers=marker_headers, method='PUT', swift_source='VW') + marker_req.environ['swift.content_type_overridden'] = True + marker_resp = marker_req.get_response(self.app) + self._check_response_error(req, marker_resp) + + # successfully copied and created delete marker; safe to delete + return self.app + + def _restore_data(self, req, versions_cont, api_version, account_name, + container_name, object_name, prev_obj_name): + get_path = "/%s/%s/%s/%s" % ( + api_version, account_name, versions_cont, prev_obj_name) + + get_resp = self._get_source_object(req, get_path) + + # if the version isn't there, keep trying with previous version + if get_resp.status_int == HTTP_NOT_FOUND: + return False + + self._check_response_error(req, get_resp) + + put_path_info = "/%s/%s/%s/%s" % ( + api_version, account_name, container_name, object_name) + put_resp = self._put_versioned_obj( + req, put_path_info, get_resp) + + self._check_response_error(req, put_resp) + return get_path + + def handle_obj_versions_delete_pop(self, req, versions_cont, api_version, + account_name, container_name, + object_name): + """ + Handle DELETE requests when in stack mode. + Delete current version of object and pop previous version in its place. :param req: original request. @@ -360,12 +540,11 @@ class VersionedWritesContext(WSGIContext): :param container_name: container name. :param object_name: object name. """ - prefix_len = '%03x' % len(object_name) - lprefix = prefix_len + object_name + '/' - - item_iter = self._listing_iter(account_name, versions_cont, lprefix, - req) + listing_prefix = self._build_versions_object_prefix(object_name) + item_iter = self._listing_iter(account_name, versions_cont, + listing_prefix, req) + auth_token_header = {'X-Auth-Token': req.headers.get('X-Auth-Token')} authed = False for previous_version in item_iter: if not authed: @@ -380,33 +559,66 @@ class VersionedWritesContext(WSGIContext): return aresp authed = True - # there are older versions so copy the previous version to the - # current object and delete the previous version - prev_obj_name = previous_version['name'].encode('utf-8') + if previous_version['content_type'] == DELETE_MARKER_CONTENT_TYPE: + # check whether we have data in the versioned container + obj_head_headers = {'X-Newest': 'True'} + obj_head_headers.update(auth_token_header) + head_req = make_pre_authed_request( + req.environ, path=req.path_info, method='HEAD', + headers=obj_head_headers, swift_source='VW') + hresp = head_req.get_response(self.app) - get_path = "/%s/%s/%s/%s" % ( - api_version, account_name, versions_cont, prev_obj_name) + if hresp.status_int != HTTP_NOT_FOUND: + self._check_response_error(req, hresp) + # if there's an existing object, then just let the delete + # through (i.e., restore to the delete-marker state): + break - get_resp = self._get_source_object(req, get_path) + # no data currently in the container (delete marker is current) + for version_to_restore in item_iter: + if version_to_restore['content_type'] == \ + DELETE_MARKER_CONTENT_TYPE: + # Nothing to restore + break + prev_obj_name = version_to_restore['name'].encode('utf-8') + restored_path = self._restore_data( + req, versions_cont, api_version, account_name, + container_name, object_name, prev_obj_name) + if not restored_path: + continue - # if the version isn't there, keep trying with previous 
version - if get_resp.status_int == HTTP_NOT_FOUND: - continue + old_del_req = make_pre_authed_request( + req.environ, path=restored_path, method='DELETE', + headers=auth_token_header, swift_source='VW') + del_resp = old_del_req.get_response(self.app) + if del_resp.status_int != HTTP_NOT_FOUND: + self._check_response_error(req, del_resp) + # else, well, it existed long enough to do the + # copy; we won't worry too much + break + marker_path = "/%s/%s/%s/%s" % ( + api_version, account_name, versions_cont, + previous_version['name'].encode('utf-8')) + # done restoring, redirect the delete to the marker + req = make_pre_authed_request( + req.environ, path=marker_path, method='DELETE', + headers=auth_token_header, swift_source='VW') + else: + # there are older versions so copy the previous version to the + # current object and delete the previous version + prev_obj_name = previous_version['name'].encode('utf-8') + restored_path = self._restore_data( + req, versions_cont, api_version, account_name, + container_name, object_name, prev_obj_name) + if not restored_path: + continue - self._check_response_error(req, get_resp) - - put_path_info = "/%s/%s/%s/%s" % ( - api_version, account_name, container_name, object_name) - put_resp = self._put_versioned_obj(req, put_path_info, get_resp) - - self._check_response_error(req, put_resp) - - # redirect the original DELETE to the source of the reinstated - # version object - we already auth'd original req so make a - # pre-authed request - req = make_pre_authed_request( - req.environ, path=get_path, method='DELETE', - swift_source='VW') + # redirect the original DELETE to the source of the reinstated + # version object - we already auth'd original req so make a + # pre-authed request + req = make_pre_authed_request( + req.environ, path=restored_path, method='DELETE', + headers=auth_token_header, swift_source='VW') # remove 'X-If-Delete-At', since it is not for the older copy if 'X-If-Delete-At' in req.headers: @@ -420,15 +632,19 @@ class VersionedWritesContext(WSGIContext): app_resp = self._app_call(env) if self._response_headers is None: self._response_headers = [] - sysmeta_version_hdr = get_sys_meta_prefix('container') + \ - 'versions-location' - location = '' + mode = location = '' for key, val in self._response_headers: - if key.lower() == sysmeta_version_hdr: + if key.lower() == VERSIONS_LOC_SYSMETA: location = val + elif key.lower() == VERSIONS_MODE_SYSMETA: + mode = val if location: - self._response_headers.extend([('X-Versions-Location', location)]) + self._response_headers.extend([ + (VERSIONS_LOC_CLIENT.title(), location)]) + if mode: + self._response_headers.extend([ + (VERSIONS_MODE_CLIENT.title(), mode)]) start_response(self._response_status, self._response_headers, @@ -444,12 +660,9 @@ class VersionedWritesMiddleware(object): self.logger = get_logger(conf, log_route='versioned_writes') def container_request(self, req, start_response, enabled): - sysmeta_version_hdr = get_sys_meta_prefix('container') + \ - 'versions-location' - # set version location header as sysmeta - if 'X-Versions-Location' in req.headers: - val = req.headers.get('X-Versions-Location') + if VERSIONS_LOC_CLIENT in req.headers: + val = req.headers.get(VERSIONS_LOC_CLIENT) if val: # differently from previous version, we are actually # returning an error if user tries to set versions location @@ -461,11 +674,11 @@ class VersionedWritesMiddleware(object): body='Versioned Writes is disabled') location = check_container_format(req, val) - req.headers[sysmeta_version_hdr] = 
location + req.headers[VERSIONS_LOC_SYSMETA] = location # reset original header to maintain sanity # now only sysmeta is source of Versions Location - req.headers['X-Versions-Location'] = '' + req.headers[VERSIONS_LOC_CLIENT] = '' # if both headers are in the same request # adding location takes precedence over removing @@ -478,10 +691,31 @@ class VersionedWritesMiddleware(object): # handle removing versions container val = req.headers.get('X-Remove-Versions-Location') if val: - req.headers.update({sysmeta_version_hdr: ''}) - req.headers.update({'X-Versions-Location': ''}) + req.headers.update({VERSIONS_LOC_SYSMETA: '', + VERSIONS_LOC_CLIENT: ''}) del req.headers['X-Remove-Versions-Location'] + # handle versioning mode + if VERSIONS_MODE_CLIENT in req.headers: + val = req.headers.pop(VERSIONS_MODE_CLIENT) + if val: + if not config_true_value(enabled) and \ + req.method in ('PUT', 'POST'): + raise HTTPPreconditionFailed( + request=req, content_type='text/plain', + body='Versioned Writes is disabled') + if val not in VERSIONING_MODES: + raise HTTPBadRequest( + request=req, content_type='text/plain', + body='X-Versions-Mode must be one of %s' % ', '.join( + VERSIONING_MODES)) + req.headers[VERSIONS_MODE_SYSMETA] = val + else: + req.headers['X-Remove-Versions-Mode'] = 'x' + + if req.headers.pop('X-Remove-Versions-Mode', None): + req.headers.update({VERSIONS_MODE_SYSMETA: ''}) + # send request and translate sysmeta headers from response vw_ctx = VersionedWritesContext(self.app, self.logger) return vw_ctx.handle_container_request(req.environ, start_response) @@ -503,6 +737,8 @@ class VersionedWritesMiddleware(object): # for backwards compatibility feature is enabled. versions_cont = container_info.get( 'sysmeta', {}).get('versions-location') + versioning_mode = container_info.get( + 'sysmeta', {}).get('versions-mode', 'stack') if not versions_cont: versions_cont = container_info.get('versions') # if allow_versioned_writes is not set in the configuration files @@ -518,8 +754,13 @@ class VersionedWritesMiddleware(object): resp = vw_ctx.handle_obj_versions_put( req, versions_cont, api_version, account_name, object_name) - else: # handle DELETE - resp = vw_ctx.handle_obj_versions_delete( + # handle DELETE + elif versioning_mode == 'history': + resp = vw_ctx.handle_obj_versions_delete_push( + req, versions_cont, api_version, account_name, + container_name, object_name) + else: + resp = vw_ctx.handle_obj_versions_delete_pop( req, versions_cont, api_version, account_name, container_name, object_name) @@ -573,7 +814,8 @@ def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) if config_true_value(conf.get('allow_versioned_writes')): - register_swift_info('versioned_writes') + register_swift_info('versioned_writes', + allowed_versions_mode=VERSIONING_MODES) def obj_versions_filter(app): return VersionedWritesMiddleware(app, conf) diff --git a/test/unit/common/middleware/helpers.py b/test/unit/common/middleware/helpers.py index 1e31362f0d..fa8e675c09 100644 --- a/test/unit/common/middleware/helpers.py +++ b/test/unit/common/middleware/helpers.py @@ -15,7 +15,7 @@ # This stuff can't live in test/unit/__init__.py due to its swob dependency. 
-from collections import defaultdict +from collections import defaultdict, namedtuple from hashlib import md5 from swift.common import swob from swift.common.header_key_dict import HeaderKeyDict @@ -41,6 +41,9 @@ class LeakTrackingIter(object): self.fake_swift.mark_closed(self.path) +FakeSwiftCall = namedtuple('FakeSwiftCall', ['method', 'path', 'headers']) + + class FakeSwift(object): """ A good-enough fake Swift proxy server to use in testing middleware. @@ -148,7 +151,8 @@ class FakeSwift(object): # note: tests may assume this copy of req_headers is case insensitive # so we deliberately use a HeaderKeyDict - self._calls.append((method, path, HeaderKeyDict(req.headers))) + self._calls.append( + FakeSwiftCall(method, path, HeaderKeyDict(req.headers))) # range requests ought to work, hence conditional_response=True if isinstance(body, list): diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index 27b8914555..4d7d0552b3 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -17,8 +17,9 @@ import functools import json import os import time +import mock import unittest -from swift.common import swob +from swift.common import swob, utils from swift.common.middleware import versioned_writes, copy from swift.common.swob import Request from test.unit.common.middleware.helpers import FakeSwift @@ -121,7 +122,31 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): method, path, req_headers = calls[0] self.assertEqual('PUT', method) self.assertEqual('/v1/a/c', path) - self.assertTrue('x-container-sysmeta-versions-location' in req_headers) + self.assertIn('x-container-sysmeta-versions-location', req_headers) + self.assertNotIn('x-container-sysmeta-versions-mode', req_headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_put_container_history(self): + self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Versions-Location': 'ver_cont', + 'X-Versions-Mode': 'history'}, + environ={'REQUEST_METHOD': 'PUT'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + + # check for sysmeta header + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('PUT', method) + self.assertEqual('/v1/a/c', path) + self.assertIn('x-container-sysmeta-versions-location', req_headers) + self.assertEqual('ver_cont', + req_headers['x-container-sysmeta-versions-location']) + self.assertIn('x-container-sysmeta-versions-mode', req_headers) + self.assertEqual('history', + req_headers['x-container-sysmeta-versions-mode']) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) @@ -160,10 +185,10 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): method, path, req_headers = calls[0] self.assertEqual('POST', method) self.assertEqual('/v1/a/c', path) - self.assertTrue('x-container-sysmeta-versions-location' in req_headers) + self.assertIn('x-container-sysmeta-versions-location', req_headers) self.assertEqual('', req_headers['x-container-sysmeta-versions-location']) - self.assertTrue('x-versions-location' in req_headers) + self.assertIn('x-versions-location', req_headers) self.assertEqual('', req_headers['x-versions-location']) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) @@ -181,14 +206,84 @@ class 
VersionedWritesTestCase(VersionedWritesBaseTestCase): method, path, req_headers = calls[0] self.assertEqual('POST', method) self.assertEqual('/v1/a/c', path) - self.assertTrue('x-container-sysmeta-versions-location' in req_headers) + self.assertIn('x-container-sysmeta-versions-location', req_headers) self.assertEqual('', req_headers['x-container-sysmeta-versions-location']) - self.assertTrue('x-versions-location' in req_headers) + self.assertIn('x-versions-location', req_headers) self.assertEqual('', req_headers['x-versions-location']) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) + def test_post_versions_mode(self): + self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Versions-Mode': 'stack'}, + environ={'REQUEST_METHOD': 'POST'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + + # check for sysmeta header + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('POST', method) + self.assertEqual('/v1/a/c', path) + self.assertIn('x-container-sysmeta-versions-mode', req_headers) + self.assertEqual('stack', + req_headers['x-container-sysmeta-versions-mode']) + self.assertNotIn('x-versions-mode', req_headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_remove_versions_mode(self): + self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Remove-Versions-Mode': 'x'}, + environ={'REQUEST_METHOD': 'POST'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + + # check for sysmeta header + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('POST', method) + self.assertEqual('/v1/a/c', path) + self.assertIn('x-container-sysmeta-versions-mode', req_headers) + self.assertEqual('', + req_headers['x-container-sysmeta-versions-mode']) + self.assertNotIn('x-versions-mode', req_headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_empty_versions_mode(self): + self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Versions-Mode': ''}, + environ={'REQUEST_METHOD': 'POST'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + + # check for sysmeta header + calls = self.app.calls_with_headers + method, path, req_headers = calls[0] + self.assertEqual('POST', method) + self.assertEqual('/v1/a/c', path) + self.assertIn('x-container-sysmeta-versions-mode', req_headers) + self.assertEqual('', + req_headers['x-container-sysmeta-versions-mode']) + self.assertNotIn('x-versions-mode', req_headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_bad_versions_mode(self): + self.app.register('POST', '/v1/a/c', swob.HTTPOk, {}, 'passed') + req = Request.blank('/v1/a/c', + headers={'X-Versions-Mode': 'foo'}, + environ={'REQUEST_METHOD': 'POST'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '400 Bad Request') + self.assertEqual(len(self.authorized), 0) + self.assertEqual('X-Versions-Mode must be one of stack, history', body) + def test_remove_add_versions_precedence(self): self.app.register( 'POST', '/v1/a/c', swob.HTTPOk, @@ -201,28 +296,45 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): status, headers, body = 
self.call_vw(req) self.assertEqual(status, '200 OK') - self.assertTrue(('X-Versions-Location', 'ver_cont') in headers) + self.assertIn(('X-Versions-Location', 'ver_cont'), headers) # check for sysmeta header calls = self.app.calls_with_headers method, path, req_headers = calls[0] self.assertEqual('POST', method) self.assertEqual('/v1/a/c', path) - self.assertTrue('x-container-sysmeta-versions-location' in req_headers) - self.assertTrue('x-remove-versions-location' not in req_headers) + self.assertIn('x-container-sysmeta-versions-location', req_headers) + self.assertNotIn('x-remove-versions-location', req_headers) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) def test_get_container(self): self.app.register( 'GET', '/v1/a/c', swob.HTTPOk, - {'x-container-sysmeta-versions-location': 'ver_cont'}, None) + {'x-container-sysmeta-versions-location': 'ver_cont', + 'x-container-sysmeta-versions-mode': 'stack'}, None) req = Request.blank( '/v1/a/c', environ={'REQUEST_METHOD': 'GET'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') - self.assertTrue(('X-Versions-Location', 'ver_cont') in headers) + self.assertIn(('X-Versions-Location', 'ver_cont'), headers) + self.assertIn(('X-Versions-Mode', 'stack'), headers) + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + def test_head_container(self): + self.app.register( + 'HEAD', '/v1/a/c', swob.HTTPOk, + {'x-container-sysmeta-versions-location': 'other_ver_cont', + 'x-container-sysmeta-versions-mode': 'history'}, None) + req = Request.blank( + '/v1/a/c', + environ={'REQUEST_METHOD': 'HEAD'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertIn(('X-Versions-Location', 'other_ver_cont'), headers) + self.assertIn(('X-Versions-Mode', 'history'), headers) self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) @@ -311,7 +423,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual(len(self.authorized), 1) self.assertRequestEqual(req, self.authorized[0]) called_method = [method for (method, path, hdrs) in self.app._calls] - self.assertTrue('GET' not in called_method) + self.assertNotIn('GET', called_method) def test_put_request_is_dlo_manifest_with_container_config_true(self): # set x-object-manifest on request and expect no versioning occurred @@ -364,8 +476,8 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertRequestEqual(req, self.authorized[0]) called_method = \ [method for (method, path, rheaders) in self.app._calls] - self.assertTrue('PUT' not in called_method) - self.assertTrue('GET' not in called_method) + self.assertNotIn('PUT', called_method) + self.assertNotIn('GET', called_method) self.assertEqual(1, self.app.call_count) def test_new_version_success(self): @@ -474,7 +586,7 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): self.assertEqual('PUT', method) self.assertEqual('/v1/a/ver_cont/001o/0000000000.00000', path) - def test_delete_first_object_success(self): + def test_delete_no_versions_container_success(self): self.app.register( 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') self.app.register( @@ -501,7 +613,31 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): ('DELETE', '/v1/a/c/o'), ]) - def test_delete_latest_version_success(self): + def test_delete_first_object_success(self): + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register( + 
'GET', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + swob.HTTPOk, {}, '[]') + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('DELETE', '/v1/a/c/o'), + ]) + + def test_delete_latest_version_no_marker_success(self): self.app.register( 'GET', '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', @@ -551,6 +687,235 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): ('DELETE', '/v1/a/ver_cont/001o/2'), ]) + def test_delete_latest_version_restores_marker_success(self): + self.app.register( + 'GET', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "x", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "application/x-deleted;swift_versions_deleted=1"' + '}, {"hash": "y", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"' + '}]') + self.app.register( + 'HEAD', '/v1/a/c/o', swob.HTTPOk, {}, 'passed') + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + headers={'X-If-Delete-At': 1}, + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '204 No Content') + self.assertEqual(len(self.authorized), 2) + self.assertRequestEqual(req, self.authorized[0]) + self.assertRequestEqual(req, self.authorized[1]) + + calls = self.app.calls_with_headers + self.assertEqual(['GET', 'HEAD', 'DELETE'], + [c.method for c in calls]) + + self.assertIn('X-Newest', calls[1].headers) + self.assertEqual('True', calls[1].headers['X-Newest']) + + method, path, req_headers = calls.pop() + self.assertTrue(path.startswith('/v1/a/c/o')) + # Since we're deleting the original, this *should* still be present: + self.assertEqual('1', req_headers.get('X-If-Delete-At')) + + def test_delete_latest_version_is_marker_success(self): + # Test popping a delete marker off the stack. So, there's data in the + # versions container, topped by a delete marker, and there's nothing + # in the base versioned container. 
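+        # A delete marker is distinguished from a real version purely by
+        # its content type. A minimal sketch of how a listing entry could
+        # be classified (the constant matches the registrations below;
+        # this is illustrative, not a public middleware API):
+        #
+        #   MARKER_TYPE = 'application/x-deleted;swift_versions_deleted=1'
+        #
+        #   def is_delete_marker(entry):
+        #       return entry['content_type'] == MARKER_TYPE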
+ self.app.register( + 'GET', + '/v1/a/ver_cont?format=json&prefix=001o/&marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "y", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "application/x-deleted;swift_versions_deleted=1"' + '},{"hash": "x", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/1", ' + '"content_type": "text/plain"' + '}]') + self.app.register( + 'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed') + self.app.register( + 'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed') + self.app.register( + 'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None) + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, {}, 'passed') + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed') + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + headers={'X-If-Delete-At': 1}, + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + prefix_listing_prefix = '/v1/a/ver_cont?format=json&prefix=001o/&' + self.assertEqual(self.app.calls, [ + ('GET', prefix_listing_prefix + 'marker=&reverse=on'), + ('HEAD', '/v1/a/c/o'), + ('GET', '/v1/a/ver_cont/001o/1'), + ('PUT', '/v1/a/c/o'), + ('DELETE', '/v1/a/ver_cont/001o/1'), + ('DELETE', '/v1/a/ver_cont/001o/2'), + ]) + self.assertIn('X-Newest', self.app.headers[1]) + self.assertEqual('True', self.app.headers[1]['X-Newest']) + self.assertIn('X-Newest', self.app.headers[2]) + self.assertEqual('True', self.app.headers[2]['X-Newest']) + + # check that X-If-Delete-At was removed from DELETE request + for req_headers in self.app.headers[-2:]: + self.assertNotIn('x-if-delete-at', + [h.lower() for h in req_headers]) + + def test_delete_latest_version_doubled_up_markers_success(self): + self.app.register( + 'GET', '/v1/a/ver_cont?format=json&prefix=001o/' + '&marker=&reverse=on', + swob.HTTPOk, {}, + '[{"hash": "x", ' + '"last_modified": "2014-11-21T14:23:02.206740", ' + '"bytes": 3, ' + '"name": "001o/3", ' + '"content_type": "application/x-deleted;swift_versions_deleted=1"' + '}, {"hash": "y", ' + '"last_modified": "2014-11-21T14:14:27.409100", ' + '"bytes": 3, ' + '"name": "001o/2", ' + '"content_type": "application/x-deleted;swift_versions_deleted=1"' + '}, {"hash": "y", ' + '"last_modified": "2014-11-20T14:23:02.206740", ' + '"bytes": 30, ' + '"name": "001o/1", ' + '"content_type": "text/plain"' + '}]') + self.app.register( + 'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed') + self.app.register( + 'DELETE', '/v1/a/ver_cont/001o/3', swob.HTTPOk, {}, 'passed') + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + headers={'X-If-Delete-At': 1}, + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '200 OK') + self.assertEqual(len(self.authorized), 1) + self.assertRequestEqual(req, self.authorized[0]) + + # check that X-If-Delete-At was removed from DELETE request + calls = self.app.calls_with_headers + self.assertEqual(['GET', 'HEAD', 'DELETE'], + [c.method for c in calls]) + + method, path, req_headers = calls.pop() + self.assertTrue(path.startswith('/v1/a/ver_cont/001o/3')) + 
self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers]) + + def test_post_bad_mode(self): + req = Request.blank( + '/v1/a/c', + environ={'REQUEST_METHOD': 'POST', + 'CONTENT_LENGTH': '0', + 'HTTP_X_VERSIONS_MODE': 'bad-mode'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '400 Bad Request') + self.assertEqual('X-Versions-Mode must be one of stack, history', body) + self.assertFalse(self.app.calls_with_headers) + + @mock.patch('swift.common.middleware.versioned_writes.time.time', + return_value=1234) + def test_history_delete_marker_no_object_success(self, mock_time): + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPNotFound, + {}, 'passed') + self.app.register( + 'PUT', '/v1/a/ver_cont/001o/0000001234.00000', swob.HTTPCreated, + {}, 'passed') + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPNotFound, {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont', + 'versions-mode': 'history'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '404 Not Found') + self.assertEqual(len(self.authorized), 1) + + req.environ['REQUEST_METHOD'] = 'PUT' + self.assertRequestEqual(req, self.authorized[0]) + + calls = self.app.calls_with_headers + self.assertEqual(['GET', 'PUT', 'DELETE'], [c.method for c in calls]) + self.assertEqual('application/x-deleted;swift_versions_deleted=1', + calls[1].headers.get('Content-Type')) + + @mock.patch('swift.common.middleware.versioned_writes.time.time', + return_value=123456789.54321) + def test_history_delete_marker_over_object_success(self, mock_time): + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, + {'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed') + self.app.register( + 'PUT', '/v1/a/ver_cont/001o/1416421142.00000', swob.HTTPCreated, + {}, 'passed') + self.app.register( + 'PUT', '/v1/a/ver_cont/001o/0123456789.54321', swob.HTTPCreated, + {}, 'passed') + self.app.register( + 'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, None) + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont', + 'versions-mode': 'history'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache, + 'CONTENT_LENGTH': '0'}) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '204 No Content') + self.assertEqual('', body) + self.assertEqual(len(self.authorized), 1) + + req.environ['REQUEST_METHOD'] = 'PUT' + self.assertRequestEqual(req, self.authorized[0]) + + calls = self.app.calls_with_headers + self.assertEqual(['GET', 'PUT', 'PUT', 'DELETE'], + [c.method for c in calls]) + self.assertEqual('/v1/a/ver_cont/001o/1416421142.00000', + calls[1].path) + self.assertEqual('application/x-deleted;swift_versions_deleted=1', + calls[2].headers.get('Content-Type')) + def test_delete_single_version_success(self): # check that if the first listing page has just a single item then # it is not erroneously inferred to be a non-reversed listing @@ -1098,3 +1463,28 @@ class VersionedWritesCopyingTestCase(VersionedWritesBaseTestCase): self.assertEqual('PUT', self.authorized[1].method) self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path) self.assertEqual(2, self.app.call_count) + + +class TestSwiftInfo(unittest.TestCase): + def setUp(self): + utils._swift_info = {} + utils._swift_admin_info = {} + + def test_registered_defaults(self): + versioned_writes.filter_factory({})('have to pass in an app') 
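+        # the factory call above registers swift_info entries as a side
+        # effect; the string argument merely stands in for a WSGI app to
+        # satisfy the factory's signature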
+        swift_info = utils.get_swift_info()
+        # by default, versioned_writes is not in swift_info
+        self.assertNotIn('versioned_writes', swift_info)
+
+    def test_registered_explicitly_set(self):
+        versioned_writes.filter_factory(
+            {'allow_versioned_writes': 'true'})('have to pass in an app')
+        swift_info = utils.get_swift_info()
+        self.assertIn('versioned_writes', swift_info)
+        self.assertEqual(
+            swift_info['versioned_writes'].get('allowed_versions_mode'),
+            ('stack', 'history'))
+
+
+if __name__ == '__main__':
+    unittest.main()

From 65b1820407ea40bd7d65a5356a58a689befe3cb5 Mon Sep 17 00:00:00 2001
From: Charles Hsu
Date: Thu, 11 Aug 2016 00:53:13 +0800
Subject: [PATCH 137/156] Ignore auditor status files to prevent replicator
 errors

Ignore `auditor_status_*.json` files when collecting jobs, so that the
replicator does not treat these paths as partitions when looking for
objects; doing so raised an exception and increased the failure count
in the replicator report.

Co-Authored-By: Clay Gerrard
Co-Authored-By: Mark Kirkwood
Change-Id: Ib15a0987288d9ee32432c1998aefe638ca3b223b
Closes-Bug: #1583305
---
 swift/obj/replicator.py          |  9 ++++++--
 test/unit/obj/test_replicator.py | 37 ++++++++++++++++++++++++++++++--
 2 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index d5b3580a54..114730a8ca 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -357,12 +357,12 @@ class ObjectReplicator(Daemon):
                 handoff_partition_deleted = True
         except (Exception, Timeout):
             self.logger.exception(_("Error syncing handoff partition"))
+            self._add_failure_stats(failure_devs_info)
         finally:
             target_devs_info = set([(target_dev['replication_ip'],
                                      target_dev['device'])
                                     for target_dev in job['nodes']])
             self.stats['success'] += len(target_devs_info - failure_devs_info)
-            self._add_failure_stats(failure_devs_info)
             if not handoff_partition_deleted:
                 self.handoffs_remaining += 1
             self.partition_times.append(time.time() - begin)
@@ -491,10 +491,10 @@ class ObjectReplicator(Daemon):
                     self.suffix_count += len(local_hash)
         except (Exception, Timeout):
             failure_devs_info.update(target_devs_info)
+            self._add_failure_stats(failure_devs_info)
             self.logger.exception(_("Error syncing partition"))
         finally:
             self.stats['success'] += len(target_devs_info - failure_devs_info)
-            self._add_failure_stats(failure_devs_info)
             self.partition_times.append(time.time() - begin)
             self.logger.timing_since('partition.update.timing', begin)
@@ -613,6 +613,11 @@ class ObjectReplicator(Daemon):
                         and partition not in override_partitions):
                     continue

+                if (partition.startswith('auditor_status_') and
+                        partition.endswith('.json')):
+                    # ignore auditor status files
+                    continue
+
                 part_nodes = None
                 try:
                     job_path = join(obj_path, partition)
diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
index d069857c0f..e16056079e 100644
--- a/test/unit/obj/test_replicator.py
+++ b/test/unit/obj/test_replicator.py
@@ -235,7 +235,7 @@ class TestObjectReplicator(unittest.TestCase):
             config,
         ))

-    def _write_disk_data(self, disk_name):
+    def _write_disk_data(self, disk_name, with_json=False):
         os.mkdir(os.path.join(self.devices, disk_name))
         objects = os.path.join(self.devices, disk_name,
                                diskfile.get_data_dir(POLICIES[0]))
@@ -251,6 +251,13 @@
             parts_1[part] = os.path.join(objects_1, part)
             os.mkdir(parts_1[part])

+        if with_json:
+            for json_file in ['auditor_status_ZBF.json',
+                              'auditor_status_ALL.json']:
+                for obj_dir in [objects, objects_1]:
+                    with open(os.path.join(obj_dir, json_file), 'w'):
+                        pass
+
         return objects, objects_1, parts, parts_1

     def _create_replicator(self):
@@ -418,6 +425,32 @@
         self.assertEqual(jobs_by_pol_part[part]['path'],
                          os.path.join(self.objects_1, part[1:]))

+    def test_collect_jobs_failure_report_with_auditor_stats_json(self):
+        devs = [
+            {'id': 0, 'device': 'sda', 'zone': 0,
+             'region': 1, 'ip': '1.1.1.1', 'port': 1111,
+             'replication_ip': '127.0.0.0', 'replication_port': 6200},
+            {'id': 1, 'device': 'sdb', 'zone': 1,
+             'region': 1, 'ip': '1.1.1.1', 'port': 1111,
+             'replication_ip': '127.0.0.0', 'replication_port': 6200},
+            {'id': 2, 'device': 'sdc', 'zone': 2,
+             'region': 1, 'ip': '1.1.1.1', 'port': 1111,
+             'replication_ip': '127.0.0.1', 'replication_port': 6200},
+            {'id': 3, 'device': 'sdd', 'zone': 3,
+             'region': 1, 'ip': '1.1.1.1', 'port': 1111,
+             'replication_ip': '127.0.0.1', 'replication_port': 6200},
+        ]
+        objects_sdb, objects_1_sdb, _, _ = \
+            self._write_disk_data('sdb', with_json=True)
+        objects_sdc, objects_1_sdc, _, _ = \
+            self._write_disk_data('sdc', with_json=True)
+        objects_sdd, objects_1_sdd, _, _ = \
+            self._write_disk_data('sdd', with_json=True)
+        _create_test_rings(self.testdir, devs)
+
+        self.replicator.collect_jobs()
+        self.assertEqual(self.replicator.stats['failure'], 0)
+
     @mock.patch('swift.obj.replicator.random.shuffle',
                 side_effect=lambda l: l)
     def test_collect_jobs_multi_disk(self, mock_shuffle):
         devs = [
@@ -1599,7 +1632,7 @@
             return 2, {'abc': 'def'}

         def fake_exc(tester, *args, **kwargs):
-            if 'Error syncing partition' in args[0]:
+            if 'Error syncing partition timeout' in args[0]:
                 tester.i_failed = True

         self.i_failed = False

From cc2b2cf9c8a5cb913817e2f900c676809cd2e027 Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Tue, 16 Aug 2016 16:21:59 +0100
Subject: [PATCH 138/156] Improve doc for using container-sync with large
 objects

Clarify that synced segment container names must be the same when
syncing large objects. Also add the multipart-manifest query string
option to the API ref for object GETs.

Change-Id: Ib2d2a1e6c1e5eff215fc75c2b49e7d6758b17b7e
Partial-Bug: #1613681
Closes-Bug: #1613316
---
 api-ref/source/parameters.yaml             | 11 ++++++++++-
 api-ref/source/storage-object-services.inc |  3 ++-
 doc/source/overview_container_sync.rst     | 10 +++++++---
 3 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 9213ada101..974ad4e67d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -840,7 +840,7 @@ multipart-manifest_1:
   in: query
   required: false
   type: string
-multipart-manifest_2:
+multipart-manifest_get:
   description: |
     If you include the ``multipart-manifest=get``
     query parameter and the object is a large object, the object
@@ -850,6 +850,15 @@
   in: query
   required: false
   type: string
+multipart-manifest_head:
+  description: |
+    If you include the ``multipart-manifest=get`` query parameter and the
+    object is a large object, the object metadata is not returned. Instead,
+    the response headers will include the manifest metadata and, for dynamic
+    large objects, the ``X-Object-Manifest`` response header.
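+    For example (the account, container and object names here are
+    illustrative only)::
+
+        HEAD /v1/AUTH_test/cont/big-obj?multipart-manifest=get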
+ in: query + required: false + type: string path: description: | For a string value, returns the object names that diff --git a/api-ref/source/storage-object-services.inc b/api-ref/source/storage-object-services.inc index 56d861158d..d9ce9b3b81 100644 --- a/api-ref/source/storage-object-services.inc +++ b/api-ref/source/storage-object-services.inc @@ -103,7 +103,7 @@ Request - temp_url_sig: temp_url_sig - temp_url_expires: temp_url_expires - filename: filename - - multipart-manifest: multipart-manifest + - multipart-manifest: multipart-manifest_get - Range: Range - If-Match: If-Match - If-None-Match: If-None-Match @@ -524,6 +524,7 @@ Request - temp_url_sig: temp_url_sig - temp_url_expires: temp_url_expires - filename: filename + - multipart-manifest: multipart-manifest_head - X-Newest: X-Newest - X-Trans-Id-Extra: X-Trans-Id-Extra diff --git a/doc/source/overview_container_sync.rst b/doc/source/overview_container_sync.rst index 9947fc3d10..0fb24f50d5 100644 --- a/doc/source/overview_container_sync.rst +++ b/doc/source/overview_container_sync.rst @@ -14,9 +14,13 @@ synchronization key. .. note:: - If you are using the large objects feature you will need to ensure both - your manifest file and your segment files are synced if they happen to be - in different containers. + If you are using the large objects feature and syncing to another cluster + then you will need to ensure that manifest files and segment files are + synced. If segment files are in a different container than their manifest + then both the manifest's container and the segments' container must be + synced. The target container for synced segment files must always have the + same name as their source container in order for them to be resolved by + synced manifests. .. note:: From 30b97f2367ed078a7fe278c79518cd6d8c1162b0 Mon Sep 17 00:00:00 2001 From: Tim Burke Date: Tue, 19 Apr 2016 09:41:50 -0700 Subject: [PATCH 139/156] Drop X-Auth-Token from all versioned_writes subrequests It is not necessary for versioned_writes to function (all of these were pre-authed requests anyway), and transaction ID should be used to trace requests instead. Change-Id: If55c1586aa38f9a3bc9e1d00768ca00201af94cd --- swift/common/middleware/versioned_writes.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index ae091cff20..2010d29d6e 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -256,16 +256,12 @@ class VersionedWritesContext(WSGIContext): yield sublisting def _get_source_object(self, req, path_info): - # make a GET request to check object versions - _headers = {'X-Newest': 'True', - 'x-auth-token': req.headers.get('x-auth-token')} - # make a pre_auth request in case the user has write access # to container, but not READ. 
This was allowed in previous version
        # (i.e., before middleware) so keeping the same behavior here
        get_req = make_pre_authed_request(
            req.environ, path=path_info,
-            headers=_headers, method='GET', swift_source='VW')
+            headers={'X-Newest': 'True'}, method='GET', swift_source='VW')
        source_resp = get_req.get_response(self.app)

        if source_resp.content_length is None or \
@@ -282,7 +278,6 @@
                                          swift_source='VW')
        copy_header_subset(source_resp, put_req,
                           lambda k: k.lower() != 'x-timestamp')
-        put_req.headers['x-auth-token'] = req.headers.get('x-auth-token')
        put_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter)
        return put_req.get_response(self.app)

From 95a5a4a7ec1ec8a48db756121e7b3440f5704536 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 15 Aug 2016 18:26:00 -0700
Subject: [PATCH 140/156] Don't run probe tests if resetswift failed

Probe tests clean up the Swift environment for each test in the setUp
method. However, the probe tests will run even if the resetswift script
cannot be used for some reason (e.g. not permitted, or the script not
found), and they will then probably fail only after a long execution
time.

To prevent such an unfortunate situation, and to make the cause easy to
find, this patch adds an exit code check for "resetswift"; if it
failed, the test will raise an AssertionError that includes the stdout
and stderr.

Closes-Bug: #1613494
Change-Id: Id80d56ab6b71402ead4fe22c120064d78c1e74ac
---
 test/probe/common.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/probe/common.py b/test/probe/common.py
index 5bb4b5d546..cf1b6936ce 100644
--- a/test/probe/common.py
+++ b/test/probe/common.py
@@ -266,6 +266,11 @@ def get_policy(**kwargs):
 def resetswift():
     p = Popen("resetswift 2>&1", shell=True, stdout=PIPE)
     stdout, _stderr = p.communicate()
+    if p.returncode:
+        raise AssertionError(
+            'Cleanup with "resetswift" failed: stdout: %s, stderr: %s'
+            % (stdout, _stderr))
+
     print(stdout)
     Manager(['all']).stop()

From dfa5523d8c729dd44f79015d8ad3d0260e7f86c3 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 11 Apr 2016 20:51:00 -0700
Subject: [PATCH 141/156] Add Pros/Cons docs for global cluster considerations

This comes from a discussion at the Bristol Hackathon (Feb 2016).

Currently Swift has a couple of choices (Global Cluster and Container
Sync) for syncing stored data across geographically distributed
locations. This patch adds a summary of that discussion, comparing
Global Cluster with Container Sync, so that operators can tell which
functionality fits their own use case.

And, to be fair to container-sync, this patch moves the global cluster
docs into overview_global_cluster.rst from admin_guide.rst.

Co-Authored-By: Alistair Coles
Change-Id: I624eb519503ae71dbc82245c33dab6e8637d0f8b
---
 doc/source/admin_guide.rst             | 170 ++++++++-----------------
 doc/source/index.rst                   |   1 +
 doc/source/overview_global_cluster.rst | 133 +++++++++++++++++++
 3 files changed, 185 insertions(+), 119 deletions(-)
 create mode 100644 doc/source/overview_global_cluster.rst

diff --git a/doc/source/admin_guide.rst b/doc/source/admin_guide.rst
index 91ee2d00c3..e5ea68f9fb 100644
--- a/doc/source/admin_guide.rst
+++ b/doc/source/admin_guide.rst
@@ -496,133 +496,65 @@ When you specify a policy the containers created also include the
 policy index, thus even when running a container_only report, you will
 need to specify the policy not using the default.
------------------------------------ -Geographically Distributed Clusters ------------------------------------ +----------------------------------------------- +Geographically Distributed Swift Considerations +----------------------------------------------- -Swift's default configuration is currently designed to work in a -single region, where a region is defined as a group of machines with -high-bandwidth, low-latency links between them. However, configuration -options exist that make running a performant multi-region Swift -cluster possible. +Swift provides two features that may be used to distribute replicas of objects +across multiple geographically distributed data-centers: with +:doc:`overview_global_cluster` object replicas may be dispersed across devices +from different data-centers by using `regions` in ring device descriptors; with +:doc:`overview_container_sync` objects may be copied between independent Swift +clusters in each data-center. The operation and configuration of each are +described in their respective documentation. The following points should be +considered when selecting the feature that is most appropriate for a particular +use case: -For the rest of this section, we will assume a two-region Swift -cluster: region 1 in San Francisco (SF), and region 2 in New York -(NY). Each region shall contain within it 3 zones, numbered 1, 2, and -3, for a total of 6 zones. + #. Global Clusters allows the distribution of object replicas across + data-centers to be controlled by the cluster operator on per-policy basis, + since the distribution is determined by the assignment of devices from + each data-center in each policy's ring file. With Container Sync the end + user controls the distribution of objects across clusters on a + per-container basis. -~~~~~~~~~~~~~ -read_affinity -~~~~~~~~~~~~~ + #. Global Clusters requires an operator to coordinate ring deployments across + multiple data-centers. Container Sync allows for independent management of + separate Swift clusters in each data-center, and for existing Swift + clusters to be used as peers in Container Sync relationships without + deploying new policies/rings. -This setting, combined with sorting_method setting, makes the proxy server prefer local backend servers for -GET and HEAD requests over non-local ones. For example, it is -preferable for an SF proxy server to service object GET requests -by talking to SF object servers, as the client will receive lower -latency and higher throughput. + #. Global Clusters seamlessly supports features that may rely on + cross-container operations such as large objects and versioned writes. + Container Sync requires the end user to ensure that all required + containers are sync'd for these features to work in all data-centers. -By default, Swift randomly chooses one of the three replicas to give -to the client, thereby spreading the load evenly. In the case of a -geographically-distributed cluster, the administrator is likely to -prioritize keeping traffic local over even distribution of results. -This is where the read_affinity setting comes in. + #. Global Clusters makes objects available for GET or HEAD requests in both + data-centers even if a replica of the object has not yet been + asynchronously migrated between data-centers, by forwarding requests + between data-centers. Container Sync is unable to serve requests for an + object in a particular data-center until the asynchronous sync process has + copied the object to that data-center. -Example:: + #. 
Global Clusters may require less storage capacity than Container Sync to + achieve equivalent durability of objects in each data-center. Global + Clusters can restore replicas that are lost or corrupted in one + data-center using replicas from other data-centers. Container Sync + requires each data-center to independently manage the durability of + objects, which may result in each data-center storing more replicas than + with Global Clusters. - [app:proxy-server] - sorting_method = affinity - read_affinity = r1=100 - -This will make the proxy attempt to service GET and HEAD requests from -backends in region 1 before contacting any backends in region 2. -However, if no region 1 backends are available (due to replica -placement, failed hardware, or other reasons), then the proxy will -fall back to backend servers in other regions. - -Example:: - - [app:proxy-server] - sorting_method = affinity - read_affinity = r1z1=100, r1=200 - -This will make the proxy attempt to service GET and HEAD requests from -backends in region 1 zone 1, then backends in region 1, then any other -backends. If a proxy is physically close to a particular zone or -zones, this can provide bandwidth savings. For example, if a zone -corresponds to servers in a particular rack, and the proxy server is -in that same rack, then setting read_affinity to prefer reads from -within the rack will result in less traffic between the top-of-rack -switches. - -The read_affinity setting may contain any number of region/zone -specifiers; the priority number (after the equals sign) determines the -ordering in which backend servers will be contacted. A lower number -means higher priority. - -Note that read_affinity only affects the ordering of primary nodes -(see ring docs for definition of primary node), not the ordering of -handoff nodes. - -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -write_affinity and write_affinity_node_count -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This setting makes the proxy server prefer local backend servers for -object PUT requests over non-local ones. For example, it may be -preferable for an SF proxy server to service object PUT requests -by talking to SF object servers, as the client will receive lower -latency and higher throughput. However, if this setting is used, note -that a NY proxy server handling a GET request for an object that was -PUT using write affinity may have to fetch it across the WAN link, as -the object won't immediately have any replicas in NY. However, -replication will move the object's replicas to their proper homes in -both SF and NY. - -Note that only object PUT requests are affected by the write_affinity -setting; POST, GET, HEAD, DELETE, OPTIONS, and account/container PUT -requests are not affected. - -This setting lets you trade data distribution for throughput. If -write_affinity is enabled, then object replicas will initially be -stored all within a particular region or zone, thereby decreasing the -quality of the data distribution, but the replicas will be distributed -over fast WAN links, giving higher throughput to clients. Note that -the replicators will eventually move objects to their proper, -well-distributed homes. - -The write_affinity setting is useful only when you don't typically -read objects immediately after writing them. For example, consider a -workload of mainly backups: if you have a bunch of machines in NY that -periodically write backups to Swift, then odds are that you don't then -immediately read those backups in SF. 
If your workload doesn't look -like that, then you probably shouldn't use write_affinity. - -The write_affinity_node_count setting is only useful in conjunction -with write_affinity; it governs how many local object servers will be -tried before falling back to non-local ones. - -Example:: - - [app:proxy-server] - write_affinity = r1 - write_affinity_node_count = 2 * replicas - -Assuming 3 replicas, this configuration will make object PUTs try -storing the object's replicas on up to 6 disks ("2 * replicas") in -region 1 ("r1"). Proxy server tries to find 3 devices for storing the -object. While a device is unavailable, it queries the ring for the 4th -device and so on until 6th device. If the 6th disk is still unavailable, -the last replica will be sent to other region. It doesn't mean there'll -have 6 replicas in region 1. - - -You should be aware that, if you have data coming into SF faster than -your replicators are transferring it to NY, then your cluster's data distribution -will get worse and worse over time as objects pile up in SF. If this -happens, it is recommended to disable write_affinity and simply let -object PUTs traverse the WAN link, as that will naturally limit the -object growth rate to what your WAN link can handle. + #. Global Clusters execute all account/container metadata updates + synchronously to account/container replicas in all data-centers, which may + incur delays when making updates across WANs. Container Sync only copies + objects between data-centers and all Swift internal traffic is + confined to each data-center. + #. Global Clusters does not yet guarantee the availability of objects stored + in Erasure Coded policies when one data-center is offline. With Container + Sync the availability of objects in each data-center is independent of the + state of other data-centers once objects have been synced. Container Sync + also allows objects to be stored using different policy types in different + data-centers. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Checking handoff partition distribution diff --git a/doc/source/index.rst b/doc/source/index.rst index 4784d91337..dbe54e8a41 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -52,6 +52,7 @@ Overview and Concepts ratelimit overview_large_objects overview_object_versioning + overview_global_cluster overview_container_sync overview_expiring_objects cors diff --git a/doc/source/overview_global_cluster.rst b/doc/source/overview_global_cluster.rst new file mode 100644 index 0000000000..4a7e13b48c --- /dev/null +++ b/doc/source/overview_global_cluster.rst @@ -0,0 +1,133 @@ +=============== +Global Clusters +=============== + +-------- +Overview +-------- + +Swift's default configuration is currently designed to work in a +single region, where a region is defined as a group of machines with +high-bandwidth, low-latency links between them. However, configuration +options exist that make running a performant multi-region Swift +cluster possible. + +For the rest of this section, we will assume a two-region Swift +cluster: region 1 in San Francisco (SF), and region 2 in New York +(NY). Each region shall contain within it 3 zones, numbered 1, 2, and +3, for a total of 6 zones. + +--------------------------- +Configuring Global Clusters +--------------------------- +~~~~~~~~~~~~~ +read_affinity +~~~~~~~~~~~~~ + +This setting, combined with sorting_method setting, makes the proxy +server prefer local backend servers for GET and HEAD requests over +non-local ones. 
For example, it is preferable for an SF proxy server +to service object GET requests by talking to SF object servers, as the +client will receive lower latency and higher throughput. + +By default, Swift randomly chooses one of the three replicas to give +to the client, thereby spreading the load evenly. In the case of a +geographically-distributed cluster, the administrator is likely to +prioritize keeping traffic local over even distribution of results. +This is where the read_affinity setting comes in. + +Example:: + + [app:proxy-server] + sorting_method = affinity + read_affinity = r1=100 + +This will make the proxy attempt to service GET and HEAD requests from +backends in region 1 before contacting any backends in region 2. +However, if no region 1 backends are available (due to replica +placement, failed hardware, or other reasons), then the proxy will +fall back to backend servers in other regions. + +Example:: + + [app:proxy-server] + sorting_method = affinity + read_affinity = r1z1=100, r1=200 + +This will make the proxy attempt to service GET and HEAD requests from +backends in region 1 zone 1, then backends in region 1, then any other +backends. If a proxy is physically close to a particular zone or +zones, this can provide bandwidth savings. For example, if a zone +corresponds to servers in a particular rack, and the proxy server is +in that same rack, then setting read_affinity to prefer reads from +within the rack will result in less traffic between the top-of-rack +switches. + +The read_affinity setting may contain any number of region/zone +specifiers; the priority number (after the equals sign) determines the +ordering in which backend servers will be contacted. A lower number +means higher priority. + +Note that read_affinity only affects the ordering of primary nodes +(see ring docs for definition of primary node), not the ordering of +handoff nodes. + +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +write_affinity and write_affinity_node_count +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This setting makes the proxy server prefer local backend servers for +object PUT requests over non-local ones. For example, it may be +preferable for an SF proxy server to service object PUT requests +by talking to SF object servers, as the client will receive lower +latency and higher throughput. However, if this setting is used, note +that a NY proxy server handling a GET request for an object that was +PUT using write affinity may have to fetch it across the WAN link, as +the object won't immediately have any replicas in NY. However, +replication will move the object's replicas to their proper homes in +both SF and NY. + +Note that only object PUT requests are affected by the write_affinity +setting; POST, GET, HEAD, DELETE, OPTIONS, and account/container PUT +requests are not affected. + +This setting lets you trade data distribution for throughput. If +write_affinity is enabled, then object replicas will initially be +stored all within a particular region or zone, thereby decreasing the +quality of the data distribution, but the replicas will be distributed +over fast WAN links, giving higher throughput to clients. Note that +the replicators will eventually move objects to their proper, +well-distributed homes. + +The write_affinity setting is useful only when you don't typically +read objects immediately after writing them. 
For example, consider a
+workload of mainly backups: if you have a bunch of machines in NY that
+periodically write backups to Swift, then odds are that you don't then
+immediately read those backups in SF. If your workload doesn't look
+like that, then you probably shouldn't use write_affinity.
+
+The write_affinity_node_count setting is only useful in conjunction
+with write_affinity; it governs how many local object servers will be
+tried before falling back to non-local ones.
+
+Example::
+
+    [app:proxy-server]
+    write_affinity = r1
+    write_affinity_node_count = 2 * replicas
+
+Assuming 3 replicas, this configuration will make object PUTs try
+storing the object's replicas on up to 6 disks ("2 * replicas") in
+region 1 ("r1"). The proxy server tries to find 3 devices for storing
+the object. If a device is unavailable, it queries the ring for the 4th
+device, and so on up to the 6th device. If the 6th device is also
+unavailable, the last replica will be sent to another region. This does
+not mean there will be 6 replicas in region 1.
+
+
+You should be aware that, if you have data coming into SF faster than
+your replicators are transferring it to NY, then your cluster's data
+distribution will get worse and worse over time as objects pile up in SF.
+If this happens, it is recommended to disable write_affinity and simply let
+object PUTs traverse the WAN link, as that will naturally limit the
+object growth rate to what your WAN link can handle.

From ce49a296c6f3e0bdeae0c3cd6615d31141f5c23e Mon Sep 17 00:00:00 2001
From: Alistair Coles
Date: Wed, 17 Aug 2016 17:20:21 +0100
Subject: [PATCH 142/156] Add rm to tox whitelist_externals

Avoids a warning when running tox -e api-ref

Change-Id: Ib02849075e6424f1db84499fd7500d7bb76dde67
---
 tox.ini | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tox.ini b/tox.ini
index 769520e0b2..4a69f2b69e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,6 +16,7 @@ commands = find . -type f -name "*.py[c|o]" -delete
            find . -type d -name "__pycache__" -delete
            nosetests {posargs:test/unit}
 whitelist_externals = find
+                      rm
 passenv = SWIFT_* *_proxy

 [testenv:cover]

From 26633af3af9a1998b1e9e3fdf061482255329571 Mon Sep 17 00:00:00 2001
From: zheng yin
Date: Thu, 11 Aug 2016 20:44:37 +0800
Subject: [PATCH 143/156] Reset AccountReaper stats in __init__

Make the AccountReaper __init__ method reset its stats variables.
This saves the unit test having to initialise the stats variables.
Also add more asserts to some AccountReaper test cases.
Change-Id: Iea112962d89ebfa3450f43b2a28ac8e8ed8b07b0 --- swift/account/reaper.py | 1 + test/unit/account/test_reaper.py | 39 ++++++++++++++++++++++++++------ 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 050db4e189..676fac55c6 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -84,6 +84,7 @@ class AccountReaper(Daemon): reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30) self.reap_not_done_after = reap_warn_after + self.delay_reaping self.start_time = time() + self.reset_stats() def get_account_ring(self): """The account :class:`swift.common.ring.Ring` for the cluster.""" diff --git a/test/unit/account/test_reaper.py b/test/unit/account/test_reaper.py index 3c90673948..4961f19acb 100644 --- a/test/unit/account/test_reaper.py +++ b/test/unit/account/test_reaper.py @@ -232,13 +232,6 @@ class TestReaper(unittest.TestCase): myips = ['10.10.10.1'] r = reaper.AccountReaper(conf) - r.stats_return_codes = {} - r.stats_containers_deleted = 0 - r.stats_containers_remaining = 0 - r.stats_containers_possibly_remaining = 0 - r.stats_objects_deleted = 0 - r.stats_objects_remaining = 0 - r.stats_objects_possibly_remaining = 0 r.myips = myips if fakelogger: r.logger = unit.debug_logger('test-reaper') @@ -304,6 +297,33 @@ class TestReaper(unittest.TestCase): finally: reaper.time = time_orig + def test_reset_stats(self): + conf = {} + r = reaper.AccountReaper(conf) + self.assertDictEqual(r.stats_return_codes, {}) + self.assertEqual(r.stats_containers_deleted, 0) + self.assertEqual(r.stats_containers_remaining, 0) + self.assertEqual(r.stats_containers_possibly_remaining, 0) + self.assertEqual(r.stats_objects_deleted, 0) + self.assertEqual(r.stats_objects_remaining, 0) + self.assertEqual(r.stats_objects_possibly_remaining, 0) + # also make sure reset actually resets values + r.stats_return_codes = {"hello": "swift"} + r.stats_containers_deleted = random.randint(1, 100) + r.stats_containers_remaining = random.randint(1, 100) + r.stats_containers_possibly_remaining = random.randint(1, 100) + r.stats_objects_deleted = random.randint(1, 100) + r.stats_objects_remaining = random.randint(1, 100) + r.stats_objects_possibly_remaining = random.randint(1, 100) + r.reset_stats() + self.assertDictEqual(r.stats_return_codes, {}) + self.assertEqual(r.stats_containers_deleted, 0) + self.assertEqual(r.stats_containers_remaining, 0) + self.assertEqual(r.stats_containers_possibly_remaining, 0) + self.assertEqual(r.stats_objects_deleted, 0) + self.assertEqual(r.stats_objects_remaining, 0) + self.assertEqual(r.stats_objects_possibly_remaining, 0) + def test_reap_object(self): conf = { 'mount_check': 'false', @@ -372,6 +392,9 @@ class TestReaper(unittest.TestCase): policy.object_ring.replicas - 2) self.assertEqual(r.stats_objects_remaining, 1) self.assertEqual(r.stats_objects_possibly_remaining, 1) + self.assertEqual(r.stats_return_codes[2], + policy.object_ring.replicas - 1) + self.assertEqual(r.stats_return_codes[4], 1) def test_reap_object_timeout(self): r = self.init_reaper({}, fakelogger=True) @@ -381,7 +404,9 @@ class TestReaper(unittest.TestCase): with patch('swift.account.reaper.direct_delete_object', self.fake_direct_delete_object): r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 1) + self.assertEqual(r.stats_objects_deleted, 0) self.assertEqual(r.stats_objects_remaining, 4) + self.assertEqual(r.stats_objects_possibly_remaining, 0) self.assertTrue(r.logger.get_lines_for_level( 'error')[-1].startswith('Timeout 
Exception'))

From b7b77c7aa3aea2d210f77b0da4b69e6bdec49c84 Mon Sep 17 00:00:00 2001
From: Kota Tsuyuzaki
Date: Mon, 15 Aug 2016 17:52:58 -0700
Subject: [PATCH 144/156] Follow up delayed reap probe test

This is a follow-up patch for https://review.openstack.org/#/c/321041

This patch includes the following items:

- Move test_sync below _verify_account_reaped so that the actual tests
  are grouped together
- Change the test name from "test_sync" to "test_reap" to clarify the
  purpose
- Fix a typo in an error message, changing "Object" to "Container"

Change-Id: I51bc01113056e2eb99f731d38e9f1c7a6c5c96be
---
 test/probe/test_account_reaper.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/test/probe/test_account_reaper.py b/test/probe/test_account_reaper.py
index bc83d7716b..6530205faf 100644
--- a/test/probe/test_account_reaper.py
+++ b/test/probe/test_account_reaper.py
@@ -57,12 +57,6 @@ class TestAccountReaper(ReplProbeTest):
         for node in nodes:
             direct_delete_account(node, part, self.account)

-    def test_sync(self):
-        # run the reaper
-        Manager(['account-reaper']).once()
-
-        self._verify_account_reaped()
-
     def _verify_account_reaped(self):
         for policy, container, obj in self.all_objects:
             # verify that any container deletes were at same timestamp
@@ -152,6 +146,12 @@ class TestAccountReaper(ReplProbeTest):
                              (self.account, container, obj, node, policy))
             self.assertEqual(1, len(delete_times))

+    def test_reap(self):
+        # run the reaper
+        Manager(['account-reaper']).once()
+
+        self._verify_account_reaped()
+
     def test_delayed_reap(self):
         # define reapers which are supposed to operate 3 seconds later
         account_reapers = []
@@ -174,7 +174,8 @@
                     direct_head_container(cnode, cpart, self.account,
                                           container)
                 except ClientException:
-                    self.fail("Nothing should be reaped. Object should exist")
+                    self.fail(
+                        "Nothing should be reaped. Container should exist")

             part, nodes = policy.object_ring.get_nodes(self.account,
                                                        container, obj)

From 13747021a8743d358961c1c4820fddfe9c955ea2 Mon Sep 17 00:00:00 2001
From: Janie Richling
Date: Tue, 26 Jul 2016 15:20:52 -0500
Subject: [PATCH 145/156] Add test for POST to DLO manifest file

In the past, a POST to a DLO manifest file when object_post_as_copy
was true resulted in the manifest file contents being replaced by the
concatenation of the DLO segments. This no longer happens, but tests
for this case are missing. This patch adds a functional test to
assert that the manifest file is preserved in a POST request.

Change-Id: I90546014a7dcc7266f0d0e0ff6339688b7954b96
Related-bug: #1487791
Related-bug: #1514317
---
 test/functional/tests.py | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/test/functional/tests.py b/test/functional/tests.py
index 3ad0cc325d..ff75272e54 100644
--- a/test/functional/tests.py
+++ b/test/functional/tests.py
@@ -2668,6 +2668,41 @@ class TestDlo(Base):
             contents,
             "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj")

+    def test_dlo_post_with_manifest_header(self):
+        # verify that performing a POST to a DLO manifest
+        # preserves the fact that it is a manifest file.
+
+        # create a new manifest for this test to avoid test coupling.
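+        # X-Object-Manifest has the form '<container>/<object prefix>';
+        # reusing the value from the existing 'man1' manifest makes the
+        # new manifest resolve to the same segment objects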
+        x_o_m = self.env.container.file('man1').info()['x_object_manifest']
+        file_item = self.env.container.file(Utils.create_name())
+        file_item.write('manifest-contents', hdrs={"X-Object-Manifest": x_o_m})
+
+        # sanity checks
+        manifest_contents = file_item.read(parms={'multipart-manifest': 'get'})
+        self.assertEqual('manifest-contents', manifest_contents)
+        expected_contents = ''.join([(c * 10) for c in 'abcde'])
+        contents = file_item.read(parms={})
+        self.assertEqual(expected_contents, contents)
+
+        # POST to the manifest file
+        # include the x-object-manifest in case running with fast-post
+        file_item.post({'x-object-meta-foo': 'bar',
+                        'x-object-manifest': x_o_m})
+
+        # verify that x-object-manifest is still intact
+        file_item.info()
+        resp_headers = file_item.conn.response.getheaders()
+        self.assertIn(('x-object-manifest', x_o_m), resp_headers)
+        self.assertIn(('x-object-meta-foo', 'bar'), resp_headers)
+
+        # verify that manifest content was not changed
+        manifest_contents = file_item.read(parms={'multipart-manifest': 'get'})
+        self.assertEqual('manifest-contents', manifest_contents)
+
+        # verify that manifest still points to original content
+        contents = file_item.read(parms={})
+        self.assertEqual(expected_contents, contents)
+

 class TestDloUTF8(Base2, TestDlo):
     set_up = False

From 8bf2233b40dbf2321f754b2a08e8271b4c0ee007 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20Lis=C3=A1k?=
Date: Thu, 18 Aug 2016 16:14:36 +0200
Subject: [PATCH 146/156] Documentation enhancements of nice/ionice feature

Based on comments from patch #238799.

Change-Id: I9455cf6dc7fd12fee62439ff3c5f3255287ab1be
---
 doc/manpages/account-server.conf.5   |  20 +-
 doc/manpages/container-server.conf.5 |  24 +-
 doc/manpages/object-expirer.conf.5   |   8 +-
 doc/manpages/object-server.conf.5    |  20 +-
 doc/manpages/proxy-server.conf.5     |   8 +-
 doc/source/deployment_guide.rst      | 326 ++++++++++++++++++++++++++-
 etc/object-expirer.conf-sample       |   2 +
 7 files changed, 379 insertions(+), 29 deletions(-)

diff --git a/doc/manpages/account-server.conf.5 b/doc/manpages/account-server.conf.5
index b9f8b9395c..0ef790aa4a 100644
--- a/doc/manpages/account-server.conf.5
+++ b/doc/manpages/account-server.conf.5
@@ -131,12 +131,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority.
+are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle).
+The default does not modify class and priority. Work only with ionice_priority.
 .IP \fBionice_priority\fR
 Modify I/O scheduling priority of server processes. I/O niceness priority
 is a number which goes from 0 to 7. The higher the value, the lower the I/O
 priority of the process. Work only with ionice_class.
+Ignored if IOPRIO_CLASS_IDLE is set.
 .RE
 .PD
@@ -190,12 +192,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority.
+are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle).
+The default does not modify class and priority. Work only with ionice_priority.
 .IP \fBionice_priority\fR
 Modify I/O scheduling priority of server processes. I/O niceness priority
 is a number which goes from 0 to 7. The higher the value, the lower the I/O
 priority of the process. Work only with ionice_class.
+Ignored if IOPRIO_CLASS_IDLE is set.
 .RE
 .PD
@@ -311,12 +315,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority.
+are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle).
+The default does not modify class and priority. Work only with ionice_priority.
 .IP \fBionice_priority\fR
 Modify I/O scheduling priority of server processes. I/O niceness priority
 is a number which goes from 0 to 7. The higher the value, the lower the I/O
 priority of the process. Work only with ionice_class.
+Ignored if IOPRIO_CLASS_IDLE is set.
 .RE
@@ -345,12 +351,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority.
+are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle).
+The default does not modify class and priority. Work only with ionice_priority.
 .IP \fBionice_priority\fR
 Modify I/O scheduling priority of server processes. I/O niceness priority
 is a number which goes from 0 to 7. The higher the value, the lower the I/O
 priority of the process. Work only with ionice_class.
+Ignored if IOPRIO_CLASS_IDLE is set.
 .RE
@@ -393,12 +401,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority.
+are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle).
+The default does not modify class and priority. Work only with ionice_priority.
 .IP \fBionice_priority\fR
 Modify I/O scheduling priority of server processes. I/O niceness priority
 is a number which goes from 0 to 7. The higher the value, the lower the I/O
 priority of the process. Work only with ionice_class.
+Ignored if IOPRIO_CLASS_IDLE is set.
 .RE
 .PD
diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5
index b396398d4b..ca95a431ac 100644
--- a/doc/manpages/container-server.conf.5
+++ b/doc/manpages/container-server.conf.5
@@ -137,12 +137,14 @@ Modify scheduling priority of server processes. Niceness values range from -20
 The default does not modify priority.
 .IP \fBionice_class\fR
 Modify I/O scheduling class of server processes. I/O niceness class values
-are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE.
.IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -311,12 +315,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE @@ -345,12 +351,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE @@ -393,12 +401,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD diff --git a/doc/manpages/container-server.conf.5 b/doc/manpages/container-server.conf.5 index b396398d4b..ca95a431ac 100644 --- a/doc/manpages/container-server.conf.5 +++ b/doc/manpages/container-server.conf.5 @@ -137,12 +137,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. 
The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -202,12 +204,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -323,12 +327,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE @@ -364,12 +370,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -398,12 +406,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE @@ -438,12 +448,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. 
I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD diff --git a/doc/manpages/object-expirer.conf.5 b/doc/manpages/object-expirer.conf.5 index 1e98216c10..cd005b8727 100644 --- a/doc/manpages/object-expirer.conf.5 +++ b/doc/manpages/object-expirer.conf.5 @@ -94,12 +94,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -133,12 +135,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD diff --git a/doc/manpages/object-server.conf.5 b/doc/manpages/object-server.conf.5 index 51e5bccf7b..c2382c7cb5 100644 --- a/doc/manpages/object-server.conf.5 +++ b/doc/manpages/object-server.conf.5 @@ -148,12 +148,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -257,12 +259,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority.
.IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -422,12 +426,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE @@ -509,12 +515,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -563,12 +571,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE diff --git a/doc/manpages/proxy-server.conf.5 b/doc/manpages/proxy-server.conf.5 index 6c11368fe8..ff61457952 100644 --- a/doc/manpages/proxy-server.conf.5 +++ b/doc/manpages/proxy-server.conf.5 @@ -149,12 +149,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE.
The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD @@ -1054,12 +1056,14 @@ Modify scheduling priority of server processes. Niceness values range from -20 The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values -are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE. The default does not modify class and priority. +are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). +The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. +Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD diff --git a/doc/source/deployment_guide.rst b/doc/source/deployment_guide.rst index 524bab8c5c..cbda8f5545 100644 --- a/doc/source/deployment_guide.rst +++ b/doc/source/deployment_guide.rst @@ -521,8 +521,9 @@ nice_priority None Scheduling priority of server proce favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. - I/O niceness class values are IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports I/O scheduling priorities and classes since 2.6.13 with @@ -534,6 +535,7 @@ ionice_priority None I/O scheduling priority of server The higher the value, the lower the I/O priority of the process. Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ================================ ========== ========================================== .. _object-server-options: @@ -618,6 +620,27 @@ splice no Use splice() for zero-copy will appear in the object server logs at startup, but your object servers should continue to function. +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set.
============================= ====================== =============================================== [object-replicator] @@ -710,6 +733,33 @@ rsync_error_log_line_length 0 Limits how long rsync err ring_check_interval 15 Interval for checking new ring file recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server + processes. Niceness values + range from -20 (most favorable + to the process) to 19 (least + favorable to the process). + The default does not modify + priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class + values are IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify + class and priority. + Linux supports I/O scheduling + priorities and classes since + 2.6.13 with the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority + is a number which goes from + 0 to 7. The higher the value, + the lower the I/O priority of + the process. + Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE + is set. =========================== ======================== ================================ [object-updater] @@ -729,6 +779,27 @@ node_timeout DEFAULT or 10 Request timeout to external services. Th sections use 3 as the final default). slowdown 0.01 Time in seconds to wait between objects recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ================== =================== ========================================== [object-auditor] @@ -760,6 +831,27 @@ rsync_tempfile_timeout auto Time elapsed in seconds before r of "auto" try to use object-replicator's rsync_timeout + 900 or fallback to 86400 (1 day). +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set.
=========================== =================== ========================================== ------------------------------ @@ -846,8 +938,9 @@ nice_priority None Scheduling priority of server proce favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. - I/O niceness class values are IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports I/O scheduling priorities and classes since 2.6.13 @@ -858,6 +951,7 @@ ionice_priority None I/O scheduling priority of server p goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. =============================== ========== ============================================ [container-server] @@ -890,6 +984,28 @@ replication_server Configure parameter for creati have a separate replication network, you should not specify any value for "replication_server". +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are + IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ============================== ================ ======================================== [container-replicator] @@ -952,6 +1068,35 @@ rsync_compress no Allow rsync to compress data example: .tar.gz, mp3) might slow down the syncing process. recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server + processes. Niceness values + range from -20 (most favorable + to the process) to 19 (least + favorable to the process). + The default does not modify + priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class + values are + IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify + class and priority. Linux + supports I/O scheduling + priorities and classes since + 2.6.13 with the CFQ I/O + scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of + server processes. I/O niceness + priority is a number which goes + from 0 to 7. + The higher the value, the lower + the I/O priority of the process. + Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE + is set. ================== =========================== ============================= [container-updater] @@ -976,6 +1121,29 @@ account_suppression_time 60 Seconds to suppress updating an error (timeout, not yet found, etc.) recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server + processes.
Niceness values range + from -20 (most favorable to the + process) to 19 (least favorable + to the process). The default does + not modify priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class + values are IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower + the I/O priority of the process. + Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ======================== ================= ================================== [container-auditor] @@ -992,6 +1160,28 @@ containers_per_second 200 Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited. recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are + IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ===================== ================= ======================================= ---------------------------- @@ -1078,8 +1268,9 @@ nice_priority None Scheduling priority of server proce favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. - I/O niceness class values are IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, and IOPRIO_CLASS_IDLE. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports I/O scheduling priorities and classes since 2.6.13 with @@ -1090,6 +1281,7 @@ ionice_priority None I/O scheduling priority of server p goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. =============================== ========== ============================================= [account-server] @@ -1120,6 +1312,27 @@ replication_server Configure parameter for creating have a separate replication network, you should not specify any value for "replication_server". +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority.
Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ============================= ============== ========================================== [account-replicator] @@ -1180,6 +1393,32 @@ rsync_compress no Allow rsync to compress data .tar.gz, mp3) might slow down the syncing process. recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server + processes. Niceness values + range from -20 (most favorable + to the process) to 19 (least + favorable to the process). + The default does not modify + priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class + values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE + (best-effort), and IOPRIO_CLASS_IDLE + (idle). + The default does not modify + class and priority. Linux supports + I/O scheduling priorities and classes + since 2.6.13 with the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority + is a number which goes from 0 to 7. + The higher the value, the lower + the I/O priority of the process. + Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE + is set. ================== ========================= =============================== [account-auditor] @@ -1196,6 +1435,28 @@ accounts_per_second 200 Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited. recon_cache_path /var/cache/swift Path to recon cache +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are + IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ==================== ================ ======================================= [account-reaper] @@ -1224,6 +1485,27 @@ reap_warn_after 2892000 If the account fails to be reaped due space is not being reclaimed after you delete account(s). This is in addition to any time requested by delay_reaping. +nice_priority None Scheduling priority of server processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server processes. + I/O niceness class values are IOPRIO_CLASS_RT + (realtime), IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 with + the CFQ I/O scheduler.
+ Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the I/O + priority of the process. Works only with + ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ================== =============== ========================================= .. _proxy-server-config: @@ -1339,8 +1621,9 @@ nice_priority None Scheduling prior does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values - are IOPRIO_CLASS_RT, IOPRIO_CLASS_BE and - IOPRIO_CLASS_IDLE. + are IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort) and + IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports I/O scheduling priorities @@ -1353,6 +1636,7 @@ ionice_priority None I/O scheduling p The higher the value, the lower the I/O priority of the process. Works only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ==================================== ======================== ======================================== [proxy-server] @@ -1479,6 +1763,29 @@ concurrency_timeout conn_timeout This parameter controls how long firing of the threads. This number should be between 0 and node_timeout. The default is conn_timeout (0.5). +nice_priority None Scheduling priority of server + processes. + Niceness values range from -20 (most + favorable to the process) to 19 (least + favorable to the process). The default + does not modify priority. +ionice_class None I/O scheduling class of server + processes. I/O niceness class values + are IOPRIO_CLASS_RT (realtime), + IOPRIO_CLASS_BE (best-effort), + and IOPRIO_CLASS_IDLE (idle). + The default does not modify class and + priority. Linux supports I/O scheduling + priorities and classes since 2.6.13 + with the CFQ I/O scheduler. + Works only with ionice_priority. +ionice_priority None I/O scheduling priority of server + processes. I/O niceness priority is + a number which goes from 0 to 7. + The higher the value, the lower the + I/O priority of the process. Works + only with ionice_class. + Ignored if IOPRIO_CLASS_IDLE is set. ============================ =============== ===================================== [tempauth] @@ -1627,7 +1934,8 @@ Fair Queuing (CFQ) I/O scheduler. If you run your Storage servers all together on the same servers, you can slow down the auditors or prioritize object-server I/O via these parameters (but probably do not need to change it on the proxy). It is a new feature and the best practices are still -being developed. +being developed. On some systems it may be required to run the daemons as root. +For more information, see also setpriority(2) and ioprio_set(2). The above configuration settings should be taken as suggestions, and testing of configuration settings should be done to ensure best utilization of CPU, diff --git a/etc/object-expirer.conf-sample b/etc/object-expirer.conf-sample index b60c204b4b..acce1e7d3f 100644 --- a/etc/object-expirer.conf-sample +++ b/etc/object-expirer.conf-sample @@ -29,6 +29,7 @@ # You can set scheduling priority of processes. Niceness values range from -20 # (most favorable to the process) to 19 (least favorable to the process). # nice_priority = +# # You can set I/O scheduling class and priority of processes. I/O niceness # class values are realtime, best-effort and idle. I/O niceness # priority is a number which goes from 0 to 7.
The higher the value, the lower @@ -65,6 +66,7 @@ # You can set scheduling priority of processes. Niceness values range from -20 # (most favorable to the process) to 19 (least favorable to the process). # nice_priority = +# # You can set I/O scheduling class and priority of processes. I/O niceness # class values are realtime, best-effort and idle. I/O niceness # priority is a number which goes from 0 to 7. The higher the value, the lower From aa893d90778f912fa6668f9e3c462885654c423a Mon Sep 17 00:00:00 2001 From: Graham Hayes Date: Fri, 19 Aug 2016 14:25:06 +0100 Subject: [PATCH 147/156] Get ready for os-api-ref sphinx theme change Change-Id: Ib4aa4a26814273efafa3453237d18acf8cc966cb --- api-ref/source/conf.py | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py index e01012aeb0..3ec1303ce5 100644 --- a/api-ref/source/conf.py +++ b/api-ref/source/conf.py @@ -29,6 +29,36 @@ import subprocess import sys import warnings +# TODO(Graham Hayes): Remove the following block of code when os-api-ref is +# using openstackdocstheme + +import os_api_ref + +if getattr(os_api_ref, 'THEME', 'oslosphinx') == 'openstackdocstheme': + # We are on the new version with openstackdocstheme support + + extensions = [ + 'os_api_ref', + ] + + import openstackdocstheme # noqa + + html_theme = 'openstackdocs' + html_theme_path = [openstackdocstheme.get_html_theme_path()] + html_theme_options = { + "sidebar_mode": "toc", + } + +else: + # We are on the old version without openstackdocstheme support + + extensions = [ + 'os_api_ref', + 'oslosphinx', + ] + +# End temporary block + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('./')) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ - 'os_api_ref', - 'oslosphinx', -] - # The suffix of source filenames. source_suffix = '.rst' From d98928caa99cbfac1b8710ac0761925a8146422e Mon Sep 17 00:00:00 2001 From: Victor Stinner Date: Mon, 22 Aug 2016 10:41:59 +0200 Subject: [PATCH 148/156] py3: tox.ini: use substitution for py35 commands Use tox substitution to avoid duplicating the testenv:py34 commands in testenv:py35, so the whitelist of Python 3 tests does not have to be maintained in two different places. Also write the list of tests on a new line to make it easier to add new unit tests.
Change-Id: I6e7f238f1c5d3fc9b6560918dcbb93e9dd8ec084 --- tox.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 4a69f2b69e..a698de78c6 100644 --- a/tox.ini +++ b/tox.ini @@ -28,11 +28,11 @@ setenv = VIRTUAL_ENV={envdir} [testenv:py34] commands = - nosetests test/unit/common/test_exceptions.py + nosetests \ + test/unit/common/test_exceptions.py [testenv:py35] -commands = - nosetests test/unit/common/test_exceptions.py +commands = {[testenv:py34]commands} [testenv:pep8] basepython = python2.7 From 89388bf232b54ec0adf5cb815efff23cd479e2c1 Mon Sep 17 00:00:00 2001 From: Mohit Motiani Date: Mon, 22 Aug 2016 19:27:35 +0000 Subject: [PATCH 149/156] Fix typos and grammar in builder.py Change-Id: Ib87f4df8f741809840e92db9bacf2af847a5f77f Closes-Bug: #1600403 --- swift/common/ring/builder.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 028983966b..71f4661407 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -407,8 +407,8 @@ class RingBuilder(object): lot more analysis and therefore a lot more time -- I had code that did that before). Because of this, it keeps rebalancing until the device skew (number of partitions a device wants compared to what it has) gets - below 1% or doesn't change by more than 1% (only happens with ring that - can't be balanced no matter what). + below 1% or doesn't change by more than 1% (only happens with a ring + that can't be balanced no matter what). :returns: (number_of_partitions_altered, resulting_balance, number_of_removed_devices) @@ -494,7 +494,7 @@ class RingBuilder(object): Build a dict of all tiers in the cluster to a list of the number of parts with a replica count at each index. The values of the dict will be lists of length the maximum whole replica + 1 so that the - graph[tier][3] is the number of parts with in the tier with 3 replicas + graph[tier][3] is the number of parts within the tier with 3 replicas and graph[tier][0] is the number of parts not assigned in this tier. i.e. { <tier>: [ <number_of_parts_with_0_replicas>, <number_of_parts_with_1_replicas>, ... ], } :param old_replica2part2dev: if called from rebalance, the - old_replica2part2dev can be used to count moved moved parts. + old_replica2part2dev can be used to count moved parts. :returns: number of parts with different assignments than old_replica2part2dev if provided @@ -693,7 +693,7 @@ class RingBuilder(object): def get_balance(self): """ Get the balance of the ring. The balance value is the highest - percentage off the desired amount of partitions a given device + percentage deviation from the desired amount of partitions a given device wants. For instance, if the "worst" device wants (based on its weight relative to the sum of all the devices' weights) 123 partitions and it has 124 partitions, the balance value would @@ -710,7 +710,7 @@ class RingBuilder(object): dispersed. The required overload is the largest percentage change of any single - device from its weighted replicanth to its wanted replicanth (note + device from its weighted replicanth to its wanted replicanth (note: under weighted devices have a negative percentage change) to achieve dispersion - that is to say a single device that must be overloaded by 5% is worse than 5 devices in a single tier overloaded by 1%.
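As a purely illustrative aside on the "required overload" definition in the docstring above, the computation it describes can be sketched in a few lines of Python. The device names and replicanth values here are hypothetical, not taken from the patch:

    # Illustrative only: "required overload" as described in the docstring
    # above, i.e. the largest percentage change from a device's weighted
    # replicanth to its wanted replicanth. All numbers are made up.
    weighted = {'d1': 2.00, 'd2': 1.00, 'd3': 0.50}  # share implied by weight
    wanted = {'d1': 2.10, 'd2': 0.95, 'd3': 0.45}    # share needed to disperse

    required_overload = max(
        (wanted[d] - weighted[d]) / weighted[d] for d in weighted)
    print(required_overload)  # 0.05: d1 must be overloaded by 5%

Note how d2 and d3 come out negative (they are underweighted), so a single device needing +5% dominates, which is exactly the "one device overloaded by 5% is worse than five devices overloaded by 1%" point made above.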
@@ -992,9 +992,10 @@ class RingBuilder(object): undispersed_dev_replicas.sort( key=lambda dr: dr[0]['parts_wanted']) for dev, replica in undispersed_dev_replicas: - # the min part hour check is ignored iff a device has more - # than one replica of a part assigned to it - which would have - # only been possible on rings built with older version of code + # the min part hour check is ignored if and only if a device + # has more than one replica of a part assigned to it - which + # would have only been possible on rings built with an older + # version of the code if (self._last_part_moves[part] < self.min_part_hours and not replicas_at_tier[dev['tiers'][-1]] > 1): continue @@ -1072,8 +1073,8 @@ class RingBuilder(object): """ Gather parts that look like they should move for balance reasons. - A simple gather of parts that looks dispersible normally works out, - we'll switch strategies if things don't be seem to moving... + A simple gather of parts that look dispersible normally works out; + we'll switch strategies if things don't seem to move. """ # pick a random starting point on the other side of the ring quarter_turn = (self.parts // 4) @@ -1139,7 +1140,7 @@ class RingBuilder(object): def _reassign_parts(self, reassign_parts, replica_plan): """ - For an existing ring data set, partitions are reassigned similarly to + For an existing ring data set, partitions are reassigned in the same way as the initial assignment. The devices are ordered by how many partitions they still want and @@ -1168,7 +1169,7 @@ # account how many partitions a given tier wants to shed. # # If we did not do this, we could have a zone where, at some - # point during assignment, number-of-parts-to-gain equals + # point during an assignment, number-of-parts-to-gain equals # number-of-parts-to-shed. At that point, no further placement # into that zone would occur since its parts_available_in_tier # would be 0. This would happen any time a zone had any device @@ -1398,7 +1399,7 @@ N.B. _build_max_replicas_by_tier calculates the upper bound on the replicanths each tier may hold irrespective of the weights of the tier; this method will calculate the minimum replicanth <= - max_replicas[tier] that will still solve dispersion. However it is + max_replicas[tier] that will still solve dispersion. However, it is not guaranteed to return a fully dispersed solution if failure domains are over-weighted for their device count. """ @@ -1427,7 +1428,7 @@ def place_replicas(tier, replicanths): if replicanths > num_devices[tier]: raise exceptions.RingValidationError( - 'More than replicanths (%s) than devices (%s) ' + 'More replicanths (%s) than devices (%s) ' 'in tier (%s)' % (replicanths, num_devices[tier], tier)) wanted_replicas[tier] = replicanths sub_tiers = sorted(tier2children[tier]) From b81f53b964fdb8f3b50dd369ce2e194ee4dbb0b7 Mon Sep 17 00:00:00 2001 From: zheng yin Date: Tue, 23 Aug 2016 14:26:47 +0800 Subject: [PATCH 150/156] Improve readability in the obj server's unit tests This change improves the readability of the object-server's unit tests by breaking down some long assertTrue statements into smaller, easier-to-read, and more relevant assert statements.
For example: assertTrue(a in resp.headers and b in resp.headers and c not in resp.headers) Is equal to: assertIn(a, resp.headers) assertIn(b, resp.headers) assertNotIn(c, resp.headers) Change-Id: Iba746ecfb1a1dc541856b7a4c9d2f00d08e4ad51 --- test/unit/obj/test_server.py | 68 ++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/test/unit/obj/test_server.py b/test/unit/obj/test_server.py index fa7be18a76..5262c6d876 100755 --- a/test/unit/obj/test_server.py +++ b/test/unit/obj/test_server.py @@ -208,27 +208,27 @@ class TestObjectController(unittest.TestCase): req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertTrue("X-Object-Meta-1" not in resp.headers and - "X-Object-Meta-Two" not in resp.headers and - "X-Object-Meta-3" in resp.headers and - "X-Object-Meta-4" in resp.headers and - "Foo" in resp.headers and - "Bar" in resp.headers and - "Baz" not in resp.headers and - "Content-Encoding" in resp.headers) + self.assertNotIn("X-Object-Meta-1", resp.headers) + self.assertNotIn("X-Object-Meta-Two", resp.headers) + self.assertIn("X-Object-Meta-3", resp.headers) + self.assertIn("X-Object-Meta-4", resp.headers) + self.assertIn("Foo", resp.headers) + self.assertIn("Bar", resp.headers) + self.assertNotIn("Baz", resp.headers) + self.assertIn("Content-Encoding", resp.headers) self.assertEqual(resp.headers['Content-Type'], 'application/x-test') req = Request.blank('/sda1/p/a/c/o', environ={'REQUEST_METHOD': 'HEAD'}) resp = req.get_response(self.object_controller) - self.assertTrue("X-Object-Meta-1" not in resp.headers and - "X-Object-Meta-Two" not in resp.headers and - "X-Object-Meta-3" in resp.headers and - "X-Object-Meta-4" in resp.headers and - "Foo" in resp.headers and - "Bar" in resp.headers and - "Baz" not in resp.headers and - "Content-Encoding" in resp.headers) + self.assertNotIn("X-Object-Meta-1", resp.headers) + self.assertNotIn("X-Object-Meta-Two", resp.headers) + self.assertIn("X-Object-Meta-3", resp.headers) + self.assertIn("X-Object-Meta-4", resp.headers) + self.assertIn("Foo", resp.headers) + self.assertIn("Bar", resp.headers) + self.assertNotIn("Baz", resp.headers) + self.assertIn("Content-Encoding", resp.headers) self.assertEqual(resp.headers['Content-Type'], 'application/x-test') timestamp = normalize_timestamp(time()) @@ -240,11 +240,11 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertTrue("X-Object-Meta-3" not in resp.headers and - "X-Object-Meta-4" not in resp.headers and - "Foo" not in resp.headers and - "Bar" not in resp.headers and - "Content-Encoding" not in resp.headers) + self.assertNotIn("X-Object-Meta-3", resp.headers) + self.assertNotIn("X-Object-Meta-4", resp.headers) + self.assertNotIn("Foo", resp.headers) + self.assertNotIn("Bar", resp.headers) + self.assertNotIn("Content-Encoding", resp.headers) self.assertEqual(resp.headers['Content-Type'], 'application/x-test') # test defaults @@ -265,12 +265,12 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 201) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertTrue("X-Object-Meta-1" in resp.headers and - "Foo" not in resp.headers and - "Content-Encoding" in resp.headers and - "X-Object-Manifest" in resp.headers and - "Content-Disposition" in resp.headers and - "X-Static-Large-Object" in resp.headers) + 
self.assertIn("X-Object-Meta-1", resp.headers) + self.assertNotIn("Foo", resp.headers) + self.assertIn("Content-Encoding", resp.headers) + self.assertIn("X-Object-Manifest", resp.headers) + self.assertIn("Content-Disposition", resp.headers) + self.assertIn("X-Static-Large-Object", resp.headers) self.assertEqual(resp.headers['Content-Type'], 'application/x-test') timestamp = normalize_timestamp(time()) @@ -284,13 +284,13 @@ class TestObjectController(unittest.TestCase): self.assertEqual(resp.status_int, 202) req = Request.blank('/sda1/p/a/c/o') resp = req.get_response(self.object_controller) - self.assertTrue("X-Object-Meta-1" not in resp.headers and - "Foo" not in resp.headers and - "Content-Encoding" not in resp.headers and - "X-Object-Manifest" not in resp.headers and - "Content-Disposition" not in resp.headers and - "X-Object-Meta-3" in resp.headers and - "X-Static-Large-Object" in resp.headers) + self.assertNotIn("X-Object-Meta-1", resp.headers) + self.assertNotIn("Foo", resp.headers) + self.assertNotIn("Content-Encoding", resp.headers) + self.assertNotIn("X-Object-Manifest", resp.headers) + self.assertNotIn("Content-Disposition", resp.headers) + self.assertIn("X-Object-Meta-3", resp.headers) + self.assertIn("X-Static-Large-Object", resp.headers) self.assertEqual(resp.headers['Content-Type'], 'application/x-test') # Test for empty metadata From 01477c78c1163822de41484e914a0736e622085b Mon Sep 17 00:00:00 2001 From: zheng yin Date: Thu, 25 Aug 2016 15:37:42 +0800 Subject: [PATCH 151/156] Fix ValueError information in obj/expirer I fix error information in raise ValueError(...) For example: if a>=b: # It should be under below and not 'a must be less than or equal to b' raise ValueError('a must be less than b') Change-Id: I3d12b79470d122b2114f9ee486b15d381f290f95 --- swift/obj/expirer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/swift/obj/expirer.py b/swift/obj/expirer.py index 68d2cdd44b..9315d2f7f9 100644 --- a/swift/obj/expirer.py +++ b/swift/obj/expirer.py @@ -252,7 +252,7 @@ class ObjectExpirer(Daemon): if self.processes and self.process >= self.processes: raise ValueError( - 'process must be less than or equal to processes') + 'process must be less than processes') def delete_object(self, actual_obj, timestamp, container, obj): start_time = time() From c1ef6539b6ba9d2e4354c9cd2eec8a0195cdb19f Mon Sep 17 00:00:00 2001 From: Clay Gerrard Date: Thu, 25 Aug 2016 11:00:49 -0700 Subject: [PATCH 152/156] add test for expirer processes == process This is a follow up from a change that improved the error message. 
Related-Change: I3d12b79470d122b2114f9ee486b15d381f290f95 Change-Id: I093801f3516a60b298c13e2aa026c11c68a63792 --- test/unit/obj/test_expirer.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/unit/obj/test_expirer.py b/test/unit/obj/test_expirer.py index 355295082b..fbf711af06 100644 --- a/test/unit/obj/test_expirer.py +++ b/test/unit/obj/test_expirer.py @@ -118,6 +118,23 @@ class TestObjectExpirer(TestCase): x = expirer.ObjectExpirer({}) self.assertRaises(ValueError, x.get_process_values, vals) + def test_get_process_values_process_equal_to_processes(self): + vals = { + 'processes': 5, + 'process': 5, + } + # from config + x = expirer.ObjectExpirer(vals) + expected_msg = 'process must be less than processes' + with self.assertRaises(ValueError) as ctx: + x.get_process_values({}) + self.assertEqual(str(ctx.exception), expected_msg) + # from kwargs + x = expirer.ObjectExpirer({}) + with self.assertRaises(ValueError) as ctx: + x.get_process_values(vals) + self.assertEqual(str(ctx.exception), expected_msg) + def test_init_concurrency_too_small(self): conf = { 'concurrency': 0, From d2fc2614575b04fd9cab5ae589880b92eee9b186 Mon Sep 17 00:00:00 2001 From: Matthew Oliver Date: Fri, 19 Aug 2016 16:17:31 +1000 Subject: [PATCH 153/156] Authorise versioned write PUTs before copy Currently a versioned write PUT uses a pre-authed request to move it into the versioned container before checking whether the user is authorised. This can lead to some interesting behaviour whereby a user can select a versioned object path that they do not have access to, request a PUT on that versioned object, and this request will execute the copy part of the request before it fails due to lack of permissions. This patch changes the behaviour to be the same as versioned DELETE, where the request is authorised before anything is moved.
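For readability, the core of the fix (quoted from the versioned_writes diff below) is simply to run the standard swift.authorize WSGI hook before any backend request is issued; in sketch form:

    # Sketch of the pattern the patch applies in _copy_current (see the
    # diff below): authorize the incoming request against the container's
    # write ACL first, and bail out before any copy work happens.
    if 'swift.authorize' in req.environ:
        req.acl = container_info.get('write_acl')
        aresp = req.environ['swift.authorize'](req)
        if aresp:
            raise aresp  # e.g. a 403, raised before the copy's GET/PUT

This mirrors how the versioned DELETE path already behaves, which is why the commit describes it as making PUT "the same as versioned DELETE".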
Change-Id: Ia8b92251718d10b1eb44a456f28d3d2569a30003 Closes-Bug: #1562175 --- swift/common/middleware/versioned_writes.py | 10 +++ test/functional/tests.py | 5 ++ .../middleware/test_versioned_writes.py | 62 ++++++++++++++++--- 3 files changed, 68 insertions(+), 9 deletions(-) diff --git a/swift/common/middleware/versioned_writes.py b/swift/common/middleware/versioned_writes.py index 3ad8bd2eb1..c8c48b761d 100644 --- a/swift/common/middleware/versioned_writes.py +++ b/swift/common/middleware/versioned_writes.py @@ -407,6 +407,16 @@ class VersionedWritesContext(WSGIContext): def _copy_current(self, req, versions_cont, api_version, account_name, object_name): + # validate the write access to the versioned container before + # making any backend requests + if 'swift.authorize' in req.environ: + container_info = get_container_info( + req.environ, self.app) + req.acl = container_info.get('write_acl') + aresp = req.environ['swift.authorize'](req) + if aresp: + raise aresp + get_resp = self._get_source_object(req, req.path_info) if 'X-Object-Manifest' in get_resp.headers: diff --git a/test/functional/tests.py b/test/functional/tests.py index ff75272e54..092c3800d9 100644 --- a/test/functional/tests.py +++ b/test/functional/tests.py @@ -4123,11 +4123,16 @@ class TestObjectVersioning(Base): cfg={'use_token': self.env.storage_token3}) # user3 cannot write or delete from source container either + number_of_versions = versions_container.info()['object_count'] self.assertRaises(ResponseError, versioned_obj.write, "some random user trying to write data", cfg={'use_token': self.env.storage_token3}) + self.assertEqual(number_of_versions, + versions_container.info()['object_count']) self.assertRaises(ResponseError, versioned_obj.delete, cfg={'use_token': self.env.storage_token3}) + self.assertEqual(number_of_versions, + versions_container.info()['object_count']) # user2 can't read or delete from versions-location self.assertRaises(ResponseError, backup_file.read, diff --git a/test/unit/common/middleware/test_versioned_writes.py b/test/unit/common/middleware/test_versioned_writes.py index 4d7d0552b3..1c3176911e 100644 --- a/test/unit/common/middleware/test_versioned_writes.py +++ b/test/unit/common/middleware/test_versioned_writes.py @@ -402,8 +402,12 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) + self.assertEqual(len(self.authorized), 2) + # Versioned writes middleware now calls auth on the incoming request + # before we try the GET and then at the proxy, so there are 2 + # authorize calls for the same request. self.assertRequestEqual(req, self.authorized[0]) + self.assertRequestEqual(req, self.authorized[1]) self.assertEqual(2, self.app.call_count) self.assertEqual(['VW', None], self.app.swift_sources) self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) @@ -456,8 +460,12 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'CONTENT_LENGTH': '100'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 1) + # The middleware now auths the request before the initial GET, the + # same GET that gets the X-Object-Manifest back. So a second auth is + # now done. + self.assertEqual(len(self.authorized), 2) self.assertRequestEqual(req, self.authorized[0]) + self.assertRequestEqual(req, self.authorized[1]) self.assertEqual(2, self.app.call_count) @@ -497,7 +505,9 @@ 'swift.trans_id': 'fake_trans_id'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 1) + # authorized twice because versioned_writes now also checks the + # PUT
+ self.assertEqual(len(self.authorized), 2) self.assertRequestEqual(req, self.authorized[0]) self.assertEqual(['VW', 'VW', None], self.app.swift_sources) self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids)) @@ -577,7 +587,9 @@ class VersionedWritesTestCase(VersionedWritesBaseTestCase): 'CONTENT_LENGTH': '100'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '200 OK') - self.assertEqual(len(self.authorized), 1) + # authorized twice because versioned_writes now also checks the + # PUT + self.assertEqual(len(self.authorized), 2) self.assertRequestEqual(req, self.authorized[0]) # check that sysmeta header was used @@ -869,7 +881,7 @@ 'CONTENT_LENGTH': '0'}) status, headers, body = self.call_vw(req) self.assertEqual(status, '404 Not Found') - self.assertEqual(len(self.authorized), 1) + self.assertEqual(len(self.authorized), 2) req.environ['REQUEST_METHOD'] = 'PUT' self.assertRequestEqual(req, self.authorized[0]) @@ -903,7 +915,7 @@ status, headers, body = self.call_vw(req) self.assertEqual(status, '204 No Content') self.assertEqual('', body) - self.assertEqual(len(self.authorized), 1) + self.assertEqual(len(self.authorized), 2) req.environ['REQUEST_METHOD'] = 'PUT' self.assertRequestEqual(req, self.authorized[0]) @@ -1044,6 +1056,32 @@ ('GET', prefix_listing_prefix + 'marker=&reverse=on'), ]) + def test_denied_PUT_of_versioned_object(self): + authorize_call = [] + self.app.register( + 'GET', '/v1/a/c/o', swob.HTTPOk, + {'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed') + + def fake_authorize(req): + # we should deny the object PUT + authorize_call.append(req) + return swob.HTTPForbidden() + + cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}}) + req = Request.blank( + '/v1/a/c/o', + environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache, + 'swift.authorize': fake_authorize, + 'CONTENT_LENGTH': '0'}) + # Save off a copy, as the middleware may modify the original + expected_req = Request(req.environ.copy()) + status, headers, body = self.call_vw(req) + self.assertEqual(status, '403 Forbidden') + self.assertEqual(len(authorize_call), 1) + self.assertRequestEqual(expected_req, authorize_call[0]) + + self.assertEqual(self.app.calls, []) + class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase): def test_delete_latest_version_success(self): @@ -1379,11 +1417,17 @@ class VersionedWritesCopyingTestCase(VersionedWritesBaseTestCase): headers={'Destination': 'tgt_cont/tgt_obj'}) status, headers, body = self.call_filter(req) self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 2) + self.assertEqual(len(self.authorized), 3) self.assertEqual('GET', self.authorized[0].method) self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path) + # At the moment we are
calling authorize on the incoming request in + the middleware before we do the PUT (and the source GET) and again + on the incoming request when it gets to the proxy. So the 2nd and + 3rd auths look the same. self.assertEqual('PUT', self.authorized[1].method) self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path) + self.assertEqual('PUT', self.authorized[2].method) + self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[2].path) # note the GET on tgt_cont/tgt_obj is pre-authed self.assertEqual(3, self.app.call_count, self.app.calls) @@ -1407,7 +1451,7 @@ headers={'Destination': 'tgt_cont/tgt_obj'}) status, headers, body = self.call_filter(req) self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 2) + self.assertEqual(len(self.authorized), 3) self.assertEqual('GET', self.authorized[0].method) self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path) self.assertEqual('PUT', self.authorized[1].method) @@ -1435,7 +1479,7 @@ 'Destination-Account': 'tgt_a'}) status, headers, body = self.call_filter(req) self.assertEqual(status, '201 Created') - self.assertEqual(len(self.authorized), 2) + self.assertEqual(len(self.authorized), 3) self.assertEqual('GET', self.authorized[0].method) self.assertEqual('/v1/src_a/src_cont/src_obj', self.authorized[0].path) self.assertEqual('PUT', self.authorized[1].method) From d68b1bd6ddf44c5088e9d02dcb2f1b802c71411b Mon Sep 17 00:00:00 2001 From: zhufl Date: Mon, 29 Aug 2016 14:31:27 +0800 Subject: [PATCH 154/156] Remove unnecessary tearDown This removes an unnecessary tearDown to keep the code clean. Change-Id: Ie70e40d6b55f379b0cc9bc372a35705462cade8b --- test/probe/test_object_metadata_replication.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/probe/test_object_metadata_replication.py b/test/probe/test_object_metadata_replication.py index 57ef8e455e..1555a5051c 100644 --- a/test/probe/test_object_metadata_replication.py +++ b/test/probe/test_object_metadata_replication.py @@ -49,9 +49,6 @@ class Test(ReplProbeTest): self.container_name) self.int_client = self.make_internal_client(object_post_as_copy=False) - def tearDown(self): - super(Test, self).tearDown() - def _get_object_info(self, account, container, obj, number): obj_conf = self.configs['object-server'] config_path = obj_conf[number] From f88e7fc0da2ed6a63e0ea3c3459d80772b3068cd Mon Sep 17 00:00:00 2001 From: zheng yin Date: Mon, 29 Aug 2016 20:21:44 +0800 Subject: [PATCH 155/156] Clarify test case in common/ring/test_builder These tests used a bare assertRaises(ValueError, ring.RingBuilder, *, *, *), but it was not clear which case raised which ValueError(), so they are extended to validate the error strings as well.
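For readers unfamiliar with the pattern the diff below adopts: unittest's assertRaises can be used as a context manager (available since Python 2.7), which binds the caught exception for further assertions. A generic sketch, where do_something_invalid() is a hypothetical placeholder:

    # Generic sketch of assertRaises used as a context manager; the bound
    # context object exposes the caught exception via its .exception attribute.
    with self.assertRaises(ValueError) as ctx:
        do_something_invalid()  # hypothetical call expected to raise
    self.assertEqual(str(ctx.exception), 'expected message')

Checking str(ctx.exception) is what lets each test pin down exactly which ValueError was raised, which is the point of the change.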
Change-Id: I63280a9fc47ff678fe143e635046a0b402fd4506 --- test/unit/common/ring/test_builder.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/test/unit/common/ring/test_builder.py b/test/unit/common/ring/test_builder.py index c8d649d43a..5d270baa06 100644 --- a/test/unit/common/ring/test_builder.py +++ b/test/unit/common/ring/test_builder.py @@ -73,16 +73,22 @@ class TestRingBuilder(unittest.TestCase): self.assertEqual(rb.version, 0) def test_overlarge_part_powers(self): - ring.RingBuilder(32, 3, 1) # passes by not crashing - self.assertRaises(ValueError, ring.RingBuilder, 33, 3, 1) + expected_msg = 'part_power must be at most 32 (was 33)' + with self.assertRaises(ValueError) as ctx: + ring.RingBuilder(33, 3, 1) + self.assertEqual(str(ctx.exception), expected_msg) def test_insufficient_replicas(self): - ring.RingBuilder(8, 1.0, 1) # passes by not crashing - self.assertRaises(ValueError, ring.RingBuilder, 8, 0.999, 1) + expected_msg = 'replicas must be at least 1 (was 0.999000)' + with self.assertRaises(ValueError) as ctx: + ring.RingBuilder(8, 0.999, 1) + self.assertEqual(str(ctx.exception), expected_msg) def test_negative_min_part_hours(self): - ring.RingBuilder(8, 3, 0) # passes by not crashing - self.assertRaises(ValueError, ring.RingBuilder, 8, 3, -1) + expected_msg = 'min_part_hours must be non-negative (was -1)' + with self.assertRaises(ValueError) as ctx: + ring.RingBuilder(8, 3, -1) + self.assertEqual(str(ctx.exception), expected_msg) def test_deepcopy(self): rb = ring.RingBuilder(8, 3, 1) From 3b5850836c59c46f2507a7f62aceccf4c37e5d41 Mon Sep 17 00:00:00 2001 From: gecong1973 Date: Tue, 30 Aug 2016 15:08:49 +0800 Subject: [PATCH 156/156] Remove white space between print and () There is a white space between print and () in /tempauth.py; this patch fixes it. Change-Id: Id3493bdef12223aa3a2bffc200db8710f5949101 --- swift/common/middleware/tempauth.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py index 00c342406c..11dad41f88 100644 --- a/swift/common/middleware/tempauth.py +++ b/swift/common/middleware/tempauth.py @@ -156,8 +156,8 @@ class TempAuth(object): format_acl(version=2, acl_dict=acl_data)} header_str = ' '.join(["-H '%s: %s'" % (k, v) for k, v in headers.items()]) - print ('curl -D- -X POST -H "x-auth-token: $token" %s ' - '$storage_url' % header_str) + print('curl -D- -X POST -H "x-auth-token: $token" %s ' + '$storage_url' % header_str) ' :param app: The next WSGI app in the pipeline