commit 3ba65cb7ec
Merge remote-tracking branch 'remotes/origin/master' into merge-master

Conflicts:
    swift/obj/diskfile.py

Change-Id: I3b1508c9555bcff9016fd7ba918cebcaa4eb484c
@@ -3,7 +3,7 @@
 # How-To debug functional tests:
 # SWIFT_TEST_IN_PROCESS=1 tox -e func -- --pdb test.functional.tests.TestFile.testCopy
 
-SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
+SRC_DIR=$(python -c "import os; print(os.path.dirname(os.path.realpath('$0')))")
 
 cd ${SRC_DIR} > /dev/null
 export TESTS_DIR=${SRC_DIR}/test/functional
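The same print-statement-to-function fix recurs in the two runner scripts below; a quick check of why the new form is needed (the path argument here is a stand-in, not from the scripts):

    # print() as a function parses on both Python 2 and 3; the old
    # statement form is a SyntaxError under Python 3.
    import os
    print(os.path.dirname(os.path.realpath('.functests')))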
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
+SRC_DIR=$(python -c "import os; print(os.path.dirname(os.path.realpath('$0')))")
 
 cd ${SRC_DIR}/test/probe
 nosetests --exe $@
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-TOP_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
+TOP_DIR=$(python -c "import os; print(os.path.dirname(os.path.realpath('$0')))")
 
 python -c 'from distutils.version import LooseVersion as Ver; import nose, sys; sys.exit(0 if Ver(nose.__version__) >= Ver("1.2.0") else 1)'
 if [ $? != 0 ]; then
.zuul.yaml
@@ -81,6 +81,21 @@
     vars:
       tox_envlist: func
 
+- job:
+    name: swift-tox-func-py37
+    parent: swift-tox-base
+    nodeset: ubuntu-bionic
+    description: |
+      Run functional tests for swift under cPython version 3.7.
+
+      Uses tox with the ``func-py3`` environment.
+      It sets TMPDIR to an XFS mount point created via
+      tools/test-setup.sh.
+    vars:
+      tox_envlist: func-py3
+      bindep_profile: test py37
+      python_version: 3.7
+
 - job:
     name: swift-tox-func-centos-7
     parent: swift-tox-func
@@ -334,6 +349,7 @@
 - job:
     name: swift-build-image
     parent: opendev-build-docker-image
+    voting: false
     description: Build SAIO docker images.
     vars: &swift_image_vars
       docker_images:
@@ -343,6 +359,7 @@
 - job:
     name: swift-upload-image
     parent: opendev-upload-docker-image
+    voting: false
     description: Build SAIO docker images and upload to Docker Hub.
     secrets:
       name: docker_credentials
@@ -353,6 +370,7 @@
 - job:
     name: swift-promote-image
     parent: opendev-promote-docker-image
+    voting: false
     description: Promote previously uploaded Docker images.
     secrets:
       name: docker_credentials
@@ -390,6 +408,11 @@
           - ^(api-ref|doc|releasenotes)/.*$
           - ^test/probe/.*$
           - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
+    - swift-tox-func-py37:
+        irrelevant-files:
+          - ^(api-ref|doc|releasenotes)/.*$
+          - ^test/probe/.*$
+          - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
     - swift-tox-func-encryption:
         irrelevant-files:
           - ^(api-ref|doc|releasenotes)/.*$
@@ -465,6 +488,7 @@
         - swift-tox-py27
         - swift-tox-py37
         - swift-tox-func
+        - swift-tox-func-py37
         - swift-tox-func-encryption
         - swift-tox-func-domain-remap-staticweb
         - swift-tox-func-ec
@@ -2,7 +2,8 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 # this is required for the docs build jobs
-sphinx>=1.6.2 # BSD
+sphinx>=1.6.2,<2.0.0;python_version=='2.7' # BSD
+sphinx>=1.6.2;python_version>='3.4' # BSD
 openstackdocstheme>=1.11.0 # Apache-2.0
 reno>=1.8.0 # Apache-2.0
 os-api-ref>=1.0.0 # Apache-2.0
@@ -42,7 +42,6 @@ ceph_s3:
   s3tests.functional.test_s3.test_object_acl_xml_writeacp: {status: KNOWN}
   s3tests.functional.test_s3.test_object_copy_canned_acl: {status: KNOWN}
   s3tests.functional.test_s3.test_object_copy_not_owned_object_bucket: {status: KNOWN}
-  s3tests.functional.test_s3.test_object_copy_replacing_metadata: {status: KNOWN}
   s3tests.functional.test_s3.test_object_giveaway: {status: KNOWN}
   s3tests.functional.test_s3.test_object_header_acl_grants: {status: KNOWN}
   s3tests.functional.test_s3.test_object_raw_get: {status: KNOWN}
@@ -23,7 +23,6 @@ ceph_s3:
  s3tests.functional.test_s3.test_logging_toggle: {status: KNOWN}
  s3tests.functional.test_s3.test_multipart_resend_first_finishes_last: {status: KNOWN}
  s3tests.functional.test_s3.test_object_copy_canned_acl: {status: KNOWN}
-  s3tests.functional.test_s3.test_object_copy_replacing_metadata: {status: KNOWN}
  s3tests.functional.test_s3.test_object_header_acl_grants: {status: KNOWN}
  s3tests.functional.test_s3.test_object_raw_get: {status: KNOWN}
  s3tests.functional.test_s3.test_object_raw_get_bucket_acl: {status: KNOWN}
@@ -16,8 +16,8 @@ start =
     )
   ),
   element MaxKeys { xsd:int },
-  element EncodingType { xsd:string }?,
   element Delimiter { xsd:string }?,
+  element EncodingType { xsd:string }?,
   element IsTruncated { xsd:boolean },
   element Contents {
     element Key { xsd:string },
@@ -53,7 +53,6 @@ Overview and Concepts
     overview_replication
     ratelimit
     overview_large_objects
-    overview_object_versioning
     overview_global_cluster
     overview_container_sync
     overview_expiring_objects
@@ -47,4 +47,6 @@ Install and configure components
 
    .. code-block:: console
 
-      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/proxy-server.conf-sample
+      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample
+
+4. .. include:: controller-include.txt
@@ -45,6 +45,6 @@ Install and configure components
 
    .. code-block:: console
 
-      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/proxy-server.conf-sample
+      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample
 
 3. .. include:: controller-include.txt
@@ -47,6 +47,6 @@ Install and configure components
 
    .. code-block:: console
 
-      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/proxy-server.conf-sample
+      # curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample
 
 4. .. include:: controller-include.txt
@@ -19,7 +19,7 @@ This section applies to Red Hat Enterprise Linux 7 and CentOS 7.
    .. code-block:: console
 
       # curl -o /etc/swift/swift.conf \
-        https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/swift.conf-sample
+        https://opendev.org/openstack/swift/raw/branch/master/etc/swift.conf-sample
 
 #. Edit the ``/etc/swift/swift.conf`` file and complete the following
    actions:
@@ -19,7 +19,7 @@ This section applies to Ubuntu 14.04 (LTS) and Debian.
    .. code-block:: console
 
       # curl -o /etc/swift/swift.conf \
-        https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/swift.conf-sample
+        https://opendev.org/openstack/swift/raw/branch/master/etc/swift.conf-sample
 
 #. Edit the ``/etc/swift/swift.conf`` file and complete the following
    actions:
@@ -133,9 +133,9 @@ Install and configure components
 
    .. code-block:: console
 
-      # curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/account-server.conf-sample
-      # curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/container-server.conf-sample
-      # curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/object-server.conf-sample
+      # curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/account-server.conf-sample
+      # curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/container-server.conf-sample
+      # curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/object-server.conf-sample
 
 3. .. include:: storage-include1.txt
 4. .. include:: storage-include2.txt
@@ -137,9 +137,9 @@ Install and configure components
 
    .. code-block:: console
 
-      # curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/account-server.conf-sample
-      # curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/container-server.conf-sample
-      # curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/stable/queens/etc/object-server.conf-sample
+      # curl -o /etc/swift/account-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/account-server.conf-sample
+      # curl -o /etc/swift/container-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/container-server.conf-sample
+      # curl -o /etc/swift/object-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/object-server.conf-sample
 
 3. .. include:: storage-include1.txt
 4. .. include:: storage-include2.txt
@@ -37,7 +37,7 @@ AWS S3 Api
     :show-inheritance:
 
 .. automodule:: swift.common.middleware.s3api.etree
-    :members:
+    :members: _Element
     :show-inheritance:
 
 .. automodule:: swift.common.middleware.s3api.utils
@@ -1,5 +0,0 @@
-Object Versioning
-=================
-
-.. automodule:: swift.common.middleware.versioned_writes
-    :show-inheritance:
@@ -359,7 +359,8 @@ can't get perfect balance due to too many partitions recently moved).
 ---------------
 Composite Rings
 ---------------
-.. automodule:: swift.common.ring.composite_builder
+
+See :ref:`composite_builder`.
 
 **********************************
 swift-ring-composer (Experimental)
@@ -24,6 +24,8 @@ Ring Builder
     :undoc-members:
     :show-inheritance:
 
+.. _composite_builder:
+
 Composite Ring Builder
 ======================
 
@@ -3,6 +3,8 @@ asn1crypto==0.24.0
 Babel==2.5.3
 bandit==1.1.0
 boto==2.32.1
+boto3==1.9
+botocore==1.12
 castellan==0.13.0
 certifi==2018.1.18
 cffi==1.11.5
@@ -81,6 +81,7 @@ keystone =
 [entry_points]
 console_scripts =
     swift-manage-shard-ranges = swift.cli.manage_shard_ranges:main
+    swift-container-deleter = swift.cli.container_deleter:main
 
 paste.app_factory =
     proxy = swift.proxy.server:app_factory
@@ -42,7 +42,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, \
     HTTPCreated, HTTPForbidden, HTTPInternalServerError, \
     HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, \
     HTTPPreconditionFailed, HTTPConflict, Request, \
-    HTTPInsufficientStorage, HTTPException
+    HTTPInsufficientStorage, HTTPException, wsgi_to_str
 from swift.common.request_helpers import is_sys_or_user_meta
 
 
@@ -299,7 +299,7 @@ class AccountController(BaseStorageServer):
         start_time = time.time()
         req = Request(env)
         self.logger.txn_id = req.headers.get('x-trans-id', None)
-        if not check_utf8(req.path_info):
+        if not check_utf8(wsgi_to_str(req.path_info)):
             res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
         else:
             try:
@@ -0,0 +1,174 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy
+# of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+Enqueue background jobs to delete portions of a container's namespace.
+
+Accepts prefix, marker, and end-marker args that work as in container
+listings. Objects found in the listing will be marked to be deleted
+by the object-expirer; until the object is actually deleted, it will
+continue to appear in listings.
+
+If there are many objects, this operation may take some time. Stats will
+periodically be emitted so you know the process hasn't hung. These will
+also include the last object marked for deletion; if there is a failure,
+pass this as the ``--marker`` when retrying to minimize duplicative work.
+'''
+
+import argparse
+import io
+import itertools
+import json
+import six
+import time
+
+from swift.common.internal_client import InternalClient
+from swift.common.utils import Timestamp, MD5_OF_EMPTY_STRING
+from swift.obj.expirer import build_task_obj, ASYNC_DELETE_TYPE
+
+OBJECTS_PER_UPDATE = 10000
+
+
+def make_delete_jobs(account, container, objects, timestamp):
+    '''
+    Create a list of async-delete jobs
+
+    :param account: (native or unicode string) account to delete from
+    :param container: (native or unicode string) container to delete from
+    :param objects: (list of native or unicode strings) objects to delete
+    :param timestamp: (Timestamp) time at which objects should be marked
+                      deleted
+    :returns: list of dicts appropriate for an UPDATE request to an
+              expiring-object queue
+    '''
+    if six.PY2:
+        if isinstance(account, str):
+            account = account.decode('utf8')
+        if isinstance(container, str):
+            container = container.decode('utf8')
+    return [
+        {
+            'name': build_task_obj(
+                timestamp, account, container,
+                obj.decode('utf8') if six.PY2 and isinstance(obj, str)
+                else obj),
+            'deleted': 0,
+            'created_at': timestamp.internal,
+            'etag': MD5_OF_EMPTY_STRING,
+            'size': 0,
+            'storage_policy_index': 0,
+            'content_type': ASYNC_DELETE_TYPE,
+        } for obj in objects]
+
+
+def mark_for_deletion(swift, account, container, marker, end_marker,
+                      prefix, timestamp=None, yield_time=10):
+    '''
+    Enqueue jobs to async-delete some portion of a container's namespace
+
+    :param swift: InternalClient to use
+    :param account: account to delete from
+    :param container: container to delete from
+    :param marker: only delete objects after this name
+    :param end_marker: only delete objects before this name. Use ``None`` or
+                       empty string to delete to the end of the namespace.
+    :param prefix: only delete objects starting with this prefix
+    :param timestamp: delete all objects as of this time. If ``None``, the
+                      current time will be used.
+    :param yield_time: approximate period with which intermediate results
+                       should be returned. If ``None``, disable intermediate
+                       results.
+    :returns: If ``yield_time`` is ``None``, the number of objects marked for
+              deletion. Otherwise, a generator that will yield out tuples of
+              ``(number of marked objects, last object name)`` approximately
+              every ``yield_time`` seconds. The final tuple will have ``None``
+              as the second element. This form allows you to retry when an
+              error occurs partway through while minimizing duplicate work.
+    '''
+    if timestamp is None:
+        timestamp = Timestamp.now()
+
+    def enqueue_deletes():
+        deleted = 0
+        obj_iter = swift.iter_objects(
+            account, container,
+            marker=marker, end_marker=end_marker, prefix=prefix)
+        time_marker = time.time()
+        while True:
+            to_delete = [obj['name'] for obj in itertools.islice(
+                obj_iter, OBJECTS_PER_UPDATE)]
+            if not to_delete:
+                break
+            delete_jobs = make_delete_jobs(
+                account, container, to_delete, timestamp)
+            swift.make_request(
+                'UPDATE',
+                swift.make_path('.expiring_objects', str(int(timestamp))),
+                headers={'X-Backend-Allow-Private-Methods': 'True',
+                         'X-Backend-Storage-Policy-Index': '0',
+                         'X-Timestamp': timestamp.internal},
+                acceptable_statuses=(2,),
+                body_file=io.BytesIO(json.dumps(delete_jobs).encode('ascii')))
+            deleted += len(delete_jobs)
+            if yield_time is not None and \
+                    time.time() - time_marker > yield_time:
+                yield deleted, to_delete[-1]
+                time_marker = time.time()
+        yield deleted, None
+
+    if yield_time is None:
+        for deleted, marker in enqueue_deletes():
+            if marker is None:
+                return deleted
+    else:
+        return enqueue_deletes()
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawTextHelpFormatter)
+    parser.add_argument('--config', default='/etc/swift/internal-client.conf',
+                        help=('internal-client config file '
+                              '(default: /etc/swift/internal-client.conf'))
+    parser.add_argument('--request-tries', type=int, default=3,
+                        help='(default: 3)')
+    parser.add_argument('account', help='account from which to delete')
+    parser.add_argument('container', help='container from which to delete')
+    parser.add_argument(
+        '--prefix', default='',
+        help='only delete objects with this prefix (default: none)')
+    parser.add_argument(
+        '--marker', default='',
+        help='only delete objects after this marker (default: none)')
+    parser.add_argument(
+        '--end-marker', default='',
+        help='only delete objects before this end-marker (default: none)')
+    parser.add_argument(
+        '--timestamp', type=Timestamp, default=Timestamp.now(),
+        help='delete all objects as of this time (default: now)')
+    args = parser.parse_args()
+
+    swift = InternalClient(
+        args.config, 'Swift Container Deleter', args.request_tries)
+    for deleted, marker in mark_for_deletion(
+            swift, args.account, args.container,
+            args.marker, args.end_marker, args.prefix, args.timestamp):
+        if marker is None:
+            print('Finished. Marked %d objects for deletion.' % deleted)
+        else:
+            print('Marked %d objects for deletion, through %r' % (
+                deleted, marker))
+
+
+if __name__ == '__main__':
+    main()
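A minimal sketch of driving the new module from Python rather than through the swift-container-deleter console script, assuming a working internal-client config; it mirrors what main() above does, with placeholder account/container names:

    from swift.common.internal_client import InternalClient
    from swift.cli.container_deleter import mark_for_deletion

    swift = InternalClient('/etc/swift/internal-client.conf',
                           'Swift Container Deleter', request_tries=3)
    # Enqueue async deletes for everything under logs/ in AUTH_test/web
    for deleted, marker in mark_for_deletion(swift, 'AUTH_test', 'web',
                                             marker='', end_marker='',
                                             prefix='logs/'):
        if marker is None:
            print('Finished. Marked %d objects for deletion.' % deleted)
        else:
            print('Marked %d objects for deletion, through %r'
                  % (deleted, marker))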
@@ -41,7 +41,11 @@ if six.PY2:
     httplib = eventlet.import_patched('httplib')
 else:
     httplib = eventlet.import_patched('http.client')
-httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT
+
+# Apparently http.server uses this to decide when/whether to send a 431.
+# Give it some slack, so the app is more likely to get the chance to reject
+# with a 400 instead.
+httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT * 1.6
 
 
 class BufferedHTTPResponse(HTTPResponse):
@@ -191,7 +191,7 @@ payload sent to the proxy (the list of objects/containers to be deleted).
 """
 
 import json
-from six.moves.urllib.parse import quote, unquote
+import six
 import tarfile
 from xml.sax import saxutils
 from time import time
@@ -200,7 +200,8 @@ import zlib
 from swift.common.swob import Request, HTTPBadGateway, \
     HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
     HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
-    HTTPLengthRequired, HTTPException, HTTPServerError, wsgify
+    HTTPLengthRequired, HTTPException, HTTPServerError, wsgify, \
+    bytes_to_wsgi, str_to_wsgi, wsgi_unquote, wsgi_quote, wsgi_to_str
 from swift.common.utils import get_logger, register_swift_info, \
     StreamingPile
 from swift.common import constraints
@@ -234,7 +235,7 @@ def get_response_body(data_format, data_dict, error_list, root_tag):
     """
     if data_format == 'application/json':
         data_dict['Errors'] = error_list
-        return json.dumps(data_dict)
+        return json.dumps(data_dict).encode('ascii')
     if data_format and data_format.endswith('/xml'):
         output = ['<', root_tag, '>\n']
         for key in sorted(data_dict):
@@ -251,7 +252,9 @@ def get_response_body(data_format, data_dict, error_list, root_tag):
                 saxutils.escape(status), '</status></object>\n',
             ])
         output.extend(['</errors>\n</', root_tag, '>\n'])
-        return ''.join(output)
+        if six.PY2:
+            return ''.join(output)
+        return ''.join(output).encode('utf-8')
 
     output = []
     for key in sorted(data_dict):
@@ -260,7 +263,9 @@ def get_response_body(data_format, data_dict, error_list, root_tag):
     output.extend(
         '%s, %s\n' % (name, status)
         for name, status in error_list)
-    return ''.join(output)
+    if six.PY2:
+        return ''.join(output)
+    return ''.join(output).encode('utf-8')
 
 
 def pax_key_to_swift_header(pax_key):
@@ -269,10 +274,14 @@ def pax_key_to_swift_header(pax_key):
         return "Content-Type"
     elif pax_key.startswith(u"SCHILY.xattr.user.meta."):
         useful_part = pax_key[len(u"SCHILY.xattr.user.meta."):]
-        return "X-Object-Meta-" + useful_part.encode("utf-8")
+        if six.PY2:
+            return "X-Object-Meta-" + useful_part.encode("utf-8")
+        return str_to_wsgi("X-Object-Meta-" + useful_part)
     elif pax_key.startswith(u"LIBARCHIVE.xattr.user.meta."):
         useful_part = pax_key[len(u"LIBARCHIVE.xattr.user.meta."):]
-        return "X-Object-Meta-" + useful_part.encode("utf-8")
+        if six.PY2:
+            return "X-Object-Meta-" + useful_part.encode("utf-8")
+        return str_to_wsgi("X-Object-Meta-" + useful_part)
     else:
         # You can get things like atime/mtime/ctime or filesystem ACLs in
         # pax headers; those aren't really user metadata. The same goes for
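For reference, the mapping this function implements, shown standalone with a made-up pax key (the WSGI-string conversion is the str_to_wsgi() call above):

    # A pax extended header carrying user metadata becomes a Swift
    # user-meta header; only the suffix after the xattr prefix is kept.
    pax_key = u'SCHILY.xattr.user.meta.color'
    useful_part = pax_key[len(u'SCHILY.xattr.user.meta.'):]
    print('X-Object-Meta-' + useful_part)  # -> X-Object-Meta-color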
@@ -308,7 +317,7 @@ class Bulk(object):
         :raises CreateContainerError: when unable to create container
         """
         head_cont_req = make_subrequest(
-            req.environ, method='HEAD', path=quote(container_path),
+            req.environ, method='HEAD', path=wsgi_quote(container_path),
             headers={'X-Auth-Token': req.headers.get('X-Auth-Token')},
             swift_source='EA')
         resp = head_cont_req.get_response(self.app)
@@ -316,7 +325,7 @@ class Bulk(object):
             return False
         if resp.status_int == HTTP_NOT_FOUND:
             create_cont_req = make_subrequest(
-                req.environ, method='PUT', path=quote(container_path),
+                req.environ, method='PUT', path=wsgi_quote(container_path),
                 headers={'X-Auth-Token': req.headers.get('X-Auth-Token')},
                 swift_source='EA')
             resp = create_cont_req.get_response(self.app)
@@ -333,7 +342,7 @@ class Bulk(object):
         :returns: a list of the contents of req.body when separated by newline.
         :raises HTTPException: on failures
         """
-        line = ''
+        line = b''
         data_remaining = True
         objs_to_delete = []
         if req.content_length is None and \
@@ -341,21 +350,31 @@ class Bulk(object):
             raise HTTPLengthRequired(request=req)
 
         while data_remaining:
-            if '\n' in line:
-                obj_to_delete, line = line.split('\n', 1)
-                obj_to_delete = obj_to_delete.strip()
-                objs_to_delete.append(
-                    {'name': unquote(obj_to_delete)})
+            if b'\n' in line:
+                obj_to_delete, line = line.split(b'\n', 1)
+                if six.PY2:
+                    obj_to_delete = wsgi_unquote(obj_to_delete.strip())
+                else:
+                    # yeah, all this chaining is pretty terrible...
+                    # but it gets even worse trying to use UTF-8 and
+                    # errors='surrogateescape' when dealing with terrible
+                    # input like b'\xe2%98\x83'
+                    obj_to_delete = wsgi_to_str(wsgi_unquote(
+                        bytes_to_wsgi(obj_to_delete.strip())))
+                objs_to_delete.append({'name': obj_to_delete})
             else:
                 data = req.body_file.read(self.max_path_length)
                 if data:
                     line += data
                 else:
                     data_remaining = False
-                    obj_to_delete = line.strip()
+                    if six.PY2:
+                        obj_to_delete = wsgi_unquote(line.strip())
+                    else:
+                        obj_to_delete = wsgi_to_str(wsgi_unquote(
+                            bytes_to_wsgi(line.strip())))
                     if obj_to_delete:
-                        objs_to_delete.append(
-                            {'name': unquote(obj_to_delete)})
+                        objs_to_delete.append({'name': obj_to_delete})
             if len(objs_to_delete) > self.max_deletes_per_request:
                 raise HTTPRequestEntityTooLarge(
                     'Maximum Bulk Deletes: %d per request' %
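The "terrible chaining" above is easier to see in isolation. A self-contained sketch of the three steps, re-implemented here from the documented latin-1 round-tripping behaviour of the swob helpers (an assumption for illustration, not imported from swift):

    from urllib.parse import unquote

    def bytes_to_wsgi(bs):
        # WSGI strings are latin-1-decoded bytes
        return bs.decode('latin-1')

    def wsgi_unquote(ws):
        # percent-decode without mangling non-ASCII bytes
        return unquote(ws, encoding='latin-1')

    def wsgi_to_str(ws):
        # back to a native string, tolerating invalid UTF-8
        return ws.encode('latin-1').decode('utf-8', 'surrogateescape')

    raw = b'\xe2%98\x83'  # the "terrible input" from the comment above
    print(wsgi_to_str(wsgi_unquote(bytes_to_wsgi(raw))))  # -> u'\u2603'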
@@ -376,15 +395,15 @@ class Bulk(object):
 
         :params req: a swob Request
         :params objs_to_delete: a list of dictionaries that specifies the
-            objects to be deleted. If None, uses self.get_objs_to_delete to
-            query request.
+            (native string) objects to be deleted. If None, uses
+            self.get_objs_to_delete to query request.
         """
         last_yield = time()
         if out_content_type and out_content_type.endswith('/xml'):
-            to_yield = '<?xml version="1.0" encoding="UTF-8"?>\n'
+            to_yield = b'<?xml version="1.0" encoding="UTF-8"?>\n'
         else:
-            to_yield = ' '
-        separator = ''
+            to_yield = b' '
+        separator = b''
         failed_files = []
         resp_dict = {'Response Status': HTTPOk().status,
                      'Response Body': '',
@@ -399,6 +418,8 @@ class Bulk(object):
             vrs, account, _junk = req.split_path(2, 3, True)
         except ValueError:
             raise HTTPNotFound(request=req)
+        vrs = wsgi_to_str(vrs)
+        account = wsgi_to_str(account)
 
         incoming_format = req.headers.get('Content-Type')
         if incoming_format and \
@@ -422,13 +443,13 @@ class Bulk(object):
                     resp_dict['Number Not Found'] += 1
                 else:
                     failed_files.append([
-                        quote(obj_name),
+                        wsgi_quote(str_to_wsgi(obj_name)),
                         obj_to_delete['error']['message']])
                 continue
             delete_path = '/'.join(['', vrs, account,
                                     obj_name.lstrip('/')])
             if not constraints.check_utf8(delete_path):
-                failed_files.append([quote(obj_name),
+                failed_files.append([wsgi_quote(str_to_wsgi(obj_name)),
                                      HTTPPreconditionFailed().status])
                 continue
             yield (obj_name, delete_path)
@@ -443,7 +464,8 @@ class Bulk(object):
 
         def do_delete(obj_name, delete_path):
             delete_obj_req = make_subrequest(
-                req.environ, method='DELETE', path=quote(delete_path),
+                req.environ, method='DELETE',
+                path=wsgi_quote(str_to_wsgi(delete_path)),
                 headers={'X-Auth-Token': req.headers.get('X-Auth-Token')},
                 body='', agent='%(orig)s ' + user_agent,
                 swift_source=swift_source)
@@ -456,7 +478,7 @@ class Bulk(object):
                 if last_yield + self.yield_frequency < time():
                     last_yield = time()
                     yield to_yield
-                    to_yield, separator = ' ', '\r\n\r\n'
+                    to_yield, separator = b' ', b'\r\n\r\n'
                 self._process_delete(resp, pile, obj_name,
                                      resp_dict, failed_files,
                                      failed_file_response, retry)
@@ -466,7 +488,7 @@ class Bulk(object):
                 if last_yield + self.yield_frequency < time():
                     last_yield = time()
                     yield to_yield
-                    to_yield, separator = ' ', '\r\n\r\n'
+                    to_yield, separator = b' ', b'\r\n\r\n'
                 # Don't pass in the pile, as we shouldn't retry
                 self._process_delete(
                     resp, None, obj_name, resp_dict,
@@ -484,7 +506,7 @@ class Bulk(object):
 
         except HTTPException as err:
             resp_dict['Response Status'] = err.status
-            resp_dict['Response Body'] = err.body
+            resp_dict['Response Body'] = err.body.decode('utf-8')
         except Exception:
             self.logger.exception('Error in bulk delete.')
             resp_dict['Response Status'] = HTTPServerError().status
@@ -511,10 +533,10 @@ class Bulk(object):
         failed_files = []
         last_yield = time()
         if out_content_type and out_content_type.endswith('/xml'):
-            to_yield = '<?xml version="1.0" encoding="UTF-8"?>\n'
+            to_yield = b'<?xml version="1.0" encoding="UTF-8"?>\n'
         else:
-            to_yield = ' '
-        separator = ''
+            to_yield = b' '
+        separator = b''
         containers_accessed = set()
         req.environ['eventlet.minimum_write_chunk_size'] = 0
         try:
@@ -539,13 +561,16 @@ class Bulk(object):
                 if last_yield + self.yield_frequency < time():
                     last_yield = time()
                     yield to_yield
-                    to_yield, separator = ' ', '\r\n\r\n'
-                tar_info = next(tar)
+                    to_yield, separator = b' ', b'\r\n\r\n'
+                tar_info = tar.next()
                 if tar_info is None or \
                         len(failed_files) >= self.max_failed_extractions:
                     break
                 if tar_info.isfile():
                     obj_path = tar_info.name
+                    if not six.PY2:
+                        obj_path = obj_path.encode('utf-8', 'surrogateescape')
+                        obj_path = bytes_to_wsgi(obj_path)
                     if obj_path.startswith('./'):
                         obj_path = obj_path[2:]
                     obj_path = obj_path.lstrip('/')
@@ -557,14 +582,14 @@ class Bulk(object):
                     destination = '/'.join(
                         ['', vrs, account, obj_path])
                     container = obj_path.split('/', 1)[0]
-                    if not constraints.check_utf8(destination):
+                    if not constraints.check_utf8(wsgi_to_str(destination)):
                         failed_files.append(
-                            [quote(obj_path[:self.max_path_length]),
+                            [wsgi_quote(obj_path[:self.max_path_length]),
                              HTTPPreconditionFailed().status])
                         continue
                     if tar_info.size > constraints.MAX_FILE_SIZE:
                         failed_files.append([
-                            quote(obj_path[:self.max_path_length]),
+                            wsgi_quote(obj_path[:self.max_path_length]),
                             HTTPRequestEntityTooLarge().status])
                         continue
                     container_failure = None
@@ -581,13 +606,13 @@ class Bulk(object):
                             # the object PUT to this container still may
                             # succeed if acls are set
                             container_failure = [
-                                quote(cont_path[:self.max_path_length]),
+                                wsgi_quote(cont_path[:self.max_path_length]),
                                 err.status]
                             if err.status_int == HTTP_UNAUTHORIZED:
                                 raise HTTPUnauthorized(request=req)
                     except ValueError:
                         failed_files.append([
-                            quote(obj_path[:self.max_path_length]),
+                            wsgi_quote(obj_path[:self.max_path_length]),
                             HTTPBadRequest().status])
                         continue
 
@@ -598,7 +623,8 @@ class Bulk(object):
                     }
 
                     create_obj_req = make_subrequest(
-                        req.environ, method='PUT', path=quote(destination),
+                        req.environ, method='PUT',
+                        path=wsgi_quote(destination),
                         headers=create_headers,
                         agent='%(orig)s BulkExpand', swift_source='EA')
                     create_obj_req.environ['wsgi.input'] = tar_file
@@ -621,13 +647,13 @@ class Bulk(object):
                             failed_files.append(container_failure)
                         if resp.status_int == HTTP_UNAUTHORIZED:
                             failed_files.append([
-                                quote(obj_path[:self.max_path_length]),
+                                wsgi_quote(obj_path[:self.max_path_length]),
                                 HTTPUnauthorized().status])
                             raise HTTPUnauthorized(request=req)
                         if resp.status_int // 100 == 5:
                             failed_response_type = HTTPBadGateway
                         failed_files.append([
-                            quote(obj_path[:self.max_path_length]),
+                            wsgi_quote(obj_path[:self.max_path_length]),
                             resp.status])
 
             if failed_files:
@@ -638,7 +664,7 @@ class Bulk(object):
 
         except HTTPException as err:
             resp_dict['Response Status'] = err.status
-            resp_dict['Response Body'] = err.body
+            resp_dict['Response Body'] = err.body.decode('utf-8')
         except (tarfile.TarError, zlib.error) as tar_error:
             resp_dict['Response Status'] = HTTPBadRequest().status
             resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
@@ -656,7 +682,7 @@ class Bulk(object):
         elif resp.status_int == HTTP_NOT_FOUND:
             resp_dict['Number Not Found'] += 1
         elif resp.status_int == HTTP_UNAUTHORIZED:
-            failed_files.append([quote(obj_name),
+            failed_files.append([wsgi_quote(str_to_wsgi(obj_name)),
                                  HTTPUnauthorized().status])
         elif resp.status_int == HTTP_CONFLICT and pile and \
                 self.retry_count > 0 and self.retry_count > retry:
@@ -671,7 +697,8 @@ class Bulk(object):
         else:
             if resp.status_int // 100 == 5:
                 failed_file_response['type'] = HTTPBadGateway
-            failed_files.append([quote(obj_name), resp.status])
+            failed_files.append([wsgi_quote(str_to_wsgi(obj_name)),
+                                 resp.status])
 
     @wsgify
     def __call__(self, req):
@@ -179,16 +179,16 @@ class BucketController(Controller):
             else:
                 name = objects[-1]['subdir']
             if encoding_type == 'url':
-                name = quote(name)
+                name = quote(name.encode('utf-8'))
             SubElement(elem, 'NextMarker').text = name
         elif listing_type == 'version-2':
             if is_truncated:
                 if 'name' in objects[-1]:
                     SubElement(elem, 'NextContinuationToken').text = \
-                        b64encode(objects[-1]['name'].encode('utf8'))
+                        b64encode(objects[-1]['name'].encode('utf-8'))
                 if 'subdir' in objects[-1]:
                     SubElement(elem, 'NextContinuationToken').text = \
-                        b64encode(objects[-1]['subdir'].encode('utf8'))
+                        b64encode(objects[-1]['subdir'].encode('utf-8'))
             if 'continuation-token' in req.params:
                 SubElement(elem, 'ContinuationToken').text = \
                     req.params['continuation-token']
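Why encode before quoting: on Python 2, percent-encoding a non-ASCII unicode string raises, while encoding to UTF-8 first yields the same result on both interpreters. A quick illustration with a made-up name:

    from six.moves.urllib.parse import quote

    name = u'snow\u2603'
    # quote(name) raises KeyError on Python 2 for non-ASCII unicode;
    # quoting the UTF-8 bytes works the same everywhere.
    print(quote(name.encode('utf-8')))  # -> snow%E2%98%83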
@@ -67,7 +67,7 @@ import time
 
 import six
 
-from swift.common.swob import Range
+from swift.common.swob import Range, bytes_to_wsgi
 from swift.common.utils import json, public, reiterate
 from swift.common.db import utf8encode
 
@@ -529,7 +529,8 @@ class UploadController(Controller):
         objects = json.loads(resp.body)
         for o in objects:
             container = req.container_name + MULTIUPLOAD_SUFFIX
-            req.get_response(self.app, container=container, obj=o['name'])
+            obj = bytes_to_wsgi(o['name'].encode('utf-8'))
+            req.get_response(self.app, container=container, obj=obj)
 
         return HTTPNoContent()
 
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from swift.common.swob import bytes_to_wsgi
 from swift.common.utils import json, public
 
 from swift.common.middleware.s3api.controllers.base import Controller
@@ -51,8 +52,9 @@ class ServiceController(Controller):
         buckets = SubElement(elem, 'Buckets')
         for c in containers:
             if self.conf.s3_acl and self.conf.check_bucket_owner:
+                container = bytes_to_wsgi(c['name'].encode('utf8'))
                 try:
-                    req.get_response(self.app, 'HEAD', c['name'])
+                    req.get_response(self.app, 'HEAD', container)
                 except AccessDenied:
                     continue
                 except NoSuchBucket:
@@ -1111,14 +1111,13 @@ class S3Request(swob.Request):
             env['HTTP_X_COPY_FROM'] = env['HTTP_X_AMZ_COPY_SOURCE']
             del env['HTTP_X_AMZ_COPY_SOURCE']
             env['CONTENT_LENGTH'] = '0'
+            # Content type cannot be modified on COPY
+            env.pop('CONTENT_TYPE', None)
             if env.pop('HTTP_X_AMZ_METADATA_DIRECTIVE', None) == 'REPLACE':
                 env['HTTP_X_FRESH_METADATA'] = 'True'
             else:
                 copy_exclude_headers = ('HTTP_CONTENT_DISPOSITION',
                                         'HTTP_CONTENT_ENCODING',
                                         'HTTP_CONTENT_LANGUAGE',
-                                        'CONTENT_TYPE',
                                         'HTTP_EXPIRES',
                                         'HTTP_CACHE_CONTROL',
                                         'HTTP_X_ROBOTS_TAG')
@@ -45,12 +45,12 @@
         <data type="int"/>
       </element>
       <optional>
-        <element name="EncodingType">
+        <element name="Delimiter">
          <data type="string"/>
        </element>
      </optional>
      <optional>
-        <element name="Delimiter">
+        <element name="EncodingType">
          <data type="string"/>
        </element>
      </optional>
@@ -319,9 +319,11 @@ from datetime import datetime
 import json
 import mimetypes
 import re
-import six
 import time
 from hashlib import md5
 
+import six
+
 from swift.common.exceptions import ListingIterError, SegmentError
 from swift.common.middleware.listing_formats import \
     MAX_CONTAINER_LISTING_CONTENT_LENGTH
@@ -329,7 +331,7 @@ from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \
     HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \
     HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \
     HTTPUnauthorized, HTTPConflict, HTTPUnprocessableEntity, Response, Range, \
-    RESPONSE_REASONS
+    RESPONSE_REASONS, str_to_wsgi, wsgi_to_str, wsgi_quote
 from swift.common.utils import get_logger, config_true_value, \
     get_valid_utf8_str, override_bytes_from_content_type, split_path, \
     register_swift_info, RateLimitedIterator, quote, close_if_possible, \
@@ -498,7 +500,10 @@ def parse_and_validate_input(req_body, req_path):
                               % (seg_index,))
                 continue
             # re-encode to normalize padding
-            seg_dict['data'] = base64.b64encode(data)
+            if six.PY2:
+                seg_dict['data'] = base64.b64encode(data)
+            else:
+                seg_dict['data'] = base64.b64encode(data).decode('ascii')
 
     if parsed_data and all('data' in d for d in parsed_data):
         errors.append(b"Inline data segments require at least one "
@@ -524,9 +529,18 @@ class SloGetContext(WSGIContext):
         """
         Fetch the submanifest, parse it, and return it.
         Raise exception on failures.
+
+        :param req: the upstream request
+        :param version: whatever
+        :param acc: native
+        :param con: native
+        :param obj: native
         """
         sub_req = make_subrequest(
-            req.environ, path=quote('/'.join(['', version, acc, con, obj])),
+            req.environ,
+            path=wsgi_quote('/'.join([
+                '', str_to_wsgi(version),
+                str_to_wsgi(acc), str_to_wsgi(con), str_to_wsgi(obj)])),
             method='GET',
             headers={'x-auth-token': req.headers.get('x-auth-token')},
             agent='%(orig)s SLO MultipartGET', swift_source='SLO')
@@ -541,7 +555,7 @@ class SloGetContext(WSGIContext):
 
         try:
             with closing_if_possible(sub_resp.app_iter):
-                return json.loads(''.join(sub_resp.app_iter))
+                return json.loads(b''.join(sub_resp.app_iter))
         except ValueError as err:
             raise ListingIterError(
                 'while fetching %s, JSON-decoding of submanifest %s '
@@ -656,7 +670,10 @@ class SloGetContext(WSGIContext):
                         "While processing manifest %r, "
                         "max recursion depth was exceeded" % req.path)
 
-                sub_path = get_valid_utf8_str(seg_dict['name'])
+                if six.PY2:
+                    sub_path = get_valid_utf8_str(seg_dict['name'])
+                else:
+                    sub_path = seg_dict['name']
                 sub_cont, sub_obj = split_path(sub_path, 2, 2, True)
                 if last_sub_path != sub_path:
                     sub_segments = cached_fetch_sub_slo_segments(
@@ -675,7 +692,7 @@ class SloGetContext(WSGIContext):
                         recursion_depth=recursion_depth + 1):
                     yield sub_seg_dict
             else:
-                if isinstance(seg_dict['name'], six.text_type):
+                if six.PY2 and isinstance(seg_dict['name'], six.text_type):
                     seg_dict['name'] = seg_dict['name'].encode("utf-8")
                 yield dict(seg_dict,
                            first_byte=max(0, first_byte) + range_start,
@@ -865,7 +882,7 @@ class SloGetContext(WSGIContext):
 
     def _get_manifest_read(self, resp_iter):
         with closing_if_possible(resp_iter):
-            resp_body = ''.join(resp_iter)
+            resp_body = b''.join(resp_iter)
         try:
             segments = json.loads(resp_body)
         except ValueError:
@@ -904,13 +921,12 @@ class SloGetContext(WSGIContext):
 
             if slo_etag is None:
                 if 'raw_data' in seg_dict:
-                    calculated_etag.update(
-                        md5(seg_dict['raw_data']).hexdigest())
+                    r = md5(seg_dict['raw_data']).hexdigest()
                 elif seg_dict.get('range'):
-                    calculated_etag.update(
-                        '%s:%s;' % (seg_dict['hash'], seg_dict['range']))
+                    r = '%s:%s;' % (seg_dict['hash'], seg_dict['range'])
                 else:
-                    calculated_etag.update(seg_dict['hash'])
+                    r = seg_dict['hash']
+                calculated_etag.update(r.encode('ascii') if six.PY3 else r)
 
             if content_length is None:
                 if config_true_value(seg_dict.get('sub_slo')):
@@ -934,7 +950,7 @@ class SloGetContext(WSGIContext):
 
     def _manifest_head_response(self, req, response_headers):
         conditional_etag = resolve_etag_is_at_header(req, response_headers)
-        return HTTPOk(request=req, headers=response_headers, body='',
+        return HTTPOk(request=req, headers=response_headers, body=b'',
                       conditional_etag=conditional_etag,
                       conditional_response=True)
 
@@ -950,6 +966,7 @@ class SloGetContext(WSGIContext):
         byteranges = []
 
         ver, account, _junk = req.split_path(3, 3, rest_with_last=True)
+        account = wsgi_to_str(account)
         plain_listing_iter = self._segment_listing_iterator(
             req, ver, account, segments, byteranges)
 
@@ -1073,18 +1090,19 @@ class StaticLargeObject(object):
         :raises HttpException: on errors
         """
         vrs, account, container, obj = req.split_path(4, rest_with_last=True)
-        if req.content_length > self.max_manifest_size:
-            raise HTTPRequestEntityTooLarge(
-                "Manifest File > %d bytes" % self.max_manifest_size)
         if req.headers.get('X-Copy-From'):
             raise HTTPMethodNotAllowed(
                 'Multipart Manifest PUTs cannot be COPY requests')
-        if req.content_length is None and \
-                req.headers.get('transfer-encoding', '').lower() != 'chunked':
-            raise HTTPLengthRequired(request=req)
+        if req.content_length is None:
+            if req.headers.get('transfer-encoding', '').lower() != 'chunked':
+                raise HTTPLengthRequired(request=req)
+        else:
+            if req.content_length > self.max_manifest_size:
+                raise HTTPRequestEntityTooLarge(
+                    "Manifest File > %d bytes" % self.max_manifest_size)
         parsed_data = parse_and_validate_input(
             req.body_file.read(self.max_manifest_size),
-            req.path)
+            wsgi_to_str(req.path))
         problem_segments = []
 
         object_segments = [seg for seg in parsed_data if 'path' in seg]
@@ -1109,8 +1127,13 @@ class StaticLargeObject(object):
             path2indices[seg_dict['path']].append(index)
 
         def do_head(obj_name):
-            obj_path = quote('/'.join([
-                '', vrs, account, get_valid_utf8_str(obj_name).lstrip('/')]))
+            if six.PY2:
+                obj_path = '/'.join(['', vrs, account,
+                                     get_valid_utf8_str(obj_name).lstrip('/')])
+            else:
+                obj_path = '/'.join(['', vrs, account,
+                                     str_to_wsgi(obj_name.lstrip('/'))])
+            obj_path = wsgi_quote(obj_path)
 
             sub_req = make_subrequest(
                 req.environ, path=obj_path + '?',  # kill the query string
@@ -1194,7 +1217,7 @@ class StaticLargeObject(object):
             return segment_length, seg_data
 
         heartbeat = config_true_value(req.params.get('heartbeat'))
-        separator = ''
+        separator = b''
         if heartbeat:
             # Apparently some ways of deploying require that this to happens
             # *before* the return? Not sure why.
@@ -1202,13 +1225,13 @@ class StaticLargeObject(object):
             start_response('202 Accepted', [  # NB: not 201 !
                 ('Content-Type', out_content_type),
             ])
-            separator = '\r\n\r\n'
+            separator = b'\r\n\r\n'
 
         def resp_iter(total_size=total_size):
             # wsgi won't propagate start_response calls until some data has
             # been yielded so make sure first heartbeat is sent immediately
             if heartbeat:
-                yield ' '
+                yield b' '
             last_yield_time = time.time()
             with StreamingPile(self.concurrency) as pile:
                 for obj_name, resp in pile.asyncstarmap(do_head, (
@@ -1218,7 +1241,7 @@ class StaticLargeObject(object):
                             self.yield_frequency):
                         # Make sure we've called start_response before
                         # sending data
-                        yield ' '
+                        yield b' '
                         last_yield_time = now
                     for i in path2indices[obj_name]:
                         segment_length, seg_data = validate_seg_dict(
@@ -1241,7 +1264,10 @@ class StaticLargeObject(object):
                 resp_dict = {}
                 if heartbeat:
                     resp_dict['Response Status'] = err.status
-                    resp_dict['Response Body'] = err.body or '\n'.join(
+                    err_body = err.body
+                    if six.PY3:
+                        err_body = err_body.decode('utf-8', errors='replace')
+                    resp_dict['Response Body'] = err_body or '\n'.join(
                         RESPONSE_REASONS.get(err.status_int, ['']))
                 else:
                     start_response(err.status,
@@ -1255,23 +1281,28 @@ class StaticLargeObject(object):
             for seg_data in data_for_storage:
                 if 'data' in seg_data:
                     raw_data = base64.b64decode(seg_data['data'])
-                    slo_etag.update(md5(raw_data).hexdigest())
+                    r = md5(raw_data).hexdigest()
                 elif seg_data.get('range'):
-                    slo_etag.update('%s:%s;' % (seg_data['hash'],
-                                                seg_data['range']))
+                    r = '%s:%s;' % (seg_data['hash'], seg_data['range'])
                 else:
-                    slo_etag.update(seg_data['hash'])
+                    r = seg_data['hash']
+                slo_etag.update(r.encode('ascii') if six.PY3 else r)
 
             slo_etag = slo_etag.hexdigest()
             client_etag = req.headers.get('Etag')
             if client_etag and client_etag.strip('"') != slo_etag:
                 err = HTTPUnprocessableEntity(request=req)
                 if heartbeat:
-                    yield separator + get_response_body(out_content_type, {
-                        'Response Status': err.status,
-                        'Response Body': err.body or '\n'.join(
-                            RESPONSE_REASONS.get(err.status_int, [''])),
-                    }, problem_segments, 'upload')
+                    resp_dict = {}
+                    resp_dict['Response Status'] = err.status
+                    err_body = err.body
+                    if six.PY3 and isinstance(err_body, bytes):
+                        err_body = err_body.decode('utf-8', errors='replace')
+                    resp_dict['Response Body'] = err_body or '\n'.join(
+                        RESPONSE_REASONS.get(err.status_int, ['']))
+                    yield separator + get_response_body(
+                        out_content_type, resp_dict, problem_segments,
+                        'upload')
                 else:
                     for chunk in err(req.environ, start_response):
                         yield chunk
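Both Etag hunks above compute the SLO Etag the same way: per-segment strings are accumulated into one md5, with an explicit ASCII encode added for Python 3. A minimal sketch with made-up segment records:

    from hashlib import md5

    segments = [
        {'hash': 'd41d8cd98f00b204e9800998ecf8427e'},
        {'hash': '0cc175b9c0f1b6a831c399e269772661', 'range': '0-1023'},
    ]
    slo_etag = md5()
    for seg in segments:
        r = ('%s:%s;' % (seg['hash'], seg['range'])
             if seg.get('range') else seg['hash'])
        slo_etag.update(r.encode('ascii'))  # update() needs bytes on py3
    print(slo_etag.hexdigest())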
@@ -1298,7 +1329,8 @@ class StaticLargeObject(object):
 
         env = req.environ
         if not env.get('CONTENT_TYPE'):
-            guessed_type, _junk = mimetypes.guess_type(req.path_info)
+            guessed_type, _junk = mimetypes.guess_type(
+                wsgi_to_str(req.path_info))
             env['CONTENT_TYPE'] = (guessed_type or
                                    'application/octet-stream')
         env['swift.content_type_overridden'] = True
@@ -1312,7 +1344,10 @@ class StaticLargeObject(object):
                 resp_dict['Last Modified'] = resp.headers['Last-Modified']
 
                 if heartbeat:
-                    resp_dict['Response Body'] = resp.body
+                    resp_body = resp.body
+                    if six.PY3 and isinstance(resp_body, bytes):
+                        resp_body = resp_body.decode('utf-8')
+                    resp_dict['Response Body'] = resp_body
                     yield separator + get_response_body(
                         out_content_type, resp_dict, [], 'upload')
                 else:
@@ -1332,14 +1367,18 @@ class StaticLargeObject(object):
         :raises HTTPBadRequest: on too many buffered sub segments and
                                 on invalid SLO manifest path
         """
-        if not check_utf8(req.path_info):
+        if not check_utf8(wsgi_to_str(req.path_info)):
             raise HTTPPreconditionFailed(
                 request=req, body='Invalid UTF8 or contains NULL')
         vrs, account, container, obj = req.split_path(4, 4, True)
+        if six.PY2:
+            obj_path = ('/%s/%s' % (container, obj)).decode('utf-8')
+        else:
+            obj_path = '/%s/%s' % (wsgi_to_str(container), wsgi_to_str(obj))
 
         segments = [{
             'sub_slo': True,
-            'name': ('/%s/%s' % (container, obj)).decode('utf-8')}]
+            'name': obj_path}]
         while segments:
             if len(segments) > MAX_BUFFERED_SLO_SEGMENTS:
                 raise HTTPBadRequest(
@@ -1353,14 +1392,18 @@ class StaticLargeObject(object):
                     self.get_slo_segments(seg_data['name'], req))
             except HTTPException as err:
                 # allow bulk delete response to report errors
+                err_body = err.body
+                if six.PY3 and isinstance(err_body, bytes):
+                    err_body = err_body.decode('utf-8', errors='replace')
                 seg_data['error'] = {'code': err.status_int,
-                                     'message': err.body}
+                                     'message': err_body}
 
                 # add manifest back to be deleted after segments
                 seg_data['sub_slo'] = False
                 segments.append(seg_data)
             else:
-                seg_data['name'] = seg_data['name'].encode('utf-8')
+                if six.PY2:
+                    seg_data['name'] = seg_data['name'].encode('utf-8')
                 yield seg_data
 
     def get_slo_segments(self, obj_name, req):
@@ -1386,9 +1429,15 @@ class StaticLargeObject(object):
         new_env['HTTP_USER_AGENT'] = \
             '%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT')
         new_env['swift.source'] = 'SLO'
-        new_env['PATH_INFO'] = (
-            '/%s/%s/%s' % (vrs, account, obj_name.lstrip('/').encode('utf-8'))
-        )
+        if six.PY2:
+            new_env['PATH_INFO'] = (
+                '/%s/%s/%s' % (vrs, account,
+                               obj_name.lstrip('/').encode('utf-8'))
+            )
+        else:
+            new_env['PATH_INFO'] = (
+                '/%s/%s/%s' % (vrs, account, str_to_wsgi(obj_name.lstrip('/')))
+            )
         resp = Request.blank('', new_env).get_response(self.app)
 
         if resp.is_success:
@@ -158,16 +158,16 @@ configuration steps are required:
 import json
 import os
 from cgi import parse_header
-from six.moves.urllib.parse import unquote
 
 from swift.common.utils import get_logger, register_swift_info, split_path, \
-    MD5_OF_EMPTY_STRING, closing_if_possible, quote
+    MD5_OF_EMPTY_STRING, closing_if_possible
 from swift.common.constraints import check_account_format
 from swift.common.wsgi import WSGIContext, make_subrequest
 from swift.common.request_helpers import get_sys_meta_prefix, \
     check_path_header
 from swift.common.swob import Request, HTTPBadRequest, HTTPTemporaryRedirect, \
-    HTTPException, HTTPConflict, HTTPPreconditionFailed
+    HTTPException, HTTPConflict, HTTPPreconditionFailed, wsgi_quote, \
+    wsgi_unquote
 from swift.common.http import is_success
 from swift.common.exceptions import LinkIterError
 from swift.common.header_key_dict import HeaderKeyDict
@@ -197,29 +197,39 @@ def _check_symlink_header(req):
     # copy middleware may accept the format. In the symlink, API
     # says apparently to use "container/object" format so add the
     # validation first, here.
-    if unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'):
+    error_body = 'X-Symlink-Target header must be of the form ' \
+                 '<container name>/<object name>'
+    try:
+        if wsgi_unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'):
+            raise HTTPPreconditionFailed(
+                body=error_body,
+                request=req, content_type='text/plain')
+    except TypeError:
         raise HTTPPreconditionFailed(
-            body='X-Symlink-Target header must be of the '
-                 'form <container name>/<object name>',
+            body=error_body,
             request=req, content_type='text/plain')
 
     # check container and object format
     container, obj = check_path_header(
         req, TGT_OBJ_SYMLINK_HDR, 2,
-        'X-Symlink-Target header must be of the '
-        'form <container name>/<object name>')
-    req.headers[TGT_OBJ_SYMLINK_HDR] = quote('%s/%s' % (container, obj))
+        error_body)
+    req.headers[TGT_OBJ_SYMLINK_HDR] = wsgi_quote('%s/%s' % (container, obj))
 
     # Check account format if it exists
-    account = check_account_format(
-        req, unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) \
-        if TGT_ACCT_SYMLINK_HDR in req.headers else None
+    try:
+        account = check_account_format(
+            req, wsgi_unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) \
+            if TGT_ACCT_SYMLINK_HDR in req.headers else None
+    except TypeError:
+        raise HTTPPreconditionFailed(
+            body='Account name cannot contain slashes',
+            request=req, content_type='text/plain')
 
     # Extract request path
     _junk, req_acc, req_cont, req_obj = req.split_path(4, 4, True)
 
     if account:
-        req.headers[TGT_ACCT_SYMLINK_HDR] = quote(account)
+        req.headers[TGT_ACCT_SYMLINK_HDR] = wsgi_quote(account)
     else:
         account = req_acc
 
@@ -383,7 +393,7 @@ class SymlinkObjectContext(WSGIContext):
         """
         version, account, _junk = req.split_path(2, 3, True)
         account = self._response_header_value(
-            TGT_ACCT_SYSMETA_SYMLINK_HDR) or quote(account)
+            TGT_ACCT_SYSMETA_SYMLINK_HDR) or wsgi_quote(account)
         target_path = os.path.join(
             '/', version, account,
             symlink_target.lstrip('/'))
@@ -488,7 +498,7 @@ class SymlinkObjectContext(WSGIContext):
         if tgt_co:
             version, account, _junk = req.split_path(2, 3, True)
             target_acc = self._response_header_value(
-                TGT_ACCT_SYSMETA_SYMLINK_HDR) or quote(account)
+                TGT_ACCT_SYSMETA_SYMLINK_HDR) or wsgi_quote(account)
             location_hdr = os.path.join(
                 '/', version, target_acc, tgt_co)
             req.environ['swift.leave_relative_location'] = True
|
@ -531,8 +531,7 @@ class VersionedWritesContext(WSGIContext):
|
|||
|
||||
put_path_info = "/%s/%s/%s/%s" % (
|
||||
api_version, account_name, container_name, object_name)
|
||||
put_resp = self._put_versioned_obj(
|
||||
req, put_path_info, get_resp)
|
||||
put_resp = self._put_versioned_obj(req, put_path_info, get_resp)
|
||||
|
||||
self._check_response_error(req, put_resp)
|
||||
close_if_possible(put_resp.app_iter)
|
||||
|
|
|
@@ -921,9 +921,10 @@ class Request(object):
         """
         headers = headers or {}
         environ = environ or {}
-        if six.PY2 and isinstance(path, six.text_type):
-            path = path.encode('utf-8')
-        elif not six.PY2:
+        if six.PY2:
+            if isinstance(path, six.text_type):
+                path = path.encode('utf-8')
+        else:
             if isinstance(path, six.binary_type):
                 path = path.decode('latin1')
             else:
@@ -941,16 +942,11 @@ class Request(object):
                        'https': 443}.get(parsed_path.scheme, 80)
         if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']:
             raise TypeError('Invalid scheme: %s' % parsed_path.scheme)
-        if six.PY2:
-            path_info = urllib.parse.unquote(parsed_path.path)
-        else:
-            path_info = urllib.parse.unquote(parsed_path.path,
-                                             encoding='latin-1')
         env = {
             'REQUEST_METHOD': 'GET',
             'SCRIPT_NAME': '',
             'QUERY_STRING': parsed_path.query,
-            'PATH_INFO': path_info,
+            'PATH_INFO': wsgi_unquote(parsed_path.path),
             'SERVER_NAME': server_name,
             'SERVER_PORT': str(server_port),
             'HTTP_HOST': '%s:%d' % (server_name, server_port),
@@ -1037,13 +1033,8 @@ class Request(object):
     @property
     def path(self):
         "Provides the full path of the request, excluding the QUERY_STRING"
-        if six.PY2:
-            return urllib.parse.quote(self.environ.get('SCRIPT_NAME', '') +
-                                      self.environ['PATH_INFO'])
-        else:
-            return urllib.parse.quote(self.environ.get('SCRIPT_NAME', '') +
-                                      self.environ['PATH_INFO'],
-                                      encoding='latin-1')
+        return wsgi_quote(self.environ.get('SCRIPT_NAME', '') +
+                          self.environ['PATH_INFO'])
 
     @property
     def swift_entity_path(self):
@ -1481,7 +1472,7 @@ class Response(object):
|
|||
realm = 'unknown'
|
||||
except (AttributeError, ValueError):
|
||||
realm = 'unknown'
|
||||
return 'Swift realm="%s"' % urllib.parse.quote(realm)
|
||||
return 'Swift realm="%s"' % wsgi_quote(realm)
|
||||
|
||||
@property
|
||||
def is_success(self):
|
||||
|
|
|
@@ -3740,6 +3740,17 @@ def public(func):
    return func


def private(func):
    """
    Decorator to declare which methods are privately accessible as HTTP
    requests with an ``X-Backend-Allow-Private-Methods: True`` override

    :param func: function to make private
    """
    func.privately_accessible = True
    return func


def majority_size(n):
    return (n // 2) + 1
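
For orientation, a minimal sketch of how the new decorator is meant to be used on a controller; the controller class here is hypothetical, and the pairing with the proxy-side X-Backend-Allow-Private-Methods check appears further down in this change:

    from swift.common.utils import public, private

    class ExampleController(object):
        @public
        def GET(self, req):
            # reachable by any client
            ...

        @private
        def UPDATE(self, req):
            # only reachable when the request carries
            # X-Backend-Allow-Private-Methods: true
            ...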
@@ -4450,6 +4461,10 @@ def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
        (e.g. "divider", not "--divider")
    :param read_chunk_size: size of strings read via input_file.read()
    """
    if six.PY3 and isinstance(boundary, str):
        # Since the boundary is in client-supplied headers, it can contain
        # garbage that trips us and we don't like client-induced 500.
        boundary = boundary.encode('latin-1', errors='replace')
    doc_files = iter_multipart_mime_documents(input_file, boundary,
                                              read_chunk_size)
    for i, doc_file in enumerate(doc_files):
@@ -751,6 +751,32 @@ class ContainerController(BaseStorageServer):
            ret.request = req
        return ret

    @public
    @timing_stats()
    def UPDATE(self, req):
        """
        Handle HTTP UPDATE request (merge_items RPCs coming from the proxy).
        """
        drive, part, account, container = split_and_validate_path(req, 4)
        req_timestamp = valid_timestamp(req)
        try:
            check_drive(self.root, drive, self.mount_check)
        except ValueError:
            return HTTPInsufficientStorage(drive=drive, request=req)
        if not self.check_free_space(drive):
            return HTTPInsufficientStorage(drive=drive, request=req)

        requested_policy_index = self.get_and_validate_policy_index(req)
        broker = self._get_container_broker(drive, part, account, container)
        self._maybe_autocreate(broker, req_timestamp, account,
                               requested_policy_index)
        try:
            objs = json.load(req.environ['wsgi.input'])
        except ValueError as err:
            return HTTPBadRequest(body=str(err), content_type='text/plain')
        broker.merge_items(objs)
        return HTTPAccepted(request=req)

    @public
    @timing_stats()
    def POST(self, req):
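
The handler above expects the request body to be the JSON document that ContainerBroker.merge_items() accepts. A hedged sketch of what a caller might send; the exact record keys follow the broker's merge_items conventions and are an assumption here:

    import json

    # each record describes one object row to merge into the container DB
    records = [{'name': 'o1', 'created_at': '1552600000.00000',
                'size': 0, 'content_type': 'text/plain',
                'etag': 'd41d8cd98f00b204e9800998ecf8427e', 'deleted': 0}]
    body = json.dumps(records).encode('ascii')  # what json.load() reads back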
@@ -780,7 +806,7 @@ class ContainerController(BaseStorageServer):
        start_time = time.time()
        req = Request(env)
        self.logger.txn_id = req.headers.get('x-trans-id', None)
        if not check_utf8(req.path_info):
        if not check_utf8(wsgi_to_str(req.path_info)):
            res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
        else:
            try:
@@ -153,10 +153,16 @@ def _encode_metadata(metadata):

    :param metadata: a dict
    """
    def encode_str(item):
        if isinstance(item, six.text_type):
            return item.encode('utf8')
        return item
    if six.PY2:
        def encode_str(item):
            if isinstance(item, six.text_type):
                return item.encode('utf8')
            return item
    else:
        def encode_str(item):
            if isinstance(item, six.text_type):
                return item.encode('utf8', 'surrogateescape')
            return item

    return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
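
The 'surrogateescape' handler is what lets arbitrary (non-UTF-8) bytes survive a py3 decode/encode round trip, which is exactly the property _encode_metadata needs. A two-line illustration:

    raw = b'\xff\xfename'                      # not valid UTF-8
    s = raw.decode('utf8', 'surrogateescape')  # lossless str with surrogates
    assert s.encode('utf8', 'surrogateescape') == raw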
@@ -392,7 +398,7 @@ def consolidate_hashes(partition_dir):

        found_invalidation_entry = False
        try:
            with open(invalidations_file, 'rb') as inv_fh:
            with open(invalidations_file, 'r') as inv_fh:
                for line in inv_fh:
                    found_invalidation_entry = True
                    suffix = line.strip()
@@ -1133,7 +1139,14 @@ class BaseDiskFileManager(object):
                partition_path = os.path.dirname(path)
                objects_path = os.path.dirname(partition_path)
                device_path = os.path.dirname(objects_path)
                quar_path = quarantine_renamer(device_path, hsh_path)
                # The made-up filename is so that the eventual dirpath()
                # will result in this object directory that we care about.
                # Some failures will result in an object directory
                # becoming a file, thus causing the parent directory to
                # be quarantined.
                quar_path = quarantine_renamer(
                    device_path, os.path.join(
                        hsh_path, "made-up-filename"))
                logging.exception(
                    _('Quarantined %(hsh_path)s to %(quar_path)s because '
                      'it is not a directory'), {'hsh_path': hsh_path,
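
The made-up-filename trick relies only on os.path.dirname(): quarantine_renamer() works from the parent directory of the path it is given, so appending a bogus leaf makes it operate on the hash directory itself rather than on its parent suffix directory. A quick sketch (paths illustrative):

    import os

    hsh_path = '/srv/node/d1/objects/1/abc/deadbeef'
    assert os.path.dirname(hsh_path) == '/srv/node/d1/objects/1/abc'
    assert os.path.dirname(
        os.path.join(hsh_path, 'made-up-filename')) == hsh_path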
@@ -1448,7 +1461,14 @@ class BaseDiskFileManager(object):
            filenames = self.cleanup_ondisk_files(object_path)['files']
        except OSError as err:
            if err.errno == errno.ENOTDIR:
                quar_path = self.quarantine_renamer(dev_path, object_path)
                # The made-up filename is so that the eventual dirpath()
                # will result in this object directory that we care about.
                # Some failures will result in an object directory
                # becoming a file, thus causing the parent directory to
                # be quarantined.
                quar_path = self.quarantine_renamer(
                    dev_path, os.path.join(
                        object_path, "made-up-filename"))
                logging.exception(
                    _('Quarantined %(object_path)s to %(quar_path)s because '
                      'it is not a directory'), {'object_path': object_path,
@@ -2184,8 +2204,12 @@ class BaseDiskFileReader(object):

        """
        if not ranges:
            yield ''
            yield b''
        else:
            if not isinstance(content_type, bytes):
                content_type = content_type.encode('utf8')
            if not isinstance(boundary, bytes):
                boundary = boundary.encode('ascii')
            try:
                self._suppress_file_closing = True
                for chunk in multi_range_iterator(

@@ -2668,9 +2692,9 @@ class BaseDiskFile(object):
            ctypefile_metadata = self._failsafe_read_metadata(
                ctype_file, ctype_file)
            if ('Content-Type' in ctypefile_metadata
                    and (ctypefile_metadata.get('Content-Type-Timestamp') >
                         self._metafile_metadata.get('Content-Type-Timestamp'))
                    and (ctypefile_metadata.get('Content-Type-Timestamp') >
                    and (ctypefile_metadata.get('Content-Type-Timestamp', '') >
                         self._metafile_metadata.get('Content-Type-Timestamp', ''))
                    and (ctypefile_metadata.get('Content-Type-Timestamp', '') >
                         self.data_timestamp)):
                self._metafile_metadata['Content-Type'] = \
                    ctypefile_metadata['Content-Type']
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from six.moves import urllib
import six

from random import random
from time import time

@@ -28,13 +28,42 @@ from eventlet.greenpool import GreenPool
from swift.common.daemon import Daemon
from swift.common.internal_client import InternalClient, UnexpectedResponse
from swift.common.utils import get_logger, dump_recon_cache, split_path, \
    Timestamp, config_true_value
    Timestamp, config_true_value, normalize_delete_at_timestamp
from swift.common.http import HTTP_NOT_FOUND, HTTP_CONFLICT, \
    HTTP_PRECONDITION_FAILED
from swift.common.swob import wsgi_quote, str_to_wsgi

from swift.container.reconciler import direct_delete_container_entry

MAX_OBJECTS_TO_CACHE = 100000
ASYNC_DELETE_TYPE = 'application/async-deleted'


def build_task_obj(timestamp, target_account, target_container,
                   target_obj):
    """
    :return: a task object name in format of
             "<timestamp>-<target_account>/<target_container>/<target_obj>"
    """
    timestamp = Timestamp(timestamp)
    return '%s-%s/%s/%s' % (
        normalize_delete_at_timestamp(timestamp),
        target_account, target_container, target_obj)


def parse_task_obj(task_obj):
    """
    :param task_obj: a task object name in format of
                     "<timestamp>-<target_account>/<target_container>" +
                     "/<target_obj>"
    :return: 4-tuples of (delete_at_time, target_account, target_container,
             target_obj)
    """
    timestamp, target_path = task_obj.split('-', 1)
    timestamp = Timestamp(timestamp)
    target_account, target_container, target_obj = \
        split_path('/' + target_path, 3, 3, True)
    return timestamp, target_account, target_container, target_obj
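
The two helpers above are inverses of each other; normalize_delete_at_timestamp() pads the timestamp to ten digits, so a round trip looks like this (a small sketch using the names introduced above):

    from swift.common.utils import Timestamp
    from swift.obj.expirer import build_task_obj, parse_task_obj

    name = build_task_obj(Timestamp('1552600000'), 'AUTH_test', 'c', 'o')
    assert name == '1552600000-AUTH_test/c/o'
    ts, acct, cont, obj = parse_task_obj(name)
    assert (acct, cont, obj) == ('AUTH_test', 'c', 'o')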


class ObjectExpirer(Daemon):
@@ -122,18 +151,7 @@ class ObjectExpirer(Daemon):
        self.report_last_time = time()

    def parse_task_obj(self, task_obj):
        """
        :param task_obj: a task object name in format of
                         "<timestamp>-<target_account>/<target_container>" +
                         "/<target_obj>"
        :return: 4-tuples of (delete_at_time, target_account, target_container,
                 target_obj)
        """
        timestamp, target_path = task_obj.split('-', 1)
        timestamp = Timestamp(timestamp)
        target_account, target_container, target_obj = \
            split_path('/' + target_path, 3, 3, True)
        return timestamp, target_account, target_container, target_obj
        return parse_task_obj(task_obj)

    def round_robin_order(self, task_iter):
        """

@@ -182,6 +200,8 @@ class ObjectExpirer(Daemon):
        :param divisor: a divisor number
        :return: an integer to decide which expirer is assigned to the task
        """
        if not isinstance(name, bytes):
            name = name.encode('utf8')
        # md5 is only used for shuffling mod
        return int(hashlib.md5(name).hexdigest(), 16) % divisor

@@ -229,10 +249,13 @@ class ObjectExpirer(Daemon):
        """
        for task_account, task_container in task_account_container_list:
            for o in self.swift.iter_objects(task_account, task_container):
                task_object = o['name'].encode('utf8')
                if six.PY2:
                    task_object = o['name'].encode('utf8')
                else:
                    task_object = o['name']
                try:
                    delete_timestamp, target_account, target_container, \
                        target_object = self.parse_task_obj(task_object)
                        target_object = parse_task_obj(task_object)
                except ValueError:
                    self.logger.exception('Unexpected error handling task %r' %
                                          task_object)
@@ -247,12 +270,14 @@ class ObjectExpirer(Daemon):
                        divisor) != my_index:
                    continue

                is_async = o.get('content_type') == ASYNC_DELETE_TYPE
                yield {'task_account': task_account,
                       'task_container': task_container,
                       'task_object': task_object,
                       'target_path': '/'.join([
                           target_account, target_container, target_object]),
                       'delete_timestamp': delete_timestamp}
                       'delete_timestamp': delete_timestamp,
                       'is_async_delete': is_async}

    def run_once(self, *args, **kwargs):
        """

@@ -384,11 +409,13 @@ class ObjectExpirer(Daemon):
                             'process must be less than processes')

    def delete_object(self, target_path, delete_timestamp,
                      task_account, task_container, task_object):
                      task_account, task_container, task_object,
                      is_async_delete):
        start_time = time()
        try:
            try:
                self.delete_actual_object(target_path, delete_timestamp)
                self.delete_actual_object(target_path, delete_timestamp,
                                          is_async_delete)
            except UnexpectedResponse as err:
                if err.resp.status_int not in {HTTP_NOT_FOUND,
                                               HTTP_PRECONDITION_FAILED}:
@@ -425,7 +452,7 @@ class ObjectExpirer(Daemon):
        direct_delete_container_entry(self.swift.container_ring, task_account,
                                      task_container, task_object)

    def delete_actual_object(self, actual_obj, timestamp):
    def delete_actual_object(self, actual_obj, timestamp, is_async_delete):
        """
        Deletes the end-user object indicated by the actual object name given
        '<account>/<container>/<object>' if and only if the X-Delete-At value
@@ -436,13 +463,19 @@ class ObjectExpirer(Daemon):
        :param timestamp: The swift.common.utils.Timestamp instance the
                          X-Delete-At value must match to perform the actual
                          delete.
        :param is_async_delete: False if the object should be deleted because
                                of "normal" expiration, or True if it should
                                be async-deleted.
        :raises UnexpectedResponse: if the delete was unsuccessful and
                                    should be retried later
        """
        path = '/v1/' + urllib.parse.quote(actual_obj.lstrip('/'))
        self.swift.make_request(
            'DELETE', path,
            {'X-If-Delete-At': timestamp.normal,
             'X-Timestamp': timestamp.normal,
             'X-Backend-Clean-Expiring-Object-Queue': 'no'},
            (2, HTTP_CONFLICT))
        path = '/v1/' + wsgi_quote(str_to_wsgi(actual_obj.lstrip('/')))
        if is_async_delete:
            headers = {'X-Timestamp': timestamp.normal}
            acceptable_statuses = (2, HTTP_CONFLICT, HTTP_NOT_FOUND)
        else:
            headers = {'X-Timestamp': timestamp.normal,
                       'X-If-Delete-At': timestamp.normal,
                       'X-Backend-Clean-Expiring-Object-Queue': 'no'}
            acceptable_statuses = (2, HTTP_CONFLICT)
        self.swift.make_request('DELETE', path, headers, acceptable_statuses)
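
For a non-ASCII object name, the new quoting pipeline composes like this on py3 (a hedged sketch; the object name is illustrative):

    from swift.common.swob import str_to_wsgi, wsgi_quote

    actual_obj = u'AUTH_test/c/\N{SNOWMAN}'
    path = '/v1/' + wsgi_quote(str_to_wsgi(actual_obj.lstrip('/')))
    assert path == '/v1/AUTH_test/c/%E2%98%83'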
@@ -55,8 +55,9 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
    HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
    HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
    HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
    HTTPServerError, wsgi_to_bytes
    HTTPServerError, wsgi_to_bytes, wsgi_to_str
from swift.obj.diskfile import RESERVED_DATAFILE_META, DiskFileRouter
from swift.obj.expirer import build_task_obj


def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):

@@ -493,7 +494,7 @@ class ObjectController(BaseStorageServer):
            for host, contdevice in updates:
                self.async_update(
                    op, self.expiring_objects_account, delete_at_container,
                    '%s-%s/%s/%s' % (delete_at, account, container, obj),
                    build_task_obj(delete_at, account, container, obj),
                    host, partition, contdevice, headers_out, objdevice,
                    policy)
@@ -1256,7 +1257,8 @@ class ObjectController(BaseStorageServer):
        except DiskFileDeviceUnavailable:
            resp = HTTPInsufficientStorage(drive=device, request=request)
        else:
            resp = Response(body=pickle.dumps(hashes))
            # force pickle protocol for compatibility with py2 nodes
            resp = Response(body=pickle.dumps(hashes, protocol=2))
        return resp

    @public

@@ -1271,7 +1273,7 @@ class ObjectController(BaseStorageServer):
        req = Request(env)
        self.logger.txn_id = req.headers.get('x-trans-id', None)

        if not check_utf8(req.path_info):
        if not check_utf8(wsgi_to_str(req.path_info)):
            res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
        else:
            try:

@@ -57,7 +57,7 @@ from swift.common.http import is_informational, is_success, is_redirection, \
    HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE
from swift.common.swob import Request, Response, Range, \
    HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \
    status_map
    status_map, wsgi_to_str, str_to_wsgi, wsgi_quote
from swift.common.request_helpers import strip_sys_meta_prefix, \
    strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \
    http_response_to_document_iters, is_object_transient_sysmeta, \
@@ -327,8 +327,10 @@ def get_container_info(env, app, swift_source=None):
    This call bypasses auth. Success does not imply that the request has
    authorization to the container.
    """
    (version, account, container, unused) = \
    (version, wsgi_account, wsgi_container, unused) = \
        split_path(env['PATH_INFO'], 3, 4, True)
    account = wsgi_to_str(wsgi_account)
    container = wsgi_to_str(wsgi_container)

    # Check in environment cache and in memcache (in that order)
    info = _get_info_from_caches(app, env, account, container)

@@ -350,7 +352,7 @@ def get_container_info(env, app, swift_source=None):
        return headers_to_container_info({}, 0)

    req = _prepare_pre_auth_info_request(
        env, ("/%s/%s/%s" % (version, account, container)),
        env, ("/%s/%s/%s" % (version, wsgi_account, wsgi_container)),
        (swift_source or 'GET_CONTAINER_INFO'))
    resp = req.get_response(app)
    # Check in infocache to see if the proxy (or anyone else) already

@@ -395,7 +397,8 @@ def get_account_info(env, app, swift_source=None):

    :raises ValueError: when path doesn't contain an account
    """
    (version, account, _junk, _junk) = split_path(env['PATH_INFO'], 2, 4, True)
    (version, wsgi_account, _junk) = split_path(env['PATH_INFO'], 2, 3, True)
    account = wsgi_to_str(wsgi_account)

    # Check in environment cache and in memcache (in that order)
    info = _get_info_from_caches(app, env, account)

@@ -404,7 +407,7 @@ def get_account_info(env, app, swift_source=None):
    if not info:
        env.setdefault('swift.infocache', {})
        req = _prepare_pre_auth_info_request(
            env, "/%s/%s" % (version, account),
            env, "/%s/%s" % (version, wsgi_account),
            (swift_source or 'GET_ACCOUNT_INFO'))
        resp = req.get_response(app)
        # Check in infocache to see if the proxy (or anyone else) already

@@ -625,7 +628,7 @@ def _prepare_pre_auth_info_request(env, path, swift_source):
    Prepares a pre authed request to obtain info using a HEAD.

    :param env: the environment used by the current request
    :param path: The unquoted request path
    :param path: The unquoted, WSGI-str request path
    :param swift_source: value for swift.source in WSGI environment
    :returns: the pre authed request
    """
@@ -641,7 +644,7 @@ def _prepare_pre_auth_info_request(env, path, swift_source):
    newenv['swift_owner'] = True

    # Note that Request.blank expects quoted path
    return Request.blank(quote(path), environ=newenv)
    return Request.blank(wsgi_quote(path), environ=newenv)


def get_info(app, env, account, container=None, swift_source=None):

@@ -685,9 +688,9 @@ def _get_object_info(app, env, account, container, obj, swift_source=None):

    :param app: the application object
    :param env: the environment used by the current request
    :param account: The unquoted name of the account
    :param container: The unquoted name of the container
    :param obj: The unquoted name of the object
    :param account: The unquoted, WSGI-str name of the account
    :param container: The unquoted, WSGI-str name of the container
    :param obj: The unquoted, WSGI-str name of the object
    :returns: the cached info or None if cannot be retrieved
    """
    cache_key = get_cache_key(account, container, obj)

@@ -1514,6 +1517,7 @@ class Controller(object):
        self.app = app
        self.trans_id = '-'
        self._allowed_methods = None
        self._private_methods = None

    @property
    def allowed_methods(self):

@@ -1525,6 +1529,16 @@ class Controller(object):
                    self._allowed_methods.add(name)
        return self._allowed_methods

    @property
    def private_methods(self):
        if self._private_methods is None:
            self._private_methods = set()
            all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
            for name, m in all_methods:
                if getattr(m, 'privately_accessible', False):
                    self._private_methods.add(name)
        return self._private_methods

    def _x_remove_headers(self):
        """
        Returns a list of headers that must not be sent to the backend
@@ -1584,7 +1598,7 @@ class Controller(object):
        """
        Get account information, and also verify that the account exists.

        :param account: name of the account to get the info for
        :param account: native str name of the account to get the info for
        :param req: caller's HTTP request context object (optional)
        :returns: tuple of (account partition, account nodes, container_count)
                  or (None, None, None) if it does not exist

@@ -1596,7 +1610,7 @@ class Controller(object):
            env = {}
        env.setdefault('swift.infocache', {})
        path_env = env.copy()
        path_env['PATH_INFO'] = "/v1/%s" % (account,)
        path_env['PATH_INFO'] = "/v1/%s" % (str_to_wsgi(account),)

        info = get_account_info(path_env, self.app)
        if (not info

@@ -1611,8 +1625,8 @@ class Controller(object):
        Get container information and thusly verify container existence.
        This will also verify account existence.

        :param account: account name for the container
        :param container: container name to look up
        :param account: native-str account name for the container
        :param container: native-str container name to look up
        :param req: caller's HTTP request context object (optional)
        :returns: dict containing at least container partition ('partition'),
                  container nodes ('containers'), container read

@@ -1627,7 +1641,8 @@ class Controller(object):
            env = {}
        env.setdefault('swift.infocache', {})
        path_env = env.copy()
        path_env['PATH_INFO'] = "/v1/%s/%s" % (account, container)
        path_env['PATH_INFO'] = "/v1/%s/%s" % (
            str_to_wsgi(account), str_to_wsgi(container))
        info = get_container_info(path_env, self.app)
        if not info or not is_success(info.get('status')):
            info = headers_to_container_info({}, 0)

@@ -1639,7 +1654,7 @@ class Controller(object):
        return info

    def _make_request(self, nodes, part, method, path, headers, query,
                      logger_thread_locals):
                      body, logger_thread_locals):
        """
        Iterates over the given node iterator, sending an HTTP request to one
        node at a time. The first non-informational, non-server-error
@@ -1653,12 +1668,18 @@ class Controller(object):
                     (full path ends up being /<$device>/<$part>/<$path>)
        :param headers: dictionary of headers
        :param query: query string to send to the backend.
        :param body: byte string to use as the request body.
                     Try to keep it small.
        :param logger_thread_locals: The thread local values to be set on the
                                     self.app.logger to retain transaction
                                     logging information.
        :returns: a swob.Response object, or None if no responses were received
        """
        self.app.logger.thread_locals = logger_thread_locals
        if body:
            if not isinstance(body, bytes):
                raise TypeError('body must be bytes, not %s' % type(body))
            headers['Content-Length'] = str(len(body))
        for node in nodes:
            try:
                start_node_timing = time.time()

@@ -1668,6 +1689,9 @@ class Controller(object):
                        headers=headers, query_string=query)
                    conn.node = node
                    self.app.set_node_timing(node, time.time() - start_node_timing)
                    if body:
                        with Timeout(self.app.node_timeout):
                            conn.send(body)
                with Timeout(self.app.node_timeout):
                    resp = conn.getresponse()
                    if not is_informational(resp.status) and \

@@ -1694,7 +1718,7 @@ class Controller(object):

    def make_requests(self, req, ring, part, method, path, headers,
                      query_string='', overrides=None, node_count=None,
                      node_iterator=None):
                      node_iterator=None, body=None):
        """
        Sends an HTTP request to multiple nodes and aggregates the results.
        It attempts the primary nodes concurrently, then iterates over the

@@ -1723,7 +1747,7 @@ class Controller(object):

        for head in headers:
            pile.spawn(self._make_request, nodes, part, method, path,
                       head, query_string, self.app.logger.thread_locals)
                       head, query_string, body, self.app.logger.thread_locals)
        response = []
        statuses = []
        for resp in pile:
@@ -18,7 +18,7 @@ import json

from six.moves.urllib.parse import unquote

from swift.common.utils import public, csv_append, Timestamp, \
from swift.common.utils import public, private, csv_append, Timestamp, \
    config_true_value, ShardRange
from swift.common.constraints import check_metadata, CONTAINER_LISTING_LIMIT
from swift.common.http import HTTP_ACCEPTED, is_success

@@ -356,6 +356,27 @@ class ContainerController(Controller):
                return HTTPNotFound(request=req)
        return resp

    @private
    def UPDATE(self, req):
        """HTTP UPDATE request handler.

        Method to perform bulk operations on container DBs,
        similar to a merge_items REPLICATE request.

        Not client facing; internal clients or middlewares must include
        ``X-Backend-Allow-Private-Methods: true`` header to access.
        """
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        # Since this isn't client facing, expect callers to supply an index
        policy_index = req.headers['X-Backend-Storage-Policy-Index']
        headers = self._backend_requests(
            req, len(containers), account_partition=None, accounts=[],
            policy_index=policy_index)
        return self.make_requests(
            req, self.app.container_ring, container_partition, 'UPDATE',
            req.swift_entity_path, headers, body=req.body)

    def _backend_requests(self, req, n_outgoing, account_partition, accounts,
                          policy_index=None):
        additional = {'X-Timestamp': Timestamp.now().internal}
@@ -507,9 +507,14 @@ class Application(object):
            controller.trans_id = req.environ['swift.trans_id']
            self.logger.client_ip = get_remote_client(req)

            if req.method not in controller.allowed_methods:
            allowed_methods = controller.allowed_methods
            if config_true_value(req.headers.get(
                    'X-Backend-Allow-Private-Methods', False)):
                allowed_methods = set(allowed_methods).union(
                    controller.private_methods)
            if req.method not in allowed_methods:
                return HTTPMethodNotAllowed(request=req, headers={
                    'Allow': ', '.join(controller.allowed_methods)})
                    'Allow': ', '.join(allowed_methods)})
            handler = getattr(controller, req.method)

            old_authorize = None
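
Putting the two halves together, a hedged sketch of the request an internal caller now has to build to reach a private method such as UPDATE (swob's Request.blank is used here purely for illustration):

    from swift.common.swob import Request

    req = Request.blank(
        '/v1/AUTH_test/c', environ={'REQUEST_METHOD': 'UPDATE'},
        headers={'X-Backend-Allow-Private-Methods': 'true',
                 'X-Backend-Storage-Policy-Index': '0'})
    # without the header the proxy answers 405, listing only the public verbs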
@@ -15,6 +15,8 @@ python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
reno>=1.8.0 # Apache-2.0
python-openstackclient>=3.12.0
boto>=2.32.1
boto3>=1.9
botocore>=1.12
requests-mock>=1.2.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
keystonemiddleware>=4.17.0 # Apache-2.0
@@ -608,7 +608,7 @@ def in_process_setup(the_object_server=object_server):
        "Content-Language, Expires, X-Robots-Tag",
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_uri': 'http://127.0.0.1:%d/auth/v1.0' % prolis.getsockname()[1],
        'auth_uri': 'http://127.0.0.1:%d/auth/v1.0/' % prolis.getsockname()[1],
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',

@@ -882,10 +882,11 @@ def setup_package():
            # improve it to take a s3_storage_url option
            parsed = urlsplit(config['auth_uri'])
            config.update({
                'auth_ssl': parsed.scheme == 'https',
                'auth_ssl': str(parsed.scheme == 'https'),
                'auth_host': parsed.hostname,
                'auth_port': (parsed.port if parsed.port is not None else
                              443 if parsed.scheme == 'https' else 80),
                'auth_port': str(
                    parsed.port if parsed.port is not None else
                    443 if parsed.scheme == 'https' else 80),
                'auth_prefix': parsed.path,
            })
        elif 'auth_host' in config:
@@ -16,7 +16,8 @@
import unittest2
import traceback
import test.functional as tf
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.s3_test_client import (
    Connection, get_boto3_conn, tear_down_s3)


def setUpModule():

@@ -59,3 +60,23 @@ class S3ApiBase(unittest2.TestCase):
        if etag is not None:
            self.assertTrue('etag' in headers)  # sanity
            self.assertEqual(etag, headers['etag'].strip('"'))


class S3ApiBaseBoto3(S3ApiBase):
    def setUp(self):
        if 's3api' not in tf.cluster_info:
            raise tf.SkipTest('s3api middleware is not enabled')
        try:
            self.conn = get_boto3_conn()
            self.endpoint_url = self.conn._endpoint.host
            self.access_key = self.conn._request_signer._credentials.access_key
            self.region = self.conn._client_config.region_name
            tear_down_s3(self.conn)
        except Exception:
            message = '%s got an error during the initialization process.\n\n%s' % \
                (self.method_name, traceback.format_exc())
            # TODO: Find a way to make this go to FAIL instead of Error
            self.fail(message)

    def tearDown(self):
        tear_down_s3(self.conn)
@@ -15,6 +15,8 @@

import os
import test.functional as tf
import boto3
from botocore.exceptions import ClientError
from boto.s3.connection import S3Connection, OrdinaryCallingFormat, \
    S3ResponseError
import six

@@ -135,6 +137,53 @@ class Connection(object):
        return url, {}


def get_boto3_conn(aws_access_key='test:tester', aws_secret_key='testing'):
    host = tf.config['auth_host']
    port = int(tf.config['auth_port'])
    config = boto3.session.Config(s3={'addressing_style': 'path'})
    return boto3.client(
        's3', aws_access_key_id=aws_access_key,
        aws_secret_access_key=aws_secret_key,
        config=config, region_name='us-east-1', use_ssl=False,
        endpoint_url='http://{}:{}'.format(host, port))

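Typical use in a test, mirroring how S3ApiBaseBoto3.setUp() wires these helpers together (tear_down_s3 is defined just below; the bucket name is illustrative):

    conn = get_boto3_conn()              # defaults to test:tester / testing
    conn.create_bucket(Bucket='bucket')
    resp = conn.list_objects(Bucket='bucket')
    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
    tear_down_s3(conn)                   # leave the account empty again
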
def tear_down_s3(conn):
    """
    Reset the Swift environment to a clean state. After calling this
    method, we can assume the backend Swift keeps no containers and no
    objects on this connection's account.
    """
    exceptions = []
    for i in range(RETRY_COUNT):
        try:
            resp = conn.list_buckets()
            buckets = [bucket['Name'] for bucket in resp.get('Buckets', [])]
            for bucket in buckets:
                try:
                    resp = conn.list_multipart_uploads(Bucket=bucket)
                    for upload in resp.get('Uploads', []):
                        conn.abort_multipart_upload(
                            Bucket=bucket,
                            Key=upload['Key'],
                            UploadId=upload['UploadId'])

                    resp = conn.list_objects(Bucket=bucket)
                    for obj in resp.get('Contents', []):
                        conn.delete_object(Bucket=bucket, Key=obj['Key'])
                    conn.delete_bucket(Bucket=bucket)
                except ClientError as e:
                    # 404 means NoSuchBucket, NoSuchKey, or NoSuchUpload
                    if e.response['ResponseMetadata']['HTTPStatusCode'] != 404:
                        raise
        except Exception as e:
            exceptions.append(''.join(
                traceback.format_exception(*sys.exc_info())))
    if exceptions:
        exceptions.insert(0, 'Too many errors to continue:')
        raise Exception('\n========\n'.join(exceptions))


# TODO: check where this function is used
def get_admin_connection():
    """
@@ -13,16 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import botocore
import datetime
import unittest2
import os

import test.functional as tf
from swift.common.utils import config_true_value
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
    SubElement
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code
from test.functional.s3api import S3ApiBaseBoto3
from test.functional.s3api.s3_test_client import get_boto3_conn


def setUpModule():
@@ -33,14 +32,21 @@ def tearDownModule():
    tf.teardown_package()


class TestS3ApiBucket(S3ApiBase):
    def setUp(self):
        super(TestS3ApiBucket, self).setUp()

    def _gen_location_xml(self, location):
        elem = Element('CreateBucketConfiguration')
        SubElement(elem, 'LocationConstraint').text = location
        return tostring(elem)
class TestS3ApiBucket(S3ApiBaseBoto3):
    def _validate_object_listing(self, resp_objects, req_objects,
                                 expect_owner=True):
        self.assertEqual(len(resp_objects), len(req_objects))
        for i, obj in enumerate(resp_objects):
            self.assertEqual(obj['Key'], req_objects[i])
            self.assertEqual(type(obj['LastModified']), datetime.datetime)
            self.assertIn('ETag', obj)
            self.assertIn('Size', obj)
            self.assertEqual(obj['StorageClass'], 'STANDARD')
            if expect_owner:
                self.assertEqual(obj['Owner']['ID'], self.access_key)
                self.assertEqual(obj['Owner']['DisplayName'], self.access_key)
            else:
                self.assertNotIn('Owner', obj)

    def test_bucket(self):
        bucket = 'bucket'
@@ -48,112 +54,95 @@ class TestS3ApiBucket(S3ApiBase):
            'max_bucket_listing', 1000)

        # PUT Bucket
        status, headers, body = self.conn.make_request('PUT', bucket)
        self.assertEqual(status, 200)
        resp = self.conn.create_bucket(Bucket=bucket)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        headers = resp['ResponseMetadata']['HTTPHeaders']

        self.assertCommonResponseHeaders(headers)
        self.assertIn(headers['location'], (
            '/' + bucket,  # swob won't touch it...
            # but webob (which we get because of auth_token) *does*
            'http://%s%s/%s' % (
                self.conn.host,
                '' if self.conn.port == 80 else ':%d' % self.conn.port,
                bucket),
            # This is all based on the Host header the client provided,
            # and boto will double-up ports for sig v4. See
            # - https://github.com/boto/boto/issues/2623
            # - https://github.com/boto/boto/issues/3716
            # with proposed fixes at
            # - https://github.com/boto/boto/pull/3513
            # - https://github.com/boto/boto/pull/3676
            'http://%s%s:%d/%s' % (
                self.conn.host,
                '' if self.conn.port == 80 else ':%d' % self.conn.port,
                self.conn.port,
                bucket),
            '%s/%s' % (self.endpoint_url, bucket),
        ))
        self.assertEqual(headers['content-length'], '0')

        # GET Bucket(Without Object)
        status, headers, body = self.conn.make_request('GET', bucket)
        self.assertEqual(status, 200)
        resp = self.conn.list_objects(Bucket=bucket)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        headers = resp['ResponseMetadata']['HTTPHeaders']

        self.assertCommonResponseHeaders(headers)
        self.assertIsNotNone(headers['content-type'])
        self.assertEqual(headers['content-length'], str(len(body)))
        # TODO; requires consideration
        # self.assertEqual(headers['transfer-encoding'], 'chunked')

        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('Name').text, bucket)
        self.assertIsNone(elem.find('Prefix').text)
        self.assertIsNone(elem.find('Marker').text)
        self.assertEqual(
            elem.find('MaxKeys').text, str(max_bucket_listing))
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        objects = elem.findall('./Contents')
        self.assertEqual(list(objects), [])
        self.assertEqual(resp['Name'], bucket)
        self.assertEqual(resp['Prefix'], '')
        self.assertEqual(resp['Marker'], '')
        self.assertEqual(resp['MaxKeys'], max_bucket_listing)
        self.assertFalse(resp['IsTruncated'])
        self.assertNotIn('Contents', resp)

        # GET Bucket(With Object)
        req_objects = ('object', 'object2')
        req_objects = ['object', 'object2']
        for obj in req_objects:
            self.conn.make_request('PUT', bucket, obj)
        status, headers, body = self.conn.make_request('GET', bucket)
        self.assertEqual(status, 200)
            self.conn.put_object(Bucket=bucket, Key=obj, Body=b'')
        resp = self.conn.list_objects(Bucket=bucket)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])

        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('Name').text, bucket)
        self.assertIsNone(elem.find('Prefix').text)
        self.assertIsNone(elem.find('Marker').text)
        self.assertEqual(elem.find('MaxKeys').text,
                         str(max_bucket_listing))
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), 2)
        for o in resp_objects:
            self.assertIn(o.find('Key').text, req_objects)
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertIsNotNone(o.find('Size').text)
            self.assertIsNotNone(o.find('StorageClass').text)
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
        self.assertEqual(resp['Name'], bucket)
        self.assertEqual(resp['Prefix'], '')
        self.assertEqual(resp['Marker'], '')
        self.assertEqual(resp['MaxKeys'], max_bucket_listing)
        self.assertFalse(resp['IsTruncated'])
        self._validate_object_listing(resp['Contents'], req_objects)

        # HEAD Bucket
        status, headers, body = self.conn.make_request('HEAD', bucket)
        self.assertEqual(status, 200)
        resp = self.conn.head_bucket(Bucket=bucket)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        headers = resp['ResponseMetadata']['HTTPHeaders']

        self.assertCommonResponseHeaders(headers)
        self.assertIsNotNone(headers['content-type'])
        self.assertEqual(headers['content-length'], str(len(body)))
        # TODO; requires consideration
        # self.assertEqual(headers['transfer-encoding'], 'chunked')

        # DELETE Bucket
        for obj in req_objects:
            self.conn.make_request('DELETE', bucket, obj)
        status, headers, body = self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 204)
            self.conn.delete_object(Bucket=bucket, Key=obj)
        resp = self.conn.delete_bucket(Bucket=bucket)
        self.assertEqual(204, resp['ResponseMetadata']['HTTPStatusCode'])

        self.assertCommonResponseHeaders(headers)
        self.assertCommonResponseHeaders(
            resp['ResponseMetadata']['HTTPHeaders'])

    def test_put_bucket_error(self):
        status, headers, body = \
            self.conn.make_request('PUT', 'bucket+invalid')
        self.assertEqual(get_error_code(body), 'InvalidBucketName')
        event_system = self.conn.meta.events
        event_system.unregister(
            'before-parameter-build.s3',
            botocore.handlers.validate_bucket_name)
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.create_bucket(Bucket='bucket+invalid')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 400)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'InvalidBucketName')

        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = auth_error_conn.make_request('PUT', 'bucket')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            auth_error_conn.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 403)
        self.assertEqual(ctx.exception.response['Error']['Code'],
                         'SignatureDoesNotMatch')

        self.conn.make_request('PUT', 'bucket')
        status, headers, body = self.conn.make_request('PUT', 'bucket')
        self.assertEqual(status, 409)
        self.assertEqual(get_error_code(body), 'BucketAlreadyOwnedByYou')
        self.conn.create_bucket(Bucket='bucket')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 409)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'BucketAlreadyOwnedByYou')

    def test_put_bucket_error_key2(self):
        if config_true_value(tf.cluster_info['s3api'].get('s3_acl')):
@@ -163,15 +152,18 @@ class TestS3ApiBucket(S3ApiBase):
                'Cannot test for BucketAlreadyExists with second user; '
                'need s3_access_key2 and s3_secret_key2 configured')

        self.conn.make_request('PUT', 'bucket')
        self.conn.create_bucket(Bucket='bucket')

        # Other users of the same account get the same 409 error
        conn2 = Connection(tf.config['s3_access_key2'],
                           tf.config['s3_secret_key2'],
                           tf.config['s3_access_key2'])
        status, headers, body = conn2.make_request('PUT', 'bucket')
        self.assertEqual(status, 409)
        self.assertEqual(get_error_code(body), 'BucketAlreadyExists')
        conn2 = get_boto3_conn(tf.config['s3_access_key2'],
                               tf.config['s3_secret_key2'])
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            conn2.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'],
            409)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'BucketAlreadyExists')

    def test_put_bucket_error_key3(self):
        if 's3_access_key3' not in tf.config or \
@@ -179,41 +171,51 @@ class TestS3ApiBucket(S3ApiBase):
            raise tf.SkipTest('Cannot test for AccessDenied; need '
                              's3_access_key3 and s3_secret_key3 configured')

        self.conn.make_request('PUT', 'bucket')
        self.conn.create_bucket(Bucket='bucket')
        # If the user can't create buckets, they shouldn't even know
        # whether the bucket exists.
        conn3 = Connection(tf.config['s3_access_key3'],
                           tf.config['s3_secret_key3'],
                           tf.config['s3_access_key3'])
        status, headers, body = conn3.make_request('PUT', 'bucket')
        self.assertEqual(status, 403)
        self.assertEqual(get_error_code(body), 'AccessDenied')
        conn3 = get_boto3_conn(tf.config['s3_access_key3'],
                               tf.config['s3_secret_key3'])
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            conn3.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 403)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'AccessDenied')

    def test_put_bucket_with_LocationConstraint(self):
        bucket = 'bucket'
        xml = self._gen_location_xml(self.conn.conn.auth_region_name)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, body=xml)
        self.assertEqual(status, 200)
        resp = self.conn.create_bucket(
            Bucket='bucket',
            CreateBucketConfiguration={'LocationConstraint': self.region})
        self.assertEqual(resp['ResponseMetadata']['HTTPStatusCode'], 200)

    def test_get_bucket_error(self):
        self.conn.make_request('PUT', 'bucket')
        event_system = self.conn.meta.events
        event_system.unregister(
            'before-parameter-build.s3',
            botocore.handlers.validate_bucket_name)
        self.conn.create_bucket(Bucket='bucket')

        status, headers, body = \
            self.conn.make_request('GET', 'bucket+invalid')
        self.assertEqual(get_error_code(body), 'InvalidBucketName')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.list_objects(Bucket='bucket+invalid')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'InvalidBucketName')

        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = auth_error_conn.make_request('GET', 'bucket')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            auth_error_conn.list_objects(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'SignatureDoesNotMatch')

        status, headers, body = self.conn.make_request('GET', 'nothing')
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.list_objects(Bucket='nothing')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'NoSuchBucket')

    def _prepare_test_get_bucket(self, bucket, objects):
        self.conn.make_request('PUT', bucket)
        self.conn.create_bucket(Bucket=bucket)
        for obj in objects:
            self.conn.make_request('PUT', bucket, obj)
            self.conn.put_object(Bucket=bucket, Key=obj, Body=b'')

    def test_get_bucket_with_delimiter(self):
        bucket = 'bucket'
@@ -222,32 +224,16 @@ class TestS3ApiBucket(S3ApiBase):
        self._prepare_test_get_bucket(bucket, put_objects)

        delimiter = '/'
        query = 'delimiter=%s' % delimiter
        expect_objects = ('object', 'object2')
        expect_prefixes = ('dir/', 'subdir/', 'subdir2/')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('Delimiter').text, delimiter)
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertIsNotNone(o.find('Size').text)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
        resp_prefixes = elem.findall('CommonPrefixes')
        self.assertEqual(len(resp_prefixes), len(expect_prefixes))
        for i, p in enumerate(resp_prefixes):
            self.assertEqual(p.find('./Prefix').text, expect_prefixes[i])
        resp = self.conn.list_objects(Bucket=bucket, Delimiter=delimiter)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['Delimiter'], delimiter)
        self._validate_object_listing(resp['Contents'], expect_objects)
        resp_prefixes = resp['CommonPrefixes']
        self.assertEqual(
            resp_prefixes,
            [{'Prefix': p} for p in expect_prefixes])

    def test_get_bucket_with_encoding_type(self):
        bucket = 'bucket'
@@ -255,12 +241,10 @@ class TestS3ApiBucket(S3ApiBase):
        self._prepare_test_get_bucket(bucket, put_objects)

        encoding_type = 'url'
        query = 'encoding-type=%s' % encoding_type
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('EncodingType').text, encoding_type)
        resp = self.conn.list_objects(
            Bucket=bucket, EncodingType=encoding_type)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['EncodingType'], encoding_type)

    def test_get_bucket_with_marker(self):
        bucket = 'bucket'
@@ -269,27 +253,11 @@ class TestS3ApiBucket(S3ApiBase):
        self._prepare_test_get_bucket(bucket, put_objects)

        marker = 'object'
        query = 'marker=%s' % marker
        expect_objects = ('object2', 'subdir/object', 'subdir2/object')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('Marker').text, marker)
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertIsNotNone(o.find('Size').text)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
        resp = self.conn.list_objects(Bucket=bucket, Marker=marker)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['Marker'], marker)
        self._validate_object_listing(resp['Contents'], expect_objects)

    def test_get_bucket_with_max_keys(self):
        bucket = 'bucket'
@@ -297,28 +265,12 @@ class TestS3ApiBucket(S3ApiBase):
                       'dir/subdir/object')
        self._prepare_test_get_bucket(bucket, put_objects)

        max_keys = '2'
        query = 'max-keys=%s' % max_keys
        max_keys = 2
        expect_objects = ('dir/subdir/object', 'object')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('MaxKeys').text, max_keys)
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertIsNotNone(o.find('Size').text)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
        resp = self.conn.list_objects(Bucket=bucket, MaxKeys=max_keys)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['MaxKeys'], max_keys)
        self._validate_object_listing(resp['Contents'], expect_objects)

    def test_get_bucket_with_prefix(self):
        bucket = 'bucket'
@@ -327,27 +279,11 @@ class TestS3ApiBucket(S3ApiBase):
        self._prepare_test_get_bucket(bucket, req_objects)

        prefix = 'object'
        query = 'prefix=%s' % prefix
        expect_objects = ('object', 'object2')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('Prefix').text, prefix)
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertIsNotNone(o.find('ETag').text)
            self.assertIsNotNone(o.find('Size').text)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
        resp = self.conn.list_objects(Bucket=bucket, Prefix=prefix)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['Prefix'], prefix)
        self._validate_object_listing(resp['Contents'], expect_objects)

    def test_get_bucket_v2_with_start_after(self):
        bucket = 'bucket'
@@ -356,26 +292,13 @@ class TestS3ApiBucket(S3ApiBase):
        self._prepare_test_get_bucket(bucket, put_objects)

        marker = 'object'
        query = 'list-type=2&start-after=%s' % marker
        expect_objects = ('object2', 'subdir/object', 'subdir2/object')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('StartAfter').text, marker)
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertTrue(o.find('LastModified').text is not None)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertTrue(o.find('ETag').text is not None)
            self.assertTrue(o.find('Size').text is not None)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertIsNone(o.find('Owner/ID'))
            self.assertIsNone(o.find('Owner/DisplayName'))
        resp = self.conn.list_objects_v2(Bucket=bucket, StartAfter=marker)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['StartAfter'], marker)
        self.assertEqual(resp['KeyCount'], 3)
        self._validate_object_listing(resp['Contents'], expect_objects,
                                      expect_owner=False)

    def test_get_bucket_v2_with_fetch_owner(self):
        bucket = 'bucket'
@@ -383,28 +306,12 @@ class TestS3ApiBucket(S3ApiBase):
                       'dir/subdir/object')
        self._prepare_test_get_bucket(bucket, put_objects)

        query = 'list-type=2&fetch-owner=true'
        expect_objects = ('dir/subdir/object', 'object', 'object2',
                          'subdir/object', 'subdir2/object')
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('KeyCount').text, '5')
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), len(expect_objects))
        for i, o in enumerate(resp_objects):
            self.assertEqual(o.find('Key').text, expect_objects[i])
            self.assertTrue(o.find('LastModified').text is not None)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertTrue(o.find('ETag').text is not None)
            self.assertTrue(o.find('Size').text is not None)
            self.assertEqual(o.find('StorageClass').text, 'STANDARD')
            self.assertTrue(o.find('Owner/ID').text, self.conn.user_id)
            self.assertTrue(o.find('Owner/DisplayName').text,
                            self.conn.user_id)
        resp = self.conn.list_objects_v2(Bucket=bucket, FetchOwner=True)
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(resp['KeyCount'], 5)
        self._validate_object_listing(resp['Contents'], expect_objects)

    def test_get_bucket_v2_with_continuation_token_and_delimiter(self):
        bucket = 'bucket'
@@ -421,86 +328,116 @@ class TestS3ApiBucket(S3ApiBase):
                     'subdirs': []}]

        continuation_token = ''
        query = 'list-type=2&max-keys=3&delimiter=/&continuation-token=%s'

        for i in range(len(expected)):
            status, headers, body = self.conn.make_request(
                'GET', bucket, query=query % continuation_token)
            self.assertEqual(status, 200)
            elem = fromstring(body, 'ListBucketResult')
            self.assertEqual(elem.find('MaxKeys').text, '3')
            resp = self.conn.list_objects_v2(
                Bucket=bucket,
                MaxKeys=3,
                Delimiter='/',
                ContinuationToken=continuation_token)
            self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
            self.assertEqual(resp['MaxKeys'], 3)
            self.assertEqual(
                elem.find('KeyCount').text,
                str(len(expected[i]['objects']) + len(expected[i]['subdirs'])))
            expect_truncated = 'true' if i < len(expected) - 1 else 'false'
            self.assertEqual(elem.find('IsTruncated').text, expect_truncated)
            next_cont_token_elem = elem.find('NextContinuationToken')
            if expect_truncated == 'true':
                self.assertIsNotNone(next_cont_token_elem)
                continuation_token = next_cont_token_elem.text
            resp_objects = elem.findall('./Contents')
                resp['KeyCount'],
                len(expected[i]['objects']) + len(expected[i]['subdirs']))
            expect_truncated = i < len(expected) - 1
            self.assertEqual(resp['IsTruncated'], expect_truncated)
            if expect_truncated:
                self.assertIsNotNone(resp['NextContinuationToken'])
                continuation_token = resp['NextContinuationToken']
            self._validate_object_listing(resp['Contents'],
                                          expected[i]['objects'],
                                          expect_owner=False)
            resp_subdirs = resp.get('CommonPrefixes', [])
            self.assertEqual(
                len(list(resp_objects)), len(expected[i]['objects']))
            for j, o in enumerate(resp_objects):
                self.assertEqual(o.find('Key').text,
                                 expected[i]['objects'][j].encode('utf-8'))
                self.assertTrue(o.find('LastModified').text is not None)
                self.assertRegexpMatches(
                    o.find('LastModified').text,
                    r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
                self.assertTrue(o.find('ETag').text is not None)
                self.assertTrue(o.find('Size').text is not None)
                self.assertEqual(o.find('StorageClass').text, 'STANDARD')
                self.assertIsNone(o.find('Owner/ID'))
                self.assertIsNone(o.find('Owner/DisplayName'))
            resp_subdirs = elem.findall('./CommonPrefixes')
            self.assertEqual(
                len(list(resp_subdirs)), len(expected[i]['subdirs']))
            for j, o in enumerate(resp_subdirs):
                self.assertEqual(
                    o.find('Prefix').text,
                    expected[i]['subdirs'][j].encode('utf-8'))
                resp_subdirs,
                [{'Prefix': p} for p in expected[i]['subdirs']])

    def test_head_bucket_error(self):
        self.conn.make_request('PUT', 'bucket')
        event_system = self.conn.meta.events
        event_system.unregister(
            'before-parameter-build.s3',
            botocore.handlers.validate_bucket_name)

        status, headers, body = \
            self.conn.make_request('HEAD', 'bucket+invalid')
        self.assertEqual(status, 400)
        self.assertEqual(body, '')  # sanity
        self.conn.create_bucket(Bucket='bucket')

        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('HEAD', 'bucket')
        self.assertEqual(status, 403)
        self.assertEqual(body, '')  # sanity
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.head_bucket(Bucket='bucket+invalid')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 400)
        self.assertEqual(ctx.exception.response['Error']['Code'], '400')
        self.assertEqual(
            ctx.exception.response[
                'ResponseMetadata']['HTTPHeaders']['content-length'], '0')

        status, headers, body = self.conn.make_request('HEAD', 'nothing')
        self.assertEqual(status, 404)
        self.assertEqual(body, '')  # sanity
        auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            auth_error_conn.head_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 403)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], '403')
        self.assertEqual(
            ctx.exception.response[
                'ResponseMetadata']['HTTPHeaders']['content-length'], '0')

        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.head_bucket(Bucket='nothing')
        self.assertEqual(
            ctx.exception.response['ResponseMetadata']['HTTPStatusCode'], 404)
        self.assertEqual(
            ctx.exception.response['Error']['Code'], '404')
        self.assertEqual(
            ctx.exception.response[
                'ResponseMetadata']['HTTPHeaders']['content-length'], '0')

    def test_delete_bucket_error(self):
        status, headers, body = \
            self.conn.make_request('DELETE', 'bucket+invalid')
        self.assertEqual(get_error_code(body), 'InvalidBucketName')
        event_system = self.conn.meta.events
        event_system.unregister(
            'before-parameter-build.s3',
            botocore.handlers.validate_bucket_name)
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.delete_bucket(Bucket='bucket+invalid')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'InvalidBucketName')

        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('DELETE', 'bucket')
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        auth_error_conn = get_boto3_conn(aws_secret_key='invalid')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            auth_error_conn.delete_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'SignatureDoesNotMatch')

        status, headers, body = self.conn.make_request('DELETE', 'bucket')
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.delete_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'NoSuchBucket')

    def test_bucket_invalid_method_error(self):
        def _mangle_req_method(request, **kwargs):
            request.method = 'GETPUT'

        def _mangle_req_controller_method(request, **kwargs):
            request.method = '_delete_segments_bucket'

        event_system = self.conn.meta.events
        event_system.register(
            'request-created.s3.CreateBucket',
            _mangle_req_method)
        # nonexistent verb in the controller
        status, headers, body = \
            self.conn.make_request('GETPUT', 'bucket')
        self.assertEqual(get_error_code(body), 'MethodNotAllowed')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'MethodNotAllowed')

        event_system.unregister('request-created.s3.CreateBucket',
                                _mangle_req_method)
        event_system.register('request-created.s3.CreateBucket',
                              _mangle_req_controller_method)
        # the method exists in the controller but is denied as
        # MethodNotAllowed
        status, headers, body = \
            self.conn.make_request('_delete_segments_bucket', 'bucket')
        self.assertEqual(get_error_code(body), 'MethodNotAllowed')
        with self.assertRaises(botocore.exceptions.ClientError) as ctx:
            self.conn.create_bucket(Bucket='bucket')
        self.assertEqual(
            ctx.exception.response['Error']['Code'], 'MethodNotAllowed')


class TestS3ApiBucketSigV4(TestS3ApiBucket):
@@ -32,6 +32,7 @@ from swiftclient import get_auth

from swift.common import constraints
from swift.common.http import is_success
from swift.common.swob import str_to_wsgi, wsgi_to_str
from swift.common.utils import config_true_value

from test import safe_repr
@@ -324,7 +325,7 @@ class Connection(object):
        if path:
            quote = urllib.parse.quote
            if cfg.get('no_quote') or cfg.get('no_path_quote'):
                quote = lambda x: x
                quote = str_to_wsgi
            return '%s/%s' % (self.storage_path,
                              '/'.join([quote(i) for i in path]))
        else:
@@ -342,7 +343,8 @@ class Connection(object):
            headers['X-Auth-Token'] = cfg.get('use_token')

        if isinstance(hdrs, dict):
            headers.update(hdrs)
            headers.update((str_to_wsgi(h), str_to_wsgi(v))
                           for h, v in hdrs.items())
        return headers

    def make_request(self, method, path=None, data=b'', hdrs=None, parms=None,
@@ -489,7 +491,10 @@ class Base(object):
            'x-container-bytes-used',
        )

        headers = dict(self.conn.response.getheaders())
        # NB: on py2, headers are always lower; on py3, they match the bytes
        # on the wire
        headers = dict((wsgi_to_str(h).lower(), wsgi_to_str(v))
                       for h, v in self.conn.response.getheaders())
        ret = {}

        for return_key, header in required_fields:
@@ -954,17 +959,19 @@ class File(Base):
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))

        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
            if hdr[0].lower().startswith('x-object-meta-'):
                self.metadata[hdr[0][14:]] = hdr[1]
            if hdr[0].lower() == 'etag':
                self.etag = hdr[1]
            if hdr[0].lower() == 'content-length':
                self.size = int(hdr[1])
            if hdr[0].lower() == 'last-modified':
                self.last_modified = hdr[1]
        for hdr, val in self.conn.response.getheaders():
            hdr = wsgi_to_str(hdr).lower()
            val = wsgi_to_str(val)
            if hdr == 'content-type':
                self.content_type = val
            if hdr.startswith('x-object-meta-'):
                self.metadata[hdr[14:]] = val
            if hdr == 'etag':
                self.etag = val
            if hdr == 'content-length':
                self.size = int(val)
            if hdr == 'last-modified':
                self.last_modified = val

        return True

@@ -1007,11 +1014,11 @@ class File(Base):
            raise ResponseError(self.conn.response, 'GET',
                                self.conn.make_path(self.path))

        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
            if hdr[0].lower() == 'content-range':
                self.content_range = hdr[1]
        for hdr, val in self.conn.response.getheaders():
            if hdr.lower() == 'content-type':
                self.content_type = wsgi_to_str(val)
            if hdr.lower() == 'content-range':
                self.content_range = val

        if hasattr(buffer, 'write'):
            scratch = self.conn.response.read(8192)
@@ -885,7 +885,7 @@ class TestContainer(unittest2.TestCase):
            new_container_name = str(uuid4())
            resp = retry(put, new_container_name, use_account=3)
            resp.read()
            self.assertEqual(resp.status, 201)
            self.assertIn(resp.status, (201, 202))
            resp = retry(get, use_account=3)
            listing = resp.read()
            self.assertEqual(resp.status, 200)
@@ -894,7 +894,7 @@ class TestContainer(unittest2.TestCase):
            # can also delete them
            resp = retry(delete, new_container_name, use_account=3)
            resp.read()
            self.assertEqual(resp.status, 204)
            self.assertIn(resp.status, (204, 404))
            resp = retry(get, use_account=3)
            listing = resp.read()
            self.assertEqual(resp.status, 200)
@@ -904,10 +904,10 @@ class TestContainer(unittest2.TestCase):
        empty_container_name = str(uuid4())
        resp = retry(put, empty_container_name, use_account=1)
        resp.read()
        self.assertEqual(resp.status, 201)
        self.assertIn(resp.status, (201, 202))
        resp = retry(delete, empty_container_name, use_account=3)
        resp.read()
        self.assertEqual(resp.status, 204)
        self.assertIn(resp.status, (204, 404))

    @requires_acls
    def test_read_write_acl_metadata(self):
@@ -15,7 +15,6 @@
# limitations under the License.

from datetime import datetime
import email.parser
import hashlib
import locale
import random
@@ -29,6 +28,11 @@ import eventlet
from swift.common.http import is_success, is_client_error
from email.utils import parsedate

if six.PY2:
    from email.parser import FeedParser
else:
    from email.parser import BytesFeedParser as FeedParser

import mock

from test.functional import normalized_urls, load_constraint, cluster_info
@@ -244,7 +248,8 @@ class TestAccount(Base):
                self.assertGreaterEqual(a['count'], 0)
                self.assertGreaterEqual(a['bytes'], 0)

            headers = dict(self.env.conn.response.getheaders())
            headers = dict((k.lower(), v)
                           for k, v in self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
@@ -402,7 +407,7 @@ class TestAccount(Base):
        quoted_hax = urllib.parse.quote(hax)
        conn.connection.request('GET', '/v1/' + quoted_hax, None, {})
        resp = conn.connection.getresponse()
        resp_headers = dict(resp.getheaders())
        resp_headers = dict((h.lower(), v) for h, v in resp.getheaders())
        self.assertIn('www-authenticate', resp_headers)
        actual = resp_headers['www-authenticate']
        expected = 'Swift realm="%s"' % quoted_hax
@@ -1271,7 +1276,7 @@ class TestFile(Base):
            if k.lower() in unexpected_hdrs:
                errors.append('Found unexpected header %s: %s' % (k, v))
        for k, v in expected_hdrs.items():
            matches = [hdr for hdr in resp_headers if hdr[0] == k]
            matches = [hdr for hdr in resp_headers if hdr[0].lower() == k]
            if not matches:
                errors.append('Missing expected header %s' % k)
            for (got_k, got_v) in matches:
@@ -1941,7 +1946,12 @@ class TestFile(Base):

            if len(key) > j:
                key = key[:j]
                val = val[:j]
                # NB: we'll likely write object metadata that's *not* UTF-8
                if six.PY2:
                    val = val[:j]
                else:
                    val = val.encode('utf8')[:j].decode(
                        'utf8', 'surrogateescape')

            metadata[key] = val

@@ -2071,8 +2081,8 @@ class TestFile(Base):
        # HTTP response bodies don't). We fake it out by constructing a
        # one-header preamble containing just the Content-Type, then
        # feeding in the response body.
        parser = email.parser.FeedParser()
        parser.feed("Content-Type: %s\r\n\r\n" % content_type)
        parser = FeedParser()
        parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
        parser.feed(fetched)
        root_message = parser.close()
        self.assertTrue(root_message.is_multipart())
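
The preamble trick in the hunk above generalizes. A minimal sketch (not part
of this patch; parse_byteranges is a hypothetical helper name) of parsing a
multipart/byteranges body with the py3 parser:

    from email.parser import BytesFeedParser

    def parse_byteranges(content_type, payload):
        # FeedParser wants a header block before the body, so synthesize a
        # one-header preamble from the response's Content-Type (a str),
        # then feed it the raw payload (bytes).
        parser = BytesFeedParser()
        parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
        parser.feed(payload)
        message = parser.close()
        # On a multipart message, each sub-message is one byterange with
        # its own Content-Range header.
        return message.get_payload() if message.is_multipart() else [message]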
@@ -2086,7 +2096,7 @@ class TestFile(Base):
            byteranges[0]['Content-Range'],
            "bytes %d-%d/%d" % (i, i + subrange_size - 1, file_length))
        self.assertEqual(
            byteranges[0].get_payload(),
            byteranges[0].get_payload(decode=True),
            data[i:(i + subrange_size)])

        self.assertEqual(byteranges[1]['Content-Type'],
@@ -2096,7 +2106,7 @@ class TestFile(Base):
            "bytes %d-%d/%d" % (i + 2 * subrange_size,
                                i + 3 * subrange_size - 1, file_length))
        self.assertEqual(
            byteranges[1].get_payload(),
            byteranges[1].get_payload(decode=True),
            data[(i + 2 * subrange_size):(i + 3 * subrange_size)])

        self.assertEqual(byteranges[2]['Content-Type'],
@@ -2106,7 +2116,7 @@ class TestFile(Base):
            "bytes %d-%d/%d" % (i + 4 * subrange_size,
                                i + 5 * subrange_size - 1, file_length))
        self.assertEqual(
            byteranges[2].get_payload(),
            byteranges[2].get_payload(decode=True),
            data[(i + 4 * subrange_size):(i + 5 * subrange_size)])

        # The first two ranges are satisfiable but the third is not; the
@@ -2123,8 +2133,8 @@ class TestFile(Base):
        self.assertTrue(content_type.startswith("multipart/byteranges"))
        self.assertIsNone(file_item.content_range)

        parser = email.parser.FeedParser()
        parser.feed("Content-Type: %s\r\n\r\n" % content_type)
        parser = FeedParser()
        parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
        parser.feed(fetched)
        root_message = parser.close()

@@ -2137,7 +2147,8 @@ class TestFile(Base):
        self.assertEqual(
            byteranges[0]['Content-Range'],
            "bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
        self.assertEqual(byteranges[0].get_payload(), data[:subrange_size])
        self.assertEqual(byteranges[0].get_payload(decode=True),
                         data[:subrange_size])

        self.assertEqual(byteranges[1]['Content-Type'],
                         "lovecraft/rugose; squamous=true")
@@ -2146,7 +2157,7 @@ class TestFile(Base):
            "bytes %d-%d/%d" % (2 * subrange_size, 3 * subrange_size - 1,
                                file_length))
        self.assertEqual(
            byteranges[1].get_payload(),
            byteranges[1].get_payload(decode=True),
            data[(2 * subrange_size):(3 * subrange_size)])

        # The first range is satisfiable but the second is not; the
@@ -2161,8 +2172,8 @@ class TestFile(Base):
        content_type = file_item.content_type
        if content_type.startswith("multipart/byteranges"):
            self.assertIsNone(file_item.content_range)
            parser = email.parser.FeedParser()
            parser.feed("Content-Type: %s\r\n\r\n" % content_type)
            parser = FeedParser()
            parser.feed(b"Content-Type: %s\r\n\r\n" % content_type.encode())
            parser.feed(fetched)
            root_message = parser.close()

@@ -2175,7 +2186,8 @@ class TestFile(Base):
            self.assertEqual(
                byteranges[0]['Content-Range'],
                "bytes %d-%d/%d" % (0, subrange_size - 1, file_length))
            self.assertEqual(byteranges[0].get_payload(), data[:subrange_size])
            self.assertEqual(byteranges[0].get_payload(decode=True),
                             data[:subrange_size])
        else:
            self.assertEqual(
                file_item.content_range,
@@ -2494,7 +2506,8 @@ class TestFile(Base):
                found, 'Unexpected file %s found in '
                '%s listing' % (file_item['name'], format_type))

            headers = dict(self.env.conn.response.getheaders())
            headers = dict((h.lower(), v)
                           for h, v in self.env.conn.response.getheaders())
            if format_type == 'json':
                self.assertEqual(headers['content-type'],
                                 'application/json; charset=utf-8')
@@ -2536,7 +2549,8 @@ class TestFile(Base):
        data = six.BytesIO(file_item.write_random(512))
        etag = File.compute_md5sum(data)

        headers = dict(self.env.conn.response.getheaders())
        headers = dict((h.lower(), v)
                       for h, v in self.env.conn.response.getheaders())
        self.assertIn('etag', headers.keys())

        header_etag = headers['etag'].strip('"')
@@ -0,0 +1,142 @@
# Copyright (c) 2019 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
import unittest

import boto3
from six.moves import urllib

from swift.common.utils import config_true_value

from test import get_config

_CONFIG = None


# boto's logging can get pretty noisy; require opt-in to see it all
if not config_true_value(os.environ.get('BOTO3_DEBUG')):
    logging.getLogger('boto3').setLevel(logging.INFO)
    logging.getLogger('botocore').setLevel(logging.INFO)


class ConfigError(Exception):
    '''Error raised for test conf misconfigurations'''


def get_opt_or_error(option):
    global _CONFIG
    if _CONFIG is None:
        _CONFIG = get_config('s3api_test')

    value = _CONFIG.get(option)
    if not value:
        raise ConfigError('must supply [s3api_test]%s' % option)
    return value


def get_opt(option, default=None):
    try:
        return get_opt_or_error(option)
    except ConfigError:
        return default


def get_s3_client(user=1, signature_version='s3v4', addressing_style='path'):
    '''
    Get a boto3 client to talk to an S3 endpoint.

    :param user: user number to use. Should be one of:
        1 -- primary user
        2 -- secondary user
        3 -- unprivileged user
    :param signature_version: S3 signing method. Should be one of:
        s3 -- v2 signatures; produces Authorization headers like
              ``AWS access_key:signature``
        s3-query -- v2 pre-signed URLs; produces query strings like
                    ``?AWSAccessKeyId=access_key&Signature=signature``
        s3v4 -- v4 signatures; produces Authorization headers like
                ``AWS4-HMAC-SHA256
                Credential=access_key/date/region/s3/aws4_request,
                Signature=signature``
        s3v4-query -- v4 pre-signed URLs; produces query strings like
                      ``?X-Amz-Algorithm=AWS4-HMAC-SHA256&
                      X-Amz-Credential=access_key/date/region/s3/aws4_request&
                      X-Amz-Signature=signature``
    :param addressing_style: One of:
        path -- produces URLs like ``http(s)://host.domain/bucket/key``
        virtual -- produces URLs like ``http(s)://bucket.host.domain/key``
    '''
    endpoint = get_opt_or_error('endpoint')
    scheme = urllib.parse.urlsplit(endpoint).scheme
    if scheme not in ('http', 'https'):
        raise ConfigError('unexpected scheme in endpoint: %r; '
                          'expected http or https' % scheme)
    region = get_opt('region', 'us-east-1')
    access_key = get_opt_or_error('access_key%d' % user)
    secret_key = get_opt_or_error('secret_key%d' % user)

    ca_cert = get_opt('ca_cert')
    if ca_cert is not None:
        try:
            # do a quick check now; it's more expensive to have boto check
            os.stat(ca_cert)
        except OSError as e:
            raise ConfigError(str(e))

    return boto3.client(
        's3',
        endpoint_url=endpoint,
        region_name=region,
        use_ssl=(scheme == 'https'),
        verify=ca_cert,
        config=boto3.session.Config(s3={
            'signature_version': signature_version,
            'addressing_style': addressing_style,
        }),
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
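
A usage sketch (not part of this patch; the bucket name is illustrative and
a populated [s3api_test] section in test.conf is assumed):

    client = get_s3_client(user=1, signature_version='s3',
                           addressing_style='virtual')
    client.create_bucket(Bucket='sketch-bucket')
    print([b['Name'] for b in client.list_buckets()['Buckets']])
    client.delete_bucket(Bucket='sketch-bucket')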
class BaseS3TestCase(unittest.TestCase):
    # Default to v4 signatures (as aws-cli does), but subclasses can override
    signature_version = 's3v4'

    @classmethod
    def get_s3_client(cls, user):
        return get_s3_client(user, cls.signature_version)

    @classmethod
    def clear_bucket(cls, client, bucket):
        for key in client.list_objects(Bucket=bucket).get('Contents', []):
            client.delete_object(Bucket=bucket, Key=key['Key'])

    @classmethod
    def clear_account(cls, client):
        for bucket in client.list_buckets()['Buckets']:
            cls.clear_bucket(client, bucket['Name'])
            client.delete_bucket(Bucket=bucket['Name'])

    def tearDown(self):
        client = self.get_s3_client(1)
        self.clear_account(client)
        try:
            client = self.get_s3_client(2)
        except ConfigError:
            pass
        else:
            self.clear_account(client)
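
A minimal sketch of how a test module can build on this base class (not part
of this patch; the test name and bucket are illustrative):

    from test.s3api import BaseS3TestCase

    class TestBucketRoundTrip(BaseS3TestCase):
        def test_create_list_delete(self):
            client = self.get_s3_client(1)
            client.create_bucket(Bucket='sketch-bucket')
            names = [b['Name'] for b in client.list_buckets()['Buckets']]
            self.assertIn('sketch-bucket', names)
            client.delete_bucket(Bucket='sketch-bucket')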
@@ -0,0 +1,91 @@
# Copyright (c) 2019 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import uuid

from test.s3api import BaseS3TestCase, ConfigError


class TestGetServiceSigV4(BaseS3TestCase):
    def test_empty_service(self):
        def do_test(client):
            access_key = client._request_signer._credentials.access_key
            resp = client.list_buckets()
            self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
            self.assertEqual([], resp['Buckets'])
            self.assertIn('x-amz-request-id',
                          resp['ResponseMetadata']['HTTPHeaders'])
            self.assertIn('DisplayName', resp['Owner'])
            self.assertEqual(access_key, resp['Owner']['DisplayName'])
            self.assertIn('ID', resp['Owner'])

        client = self.get_s3_client(1)
        do_test(client)
        try:
            client = self.get_s3_client(3)
        except ConfigError:
            pass
        else:
            do_test(client)

    def test_service_with_buckets(self):
        c = self.get_s3_client(1)
        buckets = [str(uuid.uuid4()) for _ in range(5)]
        for bucket in buckets:
            c.create_bucket(Bucket=bucket)

        resp = c.list_buckets()
        self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
        self.assertEqual(sorted(buckets), [
            bucket['Name'] for bucket in resp['Buckets']])
        self.assertTrue(all('CreationDate' in bucket
                            for bucket in resp['Buckets']))
        self.assertIn('x-amz-request-id',
                      resp['ResponseMetadata']['HTTPHeaders'])
        self.assertIn('DisplayName', resp['Owner'])
        access_key = c._request_signer._credentials.access_key
        self.assertEqual(access_key, resp['Owner']['DisplayName'])
        self.assertIn('ID', resp['Owner'])

        # Second user can only see its own buckets
        try:
            c2 = self.get_s3_client(2)
        except ConfigError as err:
            raise unittest.SkipTest(str(err))
        buckets2 = [str(uuid.uuid4()) for _ in range(2)]
        for bucket in buckets2:
            c2.create_bucket(Bucket=bucket)
        self.assertEqual(sorted(buckets2), [
            bucket['Name'] for bucket in c2.list_buckets()['Buckets']])

        # Unprivileged user can't see anything
        try:
            c3 = self.get_s3_client(3)
        except ConfigError as err:
            raise unittest.SkipTest(str(err))
        self.assertEqual([], c3.list_buckets()['Buckets'])


class TestGetServiceSigV2(TestGetServiceSigV4):
    signature_version = 's3'


class TestGetServicePresignedV2(TestGetServiceSigV4):
    signature_version = 's3-query'


class TestGetServicePresignedV4(TestGetServiceSigV4):
    signature_version = 's3v4-query'
@@ -1,3 +1,20 @@
[s3api_test]
# You need to enable advanced compatibility features to pass all tests. Add
# the following non-default options to the s3api section of your
# proxy-server.conf:
# s3_acl = True
# check_bucket_owner = True
endpoint = http://127.0.0.1:8080
#ca_cert=/path/to/ca.crt
region = us-east-1
# First and second users should be account owners
access_key1 = test:tester
secret_key1 = testing
access_key2 = test:tester2
secret_key2 = testing2
# Third user should be unprivileged
access_key3 = test:tester3
secret_key3 = testing3

[func_test]
# Sample config for Swift with tempauth
auth_uri = http://127.0.0.1:8080/auth/v1.0
@@ -0,0 +1,278 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import itertools
import json
import mock
import six
import unittest

from swift.cli import container_deleter
from swift.common import internal_client
from swift.common import swob
from swift.common import utils

AppCall = collections.namedtuple('AppCall', [
    'method', 'path', 'query', 'headers', 'body'])


class FakeInternalClient(internal_client.InternalClient):
    def __init__(self, responses):
        self.resp_iter = iter(responses)
        self.calls = []

    def make_request(self, method, path, headers, acceptable_statuses,
                     body_file=None, params=None):
        if body_file is None:
            body = None
        else:
            body = body_file.read()
        path, _, query = path.partition('?')
        self.calls.append(AppCall(method, path, query, headers, body))
        resp = next(self.resp_iter)
        if isinstance(resp, Exception):
            raise resp
        return resp

    def __enter__(self):
        return self

    def __exit__(self, *args):
        unused_responses = [r for r in self.resp_iter]
        if unused_responses:
            raise Exception('Unused responses: %r' % unused_responses)


class TestContainerDeleter(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch.object(container_deleter.time, 'time',
                                    side_effect=itertools.count())
        patcher.__enter__()
        self.addCleanup(patcher.__exit__)

        patcher = mock.patch.object(container_deleter, 'OBJECTS_PER_UPDATE', 5)
        patcher.__enter__()
        self.addCleanup(patcher.__exit__)

    def test_make_delete_jobs(self):
        ts = '1558463777.42739'
        self.assertEqual(
            container_deleter.make_delete_jobs(
                'acct', 'cont', ['obj1', 'obj2'],
                utils.Timestamp(ts)),
            [{'name': ts.split('.')[0] + '-acct/cont/obj1',
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': ts.split('.')[0] + '-acct/cont/obj2',
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])

    def test_make_delete_jobs_native_utf8(self):
        ts = '1558463777.42739'
        uacct = acct = u'acct-\U0001f334'
        ucont = cont = u'cont-\N{SNOWMAN}'
        uobj1 = obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
        uobj2 = obj2 = u'/obj-\N{GREEK CAPITAL LETTER OMEGA}'
        if six.PY2:
            acct = acct.encode('utf8')
            cont = cont.encode('utf8')
            obj1 = obj1.encode('utf8')
            obj2 = obj2.encode('utf8')
        self.assertEqual(
            container_deleter.make_delete_jobs(
                acct, cont, [obj1, obj2], utils.Timestamp(ts)),
            [{'name': u'%s-%s/%s/%s' % (ts.split('.')[0], uacct, ucont, uobj1),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': u'%s-%s/%s/%s' % (ts.split('.')[0], uacct, ucont, uobj2),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])

    def test_make_delete_jobs_unicode_utf8(self):
        ts = '1558463777.42739'
        acct = u'acct-\U0001f334'
        cont = u'cont-\N{SNOWMAN}'
        obj1 = u'obj-\N{GREEK CAPITAL LETTER ALPHA}'
        obj2 = u'obj-\N{GREEK CAPITAL LETTER OMEGA}'
        self.assertEqual(
            container_deleter.make_delete_jobs(
                acct, cont, [obj1, obj2], utils.Timestamp(ts)),
            [{'name': u'%s-%s/%s/%s' % (ts.split('.')[0], acct, cont, obj1),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'},
             {'name': u'%s-%s/%s/%s' % (ts.split('.')[0], acct, cont, obj2),
              'deleted': 0,
              'created_at': ts,
              'etag': utils.MD5_OF_EMPTY_STRING,
              'size': 0,
              'storage_policy_index': 0,
              'content_type': 'application/async-deleted'}])

    def test_mark_for_deletion_empty_no_yield(self):
        with FakeInternalClient([
            swob.Response(json.dumps([
            ])),
        ]) as swift:
            self.assertEqual(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                'marker',
                'end',
                'prefix',
                timestamp=None,
                yield_time=None,
            ), 0)
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=marker&end_marker=end&prefix=prefix',
                 {}, None),
            ])

    def test_mark_for_deletion_empty_with_yield(self):
        with FakeInternalClient([
            swob.Response(json.dumps([
            ])),
        ]) as swift:
            self.assertEqual(list(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                'marker',
                'end',
                'prefix',
                timestamp=None,
                yield_time=0.5,
            )), [(0, None)])
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=marker&end_marker=end&prefix=prefix',
                 {}, None),
            ])

    def test_mark_for_deletion_one_update_no_yield(self):
        ts = '1558463777.42739'
        with FakeInternalClient([
            swob.Response(json.dumps([
                {'name': '/obj1'},
                {'name': 'obj2'},
                {'name': 'obj3'},
            ])),
            swob.Response(json.dumps([
            ])),
            swob.Response(status=202),
        ]) as swift:
            self.assertEqual(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                '',
                '',
                '',
                timestamp=utils.Timestamp(ts),
                yield_time=None,
            ), 3)
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=&end_marker=&prefix=', {}, None),
                ('GET', '/v1/account/container',
                 'format=json&marker=obj3&end_marker=&prefix=', {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
            ])
            self.assertEqual(
                json.loads(swift.calls[-1].body),
                container_deleter.make_delete_jobs(
                    'account', 'container', ['/obj1', 'obj2', 'obj3'],
                    utils.Timestamp(ts)
                )
            )

    def test_mark_for_deletion_two_updates_with_yield(self):
        ts = '1558463777.42739'
        with FakeInternalClient([
            swob.Response(json.dumps([
                {'name': 'obj1'},
                {'name': 'obj2'},
                {'name': 'obj3'},
                {'name': u'obj4-\N{SNOWMAN}'},
                {'name': 'obj5'},
                {'name': 'obj6'},
            ])),
            swob.Response(status=202),
            swob.Response(json.dumps([
            ])),
            swob.Response(status=202),
        ]) as swift:
            self.assertEqual(list(container_deleter.mark_for_deletion(
                swift,
                'account',
                'container',
                '',
                'end',
                'pre',
                timestamp=utils.Timestamp(ts),
                yield_time=0,
            )), [(5, 'obj5'), (6, 'obj6'), (6, None)])
            self.assertEqual(swift.calls, [
                ('GET', '/v1/account/container',
                 'format=json&marker=&end_marker=end&prefix=pre', {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
                ('GET', '/v1/account/container',
                 'format=json&marker=obj6&end_marker=end&prefix=pre',
                 {}, None),
                ('UPDATE', '/v1/.expiring_objects/' + ts.split('.')[0], '', {
                    'X-Backend-Allow-Private-Methods': 'True',
                    'X-Backend-Storage-Policy-Index': '0',
                    'X-Timestamp': ts}, mock.ANY),
            ])
            self.assertEqual(
                json.loads(swift.calls[-3].body),
                container_deleter.make_delete_jobs(
                    'account', 'container',
                    ['obj1', 'obj2', 'obj3', u'obj4-\N{SNOWMAN}', 'obj5'],
                    utils.Timestamp(ts)
                )
            )
            self.assertEqual(
                json.loads(swift.calls[-1].body),
                container_deleter.make_delete_jobs(
                    'account', 'container', ['obj6'],
                    utils.Timestamp(ts)
                )
            )
@@ -97,7 +97,7 @@ class TestS3ApiBucket(S3ApiTestCase):
            '/v1/AUTH_test/subdirs?delimiter=/&format=json&limit=3',
            swob.HTTPOk, {}, json.dumps([
                {'subdir': 'nothing/'},
                {'subdir': 'but/'},
                {'subdir': u'but-\u062a/'},
                {'subdir': 'subdirs/'},
            ]))
@@ -245,7 +245,46 @@ class TestS3ApiBucket(S3ApiTestCase):
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')
        self.assertEqual(elem.find('./NextMarker').text, 'but/')
        if six.PY2:
            self.assertEqual(elem.find('./NextMarker').text,
                             u'but-\u062a/'.encode('utf-8'))
        else:
            self.assertEqual(elem.find('./NextMarker').text,
                             u'but-\u062a/')

    def test_bucket_GET_is_truncated_url_encoded(self):
        bucket_name = 'junk'

        req = Request.blank(
            '/%s?encoding-type=url&max-keys=%d' % (
                bucket_name, len(self.objects)),
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'false')

        req = Request.blank(
            '/%s?encoding-type=url&max-keys=%d' % (
                bucket_name, len(self.objects) - 1),
            environ={'REQUEST_METHOD': 'GET'},
            headers={'Authorization': 'AWS test:tester:hmac',
                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')

        req = Request.blank('/subdirs?encoding-type=url&delimiter=/&'
                            'max-keys=2',
                            environ={'REQUEST_METHOD': 'GET'},
                            headers={'Authorization': 'AWS test:tester:hmac',
                                     'Date': self.get_date_header()})
        status, headers, body = self.call_s3api(req)
        elem = fromstring(body, 'ListBucketResult')
        self.assertEqual(elem.find('./IsTruncated').text, 'true')
        self.assertEqual(elem.find('./NextMarker').text,
                         quote(u'but-\u062a/'.encode('utf-8')))

    def test_bucket_GET_v2_is_truncated(self):
        bucket_name = 'junk'
@@ -697,8 +697,7 @@ class TestS3ApiObj(S3ApiTestCase):
        self.assertEqual(headers['Content-Disposition'], 'how are you')
        self.assertEqual(headers['Content-Encoding'], 'good and you')
        self.assertEqual(headers['Content-Language'], 'great')
        # Content-Type can't be set during an S3 copy operation
        self.assertIsNone(headers.get('Content-Type'))
        self.assertEqual(headers['Content-Type'], 'so')
        self.assertEqual(headers['Expires'], 'yeah')
        self.assertEqual(headers['X-Robots-Tag'], 'bye')
@@ -1187,5 +1186,6 @@ class TestS3ApiObjNonUTC(TestS3ApiObj):
        os.environ['TZ'] = self.orig_tz
        time.tzset()


if __name__ == '__main__':
    unittest.main()
@@ -16,7 +16,6 @@

from collections import Counter
import numbers
from six.moves import urllib
import unittest
import os
import tarfile
@@ -28,6 +27,7 @@ from shutil import rmtree
from tempfile import mkdtemp
from eventlet import sleep
from mock import patch, call
from test.unit import debug_logger
from test.unit.common.middleware.helpers import FakeSwift
from swift.common import utils, constraints
from swift.common.header_key_dict import HeaderKeyDict
@@ -100,42 +100,64 @@ def build_dir_tree(start_path, tree_obj):
    if isinstance(tree_obj, list):
        for obj in tree_obj:
            build_dir_tree(start_path, obj)
        return
    if isinstance(tree_obj, dict):
        for dir_name, obj in tree_obj.items():
            dir_path = os.path.join(start_path, dir_name)
            os.mkdir(dir_path)
            build_dir_tree(dir_path, obj)
    if isinstance(tree_obj, six.text_type):
        return
    if six.PY2 and isinstance(tree_obj, six.text_type):
        tree_obj = tree_obj.encode('utf8')
    if isinstance(tree_obj, str):
        obj_path = os.path.join(start_path, tree_obj)
        with open(obj_path, 'w+') as tree_file:
            tree_file.write('testing')
        return
    raise TypeError("can't build tree from %r" % tree_obj)


def build_tar_tree(tar, start_path, tree_obj, base_path=''):
    if six.PY2:
        if isinstance(start_path, six.text_type):
            start_path = start_path.encode('utf8')
        if isinstance(tree_obj, six.text_type):
            tree_obj = tree_obj.encode('utf8')
    else:
        if isinstance(start_path, bytes):
            start_path = start_path.decode('utf8', 'surrogateescape')
        if isinstance(tree_obj, bytes):
            tree_obj = tree_obj.decode('utf8', 'surrogateescape')

    if isinstance(tree_obj, list):
        for obj in tree_obj:
            build_tar_tree(tar, start_path, obj, base_path=base_path)
        return
    if isinstance(tree_obj, dict):
        for dir_name, obj in tree_obj.items():
            if six.PY2 and isinstance(dir_name, six.text_type):
                dir_name = dir_name.encode('utf8')
            elif not six.PY2 and isinstance(dir_name, bytes):
                dir_name = dir_name.decode('utf8', 'surrogateescape')
            dir_path = os.path.join(start_path, dir_name)
            tar_info = tarfile.TarInfo(dir_path[len(base_path):])
            tar_info.type = tarfile.DIRTYPE
            tar.addfile(tar_info)
            build_tar_tree(tar, dir_path, obj, base_path=base_path)
    if isinstance(tree_obj, six.text_type):
        tree_obj = tree_obj.encode('utf8')
        return
    if isinstance(tree_obj, str):
        obj_path = os.path.join(start_path, tree_obj)
        tar_info = tarfile.TarInfo('./' + obj_path[len(base_path):])
        tar.addfile(tar_info)
        return
    raise TypeError("can't build tree from %r" % tree_obj)


class TestUntarMetadata(unittest.TestCase):
    def setUp(self):
        self.app = FakeSwift()
        self.bulk = bulk.filter_factory({})(self.app)
        self.bulk.logger = debug_logger()
        self.testdir = mkdtemp(suffix='tmp_test_bulk')

    def tearDown(self):
@@ -174,7 +196,7 @@ class TestUntarMetadata(unittest.TestCase):
        #
        # Still, we'll support uploads with both. Just heap more code on the
        # problem until you can forget it's under there.
        with open(os.path.join(self.testdir, "obj1")) as fh1:
        with open(os.path.join(self.testdir, "obj1"), 'rb') as fh1:
            tar_info1 = tar_file.gettarinfo(fileobj=fh1,
                                            arcname="obj1")
            tar_info1.pax_headers[u'SCHILY.xattr.user.mime_type'] = \
@@ -186,7 +208,7 @@ class TestUntarMetadata(unittest.TestCase):
                u'gigantic bucket of coffee'
            tar_file.addfile(tar_info1, fh1)

        with open(os.path.join(self.testdir, "obj2")) as fh2:
        with open(os.path.join(self.testdir, "obj2"), 'rb') as fh2:
            tar_info2 = tar_file.gettarinfo(fileobj=fh2,
                                            arcname="obj2")
            tar_info2.pax_headers[
@@ -235,6 +257,7 @@ class TestUntar(unittest.TestCase):
    def setUp(self):
        self.app = FakeApp()
        self.bulk = bulk.filter_factory({})(self.app)
        self.bulk.logger = debug_logger()
        self.testdir = mkdtemp(suffix='tmp_test_bulk')

    def tearDown(self):
@@ -247,7 +270,7 @@ class TestUntar(unittest.TestCase):
            req, compress_format, out_content_type=out_content_type)
        first_chunk = next(iter)
        self.assertEqual(req.environ['eventlet.minimum_write_chunk_size'], 0)
        resp_body = first_chunk + ''.join(iter)
        resp_body = first_chunk + b''.join(iter)
        return resp_body

    def test_create_container_for_path(self):
@@ -273,7 +296,7 @@ class TestUntar(unittest.TestCase):
                     {'sub_dir2': ['sub2_file1', u'test obj \u2661']},
                     'sub_file1',
                     {'sub_dir3': [{'sub4_dir1': '../sub4 file1'}]},
                     {'sub_dir4': None},
                     {'sub_dir4': []},
                     ]}]

        build_dir_tree(self.testdir, dir_tree)
@@ -289,7 +312,7 @@ class TestUntar(unittest.TestCase):
            tar.close()
            req = Request.blank('/tar_works/acc/cont/')
            req.environ['wsgi.input'] = open(
                os.path.join(self.testdir, 'tar_works.tar' + extension))
                os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
            req.headers['transfer-encoding'] = 'chunked'
            resp_body = self.handle_extract_and_iter(req, compress_format)
            resp_data = utils.json.loads(resp_body)
@@ -298,15 +321,15 @@ class TestUntar(unittest.TestCase):
            # test out xml
            req = Request.blank('/tar_works/acc/cont/')
            req.environ['wsgi.input'] = open(
                os.path.join(self.testdir, 'tar_works.tar' + extension))
                os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
            req.headers['transfer-encoding'] = 'chunked'
            resp_body = self.handle_extract_and_iter(
                req, compress_format, 'application/xml')
            self.assertTrue(
                '<response_status>201 Created</response_status>' in
            self.assertIn(
                b'<response_status>201 Created</response_status>',
                resp_body)
            self.assertTrue(
                '<number_files_created>6</number_files_created>' in
            self.assertIn(
                b'<number_files_created>6</number_files_created>',
                resp_body)

            # test out nonexistent format
@@ -314,16 +337,16 @@ class TestUntar(unittest.TestCase):
                                headers={'Accept': 'good_xml'})
            req.environ['REQUEST_METHOD'] = 'PUT'
            req.environ['wsgi.input'] = open(
                os.path.join(self.testdir, 'tar_works.tar' + extension))
                os.path.join(self.testdir, 'tar_works.tar' + extension), 'rb')
            req.headers['transfer-encoding'] = 'chunked'

            def fake_start_response(*args, **kwargs):
                pass

            app_iter = self.bulk(req.environ, fake_start_response)
            resp_body = ''.join([i for i in app_iter])
            resp_body = b''.join(app_iter)

            self.assertTrue('Response Status: 406' in resp_body)
            self.assertIn(b'Response Status: 406', resp_body)

    def test_extract_call(self):
        base_name = 'base_works_gz'
@@ -344,13 +367,13 @@ class TestUntar(unittest.TestCase):

        req = Request.blank('/tar_works/acc/cont/?extract-archive=tar.gz')
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_works.tar.gz'))
            os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
        self.bulk(req.environ, fake_start_response)
        self.assertEqual(self.app.calls, 1)

        self.app.calls = 0
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_works.tar.gz'))
            os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
        req.headers['transfer-encoding'] = 'Chunked'
        req.method = 'PUT'
        app_iter = self.bulk(req.environ, fake_start_response)
@@ -362,9 +385,9 @@ class TestUntar(unittest.TestCase):
        req.method = 'PUT'
        req.headers['transfer-encoding'] = 'Chunked'
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_works.tar.gz'))
            os.path.join(self.testdir, 'tar_works.tar.gz'), 'rb')
        t = self.bulk(req.environ, fake_start_response)
        self.assertEqual(t[0], "Unsupported archive format")
        self.assertEqual(t, [b"Unsupported archive format"])

        tar = tarfile.open(name=os.path.join(self.testdir,
                                             'tar_works.tar'),
@@ -376,20 +399,20 @@ class TestUntar(unittest.TestCase):
        req.method = 'PUT'
        req.headers['transfer-encoding'] = 'Chunked'
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_works.tar'))
            os.path.join(self.testdir, 'tar_works.tar'), 'rb')
        app_iter = self.bulk(req.environ, fake_start_response)
        list(app_iter)  # iter over resp
        self.assertEqual(self.app.calls, 7)

    def test_bad_container(self):
        req = Request.blank('/invalid/', body='')
        req = Request.blank('/invalid/', body=b'')
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertTrue('404 Not Found' in resp_body)
        self.assertIn(b'404 Not Found', resp_body)

    def test_content_length_required(self):
        req = Request.blank('/create_cont_fail/acc/cont')
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertTrue('411 Length Required' in resp_body)
        self.assertIn(b'411 Length Required', resp_body)

    def test_bad_tar(self):
        req = Request.blank('/create_cont_fail/acc/cont', body='')
@@ -399,7 +422,7 @@ class TestUntar(unittest.TestCase):

        with patch.object(tarfile, 'open', bad_open):
            resp_body = self.handle_extract_and_iter(req, '')
        self.assertTrue('400 Bad Request' in resp_body)
        self.assertIn(b'400 Bad Request', resp_body)

    def build_tar(self, dir_tree=None):
        if not dir_tree:
@@ -424,7 +447,7 @@ class TestUntar(unittest.TestCase):
        self.build_tar(dir_tree)
        req = Request.blank('/tar_works/acc/')
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        resp_data = utils.json.loads(resp_body)
@@ -435,7 +458,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/unauth/acc/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertEqual(self.app.calls, 1)
@@ -448,7 +471,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/create_obj_unauth/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertEqual(self.app.calls, 2)
@@ -463,7 +486,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/tar_works/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertEqual(self.app.calls, 6)
@@ -478,7 +501,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/tar_works/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, 'gz')
        self.assertEqual(self.app.calls, 0)
@@ -494,8 +517,8 @@ class TestUntar(unittest.TestCase):
        self.app.calls = 0
        req = Request.blank('/tar_works/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        self.assertEqual(self.app.calls, 5)
@@ -519,7 +542,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/tar_works/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(
            os.path.join(self.testdir, 'tar_works.tar'))
            os.path.join(self.testdir, 'tar_works.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        resp_data = utils.json.loads(resp_body)
@@ -557,7 +580,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/create_cont_fail/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        resp_data = utils.json.loads(resp_body)
@@ -569,7 +592,7 @@ class TestUntar(unittest.TestCase):
        req = Request.blank('/create_cont_fail/acc/cont/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'

        def bad_create(req, path):
@@ -586,13 +609,13 @@ class TestUntar(unittest.TestCase):

    def test_extract_tar_fail_unicode(self):
        dir_tree = [{'sub_dir1': ['sub1_file1']},
                    {'sub_dir2': ['sub2\xdefile1', 'sub2_file2']},
                    {'sub_\xdedir3': [{'sub4_dir1': 'sub4_file1'}]}]
                    {'sub_dir2': [b'sub2\xdefile1', 'sub2_file2']},
                    {b'sub_\xdedir3': [{'sub4_dir1': 'sub4_file1'}]}]
        self.build_tar(dir_tree)
        req = Request.blank('/tar_works/acc/',
                            headers={'Accept': 'application/json'})
        req.environ['wsgi.input'] = open(os.path.join(self.testdir,
                                                      'tar_fails.tar'))
                                                      'tar_fails.tar'), 'rb')
        req.headers['transfer-encoding'] = 'chunked'
        resp_body = self.handle_extract_and_iter(req, '')
        resp_data = utils.json.loads(resp_body)
@ -608,13 +631,13 @@ class TestUntar(unittest.TestCase):
|
|||
txt_body = bulk.get_response_body(
|
||||
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']],
|
||||
"doesn't matter for text")
|
||||
self.assertTrue('hey: there' in txt_body)
|
||||
self.assertIn(b'hey: there', txt_body)
|
||||
xml_body = bulk.get_response_body(
|
||||
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']],
|
||||
'root_tag')
|
||||
self.assertTrue('>' in xml_body)
|
||||
self.assertTrue(xml_body.startswith('<root_tag>\n'))
|
||||
self.assertTrue(xml_body.endswith('\n</root_tag>\n'))
|
||||
self.assertIn(b'>', xml_body)
|
||||
self.assertTrue(xml_body.startswith(b'<root_tag>\n'))
|
||||
self.assertTrue(xml_body.endswith(b'\n</root_tag>\n'))
|
||||
|
||||
|
||||
class TestDelete(unittest.TestCase):
|
||||
|
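Note: the assertion rewrites all follow one rule: WSGI bodies are byte strings under Python 3, so expected values become b'' literals, and the weak assertTrue(x in y) idiom becomes assertIn, which reports both operands on failure. For instance:

    # Python 3: response bodies are bytes; mixing str and bytes fails.
    body = b'<root_tag>\nhey: there\n</root_tag>\n'
    assert b'hey: there' in body   # what assertIn(b'hey: there', body) checks
    # 'hey: there' in body would raise:
    # TypeError: a bytes-like object is required, not 'str'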
@@ -623,6 +646,7 @@ class TestDelete(unittest.TestCase):
     def setUp(self):
         self.app = FakeApp()
         self.bulk = bulk.filter_factory(self.conf)(self.app)
+        self.bulk.logger = debug_logger()

     def tearDown(self):
         self.app.calls = 0

@@ -633,7 +657,7 @@ class TestDelete(unittest.TestCase):
             req, out_content_type=out_content_type)
         first_chunk = next(iter)
         self.assertEqual(req.environ['eventlet.minimum_write_chunk_size'], 0)
-        resp_body = first_chunk + ''.join(iter)
+        resp_body = first_chunk + b''.join(iter)
         return resp_body

     def test_bulk_delete_uses_predefined_object_errors(self):

@@ -645,7 +669,7 @@ class TestDelete(unittest.TestCase):
             {'name': '/c/file_c', 'error': {'code': HTTP_UNAUTHORIZED,
                                             'message': 'unauthorized'}},
             {'name': '/c/file_d'}]
-        resp_body = ''.join(self.bulk.handle_delete_iter(
+        resp_body = b''.join(self.bulk.handle_delete_iter(
             req, objs_to_delete=objs_to_delete,
             out_content_type='application/json'))
         self.assertEqual(set(self.app.delete_paths),

@@ -756,41 +780,41 @@ class TestDelete(unittest.TestCase):
         req.environ['wsgi.input'] = BytesIO(data)
         req.content_length = len(data)
         resp_body = self.handle_delete_and_iter(req)
-        self.assertTrue('413 Request Entity Too Large' in resp_body)
+        self.assertIn(b'413 Request Entity Too Large', resp_body)

     def test_bulk_delete_works_unicode(self):
         body = (u'/c/ obj \u2661\r\n'.encode('utf8') +
-                'c/ objbadutf8\r\n' +
-                '/c/f\xdebadutf8\n')
+                b'c/ objbadutf8\r\n' +
+                b'/c/f\xdebadutf8\n')
         req = Request.blank('/delete_works/AUTH_Acc', body=body,
                             headers={'Accept': 'application/json'})
         req.method = 'POST'
         resp_body = self.handle_delete_and_iter(req)
         self.assertEqual(
-            Counter(self.app.delete_paths),
-            Counter(['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1',
-                     '/delete_works/AUTH_Acc/c/ objbadutf8']))
+            dict(Counter(self.app.delete_paths)),
+            dict(Counter(['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1',
+                          '/delete_works/AUTH_Acc/c/ objbadutf8'])))

         self.assertEqual(self.app.calls, 2)
         resp_data = utils.json.loads(resp_body)
         self.assertEqual(resp_data['Number Deleted'], 1)
         self.assertEqual(len(resp_data['Errors']), 2)
         self.assertEqual(
-            Counter(map(tuple, resp_data['Errors'])),
-            Counter([(urllib.parse.quote('c/ objbadutf8'),
-                      '412 Precondition Failed'),
-                     (urllib.parse.quote('/c/f\xdebadutf8'),
-                      '412 Precondition Failed')]))
+            dict(Counter(map(tuple, resp_data['Errors']))),
+            dict(Counter([('c/%20objbadutf8',
                            '412 Precondition Failed'),
                           ('/c/f%DEbadutf8',
                            '412 Precondition Failed')])))

     def test_bulk_delete_no_body(self):
         req = Request.blank('/unauth/AUTH_acc/')
         resp_body = self.handle_delete_and_iter(req)
-        self.assertTrue('411 Length Required' in resp_body)
+        self.assertIn(b'411 Length Required', resp_body)

     def test_bulk_delete_no_files_in_body(self):
         req = Request.blank('/unauth/AUTH_acc/', body=' ')
         resp_body = self.handle_delete_and_iter(req)
-        self.assertTrue('400 Bad Request' in resp_body)
+        self.assertIn(b'400 Bad Request', resp_body)

     def test_bulk_delete_unauth(self):
         req = Request.blank('/unauth/AUTH_acc/', body='/c/f\n/c/f_ok\n',
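Note: the hard-coded 'c/%20objbadutf8' and '/c/f%DEbadutf8' replace urllib.parse.quote() calls because quoting is not version-neutral: Python 2 percent-encodes the raw bytes of a native str, while Python 3 UTF-8-encodes text first, turning '\xde' into %C3%9E instead of %DE. Pinning the expected value keeps the assertion stable on both; compare:

    from six.moves.urllib.parse import quote

    # Python 3 encodes text as UTF-8 before percent-encoding:
    print(quote('/c/f\xdebadutf8'))    # /c/f%C3%9Ebadutf8 (py3)
    # Quoting the raw bytes reproduces what the middleware emits:
    print(quote(b'/c/f\xdebadutf8'))   # /c/f%DEbadutf8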
@@ -818,7 +842,7 @@ class TestDelete(unittest.TestCase):
     def test_bulk_delete_bad_path(self):
         req = Request.blank('/delete_cont_fail/')
         resp_body = self.handle_delete_and_iter(req)
-        self.assertTrue('404 Not Found' in resp_body)
+        self.assertIn(b'404 Not Found', resp_body)

     def test_bulk_delete_container_delete(self):
         req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',

@@ -889,7 +913,7 @@ class TestDelete(unittest.TestCase):
         req = Request.blank('/delete_works/AUTH_Acc', body=body)
         req.method = 'POST'
         resp_body = self.handle_delete_and_iter(req)
-        self.assertTrue('400 Bad Request' in resp_body)
+        self.assertIn(b'400 Bad Request', resp_body)

     def test_bulk_delete_max_failures(self):
         body = '\n'.join([
File diff suppressed because it is too large
@@ -18,7 +18,7 @@ import unittest
 import json
 import mock

-from six.moves.urllib.parse import quote, parse_qs
+from six.moves.urllib.parse import parse_qs
 from swift.common import swob
 from swift.common.middleware import symlink, copy, versioned_writes, \
     listing_formats

@@ -56,7 +56,7 @@ class TestSymlinkMiddlewareBase(unittest.TestCase):
             headers[0] = h

         body_iter = app(req.environ, start_response)
-        body = ''
+        body = b''
         caught_exc = None
         try:
             for chunk in body_iter:

@@ -112,15 +112,15 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             body='')
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '412 Precondition Failed')
-        self.assertEqual(body, "X-Symlink-Target header must be of "
-                               "the form <container name>/<object name>")
+        self.assertEqual(body, b"X-Symlink-Target header must be of "
+                               b"the form <container name>/<object name>")

     def test_symlink_put_non_zero_length(self):
         req = Request.blank('/v1/a/c/symlink', method='PUT', body='req_body',
                             headers={'X-Symlink-Target': 'c1/o'})
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '400 Bad Request')
-        self.assertEqual(body, 'Symlink requests require a zero byte body')
+        self.assertEqual(body, b'Symlink requests require a zero byte body')

     def test_symlink_put_bad_object_header(self):
         req = Request.blank('/v1/a/c/symlink', method='PUT',

@@ -128,8 +128,8 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             body='')
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, "412 Precondition Failed")
-        self.assertEqual(body, "X-Symlink-Target header must be of "
-                               "the form <container name>/<object name>")
+        self.assertEqual(body, b"X-Symlink-Target header must be of "
+                               b"the form <container name>/<object name>")

     def test_symlink_put_bad_account_header(self):
         req = Request.blank('/v1/a/c/symlink', method='PUT',

@@ -138,7 +138,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             body='')
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, "412 Precondition Failed")
-        self.assertEqual(body, "Account name cannot contain slashes")
+        self.assertEqual(body, b"Account name cannot contain slashes")

     def test_get_symlink(self):
         self.app.register('GET', '/v1/a/c/symlink', swob.HTTPOk,

@@ -176,7 +176,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             headers=req_headers)
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '200 OK')
-        self.assertEqual(body, 'resp_body')
+        self.assertEqual(body, b'resp_body')
         self.assertNotIn('X-Symlink-Target', dict(headers))
         self.assertNotIn('X-Symlink-Target-Account', dict(headers))
         self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)

@@ -195,7 +195,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
         req = Request.blank('/v1/a/c/symlink', method='GET')
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '404 Not Found')
-        self.assertEqual(body, '')
+        self.assertEqual(body, b'')
         self.assertNotIn('X-Symlink-Target', dict(headers))
         self.assertNotIn('X-Symlink-Target-Account', dict(headers))
         self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)

@@ -211,8 +211,8 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '416 Requested Range Not Satisfiable')
         self.assertEqual(
-            body, '<html><h1>Requested Range Not Satisfiable</h1>'
-                  '<p>The Range requested is not available.</p></html>')
+            body, b'<html><h1>Requested Range Not Satisfiable</h1>'
+                  b'<p>The Range requested is not available.</p></html>')
         self.assertNotIn('X-Symlink-Target', dict(headers))
         self.assertNotIn('X-Symlink-Target-Account', dict(headers))
         self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)

@@ -228,7 +228,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             headers={'Range': 'bytes=1-2'})
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '200 OK')
-        self.assertEqual(body, 'es')
+        self.assertEqual(body, b'es')
         self.assertNotIn('X-Symlink-Target', dict(headers))
         self.assertNotIn('X-Symlink-Target-Account', dict(headers))
         self.assertIn(('Content-Location', '/v1/a2/c1/o'), headers)

@@ -241,7 +241,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):

         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '200 OK')
-        self.assertEqual(body, 'resp_body')
+        self.assertEqual(body, b'resp_body')

         # Assert special headers for symlink are not in response
         self.assertNotIn('X-Symlink-Target', dict(headers))

@@ -359,9 +359,10 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             headers={'X-Object-Meta-Color': 'Red'})
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '307 Temporary Redirect')
-        self.assertEqual(body,
-                         'The requested POST was applied to a symlink. POST '
-                         'directly to the target to apply requested metadata.')
+        self.assertEqual(
+            body,
+            b'The requested POST was applied to a symlink. POST '
+            b'directly to the target to apply requested metadata.')
         method, path, hdrs = self.app.calls_with_headers[0]
         val = hdrs.get('X-Object-Meta-Color')
         self.assertEqual(val, 'Red')

@@ -379,8 +380,8 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
                             headers={'X-Symlink-Target': 'c1/regular_obj'})
         status, headers, body = self.call_sym(req)
         self.assertEqual(status, '400 Bad Request')
-        self.assertEqual(body, "A PUT request is required to set a symlink "
-                               "target")
+        self.assertEqual(body, b"A PUT request is required to set a symlink "
+                               b"target")

     def test_symlink_post_but_fail_at_server(self):
         self.app.register('POST', '/v1/a/c/o', swob.HTTPNotFound, {})
@@ -405,16 +406,15 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
         # URL encoded is safe
         do_test({'X-Symlink-Target': 'c1%2Fo1'})
         # URL encoded + multibytes is also safe
-        do_test(
-            {'X-Symlink-Target':
-             u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'})
         target = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
-        encoded_target = quote(target.encode('utf-8'), '')
-        do_test({'X-Symlink-Target': encoded_target})
+        target = swob.bytes_to_wsgi(target.encode('utf8'))
+        do_test({'X-Symlink-Target': target})
+        do_test({'X-Symlink-Target': swob.wsgi_quote(target)})

+        target = swob.bytes_to_wsgi(u'\u30b0\u30e9\u30d6\u30eb'.encode('utf8'))
         do_test(
             {'X-Symlink-Target': 'cont/obj',
-             'X-Symlink-Target-Account': u'\u30b0\u30e9\u30d6\u30eb'})
+             'X-Symlink-Target-Account': target})

     def test_check_symlink_header_invalid_format(self):
         def do_test(headers, status, err_msg):
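Note: swob.bytes_to_wsgi and swob.wsgi_quote encapsulate how WSGI represents header values on Python 3: "native strings" whose code points map one-to-one onto bytes via latin-1. A rough sketch of the idea (simplified; the real helpers live in swift.common.swob):

    import six
    from six.moves.urllib.parse import quote

    def bytes_to_wsgi(byte_str):
        # py3 WSGI strings carry raw bytes as latin-1 text; on py2 a
        # native str already is bytes.
        return byte_str if six.PY2 else byte_str.decode('latin-1')

    def wsgi_quote(wsgi_str):
        # Percent-encode a WSGI string; latin-1 round-trips the
        # underlying bytes exactly.
        return quote(wsgi_str if six.PY2 else wsgi_str.encode('latin-1'))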
@@ -428,54 +428,59 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):

         do_test({'X-Symlink-Target': '/c1/o1'},
                 '412 Precondition Failed',
-                'X-Symlink-Target header must be of the '
-                'form <container name>/<object name>')
+                b'X-Symlink-Target header must be of the '
+                b'form <container name>/<object name>')
         do_test({'X-Symlink-Target': 'c1o1'},
                 '412 Precondition Failed',
-                'X-Symlink-Target header must be of the '
-                'form <container name>/<object name>')
+                b'X-Symlink-Target header must be of the '
+                b'form <container name>/<object name>')
         do_test({'X-Symlink-Target': 'c1/o1',
                  'X-Symlink-Target-Account': '/another'},
                 '412 Precondition Failed',
-                'Account name cannot contain slashes')
+                b'Account name cannot contain slashes')
         do_test({'X-Symlink-Target': 'c1/o1',
                  'X-Symlink-Target-Account': 'an/other'},
                 '412 Precondition Failed',
-                'Account name cannot contain slashes')
+                b'Account name cannot contain slashes')
         # url encoded case
         do_test({'X-Symlink-Target': '%2Fc1%2Fo1'},
                 '412 Precondition Failed',
-                'X-Symlink-Target header must be of the '
-                'form <container name>/<object name>')
+                b'X-Symlink-Target header must be of the '
+                b'form <container name>/<object name>')
         do_test({'X-Symlink-Target': 'c1/o1',
                  'X-Symlink-Target-Account': '%2Fanother'},
                 '412 Precondition Failed',
-                'Account name cannot contain slashes')
+                b'Account name cannot contain slashes')
         do_test({'X-Symlink-Target': 'c1/o1',
                  'X-Symlink-Target-Account': 'an%2Fother'},
                 '412 Precondition Failed',
-                'Account name cannot contain slashes')
+                b'Account name cannot contain slashes')
         # with multi-bytes
         do_test(
             {'X-Symlink-Target':
              u'/\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'},
             '412 Precondition Failed',
-            'X-Symlink-Target header must be of the '
-            'form <container name>/<object name>')
+            b'X-Symlink-Target header must be of the '
+            b'form <container name>/<object name>')
         target = u'/\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
-        encoded_target = quote(target.encode('utf-8'), '')
+        target = swob.bytes_to_wsgi(target.encode('utf8'))
         do_test(
-            {'X-Symlink-Target': encoded_target},
+            {'X-Symlink-Target': swob.wsgi_quote(target)},
             '412 Precondition Failed',
-            'X-Symlink-Target header must be of the '
-            'form <container name>/<object name>')
+            b'X-Symlink-Target header must be of the '
+            b'form <container name>/<object name>')
         account = u'\u30b0\u30e9\u30d6\u30eb/\u30a2\u30ba\u30ec\u30f3'
-        encoded_account = quote(account.encode('utf-8'), '')
         do_test(
             {'X-Symlink-Target': 'c/o',
-             'X-Symlink-Target-Account': encoded_account},
+             'X-Symlink-Target-Account': account},
             '412 Precondition Failed',
-            'Account name cannot contain slashes')
+            b'Account name cannot contain slashes')
+        account = swob.bytes_to_wsgi(account.encode('utf8'))
+        do_test(
+            {'X-Symlink-Target': 'c/o',
+             'X-Symlink-Target-Account': swob.wsgi_quote(account)},
+            '412 Precondition Failed',
+            b'Account name cannot contain slashes')

     def test_check_symlink_header_points_to_itself(self):
         req = Request.blank('/v1/a/c/o', method='PUT',
@@ -483,7 +488,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
         with self.assertRaises(swob.HTTPException) as cm:
             symlink._check_symlink_header(req)
         self.assertEqual(cm.exception.status, '400 Bad Request')
-        self.assertEqual(cm.exception.body, 'Symlink cannot target itself')
+        self.assertEqual(cm.exception.body, b'Symlink cannot target itself')

         # Even if set account to itself, it will fail as well
         req = Request.blank('/v1/a/c/o', method='PUT',

@@ -492,7 +497,7 @@ class TestSymlinkMiddleware(TestSymlinkMiddlewareBase):
         with self.assertRaises(swob.HTTPException) as cm:
             symlink._check_symlink_header(req)
         self.assertEqual(cm.exception.status, '400 Bad Request')
-        self.assertEqual(cm.exception.body, 'Symlink cannot target itself')
+        self.assertEqual(cm.exception.body, b'Symlink cannot target itself')

         # sanity, the case to another account is safe
         req = Request.blank('/v1/a/c/o', method='PUT',

@@ -866,10 +871,10 @@ class TestSymlinkContainerContext(TestSymlinkMiddlewareBase):

     def test_no_affect_for_account_request(self):
         with mock.patch.object(self.sym, 'app') as mock_app:
-            mock_app.return_value = 'ok'
+            mock_app.return_value = (b'ok',)
             req = Request.blank(path='/v1/a')
             status, headers, body = self.call_sym(req)
-            self.assertEqual(body, 'ok')
+            self.assertEqual(body, b'ok')

     def test_get_container_simple_with_listing_format(self):
         self.app.register(

@@ -916,14 +921,14 @@ class TestSymlinkContainerContext(TestSymlinkMiddlewareBase):
         req = Request.blank(path='/v1/a/c?format=xml')
         status, headers, body = self.call_app(req, app=self.lf)
         self.assertEqual(status, '200 OK')
-        self.assertEqual(body.split('\n'), [
-            '<?xml version="1.0" encoding="UTF-8"?>',
-            '<container name="c"><object><name>sym_obj</name>'
-            '<hash>etag</hash><bytes>0</bytes>'
-            '<content_type>text/plain</content_type>'
-            '<last_modified>2014-11-21T14:23:02.206740</last_modified>'
-            '</object>'
-            '<object><name>normal_obj</name><hash>etag2</hash>'
-            '<bytes>32</bytes><content_type>text/plain</content_type>'
-            '<last_modified>2014-11-21T14:14:27.409100</last_modified>'
-            '</object></container>'])
+        self.assertEqual(body.split(b'\n'), [
+            b'<?xml version="1.0" encoding="UTF-8"?>',
+            b'<container name="c"><object><name>sym_obj</name>'
+            b'<hash>etag</hash><bytes>0</bytes>'
+            b'<content_type>text/plain</content_type>'
+            b'<last_modified>2014-11-21T14:23:02.206740</last_modified>'
+            b'</object>'
+            b'<object><name>normal_obj</name><hash>etag2</hash>'
+            b'<bytes>32</bytes><content_type>text/plain</content_type>'
+            b'<last_modified>2014-11-21T14:14:27.409100</last_modified>'
+            b'</object></container>'])
@@ -354,10 +354,8 @@ class TestContainerController(unittest.TestCase):
         req.content_length = 0
         resp = server_handler.OPTIONS(req)
         self.assertEqual(200, resp.status_int)
-        for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
-            self.assertTrue(
-                verb in resp.headers['Allow'].split(', '))
-        self.assertEqual(len(resp.headers['Allow'].split(', ')), 7)
+        self.assertEqual(sorted(resp.headers['Allow'].split(', ')), sorted(
+            'OPTIONS GET POST PUT DELETE HEAD REPLICATE UPDATE'.split()))
         self.assertEqual(resp.headers['Server'],
                          (self.controller.server_type + '/' + swift_version))

@@ -1477,6 +1475,115 @@ class TestContainerController(unittest.TestCase):
         self.assertEqual(mock_statvfs.mock_calls,
                          [mock.call(os.path.join(self.testdir, 'sda1'))])

+    def test_UPDATE(self):
+        ts_iter = make_timestamp_iter()
+        req = Request.blank(
+            '/sda1/p/a/c',
+            environ={'REQUEST_METHOD': 'PUT'},
+            headers={'X-Timestamp': next(ts_iter).internal})
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 201)
+
+        ts_iter = make_timestamp_iter()
+        req = Request.blank(
+            '/sda1/p/a/c',
+            environ={'REQUEST_METHOD': 'UPDATE'},
+            headers={'X-Timestamp': next(ts_iter).internal},
+            body='[invalid json')
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 400)
+
+        ts_iter = make_timestamp_iter()
+        req = Request.blank(
+            '/sda1/p/a/c',
+            environ={'REQUEST_METHOD': 'GET'},
+            headers={'X-Timestamp': next(ts_iter).internal})
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 204)
+
+        obj_ts = next(ts_iter)
+        req = Request.blank(
+            '/sda1/p/a/c',
+            environ={'REQUEST_METHOD': 'UPDATE'},
+            headers={'X-Timestamp': next(ts_iter).internal},
+            body=json.dumps([
+                {'name': 'some obj', 'deleted': 0,
+                 'created_at': obj_ts.internal,
+                 'etag': 'whatever', 'size': 1234,
+                 'storage_policy_index': POLICIES.default.idx,
+                 'content_type': 'foo/bar'},
+                {'name': 'some tombstone', 'deleted': 1,
+                 'created_at': next(ts_iter).internal,
+                 'etag': 'noetag', 'size': 0,
+                 'storage_policy_index': POLICIES.default.idx,
+                 'content_type': 'application/deleted'},
+                {'name': 'wrong policy', 'deleted': 0,
+                 'created_at': next(ts_iter).internal,
+                 'etag': 'whatever', 'size': 6789,
+                 'storage_policy_index': 1,
+                 'content_type': 'foo/bar'},
+            ]))
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 202)
+
+        req = Request.blank(
+            '/sda1/p/a/c?format=json',
+            environ={'REQUEST_METHOD': 'GET'},
+            headers={'X-Timestamp': next(ts_iter).internal})
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 200)
+        self.assertEqual(json.loads(resp.body), [
+            {'name': 'some obj', 'hash': 'whatever', 'bytes': 1234,
+             'content_type': 'foo/bar', 'last_modified': obj_ts.isoformat},
+        ])
+
+    def test_UPDATE_autocreate(self):
+        ts_iter = make_timestamp_iter()
+        req = Request.blank(
+            '/sda1/p/.a/c',
+            environ={'REQUEST_METHOD': 'GET'},
+            headers={'X-Timestamp': next(ts_iter).internal})
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 404)
+
+        obj_ts = next(ts_iter)
+        req = Request.blank(
+            '/sda1/p/.a/c',
+            environ={'REQUEST_METHOD': 'UPDATE'},
+            headers={
+                'X-Timestamp': next(ts_iter).internal,
+                'X-Backend-Storage-Policy-Index': str(POLICIES.default.idx)},
+            body=json.dumps([
+                {'name': 'some obj', 'deleted': 0,
+                 'created_at': obj_ts.internal,
+                 'etag': 'whatever', 'size': 1234,
+                 'storage_policy_index': POLICIES.default.idx,
+                 'content_type': 'foo/bar'},
+                {'name': 'some tombstone', 'deleted': 1,
+                 'created_at': next(ts_iter).internal,
+                 'etag': 'noetag', 'size': 0,
+                 'storage_policy_index': POLICIES.default.idx,
+                 'content_type': 'application/deleted'},
+                {'name': 'wrong policy', 'deleted': 0,
+                 'created_at': next(ts_iter).internal,
+                 'etag': 'whatever', 'size': 6789,
+                 'storage_policy_index': 1,
+                 'content_type': 'foo/bar'},
+            ]))
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 202, resp.body)
+
+        req = Request.blank(
+            '/sda1/p/.a/c?format=json',
+            environ={'REQUEST_METHOD': 'GET'},
+            headers={'X-Timestamp': next(ts_iter).internal})
+        resp = req.get_response(self.controller)
+        self.assertEqual(resp.status_int, 200)
+        self.assertEqual(json.loads(resp.body), [
+            {'name': 'some obj', 'hash': 'whatever', 'bytes': 1234,
+             'content_type': 'foo/bar', 'last_modified': obj_ts.isoformat},
+        ])
+
     def test_DELETE(self):
         ts_iter = make_timestamp_iter()
         req = Request.blank(
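Note: these new tests exercise the container server's backend UPDATE verb: PUT creates the container, malformed JSON earns a 400, and a well-formed JSON list of object records earns a 202 and shows up in subsequent listings. For reference, a record as the test composes it (field values here are illustrative; the accepted shape is whatever the server's merge path consumes):

    import json

    record = {
        'name': 'some obj',                # object name in the container
        'deleted': 0,                      # 1 would mark a tombstone
        'created_at': '1552260000.00000',  # internal timestamp string
        'etag': 'whatever',
        'size': 1234,
        'storage_policy_index': 0,
        'content_type': 'foo/bar',
    }
    body = json.dumps([record])   # UPDATE takes a JSON *list* of records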
@@ -4591,7 +4698,7 @@ class TestNonLegacyDefaultStoragePolicy(TestContainerController):
     def _update_object_put_headers(self, req):
         """
         Add policy index headers for containers created with default policy
-        - which in this TestCase is 1.
+        - which in this TestCase is 2.
         """
         req.headers['X-Backend-Storage-Policy-Index'] = \
             str(POLICIES.default.idx)
@@ -34,7 +34,7 @@ from random import shuffle, randint
 from shutil import rmtree
 from time import time
 from tempfile import mkdtemp
-from hashlib import md5
+from hashlib import md5 as _md5
 from contextlib import closing, contextmanager
 from gzip import GzipFile
 import pyeclib.ec_iface

@@ -70,6 +70,26 @@ test_policies = [
 ]


+class md5(object):
+    def __init__(self, s=b''):
+        if not isinstance(s, bytes):
+            s = s.encode('ascii')
+        self.md = _md5(s)
+
+    def update(self, s=b''):
+        if not isinstance(s, bytes):
+            s = s.encode('ascii')
+        return self.md.update(s)
+
+    @property
+    def hexdigest(self):
+        return self.md.hexdigest
+
+    @property
+    def digest(self):
+        return self.md.digest
+
+
 def find_paths_with_matching_suffixes(needed_matches=2, needed_suffixes=3):
     paths = defaultdict(list)
     while True:
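Note: hashlib's md5 refuses text input on Python 3, so the test module shadows it with a wrapper that ASCII-encodes str arguments; exposing hexdigest/digest as properties that return the bound methods keeps existing md5(data).hexdigest() call sites working unchanged. With the wrapper above:

    # Both spellings now work on py2 and py3:
    print(md5('etag-source').hexdigest())    # str is ASCII-encoded first
    print(md5(b'etag-source').hexdigest())   # bytes pass straight through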
@@ -298,49 +318,63 @@ class TestDiskFileModuleMethods(unittest.TestCase):
                     u'X-Object-Meta-Strange': u'should be bytes',
                     b'X-Object-Meta-x\xff': b'not utf8 \xff',
                     u'X-Object-Meta-y\xe8': u'not ascii \xe8'}
-        expected = {b'name': b'/a/c/o',
+        as_bytes = {b'name': b'/a/c/o',
                     b'Content-Length': 99,
                     b'X-Object-Sysmeta-Ec-Frag-Index': 4,
                     b'X-Object-Meta-Strange': b'should be bytes',
                     b'X-Object-Meta-x\xff': b'not utf8 \xff',
                     b'X-Object-Meta-y\xc3\xa8': b'not ascii \xc3\xa8'}
+        if six.PY2:
+            as_native = as_bytes
+        else:
+            as_native = dict((k.decode('utf-8', 'surrogateescape'),
+                              v if isinstance(v, int) else
+                              v.decode('utf-8', 'surrogateescape'))
+                             for k, v in as_bytes.items())

-        def check_metadata():
+        def check_metadata(expected, typ):
             with open(path, 'rb') as fd:
                 actual = diskfile.read_metadata(fd)
             self.assertEqual(expected, actual)
-            for k in actual.keys():
-                self.assertIsInstance(k, six.binary_type)
-            for k in (b'name',
-                      b'X-Object-Meta-Strange',
-                      b'X-Object-Meta-x\xff',
-                      b'X-Object-Meta-y\xc3\xa8'):
-                self.assertIsInstance(actual[k], six.binary_type)
+            for k, v in actual.items():
+                self.assertIsInstance(k, typ)
+                self.assertIsInstance(v, (typ, int))

+        # Check can write raw bytes
+        with open(path, 'wb') as fd:
+            diskfile.write_metadata(fd, as_bytes)
+        check_metadata(as_native, str)
+        # Check can write native (with surrogates on py3)
+        with open(path, 'wb') as fd:
+            diskfile.write_metadata(fd, as_native)
+        check_metadata(as_native, str)
+        # Check can write some crazy mix
         with open(path, 'wb') as fd:
             diskfile.write_metadata(fd, metadata)
-        check_metadata()
+        check_metadata(as_native, str)

-        # mock the read path to check the write path encoded persisted metadata
-        with mock.patch.object(diskfile, '_encode_metadata', lambda x: x):
-            check_metadata()
+        # simulate a legacy diskfile that might have persisted unicode metadata
+        with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
+            check_metadata(as_bytes, bytes)

-        # simulate a legacy diskfile that might have persisted unicode metadata
+        # simulate a legacy diskfile that might have persisted
+        # (some) unicode metadata
         with mock.patch.object(diskfile, '_encode_metadata', lambda x: x):
             with open(path, 'wb') as fd:
                 diskfile.write_metadata(fd, metadata)
-        # sanity check, while still mocked, that we did persist unicode
+        # sanity check: mock read path again to see that we did persist unicode
         with mock.patch.object(diskfile, '_decode_metadata', lambda x: x):
             with open(path, 'rb') as fd:
                 actual = diskfile.read_metadata(fd)
             for k, v in actual.items():
-                if k == u'X-Object-Meta-Strange':
-                    self.assertIsInstance(k, str)
-                    self.assertIsInstance(v, str)
+                if isinstance(k, six.text_type) and \
+                        k == u'X-Object-Meta-Strange':
+                    self.assertIsInstance(v, six.text_type)
                     break
             else:
                 self.fail('Did not find X-Object-Meta-Strange')
         # check that read_metadata converts binary_type
-        check_metadata()
+        check_metadata(as_native, str)


 @patch_policies
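Note: the as_native mapping relies on the surrogateescape error handler, which is how the py3 diskfile code round-trips arbitrary, possibly non-UTF-8 metadata bytes through native strings without loss:

    raw = b'not utf8 \xff'
    # Undecodable bytes become lone surrogates instead of raising...
    text = raw.decode('utf-8', 'surrogateescape')   # ends with u'\udcff'
    # ...and encode back to exactly the original bytes.
    assert text.encode('utf-8', 'surrogateescape') == raw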
@@ -485,8 +519,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
             os.makedirs(os.path.join(tmpdir, "sdp", "objects",
                                      "2607", "df3",
                                      "ec2871fe724411f91787462f97d30df3"))
-            with open(os.path.join(tmpdir, "garbage"), "wb") as fh:
-                fh.write('')
+            with open(os.path.join(tmpdir, "garbage"), "wb"):
+                pass

             locations = [
                 (loc.path, loc.device, loc.partition, loc.policy)

@@ -571,8 +605,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
             gen = diskfile.object_audit_location_generator(tmpdir,
                                                            datadir,
                                                            False)
-            gen.next()
-            gen.next()
+            next(gen)
+            next(gen)

            # Auditor stopped for some reason without raising StopIterator in
            # the generator and restarts There is now only one remaining

@@ -580,17 +614,17 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
             gen = diskfile.object_audit_location_generator(tmpdir, datadir,
                                                            False)
             with mock_check_drive(isdir=True):
-                gen.next()
+                next(gen)

             # There are no more remaining partitions
-            self.assertRaises(StopIteration, gen.next)
+            self.assertRaises(StopIteration, next, gen)

             # There are no partitions to check if the auditor restarts another
             # time and the status files have not been cleared
             gen = diskfile.object_audit_location_generator(tmpdir, datadir,
                                                            False)
             with mock_check_drive(isdir=True):
-                self.assertRaises(StopIteration, gen.next)
+                self.assertRaises(StopIteration, next, gen)

             # Reset status file
             diskfile.clear_auditor_status(tmpdir, datadir)

@@ -601,8 +635,8 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
             gen = diskfile.object_audit_location_generator(tmpdir, datadir,
                                                            False)
             with mock_check_drive(isdir=True):
-                gen.next()
-                gen.next()
+                next(gen)
+                next(gen)

     def test_update_auditor_status_throttle(self):
         # If there are a lot of nearly empty partitions, the
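Note: Python 3 dropped the generator .next() method in favor of __next__, so the portable spelling is the next() builtin; assertRaises correspondingly takes the callable plus its argument instead of the now-missing bound method:

    def gen():
        yield 1

    g = gen()
    assert next(g) == 1    # works on py2 and py3; g.next() is py2-only
    try:
        next(g)            # exhausted generator
    except StopIteration:
        pass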
@@ -611,7 +645,7 @@ class TestObjectAuditLocationGenerator(unittest.TestCase):
         # status file is only written once a minute.
         with temptree([]) as tmpdir:
             os.makedirs(os.path.join(tmpdir, "sdf", "objects", "1", "a", "b"))
-            with mock.patch('__builtin__.open') as mock_open:
+            with mock.patch('swift.obj.diskfile.open') as mock_open:
                 # File does not exist yet - write expected
                 update_auditor_status(tmpdir, None, ['42'], "ALL")
                 self.assertEqual(1, mock_open.call_count)
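Note: __builtin__ only exists on Python 2 (Python 3 renamed it builtins), so patching it fails outright under py3. Patching open where the module under test looks it up is both portable and better scoped; the shape of the idiom:

    import mock

    # Patch open() at the point of lookup (this module, illustratively;
    # the test above patches 'swift.obj.diskfile.open').
    with mock.patch('%s.open' % __name__, mock.mock_open(read_data='data'),
                    create=True) as m:
        with open('any-path') as fh:
            assert fh.read() == 'data'
    assert m.call_count == 1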
@@ -1217,7 +1251,8 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
             'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
         quarantine_renamer.assert_called_once_with(
             '/srv/dev/',
-            '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900')
+            ('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
+             'made-up-filename'))

     def test_get_diskfile_from_hash_no_dir(self):
         self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')

@@ -2663,11 +2698,11 @@ class TestECDiskFileManager(DiskFileManagerMixin, unittest.TestCase):
         # verify .meta filename encodes content-type timestamp
         mgr = self.df_router[POLICIES.default]
         time_ = 1234567890.00001
-        for delta in (0.0, .00001, 1.11111):
+        for delta in (0, 1, 111111):
             t_meta = Timestamp(time_)
-            t_type = Timestamp(time_ - delta)
+            t_type = Timestamp(time_ - delta / 100000.)
             sign = '-' if delta else '+'
-            expected = '%s%s%x.meta' % (t_meta.short, sign, 100000 * delta)
+            expected = '%s%s%x.meta' % (t_meta.short, sign, delta)
             actual = mgr.make_on_disk_filename(
                 t_meta, '.meta', ctype_timestamp=t_type)
             self.assertEqual(expected, actual)
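Note: driving the loop with integer deltas sidesteps two float hazards at once: '%x' refuses floats on Python 3, and 100000 * delta is subject to binary-float rounding. Concretely:

    delta = 1.11111
    try:
        '%x' % (100000 * delta)      # the product is a float
    except TypeError as e:
        print(e)   # py3: %x format: an integer is required, not float
    print('%x' % 111111)             # '1b207' - exact with an int delta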
@@ -3240,6 +3275,8 @@ class DiskFileMixin(BaseDiskFileTestMixin):

     def _create_test_file(self, data, timestamp=None, metadata=None,
                           account='a', container='c', obj='o', **kwargs):
+        if not isinstance(data, bytes):
+            raise ValueError('data must be bytes')
         if metadata is None:
             metadata = {}
         metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))

@@ -3310,10 +3347,10 @@ class DiskFileMixin(BaseDiskFileTestMixin):
     def test_open_expired(self):
         self.assertRaises(DiskFileExpired,
                           self._create_test_file,
-                          '1234567890', metadata={'X-Delete-At': '0'})
+                          b'1234567890', metadata={'X-Delete-At': '0'})

         try:
-            self._create_test_file('1234567890', open_expired=True,
+            self._create_test_file(b'1234567890', open_expired=True,
                                    metadata={'X-Delete-At': '0',
                                              'X-Object-Meta-Foo': 'bar'})
             df = self._simple_get_diskfile(open_expired=True)

@@ -3325,20 +3362,20 @@ class DiskFileMixin(BaseDiskFileTestMixin):
     def test_open_not_expired(self):
         try:
             self._create_test_file(
-                '1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
+                b'1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
         except SwiftException as err:
             self.fail("Unexpected swift exception raised: %r" % err)

     def test_get_metadata(self):
         timestamp = self.ts().internal
-        df, df_data = self._create_test_file('1234567890',
+        df, df_data = self._create_test_file(b'1234567890',
                                              timestamp=timestamp)
         md = df.get_metadata()
         self.assertEqual(md['X-Timestamp'], timestamp)

     def test_read_metadata(self):
         timestamp = self.ts().internal
-        self._create_test_file('1234567890', timestamp=timestamp)
+        self._create_test_file(b'1234567890', timestamp=timestamp)
         df = self._simple_get_diskfile()
         md = df.read_metadata()
         self.assertEqual(md['X-Timestamp'], timestamp)

@@ -3361,7 +3398,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):

     def test_get_datafile_metadata(self):
         ts_iter = make_timestamp_iter()
-        body = '1234567890'
+        body = b'1234567890'
         ts_data = next(ts_iter)
         metadata = {'X-Object-Meta-Test': 'test1',
                     'X-Object-Sysmeta-Test': 'test1'}

@@ -3385,7 +3422,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):

     def test_get_metafile_metadata(self):
         ts_iter = make_timestamp_iter()
-        body = '1234567890'
+        body = b'1234567890'
         ts_data = next(ts_iter)
         metadata = {'X-Object-Meta-Test': 'test1',
                     'X-Object-Sysmeta-Test': 'test1'}

@@ -3496,67 +3533,67 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         self.assertEqual('True', df._metadata['X-Static-Large-Object'])

     def test_disk_file_reader_iter(self):
-        df, df_data = self._create_test_file('1234567890')
+        df, df_data = self._create_test_file(b'1234567890')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
-        self.assertEqual(''.join(reader), df_data)
+        self.assertEqual(b''.join(reader), df_data)
         self.assertEqual(quarantine_msgs, [])

     def test_disk_file_reader_iter_w_quarantine(self):
-        df, df_data = self._create_test_file('1234567890')
+        df, df_data = self._create_test_file(b'1234567890')

         def raise_dfq(m):
             raise DiskFileQuarantined(m)

         reader = df.reader(_quarantine_hook=raise_dfq)
         reader._obj_size += 1
-        self.assertRaises(DiskFileQuarantined, ''.join, reader)
+        self.assertRaises(DiskFileQuarantined, b''.join, reader)

     def test_disk_file_app_iter_corners(self):
-        df, df_data = self._create_test_file('1234567890')
+        df, df_data = self._create_test_file(b'1234567890')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
-        self.assertEqual(''.join(reader.app_iter_range(0, None)),
+        self.assertEqual(b''.join(reader.app_iter_range(0, None)),
                          df_data)
         self.assertEqual(quarantine_msgs, [])
         df = self._simple_get_diskfile()
         with df.open():
             reader = df.reader()
-            self.assertEqual(''.join(reader.app_iter_range(5, None)),
+            self.assertEqual(b''.join(reader.app_iter_range(5, None)),
                              df_data[5:])

     def test_disk_file_app_iter_range_w_none(self):
-        df, df_data = self._create_test_file('1234567890')
+        df, df_data = self._create_test_file(b'1234567890')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
-        self.assertEqual(''.join(reader.app_iter_range(None, None)),
+        self.assertEqual(b''.join(reader.app_iter_range(None, None)),
                          df_data)
         self.assertEqual(quarantine_msgs, [])

     def test_disk_file_app_iter_partial_closes(self):
-        df, df_data = self._create_test_file('1234567890')
+        df, df_data = self._create_test_file(b'1234567890')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         it = reader.app_iter_range(0, 5)
-        self.assertEqual(''.join(it), df_data[:5])
+        self.assertEqual(b''.join(it), df_data[:5])
         self.assertEqual(quarantine_msgs, [])
         self.assertTrue(reader._fp is None)

     def test_disk_file_app_iter_ranges(self):
-        df, df_data = self._create_test_file('012345678911234567892123456789')
+        df, df_data = self._create_test_file(b'012345678911234567892123456789')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         it = reader.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
                                     'plain/text',
                                     '\r\n--someheader\r\n', len(df_data))
-        value = ''.join(it)
+        value = b''.join(it)
         self.assertIn(df_data[:10], value)
         self.assertIn(df_data[10:20], value)
         self.assertIn(df_data[20:30], value)
         self.assertEqual(quarantine_msgs, [])

     def test_disk_file_app_iter_ranges_w_quarantine(self):
-        df, df_data = self._create_test_file('012345678911234567892123456789')
+        df, df_data = self._create_test_file(b'012345678911234567892123456789')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         self.assertEqual(len(df_data), reader._obj_size)  # sanity check

@@ -3564,30 +3601,30 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         it = reader.app_iter_ranges([(0, len(df_data))],
                                     'plain/text',
                                     '\r\n--someheader\r\n', len(df_data))
-        value = ''.join(it)
+        value = b''.join(it)
         self.assertIn(df_data, value)
         self.assertEqual(quarantine_msgs,
                          ["Bytes read: %s, does not match metadata: %s" %
                           (len(df_data), len(df_data) + 1)])

     def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
-        df, df_data = self._create_test_file('012345678911234567892123456789')
+        df, df_data = self._create_test_file(b'012345678911234567892123456789')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         it = reader.app_iter_ranges([(0, 10)],
                                     'plain/text',
                                     '\r\n--someheader\r\n', len(df_data))
-        value = ''.join(it)
+        value = b''.join(it)
         self.assertIn(df_data[:10], value)
         self.assertEqual(quarantine_msgs, [])

     def test_disk_file_app_iter_ranges_edges(self):
-        df, df_data = self._create_test_file('012345678911234567892123456789')
+        df, df_data = self._create_test_file(b'012345678911234567892123456789')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
                                     '\r\n--someheader\r\n', len(df_data))
-        value = ''.join(it)
+        value = b''.join(it)
         self.assertIn(df_data[3:10], value)
         self.assertIn(df_data[:2], value)
         self.assertEqual(quarantine_msgs, [])

@@ -3595,7 +3632,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
     def test_disk_file_large_app_iter_ranges(self):
         # This test case is to make sure that the disk file app_iter_ranges
         # method all the paths being tested.
-        long_str = '01234567890' * 65536
+        long_str = b'01234567890' * 65536
         df, df_data = self._create_test_file(long_str)
         target_strs = [df_data[3:10], df_data[0:65590]]
         quarantine_msgs = []

@@ -3608,34 +3645,37 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         # need to add these headers to make it as real MIME message.
         # The body of the message is produced by method app_iter_ranges
         # off of DiskFile object.
-        header = ''.join(['Content-Type: multipart/byteranges;',
-                          'boundary=',
-                          '5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
+        header = b''.join([b'Content-Type: multipart/byteranges;',
+                           b'boundary=',
+                           b'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])

-        value = header + ''.join(it)
+        value = header + b''.join(it)
         self.assertEqual(quarantine_msgs, [])

-        parts = map(lambda p: p.get_payload(decode=True),
-                    email.message_from_string(value).walk())[1:3]
+        if six.PY2:
+            message = email.message_from_string(value)
+        else:
+            message = email.message_from_bytes(value)
+        parts = [p.get_payload(decode=True) for p in message.walk()][1:3]
         self.assertEqual(parts, target_strs)

     def test_disk_file_app_iter_ranges_empty(self):
         # This test case tests when empty value passed into app_iter_ranges
         # When ranges passed into the method is either empty array or None,
         # this method will yield empty string
-        df, df_data = self._create_test_file('012345678911234567892123456789')
+        df, df_data = self._create_test_file(b'012345678911234567892123456789')
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
         it = reader.app_iter_ranges([], 'application/whatever',
                                     '\r\n--someheader\r\n', len(df_data))
-        self.assertEqual(''.join(it), '')
+        self.assertEqual(b''.join(it), b'')

         df = self._simple_get_diskfile()
         with df.open():
             reader = df.reader()
             it = reader.app_iter_ranges(None, 'app/something',
                                         '\r\n--someheader\r\n', 150)
-            self.assertEqual(''.join(it), '')
+            self.assertEqual(b''.join(it), b'')
             self.assertEqual(quarantine_msgs, [])

     def test_disk_file_mkstemp_creates_dir(self):
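Note: two py3 incompatibilities hide in the MIME-parsing change: email.message_from_string cannot parse a bytes body (message_from_bytes exists for that), and map() now returns a lazy iterator, so the old map(...)[1:3] slice would raise TypeError. A self-contained illustration:

    import email

    raw = (b'Content-Type: multipart/byteranges; boundary=bd\r\n\r\n'
           b'--bd\r\nContent-Type: text/plain\r\n\r\nhello\r\n--bd--\r\n')
    message = email.message_from_bytes(raw)   # py3 bytes-aware parser
    # A list comprehension is sliceable; py3 map() is not.
    parts = [p.get_payload(decode=True) for p in message.walk()]
    print(parts[1:])   # [b'hello']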
@@ -3674,7 +3714,9 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         policy = policy or POLICIES.legacy
         df = self._simple_get_diskfile(obj=obj_name, policy=policy,
                                        frag_index=frag_index)
-        data = data or '0' * fsize
+        data = data or b'0' * fsize
+        if not isinstance(data, bytes):
+            raise ValueError('data must be bytes')
         if policy.policy_type == EC_POLICY:
             archives = encode_frag_archive_bodies(policy, data)
             try:

@@ -3733,16 +3775,16 @@ class DiskFileMixin(BaseDiskFileTestMixin):
             # We have to go below read_metadata/write_metadata to get proper
             # corruption.
             meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
-            wrong_byte = 'X' if meta_xattr[0] != 'X' else 'Y'
+            wrong_byte = b'X' if meta_xattr[:1] != b'X' else b'Y'
             xattr.setxattr(data_files[0], "user.swift.metadata",
                            wrong_byte + meta_xattr[1:])
         elif invalid_type == 'Subtly-Corrupt-Xattrs':
             # We have to go below read_metadata/write_metadata to get proper
             # corruption.
             meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
-            wrong_checksum = md5(meta_xattr + "some extra stuff").hexdigest()
+            wrong_checksum = md5(meta_xattr + b"some extra stuff").hexdigest()
             xattr.setxattr(data_files[0], "user.swift.metadata_checksum",
-                           wrong_checksum)
+                           wrong_checksum.encode())
         elif invalid_type == 'Truncated-Xattrs':
             meta_xattr = xattr.getxattr(data_files[0], "user.swift.metadata")
             xattr.setxattr(data_files[0], "user.swift.metadata",
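Note: the meta_xattr[:1] spelling matters because indexing bytes changed: on Python 3, b'XY'[0] is the integer 88, so comparing it to b'X' would always be False, while a one-element slice stays bytes on both versions:

    raw = b'XY'
    print(raw[0])    # py3: 88 (an int); py2: 'X'
    print(raw[:1])   # b'X' on both - safe to compare against b'X'
    assert raw[:1] == b'X'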
@ -3974,7 +4016,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
self._get_open_disk_file)
|
||||
|
||||
def test_quarantine_hashdir_not_a_directory(self):
|
||||
df, df_data = self._create_test_file('1234567890', account="abc",
|
||||
df, df_data = self._create_test_file(b'1234567890', account="abc",
|
||||
container='123', obj='xyz')
|
||||
hashdir = df._datadir
|
||||
rmtree(hashdir)
|
||||
|
@ -4062,7 +4104,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
pass
|
||||
|
||||
def test_write_metadata(self):
|
||||
df, df_data = self._create_test_file('1234567890')
|
||||
df, df_data = self._create_test_file(b'1234567890')
|
||||
file_count = len(os.listdir(df._datadir))
|
||||
timestamp = Timestamp.now().internal
|
||||
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
|
||||
|
@ -4074,7 +4116,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def test_write_metadata_with_content_type(self):
|
||||
# if metadata has content-type then its time should be in file name
|
||||
df, df_data = self._create_test_file('1234567890')
|
||||
df, df_data = self._create_test_file(b'1234567890')
|
||||
file_count = len(os.listdir(df._datadir))
|
||||
timestamp = Timestamp.now()
|
||||
metadata = {'X-Timestamp': timestamp.internal,
|
||||
|
@ -4091,11 +4133,11 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
def test_write_metadata_with_older_content_type(self):
|
||||
# if metadata has content-type then its time should be in file name
|
||||
ts_iter = make_timestamp_iter()
|
||||
df, df_data = self._create_test_file('1234567890',
|
||||
timestamp=ts_iter.next())
|
||||
df, df_data = self._create_test_file(b'1234567890',
|
||||
timestamp=next(ts_iter))
|
||||
file_count = len(os.listdir(df._datadir))
|
||||
timestamp = ts_iter.next()
|
||||
timestamp2 = ts_iter.next()
|
||||
timestamp = next(ts_iter)
|
||||
timestamp2 = next(ts_iter)
|
||||
metadata = {'X-Timestamp': timestamp2.internal,
|
||||
'X-Object-Meta-test': 'data',
|
||||
'Content-Type': 'foo',
|
||||
|
@ -4112,11 +4154,11 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
# a meta file without content-type should be cleaned up in favour of
|
||||
# a meta file at same time with content-type
|
||||
ts_iter = make_timestamp_iter()
|
||||
df, df_data = self._create_test_file('1234567890',
|
||||
timestamp=ts_iter.next())
|
||||
df, df_data = self._create_test_file(b'1234567890',
|
||||
timestamp=next(ts_iter))
|
||||
file_count = len(os.listdir(df._datadir))
|
||||
timestamp = ts_iter.next()
|
||||
timestamp2 = ts_iter.next()
|
||||
timestamp = next(ts_iter)
|
||||
timestamp2 = next(ts_iter)
|
||||
metadata = {'X-Timestamp': timestamp2.internal,
|
||||
'X-Object-Meta-test': 'data'}
|
||||
df.write_metadata(metadata)
|
||||
|
@ -4138,11 +4180,11 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
# file with content-type should be cleaned up in favour of a meta file
|
||||
# at newer time with content-type
|
||||
ts_iter = make_timestamp_iter()
|
||||
df, df_data = self._create_test_file('1234567890',
|
||||
timestamp=ts_iter.next())
|
||||
df, df_data = self._create_test_file(b'1234567890',
|
||||
timestamp=next(ts_iter))
|
||||
file_count = len(os.listdir(df._datadir))
|
||||
timestamp = ts_iter.next()
|
||||
timestamp2 = ts_iter.next()
|
||||
timestamp = next(ts_iter)
|
||||
timestamp2 = next(ts_iter)
|
||||
metadata = {'X-Timestamp': timestamp2.internal,
|
||||
'X-Object-Meta-test': 'data'}
|
||||
df.write_metadata(metadata)
|
||||
|
@ -4498,7 +4540,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def test_from_audit_location(self):
|
||||
df, df_data = self._create_test_file(
|
||||
'blah blah',
|
||||
b'blah blah',
|
||||
account='three', container='blind', obj='mice')
|
||||
hashdir = df._datadir
|
||||
df = self.df_mgr.get_diskfile_from_audit_location(
|
||||
|
@ -4509,7 +4551,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def test_from_audit_location_with_mismatched_hash(self):
|
||||
df, df_data = self._create_test_file(
|
||||
'blah blah',
|
||||
b'blah blah',
|
||||
account='this', container='is', obj='right')
|
||||
hashdir = df._datadir
|
||||
datafilename = [f for f in os.listdir(hashdir)
|
||||
|
@ -4554,12 +4596,12 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def test_ondisk_search_loop_ts_meta_data(self):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=10)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=9)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=8)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=7)
|
||||
self._create_ondisk_file(df, 'B', ext='.data', timestamp=6)
|
||||
self._create_ondisk_file(df, 'A', ext='.data', timestamp=5)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=10)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=9)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=8)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=7)
|
||||
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
|
||||
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
|
||||
df = self._simple_get_diskfile()
|
||||
with self.assertRaises(DiskFileDeleted) as raised:
|
||||
df.open()
|
||||
|
@ -4567,12 +4609,12 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def test_ondisk_search_loop_meta_ts_data(self):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=10)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=9)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, 'B', ext='.data', timestamp=6)
|
||||
self._create_ondisk_file(df, 'A', ext='.data', timestamp=5)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, b'B', ext='.data', timestamp=6)
|
||||
self._create_ondisk_file(df, b'A', ext='.data', timestamp=5)
|
||||
df = self._simple_get_diskfile()
|
||||
with self.assertRaises(DiskFileDeleted) as raised:
|
||||
df.open()
|
||||
|
@ -4580,14 +4622,14 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def _test_ondisk_search_loop_meta_data_ts(self, legacy_durable=False):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=10)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=9)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9)
|
||||
self._create_ondisk_file(
|
||||
df, 'B', ext='.data', legacy_durable=legacy_durable, timestamp=8)
|
||||
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8)
|
||||
self._create_ondisk_file(
|
||||
df, 'A', ext='.data', legacy_durable=legacy_durable, timestamp=7)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=6)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=5)
|
||||
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=6)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=5)
|
||||
df = self._simple_get_diskfile()
|
||||
with df.open():
|
||||
self.assertIn('X-Timestamp', df._metadata)
|
||||
|
@ -4604,17 +4646,17 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
def _test_ondisk_search_loop_multiple_meta_data(self,
|
||||
legacy_durable=False):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=10,
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
|
||||
metadata={'X-Object-Meta-User': 'user-meta'})
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=9,
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
|
||||
ctype_timestamp=9,
|
||||
metadata={'Content-Type': 'newest',
|
||||
'X-Object-Meta-User': 'blah'})
|
||||
self._create_ondisk_file(
|
||||
df, 'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
|
||||
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
|
||||
metadata={'Content-Type': 'newer'})
|
||||
self._create_ondisk_file(
|
||||
df, 'A', ext='.data', legacy_durable=legacy_durable, timestamp=7,
|
||||
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=7,
|
||||
metadata={'Content-Type': 'oldest'})
|
||||
df = self._simple_get_diskfile()
|
||||
with df.open():
|
||||
|
@ -4634,14 +4676,14 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
|
||||
def _test_ondisk_search_loop_stale_meta_data(self, legacy_durable=False):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=10,
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=10,
|
||||
metadata={'X-Object-Meta-User': 'user-meta'})
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=9,
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=9,
|
||||
ctype_timestamp=7,
|
||||
metadata={'Content-Type': 'older',
|
||||
'X-Object-Meta-User': 'blah'})
|
||||
self._create_ondisk_file(
|
||||
df, 'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
|
||||
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=8,
|
||||
metadata={'Content-Type': 'newer'})
|
||||
df = self._simple_get_diskfile()
|
||||
with df.open():
|
||||
|
@ -4662,13 +4704,13 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
def _test_ondisk_search_loop_data_ts_meta(self, legacy_durable=False):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(
|
||||
df, 'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
|
||||
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
|
||||
self._create_ondisk_file(
|
||||
df, 'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
|
||||
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
|
||||
df = self._simple_get_diskfile()
|
||||
with df.open():
|
||||
self.assertIn('X-Timestamp', df._metadata)
|
||||
|
@ -4685,15 +4727,15 @@ class DiskFileMixin(BaseDiskFileTestMixin):
|
|||
def _test_ondisk_search_loop_wayward_files_ignored(self,
|
||||
legacy_durable=False):
|
||||
df = self._simple_get_diskfile()
|
||||
self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
|
||||
self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
|
||||
self._create_ondisk_file(
|
||||
df, 'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
|
||||
df, b'B', ext='.data', legacy_durable=legacy_durable, timestamp=10)
|
||||
self._create_ondisk_file(
|
||||
df, 'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
|
||||
self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
|
||||
df, b'A', ext='.data', legacy_durable=legacy_durable, timestamp=9)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
|
||||
self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
|
||||
self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
|
||||
df = self._simple_get_diskfile()
|
||||
with df.open():
|
||||
self.assertIn('X-Timestamp', df._metadata)
|
||||
|
@@ -4715,15 +4757,15 @@ class DiskFileMixin(BaseDiskFileTestMixin):
             raise OSError(errno.EACCES, os.strerror(errno.EACCES))

         with mock.patch("os.listdir", mock_listdir_exp):
-            self._create_ondisk_file(df, 'X', ext='.bar', timestamp=11)
-            self._create_ondisk_file(df, 'B', ext='.data', timestamp=10,
+            self._create_ondisk_file(df, b'X', ext='.bar', timestamp=11)
+            self._create_ondisk_file(df, b'B', ext='.data', timestamp=10,
                                      legacy_durable=legacy_durable)
-            self._create_ondisk_file(df, 'A', ext='.data', timestamp=9,
+            self._create_ondisk_file(df, b'A', ext='.data', timestamp=9,
                                      legacy_durable=legacy_durable)
-            self._create_ondisk_file(df, '', ext='.ts', timestamp=8)
-            self._create_ondisk_file(df, '', ext='.ts', timestamp=7)
-            self._create_ondisk_file(df, '', ext='.meta', timestamp=6)
-            self._create_ondisk_file(df, '', ext='.meta', timestamp=5)
+            self._create_ondisk_file(df, b'', ext='.ts', timestamp=8)
+            self._create_ondisk_file(df, b'', ext='.ts', timestamp=7)
+            self._create_ondisk_file(df, b'', ext='.meta', timestamp=6)
+            self._create_ondisk_file(df, b'', ext='.meta', timestamp=5)
             df = self._simple_get_diskfile()
             self.assertRaises(DiskFileError, df.open)

@@ -5088,7 +5130,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         _m_unlink = mock.Mock()
         df = self._simple_get_diskfile()
         df.manager.use_linkat = False
-        data = '0' * 100
+        data = b'0' * 100
         metadata = {
             'ETag': md5(data).hexdigest(),
             'X-Timestamp': Timestamp.now().internal,
@@ -5203,7 +5245,7 @@ class DiskFileMixin(BaseDiskFileTestMixin):
     @requires_o_tmpfile_support_in_tmp
     def test_create_use_linkat_renamer_not_called(self):
         df = self._simple_get_diskfile()
-        data = '0' * 100
+        data = b'0' * 100
         metadata = {
             'ETag': md5(data).hexdigest(),
             'X-Timestamp': Timestamp.now().internal,
@@ -5803,7 +5845,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         df = df_mgr.get_diskfile(self.existing_device, '0',
                                  'a', 'c', 'o', policy=policy)
         frag_2_metadata = write_diskfile(df, ts_3, frag_index=2, commit=False,
-                                         data='new test data',
+                                         data=b'new test data',
                                          legacy_durable=legacy_durable)
         # sanity check: should have 2* .data, possibly .durable, .meta, .data
         self.assertEqual(5 if legacy_durable else 4,
@@ -5974,7 +6016,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         policy = POLICIES.default
         frag_size = policy.fragment_size
         # make sure there are two fragment size worth of data on disk
-        data = 'ab' * policy.ec_segment_size
+        data = b'ab' * policy.ec_segment_size
         df, df_data = self._create_test_file(data)
         quarantine_msgs = []
         reader = df.reader(_quarantine_hook=quarantine_msgs.append)
@@ -5990,7 +6032,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
             [(0, 10), (10, 20),
              (frag_size + 20, frag_size + 30)],
             'plain/text', '\r\n--someheader\r\n', len(df_data))
-        value = ''.join(it)
+        value = b''.join(it)
         # check that only first range which starts at 0 triggers a frag check
         self.assertEqual(1, mock_get_metadata.call_count)
         self.assertIn(df_data[:10], value)
@@ -6003,7 +6045,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         # TestAuditor.test_object_audit_checks_EC_fragments just making
         # sure that checks happen in DiskFileReader layer.
         policy = POLICIES.default
-        df, df_data = self._create_test_file('x' * policy.ec_segment_size,
+        df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
                                              timestamp=self.ts())

         def do_test(corrupted_frag_body, expected_offset, expected_read):
@@ -6028,7 +6070,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):

             self.assertEqual(expected_read, bytes_read)
             self.assertEqual('Invalid EC metadata at offset 0x%x' %
-                             expected_offset, cm.exception.message)
+                             expected_offset, cm.exception.args[0])

         # TODO with liberasurecode < 1.2.0 the EC metadata verification checks
         # only the magic number at offset 59 bytes into the frag so we'll
@@ -6036,18 +6078,18 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         # required we should be able to reduce the corruption length.
         corruption_length = 64
         # corrupted first frag can be detected
-        corrupted_frag_body = (' ' * corruption_length +
+        corrupted_frag_body = (b' ' * corruption_length +
                                df_data[corruption_length:])
         do_test(corrupted_frag_body, 0, 0)

         # corrupted the second frag can be also detected
-        corrupted_frag_body = (df_data + ' ' * corruption_length +
+        corrupted_frag_body = (df_data + b' ' * corruption_length +
                                df_data[corruption_length:])
         do_test(corrupted_frag_body, len(df_data), len(df_data))

         # if the second frag is shorter than frag size then corruption is
         # detected when the reader is closed
-        corrupted_frag_body = (df_data + ' ' * corruption_length +
+        corrupted_frag_body = (df_data + b' ' * corruption_length +
                                df_data[corruption_length:-10])
         do_test(corrupted_frag_body, len(df_data), len(corrupted_frag_body))

@@ -6055,7 +6097,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         policy = POLICIES.default

         def do_test(exception):
-            df, df_data = self._create_test_file('x' * policy.ec_segment_size,
+            df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
                                                  timestamp=self.ts())
             df.manager.logger.clear()

@@ -6070,7 +6112,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
                 df.open()

             self.assertEqual('Invalid EC metadata at offset 0x0',
-                             cm.exception.message)
+                             cm.exception.args[0])
             log_lines = df.manager.logger.get_lines_for_level('warning')
             self.assertIn('Quarantined object', log_lines[0])
             self.assertIn('Invalid EC metadata at offset 0x0', log_lines[0])
@@ -6083,14 +6125,14 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
         # ECDriverError should not cause quarantine, only certain subclasses
         policy = POLICIES.default

-        df, df_data = self._create_test_file('x' * policy.ec_segment_size,
+        df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
                                              timestamp=self.ts())

         with mock.patch.object(
                 df.policy.pyeclib_driver, 'get_metadata',
                 side_effect=pyeclib.ec_iface.ECDriverError('testing')):
             df.open()
-            read_data = ''.join([d for d in df.reader()])
+            read_data = b''.join([d for d in df.reader()])
         self.assertEqual(df_data, read_data)
         log_lines = df.manager.logger.get_lines_for_level('warning')
         self.assertIn('Problem checking EC fragment', log_lines[0])
@@ -6103,7 +6145,7 @@ class TestECDiskFile(DiskFileMixin, unittest.TestCase):
        # type chunk incomming (that would occurre only from coding bug)
        policy = POLICIES.default

-        df, df_data = self._create_test_file('x' * policy.ec_segment_size,
+        df, df_data = self._create_test_file(b'x' * policy.ec_segment_size,
                                              timestamp=self.ts())
         df.open()
         for invalid_type_chunk in (None, [], [[]], 1):
@@ -6572,7 +6614,7 @@ class TestSuffixHashes(unittest.TestCase):
         self.assertIn(suffix, hashes)
         self.assertTrue(os.path.exists(hashes_file))
         self.assertIn(os.path.basename(suffix_dir), hashes)
-        with open(hashes_file) as f:
+        with open(hashes_file, 'rb') as f:
             found_hashes = pickle.load(f)
             found_hashes.pop('updated')
             self.assertTrue(found_hashes.pop('valid'))
@@ -6632,7 +6674,7 @@ class TestSuffixHashes(unittest.TestCase):
         # each file is opened once to read
         expected = {
             'hashes.pkl': ['rb'],
-            'hashes.invalid': ['rb'],
+            'hashes.invalid': ['r'],
         }
         self.assertEqual(open_log, expected)

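A note on the pattern running through these TestSuffixHashes hunks: under Python 3 pickle data is bytes, so hashes.pkl must be opened in 'rb'/'wb', while the newline-delimited hashes.invalid file stays in text mode 'r'. A minimal standalone illustration of that split, not code from the commit (any scratch path works):

# Minimal illustration (not from the commit) of binary vs. text mode.
import pickle
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:  # opened in binary
    pickle.dump({'stub': 'fake'}, f)                  # pickle wants bytes
    path = f.name

with open(path, 'rb') as f:      # plain 'r' would fail on py3: pickled
    data = pickle.load(f)        # data is arbitrary bytes, not UTF-8 text
assert data == {'stub': 'fake'}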
@@ -6782,7 +6824,7 @@ class TestSuffixHashes(unittest.TestCase):
         self.assertRaises(
             Exception, df_mgr.get_hashes, 'sda1', '0', [], policy)
         # sanity on-disk state is invalid
-        with open(hashes_file) as f:
+        with open(hashes_file, 'rb') as f:
             found_hashes = pickle.load(f)
             found_hashes.pop('updated')
             self.assertEqual(False, found_hashes.pop('valid'))
@@ -6810,7 +6852,7 @@ class TestSuffixHashes(unittest.TestCase):
             found_hashes = pickle.load(f)
             self.assertTrue(hashes['valid'])
             self.assertEqual(hashes, found_hashes)
-        with open(invalidations_file, 'rb') as f:
+        with open(invalidations_file, 'r') as f:
             self.assertEqual("", f.read())
         return hashes

@@ -6843,7 +6885,7 @@ class TestSuffixHashes(unittest.TestCase):
             df_mgr.invalidate_hash(suffix_dir)
         self.assertTrue(mock_lock.called)
         # suffix should be in invalidations file
-        with open(invalidations_file, 'rb') as f:
+        with open(invalidations_file, 'r') as f:
             self.assertEqual(suffix + "\n", f.read())
         # hashes file is unchanged
         with open(hashes_file, 'rb') as f:
@@ -6862,7 +6904,7 @@ class TestSuffixHashes(unittest.TestCase):
         suffix_dir2 = os.path.dirname(df2._datadir)
         suffix2 = os.path.basename(suffix_dir2)
         # suffix2 should be in invalidations file
-        with open(invalidations_file, 'rb') as f:
+        with open(invalidations_file, 'r') as f:
             self.assertEqual(suffix2 + "\n", f.read())
         # hashes file is not yet changed
         with open(hashes_file, 'rb') as f:
@@ -6877,7 +6919,7 @@ class TestSuffixHashes(unittest.TestCase):
         df2.delete(self.ts())
         df2.delete(self.ts())
         # suffix2 should be in invalidations file
-        with open(invalidations_file, 'rb') as f:
+        with open(invalidations_file, 'r') as f:
             self.assertEqual("%s\n%s\n" % (suffix2, suffix2), f.read())
         # hashes file is not yet changed
         with open(hashes_file, 'rb') as f:
@@ -7245,7 +7287,7 @@ class TestSuffixHashes(unittest.TestCase):
         # write a datafile
         timestamp = self.ts()
         with df.create() as writer:
-            test_data = 'test file'
+            test_data = b'test file'
             writer.write(test_data)
             metadata = {
                 'X-Timestamp': timestamp.internal,
@@ -7707,7 +7749,6 @@ class TestSuffixHashes(unittest.TestCase):
         quarantine_path = os.path.join(
             quarantine_base,  # quarantine root
             diskfile.get_data_dir(policy),  # per-policy data dir
-            suffix,  # first dir from which quarantined file was removed
             os.path.basename(df._datadir)  # name of quarantined file
         )
         self.assertTrue(os.path.exists(quarantine_path))
@@ -7721,7 +7762,7 @@ class TestSuffixHashes(unittest.TestCase):
                                      frag_index=7)
         suffix = os.path.basename(os.path.dirname(df._datadir))
         with df.create() as writer:
-            test_data = 'test_data'
+            test_data = b'test_data'
             writer.write(test_data)
             metadata = {
                 'X-Timestamp': timestamp.internal,
@@ -7988,7 +8029,7 @@ class TestSuffixHashes(unittest.TestCase):
                                      frag_index=5)
         timestamp = self.ts()
         with df.create() as writer:
-            test_data = 'test_file'
+            test_data = b'test_file'
             writer.write(test_data)
             metadata = {
                 'X-Timestamp': timestamp.internal,
@@ -8008,7 +8049,7 @@ class TestSuffixHashes(unittest.TestCase):
         matching_suffix = os.path.basename(os.path.dirname(df._datadir))
         timestamp = self.ts()
         with df.create() as writer:
-            test_data = 'test_file'
+            test_data = b'test_file'
             writer.write(test_data)
             metadata = {
                 'X-Timestamp': timestamp.internal,
@@ -8030,7 +8071,7 @@ class TestSuffixHashes(unittest.TestCase):
                          matching_suffix)  # sanity
         timestamp = self.ts()
         with df.create() as writer:
-            test_data = 'test_file'
+            test_data = b'test_file'
             writer.write(test_data)
             metadata = {
                 'X-Timestamp': timestamp.internal,
@@ -8261,7 +8302,7 @@ class TestHashesHelpers(unittest.TestCase):
     def test_read_legacy_hashes(self):
         hashes = {'stub': 'fake'}
         hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
-        with open(hashes_file, 'w') as f:
+        with open(hashes_file, 'wb') as f:
            pickle.dump(hashes, f)
         expected = {
             'stub': 'fake',
@@ -8276,7 +8317,7 @@ class TestHashesHelpers(unittest.TestCase):
         with mock.patch('swift.obj.diskfile.time.time', return_value=now):
             diskfile.write_hashes(self.testdir, hashes)
         hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
-        with open(hashes_file) as f:
+        with open(hashes_file, 'rb') as f:
            data = pickle.load(f)
         expected = {
             'stub': 'fake',
@@ -8291,7 +8332,7 @@ class TestHashesHelpers(unittest.TestCase):
         with mock.patch('swift.obj.diskfile.time.time', return_value=now):
             diskfile.write_hashes(self.testdir, hashes)
         hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
-        with open(hashes_file) as f:
+        with open(hashes_file, 'rb') as f:
            data = pickle.load(f)
         expected = {
             'updated': now,
@@ -8305,7 +8346,7 @@ class TestHashesHelpers(unittest.TestCase):
         with mock.patch('swift.obj.diskfile.time.time', return_value=now):
             diskfile.write_hashes(self.testdir, hashes)
         hashes_file = os.path.join(self.testdir, diskfile.HASH_FILE)
-        with open(hashes_file) as f:
+        with open(hashes_file, 'rb') as f:
            data = pickle.load(f)
         expected = {
             'updated': now,

@@ -15,7 +15,8 @@

 from time import time
 from unittest import main, TestCase
-from test.unit import FakeRing, mocked_http_conn, debug_logger
+from test.unit import FakeRing, mocked_http_conn, debug_logger, \
+    make_timestamp_iter
 from tempfile import mkdtemp
 from shutil import rmtree
 from collections import defaultdict
@@ -49,14 +50,17 @@ class FakeInternalClient(object):
         """
         :param aco_dict: A dict of account ,container, object that
             FakeInternalClient can return when each method called. Each account
-            has container name dict, and each container dict has object name
-            list in the container.
+            has container name dict, and each container dict has a list of
+            objects in the container.
             e.g. {'account1': {
-                     'container1: ['obj1', 'obj2', 'obj3'],
+                     'container1: ['obj1', 'obj2', {'name': 'obj3'}],
                      'container2: [],
                  },
                  'account2': {},
                 }
+            N.B. the objects entries should be the container-server JSON style
+            db rows, but this fake will dynamically detect when names are given
+            and wrap them for convenience.
        """
         self.aco_dict = defaultdict(dict)
         self.aco_dict.update(aco_dict)
@@ -69,8 +73,9 @@ class FakeInternalClient(object):

     def iter_containers(self, account, prefix=''):
         acc_dict = self.aco_dict[account]
-        return sorted([{'name': six.text_type(container)} for container in
-                       acc_dict if container.startswith(prefix)])
+        return [{'name': six.text_type(container)}
+                for container in sorted(acc_dict)
+                if container.startswith(prefix)]

     def delete_container(*a, **kw):
         pass
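The iter_containers rewrite above is not just cosmetic: Python 3 refuses to order dicts, so sorted() over a list of {'name': ...} dicts raises TypeError, while sorting the container names first behaves the same on both interpreters. A small demonstration with assumed values, not code from the commit:

# Small demonstration of why the comprehension was reordered.
containers = {'beta': {}, 'alpha': {}}

# Python 2 sorted dicts by arbitrary-but-consistent rules; Python 3
# raises "TypeError: '<' not supported between instances of 'dict'":
#     sorted([{'name': n} for n in containers])

listing = [{'name': n} for n in sorted(containers)]  # safe everywhere
assert listing == [{'name': 'alpha'}, {'name': 'beta'}]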
@@ -78,7 +83,12 @@ class FakeInternalClient(object):
     def iter_objects(self, account, container):
         acc_dict = self.aco_dict[account]
         obj_iter = acc_dict.get(container, [])
-        return [{'name': six.text_type(obj)} for obj in obj_iter]
+        resp = []
+        for obj in obj_iter:
+            if not isinstance(obj, dict):
+                obj = {'name': six.text_type(obj)}
+            resp.append(obj)
+        return resp

     def make_request(*a, **kw):
         pass
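iter_objects now passes through container-server-style row dicts and only wraps bare names, which is what lets the async-delete fixtures later in this diff supply full rows with a content_type. The normalization in miniature (illustrative only, not the commit's code):

# Illustrative miniature of the iter_objects normalization above.
def normalize(entries):
    return [e if isinstance(e, dict) else {'name': e} for e in entries]

rows = normalize(['obj1', {'name': 'obj3',
                           'content_type': 'application/async-deleted'}])
assert rows[0] == {'name': 'obj1'}
assert rows[1]['content_type'] == 'application/async-deleted'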
@@ -101,6 +111,7 @@ class TestObjectExpirer(TestCase):
         self.conf = {'recon_cache_path': self.rcache}
         self.logger = debug_logger('test-expirer')

+        self.ts = make_timestamp_iter()
         self.past_time = str(int(time() - 86400))
         self.future_time = str(int(time() + 86400))
         # Dummy task queue for test
@@ -131,9 +142,11 @@ class TestObjectExpirer(TestCase):

         # target object paths which should be expirerd now
         self.expired_target_path_list = [
-            'a0/c0/o0', 'a1/c1/o1', 'a2/c2/o2', 'a3/c3/o3', 'a4/c4/o4',
-            'a5/c5/o5', 'a6/c6/o6', 'a7/c7/o7',
-            'a8/c8/o8\xe2\x99\xa1', 'a9/c9/o9\xc3\xb8',
+            swob.wsgi_to_str(tgt) for tgt in (
+                'a0/c0/o0', 'a1/c1/o1', 'a2/c2/o2', 'a3/c3/o3', 'a4/c4/o4',
+                'a5/c5/o5', 'a6/c6/o6', 'a7/c7/o7',
+                'a8/c8/o8\xe2\x99\xa1', 'a9/c9/o9\xc3\xb8',
+            )
         ]

     def tearDown(self):
@@ -251,7 +264,8 @@ class TestObjectExpirer(TestCase):
             self.deleted_objects = {}

         def delete_object(self, target_path, delete_timestamp,
-                          task_account, task_container, task_object):
+                          task_account, task_container, task_object,
+                          is_async_delete):
             if task_container not in self.deleted_objects:
                 self.deleted_objects[task_container] = set()
             self.deleted_objects[task_container].add(task_object)
@@ -300,9 +314,10 @@ class TestObjectExpirer(TestCase):
         with mock.patch.object(x, 'delete_actual_object',
                                side_effect=exc) as delete_actual:
             with mock.patch.object(x, 'pop_queue') as pop_queue:
-                x.delete_object(actual_obj, ts, account, container, obj)
+                x.delete_object(actual_obj, ts, account, container, obj,
+                                False)

-        delete_actual.assert_called_once_with(actual_obj, ts)
+        delete_actual.assert_called_once_with(actual_obj, ts, False)
         log_lines = x.logger.get_lines_for_level('error')
         if should_pop:
             pop_queue.assert_called_once_with(account, container, obj)
@@ -374,13 +389,14 @@ class TestObjectExpirer(TestCase):
         assert_parse_task_obj('1000-a/c/o', 1000, 'a', 'c', 'o')
         assert_parse_task_obj('0000-acc/con/obj', 0, 'acc', 'con', 'obj')

-    def make_task(self, delete_at, target):
+    def make_task(self, delete_at, target, is_async_delete=False):
         return {
             'task_account': '.expiring_objects',
             'task_container': delete_at,
             'task_object': delete_at + '-' + target,
             'delete_timestamp': Timestamp(delete_at),
             'target_path': target,
+            'is_async_delete': is_async_delete,
         }

     def test_round_robin_order(self):
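For readers following the expirer changes: the task-object naming convention exercised by assert_parse_task_obj() and make_task() above is "<delete-at timestamp>-<account>/<container>/<object>". A simplified sketch of the round trip; the real swift.obj.expirer helpers work with Timestamp objects, plain ints stand in here for brevity:

# Simplified sketch of the build_task_obj/parse_task_obj round trip.
def build_task_obj(delete_at, account, container, obj):
    return '%s-%s/%s/%s' % (delete_at, account, container, obj)

def parse_task_obj(task_obj):
    delete_at, target = task_obj.split('-', 1)
    account, container, obj = target.split('/', 2)
    return int(delete_at), account, container, obj

assert parse_task_obj(build_task_obj(1000, 'a', 'c', 'o')) == \
    (1000, 'a', 'c', 'o')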
@@ -588,6 +604,50 @@ class TestObjectExpirer(TestCase):
                 task_account_container_list, my_index, divisor)),
             expected)

+        # test some of that async delete
+        async_delete_aco_dict = {
+            '.expiring_objects': {
+                # this task container will be checked
+                self.past_time: [
+                    # tasks ready for execution
+                    {'name': self.past_time + '-a0/c0/o0',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a1/c1/o1',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a2/c2/o2',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a3/c3/o3',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a4/c4/o4',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a5/c5/o5',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a6/c6/o6',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + '-a7/c7/o7',
+                     'content_type': 'application/async-deleted'},
+                    # task objects for unicode test
+                    {'name': self.past_time + u'-a8/c8/o8\u2661',
+                     'content_type': 'application/async-deleted'},
+                    {'name': self.past_time + u'-a9/c9/o9\xf8',
+                     'content_type': 'application/async-deleted'},
+                ]
+            }
+        }
+        async_delete_fake_swift = FakeInternalClient(async_delete_aco_dict)
+        x = expirer.ObjectExpirer(self.conf, logger=self.logger,
+                                  swift=async_delete_fake_swift)
+
+        expected = [
+            self.make_task(self.past_time, target_path,
+                           is_async_delete=True)
+            for target_path in self.expired_target_path_list]
+
+        self.assertEqual(
+            list(x.iter_task_to_expire(
+                task_account_container_list, my_index, divisor)),
+            expected)
+
     def test_run_once_unicode_problem(self):
         requests = []

@@ -617,7 +677,7 @@ class TestObjectExpirer(TestCase):
         # executed tasks are with past time
         self.assertEqual(
             mock_method.call_args_list,
-            [mock.call(target_path, self.past_time)
+            [mock.call(target_path, self.past_time, False)
              for target_path in self.expired_target_path_list])

     def test_failed_delete_keeps_entry(self):
@@ -635,7 +695,7 @@ class TestObjectExpirer(TestCase):

         # all tasks are done
         with mock.patch.object(self.expirer, 'delete_actual_object',
-                               lambda o, t: None), \
+                               lambda o, t, b: None), \
                 mock.patch.object(self.expirer, 'pop_queue') as mock_method:
             self.expirer.run_once()

@@ -650,35 +710,36 @@ class TestObjectExpirer(TestCase):
         self.assertEqual(self.expirer.report_objects, 0)
         with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0), \
                 mock.patch.object(self.expirer, 'delete_actual_object',
-                                  lambda o, t: None), \
+                                  lambda o, t, b: None), \
                 mock.patch.object(self.expirer, 'pop_queue',
                                   lambda a, c, o: None):
             self.expirer.run_once()
         self.assertEqual(self.expirer.report_objects, 10)

-    def test_delete_actual_object_does_not_get_unicode(self):
-        got_unicode = [False]
+    def test_delete_actual_object_gets_native_string(self):
+        got_str = [False]

-        def delete_actual_object_test_for_unicode(actual_obj, timestamp):
-            if isinstance(actual_obj, six.text_type):
-                got_unicode[0] = True
+        def delete_actual_object_test_for_string(actual_obj, timestamp,
+                                                 is_async_delete):
+            if isinstance(actual_obj, str):
+                got_str[0] = True

         self.assertEqual(self.expirer.report_objects, 0)

         with mock.patch.object(self.expirer, 'delete_actual_object',
-                               delete_actual_object_test_for_unicode), \
+                               delete_actual_object_test_for_string), \
                 mock.patch.object(self.expirer, 'pop_queue',
                                   lambda a, c, o: None):
             self.expirer.run_once()

         self.assertEqual(self.expirer.report_objects, 10)
-        self.assertFalse(got_unicode[0])
+        self.assertTrue(got_str[0])

     def test_failed_delete_continues_on(self):
         def fail_delete_container(*a, **kw):
             raise Exception('failed to delete container')

-        def fail_delete_actual_object(actual_obj, timestamp):
+        def fail_delete_actual_object(actual_obj, timestamp, is_async_delete):
             raise Exception('failed to delete actual object')

         with mock.patch.object(self.fake_swift, 'delete_container',
@@ -713,19 +774,12 @@ class TestObjectExpirer(TestCase):
         interval = 1234
         x = expirer.ObjectExpirer({'__file__': 'unit_test',
                                    'interval': interval})
-        orig_random = expirer.random
-        orig_sleep = expirer.sleep
-        try:
-            expirer.random = not_random
-            expirer.sleep = not_sleep
+        with mock.patch.object(expirer, 'random', not_random), \
+                mock.patch.object(expirer, 'sleep', not_sleep), \
+                self.assertRaises(SystemExit) as caught:
             x.run_once = raise_system_exit
             x.run_forever()
-        except SystemExit as err:
-            pass
-        finally:
-            expirer.random = orig_random
-            expirer.sleep = orig_sleep
-        self.assertEqual(str(err), 'test_run_forever')
+        self.assertEqual(str(caught.exception), 'test_run_forever')
         self.assertEqual(last_not_sleep, 0.5 * interval)

     def test_run_forever_catches_usual_exceptions(self):
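The run_forever hunk above swaps a hand-rolled save/patch/restore of module globals for stacked mock.patch.object context managers plus assertRaises, which guarantees restoration even if an assertion blows up mid-block. The same idea in miniature, standard-library only and not from the commit:

# Miniature of the save/restore -> mock.patch.object refactor above.
import random
from unittest import mock

original = random.random
with mock.patch.object(random, 'random', lambda: 0.5):
    assert random.random() == 0.5   # patched inside the block
assert random.random is original    # restored automatically on exit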
@@ -765,10 +819,30 @@ class TestObjectExpirer(TestCase):

         x = expirer.ObjectExpirer({})
         ts = Timestamp('1234')
-        x.delete_actual_object('/path/to/object', ts)
+        x.delete_actual_object('/path/to/object', ts, False)
         self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
         self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
                          got_env[0]['HTTP_X_IF_DELETE_AT'])
+        self.assertEqual(
+            got_env[0]['HTTP_X_BACKEND_CLEAN_EXPIRING_OBJECT_QUEUE'], 'no')
+
+    def test_delete_actual_object_bulk(self):
+        got_env = [None]
+
+        def fake_app(env, start_response):
+            got_env[0] = env
+            start_response('204 No Content', [('Content-Length', '0')])
+            return []
+
+        internal_client.loadapp = lambda *a, **kw: fake_app
+
+        x = expirer.ObjectExpirer({})
+        ts = Timestamp('1234')
+        x.delete_actual_object('/path/to/object', ts, True)
+        self.assertNotIn('HTTP_X_IF_DELETE_AT', got_env[0])
+        self.assertNotIn('HTTP_X_BACKEND_CLEAN_EXPIRING_OBJECT_QUEUE',
+                         got_env[0])
+        self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'], ts.internal)

     def test_delete_actual_object_nourlquoting(self):
         # delete_actual_object should not do its own url quoting because
@@ -784,12 +858,41 @@ class TestObjectExpirer(TestCase):

         x = expirer.ObjectExpirer({})
         ts = Timestamp('1234')
-        x.delete_actual_object('/path/to/object name', ts)
+        x.delete_actual_object('/path/to/object name', ts, False)
         self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
         self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
                          got_env[0]['HTTP_X_IF_DELETE_AT'])
         self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')

+    def test_delete_actual_object_async_returns_expected_error(self):
+        def do_test(test_status, should_raise):
+            calls = [0]
+
+            def fake_app(env, start_response):
+                calls[0] += 1
+                calls.append(env['PATH_INFO'])
+                start_response(test_status, [('Content-Length', '0')])
+                return []
+
+            internal_client.loadapp = lambda *a, **kw: fake_app
+
+            x = expirer.ObjectExpirer({})
+            ts = Timestamp('1234')
+            if should_raise:
+                with self.assertRaises(internal_client.UnexpectedResponse):
+                    x.delete_actual_object('/path/to/object', ts, True)
+            else:
+                x.delete_actual_object('/path/to/object', ts, True)
+            self.assertEqual(calls[0], 1, calls)
+
+        # object was deleted and tombstone reaped
+        do_test('404 Not Found', False)
+        # object was overwritten *after* the original delete, or
+        # object was deleted but tombstone still exists, or ...
+        do_test('409 Conflict', False)
+        # Anything else, raise
+        do_test('400 Bad Request', True)
+
     def test_delete_actual_object_returns_expected_error(self):
         def do_test(test_status, should_raise):
             calls = [0]
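Both do_test() suites (async above, synchronous below) pin the same tolerance policy for the expirer's DELETE: 404 and 409 mean the work is already done or superseded, while anything else must propagate so the task is retried. A hedged sketch of that decision rule; the real logic lives in swift.obj.expirer and differs in detail:

# Hedged sketch of the tolerance rule the tests encode; not the actual
# swift implementation.
ACCEPTABLE = (404, 409)  # already gone / superseded: nothing left to do

def swallow_delete_response(status):
    # 2xx succeeded outright; 404/409 count as done for queue-popping
    # purposes; any other status should surface and keep the task queued.
    return 200 <= status < 300 or status in ACCEPTABLE

assert swallow_delete_response(404)
assert swallow_delete_response(409)
assert not swallow_delete_response(400)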
@@ -805,9 +908,9 @@ class TestObjectExpirer(TestCase):
             ts = Timestamp('1234')
             if should_raise:
                 with self.assertRaises(internal_client.UnexpectedResponse):
-                    x.delete_actual_object('/path/to/object', ts)
+                    x.delete_actual_object('/path/to/object', ts, False)
             else:
-                x.delete_actual_object('/path/to/object', ts)
+                x.delete_actual_object('/path/to/object', ts, False)
             self.assertEqual(calls[0], 1)

         # object was deleted and tombstone reaped
@@ -832,7 +935,7 @@ class TestObjectExpirer(TestCase):
         x = expirer.ObjectExpirer({})
         exc = None
         try:
-            x.delete_actual_object('/path/to/object', Timestamp('1234'))
+            x.delete_actual_object('/path/to/object', Timestamp('1234'), False)
         except Exception as err:
             exc = err
         finally:
@@ -845,7 +948,7 @@ class TestObjectExpirer(TestCase):
         x = expirer.ObjectExpirer({})
         x.swift.make_request = mock.Mock()
         x.swift.make_request.return_value.status_int = 204
-        x.delete_actual_object(name, timestamp)
+        x.delete_actual_object(name, timestamp, False)
         self.assertEqual(x.swift.make_request.call_count, 1)
         self.assertEqual(x.swift.make_request.call_args[0][1],
                          '/v1/' + urllib.parse.quote(name))
@@ -855,7 +958,7 @@ class TestObjectExpirer(TestCase):
         timestamp = Timestamp('1515544858.80602')
         x = expirer.ObjectExpirer({})
         x.swift.make_request = mock.MagicMock()
-        x.delete_actual_object(name, timestamp)
+        x.delete_actual_object(name, timestamp, False)
         self.assertEqual(x.swift.make_request.call_count, 1)
         header = 'X-Backend-Clean-Expiring-Object-Queue'
         self.assertEqual(
@@ -872,7 +975,8 @@ class TestObjectExpirer(TestCase):
         with mocked_http_conn(
                 200, 200, 200, give_connect=capture_requests) as fake_conn:
             x.pop_queue('a', 'c', 'o')
-            self.assertRaises(StopIteration, fake_conn.code_iter.next)
+            with self.assertRaises(StopIteration):
+                next(fake_conn.code_iter)
         for method, path in requests:
             self.assertEqual(method, 'DELETE')
             device, part, account, container, obj = utils.split_path(
@@ -881,6 +985,15 @@ class TestObjectExpirer(TestCase):
             self.assertEqual(container, 'c')
             self.assertEqual(obj, 'o')

+    def test_build_task_obj_round_trip(self):
+        ts = next(self.ts)
+        a = 'a1'
+        c = 'c2'
+        o = 'obj1'
+        args = (ts, a, c, o)
+        self.assertEqual(args, expirer.parse_task_obj(
+            expirer.build_task_obj(ts, a, c, o)))
+

 if __name__ == '__main__':
     main()

@@ -6812,6 +6812,34 @@ class TestObjectController(unittest.TestCase):
             tpool.execute = was_tpool_exe
             diskfile.DiskFileManager._get_hashes = was_get_hashes

+    def test_REPLICATE_pickle_protocol(self):
+
+        def fake_get_hashes(*args, **kwargs):
+            return 0, {1: 2}
+
+        def my_tpool_execute(func, *args, **kwargs):
+            return func(*args, **kwargs)
+
+        was_get_hashes = diskfile.DiskFileManager._get_hashes
+        was_tpool_exe = tpool.execute
+        try:
+            diskfile.DiskFileManager._get_hashes = fake_get_hashes
+            tpool.execute = my_tpool_execute
+            req = Request.blank('/sda1/p/suff',
+                                environ={'REQUEST_METHOD': 'REPLICATE'},
+                                headers={})
+            with mock.patch('swift.obj.server.pickle.dumps') as fake_pickle:
+                fake_pickle.return_value = b''
+                req.get_response(self.object_controller)
+                # This is the key assertion: starting in Python 3.0, the
+                # default protocol version is 3, but such pickles can't be read
+                # on Python 2. As long as we may need to talk to a Python 2
+                # process, we need to cap our protocol version.
+                fake_pickle.assert_called_once_with({1: 2}, protocol=2)
+        finally:
+            tpool.execute = was_tpool_exe
+            diskfile.DiskFileManager._get_hashes = was_get_hashes
+
     def test_REPLICATE_timeout(self):

         def fake_get_hashes(*args, **kwargs):
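The comment inside the new test carries the real lesson: Python 3 defaults to a pickle protocol that Python 2 cannot read, so REPLICATE hash responses must be capped at protocol=2 while py2 object servers may still be peers. A standalone demonstration, not from the commit:

# Standalone demonstration: protocol 2 is the newest pickle format a
# Python 2 peer can load (py3 defaults to protocol 3 or higher).
import pickle

hashes = {1: 2}
wire_blob = pickle.dumps(hashes, protocol=2)  # safe for py2 readers
assert pickle.loads(wire_blob) == hashes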
@@ -7493,6 +7521,8 @@ class TestObjectServer(unittest.TestCase):
             'devices': self.devices,
             'swift_dir': self.tempdir,
             'mount_check': 'false',
+            # hopefully 1s is long enough to improve gate reliability?
+            'client_timeout': 1,
         }
         self.logger = debug_logger('test-object-server')
         self.app = object_server.ObjectController(
@@ -8156,14 +8186,24 @@ class TestObjectServer(unittest.TestCase):
             conn.sock.fd._sock.close()
         else:
             conn.sock.fd._real_close()
-        # We've seen a bunch of failures here -- try waiting some non-zero
-        # amount of time.
-        sleep(0.01)

-        # and make sure it demonstrates the client disconnect
-        log_lines = self.logger.get_lines_for_level('info')
-        self.assertEqual(len(log_lines), 1)
-        self.assertIn(' 499 ', log_lines[0])
+        # the object server needs to recognize the socket is closed
+        # or at least timeout, we'll have to wait
+        timeout = time() + (self.conf['client_timeout'] + 1)
+        while True:
+            try:
+                # and make sure it demonstrates the client disconnect
+                log_lines = self.logger.get_lines_for_level('info')
+                self.assertEqual(len(log_lines), 1)
+            except AssertionError:
+                if time() < timeout:
+                    sleep(0.01)
+                else:
+                    raise
+            else:
+                break
+        status = log_lines[0].split()[7]
+        self.assertEqual(status, '499')

         # verify successful object data and durable state file write
         put_timestamp = context['put_timestamp']
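The rewritten disconnect assertion above is a poll-until-deadline loop: keep retrying a flaky check with short sleeps until a deadline derived from the timeout under test, then let the failure surface. Distilled into a hypothetical helper (the diff inlines the pattern instead of using one):

# Hypothetical helper distilling the retry loop above; not part of the
# commit, which inlines this logic in the test.
import time

def wait_until(predicate, timeout, interval=0.01):
    deadline = time.time() + timeout
    while not predicate():
        if time.time() >= deadline:
            return False
        time.sleep(interval)
    return True

# e.g.: wait_until(lambda: len(log_lines()) == 1, timeout=2), where
# log_lines is whatever yields the captured log (hypothetical).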
@@ -99,7 +99,7 @@ class PatchedObjControllerApp(proxy_server.Application):

 def _fake_get_container_info(env, app, swift_source=None):
     _vrs, account, container, _junk = utils.split_path(
-        env['PATH_INFO'], 3, 4)
+        swob.wsgi_to_str(env['PATH_INFO']), 3, 4)

     # Seed the cache with our container info so that the real
     # get_container_info finds it.
@@ -321,7 +321,7 @@ class TestController(unittest.TestCase):
             self.controller.account_info(self.account, self.request)
             set_http_connect(201, raise_timeout_exc=True)
             self.controller._make_request(
-                nodes, partition, 'POST', '/', '', '',
+                nodes, partition, 'POST', '/', '', '', None,
                 self.controller.app.logger.thread_locals)

     # tests if 200 is cached and used
@@ -668,6 +668,35 @@ class TestProxyServer(unittest.TestCase):
             Request.blank('/v1/a', environ={'REQUEST_METHOD': '!invalid'}))
         self.assertEqual(resp.status, '405 Method Not Allowed')

+    def test_private_method_request(self):
+        baseapp = proxy_server.Application({},
+                                           FakeMemcache(),
+                                           container_ring=FakeRing(),
+                                           account_ring=FakeRing())
+        baseapp.logger = debug_logger()
+        resp = baseapp.handle_request(
+            Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'UPDATE'}))
+        self.assertEqual(resp.status, '405 Method Not Allowed')
+        # Note that UPDATE definitely *isn't* advertised
+        self.assertEqual(sorted(resp.headers['Allow'].split(', ')), [
+            'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
+
+        # But with appropriate (internal-only) overrides, you can still use it
+        resp = baseapp.handle_request(
+            Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'UPDATE'},
+                          headers={'X-Backend-Allow-Private-Methods': 'True',
+                                   'X-Backend-Storage-Policy-Index': '0'}))
+        # Now we actually make the requests, but there aren't any nodes
+        self.assertEqual(resp.status, '503 Service Unavailable')
+
+        # Bad method with overrides advertises private methods
+        resp = baseapp.handle_request(
+            Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'BOGUS'},
+                          headers={'X-Backend-Allow-Private-Methods': '1'}))
+        self.assertEqual(resp.status, '405 Method Not Allowed')
+        self.assertEqual(sorted(resp.headers['Allow'].split(', ')), [
+            'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT', 'UPDATE'])
+
     def test_calls_authorize_allow(self):
         called = [False]

tox.ini
@@ -13,7 +13,7 @@ deps =
   -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
   -r{toxinidir}/requirements.txt
   -r{toxinidir}/test-requirements.txt
-commands = find . ( -type f -o -type l ) -name "*.py[c|o]" -delete
+commands = find . ( -type f -o -type l ) -name "*.py[co]" -delete
            find . -type d -name "__pycache__" -delete
            nosetests {posargs:test/unit}
 whitelist_externals = find
@@ -44,6 +44,7 @@ commands =
         test/unit/common/middleware/s3api/ \
         test/unit/common/middleware/test_account_quotas.py \
         test/unit/common/middleware/test_acl.py \
+        test/unit/common/middleware/test_bulk.py \
         test/unit/common/middleware/test_catch_errors.py \
         test/unit/common/middleware/test_cname_lookup.py \
         test/unit/common/middleware/test_container_sync.py \
@@ -64,8 +65,10 @@ commands =
         test/unit/common/middleware/test_ratelimit.py \
         test/unit/common/middleware/test_read_only.py \
         test/unit/common/middleware/test_recon.py \
+        test/unit/common/middleware/test_slo.py \
         test/unit/common/middleware/test_subrequest_logging.py \
         test/unit/common/middleware/test_staticweb.py \
+        test/unit/common/middleware/test_symlink.py \
         test/unit/common/middleware/test_tempauth.py \
         test/unit/common/middleware/test_versioned_writes.py \
         test/unit/common/middleware/test_xprofile.py \
@@ -92,6 +95,8 @@ commands =
         test/unit/common/test_wsgi.py \
         test/unit/container \
         test/unit/obj/test_auditor.py \
+        test/unit/obj/test_diskfile.py \
+        test/unit/obj/test_expirer.py \
         test/unit/obj/test_replicator.py \
         test/unit/obj/test_server.py \
         test/unit/obj/test_updater.py \
@@ -117,6 +122,15 @@ commands = {[testenv:pep8]commands}
 basepython = python2.7
 commands = ./.functests {posargs}

+[testenv:func-py3]
+basepython = python3
+# Need to pick up (unreleased as of 2019-03) commit:
+# https://github.com/eventlet/eventlet/commit/f0bc79e
+commands =
+    pip install -U eventlet@git+https://github.com/eventlet/eventlet.git
+    nosetests {posargs: \
+        test/functional/tests.py}
+
 [testenv:func-encryption]
 commands = ./.functests {posargs}
 setenv = SWIFT_TEST_IN_PROCESS=1