Merge remote-tracking branch 'origin/master' into feature/losf

Change-Id: I5eb60d51b880a8e8f0578f870f6f237b61a29857
This commit is contained in:
Tim Burke 2019-10-04 21:03:48 -07:00
commit bfa8e9feb5
94 changed files with 2413 additions and 1516 deletions

.gitignore vendored
View File

@ -22,3 +22,4 @@ subunit.log
test/probe/.noseids
RELEASENOTES.rst
releasenotes/notes/reno.cache
/tools/playbooks/**/*.retry

View File

@ -175,7 +175,6 @@
Uses tox with the ``func-ec`` environment.
It sets TMPDIR to an XFS mount point created via
tools/test-setup.sh.
branches: ^(?!stable/ocata).*$
vars:
tox_envlist: func-ec
@ -267,8 +266,10 @@
parent: swift-dsvm-functional
description: |
Setup a Swift/Keystone environment under py3 and run Swift's func tests
(but under py2).
(also under py3).
vars:
# This tox env gets run twice; once for Keystone and once for tempauth
tox_envlist: func-py3
devstack_localrc:
USE_PYTHON3: true
# explicitly clear swift's default-disabled status
@ -292,6 +293,7 @@
pre-run:
- tools/playbooks/common/install_dependencies.yaml
- tools/playbooks/saio_single_node_setup/setup_saio.yaml
- tools/playbooks/saio_single_node_setup/add_s3api.yaml
- tools/playbooks/saio_single_node_setup/make_rings.yaml
run: tools/playbooks/ceph-s3tests/run.yaml
post-run:
@ -381,6 +383,12 @@
vars:
previous_swift_version: origin/stable/rocky
- job:
name: swift-multinode-rolling-upgrade-stein
parent: swift-multinode-rolling-upgrade
vars:
previous_swift_version: origin/stable/stein
- job:
name: swift-tox-lower-constraints
parent: openstack-tox-lower-constraints
@ -574,6 +582,11 @@
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- tempest-ipv6-only:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- grenade-py3:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
@ -635,6 +648,11 @@
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- tempest-ipv6-only:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
- ^test/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG)$
- grenade-py3:
irrelevant-files:
- ^(api-ref|doc|releasenotes)/.*$
@ -648,6 +666,7 @@
- swift-tox-func-ec-centos-7
- swift-multinode-rolling-upgrade-queens
- swift-multinode-rolling-upgrade-rocky
- swift-multinode-rolling-upgrade-stein
post:
jobs:

View File

@ -58,6 +58,7 @@ Anh Tran (anhtt@vn.fujitsu.com)
Ankur Gupta (ankur.gupta@intel.com)
Anne Gentle (anne@openstack.org)
Arnaud JOST (arnaud.jost@ovh.net)
arzhna (arzhna@gmail.com)
Atsushi Sakai (sakaia@jp.fujitsu.com)
Azhagu Selvan SP (tamizhgeek@gmail.com)
baiwenteng (baiwenteng@inspur.com)
@ -107,6 +108,7 @@ Constantine Peresypkin (constantine.peresypk@rackspace.com)
Corey Bryant (corey.bryant@canonical.com)
Cory Wright (cory.wright@rackspace.com)
Cristian A Sanchez (cristian.a.sanchez@intel.com)
CY Chiang (cychiang@cht.com.tw)
Cyril Roelandt (cyril@redhat.com)
Dae S. Kim (dae@velatum.com)
Daisuke Morita (morita.daisuke@ntti3.com)
@ -173,6 +175,7 @@ gecong1973 (ge.cong@zte.com.cn)
gengchc2 (geng.changcai2@zte.com.cn)
Gerard Gine (ggine@swiftstack.com)
Gerry Drudy (gerry.drudy@hpe.com)
Ghanshyam Mann (gmann@ghanshyammann.com)
Gil Vernik (gilv@il.ibm.com)
Gilles Biannic (gilles.biannic@corp.ovh.com)
Gleb Samsonov (sams-gleb@yandex.ru)
@ -307,6 +310,7 @@ Ngo Quoc Cuong (cuongnq@vn.fujitsu.com)
Nguyen Hai (nguyentrihai93@gmail.com)
Nguyen Hung Phuong (phuongnh@vn.fujitsu.com)
Nguyen Phuong An (AnNP@vn.fujitsu.com)
Nguyen Quoc Viet (nguyenqviet98@gmail.com)
Nicolas Helgeson (nh202b@att.com)
Nicolas Trangez (ikke@nicolast.be)
Ning Zhang (ning@zmanda.com)
@ -323,6 +327,7 @@ Paul McMillan (paul.mcmillan@nebula.com)
Pavel Kvasnička (pavel.kvasnicka@firma.seznam.cz)
Pawel Palucki (pawel.palucki@gmail.com)
Pearl Yajing Tan (pearl.y.tan@seagate.com)
pengyuesheng (pengyuesheng@gohighsec.com)
Pete Zaitcev (zaitcev@kotori.zaitcev.us)
Peter Lisák (peter.lisak@gmail.com)
Peter Portante (peter.portante@redhat.com)

View File

@ -1,3 +1,61 @@
swift (2.23.0, OpenStack Train)
* Python 3.6 and 3.7 are now fully supported. Several py3-related
fixes are included:
* Removed a request-smuggling vector when running a mixed
py2/py3 cluster.
* Allow fallocate_reserve to be specified as a percentage.
* Fixed listings for sharded containers.
* Fixed non-ASCII account metadata handling.
* Fixed rsync output parsing.
* Fixed some title-casing of headers.
If you've been testing Swift on Python 3, upgrade at your earliest
convenience.
* Added "static symlinks", which perform some validation as they
follow redirects and include more information about their target
in container listings.
* Multi-character strings may now be used as delimiters in account
and container listings.
* Sharding improvements
* Container metadata related to sharding are now removed when no
longer needed.
* Empty container databases (such as might be created on handoffs)
now shard much more quickly.
* The proxy-server now ignores 404 responses from handoffs that have
no data when deciding on the correct response for object requests,
similar to what it already does for account and container requests.
* Static Large Object sizes in listings for versioned containers are
now more accurate.
* When refetching Static Large Object manifests, non-manifest responses
are now handled better.
* S3 API now translates 503 Service Unavailable responses to a more
S3-like response instead of raising an error.
* Improved proxy-to-backend requests to be more RFC-compliant.
* Dependency update: eventlet must be at least 0.25.0. This also
dragged forward minimum-supported versions of dnspython (1.15.0),
greenlet (0.3.2), and six (1.10.0).
* Various other minor bug fixes and improvements.
swift (2.22.0)
* Experimental support for Python 3.6 and 3.7 is now available.
@ -73,7 +131,8 @@ swift (2.22.0)
* Various other minor bug fixes and improvements.
swift (2.21.0, OpenStack Stein release)
swift (2.21.0, OpenStack Stein)
* Change the behavior of the EC reconstructor to perform a
fragment rebuild to a handoff node when a primary peer responds
@ -131,6 +190,7 @@ swift (2.21.0, OpenStack Stein release)
* Various other minor bug fixes and improvements.
swift (2.20.0)
* S3 API compatibility updates
@ -237,6 +297,7 @@ swift (2.20.0)
* Various other minor bug fixes and improvements.
swift (2.19.1, rocky stable backports)
* Prevent PyKMIP's kmip_protocol logger from logging at DEBUG.
@ -251,6 +312,7 @@ swift (2.19.1, rocky stable backports)
* Fixed a bug in how Swift uses eventlet that was exposed under high
concurrency.
swift (2.19.0, OpenStack Rocky)
* TempURLs now support IP range restrictions. Please see
@ -341,6 +403,7 @@ swift (2.19.0, OpenStack Rocky)
* Various other minor bug fixes and improvements.
swift (2.18.0)
* Added container sharding, an operator controlled feature that
@ -414,6 +477,7 @@ swift (2.18.0)
* Various other minor bug fixes and improvements.
swift (2.17.1, queens stable backports)
* Fix SLO delete for accounts with non-ASCII names.
@ -424,6 +488,7 @@ swift (2.17.1, queens stable backports)
* Fixed a bug in how Swift uses eventlet that was exposed under high
concurrency.
swift (2.17.0, OpenStack Queens)
* Added symlink objects support.
@ -616,6 +681,7 @@ swift (2.16.0)
* Various other minor bug fixes and improvements.
swift (2.15.2, pike stable backports)
* Fixed a cache invalidation issue related to GET and PUT requests to
@ -636,6 +702,7 @@ swift (2.15.2, pike stable backports)
* Send ETag header in 206 Partial Content responses to SLO reads.
swift (2.15.1, OpenStack Pike)
* Fixed a bug introduced in 2.15.0 where the object reconstructor
@ -3062,6 +3129,7 @@ swift (1.4.0)
* Stats uploaders now allow overrides for source_filename_pattern and
new_log_cutoff values.
----
Changelog entries for previous versions are incomplete

View File

@ -23,6 +23,7 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
from swift import __version__
import subprocess
@ -154,20 +155,22 @@ pygments_style = 'sphinx'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
if 'SOURCE_DATE_EPOCH' in os.environ:
now = float(os.environ.get('SOURCE_DATE_EPOCH'))
html_last_updated_fmt = datetime.datetime.utcfromtimestamp(now).isoformat()
else:
if not isinstance(html_last_updated_fmt, str):
# for py3
html_last_updated_fmt = html_last_updated_fmt.decode('ascii')
git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
"-n1"]
try:
html_last_updated_fmt = subprocess.Popen(
git_cmd, stdout=subprocess.PIPE).communicate()[0]
except OSError:
warnings.warn('Cannot get last updated time from git repository. '
'Not setting "html_last_updated_fmt".')
else:
if not isinstance(html_last_updated_fmt, str):
# for py3
html_last_updated_fmt = html_last_updated_fmt.decode('ascii')
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
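For reproducible builds, here is a quick check of the ``SOURCE_DATE_EPOCH``
handling above (the timestamp value is an arbitrary assumption):

.. code-block:: python

    import datetime
    import os

    # Pin the docs' "last updated" stamp, as reproducible-build tooling does.
    os.environ['SOURCE_DATE_EPOCH'] = '1570000000'

    now = float(os.environ['SOURCE_DATE_EPOCH'])
    print(datetime.datetime.utcfromtimestamp(now).isoformat())
    # -> 2019-10-02T07:06:40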

View File

@ -16,9 +16,7 @@
# B106 : hardcoded_password_funcarg
# B107 : hardcoded_password_default
# B108 : hardcoded_tmp_directory
# B109 : password_config_option_not_marked_secret
# B110 : try_except_pass
# B111 : execute_with_run_as_root_equals_true
# B112 : try_except_continue
# B201 : flask_debug_true
# B301 : pickle
@ -42,6 +40,9 @@
# B319 : xml_bad_pulldom
# B320 : xml_bad_etree
# B321 : ftplib
# B322 : input
# B323 : unverified_context
# B325 : tempnam
# B401 : import_telnetlib
# B402 : import_ftplib
# B403 : import_pickle
@ -54,12 +55,15 @@
# B410 : import_lxml
# B411 : import_xmlrpclib
# B412 : import_httpoxy
# B413 : import_pycrypto
# B414 : import_pycryptodome
# B501 : request_with_no_cert_validation
# B502 : ssl_with_bad_version
# B503 : ssl_with_bad_defaults
# B504 : ssl_with_no_version
# B505 : weak_cryptographic_key
# B506 : yaml_load
# B507 : ssh_no_host_key_verification
# B601 : paramiko_calls
# B602 : subprocess_popen_with_shell_equals_true
# B603 : subprocess_without_shell_equals_true
@ -69,8 +73,11 @@
# B607 : start_process_with_partial_path
# B608 : hardcoded_sql_expressions
# B609 : linux_commands_wildcard_injection
# B610 : django_extra_used
# B611 : django_rawsql_used
# B701 : jinja2_autoescape_false
# B702 : use_of_mako_templates
# B703 : django_mark_safe
# (optional) list included test IDs here, eg '[B101, B406]':
tests: [B102, B103, B302, B306, B308, B309, B310, B401, B501, B502, B506, B601, B602, B609]

View File

@ -53,7 +53,7 @@ Example (sends SIGTERM to all orphaned Swift processes older than two hours):
for root, directories, files in os.walk(options.run_dir):
for name in files:
if name.endswith('.pid'):
if name.endswith(('.pid', '.pid.d')):
pids.append(open(os.path.join(root, name)).read().strip())
pids.extend(subprocess.Popen(
['ps', '--ppid', pids[-1], '-o', 'pid', '--no-headers'],

View File

@ -8,3 +8,4 @@ openstackdocstheme>=1.30.0 # Apache-2.0
reno>=1.8.0 # Apache-2.0
os-api-ref>=1.0.0 # Apache-2.0
python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD

View File

@ -55,7 +55,8 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'openstackdocstheme']
'openstackdocstheme',
'sphinxcontrib.rsvgconverter']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
@ -72,7 +73,12 @@ master_doc = 'index'
# General information about the project.
project = u'Swift'
copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year
if 'SOURCE_DATE_EPOCH' in os.environ:
now = float(os.environ.get('SOURCE_DATE_EPOCH'))
now = datetime.datetime.utcfromtimestamp(now)
else:
now = datetime.date.today()
copyright = u'%d, OpenStack Foundation' % now.year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -218,7 +224,7 @@ htmlhelp_basename = 'swiftdoc'
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Swift.tex', u'Swift Documentation',
('index', 'doc-swift.tex', u'Swift Documentation',
u'Swift Team', 'manual'),
]
@ -239,6 +245,8 @@ latex_documents = [
# If false, no module index is generated.
# latex_use_modindex = True
latex_use_xindy = False
# -- Options for openstackdocstheme -------------------------------------------
repository_name = 'openstack/swift'
bug_project = 'swift'

View File

@ -11,14 +11,12 @@ most Linux platforms.
Swift is written in Python and has these dependencies:
* Python 2.7
* Python (2.7, 3.6, or 3.7)
* rsync 3.0
* The Python packages listed in `the requirements file <https://github.com/openstack/swift/blob/master/requirements.txt>`_
* Testing additionally requires `the test dependencies <https://github.com/openstack/swift/blob/master/test-requirements.txt>`_
* Testing requires `these distribution packages <https://github.com/openstack/swift/blob/master/bindep.txt>`_
There is no current support for Python 3.
-----------
Development
-----------

View File

@ -33,7 +33,7 @@ be found on the `OpenStack wiki`_ and at http://docs.openstack.org.
.. toctree::
:maxdepth: 1
:maxdepth: 2
getting_started

View File

@ -75,17 +75,17 @@ of ``.rlistings``, an error will occur if used with
============================== ================================================
Element Description
============================== ================================================
``.r:*`` Any user has access to objects. No token is
.r:* Any user has access to objects. No token is
required in the request.
``.r:<referrer>`` The referrer is granted access to objects. The
.r:<referrer> The referrer is granted access to objects. The
referrer is identified by the ``Referer``
request header in the request. No token is
required.
``.r:-<referrer>`` This syntax (with "-" prepended to the
.r:-<referrer> This syntax (with "-" prepended to the
referrer) is supported. However, it does not
deny access if another element (e.g., ``.r:*``)
grants access.
``.rlistings`` Any user can perform a HEAD or GET operation
.rlistings Any user can perform a HEAD or GET operation
on the container provided the user also has
read access on objects (e.g., also has ``.r:*``
or ``.r:<referrer>``. No token is required.
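For example, a container can be made publicly readable by combining
``.r:*`` with ``.rlistings``. A minimal sketch using python-swiftclient
against a SAIO tempauth endpoint (URL and credentials are assumptions):

.. code-block:: python

    from swiftclient import client

    # Authenticate against a local tempauth endpoint (SAIO defaults).
    url, token = client.get_auth(
        'http://127.0.0.1:8080/auth/v1.0', 'test:tester', 'testing')

    # Grant anonymous object reads and container listings.
    client.post_container(url, token, 'public-container',
                          headers={'X-Container-Read': '.r:*,.rlistings'})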
@ -106,22 +106,22 @@ to take effect.
============================== ================================================
Element Description
============================== ================================================
``<project-id>:<user-id>`` The specified user, provided a token
<project-id>:<user-id> The specified user, provided a token
scoped to the project is included
in the request, is granted access.
Access to the container is also granted
when used in ``X-Container-Read``.
``<project-id>:*`` Any user with a role in the specified Keystone
<project-id>:\* Any user with a role in the specified Keystone
project has access. A token scoped to the
project must be included in the request.
Access to the container is also granted
when used in ``X-Container-Read``.
``*:<user-id>`` The specified user has access. A token
\*:<user-id> The specified user has access. A token
for the user (scoped to any
project) must be included in the request.
Access to the container is also granted
when used in ``X-Container-Read``.
``*:*`` Any user has access.
\*:\* Any user has access.
Access to the container is also granted
when used in ``X-Container-Read``.
The ``*:*`` element differs from the ``.r:*``
@ -131,7 +131,7 @@ Element Description
does not require a token. In addition,
``.r:*`` does not grant access to the
container listing.
``<role_name>`` A user with the specified role *name* on the
<role_name> A user with the specified role *name* on the
project within which the container is stored is
granted access. A user token scoped to the
project must be included in the request. Access
@ -142,7 +142,7 @@ Element Description
.. note::
Keystone project (tenant) or user *names* (i.e.,
``<project-name>:<user-name``) must no longer be
``<project-name>:<user-name>``) must no longer be
used because with the introduction
of domains in Keystone, names are not globally unique. You should
use user and project *ids* instead.
@ -167,7 +167,7 @@ the elements described in :ref:`acl_common_elements`.
============================== ================================================
Element Description
============================== ================================================
``<user-name>`` The named user is granted access. The
<user-name> The named user is granted access. The
wildcard ("*") character is not supported.
A token from the user must be included in the
request.
@ -407,4 +407,4 @@ admin These identities have "swift_owner" privileges. A user with
For more details, see :mod:`swift.common.middleware.tempauth`. For details
on the ACL format, see :mod:`swift.common.middleware.acl`.
on the ACL format, see :mod:`swift.common.middleware.acl`.

View File

@ -82,30 +82,33 @@ List of Devices
The list of devices is known internally to the Ring class as ``devs``. Each
item in the list of devices is a dictionary with the following keys:
====== ======= ==============================================================
id integer The index into the list of devices.
zone integer The zone in which the device resides.
region integer The region in which the zone resides.
weight float The relative weight of the device in comparison to other
devices. This usually corresponds directly to the amount of
disk space the device has compared to other devices. For
instance a device with 1 terabyte of space might have a weight
of 100.0 and another device with 2 terabytes of space might
have a weight of 200.0. This weight can also be used to bring
back into balance a device that has ended up with more or less
data than desired over time. A good average weight of 100.0
allows flexibility in lowering the weight later if necessary.
ip string The IP address or hostname of the server containing the device.
port int The TCP port on which the server process listens to serve
requests for the device.
device string The on-disk name of the device on the server.
For example: ``sdb1``
meta string A general-use field for storing additional information for the
device. This information isn't used directly by the server
processes, but can be useful in debugging. For example, the
date and time of installation and hardware manufacturer could
be stored here.
====== ======= ==============================================================
.. table::
:widths: 10 10 80
====== ======= ==============================================================
id integer The index into the list of devices.
zone integer The zone in which the device resides.
region integer The region in which the zone resides.
weight float The relative weight of the device in comparison to other
devices. This usually corresponds directly to the amount of
disk space the device has compared to other devices. For
instance a device with 1 terabyte of space might have a weight
of 100.0 and another device with 2 terabytes of space might
have a weight of 200.0. This weight can also be used to bring
back into balance a device that has ended up with more or less
data than desired over time. A good average weight of 100.0
allows flexibility in lowering the weight later if necessary.
ip string The IP address or hostname of the server containing the device.
port int The TCP port on which the server process listens to serve
requests for the device.
device string The on-disk name of the device on the server.
For example: ``sdb1``
meta string A general-use field for storing additional information for the
device. This information isn't used directly by the server
processes, but can be useful in debugging. For example, the
date and time of installation and hardware manufacturer could
be stored here.
====== ======= ==============================================================
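As an illustration, a device entry with the fields above might be added to a
builder like this (a sketch; all values are hypothetical):

.. code-block:: python

    from swift.common.ring import RingBuilder

    builder = RingBuilder(part_power=10, replicas=3, min_part_hours=1)
    builder.add_dev({'region': 1, 'zone': 1, 'weight': 100.0,
                     'ip': '10.0.0.1', 'port': 6200, 'device': 'sdb1',
                     'meta': 'installed 2019-10, vendor X'})
    builder.rebalance()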
.. note::
The list of devices may contain holes, or indexes set to ``None``, for

View File

@ -0,0 +1,74 @@
---
features:
- |
Python 3.6 and 3.7 are now fully supported. If you've been testing Swift
on Python 3, upgrade at your earliest convenience.
- |
Added "static symlinks", which perform some validation as they
follow redirects and include more information about their target
in container listings. For more information, see the `symlink middleware
<https://docs.openstack.org/swift/latest/middleware.html#symlink>`__
section of the documentation.
- |
Multi-character strings may now be used as delimiters in account
and container listings.
upgrade:
- |
**Dependency update**: ``eventlet`` must be at least 0.25.0. This also
dragged forward minimum-supported versions of ``dnspython`` (1.15.0),
``greenlet`` (0.3.2), and ``six`` (1.10.0).
fixes:
- |
Python 3 fixes:
* Removed a request-smuggling vector when running a mixed
py2/py3 cluster.
* Allow ``fallocate_reserve`` to be specified as a percentage.
* Fixed listings for sharded containers.
* Fixed non-ASCII account metadata handling.
* Fixed ``rsync`` output parsing.
* Fixed some title-casing of headers.
If you've been testing Swift on Python 3, upgrade at your earliest
convenience.
- |
Sharding improvements
* Container metadata related to sharding are now removed when no
longer needed.
* Empty container databases (such as might be created on handoffs)
now shard much more quickly.
- |
The ``proxy-server`` now ignores 404 responses from handoffs that have
no data when deciding on the correct response for object requests,
similar to what it already does for account and container requests.
- |
Static Large Object sizes in listings for versioned containers are
now more accurate.
- |
When refetching Static Large Object manifests, non-manifest responses
are now handled better.
- |
S3 API now translates ``503 Service Unavailable`` responses to a more
S3-like response instead of raising an error.
- |
Improved proxy-to-backend requests to be more RFC-compliant.
- |
Various other minor bug fixes and improvements.
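To illustrate the multi-character delimiter feature, a listing sketch with
python-swiftclient (endpoint, credentials, and object names are assumptions):

.. code-block:: python

    from swiftclient import client

    url, token = client.get_auth(
        'http://127.0.0.1:8080/auth/v1.0', 'test:tester', 'testing')

    # With objects named like 'photos--2019--a.jpg', listing with the
    # two-character delimiter '--' groups them under 'photos--'.
    headers, listing = client.get_container(
        url, token, 'my-container', delimiter='--')
    for item in listing:
        print(item.get('subdir') or item['name'])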

View File

@ -7,6 +7,8 @@
current
train
stein
rocky

View File

@ -1,75 +0,0 @@
# Sungjin Kang <gang.sungjin@gmail.com>, 2017. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-08-25 00:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-02-07 03:09+0000\n"
"Last-Translator: Sungjin Kang <gang.sungjin@gmail.com>\n"
"Language-Team: Korean (South Korea)\n"
"Language: ko_KR\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=1; plural=0\n"
msgid "2.10.0"
msgstr "2.10.0"
msgid "2.10.1"
msgstr "2.10.1"
msgid "2.11.0"
msgstr "2.11.0"
msgid "2.12.0"
msgstr "2.12.0"
msgid "Bug Fixes"
msgstr "버그 수정"
msgid "Critical Issues"
msgstr "치명적인 이슈"
msgid "Current (Unreleased) Release Notes"
msgstr "현재 (릴리드전) 릴리즈 노트"
msgid "New Features"
msgstr "새로운 기능"
msgid "Newton Series Release Notes"
msgstr "Newton 시리즈 릴리즈 노트"
msgid "Other Notes"
msgstr "기타 기능"
msgid "Swift Release Notes"
msgstr "Swift 릴리즈 노트"
msgid ""
"Update dnspython dependency to 1.14, removing the need to have separate "
"dnspython dependencies for Py2 and Py3."
msgstr ""
"Dnspython 의존성을 1.14로 업그래이드 하여 Py2 와 Py3 에 대한 별도의 "
"dnspython 의존성을 제거할 필요가 없습니다."
msgid "Updated the PyECLib dependency to 1.3.1."
msgstr "PyECLib 의존성을 1.3.1 로 업그레이드 하였습니다."
msgid "Upgrade Notes"
msgstr "업그레이드 노트"
msgid "Various other minor bug fixes and improvements."
msgstr "다양한 다른 마이너 버그 수정 및 개선."
msgid ""
"WARNING: If you are using the ISA-L library for erasure codes, please "
"upgrade to liberasurecode 1.3.1 (or later) as soon as possible. If you are "
"using isa_l_rs_vand with more than 4 parity, please read https://bugs."
"launchpad.net/swift/+bug/1639691 and take necessary action."
msgstr ""
"경고: Erasure 코드에서 사용하는 ISA-L 라이브러리를 사용하는 경우, 최대한 빨"
"리 liberasurecode 1.3.1 (또는 그 이상) 으로 업그레이드하십시오. 4 parity 보"
"다 큰 isa_l_rs_vand 를 사용하는 경우, https://bugs.launchpad.net/swift/"
"+bug/1639691 을 읽고 필요한 조치를 취하십시오."

View File

@ -0,0 +1,6 @@
==========================
Train Series Release Notes
==========================
.. release-notes::
:branch: stable/train

View File

@ -457,12 +457,16 @@ class AccountBroker(DatabaseBroker):
end = name.find(delimiter, len(prefix))
if end > 0:
if reverse:
end_marker = name[:end + 1]
end_marker = name[:end + len(delimiter)]
else:
marker = name[:end] + chr(ord(delimiter) + 1)
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + 1]
dir_name = name[:end + len(delimiter)]
if dir_name != orig_marker:
results.append([dir_name, 0, 0, '0', 1])
curs.close()
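A standalone sketch (hypothetical helper, not part of the diff) of the marker
arithmetic above: to skip past every name sharing ``name[:end] + delimiter``,
the delimiter's final character is bumped to the next code point. The
container backend change further down uses the same arithmetic.

.. code-block:: python

    def next_marker(name, end, delimiter):
        # Smallest string sorting after every name that starts with
        # name[:end] + delimiter.
        return ''.join([
            name[:end],
            delimiter[:-1],
            chr(ord(delimiter[-1:]) + 1),
        ])

    # '-' is 0x2D, so bumping it yields '.': all 'photos--*' names sort
    # before the new marker 'photos-.'.
    print(next_marker('photos--2019--a.jpg', 6, '--'))  # -> photos-.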

View File

@ -207,9 +207,6 @@ class AccountController(BaseStorageServer):
drive, part, account = split_and_validate_path(req, 3)
prefix = get_param(req, 'prefix')
delimiter = get_param(req, 'delimiter')
if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
# delimiters can be made more flexible later
return HTTPPreconditionFailed(body='Bad delimiter')
limit = constraints.ACCOUNT_LISTING_LIMIT
given_limit = get_param(req, 'limit')
reverse = config_true_value(get_param(req, 'reverse'))

View File

@ -1167,7 +1167,7 @@ swift-ring-builder <ring_file> write_builder [min_part_hours]
'parts': ring.partition_count,
'devs': ring.devs,
'devs_changed': False,
'version': 0,
'version': ring.version or 0,
'_replica2part2dev': ring._replica2part2dev_id,
'_last_part_moves_epoch': None,
'_last_part_moves': None,

View File

@ -34,7 +34,7 @@ import socket
import eventlet
from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \
HTTPResponse, HTTPSConnection, _UNKNOWN
from six.moves.urllib.parse import quote
from six.moves.urllib.parse import quote, parse_qsl, urlencode
import six
if six.PY2:
@ -292,6 +292,15 @@ def http_connect_raw(ipaddr, port, method, path, headers=None,
else:
conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port))
if query_string:
# Round trip to ensure proper quoting
if six.PY2:
query_string = urlencode(parse_qsl(
query_string, keep_blank_values=True))
else:
query_string = urlencode(
parse_qsl(query_string, keep_blank_values=True,
encoding='latin1'),
encoding='latin1')
path += '?' + query_string
conn.path = path
conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
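The round trip normalizes quoting before the query string goes on the wire.
A py3-only sketch of the effect (the sample query string is an assumption):

.. code-block:: python

    from urllib.parse import parse_qsl, urlencode

    qs = 'prefix=caf%C3%A9&blank='
    # Decode with latin1 so arbitrary bytes survive, then re-encode;
    # keep_blank_values preserves valueless parameters like 'blank='.
    pairs = parse_qsl(qs, keep_blank_values=True, encoding='latin1')
    print(urlencode(pairs, encoding='latin1'))
    # -> prefix=caf%C3%A9&blank=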

View File

@ -22,6 +22,7 @@ import signal
import time
import subprocess
import re
import six
from swift import gettext_ as _
import tempfile
@ -721,6 +722,8 @@ class Server(object):
else:
output = proc.stdout.read()
proc.stdout.close()
if not six.PY2:
output = output.decode('utf8', 'backslashreplace')
if kwargs.get('once', False):
# if you don't want once to wait you can send it to the

View File

@ -543,10 +543,15 @@ class UploadController(Controller):
# Iterate over the segment objects and delete them individually
objects = json.loads(resp.body)
for o in objects:
container = req.container_name + MULTIUPLOAD_SUFFIX
obj = bytes_to_wsgi(o['name'].encode('utf-8'))
req.get_response(self.app, container=container, obj=obj)
while objects:
for o in objects:
container = req.container_name + MULTIUPLOAD_SUFFIX
obj = bytes_to_wsgi(o['name'].encode('utf-8'))
req.get_response(self.app, container=container, obj=obj)
query['marker'] = objects[-1]['name']
resp = req.get_response(self.app, 'GET', container, '',
query=query)
objects = json.loads(resp.body)
return HTTPNoContent()
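The loop above pages through the segment container with a marker so that
aborting a multipart upload works past a single listing's limit. A generic
sketch of the pattern (``list_page`` and ``delete`` are hypothetical helpers):

.. code-block:: python

    def delete_all_segments(list_page, delete):
        # list_page(marker) returns up to one page of {'name': ...} dicts;
        # an empty page means the listing is exhausted.
        objects = list_page(marker=None)
        while objects:
            for obj in objects:
                delete(obj['name'])
            objects = list_page(marker=objects[-1]['name'])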

View File

@ -19,12 +19,26 @@ To enable this middleware to your configuration, add the s3api middleware
in front of the auth middleware. See ``proxy-server.conf-sample`` for more
detail and configurable options.
To set up your client, the access key will be the concatenation of the
account and user strings that should look like test:tester, and the
secret access key is the account password. The host should also point
to the swift storage hostname.
To set up your client, ensure you are using the tempauth or keystone auth
system for your swift project.
When running swift in a SAIO environment, make sure the tempauth middleware
is configured in ``proxy-server.conf``. The access key will be the
concatenation of the account and user strings, e.g. test:tester, and the
secret access key is the account password. The host should also point to
the swift storage hostname.
An example client using the python boto library is as follows::
An example tempauth configuration:
.. code-block:: ini
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing
An example client using tempauth with the python boto library is as follows:
.. code-block:: python
from boto.s3.connection import S3Connection
connection = S3Connection(
@ -35,6 +49,39 @@ An example client using the python boto library is as follows::
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
If you are using keystone auth, you need EC2 credentials, which can
be downloaded from the API Endpoints tab of the dashboard or created
with the openstack ec2 command.
Here is an example of creating an EC2 credential:
.. code-block:: console
# openstack ec2 credentials create
+------------+---------------------------------------------------+
| Field | Value |
+------------+---------------------------------------------------+
| access | c2e30f2cd5204b69a39b3f1130ca8f61 |
| links | {u'self': u'http://controller:5000/v3/......'} |
| project_id | 407731a6c2d0425c86d1e7f12a900488 |
| secret | baab242d192a4cd6b68696863e07ed59 |
| trust_id | None |
| user_id | 00f0ee06afe74f81b410f3fe03d34fbc |
+------------+---------------------------------------------------+
An example client using keystone auth with the python boto library:
.. code-block:: python
from boto.s3.connection import S3Connection
connection = S3Connection(
aws_access_key_id='c2e30f2cd5204b69a39b3f1130ca8f61',
aws_secret_access_key='baab242d192a4cd6b68696863e07ed59',
port=8080,
host='127.0.0.1',
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
----------
Deployment
----------
@ -47,23 +94,35 @@ To enable all compatibility currently supported, you should make sure that
bulk, slo, and your auth middleware are also included in your proxy
pipeline setting.
Minimum example config is::
Using tempauth, the minimum example config is:
.. code-block:: ini
[pipeline:main]
pipeline = proxy-logging cache s3api tempauth bulk slo proxy-logging
proxy-server
pipeline = proxy-logging cache s3api tempauth bulk slo proxy-logging \
proxy-server
When using keystone, the config will be::
When using keystone, the config will be:
.. code-block:: ini
[pipeline:main]
pipeline = proxy-logging cache s3api s3token keystoneauth bulk slo
proxy-logging proxy-server
pipeline = proxy-logging cache authtoken s3api s3token keystoneauth bulk \
slo proxy-logging proxy-server
Finally, add the s3api middleware section:
.. code-block:: ini
[filter:s3api]
use = egg:swift#s3api
.. note::
``keystonemiddleware.authtoken`` can be located before/after s3api but
we recommend to put it before s3api because when authtoken is after s3api,
both authtoken and s3token will issue the acceptable token to keystone
(i.e. authenticate twice).
(i.e. authenticate twice). Also, in the ``keystonemiddleware.authtoken``
middleware, you should set the ``delay_auth_decision`` option to ``True``.
-----------
Constraints

View File

@ -537,7 +537,6 @@ class S3Request(swob.Request):
'string_to_sign': self.string_to_sign,
'check_signature': self.check_signature,
}
self.token = None
self.account = None
self.user_id = None
self.slo_enabled = slo_enabled
@ -1136,8 +1135,6 @@ class S3Request(swob.Request):
if method is not None:
env['REQUEST_METHOD'] = method
env['HTTP_X_AUTH_TOKEN'] = self.token
if obj:
path = '/v1/%s/%s/%s' % (account, container, obj)
elif container:
@ -1329,7 +1326,7 @@ class S3Request(swob.Request):
except swob.HTTPException as err:
sw_resp = err
else:
# reuse account and tokens
# reuse account
_, self.account, _ = split_path(sw_resp.environ['PATH_INFO'],
2, 3, True)
@ -1339,10 +1336,11 @@ class S3Request(swob.Request):
if not self.user_id:
if 'HTTP_X_USER_NAME' in sw_resp.environ:
# keystone
self.user_id = \
utf8encode("%s:%s" %
(sw_resp.environ['HTTP_X_TENANT_NAME'],
sw_resp.environ['HTTP_X_USER_NAME']))
self.user_id = "%s:%s" % (
sw_resp.environ['HTTP_X_TENANT_NAME'],
sw_resp.environ['HTTP_X_USER_NAME'])
if six.PY2 and not isinstance(self.user_id, bytes):
self.user_id = self.user_id.encode('utf8')
else:
# tempauth
self.user_id = self.access_key
@ -1505,8 +1503,8 @@ class S3AclRequest(S3Request):
# keystone
self.user_id = "%s:%s" % (sw_resp.environ['HTTP_X_TENANT_NAME'],
sw_resp.environ['HTTP_X_USER_NAME'])
self.user_id = utf8encode(self.user_id)
self.token = sw_resp.environ.get('HTTP_X_AUTH_TOKEN')
if six.PY2 and not isinstance(self.user_id, bytes):
self.user_id = self.user_id.encode('utf8')
else:
# tempauth
self.user_id = self.access_key

View File

@ -111,10 +111,7 @@ def parse_v2_response(token):
'X-Project-Id': access_info['token']['tenant']['id'],
'X-Project-Name': access_info['token']['tenant']['name'],
}
return (
headers,
access_info['token'].get('id'),
access_info['token']['tenant'])
return headers, access_info['token']['tenant']
def parse_v3_response(token):
@ -134,7 +131,7 @@ def parse_v3_response(token):
'X-Project-Domain-Id': token['project']['domain']['id'],
'X-Project-Domain-Name': token['project']['domain']['name'],
}
return headers, None, token['project']
return headers, token['project']
class S3Token(object):
@ -308,7 +305,13 @@ class S3Token(object):
if memcache_client:
cached_auth_data = memcache_client.get(memcache_token_key)
if cached_auth_data:
headers, token_id, tenant, secret = cached_auth_data
if len(cached_auth_data) == 4:
# Old versions of swift may have cached token, too,
# but we don't need it
headers, _token, tenant, secret = cached_auth_data
else:
headers, tenant, secret = cached_auth_data
if s3_auth_details['check_signature'](secret):
self._logger.debug("Cached creds valid")
else:
@ -348,9 +351,9 @@ class S3Token(object):
try:
token = resp.json()
if 'access' in token:
headers, token_id, tenant = parse_v2_response(token)
headers, tenant = parse_v2_response(token)
elif 'token' in token:
headers, token_id, tenant = parse_v3_response(token)
headers, tenant = parse_v3_response(token)
else:
raise ValueError
if memcache_client:
@ -363,7 +366,7 @@ class S3Token(object):
access=access)
memcache_client.set(
memcache_token_key,
(headers, token_id, tenant, cred_ref.secret),
(headers, tenant, cred_ref.secret),
time=self._secret_cache_duration)
self._logger.debug("Cached keystone credentials")
except Exception:
@ -391,7 +394,6 @@ class S3Token(object):
environ, start_response)
req.headers.update(headers)
req.headers['X-Auth-Token'] = token_id
tenant_to_connect = force_tenant or tenant['id']
if six.PY2 and isinstance(tenant_to_connect, six.text_type):
tenant_to_connect = tenant_to_connect.encode('utf-8')
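A small sketch of the backward-compatible cache unpacking above (shapes are
illustrative): entries written by older proxies carry a token id that is now
ignored.

.. code-block:: python

    def unpack_cached_auth(cached):
        if len(cached) == 4:
            # old format: (headers, token_id, tenant, secret)
            headers, _token, tenant, secret = cached
        else:
            # new format: (headers, tenant, secret)
            headers, tenant, secret = cached
        return headers, tenant, secret

    print(unpack_cached_auth(({}, {'id': 'proj'}, 's3cr3t')))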

View File

@ -232,6 +232,8 @@ class Owner(object):
"""
def __init__(self, id, name):
self.id = id
if not (name is None or isinstance(name, six.string_types)):
raise TypeError('name must be a string or None')
self.name = name

View File

@ -383,13 +383,18 @@ class VersionedWritesContext(WSGIContext):
return source_resp
def _put_versioned_obj(self, req, put_path_info, source_resp):
# Create a new Request object to PUT to the versions container, copying
# Create a new Request object to PUT to the container, copying
# all headers from the source object apart from x-timestamp.
put_req = make_pre_authed_request(
req.environ, path=wsgi_quote(put_path_info), method='PUT',
swift_source='VW')
copy_header_subset(source_resp, put_req,
lambda k: k.lower() != 'x-timestamp')
slo_size = put_req.headers.get('X-Object-Sysmeta-Slo-Size')
if slo_size:
put_req.headers['Content-Type'] += '; swift_bytes=' + slo_size
put_req.environ['swift.content_type_overridden'] = True
put_req.environ['wsgi.input'] = FileLikeIter(source_resp.app_iter)
put_resp = put_req.get_response(self.app)
close_if_possible(source_resp.app_iter)
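The ``swift_bytes`` parameter carries the manifest's aggregate size on the
backend ``Content-Type`` so listings stay accurate; a toy illustration
(values hypothetical):

.. code-block:: python

    content_type = 'application/octet-stream'
    slo_size = '1048576'  # would come from X-Object-Sysmeta-Slo-Size
    content_type += '; swift_bytes=' + slo_size
    print(content_type)  # -> application/octet-stream; swift_bytes=1048576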

View File

@ -364,13 +364,15 @@ class RingBuilder(object):
# shift an unsigned int >I right to obtain the partition for the
# int).
if not self._replica2part2dev:
self._ring = RingData([], devs, self.part_shift)
self._ring = RingData([], devs, self.part_shift,
version=self.version)
else:
self._ring = \
RingData([array('H', p2d) for p2d in
self._replica2part2dev],
devs, self.part_shift,
self.next_part_power)
self.next_part_power,
self.version)
return self._ring
def add_dev(self, dev):

View File

@ -22,11 +22,11 @@ from os.path import getmtime
import struct
from time import time
import os
from io import BufferedReader
from hashlib import md5
from itertools import chain, count
from tempfile import NamedTemporaryFile
import sys
import zlib
from six.moves import range
@ -41,15 +41,77 @@ def calc_replica_count(replica2part2dev_id):
return base + extra
class RingReader(object):
chunk_size = 2 ** 16
def __init__(self, filename):
self.fp = open(filename, 'rb')
self._reset()
def _reset(self):
self._buffer = b''
self.size = 0
self.raw_size = 0
self._md5 = md5()
self._decomp = zlib.decompressobj(32 + zlib.MAX_WBITS)
@property
def close(self):
return self.fp.close
def seek(self, pos, ref=0):
if (pos, ref) != (0, 0):
raise NotImplementedError
self._reset()
return self.fp.seek(pos, ref)
def _buffer_chunk(self):
chunk = self.fp.read(self.chunk_size)
if not chunk:
return False
self.size += len(chunk)
self._md5.update(chunk)
chunk = self._decomp.decompress(chunk)
self.raw_size += len(chunk)
self._buffer += chunk
return True
def read(self, amount=-1):
if amount < 0:
raise IOError("don't be greedy")
while amount > len(self._buffer):
if not self._buffer_chunk():
break
result, self._buffer = self._buffer[:amount], self._buffer[amount:]
return result
def readline(self):
# apparently pickle needs this?
while b'\n' not in self._buffer:
if not self._buffer_chunk():
break
line, sep, self._buffer = self._buffer.partition(b'\n')
return line + sep
@property
def md5(self):
return self._md5.hexdigest()
class RingData(object):
"""Partitioned consistent hashing ring data (used for serialization)."""
def __init__(self, replica2part2dev_id, devs, part_shift,
next_part_power=None):
next_part_power=None, version=None):
self.devs = devs
self._replica2part2dev_id = replica2part2dev_id
self._part_shift = part_shift
self.next_part_power = next_part_power
self.version = version
self.md5 = self.size = self.raw_size = None
for dev in self.devs:
if dev is not None:
@ -104,7 +166,7 @@ class RingData(object):
:param bool metadata_only: If True, only load `devs` and `part_shift`.
:returns: A RingData instance containing the loaded data.
"""
gz_file = BufferedReader(GzipFile(filename, 'rb'))
gz_file = RingReader(filename)
# See if the file is in the new format
magic = gz_file.read(4)
@ -124,7 +186,10 @@ class RingData(object):
if not hasattr(ring_data, 'devs'):
ring_data = RingData(ring_data['replica2part2dev_id'],
ring_data['devs'], ring_data['part_shift'],
ring_data.get('next_part_power'))
ring_data.get('next_part_power'),
ring_data.get('version'))
for attr in ('md5', 'size', 'raw_size'):
setattr(ring_data, attr, getattr(gz_file, attr))
return ring_data
def serialize_v1(self, file_obj):
@ -138,6 +203,9 @@ class RingData(object):
'replica_count': len(ring['replica2part2dev_id']),
'byteorder': sys.byteorder}
if ring['version'] is not None:
_text['version'] = ring['version']
next_part_power = ring.get('next_part_power')
if next_part_power is not None:
_text['next_part_power'] = next_part_power
@ -175,7 +243,8 @@ class RingData(object):
return {'devs': self.devs,
'replica2part2dev_id': self._replica2part2dev_id,
'part_shift': self._part_shift,
'next_part_power': self.next_part_power}
'next_part_power': self.next_part_power,
'version': self.version}
class Ring(object):
@ -239,6 +308,10 @@ class Ring(object):
self._rebuild_tier_data()
self._update_bookkeeping()
self._next_part_power = ring_data.next_part_power
self._version = ring_data.version
self._md5 = ring_data.md5
self._size = ring_data.size
self._raw_size = ring_data.raw_size
def _update_bookkeeping(self):
# Do this now, when we know the data has changed, rather than
@ -257,12 +330,19 @@ class Ring(object):
zones = set()
ips = set()
self._num_devs = 0
self._num_assigned_devs = 0
self._num_weighted_devs = 0
for dev in self._devs:
if dev and dev['id'] in dev_ids_with_parts:
if dev is None:
continue
self._num_devs += 1
if dev.get('weight', 0) > 0:
self._num_weighted_devs += 1
if dev['id'] in dev_ids_with_parts:
regions.add(dev['region'])
zones.add((dev['region'], dev['zone']))
ips.add((dev['region'], dev['zone'], dev['ip']))
self._num_devs += 1
self._num_assigned_devs += 1
self._num_regions = len(regions)
self._num_zones = len(zones)
self._num_ips = len(ips)
@ -275,6 +355,22 @@ class Ring(object):
def part_power(self):
return 32 - self._part_shift
@property
def version(self):
return self._version
@property
def md5(self):
return self._md5
@property
def size(self):
return self._size
@property
def raw_size(self):
return self._raw_size
def _rebuild_tier_data(self):
self.tier2devs = defaultdict(list)
for dev in self._devs:
@ -301,6 +397,21 @@ class Ring(object):
"""Number of partitions in the ring."""
return len(self._replica2part2dev_id[0])
@property
def device_count(self):
"""Number of devices in the ring."""
return self._num_devs
@property
def weighted_device_count(self):
"""Number of devices with weight in the ring."""
return self._num_weighted_devs
@property
def assigned_device_count(self):
"""Number of devices with assignments in the ring."""
return self._num_assigned_devs
@property
def devs(self):
"""devices in the ring"""
@ -490,7 +601,7 @@ class Ring(object):
hit_all_ips = True
break
hit_all_devs = len(used) == self._num_devs
hit_all_devs = len(used) == self._num_assigned_devs
for handoff_part in chain(range(start, parts, inc),
range(inc - ((parts - start) % inc),
start, inc)):
@ -505,6 +616,6 @@ class Ring(object):
dev = self._devs[dev_id]
yield dict(dev, handoff_index=next(index))
used.add(dev_id)
if len(used) == self._num_devs:
if len(used) == self._num_assigned_devs:
hit_all_devs = True
break
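Once loaded, the new bookkeeping surfaces through read-only properties. A
usage sketch (assumes an object ring at the standard SAIO path):

.. code-block:: python

    from swift.common.ring import Ring

    ring = Ring('/etc/swift', ring_name='object')
    print(ring.version, ring.md5, ring.size, ring.raw_size)
    print(ring.device_count, ring.weighted_device_count,
          ring.assigned_device_count)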

View File

@ -32,6 +32,7 @@ from eventlet.green import socket, ssl, os as green_os
import six
from six import BytesIO
from six import StringIO
from six.moves import configparser
from swift.common import utils, constraints
from swift.common.storage_policy import BindPortsCache
@ -55,6 +56,23 @@ except (ImportError, NotImplementedError):
CPU_COUNT = 1
if not six.PY2:
# In general, we haven't really thought much about interpolation in
# configs. Python's default ConfigParser has always supported it, though,
# so *we* got it "for free". Unfortunately, since we "supported"
# interpolation, we have to assume there are deployments in the wild that
# use it, and try not to break them. So, do what we can to mimic the py2
# behavior of passing through values like "1%" (which we want to support
# for fallocate_reserve).
class NicerInterpolation(configparser.BasicInterpolation):
def before_get(self, parser, section, option, value, defaults):
if '%(' not in value:
return value
return super(NicerInterpolation, self).before_get(
parser, section, option, value, defaults)
configparser.ConfigParser._DEFAULT_INTERPOLATION = NicerInterpolation()
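A self-contained py3 sketch of the pass-through behavior (the config value is
an assumption): bare ``%`` values such as ``1%`` survive, while ``%(...)s``
interpolation still works.

.. code-block:: python

    import configparser

    class NicerInterpolation(configparser.BasicInterpolation):
        def before_get(self, parser, section, option, value, defaults):
            if '%(' not in value:
                return value  # leave bare '%' values alone
            return super().before_get(
                parser, section, option, value, defaults)

    cp = configparser.ConfigParser(interpolation=NicerInterpolation())
    cp.read_string("[DEFAULT]\nfallocate_reserve = 1%\n")
    print(cp.get('DEFAULT', 'fallocate_reserve'))  # -> 1%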
class NamedConfigLoader(loadwsgi.ConfigLoader):
"""
Patch paste.deploy's ConfigLoader so each context object will know what
@ -480,6 +498,11 @@ class SwiftHttpProtocol(wsgi.HttpProtocol):
break
header, value = line.split(':', 1)
value = value.strip(' \t\n\r')
# NB: Eventlet looks at the headers obj to figure out
# whether the client said the connection should close;
# see https://github.com/eventlet/eventlet/blob/v0.25.0/
# eventlet/wsgi.py#L504
self.headers.add_header(header, value)
headers_raw.append((header, value))
wsgi_key = 'HTTP_' + header.replace('-', '_').encode(
'latin1').upper().decode('latin1')
@ -488,6 +511,20 @@ class SwiftHttpProtocol(wsgi.HttpProtocol):
wsgi_key = wsgi_key[5:]
environ[wsgi_key] = value
environ['headers_raw'] = tuple(headers_raw)
# Since we parsed some more headers, check to see if they
# change how our wsgi.input should behave
te = environ.get('HTTP_TRANSFER_ENCODING', '').lower()
if te.rsplit(',', 1)[-1].strip() == 'chunked':
environ['wsgi.input'].chunked_input = True
else:
length = environ.get('CONTENT_LENGTH')
if length:
length = int(length)
environ['wsgi.input'].content_length = length
if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
environ['wsgi.input'].wfile = self.wfile
environ['wsgi.input'].wfile_line = \
b'HTTP/1.1 100 Continue\r\n'
return environ

View File

@ -1186,19 +1186,27 @@ class ContainerBroker(DatabaseBroker):
continue
if end >= 0 and len(name) > end + len(delimiter):
if reverse:
end_marker = name[:end + 1]
end_marker = name[:end + len(delimiter)]
else:
marker = name[:end] + chr(ord(delimiter) + 1)
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
curs.close()
break
elif end >= 0:
if reverse:
end_marker = name[:end + 1]
end_marker = name[:end + len(delimiter)]
else:
marker = name[:end] + chr(ord(delimiter) + 1)
marker = ''.join([
name[:end],
delimiter[:-1],
chr(ord(delimiter[-1:]) + 1),
])
# we want result to be inclusive of delim+1
delim_force_gte = True
dir_name = name[:end + 1]
dir_name = name[:end + len(delimiter)]
if dir_name != orig_marker:
results.append([dir_name, '0', 0, None, ''])
curs.close()
@ -1996,6 +2004,22 @@ class ContainerBroker(DatabaseBroker):
self.update_metadata({'X-Container-Sysmeta-Shard-' + key:
(value, Timestamp.now().internal)})
def get_sharding_sysmeta_with_timestamps(self):
"""
Returns sharding specific info from the broker's metadata with
timestamps.
:return: a dict mapping each sharding sysmeta key to a
    (value, timestamp) tuple.
"""
prefix = 'X-Container-Sysmeta-Shard-'
return {
k[len(prefix):]: v
for k, v in self.metadata.items()
if k.startswith(prefix)
}
def get_sharding_sysmeta(self, key=None):
"""
Returns sharding specific info from the broker's metadata.
@ -2005,13 +2029,11 @@ class ContainerBroker(DatabaseBroker):
:return: either a dict of sharding info or the value stored under
``key`` in that dict.
"""
prefix = 'X-Container-Sysmeta-Shard-'
metadata = self.metadata
info = dict((k[len(prefix):], v[0]) for
k, v in metadata.items() if k.startswith(prefix))
info = self.get_sharding_sysmeta_with_timestamps()
if key:
return info.get(key)
return info
return info.get(key, (None, None))[0]
else:
return {k: v[0] for k, v in info.items()}
def _load_root_info(self):
"""

View File

@ -637,9 +637,6 @@ class ContainerController(BaseStorageServer):
path = get_param(req, 'path')
prefix = get_param(req, 'prefix')
delimiter = get_param(req, 'delimiter')
if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254):
# delimiters can be made more flexible later
return HTTPPreconditionFailed(body='Bad delimiter')
marker = get_param(req, 'marker', '')
end_marker = get_param(req, 'end_marker')
limit = constraints.CONTAINER_LISTING_LIMIT

View File

@ -40,6 +40,11 @@ from swift.container.backend import ContainerBroker, \
from swift.container.replicator import ContainerReplicator
CLEAVE_SUCCESS = 0
CLEAVE_FAILED = 1
CLEAVE_EMPTY = 2
def sharding_enabled(broker):
# NB all shards will by default have been created with
# X-Container-Sysmeta-Sharding set and will therefore be candidates for
@ -220,6 +225,10 @@ class CleavingContext(object):
yield 'ranges_done', self.ranges_done
yield 'ranges_todo', self.ranges_todo
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join(
'%s=%r' % prop for prop in self))
def _encode(cls, value):
if value is not None and six.PY2 and isinstance(value, six.text_type):
return value.encode('utf-8')
@ -241,6 +250,26 @@ class CleavingContext(object):
def _make_ref(cls, broker):
return broker.get_info()['id']
@classmethod
def load_all(cls, broker):
"""
Returns all cleaving contexts stored in the broker.
:param broker: the ContainerBroker whose sysmeta is loaded
:return: an iterator of (CleavingContext, timestamp) tuples
"""
brokers = broker.get_brokers()
sysmeta = brokers[-1].get_sharding_sysmeta_with_timestamps()
for key, (val, timestamp) in sysmeta.items():
# If the value is of length 0, then the metadata is
# marked for deletion
if key.startswith("Context-") and len(val) > 0:
try:
yield cls(**json.loads(val)), timestamp
except ValueError:
continue
@classmethod
def load(cls, broker):
"""
@ -287,6 +316,11 @@ class CleavingContext(object):
return all((self.misplaced_done, self.cleaving_done,
self.max_row == self.cleave_to_row))
def delete(self, broker):
# These will get reclaimed when `_reclaim_metadata` in
# common/db.py is called.
broker.set_sharding_sysmeta('Context-' + self.ref, '')
DEFAULT_SHARD_CONTAINER_THRESHOLD = 1000000
DEFAULT_SHARD_SHRINK_POINT = 25
@ -607,6 +641,7 @@ class ContainerSharder(ContainerReplicator):
"""
part = self.ring.get_part(shard_range.account, shard_range.container)
node = self.find_local_handoff_for_part(part)
put_timestamp = Timestamp.now().internal
if not node:
raise DeviceUnavailable(
'No mounted devices found suitable for creating shard broker '
@ -615,7 +650,7 @@ class ContainerSharder(ContainerReplicator):
shard_broker = ContainerBroker.create_broker(
os.path.join(self.root, node['device']), part, shard_range.account,
shard_range.container, epoch=shard_range.epoch,
storage_policy_index=policy_index)
storage_policy_index=policy_index, put_timestamp=put_timestamp)
# Get the valid info into the broker.container, etc
shard_broker.get_info()
@ -625,7 +660,7 @@ class ContainerSharder(ContainerReplicator):
'X-Container-Sysmeta-Sharding':
('True', Timestamp.now().internal)})
return part, shard_broker, node['id']
return part, shard_broker, node['id'], put_timestamp
def _audit_root_container(self, broker):
# This is the root container, and therefore the tome of knowledge,
@ -724,12 +759,20 @@ class ContainerSharder(ContainerReplicator):
self._increment_stat('audit_shard', 'success', statsd=True)
return True
def _audit_cleave_contexts(self, broker):
now = Timestamp.now()
for context, last_mod in CleavingContext.load_all(broker):
if Timestamp(last_mod).timestamp + self.reclaim_age < \
now.timestamp:
context.delete(broker)
def _audit_container(self, broker):
if broker.is_deleted():
# if the container has been marked as deleted, all metadata will
# have been erased so no point auditing. But we want it to pass, in
# case any objects exist inside it.
return True
self._audit_cleave_contexts(broker)
if broker.is_root_container():
return self._audit_root_container(broker)
return self._audit_shard_container(broker)
@ -804,7 +847,7 @@ class ContainerSharder(ContainerReplicator):
last_index = next_index = 0
for obj in objs:
if dest_shard_range is None:
# no more destinations: yield remainder of batch and return
# no more destinations: yield remainder of batch and bail
# NB there may be more batches of objects but none of them
# will be placed so no point fetching them
yield objs[last_index:], None, info
@ -893,7 +936,7 @@ class ContainerSharder(ContainerReplicator):
continue
if dest_shard_range not in dest_brokers:
part, dest_broker, node_id = self._get_shard_broker(
part, dest_broker, node_id, _junk = self._get_shard_broker(
dest_shard_range, src_broker.root_path, policy_index)
# save the broker info that was sampled prior to the *first*
# yielded objects for this destination
@ -1122,12 +1165,15 @@ class ContainerSharder(ContainerReplicator):
start = time.time()
policy_index = broker.storage_policy_index
try:
shard_part, shard_broker, node_id = self._get_shard_broker(
shard_range, broker.root_path, policy_index)
shard_part, shard_broker, node_id, put_timestamp = \
self._get_shard_broker(shard_range, broker.root_path,
policy_index)
except DeviceUnavailable as duex:
self.logger.warning(str(duex))
self._increment_stat('cleaved', 'failure', statsd=True)
return False
return CLEAVE_FAILED
own_shard_range = broker.get_own_shard_range()
# only cleave from the retiring db - misplaced objects handler will
# deal with any objects in the fresh db
@ -1138,13 +1184,36 @@ class ContainerSharder(ContainerReplicator):
source_db_id = source_broker.get_info()['id']
source_max_row = source_broker.get_max_row()
sync_point = shard_broker.get_sync(source_db_id)
if sync_point < source_max_row:
if sync_point < source_max_row or source_max_row == -1:
sync_from_row = max(cleaving_context.last_cleave_to_row or -1,
sync_point)
objects = None
for objects, info in self.yield_objects(
source_broker, shard_range,
since_row=sync_from_row):
shard_broker.merge_items(objects)
if objects is None:
self.logger.info("Cleaving '%s': %r - zero objects found",
broker.path, shard_range)
if shard_broker.get_info()['put_timestamp'] == put_timestamp:
# This was just created; don't need to replicate this
# SR because there was nothing there. So cleanup and
# remove the shard_broker from its hand off location.
self.delete_db(shard_broker)
cleaving_context.cursor = shard_range.upper_str
cleaving_context.ranges_done += 1
cleaving_context.ranges_todo -= 1
if shard_range.upper >= own_shard_range.upper:
# cleaving complete
cleaving_context.cleaving_done = True
cleaving_context.store(broker)
# Because nothing was here we won't count it in the shard
# batch count.
return CLEAVE_EMPTY
# Else, it wasn't newly created by us, and
# we don't know what's in it or why. Let it get
# replicated and counted in the batch count.
# Note: the max row stored as a sync point is sampled *before*
# objects are yielded to ensure that is less than or equal to
# the last yielded row. Other sync points are also copied from the
@ -1159,8 +1228,6 @@ class ContainerSharder(ContainerReplicator):
self.logger.debug("Cleaving '%s': %r - shard db already in sync",
broker.path, shard_range)
own_shard_range = broker.get_own_shard_range()
replication_quorum = self.existing_shard_replication_quorum
if shard_range.includes(own_shard_range):
# When shrinking, include deleted own (donor) shard range in
@ -1202,7 +1269,7 @@ class ContainerSharder(ContainerReplicator):
'%s successes, %s required.', shard_range, broker.path,
replication_successes, replication_quorum)
self._increment_stat('cleaved', 'failure', statsd=True)
return False
return CLEAVE_FAILED
elapsed = round(time.time() - start, 3)
self._min_stat('cleaved', 'min_time', elapsed)
@ -1219,7 +1286,7 @@ class ContainerSharder(ContainerReplicator):
'Cleaved %s for shard range %s in %gs.',
broker.path, shard_range, elapsed)
self._increment_stat('cleaved', 'success', statsd=True)
return True
return CLEAVE_SUCCESS
def _cleave(self, broker):
# Returns True if misplaced objects have been moved and the entire
@ -1264,23 +1331,30 @@ class ContainerSharder(ContainerReplicator):
cleaving_context.ranges_todo, broker.path)
ranges_done = []
for shard_range in ranges_todo[:self.cleave_batch_size]:
for shard_range in ranges_todo:
if shard_range.state == ShardRange.FOUND:
break
elif shard_range.state in (ShardRange.CREATED,
ShardRange.CLEAVED,
ShardRange.ACTIVE):
if self._cleave_shard_range(
broker, cleaving_context, shard_range):
cleave_result = self._cleave_shard_range(
broker, cleaving_context, shard_range)
if cleave_result == CLEAVE_SUCCESS:
ranges_done.append(shard_range)
else:
if len(ranges_done) == self.cleave_batch_size:
break
elif cleave_result == CLEAVE_FAILED:
break
# else, no errors, but no rows found either. keep going,
# and don't count it against our batch size
else:
self.logger.warning('Unexpected shard range state for cleave: %s',
                    shard_range.state)
break
if not ranges_done:
# _cleave_shard_range always store()s the context on success; make
# sure we *also* do that if we hit a failure right off the bat
cleaving_context.store(broker)
self.logger.debug(
'Cleaved %s shard ranges for %s', len(ranges_done), broker.path)
@ -1307,6 +1381,7 @@ class ContainerSharder(ContainerReplicator):
modified_shard_ranges.append(own_shard_range)
broker.merge_shard_ranges(modified_shard_ranges)
if broker.set_sharded_state():
cleaving_context.delete(broker)
return True
else:
self.logger.warning(

View File

@ -7,15 +7,16 @@
# Ettore Atalan <atalanttore@googlemail.com>, 2014-2015
# Jonas John <jonas.john@e-werkzeug.eu>, 2015
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Andreas Jaeger <jaegerandi@gmail.com>, 2019. #zanata
msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-06-02 07:02+0000\n"
"PO-Revision-Date: 2019-10-03 06:49+0000\n"
"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@ -57,10 +58,26 @@ msgstr ""
"%(replicated)d/%(total)d (%(percentage).2f%%) Partitionen repliziert in "
"%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)"
#, python-format
msgid "%(replication_ip)s/%(device)s responded as unmounted"
msgstr "%(replication_ip)s/%(device)s antwortet als unmounted"
#, python-format
msgid "%(server)s #%(number)d not running (%(conf)s)"
msgstr "%(server)s #%(number)d wird nicht ausgeführt (%(conf)s)"
#, python-format
msgid "%(server)s (%(pid)s) appears to have stopped"
msgstr "%(server)s (%(pid)s) scheinbar gestoppt"
#, python-format
msgid "%(server)s running (%(pid)s - %(conf)s)"
msgstr "%(server)s wird ausgeführt (%(pid)s - %(conf)s)"
#, python-format
msgid "%(server)s running (%(pid)s - %(pid_file)s)"
msgstr "%(server)s wird ausgeführt (%(pid)s - %(pid_file)s)"
#, python-format
msgid "%(success)s successes, %(failure)s failures"
msgstr "%(success)s Erfolge, %(failure)s Fehlschläge"
@ -93,44 +110,9 @@ msgstr "%s zurückgemeldet als ausgehängt"
msgid "%s: Connection reset by peer"
msgstr "%s: Verbindung zurückgesetzt durch Peer"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s Container gelöscht"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s Container möglicherweise verbleibend"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s Container verbleibend"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s Objekte gelöscht"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s Objekte möglicherweise verbleibend"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s Objekte verbleibend"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", vergangen: %.02fs"
msgid ", return codes: "
msgstr ", Rückgabecodes: "
msgid "Account"
msgstr "Konto"
#, python-format
msgid "Account %(account)s has not been reaped since %(time)s"
msgstr "Konto %(account)s wurde nicht aufgeräumt seit %(time)s"
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs"
@ -139,6 +121,13 @@ msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs"
msgid "Account audit pass completed: %.02fs"
msgstr "Kontoprüfungsdurchlauf abgeschlossen: %.02fs"
#, python-format
msgid ""
"Adding required filter %(filter_name)s to pipeline at position %(insert_at)d"
msgstr ""
"Füge erforderlichen Filter %(filter_name)s zu Pipeline an Position "
"%(insert_at)d hinzu"
#, python-format
msgid ""
"Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)"
@ -150,6 +139,14 @@ msgstr ""
msgid "Audit Failed for %(path)s: %(err)s"
msgstr "Prüfung fehlgeschlagen für %(path)s: %(err)s"
#, python-format
msgid "Audit passed for %s"
msgstr "Prüfung für %s erfolgt"
#, python-format
msgid "Bad key for %(name)r: %(err)s"
msgstr "Schlechter Schlüssel für %(name)r: %(err)s"
#, python-format
msgid "Bad rsync return code: %(ret)d <- %(args)s"
msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s"
@ -181,10 +178,6 @@ msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet"
msgid "Begin object update sweep"
msgstr "Scanvorgang für Objektaktualisierung wird gestartet"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Durchlauf für Konto %s wird gestartet"
msgid "Beginning replication run"
msgstr "Replizierungsdurchlauf wird gestartet"
@ -242,6 +235,9 @@ msgstr ""
msgid "Connection refused"
msgstr "Verbindung abgelehnt"
msgid "Connection reset"
msgstr "Verbindung zurückgesetzt"
msgid "Connection timeout"
msgstr "Verbindungszeitüberschreitung"
@ -260,6 +256,18 @@ msgstr "Containerprüfungsdurchlauf abgeschlossen: %.02fs"
msgid "Container sync \"once\" mode completed: %.02fs"
msgstr "Containersynchronisationsmodus \"once\" abgeschlossen: %.02fs"
#, python-format
msgid ""
"Container sync report: %(container)s, time window start: %(start)s, time "
"window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, "
"bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, "
"total_rows: %(total)s"
msgstr ""
"Container Synchronisierungsbericht: %(container)s, Beginn Zeitfenster: "
"%(start)s, Ende Zeitfenster: %(end)s, puts: %(puts)s, posts: %(posts)s, "
"deletes: %(deletes)s, bytes: %(bytes)s, sync_point1: %(point1)s, "
"sync_point2: %(point2)s, total_rows: %(total)s"
#, python-format
msgid ""
"Container update single threaded sweep completed: %(elapsed).02fs, "
@ -282,6 +290,10 @@ msgstr ""
"%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne "
"Änderungen"
#, python-format
msgid "Could not autocreate account %r"
msgstr "Kann das Konto %r nicht automatisch erstellen"
#, python-format
msgid ""
"Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds"
@ -297,10 +309,6 @@ msgstr "%(conf)r konnte nicht geladen werden: %(error)s"
msgid "Data download error: %s"
msgstr "Fehler beim Downloaden von Daten: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Gerätedurchgang abgeschlossen: %.02fs"
#, python-format
msgid "Directory %(directory)r does not map to a valid policy (%(error)s)"
msgstr ""
@ -323,6 +331,10 @@ msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s"
msgid "ERROR %(status)d Expect: 100-continue From Object Server"
msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver"
#, python-format
msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server"
msgstr "FEHLER %(status)d Versuch, %(method)s %(path)s von %(type)s Server"
#, python-format
msgid ""
"ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry "
@ -353,6 +365,14 @@ msgstr ""
"Aktualisierung zu einem späteren Zeitpunkt gespeichert): %(status)d Antwort "
"von %(ip)s:%(port)s/%(dev)s"
#, python-format
msgid ""
"ERROR Container update failed: different numbers of hosts and devices in "
"request: \"%(hosts)s\" vs \"%(devices)s\""
msgstr ""
"FEHLER Container Aktualisierung fehlgeschlagen: Unterschiedliche Anzahl von "
"Hosts und Geräten in der Anfrage: \"%(hosts)s\" vs \"%(devices)s\""
#, python-format
msgid "ERROR Could not get account info %s"
msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden"
@ -497,6 +517,34 @@ msgstr "FEHLER: Auf %(path)s kann nicht zugegriffen werden: %(error)s"
msgid "ERROR: Unable to run auditing: %s"
msgstr "FEHLER: Prüfung konnte nicht durchgeführt werden: %s"
#, python-format
msgid ""
"Error code %(status)d is returned from remote server %(ip)s: %(port)s / "
"%(device)s"
msgstr ""
"Fehlercode %(status)d wurde vom entfernten Server %(ip)s:%(port)s / "
"%(device)s zurück gegeben"
#, python-format
msgid "Error decoding fragments for %r"
msgstr "Fehler beim Dekodieren von Fragmenten für %r"
#, python-format
msgid "Error decrypting %(resp_type)s: %(reason)s"
msgstr "Fehler beim Entschlüsseln %(resp_type)s: %(reason)s"
#, python-format
msgid "Error decrypting %(resp_type)s: Missing %(key)s"
msgstr "Fehler beim Entschlüsseln %(resp_type)s: Es fehlt %(key)s"
#, python-format
msgid "Error decrypting header %(header)s: %(error)s"
msgstr "Fehler beim Entschlüsseln des header %(header)s: %(error)s"
#, python-format
msgid "Error decrypting object: %s"
msgstr "Fehler beim Entschlüsseln object: %s"
msgid "Error hashing suffix"
msgstr "Fehler beim Hashing des Suffix"
@ -526,6 +574,10 @@ msgstr "Fehler beim Lesen der swift.conf"
msgid "Error retrieving recon data"
msgstr "Fehler beim Abrufen der recon-Daten"
#, python-format
msgid "Error sending UDP message to %(target)r: %(err)s"
msgstr "Fehler beim Senden von UDP Nachricht zu %(target)r: %(err)s"
msgid "Error syncing handoff partition"
msgstr "Fehler bei der Synchronisierung der Übergabepartition"
@ -552,30 +604,25 @@ msgstr "Fehler: fehlendes Konfigurationspfadargument"
msgid "Error: unable to locate %s"
msgstr "Fehler: %s kann nicht lokalisiert werden"
msgid "Exception in top-level account reaper loop"
msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene"
#, python-format
msgid "Exception fetching fragments for %r"
msgstr "Ausnahme beim Abrufen von Fragmenten für %r"
msgid "Exception in top-levelreconstruction loop"
msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene"
msgid "Exception in top-level reconstruction loop"
msgstr "Ausnahme in Replizierungsloop der höchsten Ebene"
#, python-format
msgid "Exception in top-level replication loop: %s"
msgstr "Ausnahme in Replizierungsschleife der höchsten Ebene: %s"
#, python-format
msgid "Exception while deleting container %(account)s %(container)s %(err)s"
msgstr "Ausnahme beim Löschen von Container %(account)s %(container)s %(err)s"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Ausnahme mit Account %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Ausnahme bei Containern für Konto %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Ausnahme bei Objekten für Container %(container)s für Konto %(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Erwartet: 100-continue auf %s"
@ -594,13 +641,16 @@ msgstr ""
"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle "
"Replikationsdurchgang wird abgebrochen."
msgid ""
"Handoffs only mode still has handoffs remaining. Next pass will continue to "
"revert handoffs."
msgstr ""
"Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der nächste "
"Durchgang setzt die Übergaben fort."
msgid "Host unreachable"
msgstr "Host nicht erreichbar"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Unvollständiger Durchgang auf Konto %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Ungültiges X-Container-Sync-To-Format %r"
@ -629,6 +679,9 @@ msgstr ""
"Ungültiges Schema %r in X-Container-Sync-To, muss \"//\", \"http\" oder "
"\"https\" sein."
msgid "Invalid swift_bytes"
msgstr "Ungültige swift_bytes"
#, python-format
msgid "Killing long-running rsync: %s"
msgstr "Lange laufendes rsync wird gekillt: %s"
@ -644,6 +697,13 @@ msgstr "Suche erkannt. Live-Coros werden gelöscht."
msgid "Mapped %(given_domain)s to %(found_domain)s"
msgstr "%(given_domain)s zugeordnet zu %(found_domain)s"
#, python-format
msgid "Missing key for %r"
msgstr "Fehlender Schlüssel für %r"
msgid "Network unreachable"
msgstr "Netzwerk nicht erreichbar"
#, python-format
msgid "No %s running"
msgstr "Kein %s läuft"
@ -698,6 +758,20 @@ msgstr "Objekt"
msgid "Object PUT"
msgstr "Objekt PUT"
#, python-format
msgid ""
"Object PUT exceptions after last send, %(conns)s/%(nodes)s required "
"connections"
msgstr ""
"Objekt PUT Ausnahme nach letztem Senden, %(conns)s/%(nodes)s erfordert eine "
"Verbindung"
#, python-format
msgid ""
"Object PUT exceptions during send, %(conns)s/%(nodes)s required connections"
msgstr ""
"Objekt PUT Ausnahme beim Senden %(conns)s/%(nodes)s erfordern eine Verbindung"
#, python-format
msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r"
msgstr ""
@ -708,6 +782,11 @@ msgstr ""
msgid "Object PUT returning 412, %(statuses)r"
msgstr "Objekt PUT Rückgabe 412, %(statuses)r"
#, python-format
msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections"
msgstr ""
"Objekt PUT gibt 503 zurück, %(conns)s/%(nodes)s erfordert eine Verbindung"
#, python-format
msgid ""
"Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total "
@ -769,9 +848,25 @@ msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs"
msgstr ""
"Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. %(med).4fs"
#, python-format
msgid "Pass so far %(time)ds; %(objects)d objects expired"
msgstr "Bisherige Durchgänge %(time)ds; %(objects)d Objekte abgelaufen"
msgid "Path required in X-Container-Sync-To"
msgstr "Pfad in X-Container-Sync-To ist erforderlich"
#, python-format
msgid "Pipeline is \"%s\""
msgstr "Pipeline ist \"%s\""
#, python-format
msgid "Pipeline was modified. New pipeline is \"%s\"."
msgstr "Pipeline wurde geändert. Neue Pipeline ist \"%s\"."
#, python-format
msgid "Problem checking EC fragment %(datadir)s: %(err)s"
msgstr "Problemüberprüfung EC Fragment %(datadir)s: %(err)s"
#, python-format
msgid "Problem cleaning up %(datadir)s (%(err)s)"
msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)"
@ -780,10 +875,20 @@ msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)"
msgid "Problem cleaning up %s"
msgstr "Problem bei der Bereinigung von %s"
#, python-format
msgid "Problem with fragment response: %s"
msgstr "Problem mit Antwort von Fragment: %s"
#, python-format
msgid "Profiling Error: %s"
msgstr "Fehler bei der Profilerstellung: %s"
#, python-format
msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(reason)s"
msgstr ""
"%(db_dir)s nach %(quar_path)s aufgrund von %(reason)s in Quarantäne "
"verschoben."
#, python-format
msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory"
msgstr ""
@ -892,6 +997,10 @@ msgstr ""
"Seit %(time)s: Containerprüfungen: %(pass)s bestandene Prüfung, %(fail)s "
"nicht bestandene Prüfung"
#, python-format
msgid "Skipping %(datadir)s because %(err)s"
msgstr "Überspringe %(datadir)s aufgrund %(err)s"
#, python-format
msgid "Skipping %s as it is not mounted"
msgstr "%s wird übersprungen, weil es nicht eingehängt ist"
@ -928,8 +1037,8 @@ msgstr ""
"der Summe der %(key)s für alle Richtlinien (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s"
msgid "Timeout fetching fragments for %r"
msgstr "Zeitüberschreitung beim Abrufen von Fragmenten für %r"
#, python-format
msgid "Trying to %(method)s %(path)s"
@ -939,10 +1048,15 @@ msgstr "Versuch, %(method)s %(path)s"
msgid "Trying to GET %(full_path)s"
msgstr "Versuch, %(full_path)s mit GET abzurufen"
#, python-format
msgid "Trying to get %(status_type)s status of PUT to %(path)s"
msgstr ""
"Es wird versucht, %(status_type)s Status von PUT für %(path)s abzurufen"
msgid "Trying to read during GET"
msgstr "Versuch, während des GET-Vorgangs zu lesen"
msgid "Trying to read during GET (retrying)"
msgid "Trying to read object during GET (retrying)"
msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)"
msgid "Trying to send to client"
@ -1001,6 +1115,14 @@ msgstr "Konfiguration aus %s kann nicht gelesen werden"
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr "Nicht genehmigte %(sync_from)r => %(sync_to)r"
#, python-format
msgid ""
"Unexpected fragment data type (not quarantined) %(datadir)s: %(type)s at "
"offset 0x%(offset)x"
msgstr ""
"Unerwarteter Fragment Datentyp (nicht unter Quarantäne) %(datadir)s: "
"%(type)s at offset 0x%(offset)x"
msgid "Unhandled exception"
msgstr "Nicht behandelte Exception"
@ -1024,6 +1146,14 @@ msgstr ""
"WARNUNG: SSL sollte nur zu Testzwecken aktiviert werden. Verwenden Sie die "
"externe SSL-Beendigung für eine Implementierung in der Produktionsumgebung."
msgid ""
"WARNING: Unable to modify I/O scheduling class and priority of process. "
"Keeping unchanged! Check logs for more info."
msgstr ""
"WARNUNG: I/O Planungsklasse und Priorität des Prozesses können nicht "
"geändert werden. Es werden keine Änderungen vorgenommen! Überprüfen Sie die "
"Log-Dateien für mehr Informationen."
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?"
msgstr ""
"WARNUNG: Grenzwert für Dateideskriptoren kann nicht geändert werden. Wird "
@ -1039,6 +1169,14 @@ msgstr ""
"WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als "
"Root ausgeführt?"
msgid ""
"WARNING: Unable to modify scheduling priority of process. Keeping unchanged! "
"Check logs for more info. "
msgstr ""
"WARNUNG: Priorität der Planung des Prozesses kann nicht geändert werden. Es "
"wird keine Änderung vorgenommen! Überprüfen Sie die Log-Dateien für mehr "
"Informationen."
#, python-format
msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up"
msgstr ""
@ -1055,6 +1193,10 @@ msgstr ""
"Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client "
"durchgeführt werden"
#, python-format
msgid "autocreate account %r"
msgstr "Automatisch erstelltes Konto %r"
#, python-format
msgid "method %s is not allowed."
msgstr "Methode %s ist nicht erlaubt."

View File

@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-25 00:41+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -115,44 +115,9 @@ msgstr "%s responded as unmounted"
msgid "%s: Connection reset by peer"
msgstr "%s: Connection reset by peer"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s containers deleted"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s containers possibly remaining"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s containers remaining"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s objects deleted"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s objects possibly remaining"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s objects remaining"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", elapsed: %.02fs"
msgid ", return codes: "
msgstr ", return codes: "
msgid "Account"
msgstr "Account"
#, python-format
msgid "Account %(account)s has not been reaped since %(time)s"
msgstr "Account %(account)s has not been reaped since %(time)s"
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "Account audit \"once\" mode completed: %.02fs"
@ -220,10 +185,6 @@ msgstr "Begin object update single threaded sweep"
msgid "Begin object update sweep"
msgstr "Begin object update sweep"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Beginning pass on account %s"
msgid "Beginning replication run"
msgstr "Beginning replication run"
@ -346,10 +307,6 @@ msgstr "Could not load %(conf)r: %(error)s"
msgid "Data download error: %s"
msgstr "Data download error: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Devices pass completed: %.02fs"
msgid "Did not get a keys dict"
msgstr "Did not get a keys dict"
@ -651,16 +608,10 @@ msgstr "Error: unable to locate %s"
msgid "Exception fetching fragments for %r"
msgstr "Exception fetching fragments for %r"
msgid "Exception in top-level account reaper loop"
msgstr "Exception in top-level account reaper loop"
#, python-format
msgid "Exception in top-level replication loop: %s"
msgstr "Exception in top-level replication loop: %s"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exception in top-level reconstruction loop"
#, python-format
msgid "Exception while deleting container %(account)s %(container)s %(err)s"
msgstr "Exception while deleting container %(account)s %(container)s %(err)s"
@ -669,20 +620,6 @@ msgstr "Exception while deleting container %(account)s %(container)s %(err)s"
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exception with %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Exception with account %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Exception with containers for account %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Exception with objects for container %(container)s for account %(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Expect: 100-continue on %s"
@ -718,10 +655,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Host unreachable"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Incomplete pass on account %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Invalid X-Container-Sync-To format %r"
@ -1114,10 +1047,6 @@ msgstr ""
"The total %(key)s for the container (%(total)s) does not match the sum of "
"%(key)s across policies (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Timeout Exception with %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Timeout fetching fragments for %r"
msgstr "Timeout fetching fragments for %r"
@ -1137,9 +1066,6 @@ msgstr "Trying to get %(status_type)s status of PUT to %(path)s"
msgid "Trying to read during GET"
msgstr "Trying to read during GET"
msgid "Trying to read during GET (retrying)"
msgstr "Trying to read during GET (retrying)"
msgid "Trying to send to client"
msgstr "Trying to send to client"
@ -1191,14 +1117,6 @@ msgstr "Unable to read config from %s"
msgid "Unauth %(sync_from)r => %(sync_to)r"
msgstr "Unauth %(sync_from)r => %(sync_to)r"
#, python-format
msgid ""
"Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at "
"offset 0x%(offset)x"
msgstr ""
"Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at "
"offset 0x%(offset)x"
msgid "Unhandled exception"
msgstr "Unhandled exception"

View File

@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-25 00:41+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -103,44 +103,9 @@ msgstr "%s ha respondido como desmontado"
msgid "%s: Connection reset by peer"
msgstr "%s: Restablecimiento de conexión por igual"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s contenedores suprimidos"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s contenedores posiblemente restantes"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s contenedores restantes"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s objetos suprimidos"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s objetos posiblemente restantes"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s objectos restantes"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", transcurrido: %.02fs"
msgid ", return codes: "
msgstr ", códigos de retorno:"
msgid "Account"
msgstr "Cuenta"
#, python-format
msgid "Account %(account)s has not been reaped since %(time)s"
msgstr "La cuenta %(account)s no se ha cosechado desde %(time)s"
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs"
@ -201,10 +166,6 @@ msgstr "Comenzar el barrido de hebra única de actualización del objeto"
msgid "Begin object update sweep"
msgstr "Comenzar el barrido de actualización del objeto"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Iniciando el paso en la cuenta %s"
msgid "Beginning replication run"
msgstr "Iniciando la ejecución de la replicación"
@ -327,10 +288,6 @@ msgstr "No se ha podido cargar %(conf)r: %(error)s"
msgid "Data download error: %s"
msgstr "Error de descarga de datos: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Paso de dispositivos finalizado: %.02fs"
msgid "Did not get a keys dict"
msgstr "No tuvimos un diccionario de claves"
@ -600,31 +557,10 @@ msgstr "Error: falta el argumento de vía de acceso de configuración"
msgid "Error: unable to locate %s"
msgstr "Error: no se ha podido localizar %s"
msgid "Exception in top-level account reaper loop"
msgstr "Excepción en el bucle cosechador de cuenta de nivel superior"
msgid "Exception in top-levelreconstruction loop"
msgstr "Excepción en el bucle de reconstrucción de nivel superior"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Excepción con %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Excepción con la cuenta %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Excepción con los contenedores para la cuenta %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Excepción con objetos para el contenedor %(container)s para la cuenta "
"%(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Esperado: 100-continuo en %s"
@ -646,10 +582,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Host no alcanzable"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Paso incompleto en la cuenta %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Formato de X-Container-Sync-To no válido %r"
@ -1009,10 +941,6 @@ msgstr ""
"El total de %(key)s del contenedor (%(total)s) no coincide con la suma de "
"%(key)s en las políticas (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Excepción de tiempo de espera superado con %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Intentando %(method)s %(path)s"
@ -1028,9 +956,6 @@ msgstr "Intentando obtener %(status_type)s el estado de PUT a %(path)s"
msgid "Trying to read during GET"
msgstr "Intentado leer durante GET"
msgid "Trying to read during GET (retrying)"
msgstr "Intentando leer durante GET (reintento)"
msgid "Trying to send to client"
msgstr "Intentando enviar al cliente"

View File

@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -83,37 +83,6 @@ msgstr "%s ont été identifié(es) comme étant démonté(es)"
msgid "%s: Connection reset by peer"
msgstr "%s : Connexion réinitialisée par l'homologue"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s containers supprimés"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s conteneur(s) restant(s), le cas échéant"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s conteneur(s) restant(s)"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s objets supprimés"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s objet(s) restant(s), le cas échéant"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s objet(s) restant(s)"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", temps écoulé : %.02fs"
msgid ", return codes: "
msgstr ", return codes: "
msgid "Account"
msgstr "Compte"
@ -165,10 +134,6 @@ msgstr ""
msgid "Begin object update sweep"
msgstr "Démarrer le balayage des mises à jour d'objet"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Démarrage de la session d'audit sur le compte %s"
msgid "Beginning replication run"
msgstr "Démarrage du cycle de réplication"
@ -260,10 +225,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "Erreur de téléchargement des données: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Session d'audit d'unité terminée : %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s"
@ -500,31 +461,10 @@ msgstr "Erreur: Manque argument de configuration du chemin"
msgid "Error: unable to locate %s"
msgstr "Erreur: impossible de localiser %s"
msgid "Exception in top-level account reaper loop"
msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exception dans la boucle de reconstruction de niveau supérieur"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exception liée à %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Exception avec le compte %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Exception avec les containers pour le compte %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Exception liée aux objets pour le conteneur %(container)s et le compte "
"%(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Attendus(s) : 100 - poursuivre sur %s"
@ -547,10 +487,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Hôte inaccessible"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Session d'audit incomplète sur le compte %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Non valide X-Container-Sync-To format %r"
@ -865,12 +801,6 @@ msgstr ""
"Le total %(key)s du conteneur (%(total)s) ne correspond pas à la somme des "
"clés %(key)s des différentes règles (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr ""
"Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/"
"%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentative d'exécution de %(method)s %(path)s"
@ -882,9 +812,6 @@ msgstr "Tentative de lecture de %(full_path)s"
msgid "Trying to read during GET"
msgstr "Tentative de lecture pendant une opération GET"
msgid "Trying to read during GET (retrying)"
msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)"
msgid "Trying to send to client"
msgstr "Tentative d'envoi au client"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -82,44 +82,9 @@ msgstr "%s ha risposto come smontato"
msgid "%s: Connection reset by peer"
msgstr "%s: Connessione reimpostata dal peer"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s contenitori eliminati"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s contenitori probabilmente rimanenti"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s contenitori rimanenti"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s oggetti eliminati"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s oggetti probabilmente rimanenti"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s oggetti rimanenti"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", trascorso: %.02fs"
msgid ", return codes: "
msgstr ", codici di ritorno: "
msgid "Account"
msgstr "Conto"
#, python-format
msgid "Account %(account)s has not been reaped since %(time)s"
msgstr "Il conto %(account)s non è stato verificato dal %(time)s"
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "Modalità \"once\" verifica account completata: %.02fs"
@ -166,10 +131,6 @@ msgstr "Avvio pulizia a singolo thread aggiornamento oggetto"
msgid "Begin object update sweep"
msgstr "Avvio pulizia aggiornamento oggetto"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Avvio della trasmissione sull'account %s"
msgid "Beginning replication run"
msgstr "Avvio replica"
@ -262,10 +223,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "Errore di download dei dati: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Trasmissione dei dispositivi completata: %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s"
@ -499,31 +456,10 @@ msgstr "Errore: Argomento path della configurazione mancante"
msgid "Error: unable to locate %s"
msgstr "Errore: impossibile individuare %s"
msgid "Exception in top-level account reaper loop"
msgstr "Eccezione nel loop reaper dell'account di livello superiore"
msgid "Exception in top-levelreconstruction loop"
msgstr "Eccezione nel loop di ricostruzione di livello superiore"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Eccezione relativa all'account %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Eccezione relativa ai contenitori per l'account %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Eccezione relativa agli oggetti per il contenitore %(container)s per "
"l'account %(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Previsto: 100-continue su %s"
@ -546,10 +482,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Host non raggiungibile"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Trasmissione non completa sull'account %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Formato X-Container-Sync-To non valido %r"
@ -855,10 +787,6 @@ msgstr ""
"Il numero totale di %(key)s per il contenitore (%(total)s) non corrisponde "
"alla somma di %(key)s tra le politiche (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentativo di %(method)s %(path)s"
@ -870,9 +798,6 @@ msgstr "Tentativo di eseguire GET %(full_path)s"
msgid "Trying to read during GET"
msgstr "Tentativo di lettura durante GET"
msgid "Trying to read during GET (retrying)"
msgstr "Tentativo di lettura durante GET (nuovo tentativo)"
msgid "Trying to send to client"
msgstr "Tentativo di invio al client"

View File

@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-25 00:41+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -104,44 +104,9 @@ msgstr "%s はアンマウントとして応答しました"
msgid "%s: Connection reset by peer"
msgstr "%s: 接続がピアによってリセットされました"
#, python-format
msgid ", %s containers deleted"
msgstr "、%s コンテナーが削除されました"
#, python-format
msgid ", %s containers possibly remaining"
msgstr "、%s コンテナーが残っていると思われます"
#, python-format
msgid ", %s containers remaining"
msgstr "、%s コンテナーが残っています"
#, python-format
msgid ", %s objects deleted"
msgstr "、%s オブジェクトが削除されました"
#, python-format
msgid ", %s objects possibly remaining"
msgstr "、%s オブジェクトが残っていると思われます"
#, python-format
msgid ", %s objects remaining"
msgstr "、%s オブジェクトが残っています"
#, fuzzy, python-format
msgid ", elapsed: %.02fs"
msgstr "、経過時間: %.02fs"
msgid ", return codes: "
msgstr "、戻りコード: "
msgid "Account"
msgstr "アカウント"
#, python-format
msgid "Account %(account)s has not been reaped since %(time)s"
msgstr "アカウント %(account)s は %(time)s 以降リープされていません"
#, python-format
msgid "Account audit \"once\" mode completed: %.02fs"
msgstr "アカウント監査 \"once\" モードが完了しました: %.02fs"
@ -198,10 +163,6 @@ msgstr "オブジェクト更新単一スレッド化スイープの開始"
msgid "Begin object update sweep"
msgstr "オブジェクト更新スイープの開始"
#, python-format
msgid "Beginning pass on account %s"
msgstr "アカウント %s でパスを開始中"
msgid "Beginning replication run"
msgstr "複製の実行を開始中"
@ -293,10 +254,6 @@ msgstr "%(conf)r をロードできませんでした: %(error)s"
msgid "Data download error: %s"
msgstr "データダウンロードエラー: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "デバイスの処理が完了しました: %.02fs"
msgid "Did not get a keys dict"
msgstr "キーの辞書を取得できませんでした。"
@ -546,31 +503,10 @@ msgstr "エラー: 構成パス引数がありません"
msgid "Error: unable to locate %s"
msgstr "エラー: %s が見つかりません"
msgid "Exception in top-level account reaper loop"
msgstr "最上位アカウントリーパーループで例外が発生しました"
msgid "Exception in top-levelreconstruction loop"
msgstr "最上位再構成ループで例外が発生しました"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s で例外が発生しました"
#, python-format
msgid "Exception with account %s"
msgstr "アカウント %s で例外が発生しました"
#, python-format
msgid "Exception with containers for account %s"
msgstr "アカウント %s のコンテナーで例外が発生しました"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"アカウント %(account)s のコンテナー %(container)s のオブジェクトで例外が発生"
"しました"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "予期: %s での 100-continue"
@ -592,10 +528,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "ホストが到達不能です"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "アカウント %s での不完全なパス"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "X-Container-Sync-To 形式 %r が無効です"
@ -914,10 +846,6 @@ msgstr ""
"コンテナーの合計 %(key)s (%(total)s) がポリシー全体の合計 %(key)s(%(sum)s) に"
"一致しません"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s のタイムアウト例外"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "%(method)s %(path)s を試行中"
@ -929,9 +857,6 @@ msgstr "GET %(full_path)s を試行中"
msgid "Trying to read during GET"
msgstr "GET 時に読み取りを試行中"
msgid "Trying to read during GET (retrying)"
msgstr "GET 時に読み取りを試行中 (再試行中)"
msgid "Trying to send to client"
msgstr "クライアントへの送信を試行中"

View File

@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -84,37 +84,6 @@ msgstr "%s이(가) 마운트 해제된 것으로 응답"
msgid "%s: Connection reset by peer"
msgstr "%s: 피어에서 연결 재설정"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s 지워진 컨테이너"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s 여분의 컨테이너"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s 남은 컨테이너"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s 지워진 오브젝트"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s o여분의 오브젝트"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s 남은 오브젝트"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", 경과됨: %.02fs"
msgid ", return codes: "
msgstr ", 반환 코드들:"
msgid "Account"
msgstr "계정"
@ -163,10 +132,6 @@ msgstr "오브젝트 업데이트 단일 스레드 스윕 시작"
msgid "Begin object update sweep"
msgstr "오브젝트 업데이트 스윕 시작"
#, python-format
msgid "Beginning pass on account %s"
msgstr "계정 패스 시작 %s"
msgid "Beginning replication run"
msgstr "복제 실행 시작"
@ -253,10 +218,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "데이터 다운로드 오류: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "장치 패스 완료 : %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "오류 %(db_file)s: %(validate_sync_to_err)s"
@ -480,29 +441,10 @@ msgstr "오류: 구성 경로 인수 누락"
msgid "Error: unable to locate %s"
msgstr "오류: %s을(를) 찾을 수 없음"
msgid "Exception in top-level account reaper loop"
msgstr "최상위 계정 루프의 예외 "
msgid "Exception in top-levelreconstruction loop"
msgstr "최상위 레벨 재구성 루프에서 예외 발생"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 예외"
#, python-format
msgid "Exception with account %s"
msgstr "예외 계정 %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "계정 콘테이너의 예외 %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr "계정 %(account)s의 컨테이너 %(container)s에 대한 오브젝트에 예외 발생"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "%s에서 100-continue 예상"
@ -524,10 +466,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "호스트 도달 불가능"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "계정 패스 미완료 %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "올바르지 않은 X-Container-Sync-To 형식 %r"
@ -819,10 +757,6 @@ msgstr ""
"컨테이너의 총 %(key)s가 (%(total)s) 과 %(key)s의 총합 (%(sum)s)가 일치하지 "
"않습니다."
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s에서 제한시간 초과 예외 발생"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "%(method)s %(path)s 시도 중"
@ -834,9 +768,6 @@ msgstr "GET %(full_path)s 시도 중"
msgid "Trying to read during GET"
msgstr "가져오기 중 읽기를 시도함"
msgid "Trying to read during GET (retrying)"
msgstr "가져오기(재시도) 중 읽기를 시도함"
msgid "Trying to send to client"
msgstr "클라이언트로 전송 시도 중"

View File

@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -86,37 +86,6 @@ msgstr "%s respondeu como não montado"
msgid "%s: Connection reset by peer"
msgstr "%s: Reconfiguração da conexão por peer"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s containers apagados"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s containers possivelmente restando"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s containers restando"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s objetos apagados"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s objetos possivelmente restando"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s objetos restando"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", passados: %.02fs"
msgid ", return codes: "
msgstr ", códigos de retorno:"
msgid "Account"
msgstr "Conta"
@ -165,10 +134,6 @@ msgstr "Inicie a varredura de encadeamento único da atualização do objeto"
msgid "Begin object update sweep"
msgstr "Inicie a varredura da atualização do objeto"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Iniciando a estapa nas contas %s"
msgid "Beginning replication run"
msgstr "Começando execução de replicação"
@ -257,10 +222,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "Erro ao fazer download de dados: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Dispositivos finalizados: %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s"
@ -485,30 +446,10 @@ msgstr "Erro: argumento do caminho de configuração ausente"
msgid "Error: unable to locate %s"
msgstr "Erro: não é possível localizar %s"
msgid "Exception in top-level account reaper loop"
msgstr "Exceção no loop do removedor da conta de nível superior"
msgid "Exception in top-levelreconstruction loop"
msgstr "Exceção no loop de reconstrução de nível superior"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exceção com %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Exceção com a conta %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Exceção com os containers para a conta %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Exceção com objetos para o container %(container)s para conta %(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Expectativa: 100-continuar em %s"
@ -530,10 +471,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Destino inalcançável"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Estapa incompleta nas contas %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Formato X-Container-Sync-To inválido %r"
@ -840,10 +777,6 @@ msgstr ""
"O total %(key)s para o container (%(total)s) não confere com a soma %(key)s "
"pelas politicas (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Tentando %(method)s %(path)s"
@ -855,9 +788,6 @@ msgstr "Tentando GET %(full_path)s"
msgid "Trying to read during GET"
msgstr "Tentando ler durante GET"
msgid "Trying to read during GET (retrying)"
msgstr "Tentando ler durante GET (tentando novamente)"
msgid "Trying to send to client"
msgstr "Tentando enviar para o cliente"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -84,37 +84,6 @@ msgstr "%s ответил как размонтированный"
msgid "%s: Connection reset by peer"
msgstr "%s: соединение сброшено на другой стороне"
#, python-format
msgid ", %s containers deleted"
msgstr ", удалено контейнеров: %s"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", осталось контейнеров (возможно): %s"
#, python-format
msgid ", %s containers remaining"
msgstr ", осталось контейнеров: %s"
#, python-format
msgid ", %s objects deleted"
msgstr ", удалено объектов: %s"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", осталось объектов (возможно): %s"
#, python-format
msgid ", %s objects remaining"
msgstr ", осталось объектов: %s"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", прошло: %.02fs"
msgid ", return codes: "
msgstr ", коды возврата: "
msgid "Account"
msgstr "Учетная запись"
@ -163,10 +132,6 @@ msgstr "Начать однонитевую сплошную проверку о
msgid "Begin object update sweep"
msgstr "Начать сплошную проверку обновлений объекта"
#, python-format
msgid "Beginning pass on account %s"
msgstr "Начинается проход для учетной записи %s"
msgid "Beginning replication run"
msgstr "Запуск репликации"
@ -255,10 +220,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "Ошибка загрузки данных: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Проход устройств выполнен: %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
@ -491,32 +452,10 @@ msgstr "Ошибка: отсутствует аргумент пути конф
msgid "Error: unable to locate %s"
msgstr "Ошибка: не удалось найти %s"
msgid "Exception in top-level account reaper loop"
msgstr ""
"Исключительная ситуация в цикле чистильщика учетных записей верхнего уровня"
msgid "Exception in top-levelreconstruction loop"
msgstr "Исключение в цикле реконструкции верхнего уровня"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Исключительная ситуация в %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Exception with account %s"
msgstr "Исключительная ситуация в учетной записи %s"
#, python-format
msgid "Exception with containers for account %s"
msgstr "Исключительная ситуация в контейнерах для учетной записи %s"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr ""
"Исключительная ситуация в объектах для контейнера %(container)s для учетной "
"записи %(account)s"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Ожидаемое значение: 100-continue в %s"
@ -538,10 +477,6 @@ msgstr ""
msgid "Host unreachable"
msgstr "Хост недоступен"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "Не завершен проход для учетной записи %s"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Недопустимый формат X-Container-Sync-To %r"
@ -849,10 +784,6 @@ msgstr ""
"Общее число %(key)s для контейнера (%(total)s) не соответствует сумме "
"%(key)s в стратегиях (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "Попытка выполнения метода %(method)s %(path)s"
@ -864,9 +795,6 @@ msgstr "Попытка GET-запроса %(full_path)s"
msgid "Trying to read during GET"
msgstr "Попытка чтения во время операции GET"
msgid "Trying to read during GET (retrying)"
msgstr "Попытка чтения во время операции GET (выполняется повтор)"
msgid "Trying to send to client"
msgstr "Попытка отправки клиенту"

View File

@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -83,37 +83,6 @@ msgstr "%s bağlı değil olarak yanıt verdi"
msgid "%s: Connection reset by peer"
msgstr "%s: Bağlantı eş tarafından sıfırlandı"
#, python-format
msgid ", %s containers deleted"
msgstr ", %s kap silindi"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ", %s kap kaldı muhtemelen"
#, python-format
msgid ", %s containers remaining"
msgstr ", %s kap kaldı"
#, python-format
msgid ", %s objects deleted"
msgstr ", %s nesne silindi"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ", %s nesne kaldı muhtemelen"
#, python-format
msgid ", %s objects remaining"
msgstr ", %s nesne kaldı"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ", geçen süre: %.02fs"
msgid ", return codes: "
msgstr ", dönen kodlar: "
msgid "Account"
msgstr "Hesap"
@ -161,10 +130,6 @@ msgstr "Nesne güncelleme tek iş iplikli süpürmeye başla"
msgid "Begin object update sweep"
msgstr "Nesne güncelleme süpürmesine başla"
#, python-format
msgid "Beginning pass on account %s"
msgstr "%s hesabı üzerinde geçiş başlatılıyor"
msgid "Beginning replication run"
msgstr "Çoğaltmanın çalıştırılmasına başlanıyor"
@ -249,10 +214,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "Veri indirme hatası: %s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "Aygıtlar geçişi tamamlandı: %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "HATA %(db_file)s: %(validate_sync_to_err)s"
@ -474,29 +435,10 @@ msgstr "Hata: yapılandırma yolu değişkeni eksik"
msgid "Error: unable to locate %s"
msgstr "Hata: %s bulunamıyor"
msgid "Exception in top-level account reaper loop"
msgstr "Üst seviye hesap biçme döngüsünde istisna"
msgid "Exception in top-levelreconstruction loop"
msgstr "Üst seviye yeniden oluşturma döngüsünde istisna"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s ile istisna"
#, python-format
msgid "Exception with account %s"
msgstr "%s hesabında istisna"
#, python-format
msgid "Exception with containers for account %s"
msgstr "%s hesabı için kaplarla ilgili istisna"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr "%(account)s hesabı için %(container)s kabı için nesneler için istisna"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "Beklenen: 100-%s üzerinden devam et"
@ -511,10 +453,6 @@ msgstr "Yapılandırmalar bulundu:"
msgid "Host unreachable"
msgstr "İstemci erişilebilir değil"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "%s hesabından tamamlanmamış geçiş"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "Geçersix X-Container-Sync-To biçimi %r"
@ -796,10 +734,6 @@ msgstr ""
"(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla "
"eşleşmiyor (%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s ile zaman aşımı istisnası"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "%(method)s %(path)s deneniyor"
@ -811,9 +745,6 @@ msgstr "%(full_path)s GET deneniyor"
msgid "Trying to read during GET"
msgstr "GET sırasında okuma deneniyor"
msgid "Trying to read during GET (retrying)"
msgstr "GET sırasında okuma deneniyor (yeniden deneniyor)"
msgid "Trying to send to client"
msgstr "İstemciye gönderilmeye çalışılıyor"

View File

@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -81,37 +81,6 @@ msgstr "%s 响应为未安装"
msgid "%s: Connection reset by peer"
msgstr "%s已由同级重置连接"
#, python-format
msgid ", %s containers deleted"
msgstr ",删除容器%s"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ",可能剩余容器%s"
#, python-format
msgid ", %s containers remaining"
msgstr ",剩余容器%s"
#, python-format
msgid ", %s objects deleted"
msgstr ",删除对象%s"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ",可能剩余对象%s"
#, python-format
msgid ", %s objects remaining"
msgstr ",剩余对象%s"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ",耗时:%.02fs"
msgid ", return codes: "
msgstr ",返回代码:"
msgid "Account"
msgstr "账号"
@ -159,10 +128,6 @@ msgstr "开始对象更新单线程扫除"
msgid "Begin object update sweep"
msgstr "开始对象更新扫除"
#, python-format
msgid "Beginning pass on account %s"
msgstr "账号%s开始通过"
msgid "Beginning replication run"
msgstr "开始运行复制"
@ -247,10 +212,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "数据下载错误:%s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "设备通过完成: %.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "错误 %(db_file)s: %(validate_sync_to_err)s"
@ -468,29 +429,10 @@ msgstr "错误:设置路径信息丢失"
msgid "Error: unable to locate %s"
msgstr "错误:无法查询到 %s"
msgid "Exception in top-level account reaper loop"
msgstr "异常出现在top-level账号reaper环"
msgid "Exception in top-levelreconstruction loop"
msgstr " top-levelreconstruction 环中发生异常"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s出现异常"
#, python-format
msgid "Exception with account %s"
msgstr "账号%s出现异常"
#, python-format
msgid "Exception with containers for account %s"
msgstr "账号%s内容器出现异常"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr "账号%(account)s容器%(container)s的对象出现异常"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "已知100-continue on %s"
@ -510,10 +452,6 @@ msgstr "Handoffs 优先方式仍有 handoffs。正在中止当前复制过程。
msgid "Host unreachable"
msgstr "无法连接到主机"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "账号%s未完成通过"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "无效的X-Container-Sync-To格式%r"
@ -800,10 +738,6 @@ msgid ""
"%(key)s across policies (%(sum)s)"
msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)"
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 发生超时异常"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "尝试执行%(method)s %(path)s"
@ -815,9 +749,6 @@ msgstr "正尝试获取 %(full_path)s"
msgid "Trying to read during GET"
msgstr "执行GET时尝试读取"
msgid "Trying to read during GET (retrying)"
msgstr "执行GET时尝试读取(重新尝试)"
msgid "Trying to send to client"
msgstr "尝试发送到客户端"

View File

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: swift VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-08-09 00:12+0000\n"
"POT-Creation-Date: 2019-10-04 06:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@ -81,37 +81,6 @@ msgstr "%s 已回應為未裝載"
msgid "%s: Connection reset by peer"
msgstr "%s已由對等項目重設連線"
#, python-format
msgid ", %s containers deleted"
msgstr ",已刪除 %s 個儲存器"
#, python-format
msgid ", %s containers possibly remaining"
msgstr ",可能剩餘 %s 個儲存器"
#, python-format
msgid ", %s containers remaining"
msgstr ",剩餘 %s 個儲存器"
#, python-format
msgid ", %s objects deleted"
msgstr ",已刪除 %s 個物件"
#, python-format
msgid ", %s objects possibly remaining"
msgstr ",可能剩餘 %s 個物件"
#, python-format
msgid ", %s objects remaining"
msgstr ",剩餘 %s 個物件"
#, python-format
msgid ", elapsed: %.02fs"
msgstr ",經歷時間:%.02fs"
msgid ", return codes: "
msgstr ",回覆碼:"
msgid "Account"
msgstr "帳戶"
@ -159,10 +128,6 @@ msgstr "開始物件更新單一執行緒清理"
msgid "Begin object update sweep"
msgstr "開始物件更新清理"
#, python-format
msgid "Beginning pass on account %s"
msgstr "正在開始帳戶 %s 上的通過"
msgid "Beginning replication run"
msgstr "正在開始抄寫執行"
@ -247,10 +212,6 @@ msgstr ""
msgid "Data download error: %s"
msgstr "資料下載錯誤:%s"
#, python-format
msgid "Devices pass completed: %.02fs"
msgstr "裝置通過已完成:%.02fs"
#, python-format
msgid "ERROR %(db_file)s: %(validate_sync_to_err)s"
msgstr "錯誤:%(db_file)s%(validate_sync_to_err)s"
@ -469,29 +430,10 @@ msgstr "錯誤:遺漏配置路徑引數"
msgid "Error: unable to locate %s"
msgstr "錯誤:找不到 %s"
msgid "Exception in top-level account reaper loop"
msgstr "最上層帳戶 Reaper 迴圈發生異常狀況"
msgid "Exception in top-levelreconstruction loop"
msgstr "最上層重新建構迴圈中發生異常狀況"
#, python-format
msgid "Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀況"
#, python-format
msgid "Exception with account %s"
msgstr "帳戶 %s 發生異常狀況"
#, python-format
msgid "Exception with containers for account %s"
msgstr "帳戶 %s 的儲存器發生異常狀況"
#, python-format
msgid ""
"Exception with objects for container %(container)s for account %(account)s"
msgstr "帳戶 %(account)s 儲存器 %(container)s 的物件發生異常狀況"
#, python-format
msgid "Expect: 100-continue on %s"
msgstr "預期 100 - 在 %s 上繼續"
@ -511,10 +453,6 @@ msgstr "「遞交作業最先」模式仍有剩餘的遞交作業。正在中斷
msgid "Host unreachable"
msgstr "無法抵達主機"
#, python-format
msgid "Incomplete pass on account %s"
msgstr "帳戶 %s 上的通過未完成"
#, python-format
msgid "Invalid X-Container-Sync-To format %r"
msgstr "無效的 X-Container-Sync-To 格式 %r"
@ -803,10 +741,6 @@ msgid ""
msgstr ""
"儲存器的 %(key)s 總計 (%(total)s) 不符合原則中的 %(key)s 總和 (%(sum)s) "
#, python-format
msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s"
msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀況"
#, python-format
msgid "Trying to %(method)s %(path)s"
msgstr "正在嘗試 %(method)s %(path)s"
@ -818,9 +752,6 @@ msgstr "正在嘗試對 %(full_path)s 執行 GET 動作"
msgid "Trying to read during GET"
msgstr "正在嘗試於 GET 期間讀取"
msgid "Trying to read during GET (retrying)"
msgstr "正在嘗試於 GET 期間讀取(正在重試)"
msgid "Trying to send to client"
msgstr "正在嘗試傳送至用戶端"

View File

@ -2342,6 +2342,10 @@ class BaseDiskFile(object):
self._account = account
self._container = container
self._obj = obj
elif account or container or obj:
raise ValueError(
'Received a/c/o args %r, %r, and %r. Either none or all must '
'be provided.' % (account, container, obj))
else:
# gets populated when we read the metadata
self._name = None
@ -2435,7 +2439,7 @@ class BaseDiskFile(object):
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
return cls(mgr, device_path, None, partition, _datadir=hash_dir_path,
return cls(mgr, device_path, partition, _datadir=hash_dir_path,
policy=policy)
def open(self, modernize=False, current_time=None):
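The hunk above makes the account/container/obj arguments all-or-none, and from_hash_dir accordingly stops passing a stray None. A minimal standalone sketch of that contract (hypothetical helper, not Swift code):

    def check_aco(account, container, obj):
        # all-or-none: a partially specified a/c/o triple is rejected
        if account and container and obj:
            return account, container, obj
        elif account or container or obj:
            raise ValueError(
                'Received a/c/o args %r, %r, and %r. Either none or all '
                'must be provided.' % (account, container, obj))
        # none given: the name is read later from on-disk metadata
        return None, None, None

    check_aco('AUTH_test', 'cont', 'obj')   # ok
    check_aco(None, None, None)             # ok
    # check_aco('AUTH_test', None, 'obj')   # would raise ValueError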

View File

@ -44,7 +44,7 @@ import six
from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request
from swift.common.utils import Timestamp, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
GreenAsyncPile, quorum_size, parse_content_type, \
GreenAsyncPile, quorum_size, parse_content_type, close_if_possible, \
document_iters_to_http_response_body, ShardRange, find_shard_range
from swift.common.bufferedhttp import http_connect
from swift.common import constraints
@ -357,6 +357,7 @@ def get_container_info(env, app, swift_source=None):
env, ("/%s/%s/%s" % (version, wsgi_account, wsgi_container)),
(swift_source or 'GET_CONTAINER_INFO'))
resp = req.get_response(app)
close_if_possible(resp.app_iter)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
@ -412,6 +413,7 @@ def get_account_info(env, app, swift_source=None):
env, "/%s/%s" % (version, wsgi_account),
(swift_source or 'GET_ACCOUNT_INFO'))
resp = req.get_response(app)
close_if_possible(resp.app_iter)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
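Both info helpers now close the response's app_iter so a generator-backed iterator is finalized promptly rather than waiting on garbage collection. Roughly what the close_if_possible helper does (paraphrased, not the verbatim swift.common.utils source):

    def close_if_possible(maybe_closable):
        # close the app_iter if it exposes close(), so generator cleanup
        # (and any backend connection release) happens right away
        close_method = getattr(maybe_closable, 'close', None)
        if callable(close_method):
            return close_method()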

View File

@ -16,6 +16,7 @@
from swift import gettext_ as _
import json
import six
from six.moves.urllib.parse import unquote
from swift.common.utils import public, private, csv_append, Timestamp, \
@ -27,7 +28,8 @@ from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, set_info_cache, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPNotFound, HTTPServiceUnavailable, str_to_wsgi, wsgi_to_bytes
HTTPNotFound, HTTPServiceUnavailable, str_to_wsgi, wsgi_to_str, \
bytes_to_wsgi
class ContainerController(Controller):
@ -162,8 +164,8 @@ class ContainerController(Controller):
params.pop('states', None)
req.headers.pop('X-Backend-Record-Type', None)
reverse = config_true_value(params.get('reverse'))
marker = params.get('marker')
end_marker = params.get('end_marker')
marker = wsgi_to_str(params.get('marker'))
end_marker = wsgi_to_str(params.get('end_marker'))
limit = req_limit
for shard_range in shard_ranges:
@ -176,9 +178,9 @@ class ContainerController(Controller):
if objects:
last_name = objects[-1].get('name',
objects[-1].get('subdir', u''))
params['marker'] = last_name.encode('utf-8')
params['marker'] = bytes_to_wsgi(last_name.encode('utf-8'))
elif marker:
params['marker'] = marker
params['marker'] = str_to_wsgi(marker)
else:
params['marker'] = ''
# Always set end_marker to ensure that misplaced objects beyond the
@ -186,7 +188,7 @@ class ContainerController(Controller):
# object obscuring correctly placed objects in the next shard
# range.
if end_marker and end_marker in shard_range:
params['end_marker'] = end_marker
params['end_marker'] = str_to_wsgi(end_marker)
elif reverse:
params['end_marker'] = str_to_wsgi(shard_range.lower_str)
else:
@ -213,13 +215,13 @@ class ContainerController(Controller):
if limit <= 0:
break
if (end_marker and reverse and
(wsgi_to_bytes(end_marker) >=
objects[-1]['name'].encode('utf-8'))):
last_name = objects[-1].get('name',
objects[-1].get('subdir', u''))
if six.PY2:
last_name = last_name.encode('utf8')
if end_marker and reverse and end_marker >= last_name:
break
if (end_marker and not reverse and
(wsgi_to_bytes(end_marker) <=
objects[-1]['name'].encode('utf-8'))):
if end_marker and not reverse and end_marker <= last_name:
break
resp.body = json.dumps(objects).encode('ascii')
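The marker handling above converts between native strings and WSGI strings because, under py3, WSGI strings carry UTF-8 bytes reinterpreted as latin-1 text. A small round-trip sketch of that convention (paraphrasing what str_to_wsgi/wsgi_to_str do, not the swob source):

    # py3 semantics, paraphrased:
    name = u'sn\u00e9ake'                           # native string from JSON
    wsgi = name.encode('utf-8').decode('latin-1')   # ~ str_to_wsgi(name)
    back = wsgi.encode('latin-1').decode('utf-8')   # ~ wsgi_to_str(wsgi)
    assert back == name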

View File

@ -927,8 +927,8 @@ class ReplicatedObjectController(BaseObjectController):
send_chunk(chunk)
if req.content_length and (
bytes_transferred < req.content_length):
ml = req.message_length()
if ml and bytes_transferred < ml:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending enough data'))
@ -2638,8 +2638,8 @@ class ECObjectController(BaseObjectController):
send_chunk(chunk)
if req.content_length and (
bytes_transferred < req.content_length):
ml = req.message_length()
if ml and bytes_transferred < ml:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending enough data'))
@ -2787,7 +2787,8 @@ class ECObjectController(BaseObjectController):
policy = POLICIES.get_by_index(policy_index)
expected_frag_size = None
if req.content_length:
ml = req.message_length()
if ml:
# TODO: PyECLib <= 1.2.0 appears to return segment info that
# differs from the input for aligned-data efficiency, but
# Swift never does. So calculate the fragment length Swift
@ -2797,12 +2798,12 @@ class ECObjectController(BaseObjectController):
# and the next call is to get info for the last segment
# get number of fragments except the tail - use truncation //
num_fragments = req.content_length // policy.ec_segment_size
num_fragments = ml // policy.ec_segment_size
expected_frag_size = policy.fragment_size * num_fragments
# calculate the tail fragment_size by hand and add it to
# expected_frag_size
last_segment_size = req.content_length % policy.ec_segment_size
last_segment_size = ml % policy.ec_segment_size
if last_segment_size:
last_info = policy.pyeclib_driver.get_segment_info(
last_segment_size, policy.ec_segment_size)
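Two related changes here: req.message_length() replaces raw content_length checks (for chunked uploads the length is unknown, so the disconnect and fragment-size logic must cope with None), and the fragment-size arithmetic splits the body into whole segments plus a tail. A sketch with a paraphrased message_length and made-up example numbers:

    def message_length(headers):
        # paraphrase: a chunked transfer-encoding means the length is unknown
        if 'chunked' in headers.get('transfer-encoding', '').lower():
            return None
        cl = headers.get('content-length')
        return int(cl) if cl is not None else None

    # fragment-size arithmetic with assumed example values:
    ec_segment_size = 1048576                 # 1 MiB EC segments
    fragment_size = 262144                    # fragment bytes per full segment
    ml = 2621440                              # 2.5 MiB message length
    num_fragments = ml // ec_segment_size     # 2 whole segments
    expected_frag_size = fragment_size * num_fragments   # 524288
    last_segment_size = ml % ec_segment_size  # 524288-byte tail
    # the tail's fragment size comes from pyeclib's get_segment_info(),
    # then gets added to expected_frag_size as in the hunk above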

View File

@ -457,6 +457,13 @@ class Application(object):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
te = req.headers.get('transfer-encoding', '').lower()
if te.rsplit(',', 1)[-1].strip() == 'chunked' and \
'content-length' in req.headers:
# RFC says if both are present, transfer-encoding wins.
# Definitely *don't* forward on the header the backend
# ought to ignore; that offers request-smuggling vectors.
del req.headers['content-length']
return req
def handle_request(self, req):
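The new header normalization above defends against request smuggling by refusing to forward Content-Length alongside a chunked Transfer-Encoding. The same check, as a standalone sketch over a plain dict of lower-cased headers:

    headers = {'transfer-encoding': 'gzip, chunked', 'content-length': '4'}
    te = headers.get('transfer-encoding', '').lower()
    if te.rsplit(',', 1)[-1].strip() == 'chunked' and \
            'content-length' in headers:
        # RFC 7230 s3.3.3: Transfer-Encoding wins; drop the smuggling vector
        del headers['content-length']
    assert 'content-length' not in headers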

View File

@ -43,8 +43,6 @@ from swift.common.storage_policy import parse_storage_policies, PolicyError
from swift.common.utils import set_swift_dir
from test import get_config, listen_zero
from test.functional.swift_test_client import Account, Connection, Container, \
ResponseError
from test.unit import debug_logger, FakeMemcache
# importing skip_if_no_xattrs so that functional tests can grab it from the
@ -75,6 +73,10 @@ eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)
# swift_test_client imports from swiftclient, so import it after the monkey-patching
from test.functional.swift_test_client import Account, Connection, Container, \
ResponseError
from swiftclient import get_auth, http_connection
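The import is moved below the monkey-patching because modules can bind the socket implementation at import time; imported earlier, they would keep the unpatched socket. Schematically (assumed module behavior):

    import eventlet
    eventlet.patcher.monkey_patch(all=False, socket=True)
    # only now import modules that may capture the socket module at
    # import time (e.g. via requests/urllib3 in swiftclient)
    import swiftclient  # noqa: E402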
has_insecure = False
@ -956,12 +958,18 @@ def setup_package():
swift_test_user[0] = config['username']
swift_test_tenant[0] = config['account']
swift_test_key[0] = config['password']
if 'domain' in config:
swift_test_domain[0] = config['domain']
swift_test_user[1] = config['username2']
swift_test_tenant[1] = config['account2']
swift_test_key[1] = config['password2']
if 'domain2' in config:
swift_test_domain[1] = config['domain2']
swift_test_user[2] = config['username3']
swift_test_tenant[2] = config['account']
swift_test_key[2] = config['password3']
if 'domain3' in config:
swift_test_domain[2] = config['domain3']
if 'username4' in config:
swift_test_user[3] = config['username4']
swift_test_tenant[3] = config['account4']
@ -971,10 +979,14 @@ def setup_package():
swift_test_user[4] = config['username5']
swift_test_tenant[4] = config['account5']
swift_test_key[4] = config['password5']
if 'domain5' in config:
swift_test_domain[4] = config['domain5']
if 'username6' in config:
swift_test_user[5] = config['username6']
swift_test_tenant[5] = config['account6']
swift_test_key[5] = config['password6']
if 'domain6' in config:
swift_test_domain[5] = config['domain6']
for _ in range(5):
swift_test_perm[_] = swift_test_tenant[_] + ':' \

View File

@ -235,6 +235,24 @@ class TestS3ApiBucket(S3ApiBaseBoto3):
resp_prefixes,
[{'Prefix': p} for p in expect_prefixes])
def test_get_bucket_with_multi_char_delimiter(self):
bucket = 'bucket'
put_objects = ('object', 'object2', 'subdir/object', 'subdir2/object',
'dir/subdir/object')
self._prepare_test_get_bucket(bucket, put_objects)
delimiter = '/obj'
expect_objects = ('object', 'object2')
expect_prefixes = ('dir/subdir/obj', 'subdir/obj', 'subdir2/obj')
resp = self.conn.list_objects(Bucket=bucket, Delimiter=delimiter)
self.assertEqual(200, resp['ResponseMetadata']['HTTPStatusCode'])
self.assertEqual(resp['Delimiter'], delimiter)
self._validate_object_listing(resp['Contents'], expect_objects)
resp_prefixes = resp['CommonPrefixes']
self.assertEqual(
resp_prefixes,
[{'Prefix': p} for p in expect_prefixes])
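The new test exercises multi-character delimiters: everything after the first occurrence of the delimiter is rolled up into a single CommonPrefixes entry. A plain-Python worked example of that grouping, using the same names as the test:

    delimiter = '/obj'
    names = ['object', 'object2', 'subdir/object', 'subdir2/object',
             'dir/subdir/object']
    prefixes = sorted({n[:n.index(delimiter) + len(delimiter)]
                       for n in names if delimiter in n})
    objects = [n for n in names if delimiter not in n]
    assert prefixes == ['dir/subdir/obj', 'subdir/obj', 'subdir2/obj']
    assert objects == ['object', 'object2']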
def test_get_bucket_with_encoding_type(self):
bucket = 'bucket'
put_objects = ('object', 'object2')

View File

@ -208,6 +208,7 @@ class Connection(object):
self.insecure = config_true_value(config.get('insecure', 'false'))
self.auth_version = str(config.get('auth_version', '1'))
self.domain = config.get('domain')
self.account = config.get('account')
self.username = config['username']
self.password = config['password']
@ -269,8 +270,13 @@ class Connection(object):
else:
requests.packages.urllib3.disable_warnings(
InsecureRequestWarning)
if self.domain:
os_opts = {'project_domain_name': self.domain,
'user_domain_name': self.domain}
else:
os_opts = {}
authargs = dict(snet=False, tenant_name=self.account,
auth_version=self.auth_version, os_options={},
auth_version=self.auth_version, os_options=os_opts,
insecure=self.insecure)
(storage_url, storage_token) = get_auth(
self.auth_url, auth_user, self.password, **authargs)
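With a domain configured, the project and user domain names are passed through to Keystone via os_options. A hedged usage sketch with placeholder endpoint and credentials:

    from swiftclient import get_auth

    os_opts = {'project_domain_name': 'default',
               'user_domain_name': 'default'}
    storage_url, token = get_auth(
        'http://keystone.example.com/identity/v3',  # placeholder endpoint
        'tester', 'secret',                         # placeholder credentials
        tenant_name='test', auth_version='3', os_options=os_opts)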

View File

@ -915,10 +915,11 @@ class TestObjectVersioningHistoryMode(TestObjectVersioning):
expected = [b'old content', b'112233', b'new content', b'']
name_len = len(obj_name if six.PY2 else obj_name.encode('utf8'))
bodies = [
self.env.versions_container.file(f).read()
for f in self.env.versions_container.files(parms={
'prefix': '%03x%s/' % (len(obj_name), obj_name)})]
'prefix': '%03x%s/' % (name_len, obj_name)})]
self.assertEqual(expected, bodies)
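The fix computes the versions-container prefix from the object name's byte length rather than its character count, which diverge for non-ASCII names on py3. A quick worked example with a hypothetical name:

    obj_name = u'caf\u00e9'                    # hypothetical non-ASCII name
    len(obj_name)                              # 4 characters
    len(obj_name.encode('utf8'))               # 5 bytes
    prefix = '%03x%s/' % (len(obj_name.encode('utf8')), obj_name)
    assert prefix == u'005caf\u00e9/'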
def test_versioning_check_acl(self):
@ -962,6 +963,11 @@ class TestObjectVersioningHistoryMode(TestObjectVersioning):
self.assertEqual(4, self.env.versions_container.info()['object_count'])
class TestObjectVersioningHistoryModeUTF8(
Base2, TestObjectVersioningHistoryMode):
pass
class TestSloWithVersioning(unittest2.TestCase):
def setUp(self):
@ -982,7 +988,10 @@ class TestSloWithVersioning(unittest2.TestCase):
self.segments_container = self.account.container(Utils.create_name())
if not self.container.create(
hdrs={'X-Versions-Location': self.versions_container.name}):
raise ResponseError(self.conn.response)
if self.conn.response.status == 412:
raise SkipTest("Object versioning not enabled")
else:
raise ResponseError(self.conn.response)
if 'versions' not in self.container.info():
raise SkipTest("Object versioning not enabled")
@ -1023,10 +1032,16 @@ class TestSloWithVersioning(unittest2.TestCase):
self.fail("GET with multipart-manifest=get got invalid json")
self.assertEqual(1, len(manifest))
key_map = {'etag': 'hash', 'size_bytes': 'bytes', 'path': 'name'}
key_map = {'etag': 'hash', 'size_bytes': 'bytes'}
for k_client, k_slo in key_map.items():
self.assertEqual(self.seg_info[seg_name][k_client],
manifest[0][k_slo])
if six.PY2:
self.assertEqual(self.seg_info[seg_name]['path'].decode('utf8'),
manifest[0]['name'])
else:
self.assertEqual(self.seg_info[seg_name]['path'],
manifest[0]['name'])
def _assert_is_object(self, file_item, seg_data):
file_contents = file_item.read()
@ -1064,3 +1079,137 @@ class TestSloWithVersioning(unittest2.TestCase):
# expect the original manifest file to be restored
self._assert_is_manifest(file_item, 'a')
self._assert_is_object(file_item, b'a')
def test_slo_manifest_version_size(self):
file_item = self._create_manifest('a')
# sanity check: read the manifest, then the large object
self._assert_is_manifest(file_item, 'a')
self._assert_is_object(file_item, b'a')
# original manifest size
primary_list = self.container.files(parms={'format': 'json'})
self.assertEqual(1, len(primary_list))
org_size = primary_list[0]['bytes']
# upload new manifest
file_item = self._create_manifest('b')
# sanity check: read the manifest, then the large object
self._assert_is_manifest(file_item, 'b')
self._assert_is_object(file_item, b'b')
versions_list = self.versions_container.files(parms={'format': 'json'})
self.assertEqual(1, len(versions_list))
version_file = self.versions_container.file(versions_list[0]['name'])
version_file_size = versions_list[0]['bytes']
# check the version is still a manifest
self._assert_is_manifest(version_file, 'a')
self._assert_is_object(version_file, b'a')
# check the version size is correct
self.assertEqual(version_file_size, org_size)
# delete the newest manifest
file_item.delete()
# expect the original manifest file to be restored
self._assert_is_manifest(file_item, 'a')
self._assert_is_object(file_item, b'a')
primary_list = self.container.files(parms={'format': 'json'})
self.assertEqual(1, len(primary_list))
primary_file_size = primary_list[0]['bytes']
# expect the original manifest file size to be the same
self.assertEqual(primary_file_size, org_size)
class TestSloWithVersioningUTF8(Base2, TestSloWithVersioning):
pass
class TestObjectVersioningChangingMode(Base):
env = TestObjectVersioningHistoryModeEnv
def test_delete_while_changing_mode(self):
container = self.env.container
versions_container = self.env.versions_container
cont_info = container.info()
self.assertEqual(cont_info['versions'], quote(versions_container.name))
obj_name = Utils.create_name()
versioned_obj = container.file(obj_name)
versioned_obj.write(
b"version1", hdrs={'Content-Type': 'text/jibberish01'})
versioned_obj.write(
b"version2", hdrs={'Content-Type': 'text/jibberish01'})
# sanity, version1 object should have moved to versions_container
self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj.delete()
# version2 and the delete marker should have been put in versions_container
self.assertEqual(3, versions_container.info()['object_count'])
delete_marker_name = versions_container.files()[2]
delete_marker = versions_container.file(delete_marker_name)
delete_marker.initialize()
self.assertEqual(
delete_marker.content_type,
'application/x-deleted;swift_versions_deleted=1')
# change to stack mode
hdrs = {'X-Versions-Location': versions_container.name}
container.update_metadata(hdrs=hdrs)
versioned_obj.delete()
# version2 object should have been moved back into the container
self.assertEqual(b"version2", versioned_obj.read())
# and only version1 is left in versions_container
self.assertEqual(1, versions_container.info()['object_count'])
versioned_obj_name = versions_container.files()[0]
prev_version = versions_container.file(versioned_obj_name)
prev_version.initialize()
self.assertEqual(b"version1", prev_version.read())
self.assertEqual(prev_version.content_type, 'text/jibberish01')
# reset and test double delete
# change back to history mode
hdrs = {'X-History-Location': versions_container.name}
container.update_metadata(hdrs=hdrs)
# double delete, second DELETE returns a 404 as expected
versioned_obj.delete()
with self.assertRaises(ResponseError) as cm:
versioned_obj.delete()
self.assertEqual(404, cm.exception.status)
# There should now be 4 objects total in versions_container
# 2 are delete markers
self.assertEqual(4, versions_container.info()['object_count'])
# change to stack mode
hdrs = {'X-Versions-Location': versions_container.name}
container.update_metadata(hdrs=hdrs)
# a delete just removes one delete marker; it doesn't yet pop
# version2 back into the container
# This DELETE doesn't return a 404!
versioned_obj.delete()
self.assertEqual(3, versions_container.info()['object_count'])
self.assertEqual(0, container.info()['object_count'])
# neither does this one!
versioned_obj.delete()
# version2 object should have been moved back into the container
self.assertEqual(b"version2", versioned_obj.read())
# and only version1 is left in versions_container
self.assertEqual(1, versions_container.info()['object_count'])
class TestObjectVersioningChangingModeUTF8(
Base2, TestObjectVersioningChangingMode):
pass

View File

@ -301,6 +301,25 @@ class TestAccount(Base):
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
def testListMultiCharDelimiter(self):
delimiter = '-&'
containers = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])]
for c in containers:
cont = self.env.account.container(c)
self.assertTrue(cont.create())
results = self.env.account.containers(parms={'delimiter': delimiter})
expected = ['test', 'test-&']
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
results = self.env.account.containers(parms={'delimiter': delimiter,
'reverse': 'yes'})
expected.reverse()
results = [r for r in results if r in expected]
self.assertEqual(expected, results)
def testListDelimiterAndPrefix(self):
delimiter = 'a'
containers = ['bar', 'bazar']
@ -668,6 +687,36 @@ class TestContainer(Base):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test-', 'test'])
def testListMultiCharDelimiter(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())
delimiter = '-&'
files = ['test', delimiter.join(['test', 'bar']),
delimiter.join(['test', 'foo'])]
for f in files:
file_item = cont.file(f)
self.assertTrue(file_item.write_random())
for format_type in [None, 'json', 'xml']:
results = cont.files(parms={'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-&bar', 'test-&foo'])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test', 'test-&'])
results = cont.files(parms={'delimiter': delimiter,
'format': format_type,
'reverse': 'yes'})
if isinstance(results[0], dict):
results = [x.get('name', x.get('subdir')) for x in results]
self.assertEqual(results, ['test-&', 'test'])
def testListDelimiterAndPrefix(self):
cont = self.env.account.container(Utils.create_name())
self.assertTrue(cont.create())

View File

@ -164,7 +164,7 @@ def add_ring_devs_to_ipport2server(ring, server_type, ipport2server,
# We'll number the servers by order of unique occurrence of:
# IP, if servers_per_port > 0 OR there is > 1 IP in the ring
# ipport, otherwise
unique_ip_count = len(set(dev['ip'] for dev in ring.devs if dev))
unique_ip_count = len({dev['ip'] for dev in ring.devs if dev})
things_to_number = {}
number = 0
for dev in filter(None, ring.devs):
@ -244,7 +244,7 @@ def get_ring(ring_name, required_replicas, required_devices,
if p.returncode:
raise unittest.SkipTest('unable to connect to rsync '
'export %s (%s)' % (rsync_export, cmd))
for line in stdout.splitlines():
for line in stdout.decode().splitlines():
if line.rsplit(None, 1)[-1] == dev['device']:
break
else:
@ -295,11 +295,11 @@ def kill_orphans():
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.length = total
self.length = int(total)
self.hasher = md5()
self.read_amount = 0
self.chunk = uuid4().hex * 2 ** 10
self.buff = ''
self.chunk = uuid4().hex.encode('ascii') * 2 ** 10
self.buff = b''
@property
def etag(self):
@ -320,9 +320,9 @@ class Body(object):
def __iter__(self):
return self
def next(self):
def __next__(self):
if self.buff:
rv, self.buff = self.buff, ''
rv, self.buff = self.buff, b''
return rv
if self.read_amount >= self.length:
raise StopIteration()
@ -331,8 +331,8 @@ class Body(object):
self.hasher.update(rv)
return rv
def __next__(self):
return next(self)
# for py2 compat:
next = __next__
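The Body iterator is reworked for py3: __next__ is the real method and next is kept as a py2 alias. The same pattern as a minimal, self-contained sketch (hypothetical class):

    class CompatIter(object):
        def __init__(self, items):
            self._items = list(items)

        def __iter__(self):
            return self

        def __next__(self):
            if not self._items:
                raise StopIteration
            return self._items.pop(0)

        next = __next__  # py2's iterator protocol calls next()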
class ProbeTest(unittest.TestCase):

View File

@ -56,7 +56,7 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest):
raise Exception("Unexpected status %s\n%s" %
(resp.status, resp.read()))
response_headers = dict(resp.getheaders())
response_headers = {h.lower(): v for h, v in resp.getheaders()}
response_body = resp.read()
resp.close()
return response_headers, response_body
@ -98,8 +98,8 @@ class TestAccountGetFakeResponsesMatch(ReplProbeTest):
fake_acct, headers={'Accept': 'application/xml'})
# the account name is in the XML response
real_body = re.sub('AUTH_\w{4}', 'AUTH_someaccount', real_body)
fake_body = re.sub('AUTH_\w{4}', 'AUTH_someaccount', fake_body)
real_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', real_body)
fake_body = re.sub(br'AUTH_\w{4}', b'AUTH_someaccount', fake_body)
self.assertEqual(real_body, fake_body)
self.assertEqual(real_headers['content-type'],

View File

@ -130,7 +130,7 @@ class TestContainerFailures(ReplProbeTest):
onode = onodes[0]
db_files = []
for onode in onodes:
node_id = (onode['port'] - 6000) / 10
node_id = (onode['port'] - 6000) // 10
device = onode['device']
hash_str = hash_path(self.account, container)
server_conf = readconf(self.configs['container-server'][node_id])
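The / to // change matters because this quotient is used as a server number; a short worked example of the py2/py3 difference:

    port = 6020
    (port - 6000) / 10    # py3: 2.0, a float that breaks server numbering
    (port - 6000) // 10   # 2, an int on both py2 and py3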

View File

@ -68,7 +68,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.brain.stop_handoff_half()
self.brain.put_container()
self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
contents='VERIFY')
contents=b'VERIFY')
self.brain.start_handoff_half()
# make sure we have some manner of split brain
container_part, container_nodes = self.container_ring.get_nodes(
@ -78,9 +78,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertGreater(
len(found_policy_indexes), 1,
'primary nodes did not disagree about policy index %r' %
@ -116,9 +116,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertEqual(len(found_policy_indexes), 1,
'primary nodes disagree about policy index %r' %
head_responses)
@ -144,7 +144,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
orig_policy_index))
# verify that the object data read by external client is correct
headers, data = self._get_object_patiently(expected_policy_index)
self.assertEqual('VERIFY', data)
self.assertEqual(b'VERIFY', data)
self.assertEqual('custom-meta', headers['x-object-meta-test'])
def test_reconcile_delete(self):
@ -165,9 +165,9 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for
node, metadata in head_responses)
found_policy_indexes = {
metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
self.assertGreater(
len(found_policy_indexes), 1,
'primary nodes did not disagree about policy index %r' %
@ -208,15 +208,14 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
metadata = direct_client.direct_head_container(
node, container_part, self.account, self.container_name)
head_responses.append((node, metadata))
new_found_policy_indexes = \
set(metadata['X-Backend-Storage-Policy-Index'] for node,
metadata in head_responses)
self.assertEqual(len(new_found_policy_indexes), 1,
node_to_policy = {
node['port']: metadata['X-Backend-Storage-Policy-Index']
for node, metadata in head_responses}
policies = set(node_to_policy.values())
self.assertEqual(len(policies), 1,
'primary nodes disagree about policy index %r' %
dict((node['port'],
metadata['X-Backend-Storage-Policy-Index'])
for node, metadata in head_responses))
expected_policy_index = new_found_policy_indexes.pop()
node_to_policy)
expected_policy_index = policies.pop()
self.assertEqual(orig_policy_index, expected_policy_index)
# validate object fully deleted
for policy_index in found_policy_indexes:
@ -257,7 +256,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
manifest_data = []
def write_part(i):
body = 'VERIFY%0.2d' % i + '\x00' * 1048576
body = b'VERIFY%0.2d' % i + b'\x00' * 1048576
part_name = 'manifest_part_%0.2d' % i
manifest_entry = {
"path": "/%s/%s" % (self.container_name, part_name),
@ -310,7 +309,8 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
'bytes': data['size_bytes'],
'name': data['path'],
}
direct_manifest_data = map(translate_direct, manifest_data)
direct_manifest_data = [translate_direct(item)
for item in manifest_data]
headers = {
'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
in self.container_ring.devs),
@ -320,11 +320,12 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
'X-Backend-Storage-Policy-Index': wrong_policy.idx,
'X-Static-Large-Object': 'True',
}
body = utils.json.dumps(direct_manifest_data).encode('ascii')
for node in nodes:
direct_client.direct_put_object(
node, part, self.account, self.container_name,
direct_manifest_name,
contents=utils.json.dumps(direct_manifest_data),
contents=body,
headers=headers)
break # one should do it...
@ -347,8 +348,8 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
self.assertEqual(int(metadata['content-length']),
sum(part['size_bytes'] for part in manifest_data))
self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
for i in range(20)))
self.assertEqual(body, b''.join(b'VERIFY%0.2d' % i + b'\x00' * 1048576
for i in range(20)))
# and regular upload should work now too
client.put_object(self.url, self.token, self.container_name,
@ -375,7 +376,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.brain.start_primary_half()
# write some target data
client.put_object(self.url, self.token, self.container_name, 'target',
contents='this is the target data')
contents=b'this is the target data')
# write the symlink
self.brain.stop_handoff_half()
@ -401,7 +402,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
self.assertEqual(metadata['x-symlink-target'],
'%s/target' % self.container_name)
self.assertEqual(metadata['content-type'], 'application/symlink')
self.assertEqual(body, '')
self.assertEqual(body, b'')
# ... although in the wrong policy
object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
part, nodes = object_ring.get_nodes(
@ -423,7 +424,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
# now the symlink works
metadata, body = client.get_object(self.url, self.token,
self.container_name, 'symlink')
self.assertEqual(body, 'this is the target data')
self.assertEqual(body, b'this is the target data')
# and it's in the correct policy
object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
part, nodes = object_ring.get_nodes(
@ -469,7 +470,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
# hopefully memcache still has the new policy cached
self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
contents='VERIFY')
contents=b'VERIFY')
# double-check object correctly written to new policy
conf_files = []
for server in Manager(['container-reconciler']).servers:
@ -546,7 +547,7 @@ class TestContainerMergePolicyIndex(ReplProbeTest):
# verify that the object data read by external client is correct
headers, data = self._get_object_patiently(int(new_policy))
self.assertEqual('VERIFY', data)
self.assertEqual(b'VERIFY', data)
self.assertEqual('custom-meta', headers['x-object-meta-test'])

View File

@ -113,7 +113,7 @@ class TestContainerSync(BaseTestContainerSync):
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
self.assertEqual(body, b'test-body')
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('put_value', resp_headers['x-object-meta-test'])
@ -136,7 +136,7 @@ class TestContainerSync(BaseTestContainerSync):
# verify that metadata changes were sync'd
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
self.assertEqual(body, b'test-body')
self.assertIn('x-object-meta-test', resp_headers)
self.assertEqual('post_value', resp_headers['x-object-meta-test'])
self.assertEqual('image/jpeg', resp_headers['content-type'])
@ -180,7 +180,7 @@ class TestContainerSync(BaseTestContainerSync):
# upload a segment to source
segment_name = 'segment-%s' % uuid.uuid4()
segment_data = 'segment body' # it's ok for first segment to be small
segment_data = b'segment body' # it's ok for first segment to be small
segment_etag = client.put_object(
self.url, self.token, segs_container, segment_name,
segment_data)
@ -270,7 +270,7 @@ class TestContainerSync(BaseTestContainerSync):
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
self.assertEqual(body, b'test-body')
def test_sync_lazy_dkey(self):
# Create synced containers, but with no key at dest
@ -297,7 +297,7 @@ class TestContainerSync(BaseTestContainerSync):
Manager(['container-sync']).once()
_junk, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'test-body')
self.assertEqual(body, b'test-body')
def test_sync_with_stale_container_rows(self):
source_container, dest_container = self._setup_synced_containers()
@ -351,7 +351,7 @@ class TestContainerSync(BaseTestContainerSync):
# verify sync'd object has same content and headers
dest_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'new-test-body')
self.assertEqual(body, b'new-test-body')
mismatched_headers = []
for k in ('etag', 'content-length', 'content-type', 'x-timestamp',
'last-modified'):
@ -381,7 +381,7 @@ class TestContainerSync(BaseTestContainerSync):
# verify that the remote object did not change
resp_headers, body = client.get_object(self.url, self.token,
dest_container, object_name)
self.assertEqual(body, 'new-test-body')
self.assertEqual(body, b'new-test-body')
class TestContainerSyncAndSymlink(BaseTestContainerSync):
@ -413,7 +413,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
# upload a target to source
target_name = 'target-%s' % uuid.uuid4()
target_body = 'target body'
target_body = b'target body'
client.put_object(
self.url, self.token, tgt_container, target_name,
target_body)
@ -432,7 +432,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual('', symlink_body)
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
# verify symlink behavior
@ -453,7 +453,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
resp_headers, symlink_body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual('', symlink_body)
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
# attempt to GET the target object via symlink will fail because
@ -480,7 +480,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
resp_headers, symlink_body = client.get_object(
dest_account['url'], dest_account['token'], dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual('', symlink_body)
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
# verify GET of target object via symlink now succeeds
@ -511,7 +511,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
# upload a target to source
target_name = 'target-%s' % uuid.uuid4()
target_body = 'target body'
target_body = b'target body'
client.put_object(tgt_account['url'], tgt_account['token'],
tgt_container, target_name, target_body)
@ -531,7 +531,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
resp_headers, symlink_body = client.get_object(
self.url, self.token, source_container, symlink_name,
query_string='symlink=get')
self.assertEqual('', symlink_body)
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-account', resp_headers)
@ -553,7 +553,7 @@ class TestContainerSyncAndSymlink(BaseTestContainerSync):
resp_headers, symlink_body = client.get_object(
self.url, self.token, dest_container,
symlink_name, query_string='symlink=get')
self.assertEqual('', symlink_body)
self.assertEqual(b'', symlink_body)
self.assertIn('x-symlink-target', resp_headers)
self.assertIn('x-symlink-target-account', resp_headers)

View File

@ -69,7 +69,7 @@ class TestEmptyDevice(ReplProbeTest):
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
@ -87,7 +87,7 @@ class TestEmptyDevice(ReplProbeTest):
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Restart those other two container/obj primary servers

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
from io import BytesIO
from unittest import main, SkipTest
from uuid import uuid4
@ -82,7 +82,7 @@ class TestObjectAsyncUpdate(ReplProbeTest):
self.assertEqual(err.http_status, 503)
# Assert handoff device has a container replica
another_cnode = self.container_ring.get_more_nodes(cpart).next()
another_cnode = next(self.container_ring.get_more_nodes(cpart))
direct_client.direct_get_container(
another_cnode, cpart, self.account, container)
@ -143,7 +143,7 @@ class TestUpdateOverrides(ReplProbeTest):
self.policy.name})
int_client.upload_object(
StringIO(u'stuff'), self.account, 'c1', 'o1', headers)
BytesIO(b'stuff'), self.account, 'c1', 'o1', headers)
# Run the object-updaters to be sure updates are done
Manager(['object-updater']).once()

View File

@ -40,7 +40,7 @@ class TestPutIfNoneMatchRepl(ReplProbeTest):
self.brain.put_container()
self.brain.stop_primary_half()
# put object to only 1 of 3 primaries
self.brain.put_object(contents='VERIFY')
self.brain.put_object(contents=b'VERIFY')
self.brain.start_primary_half()
# Restart services and attempt to overwrite
@ -64,16 +64,16 @@ class TestPutIfNoneMatchRepl(ReplProbeTest):
# ...and verify the object was not overwritten
_headers, body = client.get_object(
self.url, self.token, self.container_name, self.object_name)
self.assertEqual(body, 'VERIFY')
self.assertEqual(body, b'VERIFY')
def test_content_length_nonzero(self):
self._do_test('OVERWRITE')
self._do_test(b'OVERWRITE')
def test_content_length_zero(self):
self._do_test('')
self._do_test(b'')
def test_chunked(self):
self._do_test(chunker('OVERWRITE'))
self._do_test(chunker(b'OVERWRITE'))
def test_chunked_empty(self):
self._do_test(chunker(''))
self._do_test(chunker(b''))

View File

@ -78,7 +78,7 @@ class TestObjectFailures(ReplProbeTest):
container = 'container-%s' % uuid4()
obj = 'object-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'VERIFY')
b'VERIFY')
# Stash the on disk data for future comparison - this may not equal
# 'VERIFY' if for example the proxy has crypto enabled
backend_data = direct_client.direct_get_object(
@ -105,7 +105,7 @@ class TestObjectFailures(ReplProbeTest):
container = 'container-range-%s' % uuid4()
obj = 'object-range-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj,
'RANGE')
b'RANGE')
# Stash the on disk data for future comparison - this may not equal
# 'VERIFY' if for example the proxy has crypto enabled
backend_data = direct_client.direct_get_object(
@ -137,7 +137,8 @@ class TestObjectFailures(ReplProbeTest):
def run_quarantine_zero_byte_get(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
@ -155,7 +156,8 @@ class TestObjectFailures(ReplProbeTest):
def run_quarantine_zero_byte_head(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
@ -173,7 +175,8 @@ class TestObjectFailures(ReplProbeTest):
def run_quarantine_zero_byte_post(self):
container = 'container-zbyte-%s' % uuid4()
obj = 'object-zbyte-%s' % uuid4()
onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
onode, opart, data_file = self._setup_data_file(
container, obj, b'DATA')
metadata = read_metadata(data_file)
unlink(data_file)
@ -217,7 +220,7 @@ class TestECObjectFailures(ECProbeTest):
# PUT object, should go to primary nodes
client.put_object(self.url, self.token, container_name,
object_name, contents='object contents')
object_name, contents=b'object contents')
# get our node lists
opart, onodes = self.object_ring.get_nodes(
@ -226,7 +229,7 @@ class TestECObjectFailures(ECProbeTest):
# sanity test
odata = client.get_object(self.url, self.token, container_name,
object_name)[-1]
self.assertEqual('object contents', odata)
self.assertEqual(b'object contents', odata)
# make all fragments non-durable
for node in onodes:

View File

@ -53,9 +53,9 @@ class TestObjectHandoff(ReplProbeTest):
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Create container/obj (goes to two primary servers and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
@ -73,7 +73,7 @@ class TestObjectHandoff(ReplProbeTest):
# Indirectly through proxy assert we can get container/obj
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
@ -139,13 +139,13 @@ class TestObjectHandoff(ReplProbeTest):
port_num = node['replication_port']
except KeyError:
port_num = node['port']
node_id = (port_num - 6000) / 10
node_id = (port_num - 6000) // 10
Manager(['object-replicator']).once(number=node_id)
try:
another_port_num = another_onode['replication_port']
except KeyError:
another_port_num = another_onode['port']
another_num = (another_port_num - 6000) / 10
another_num = (another_port_num - 6000) // 10
Manager(['object-replicator']).once(number=another_num)
# Assert the first container/obj primary server now has container/obj
@ -231,9 +231,9 @@ class TestObjectHandoff(ReplProbeTest):
port_num = node['replication_port']
except KeyError:
port_num = node['port']
node_id = (port_num - 6000) / 10
node_id = (port_num - 6000) // 10
Manager(['object-replicator']).once(number=node_id)
another_node_id = (another_port_num - 6000) / 10
another_node_id = (another_port_num - 6000) // 10
Manager(['object-replicator']).once(number=another_node_id)
# Assert primary node no longer has container/obj
@ -261,9 +261,9 @@ class TestObjectHandoff(ReplProbeTest):
kill_server((onode['ip'], onode['port']), self.ipport2server)
# Create container/obj (goes to two primaries and one handoff)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
@ -318,9 +318,9 @@ class TestObjectHandoff(ReplProbeTest):
# Create container/obj (goes to all three primaries)
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, 'VERIFY')
client.put_object(self.url, self.token, container, obj, b'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
if odata != b'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))

View File

@ -13,7 +13,7 @@
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
from io import BytesIO
import unittest
import os
@ -134,9 +134,9 @@ class Test(ReplProbeTest):
modified = Timestamp(metadata['x-timestamp']).isoformat
self.assertEqual(listing['last_modified'], modified)
def _put_object(self, headers=None, body=u'stuff'):
def _put_object(self, headers=None, body=b'stuff'):
headers = headers or {}
self.int_client.upload_object(StringIO(body), self.account,
self.int_client.upload_object(BytesIO(body), self.account,
self.container_name,
self.object_name, headers)
@ -232,12 +232,12 @@ class Test(ReplProbeTest):
self.brain.put_container()
# put object
self._put_object(headers={'Content-Type': 'foo'}, body=u'older')
self._put_object(headers={'Content-Type': 'foo'}, body=b'older')
# put newer object to first server subset
self.brain.stop_primary_half()
self.container_brain.stop_primary_half()
self._put_object(headers={'Content-Type': 'bar'}, body=u'newer')
self._put_object(headers={'Content-Type': 'bar'}, body=b'newer')
metadata = self._get_object_metadata()
etag = metadata['etag']
self.brain.start_primary_half()

View File

@ -51,8 +51,8 @@ class TestPartPowerIncrease(ProbeTest):
self.devices = [
self.device_dir('object', {'ip': ip, 'port': port, 'device': ''})
for ip, port in set((dev['ip'], dev['port'])
for dev in self.object_ring.devs)]
for ip, port in {(dev['ip'], dev['port'])
for dev in self.object_ring.devs}]
def tearDown(self):
# Keep a backup copy of the modified .builder file

View File

@ -24,6 +24,7 @@ import shutil
import random
import os
import time
import six
from swift.common.direct_client import DirectClientException
from test.probe.common import ECProbeTest
@ -38,10 +39,10 @@ from swiftclient import client, ClientException
class Body(object):
def __init__(self, total=3.5 * 2 ** 20):
self.total = total
self.total = int(total)
self.hasher = md5()
self.size = 0
self.chunk = 'test' * 16 * 2 ** 10
self.chunk = b'test' * 16 * 2 ** 10
@property
def etag(self):
@ -50,21 +51,21 @@ class Body(object):
def __iter__(self):
return self
def next(self):
def __next__(self):
if self.size > self.total:
raise StopIteration()
self.size += len(self.chunk)
self.hasher.update(self.chunk)
return self.chunk
def __next__(self):
return next(self)
# for py2 compat
next = __next__
class TestReconstructorRebuild(ECProbeTest):
def _make_name(self, prefix):
return '%s%s' % (prefix, uuid.uuid4())
return ('%s%s' % (prefix, uuid.uuid4())).encode()
def setUp(self):
super(TestReconstructorRebuild, self).setUp()
@ -131,10 +132,13 @@ class TestReconstructorRebuild(ECProbeTest):
{'X-Backend-Fragment-Preferences': json.dumps([])})
# node dict has unicode values, so utf8-decode our path parts too in
# case they have non-ASCII characters
if six.PY2:
acc, con, obj = (s.decode('utf8') for s in (
self.account, self.container_name, self.object_name))
else:
acc, con, obj = self.account, self.container_name, self.object_name
headers, data = direct_client.direct_get_object(
node, part, self.account.decode('utf8'),
self.container_name.decode('utf8'),
self.object_name.decode('utf8'), headers=req_headers,
node, part, acc, con, obj, headers=req_headers,
resp_chunk_size=64 * 2 ** 20)
hasher = md5()
for chunk in data:
@ -402,7 +406,7 @@ class TestReconstructorRebuild(ECProbeTest):
opart, onodes = self.object_ring.get_nodes(
self.account, self.container_name, self.object_name)
delete_at = int(time.time() + 3)
contents = 'body-%s' % uuid.uuid4()
contents = ('body-%s' % uuid.uuid4()).encode()
headers = {'x-delete-at': delete_at}
client.put_object(self.url, self.token, self.container_name,
self.object_name, headers=headers, contents=contents)
@ -418,15 +422,17 @@ class TestReconstructorRebuild(ECProbeTest):
# wait for the delete_at to pass, and check that it thinks the object
# is expired
timeout = time.time() + 5
err = None
while time.time() < timeout:
try:
direct_client.direct_head_object(
post_fail_node, opart, self.account, self.container_name,
self.object_name, headers={
'X-Backend-Storage-Policy-Index': int(self.policy)})
except direct_client.ClientException as err:
if err.http_status != 404:
except direct_client.ClientException as client_err:
if client_err.http_status != 404:
raise
err = client_err
break
else:
time.sleep(0.1)
@ -454,7 +460,7 @@ class TestReconstructorRebuild(ECProbeTest):
class TestReconstructorRebuildUTF8(TestReconstructorRebuild):
def _make_name(self, prefix):
return '%s\xc3\xa8-%s' % (prefix, uuid.uuid4())
return b'%s\xc3\xa8-%s' % (prefix.encode(), str(uuid.uuid4()).encode())
if __name__ == "__main__":

View File

@ -132,7 +132,7 @@ class TestReconstructorRevert(ECProbeTest):
# fire up reconstructor on handoff nodes only
for hnode in hnodes:
hnode_id = (hnode['port'] - 6000) / 10
hnode_id = (hnode['port'] - 6000) // 10
self.reconstructor.once(number=hnode_id)
# first three primaries have data again

View File

@ -19,6 +19,7 @@ import shutil
import uuid
from nose import SkipTest
import six
from swift.common import direct_client, utils
from swift.common.manager import Manager
@ -27,6 +28,7 @@ from swift.common.direct_client import DirectClientException
from swift.common.utils import ShardRange, parse_db_filename, get_db_files, \
quorum_size, config_true_value, Timestamp
from swift.container.backend import ContainerBroker, UNSHARDED, SHARDING
from swift.container.sharder import CleavingContext
from swiftclient import client, get_auth, ClientException
from swift.proxy.controllers.base import get_cache_key
@ -65,8 +67,8 @@ class BaseTestContainerSharding(ReplProbeTest):
'container-server configs')
skip_reasons = []
auto_shard = all([config_true_value(c.get('auto_shard', False))
for c in cont_configs])
auto_shard = all(config_true_value(c.get('auto_shard', False))
for c in cont_configs)
if not auto_shard:
skip_reasons.append(
'auto_shard must be true in all container_sharder configs')
@ -83,7 +85,7 @@ class BaseTestContainerSharding(ReplProbeTest):
MAX_SHARD_CONTAINER_THRESHOLD))
def skip_check(reason_list, option, required):
values = set([int(c.get(option, required)) for c in cont_configs])
values = {int(c.get(option, required)) for c in cont_configs}
if values != {required}:
reason_list.append('%s must be %s' % (option, required))
@ -259,8 +261,8 @@ class BaseTestContainerSharding(ReplProbeTest):
def assert_dict_contains(self, expected_items, actual_dict):
ignored = set(expected_items) ^ set(actual_dict)
filtered_actual = dict((k, actual_dict[k])
for k in actual_dict if k not in ignored)
filtered_actual = {k: actual_dict[k]
for k in actual_dict if k not in ignored}
self.assertEqual(expected_items, filtered_actual)
def assert_shard_ranges_contiguous(self, expected_number, shard_ranges,
@ -268,8 +270,8 @@ class BaseTestContainerSharding(ReplProbeTest):
if shard_ranges and isinstance(shard_ranges[0], ShardRange):
actual_shard_ranges = sorted(shard_ranges)
else:
actual_shard_ranges = sorted([ShardRange.from_dict(d)
for d in shard_ranges])
actual_shard_ranges = sorted(ShardRange.from_dict(d)
for d in shard_ranges)
self.assertLengthEqual(actual_shard_ranges, expected_number)
if expected_number:
with annotate_failure('Ranges %s.' % actual_shard_ranges):
@ -300,7 +302,7 @@ class BaseTestContainerSharding(ReplProbeTest):
[sr.state for sr in shard_ranges])
def assert_total_object_count(self, expected_object_count, shard_ranges):
actual = sum([sr['object_count'] for sr in shard_ranges])
actual = sum(sr['object_count'] for sr in shard_ranges)
self.assertEqual(expected_object_count, actual)
def assert_container_listing(self, expected_listing):
@ -309,7 +311,8 @@ class BaseTestContainerSharding(ReplProbeTest):
self.assertIn('x-container-object-count', headers)
expected_obj_count = len(expected_listing)
self.assertEqual(expected_listing, [
x['name'].encode('utf-8') for x in actual_listing])
x['name'].encode('utf-8') if six.PY2 else x['name']
for x in actual_listing])
self.assertEqual(str(expected_obj_count),
headers['x-container-object-count'])
return headers, actual_listing
@ -407,15 +410,16 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
self.put_objects(obj_names)
# choose some names approx in middle of each expected shard range
markers = [
obj_names[i] for i in range(self.max_shard_size / 4,
obj_names[i] for i in range(self.max_shard_size // 4,
2 * self.max_shard_size,
self.max_shard_size / 2)]
self.max_shard_size // 2)]
def check_listing(objects, **params):
qs = '&'.join(['%s=%s' % param for param in params.items()])
headers, listing = client.get_container(
self.url, self.token, self.container_name, query_string=qs)
listing = [x['name'].encode('utf-8') for x in listing]
listing = [x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing]
if params.get('reverse'):
marker = params.get('marker', ShardRange.MAX)
end_marker = params.get('end_marker', ShardRange.MIN)
@ -429,12 +433,12 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
expected = expected[:params['limit']]
self.assertEqual(expected, listing)
def check_listing_precondition_fails(**params):
def check_listing_fails(exp_status, **params):
qs = '&'.join(['%s=%s' % param for param in params.items()])
with self.assertRaises(ClientException) as cm:
client.get_container(
self.url, self.token, self.container_name, query_string=qs)
self.assertEqual(412, cm.exception.http_status)
self.assertEqual(exp_status, cm.exception.http_status)
return cm.exception
def do_listing_checks(objects):
@ -443,16 +447,17 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
check_listing(objects, marker=markers[0], end_marker=markers[2])
check_listing(objects, marker=markers[1], end_marker=markers[3])
check_listing(objects, marker=markers[1], end_marker=markers[3],
limit=self.max_shard_size / 4)
limit=self.max_shard_size // 4)
check_listing(objects, marker=markers[1], end_marker=markers[3],
limit=self.max_shard_size / 4)
limit=self.max_shard_size // 4)
check_listing(objects, marker=markers[1], end_marker=markers[2],
limit=self.max_shard_size / 2)
limit=self.max_shard_size // 2)
check_listing(objects, marker=markers[1], end_marker=markers[1])
check_listing(objects, reverse=True)
check_listing(objects, reverse=True, end_marker=markers[1])
check_listing(objects, reverse=True, marker=markers[3],
end_marker=markers[1], limit=self.max_shard_size / 4)
end_marker=markers[1],
limit=self.max_shard_size // 4)
check_listing(objects, reverse=True, marker=markers[3],
end_marker=markers[1], limit=0)
check_listing([], marker=markers[0], end_marker=markers[0])
@ -465,12 +470,16 @@ class TestContainerShardingNonUTF8(BaseTestContainerSharding):
self.url, self.token, self.container_name,
query_string='delimiter=-')
self.assertEqual([{'subdir': 'obj-'}], listing)
headers, listing = client.get_container(
self.url, self.token, self.container_name,
query_string='delimiter=j-')
self.assertEqual([{'subdir': 'obj-'}], listing)
limit = self.cluster_info['swift']['container_listing_limit']
exc = check_listing_precondition_fails(limit=limit + 1)
self.assertIn('Maximum limit', exc.http_response_content)
exc = check_listing_precondition_fails(delimiter='ab')
self.assertIn('Bad delimiter', exc.http_response_content)
exc = check_listing_fails(412, limit=limit + 1)
self.assertIn(b'Maximum limit', exc.http_response_content)
exc = check_listing_fails(400, delimiter='%ff')
self.assertIn(b'not valid UTF-8', exc.http_response_content)
# sanity checks
do_listing_checks(obj_names)
@ -544,7 +553,9 @@ class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
obj_names = []
for x in range(number):
name = (u'obj-\u00e4\u00ea\u00ec\u00f2\u00fb-%04d' % x)
name = name.encode('utf8').ljust(name_length, 'o')
name = name.encode('utf8').ljust(name_length, b'o')
if not six.PY2:
name = name.decode('utf8')
obj_names.append(name)
return obj_names
@ -553,7 +564,9 @@ class TestContainerShardingUTF8(TestContainerShardingNonUTF8):
super(TestContainerShardingUTF8, self)._setup_container_name()
name_length = self.cluster_info['swift']['max_container_name_length']
cont_name = self.container_name + u'-\u00e4\u00ea\u00ec\u00f2\u00fb'
self.conainer_name = cont_name.encode('utf8').ljust(name_length, 'x')
self.conainer_name = cont_name.ljust(name_length, 'x')
if six.PY2:
self.conainer_name = self.container_name.encode('utf8')
class TestContainerSharding(BaseTestContainerSharding):
@ -573,8 +586,9 @@ class TestContainerSharding(BaseTestContainerSharding):
headers, pre_sharding_listing = client.get_container(
self.url, self.token, self.container_name)
self.assertEqual(obj_names, [x['name'].encode('utf-8')
for x in pre_sharding_listing]) # sanity
self.assertEqual(obj_names, [
x['name'].encode('utf-8') if six.PY2 else x['name']
for x in pre_sharding_listing]) # sanity
# Shard it
client.post_container(self.url, self.admin_token, self.container_name,
@ -603,6 +617,8 @@ class TestContainerSharding(BaseTestContainerSharding):
self.assert_shard_ranges_contiguous(2, orig_root_shard_ranges)
self.assertEqual([ShardRange.ACTIVE, ShardRange.ACTIVE],
[sr['state'] for sr in orig_root_shard_ranges])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts) # length check
self.direct_delete_container(expect_failure=True)
self.assertLengthEqual(found['normal_dbs'], 2)
@ -620,6 +636,9 @@ class TestContainerSharding(BaseTestContainerSharding):
orig_root_shard_ranges, shard_ranges,
excludes=['meta_timestamp', 'state', 'state_timestamp'])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts) # length check
if run_replicators:
Manager(['container-replicator']).once()
# replication doesn't change the db file names
@ -650,6 +669,9 @@ class TestContainerSharding(BaseTestContainerSharding):
self.assertGreaterEqual(updated.meta_timestamp,
orig['meta_timestamp'])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts) # length check
# Check that entire listing is available
headers, actual_listing = self.assert_container_listing(obj_names)
# ... and check some other container properties
@ -740,6 +762,16 @@ class TestContainerSharding(BaseTestContainerSharding):
first_lower=orig_root_shard_ranges[0]['lower'],
last_upper=orig_root_shard_ranges[0]['upper'])
contexts = list(CleavingContext.load_all(broker))
self.assertEqual(len(contexts), 1)
context, _lm = contexts[0]
self.assertIs(context.cleaving_done, False)
self.assertIs(context.misplaced_done, True)
self.assertEqual(context.ranges_done, 2)
self.assertEqual(context.ranges_todo, 1)
self.assertEqual(context.max_row,
self.max_shard_size * 3 // 2)
# but third replica still has no idea it should be sharding
self.assertLengthEqual(found_for_shard['normal_dbs'], 3)
self.assertEqual(
@ -767,16 +799,36 @@ class TestContainerSharding(BaseTestContainerSharding):
ShardRange.SHARDING, broker.get_own_shard_range().state)
self.assertFalse(broker.get_shard_ranges())
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts) # length check
# ...until sub-shard ranges are replicated from another shard replica;
# there may also be a sub-shard replica missing so run replicators on
# all nodes to fix that if necessary
self.brain.servers.start(number=shard_1_nodes[2])
self.replicators.once()
# Now that the replicators have all run, third replica sees cleaving
# contexts for the first two
contexts = list(CleavingContext.load_all(broker))
self.assertEqual(len(contexts), 2)
# now run sharder again on third replica
self.sharders.once(
number=shard_1_nodes[2],
additional_args='--partitions=%s' % shard_1_part)
sharding_broker = ContainerBroker(found_for_shard['normal_dbs'][2])
self.assertEqual('sharding', sharding_broker.get_db_state())
broker_id = broker.get_info()['id']
# Old, unsharded DB doesn't have the context...
contexts = list(CleavingContext.load_all(broker))
self.assertEqual(len(contexts), 2)
self.assertNotIn(broker_id, [ctx[0].ref for ctx in contexts])
# ...but the sharding one does
contexts = list(CleavingContext.load_all(sharding_broker))
self.assertEqual(len(contexts), 3)
self.assertIn(broker_id, [ctx[0].ref for ctx in contexts])
# check original first shard range state and sub-shards - all replicas
# should now be in consistent state
@ -852,6 +904,8 @@ class TestContainerSharding(BaseTestContainerSharding):
self.assert_container_listing(['alpha'] + more_obj_names + obj_names)
# Run sharders again so things settle.
self.run_sharders(shard_1)
# Also run replicators to settle cleaving contexts
self.replicators.once()
# check original first shard range shards
for db_file in found_for_shard['shard_dbs']:
@ -862,6 +916,11 @@ class TestContainerSharding(BaseTestContainerSharding):
self.assertEqual(
[ShardRange.ACTIVE] * 3,
[sr.state for sr in broker.get_shard_ranges()])
# Make sure our cleaving contexts got cleaned up
contexts = list(CleavingContext.load_all(broker))
self.assertEqual([], contexts)
# check root shard ranges
root_shard_ranges = self.direct_get_container_shard_ranges()
for node, (hdrs, root_shards) in root_shard_ranges.items():
@ -901,7 +960,7 @@ class TestContainerSharding(BaseTestContainerSharding):
old_shard_range = by_name.pop(
orig_root_shard_ranges[0]['name'])
self.assertTrue(old_shard_range.deleted)
self.assert_shard_ranges_contiguous(4, by_name.values())
self.assert_shard_ranges_contiguous(4, list(by_name.values()))
else:
# Everyone's on the same page. Well, except for
# meta_timestamps, since the shards each reported
@ -1070,26 +1129,29 @@ class TestContainerSharding(BaseTestContainerSharding):
start_listing = [
o for o in obj_names if o <= expected_shard_ranges[1].upper]
self.assertEqual(
[x['name'].encode('utf-8') for x in listing[:len(start_listing)]],
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[:len(start_listing)]],
start_listing)
# we can't assert much about the remaining listing, other than that
# there should be something
self.assertTrue(
[x['name'].encode('utf-8') for x in listing[len(start_listing):]])
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[len(start_listing):]])
self.assertIn('x-container-object-count', headers)
self.assertEqual(str(len(listing)),
headers['x-container-object-count'])
headers, listing = client.get_container(self.url, self.token,
self.container_name,
query_string='reverse=on')
self.assertEqual([x['name'].encode('utf-8')
self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[-len(start_listing):]],
list(reversed(start_listing)))
self.assertIn('x-container-object-count', headers)
self.assertEqual(str(len(listing)),
headers['x-container-object-count'])
self.assertTrue(
[x['name'].encode('utf-8') for x in listing[:-len(start_listing)]])
[x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing[:-len(start_listing)]])
# Run the sharders again to get everything to settle
self.sharders.once()
@ -1099,7 +1161,8 @@ class TestContainerSharding(BaseTestContainerSharding):
# now all shards have been cleaved we should get the complete listing
headers, listing = client.get_container(self.url, self.token,
self.container_name)
self.assertEqual([x['name'].encode('utf-8') for x in listing],
self.assertEqual([x['name'].encode('utf-8') if six.PY2 else x['name']
for x in listing],
obj_names)
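The listing assertions above all repeat the same py2/py3 normalization. A tiny helper along these lines (hypothetical, not part of the test module) captures the rule: container listings yield unicode names, which the py2 comparisons expect as UTF-8 bytes.

def native_name(item):
    # mirrors the inline conditionals in the assertions above
    return item['name'].encode('utf-8') if six.PY2 else item['name']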
def test_shrinking(self):
@ -1409,7 +1472,7 @@ class TestContainerSharding(BaseTestContainerSharding):
# put objects while all servers are up
obj_names = self._make_object_names(
num_shards * self.max_shard_size / 2)
num_shards * self.max_shard_size // 2)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
@ -2004,7 +2067,7 @@ class TestContainerSharding(BaseTestContainerSharding):
if n['id'] not in primary_ids)
num_shards = 3
obj_names = self._make_object_names(
num_shards * self.max_shard_size / 2)
num_shards * self.max_shard_size // 2)
self.put_objects(obj_names)
client.post_container(self.url, self.admin_token, self.container_name,
headers={'X-Container-Sharding': 'on'})


@ -48,10 +48,10 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
manager = Manager([server_name])
manager.start()
starting_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
starting_pids = {pid for server in manager.servers
for (_, pid) in server.iter_pid_files()}
body = 'test' * 10
body = b'test' * 10
conn = httplib.HTTPConnection('%s:%s' % (ip, port))
# sanity request
@ -68,8 +68,8 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
manager.reload()
post_reload_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
post_reload_pids = {pid for server in manager.servers
for (_, pid) in server.iter_pid_files()}
# none of the pids we started with are being tracked after reload
msg = 'expected all pids from %r to have died, but found %r' % (
@ -92,8 +92,8 @@ class TestWSGIServerProcessHandling(unittest.TestCase):
conn.close()
# sanity
post_close_pids = set(pid for server in manager.servers
for (_, pid) in server.iter_pid_files())
post_close_pids = {pid for server in manager.servers
for (_, pid) in server.iter_pid_files()}
self.assertEqual(post_reload_pids, post_close_pids)
def test_proxy_reload(self):


@ -1485,13 +1485,6 @@ class TestAccountController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_GET_prefix_delimiter_plain(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
@ -1655,6 +1648,110 @@ class TestAccountController(unittest.TestCase):
listing.append(node2.firstChild.nodeValue)
self.assertEqual(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_multichar_delimiter(self):
self.maxDiff = None
req = Request.blank('/sda1/p/a', method='PUT', headers={
'x-timestamp': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201, resp.body)
for i in ('US~~TX~~A', 'US~~TX~~B', 'US~~OK~~A', 'US~~OK~~B',
'US~~OK~Tulsa~~A', 'US~~OK~Tulsa~~B',
'US~~UT~~A', 'US~~UT~~~B'):
req = Request.blank('/sda1/p/a/%s' % i, method='PUT', headers={
'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a?prefix=US~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~OK~Tulsa~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~OK~Tulsa~~"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"subdir": "US~~UT~~~"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"subdir": "US~~UT~~~"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"name": "US~~UT~~~B"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a?prefix=US~~UT~~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"}])
def test_through_call(self):
inbuf = BytesIO()
errbuf = StringIO()
@ -1779,18 +1876,13 @@ class TestAccountController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
Request.blank('/sda1/p/a',
headers={'X-Timestamp': normalize_timestamp(1)},
environ={'REQUEST_METHOD': 'PUT'}).get_response(
self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'prefix', 'end_marker', 'format'):
for param in ('limit', 'marker', 'prefix', 'end_marker', 'format',
'delimiter'):
req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)


@ -2263,10 +2263,34 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
# Note that we've picked up an extension
builder = RingBuilder.load(self.tmpfile + '.builder')
# Version was recorded in the .ring.gz!
self.assertEqual(builder.version, 5)
# Note that this is different from the original! But it more-closely
# reflects the reality that we have an extra replica for 12 of 64 parts
self.assertEqual(builder.replicas, 1.1875)
def test_write_builder_no_version(self):
self.create_sample_ring()
rb = RingBuilder.load(self.tmpfile)
rb.rebalance()
# Make sure we write down the ring in the old way, with no version
rd = rb.get_ring()
rd.version = None
rd.save(self.tmpfile + ".ring.gz")
ring_file = os.path.join(os.path.dirname(self.tmpfile),
os.path.basename(self.tmpfile) + ".ring.gz")
os.remove(self.tmpfile) # loses file...
argv = ["", ring_file, "write_builder", "24"]
self.assertIsNone(ringbuilder.main(argv))
# Note that we've picked up an extension
builder = RingBuilder.load(self.tmpfile + '.builder')
# No version in the .ring.gz; default to 0
self.assertEqual(builder.version, 0)
def test_write_builder_after_device_removal(self):
# Test regenerating builder file after having removed a device
# and lost the builder file


@ -89,12 +89,25 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
objects = [{'name': item[0], 'last_modified': item[1],
'hash': item[2], 'bytes': item[3]}
for item in OBJECTS_TEMPLATE]
object_list = json.dumps(objects)
self.swift.register('PUT', segment_bucket,
swob.HTTPAccepted, {}, None)
# default to just returning everybody...
self.swift.register('GET', segment_bucket, swob.HTTPOk, {},
object_list)
json.dumps(objects))
# but for the listing when aborting an upload, break it up into pages
self.swift.register(
'GET', '%s?delimiter=/&format=json&prefix=object/X/' % (
segment_bucket, ),
swob.HTTPOk, {}, json.dumps(objects[:1]))
self.swift.register(
'GET', '%s?delimiter=/&format=json&marker=%s&prefix=object/X/' % (
segment_bucket, objects[0]['name']),
swob.HTTPOk, {}, json.dumps(objects[1:]))
self.swift.register(
'GET', '%s?delimiter=/&format=json&marker=%s&prefix=object/X/' % (
segment_bucket, objects[-1]['name']),
swob.HTTPOk, {}, '[]')
self.swift.register('HEAD', segment_bucket + '/object/X',
swob.HTTPOk,
{'x-object-meta-foo': 'bar',
@ -1660,6 +1673,13 @@ class TestS3ApiMultiUpload(S3ApiTestCase):
status, headers, body = \
self._test_for_s3acl('DELETE', '?uploadId=X', 'test:full_control')
self.assertEqual(status.split()[0], '204')
self.assertEqual([
path for method, path in self.swift.calls if method == 'DELETE'
], [
'/v1/AUTH_test/bucket+segments/object/X',
'/v1/AUTH_test/bucket+segments/object/X/1',
'/v1/AUTH_test/bucket+segments/object/X/2',
])
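The paged GET registrations above model marker-based listing: each request returns one page, and the next request passes the last name seen as the marker, until an empty page ends the loop. A hedged sketch of the consumer side (get_page is a stand-in, not the middleware's real helper):

def iter_segments(get_page, prefix):
    marker = None
    while True:
        page = get_page(prefix=prefix, marker=marker)
        if not page:
            return
        for entry in page:
            yield entry
        marker = page[-1]['name']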
@s3acl(s3acl_only=True)
def test_complete_multipart_upload_acl_without_permission(self):


@ -22,7 +22,6 @@ import hashlib
import mock
import requests
import json
import copy
import six
from six.moves.urllib.parse import unquote, quote
@ -34,6 +33,7 @@ from swift.common.swob import Request
from keystonemiddleware.auth_token import AuthProtocol
from keystoneauth1.access import AccessInfoV2
from test.unit import debug_logger
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.helpers import FakeSwift
from test.unit.common.middleware.s3api.test_s3token import \
@ -335,7 +335,7 @@ class TestS3ApiMiddleware(S3ApiTestCase):
self.assertEqual(status.split()[0], '200', body)
for _, _, headers in self.swift.calls_with_headers:
self.assertNotIn('Authorization', headers)
self.assertIsNone(headers['X-Auth-Token'])
self.assertNotIn('X-Auth-Token', headers)
def test_signed_urls_v4_bad_credential(self):
def test(credential, message, extra=b''):
@ -767,7 +767,7 @@ class TestS3ApiMiddleware(S3ApiTestCase):
self.assertEqual(status.split()[0], '200', body)
for _, _, headers in self.swift.calls_with_headers:
self.assertEqual(authz_header, headers['Authorization'])
self.assertIsNone(headers['X-Auth-Token'])
self.assertNotIn('X-Auth-Token', headers)
def test_signature_v4_no_date(self):
environ = {
@ -1096,6 +1096,7 @@ class TestS3ApiMiddleware(S3ApiTestCase):
self.s3_token = S3Token(
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
@ -1122,6 +1123,7 @@ class TestS3ApiMiddleware(S3ApiTestCase):
self.s3_token = S3Token(
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
@ -1150,6 +1152,7 @@ class TestS3ApiMiddleware(S3ApiTestCase):
self.s3_token = S3Token(
self.auth_token, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
@ -1162,6 +1165,8 @@ class TestS3ApiMiddleware(S3ApiTestCase):
with patch.object(self.s3_token, '_json_request') as mock_req:
with patch.object(self.auth_token,
'_do_fetch_token') as mock_fetch:
# sanity check
self.assertIn('id', GOOD_RESPONSE_V2['access']['token'])
mock_resp = requests.Response()
mock_resp._content = json.dumps(
GOOD_RESPONSE_V2).encode('ascii')
@ -1174,21 +1179,28 @@ class TestS3ApiMiddleware(S3ApiTestCase):
mock_fetch.return_value = (MagicMock(), mock_access_info)
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'')
# Even though s3token got a token back from keystone, we drop
# it on the floor, resulting in a 401 Unauthorized at
# `swift.common.middleware.keystoneauth` because
# keystonemiddleware's auth_token strips out all auth headers,
# significantly 'X-Identity-Status'. Without a token, it then
# sets 'X-Identity-Status: Invalid' and never contacts
# Keystone.
self.assertEqual('403 Forbidden', status)
self.assertEqual(1, mock_req.call_count)
# With X-Auth-Token, auth_token will call _do_fetch_token to
# connect to keystone in auth_token, again
self.assertEqual(1, mock_fetch.call_count)
# it never even tries to contact keystone
self.assertEqual(0, mock_fetch.call_count)
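A rough sketch of the keystonemiddleware behavior the comment above describes (not the real auth_token code; WSGI environ keys simplified):

def auth_token_pass(environ):
    # strip any client-supplied identity headers first
    for key in list(environ):
        if key.startswith('HTTP_X_IDENTITY_') or key == 'HTTP_X_ROLES':
            del environ[key]
    if 'HTTP_X_AUTH_TOKEN' not in environ:
        # no token: mark the request invalid and skip Keystone entirely
        environ['HTTP_X_IDENTITY_STATUS'] = 'Invalid'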
def test_s3api_with_s3_token_no_pass_token_to_auth_token(self):
def test_s3api_with_only_s3_token_in_s3acl(self):
self.swift = FakeSwift()
self.keystone_auth = KeystoneAuth(
self.swift, {'operator_roles': 'swift-user'})
self.auth_token = AuthProtocol(
self.keystone_auth, {'delay_auth_decision': 'True'})
self.s3_token = S3Token(
self.auth_token, {'auth_uri': 'https://fakehost/identity'})
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.conf['s3_acl'] = True
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
@ -1196,41 +1208,21 @@ class TestS3ApiMiddleware(S3ApiTestCase):
'Date': self.get_date_header()})
self.swift.register('PUT', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPCreated, {}, None)
self.swift.register('HEAD', '/v1/AUTH_TENANT_ID',
swob.HTTPOk, {}, None)
# For now, s3 acl commits the bucket owner acl via POST
# after PUT container so we need to register the response here
self.swift.register('POST', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('TEST', '/v1/AUTH_TENANT_ID',
swob.HTTPMethodNotAllowed, {}, None)
with patch.object(self.s3_token, '_json_request') as mock_req:
with patch.object(self.auth_token,
'_do_fetch_token') as mock_fetch:
mock_resp = requests.Response()
no_token_id_good_resp = copy.deepcopy(GOOD_RESPONSE_V2)
# delete token id
del no_token_id_good_resp['access']['token']['id']
mock_resp._content = json.dumps(
no_token_id_good_resp).encode('ascii')
mock_resp.status_code = 201
mock_req.return_value = mock_resp
mock_resp = requests.Response()
mock_resp._content = json.dumps(GOOD_RESPONSE_V2).encode('ascii')
mock_resp.status_code = 201
mock_req.return_value = mock_resp
mock_access_info = AccessInfoV2(GOOD_RESPONSE_V2)
mock_access_info.will_expire_soon = \
lambda stale_duration: False
mock_fetch.return_value = (MagicMock(), mock_access_info)
status, headers, body = self.call_s3api(req)
# No token provided from keystone result in 401 Unauthorized
# at `swift.common.middleware.keystoneauth` because auth_token
# will remove all auth headers including 'X-Identity-Status'[1]
# and then, set X-Identity-Status: Invalid at [2]
#
# 1: https://github.com/openstack/keystonemiddleware/blob/
# master/keystonemiddleware/auth_token/__init__.py#L620
# 2: https://github.com/openstack/keystonemiddleware/blob/
# master/keystonemiddleware/auth_token/__init__.py#L627-L629
self.assertEqual('403 Forbidden', status)
self.assertEqual(1, mock_req.call_count)
# if no token provided from keystone, we can skip the call to
# fetch the token
self.assertEqual(0, mock_fetch.call_count)
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'')
self.assertEqual(1, mock_req.call_count)
if __name__ == '__main__':
unittest.main()


@ -249,9 +249,11 @@ class TestRequest(S3ApiTestCase):
m_swift_resp.return_value = FakeSwiftResponse()
s3_req = S3AclRequest(req.environ, MagicMock())
self.assertNotIn('s3api.auth_details', s3_req.environ)
self.assertEqual(s3_req.token, 'token')
def test_to_swift_req_Authorization_not_exist_in_swreq(self):
# The difference from
# test_authenticate_delete_Authorization_from_s3req_headers above is
# that this test asserts the *to_swift_req* method.
container = 'bucket'
obj = 'obj'
method = 'GET'
@ -264,9 +266,12 @@ class TestRequest(S3ApiTestCase):
m_swift_resp.return_value = FakeSwiftResponse()
s3_req = S3AclRequest(req.environ, MagicMock())
# Yes, we *want* to assert this
sw_req = s3_req.to_swift_req(method, container, obj)
# Given the S3AclRequest initialization asserted above,
# to_swift_req should not add an Authorization header or token
self.assertNotIn('s3api.auth_details', sw_req.environ)
self.assertEqual(sw_req.headers['X-Auth-Token'], 'token')
self.assertNotIn('X-Auth-Token', sw_req.headers)
def test_to_swift_req_subrequest_proxy_access_log(self):
container = 'bucket'


@ -196,11 +196,11 @@ class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
def _assert_authorized(self, req, expect_token=True,
account_path='/v1/AUTH_TENANT_ID/'):
def _assert_authorized(self, req, account_path='/v1/AUTH_TENANT_ID/'):
self.assertTrue(
req.path.startswith(account_path),
'%r does not start with %r' % (req.path, account_path))
self.assertNotIn('X-Auth-Token', req.headers)
expected_headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': 'swift-user,_member_',
@ -210,12 +210,8 @@ class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
'X-Tenant-Name': 'TENANT_NAME',
'X-Project-Id': 'TENANT_ID',
'X-Project-Name': 'TENANT_NAME',
'X-Auth-Token': 'TOKEN_ID',
}
for header, value in expected_headers.items():
if header == 'X-Auth-Token' and not expect_token:
self.assertNotIn(header, req.headers)
continue
self.assertIn(header, req.headers)
self.assertEqual(value, req.headers[header])
# WSGI wants native strings for headers
@ -253,7 +249,7 @@ class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, expect_token=False)
self._assert_authorized(req)
def test_authorized_bytes(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
@ -533,7 +529,7 @@ class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
cache = MOCK_CACHE_FROM_ENV.return_value
fake_cache_response = ({}, 'token_id', {'id': 'tenant_id'}, 'secret')
fake_cache_response = ({}, {'id': 'tenant_id'}, 'secret')
cache.get.return_value = fake_cache_response
MOCK_REQUEST.return_value = TestResponse({
@ -600,8 +596,7 @@ class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
self.assertTrue(MOCK_REQUEST.called)
tenant = GOOD_RESPONSE_V2['access']['token']['tenant']
token = GOOD_RESPONSE_V2['access']['token']['id']
expected_cache = (expected_headers, token, tenant, 'secret')
expected_cache = (expected_headers, tenant, 'secret')
cache.set.assert_called_once_with('s3secret/access', expected_cache,
time=20)


@ -16,6 +16,7 @@
import array
import collections
import six.moves.cPickle as pickle
import hashlib
import os
import unittest
import stat
@ -63,6 +64,8 @@ class TestRingData(unittest.TestCase):
rd_got._replica2part2dev_id)
self.assertEqual(rd_expected.devs, rd_got.devs)
self.assertEqual(rd_expected._part_shift, rd_got._part_shift)
self.assertEqual(rd_expected.next_part_power, rd_got.next_part_power)
self.assertEqual(rd_expected.version, rd_got.version)
def test_attrs(self):
r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]]
@ -230,6 +233,17 @@ class TestRing(TestRingBase):
self.assertEqual(self.ring.devs, self.intended_devs)
self.assertEqual(self.ring.reload_time, self.intended_reload_time)
self.assertEqual(self.ring.serialized_path, self.testgz)
self.assertIsNone(self.ring.version)
with open(self.testgz, 'rb') as fp:
expected_md5 = hashlib.md5()
expected_size = 0
for chunk in iter(lambda: fp.read(2 ** 16), b''):
expected_md5.update(chunk)
expected_size += len(chunk)
self.assertEqual(self.ring.md5, expected_md5.hexdigest())
self.assertEqual(self.ring.size, expected_size)
# test invalid endcap
with mock.patch.object(utils, 'HASH_PATH_SUFFIX', b''), \
mock.patch.object(utils, 'HASH_PATH_PREFIX', b''), \
@ -900,6 +914,7 @@ class TestRing(TestRingBase):
rb.rebalance()
rb.get_ring().save(self.testgz)
r = ring.Ring(self.testdir, ring_name='whatever')
self.assertEqual(r.version, rb.version)
class CountingRingTable(object):


@ -56,7 +56,8 @@ class TestBufferedHTTP(unittest.TestCase):
b'RESPONSE')
fp.flush()
line = fp.readline()
path = b'/dev/' + expected_par + b'/path/..%25/?omg&no=%7f'
path = (b'/dev/' + expected_par +
b'/path/..%25/?omg=&no=%7F&%FF=%FF&no=%25ff')
self.assertEqual(
line,
b'PUT ' + path + b' HTTP/1.1\r\n')
@ -83,7 +84,7 @@ class TestBufferedHTTP(unittest.TestCase):
'PUT', '/path/..%/', {
'content-length': 7,
'x-header': 'value'},
query_string='omg&no=%7f')
query_string='omg&no=%7f&\xff=%ff&no=%25ff')
conn.send(b'REQUEST\r\n')
self.assertTrue(conn.sock.getsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY))


@ -1288,9 +1288,9 @@ class TestServer(unittest.TestCase):
# setup pipe
rfd, wfd = os.pipe()
# subprocess connection to read stdout
self.stdout = os.fdopen(rfd)
self.stdout = os.fdopen(rfd, 'rb')
# real process connection to write stdout
self._stdout = os.fdopen(wfd, 'w')
self._stdout = os.fdopen(wfd, 'wb')
self.delay = delay
self.finished = False
self.returncode = None
@ -1317,9 +1317,9 @@ class TestServer(unittest.TestCase):
pass
def fail(self):
print('mock process started', file=self._stdout)
self._stdout.write(b'mock process started\n')
sleep(self.delay) # perform setup processing
print('mock process failed to start', file=self._stdout)
self._stdout.write(b'mock process failed to start\n')
self.close_stdout()
def poll(self):
@ -1327,12 +1327,12 @@ class TestServer(unittest.TestCase):
return self.returncode or None
def run(self):
print('mock process started', file=self._stdout)
self._stdout.write(b'mock process started\n')
sleep(self.delay) # perform setup processing
print('setup complete!', file=self._stdout)
self._stdout.write(b'setup complete!\n')
self.close_stdout()
sleep(self.delay) # do some more processing
print('mock process finished', file=self._stdout)
self._stdout.write(b'mock process finished\n')
self.finished = True
class MockTime(object):


@ -70,6 +70,7 @@ class TestWSGI(unittest.TestCase):
config = """
[DEFAULT]
swift_dir = TEMPDIR
fallocate_reserve = 1%
[pipeline:main]
pipeline = proxy-server
@ -122,6 +123,7 @@ class TestWSGI(unittest.TestCase):
'__file__': conf_file,
'here': os.path.dirname(conf_file),
'conn_timeout': '0.2',
'fallocate_reserve': '1%',
'swift_dir': t,
'__name__': 'proxy-server'
}
@ -2064,6 +2066,7 @@ class TestPipelineModification(unittest.TestCase):
[filter:tempauth]
use = egg:swift#tempauth
user_test_tester = t%%sting .admin
[filter:copy]
use = egg:swift#copy
@ -2090,7 +2093,10 @@ class TestPipelineModification(unittest.TestCase):
for version, pipeline, expected in to_test:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents % (t, pipeline))
to_write = contents % (t, pipeline)
# Sanity check that the password only has one % in it
self.assertIn('t%sting', to_write)
f.write(to_write)
app = wsgi.loadapp(conf_file, global_conf={})
actual = ' '.join(m.rsplit('.', 1)[1]
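The doubled %% in the tempauth user line exists because the config template is rendered with Python %-formatting, where %% escapes to a single literal %. A standalone illustration:

template = 'user_test_tester = t%%sting .admin\npipeline = %s\n'
rendered = template % ('proxy-server',)
assert 't%sting .admin' in rendered  # %% collapsed to one %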


@ -3028,13 +3028,14 @@ class TestContainerController(unittest.TestCase):
'/sda1/p/a/c', method='PUT', headers=headers, body=body)
self.assertEqual(202, req.get_response(self.controller).status_int)
def do_test(params):
def do_test(params, expected_status):
params['format'] = 'json'
headers = {'X-Backend-Record-Type': 'shard'}
req = Request.blank('/sda1/p/a/c', method='GET',
headers=headers, params=params)
with mock_timestamp_now(ts_now):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, expected_status)
self.assertEqual(resp.content_type, 'text/html')
self.assertNotIn('X-Backend-Record-Type', resp.headers)
self.assertNotIn('X-Backend-Sharding-State', resp.headers)
@ -3042,26 +3043,19 @@ class TestContainerController(unittest.TestCase):
self.assertNotIn('X-Container-Bytes-Used', resp.headers)
self.assertNotIn('X-Timestamp', resp.headers)
self.assertNotIn('X-PUT-Timestamp', resp.headers)
return resp
resp = do_test({'states': 'bad'})
self.assertEqual(resp.status_int, 400)
resp = do_test({'delimiter': 'bad'})
self.assertEqual(resp.status_int, 412)
resp = do_test({'limit': str(constraints.CONTAINER_LISTING_LIMIT + 1)})
self.assertEqual(resp.status_int, 412)
do_test({'states': 'bad'}, 400)
do_test({'limit': str(constraints.CONTAINER_LISTING_LIMIT + 1)}, 412)
with mock.patch('swift.container.server.check_drive',
side_effect=ValueError('sda1 is not mounted')):
resp = do_test({})
self.assertEqual(resp.status_int, 507)
do_test({}, 507)
# delete the container
req = Request.blank('/sda1/p/a/c', method='DELETE',
headers={'X-Timestamp': next(ts_iter).normal})
self.assertEqual(204, req.get_response(self.controller).status_int)
resp = do_test({'states': 'bad'})
self.assertEqual(resp.status_int, 404)
do_test({'states': 'bad'}, 404)
def test_GET_auto_record_type(self):
# make a container
@ -3982,13 +3976,6 @@ class TestContainerController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(resp.body.split(b'\n'), [b'a1', b'a2', b'a3', b''])
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a/c?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
def test_GET_delimiter(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
@ -4014,6 +4001,111 @@ class TestContainerController(unittest.TestCase):
{"subdir": "US-TX-"},
{"subdir": "US-UT-"}])
def test_GET_multichar_delimiter(self):
self.maxDiff = None
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
for i in ('US~~TX~~A', 'US~~TX~~B', 'US~~OK~~A', 'US~~OK~~B',
'US~~OK~Tulsa~~A', 'US~~OK~Tulsa~~B',
'US~~UT~~A', 'US~~UT~~~B'):
req = Request.blank(
'/sda1/p/a/c/%s' % i,
environ={
'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
'HTTP_X_CONTENT_TYPE': 'text/plain', 'HTTP_X_ETAG': 'x',
'HTTP_X_SIZE': 0})
self._update_object_put_headers(req)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank(
'/sda1/p/a/c?prefix=US~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~OK~Tulsa~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"},
{"subdir": "US~~TX~~"},
{"subdir": "US~~OK~~"},
{"subdir": "US~~OK~Tulsa~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
json.loads(resp.body),
[{"subdir": "US~~UT~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"subdir": "US~~UT~~~"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"subdir": "US~~UT~~~"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~A"},
{"name": "US~~UT~~~B"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~&delimiter=~~&format=json&reverse=on',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"},
{"name": "US~~UT~~A"}])
req = Request.blank(
'/sda1/p/a/c?prefix=US~~UT~~~&delimiter=~~&format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(
[{k: v for k, v in item.items() if k in ('subdir', 'name')}
for item in json.loads(resp.body)],
[{"name": "US~~UT~~~B"}])
def test_GET_delimiter_non_ascii(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT',
@ -4279,18 +4371,12 @@ class TestContainerController(unittest.TestCase):
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400,
"%d on param %s" % (resp.status_int, param))
# Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
req = Request.blank('/sda1/p/a/c?delimiter=\xce\xa9',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412,
"%d on param delimiter" % (resp.status_int))
req = Request.blank('/sda1/p/a/c', method='PUT',
headers={'X-Timestamp': Timestamp(1).internal})
req.get_response(self.controller)
# Good UTF8 sequence, ignored for limit, doesn't affect other queries
for param in ('limit', 'marker', 'path', 'prefix', 'end_marker',
'format'):
'format', 'delimiter'):
req = Request.blank('/sda1/p/a/c?%s=\xce\xa9' % param,
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)


@ -21,6 +21,7 @@ import os
import shutil
from contextlib import contextmanager
from tempfile import mkdtemp
from uuid import uuid4
import mock
import unittest
@ -1320,6 +1321,65 @@ class TestSharder(BaseTestSharder):
self.assertEqual(8, context.cleave_to_row)
self.assertEqual(8, context.max_row)
def test_cleave_root_empty_db_with_ranges(self):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
sharder_conf = {'cleave_batch_size': 1}
with self._mock_sharder(sharder_conf) as sharder:
self.assertTrue(sharder._cleave(broker))
info_lines = sharder.logger.get_lines_for_level('info')
expected_zero_obj = [line for line in info_lines
if " - zero objects found" in line]
self.assertEqual(len(expected_zero_obj), len(shard_bounds))
cleaving_context = CleavingContext.load(broker)
# even though there is a cleave_batch_size of 1, we don't count empty
# ranges when cleaving since they aren't replicated
self.assertEqual(cleaving_context.ranges_done, 3)
self.assertEqual(cleaving_context.ranges_todo, 0)
self.assertTrue(cleaving_context.cleaving_done)
def test_cleave_root_empty_db_with_pre_existing_shard_db_handoff(self):
broker = self._make_broker()
broker.enable_sharding(Timestamp.now())
shard_bounds = (('', 'd'), ('d', 'x'), ('x', ''))
shard_ranges = self._make_shard_ranges(
shard_bounds, state=ShardRange.CREATED)
broker.merge_shard_ranges(shard_ranges)
self.assertTrue(broker.set_sharding_state())
sharder_conf = {'cleave_batch_size': 1}
with self._mock_sharder(sharder_conf) as sharder:
# pre-create a shard broker on a handoff location. This forces
# the sharder not to skip the range but to replicate it, using
# up a cleave_batch_size count.
sharder._get_shard_broker(shard_ranges[0], broker.root_path,
0)
self.assertFalse(sharder._cleave(broker))
info_lines = sharder.logger.get_lines_for_level('info')
expected_zero_obj = [line for line in info_lines
if " - zero objects found" in line]
self.assertEqual(len(expected_zero_obj), 1)
cleaving_context = CleavingContext.load(broker)
# even though there is a cleave_batch_size of 1, we don't count empty
# ranges when cleaving since they aren't replicated
self.assertEqual(cleaving_context.ranges_done, 1)
self.assertEqual(cleaving_context.ranges_todo, 2)
self.assertFalse(cleaving_context.cleaving_done)
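A minimal sketch (assumed helper name, not the sharder's real code) of the batching rule these two tests pin down: only ranges whose cleaving replicates a shard DB count against cleave_batch_size, so empty ranges never end a batch early.

def cleave(ranges, batch_size):
    counted = 0
    for i, shard_range in enumerate(ranges):
        if cleave_one(shard_range):  # hypothetical; True if replicated
            counted += 1
        if counted >= batch_size and i + 1 < len(ranges):
            return False  # batch used up; cleaving not done
    return True  # cleaving_done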
def test_cleave_shard(self):
broker = self._make_broker(account='.shards_a', container='shard_c')
own_shard_range = ShardRange(
@ -4486,6 +4546,63 @@ class TestSharder(BaseTestSharder):
set((call[0][0].path, call[0][1]['id'], call[0][2])
for call in mock_process_broker.call_args_list))
def test_audit_cleave_contexts(self):
def add_cleave_context(id, last_modified):
params = {'ref': id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % id
with mock_timestamp_now(Timestamp(last_modified)):
broker.update_metadata(
{key: (json.dumps(params),
Timestamp(last_modified).internal)})
def get_context(id, broker):
data = broker.get_sharding_sysmeta().get('Context-%s' % id)
if data:
return CleavingContext(**json.loads(data))
return data
reclaim_age = 100
broker = self._make_broker()
# sanity check
self.assertIsNone(broker.get_own_shard_range(no_default=True))
self.assertEqual(UNSHARDED, broker.get_db_state())
# Setup some cleaving contexts
id_old, id_newish = [str(uuid4()) for _ in range(2)]
contexts = ((id_old, 1),
(id_newish, reclaim_age // 2))
for id, last_modified in contexts:
add_cleave_context(id, last_modified)
with self._mock_sharder({'reclaim_age': str(reclaim_age)}) as sharder:
with mock_timestamp_now(Timestamp(reclaim_age + 2)):
sharder._audit_cleave_contexts(broker)
old_ctx = get_context(id_old, broker)
self.assertEqual(old_ctx, "")
newish_ctx = get_context(id_newish, broker)
self.assertEqual(newish_ctx.ref, id_newish)
# Pushing time forward another reclaim age removes the remaining
# newish context as well, since its last_modified is by then too old.
with self._mock_sharder({'reclaim_age': str(reclaim_age)}) as sharder:
with mock_timestamp_now(Timestamp(reclaim_age * 2)):
sharder._audit_cleave_contexts(broker)
newish_ctx = get_context(id_newish, broker)
self.assertEqual(newish_ctx, "")
class TestCleavingContext(BaseTestSharder):
def test_init(self):
@ -4571,11 +4688,85 @@ class TestCleavingContext(BaseTestSharder):
self.assertEqual(2, ctx.ranges_done)
self.assertEqual(4, ctx.ranges_todo)
def test_load_all(self):
broker = self._make_broker()
last_ctx = None
timestamp = Timestamp.now()
db_ids = [str(uuid4()) for _ in range(6)]
for db_id in db_ids:
params = {'ref': db_id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % db_id
broker.update_metadata(
{key: (json.dumps(params), timestamp.internal)})
first_ctx = None
for ctx, lm in CleavingContext.load_all(broker):
if not first_ctx:
first_ctx = ctx
last_ctx = ctx
self.assertIn(ctx.ref, db_ids)
self.assertEqual(lm, timestamp.internal)
# If a context is deleted (metadata is "") then it's skipped
last_ctx.delete(broker)
db_ids.remove(last_ctx.ref)
# and let's modify the first
with mock_timestamp_now() as new_timestamp:
first_ctx.store(broker)
for ctx, lm in CleavingContext.load_all(broker):
self.assertIn(ctx.ref, db_ids)
if ctx.ref == first_ctx.ref:
self.assertEqual(lm, new_timestamp.internal)
else:
self.assertEqual(lm, timestamp.internal)
def test_delete(self):
broker = self._make_broker()
db_id = broker.get_info()['id']
params = {'ref': db_id,
'cursor': 'curs',
'max_row': 2,
'cleave_to_row': 2,
'last_cleave_to_row': 1,
'cleaving_done': False,
'misplaced_done': True,
'ranges_done': 2,
'ranges_todo': 4}
key = 'X-Container-Sysmeta-Shard-Context-%s' % db_id
broker.update_metadata(
{key: (json.dumps(params), Timestamp.now().internal)})
ctx = CleavingContext.load(broker)
self.assertEqual(db_id, ctx.ref)
# Now let's delete it. After deletion the metadata key still exists,
# but its value is "" so that the context can be reaped later.
ctx.delete(broker)
sysmeta = broker.get_sharding_sysmeta()
for key, val in sysmeta.items():
if key == "Context-%s" % db_id:
self.assertEqual(val, "")
break
else:
self.fail("Deleted context 'Context-%s' not found")
def test_store(self):
broker = self._make_sharding_broker()
old_db_id = broker.get_brokers()[0].get_info()['id']
last_mod = Timestamp.now()
ctx = CleavingContext(old_db_id, 'curs', 12, 11, 2, True, True, 2, 4)
ctx.store(broker)
with mock_timestamp_now(last_mod):
ctx.store(broker)
key = 'X-Container-Sysmeta-Shard-Context-%s' % old_db_id
data = json.loads(broker.metadata[key][0])
expected = {'ref': old_db_id,
@ -4588,6 +4779,8 @@ class TestCleavingContext(BaseTestSharder):
'ranges_done': 2,
'ranges_todo': 4}
self.assertEqual(expected, data)
# last modified is the metadata timestamp
self.assertEqual(broker.metadata[key][1], last_mod.internal)
def test_store_add_row_load(self):
# adding row to older db changes only max_row in the context


@ -794,11 +794,13 @@ class TestContainerSync(unittest.TestCase):
# Succeeds because no rows match
log_line = cs.logger.get_lines_for_level('info')[0]
lines = log_line.split(',')
self.assertTrue('sync_point2: 5', lines.pop().strip())
self.assertTrue('sync_point1: 5', lines.pop().strip())
self.assertTrue('bytes: 1100', lines.pop().strip())
self.assertTrue('deletes: 2', lines.pop().strip())
self.assertTrue('puts: 3', lines.pop().strip())
self.assertEqual('total_rows: 1', lines.pop().strip())
self.assertEqual('sync_point2: None', lines.pop().strip())
self.assertEqual('sync_point1: 5', lines.pop().strip())
self.assertEqual('bytes: 0', lines.pop().strip())
self.assertEqual('deletes: 0', lines.pop().strip())
self.assertEqual('posts: 0', lines.pop().strip())
self.assertEqual('puts: 0', lines.pop().strip())
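The swapped assertions above (and the same change in the replicator test further down) fix a classic unittest pitfall: assertTrue(a, b) treats b as the failure message, so any non-empty first argument passes regardless. Standalone illustration:

import unittest

class Demo(unittest.TestCase):
    def test_always_passed(self):
        # old pattern: second arg is only a message, never compared
        self.assertTrue('sync_point1: 5', 'sync_point1: 4')

    def test_actually_checks(self):
        self.assertEqual('sync_point1: 5', 'sync_point1: 5')

if __name__ == '__main__':
    unittest.main()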
def test_container_sync_row_delete(self):
self._test_container_sync_row_delete(None, None)


@ -214,27 +214,38 @@ def setup_servers(the_object_server=object_server, extra_conf=None):
logging_prosv = proxy_logging.ProxyLoggingMiddleware(
listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger)
prospa = spawn(wsgi.server, prolis, logging_prosv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con1spa = spawn(wsgi.server, con1lis, con1srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
con2spa = spawn(wsgi.server, con2lis, con2srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl,
protocol=SwiftHttpProtocol)
protocol=SwiftHttpProtocol,
capitalize_response_headers=False)
context["test_coros"] = \
(prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa,
obj4spa, obj5spa, obj6spa)


@ -471,7 +471,7 @@ class TestObjectReplicator(unittest.TestCase):
for job in jobs:
jobs_by_pol_part[str(int(job['policy'])) + job['partition']] = job
self.assertEqual(len(jobs_to_delete), 2)
self.assertTrue('1', jobs_to_delete[0]['partition'])
self.assertEqual('1', jobs_to_delete[0]['partition'])
self.assertEqual(
[node['id'] for node in jobs_by_pol_part['00']['nodes']], [1, 2])
self.assertEqual(


@ -19,14 +19,15 @@ import socket
import unittest
from eventlet import Timeout
import six
from six.moves import urllib
from swift.common.constraints import CONTAINER_LISTING_LIMIT
from swift.common.swob import Request
from swift.common.swob import Request, bytes_to_wsgi, str_to_wsgi, wsgi_quote
from swift.common.utils import ShardRange, Timestamp
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info, Controller, \
get_container_info
from swift.proxy.controllers.base import headers_to_container_info, \
Controller, get_container_info
from test import annotate_failure
from test.unit import fake_http_connect, FakeRing, FakeMemcache, \
make_timestamp_iter
@ -435,13 +436,21 @@ class TestContainerController(TestRingBase):
self._assert_responses('POST', POST_TEST_CASES)
def _make_shard_objects(self, shard_range):
lower = ord(shard_range.lower[0]) if shard_range.lower else ord('@')
upper = ord(shard_range.upper[0]) if shard_range.upper else ord('z')
if six.PY2:
lower = ord(shard_range.lower.decode('utf8')[0]
if shard_range.lower else '@')
upper = ord(shard_range.upper.decode('utf8')[0]
if shard_range.upper else u'\U0001ffff')
else:
lower = ord(shard_range.lower[0] if shard_range.lower else '@')
upper = ord(shard_range.upper[0] if shard_range.upper
else '\U0001ffff')
objects = [{'name': chr(i), 'bytes': i, 'hash': 'hash%s' % chr(i),
objects = [{'name': six.unichr(i), 'bytes': i,
'hash': 'hash%s' % six.unichr(i),
'content_type': 'text/plain', 'deleted': 0,
'last_modified': next(self.ts_iter).isoformat}
for i in range(lower + 1, upper + 1)]
for i in range(lower + 1, upper + 1)][:1024]
return objects
def _check_GET_shard_listing(self, mock_responses, expected_objects,
@ -484,9 +493,12 @@ class TestContainerController(TestRingBase):
with annotate_failure('Request check at index %d.' % i):
# strip off /sdx/0/ from path
self.assertEqual(exp_path, req['path'][7:])
self.assertEqual(
dict(exp_params, format='json'),
dict(urllib.parse.parse_qsl(req['qs'], True)))
if six.PY2:
got_params = dict(urllib.parse.parse_qsl(req['qs'], True))
else:
got_params = dict(urllib.parse.parse_qsl(
req['qs'], True, encoding='latin1'))
self.assertEqual(dict(exp_params, format='json'), got_params)
for k, v in exp_headers.items():
self.assertIn(k, req['headers'])
self.assertEqual(v, req['headers'][k])
@ -517,10 +529,11 @@ class TestContainerController(TestRingBase):
self.assertEqual(headers_to_container_info(info_hdrs), info)
def test_GET_sharded_container(self):
shard_bounds = (('', 'ham'), ('ham', 'pie'), ('pie', ''))
# Don't worry, ShardRange._encode takes care of unicode/bytes issues
shard_bounds = ('', 'ham', 'pie', u'\N{SNOWMAN}', u'\U0001F334', '')
shard_ranges = [
ShardRange('.shards_a/c_%s' % upper, Timestamp.now(), lower, upper)
for lower, upper in shard_bounds]
for lower, upper in zip(shard_bounds[:-1], shard_bounds[1:])]
sr_dicts = [dict(sr) for sr in shard_ranges]
sr_objs = [self._make_shard_objects(sr) for sr in shard_ranges]
shard_resp_hdrs = [
@ -530,7 +543,7 @@ class TestContainerController(TestRingBase):
sum([obj['bytes'] for obj in sr_objs[i]]),
'X-Container-Meta-Flavour': 'flavour%d' % i,
'X-Backend-Storage-Policy-Index': 0}
for i in range(3)]
for i, _ in enumerate(shard_ranges)]
all_objects = []
for objects in sr_objs:
@ -556,7 +569,9 @@ class TestContainerController(TestRingBase):
(200, sr_dicts, root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
(200, sr_objs[2], shard_resp_hdrs[2]),
(200, sr_objs[3], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
# path, headers, params
@ -564,15 +579,29 @@ class TestContainerController(TestRingBase):
dict(states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='ham\x00', limit=str(limit),
states='listing')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))), # 200
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))) # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='\xd1\xb0', end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1] + sr_objs[2]
+ sr_objs[3])))), # 200
]
resp = self._check_GET_shard_listing(
@ -588,7 +617,7 @@ class TestContainerController(TestRingBase):
(200, sr_dicts[:2] + [dict(root_range)], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], root_resp_hdrs)
(200, sr_objs[2] + sr_objs[3] + sr_objs[4], root_resp_hdrs)
]
expected_requests = [
# path, headers, params
@ -615,6 +644,8 @@ class TestContainerController(TestRingBase):
mock_responses = [
# status, body, headers
(200, list(reversed(sr_dicts)), root_shard_resp_hdrs),
(200, list(reversed(sr_objs[4])), shard_resp_hdrs[4]),
(200, list(reversed(sr_objs[3])), shard_resp_hdrs[3]),
(200, list(reversed(sr_objs[2])), shard_resp_hdrs[2]),
(200, list(reversed(sr_objs[1])), shard_resp_hdrs[1]),
(200, list(reversed(sr_objs[0])), shard_resp_hdrs[0]),
@ -623,15 +654,31 @@ class TestContainerController(TestRingBase):
# path, headers, params
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', reverse='true')),
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='pie', reverse='true',
limit=str(limit), states='listing')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'},
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='', end_marker='\xf0\x9f\x8c\xb4', states='listing',
reverse='true', limit=str(limit))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='\xf0\x9f\x8c\xb5', end_marker='\xe2\x98\x83',
states='listing', reverse='true',
limit=str(limit - len(sr_objs[4])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='\xe2\x98\x84', end_marker='pie', states='listing',
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='q', end_marker='ham', states='listing',
reverse='true', limit=str(limit - len(sr_objs[2])))), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'},
reverse='true',
limit=str(limit - len(sr_objs[4] + sr_objs[3]
+ sr_objs[2])))), # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto'},
dict(marker='i', end_marker='', states='listing', reverse='true',
limit=str(limit - len(sr_objs[2] + sr_objs[1])))), # 200
limit=str(limit - len(sr_objs[4] + sr_objs[3] + sr_objs[2]
+ sr_objs[1])))), # 200
]
resp = self._check_GET_shard_listing(
@ -656,15 +703,18 @@ class TestContainerController(TestRingBase):
dict(limit=str(limit), states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(limit=str(limit), states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'}, # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 200
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]))))
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
@ -672,31 +722,35 @@ class TestContainerController(TestRingBase):
self.check_response(resp, root_resp_hdrs)
# GET with marker
marker = sr_objs[1][2]['name']
first_included = len(sr_objs[0]) + 2
marker = bytes_to_wsgi(sr_objs[3][2]['name'].encode('utf8'))
first_included = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 2)
limit = CONTAINER_LISTING_LIMIT
expected_objects = all_objects[first_included:]
mock_responses = [
(404, '', {}),
(200, sr_dicts[1:], root_shard_resp_hdrs),
(200, sr_dicts[3:], root_shard_resp_hdrs),
(404, '', {}),
(200, sr_objs[1][2:], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2])
(200, sr_objs[3][2:], shard_resp_hdrs[3]),
(200, sr_objs[4], shard_resp_hdrs[4]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=marker, states='listing')), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 404
dict(marker=marker, end_marker='pie\x00', states='listing',
limit=str(limit))),
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 200
dict(marker=marker, end_marker='pie\x00', states='listing',
limit=str(limit))),
(shard_ranges[2].name, {'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='p', end_marker='', states='listing',
limit=str(limit - len(sr_objs[1][2:])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker=marker, end_marker='\xf0\x9f\x8c\xb4\x00',
states='listing', limit=str(limit))),
(wsgi_quote(str_to_wsgi(shard_ranges[4].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='\xe2\xa8\x83', end_marker='', states='listing',
limit=str(limit - len(sr_objs[3][2:])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
@ -704,30 +758,51 @@ class TestContainerController(TestRingBase):
self.check_response(resp, root_resp_hdrs)
# GET with end marker
end_marker = sr_objs[1][6]['name']
first_excluded = len(sr_objs[0]) + 6
end_marker = bytes_to_wsgi(sr_objs[3][6]['name'].encode('utf8'))
first_excluded = (len(sr_objs[0]) + len(sr_objs[1])
+ len(sr_objs[2]) + 6)
expected_objects = all_objects[:first_excluded]
mock_responses = [
(404, '', {}),
(200, sr_dicts[:2], root_shard_resp_hdrs),
(200, sr_dicts[:4], root_shard_resp_hdrs),
(200, sr_objs[0], shard_resp_hdrs[0]),
(404, '', {}),
(200, sr_objs[1][:6], shard_resp_hdrs[1])
(200, sr_objs[1], shard_resp_hdrs[1]),
(200, sr_objs[2], shard_resp_hdrs[2]),
(404, '', {}),
(200, sr_objs[3][:6], shard_resp_hdrs[3]),
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(end_marker=end_marker, states='listing')), # 404
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(end_marker=end_marker, states='listing')), # 200
(shard_ranges[0].name, {'X-Backend-Record-Type': 'auto'}, # 200
(wsgi_quote(str_to_wsgi(shard_ranges[0].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='', end_marker='ham\x00', states='listing',
limit=str(limit))),
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 404
dict(marker='h', end_marker=end_marker, states='listing',
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto'}, # 404
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='h', end_marker=end_marker, states='listing',
(wsgi_quote(str_to_wsgi(shard_ranges[1].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='h', end_marker='pie\x00', states='listing',
limit=str(limit - len(sr_objs[0])))),
(wsgi_quote(str_to_wsgi(shard_ranges[2].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='p', end_marker='\xe2\x98\x83\x00', states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 404
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker='\xd1\xb0', end_marker=end_marker, states='listing',
limit=str(limit - len(sr_objs[0] + sr_objs[1]
+ sr_objs[2])))),
]
resp = self._check_GET_shard_listing(
mock_responses, expected_objects, expected_requests,
@ -738,14 +813,15 @@ class TestContainerController(TestRingBase):
limit = 2
expected_objects = all_objects[first_included:first_excluded]
mock_responses = [
(200, sr_dicts[1:2], root_shard_resp_hdrs),
(200, sr_objs[1][2:6], shard_resp_hdrs[1])
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, sr_objs[3][2:6], shard_resp_hdrs[3])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(states='listing', limit=str(limit),
marker=marker, end_marker=end_marker)), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker=marker, end_marker=end_marker, states='listing',
limit=str(limit))),
]
@ -758,14 +834,15 @@ class TestContainerController(TestRingBase):
# reverse with marker, end_marker
expected_objects.reverse()
mock_responses = [
(200, sr_dicts[1:2], root_shard_resp_hdrs),
(200, list(reversed(sr_objs[1][2:6])), shard_resp_hdrs[1])
(200, sr_dicts[3:4], root_shard_resp_hdrs),
(200, list(reversed(sr_objs[3][2:6])), shard_resp_hdrs[3])
]
expected_requests = [
('a/c', {'X-Backend-Record-Type': 'auto'},
dict(marker=end_marker, reverse='true', end_marker=marker,
limit=str(limit), states='listing',)), # 200
(shard_ranges[1].name, {'X-Backend-Record-Type': 'auto'}, # 200
(wsgi_quote(str_to_wsgi(shard_ranges[3].name)),
{'X-Backend-Record-Type': 'auto'}, # 200
dict(marker=end_marker, end_marker=marker, states='listing',
limit=str(limit), reverse='true')),
]

View File

@ -1104,6 +1104,7 @@ class TestReplicatedObjController(CommonObjectControllerMixin,
body = unchunk_body(body)
self.assertEqual('100-continue', headers['Expect'])
self.assertEqual('chunked', headers['Transfer-Encoding'])
self.assertNotIn('Content-Length', headers)
else:
self.assertNotIn('Transfer-Encoding', headers)
if body or not test_body:
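The new assertNotIn pins down an HTTP rule worth spelling out: per RFC 7230 section 3.3.2 a sender must not include Content-Length alongside Transfer-Encoding, so when the proxy streams a chunked PUT to the object servers it has to drop any Content-Length. A hedged sketch of that header scrub; the helper name is illustrative, not the proxy's actual function:

def backend_put_headers(client_headers):
    # illustrative only: strip Content-Length when chunking to backends,
    # since RFC 7230 forbids sending both framing indicators together
    headers = dict(client_headers)
    if headers.get('Transfer-Encoding') == 'chunked':
        headers.pop('Content-Length', None)
    return headers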
@ -4677,6 +4678,15 @@ class TestECObjControllerMimePutter(BaseObjectControllerMixin,
self.assertEqual(resp.status_int, 201)
def test_PUT_with_body(self):
self._test_PUT_with_body()
def test_PUT_with_chunked_body(self):
self._test_PUT_with_body(chunked=True, content_length=False)
def test_PUT_with_both_body(self):
self._test_PUT_with_body(chunked=True, content_length=True)
def _test_PUT_with_body(self, chunked=False, content_length=True):
segment_size = self.policy.ec_segment_size
test_body = (b'asdf' * segment_size)[:-10]
# make the footers callback not include Etag footer so that we can
@ -4689,6 +4699,10 @@ class TestECObjControllerMimePutter(BaseObjectControllerMixin,
etag = md5(test_body).hexdigest()
size = len(test_body)
req.body = test_body
if chunked:
req.headers['Transfer-Encoding'] = 'chunked'
if not content_length:
del req.headers['Content-Length']
codes = [201] * self.replicas()
resp_headers = {
'Some-Other-Header': 'Four',
@ -4705,8 +4719,8 @@ class TestECObjControllerMimePutter(BaseObjectControllerMixin,
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers[
'X-Backend-Obj-Content-Length']
put_requests[conn_id]['backend-content-length'] = headers.get(
'X-Backend-Obj-Content-Length')
put_requests[conn_id]['x-timestamp'] = headers[
'X-Timestamp']
@ -4734,9 +4748,6 @@ class TestECObjControllerMimePutter(BaseObjectControllerMixin,
self.assertIsNotNone(info['boundary'],
"didn't get boundary for conn %r" % (
connection_id,))
self.assertTrue(size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
@ -4759,12 +4770,19 @@ class TestECObjControllerMimePutter(BaseObjectControllerMixin,
obj_payload = obj_part.get_payload(decode=True)
frag_archives.append(obj_payload)
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
if chunked:
self.assertIsNone(info['backend-content-length'])
else:
self.assertTrue(
size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
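The truncated comment above refers to a real stdlib limitation: email.parser.FeedParser (and its BytesFeedParser sibling) can only parse a complete message whose headers declare the boundary, so the usual workaround is to synthesize that header before feeding the body. A minimal sketch of the pattern, assuming body and boundary are bytes:

from email.parser import BytesFeedParser

def parse_multipart(body, boundary):
    # FeedParser can't accept a body and boundary separately; give it a
    # synthetic Content-Type header that carries the boundary instead
    parser = BytesFeedParser()
    parser.feed(b'Content-Type: multipart/mixed; boundary=%s\r\n\r\n'
                % boundary)
    parser.feed(body)
    return parser.close()  # an email.message.Message holding the parts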

View File

@ -2432,6 +2432,41 @@ class TestReplicatedObjectController(
self.assertEqual(res.status_int, 200)
self.assertEqual(res.body, b'')
@unpatch_policies
def test_PUT_GET_unicode_metadata(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
path = b'/v1/a/c/o.zerobyte'
fd.write(b'PUT %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'X-Storage-Token: t\r\n'
b'Expect: 100-continue\r\n'
b'Transfer-Encoding: chunked\r\n'
b'Content-Type: application/octet-stream\r\n'
b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d\r\n'
b'\r\n0\r\n\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 100'
self.assertEqual(headers[:len(exp)], exp)
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(b'GET %s HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n' % (path,))
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d',
headers.split(b'\r\n'))
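What this test exercises end to end is the WSGI convention that header values are native strings holding latin-1-decoded bytes, so UTF-8 metadata has to survive a decode/encode round trip (the job of helpers like bytes_to_wsgi used elsewhere in these tests). A sketch of the semantics, not Swift's exact code:

# the latin-1 round trip WSGI imposes on header bytes
def to_wsgi(raw):          # bytes -> native str; every byte maps 1:1
    return raw.decode('latin-1')

def from_wsgi(wsgi_str):   # native str -> the original bytes, losslessly
    return wsgi_str.encode('latin-1')

assert from_wsgi(to_wsgi(b'\xf0\x9f\x8c\xb4')) == b'\xf0\x9f\x8c\xb4'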
@unpatch_policies
def test_GET_short_read(self):
prolis = _test_sockets[0]
@ -3136,14 +3171,68 @@ class TestReplicatedObjectController(
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
with mock.patch('swift.obj.diskfile.fallocate') as mock_fallocate:
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: keep-alive\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
b'4\r\n'
b' can\r\n'
b'4\r\n'
b' you\r\n'
b'4\r\n'
b' see\r\n'
b'3\r\n'
b' by\r\n'
b'4\r\n'
b' the\r\n'
b'8\r\n'
b' dawns\'\n\r\n'
b'0\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
self.assertFalse(mock_fallocate.mock_calls)
fd.write(b'GET /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'Content-Length: 33', headers.split(b'\r\n'))
self.assertEqual(b"oh say can you see by the dawns'\n", fd.read(33))
@unpatch_policies
def test_PUT_message_length_using_both_with_crazy_meta(self):
prolis = _test_sockets[0]
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(b'PUT /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'X-Storage-Token: t\r\n'
b'Content-Type: application/octet-stream\r\n'
b'Content-Length: 33\r\n'
b'Transfer-Encoding: chunked\r\n\r\n'
b'2\r\n'
b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d\r\n'
b'Expect: 100-continue\r\n'
b'Transfer-Encoding: chunked\r\n\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 100 Continue'
self.assertEqual(headers[:len(exp)], exp)
# Since we got our 100 Continue, now we can send the body
fd.write(b'2\r\n'
b'oh\r\n'
b'4\r\n'
b' say\r\n'
@ -3165,6 +3254,20 @@ class TestReplicatedObjectController(
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.write(b'GET /v1/a/c/o.chunked HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: close\r\n'
b'X-Storage-Token: t\r\n'
b'\r\n')
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'Content-Length: 33', headers.split(b'\r\n'))
self.assertIn(b'X-Object-Meta-\xf0\x9f\x8c\xb4: \xf0\x9f\x91\x8d',
headers.split(b'\r\n'))
self.assertEqual(b"oh say can you see by the dawns'\n", fd.read(33))
@unpatch_policies
def test_PUT_bad_message_length(self):
prolis = _test_sockets[0]
@ -4282,6 +4385,31 @@ class TestReplicatedObjectController(
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
# chunked transfers basically go "until I stop sending bytes"
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201) # ... so, no disconnect
# chunked transfer trumps content-length
req = Request.blank('/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': DisconnectedBody()},
headers={'Content-Length': '4',
'Transfer-Encoding': 'chunked',
'Content-Type': 'text/plain'})
self.app.update_request(req)
set_http_connect(200, 200, 201, 201, 201)
# acct cont obj obj obj
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
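Both new cases lean on RFC 7230 section 3.3.3: when a message carries both Transfer-Encoding and Content-Length, Transfer-Encoding governs the body length and Content-Length is ignored, which is why the second PUT succeeds instead of waiting on four bytes that never arrive. A self-contained sketch of the precedence (trailers ignored for brevity):

def read_body(headers, rfile):
    # RFC 7230 section 3.3.3: when both framing headers are present,
    # Transfer-Encoding wins and Content-Length is ignored
    if 'chunked' in headers.get('Transfer-Encoding', ''):
        chunks = []
        while True:
            size = int(rfile.readline().split(b';')[0], 16)
            if size == 0:
                rfile.readline()     # blank line after the last chunk
                return b''.join(chunks)
            chunks.append(rfile.read(size))
            rfile.readline()         # CRLF that ends each chunk
    return rfile.read(int(headers.get('Content-Length', 0)))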
def test_node_read_timeout(self):
with save_globals():
self.app.account_ring.get_nodes('account')
@ -5332,7 +5460,7 @@ class TestReplicatedObjectController(
exp = b'HTTP/1.1 200'
self.assertEqual(headers[:len(exp)], exp)
self.assertIn(b'\r\nX-Object-Meta-%s: %s\r\n' %
(quote(ustr_short).lower().encode('ascii'),
(quote(ustr_short).title().encode('ascii'),
quote(ustr).encode('ascii')), headers)
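The switch from .lower() to .title() tracks how header names come back on the wire: str.title() capitalizes the first letter of every alphabetic run, which also upcases the hex digits of percent-escapes, e.g.:

>>> 'x-object-meta-%e2%98%83'.title()
'X-Object-Meta-%E2%98%83'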
@unpatch_policies
@ -7188,6 +7316,104 @@ class BaseTestECObjectController(BaseTestObjectController):
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
# try it chunked
_test_servers[0].logger.clear()
chunk = 'a' * 64 * 2 ** 10
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n' % (len(chunk), chunk)).encode('ascii'))
# no zero-byte end chunk
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
_test_servers[0].logger.clear()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n' % (len(chunk), chunk)).encode('ascii')[:-10])
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
_test_servers[0].logger.clear()
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n' % len(chunk)).encode('ascii'))
fd.flush()
fd.close()
sock.close()
# sleep to trampoline enough
condition = \
lambda: _test_servers[0].logger.get_lines_for_level('warning')
self._sleep_enough(condition)
expected = ['Client disconnected without sending last chunk']
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual(expected, warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
# Do a valid PUT with conflicting length headers
_test_servers[0].logger.clear()
chunk = 'a' * 64 * 2 ** 10
sock = connect_tcp(('localhost', prolis.getsockname()[1]))
fd = sock.makefile('rwb')
fd.write(('PUT /v1/a/%s-discon/test HTTP/1.1\r\n'
'Host: localhost\r\n'
'Transfer-Encoding: chunked\r\n'
'Content-Length: 999999999999999999999999\r\n'
'X-Storage-Token: t\r\n'
'Content-Type: donuts\r\n'
'\r\n' % (self.ec_policy.name,)).encode('ascii'))
fd.write(('%x\r\n%s\r\n0\r\n\r\n' % (
len(chunk), chunk)).encode('ascii'))
# this time the zero-byte end chunk is sent, so the upload completes
fd.flush()
headers = readuntil2crlfs(fd)
exp = b'HTTP/1.1 201'
self.assertEqual(headers[:len(exp)], exp)
fd.close()
sock.close()
warns = _test_servers[0].logger.get_lines_for_level('warning')
self.assertEqual([], warns)
errors = _test_servers[0].logger.get_lines_for_level('error')
self.assertEqual([], errors)
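All four socket scripts above hand-roll the chunked wire format: each chunk is framed as a hex length, CRLF, the payload, CRLF, and a complete body ends with the zero-length chunk. A small sketch of an encoder producing the same bytes the tests write:

def encode_chunked(chunks):
    # frame: <hex size>\r\n<payload>\r\n ... terminated by 0\r\n\r\n
    body = b''.join(b'%x\r\n%s\r\n' % (len(c), c) for c in chunks)
    return body + b'0\r\n\r\n'

# e.g. encode_chunked([b'oh', b' say']) == b'2\r\noh\r\n4\r\n say\r\n0\r\n\r\n'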
class TestECObjectController(BaseTestECObjectController, unittest.TestCase):
def setUp(self):

View File

@ -14,16 +14,13 @@
# limitations under the License.
- hosts: all
tasks:
# TODO: remove this task when s3api is in the pipeline by default
- name: Add s3api in proxy-server.conf
replace:
path: "/etc/swift/proxy-server.conf"
regexp: "container_sync tempauth"
replace: "container_sync s3api tempauth"
- name: Shutdown main swift services
shell: "swift-init stop main"
become: true
ignore_errors: true
- name: Starts main swift servers
shell: "swift-init main start"
- name: Start main swift services
shell: "swift-init start main"
become: true
- name: Clone s3compat repository
@ -35,17 +32,18 @@
pip:
requirements: "{{ ansible_env.HOME }}/s3compat/requirements.txt"
virtualenv: "{{ ansible_env.HOME }}/venv"
virtualenv_python: python2
become: true
- name: Run s3compat tests
shell: '{{ ansible_env.HOME }}/venv/bin/python {{ ansible_env.HOME }}/s3compat/bin/run_ceph_tests.py "$@" || true'
shell: '{{ ansible_env.HOME }}/venv/bin/python {{ ansible_env.HOME }}/s3compat/bin/run_ceph_tests.py "$@"'
ignore_errors: true
environment:
S3TEST_CONF: "{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/tools/playbooks/ceph-s3tests/ceph-s3.conf"
S3ACL: "true"
DNS_BUCKET_NAMES: "false"
CHECK_BUCKET_OWNER: "true"
args:
chdir: '{{ ansible_env.HOME }}/s3compat'
tags:
- tests
- name: Show report
shell: |
@ -58,3 +56,5 @@
args:
chdir:
"{{ ansible_env.HOME }}/s3compat"
tags:
- tests

View File

@ -21,7 +21,13 @@
- name: "checkout a previous version: {{ previous_swift_version | default(latest_swift_release.stdout) }}"
shell:
cmd: git checkout {{ previous_swift_verion | default(latest_swift_release.stdout) }} -b previous_swift_version
cmd: git checkout {{ previous_swift_version | default(latest_swift_release.stdout) }} -b previous_swift_version
executable: /bin/bash
chdir: '{{ zuul.project.src_dir }}'
- name: confirm checked out version
shell:
cmd: git describe
executable: /bin/bash
chdir: '{{ zuul.project.src_dir }}'
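The added confirmation step is cheap insurance against exactly the kind of typo the previous line fixes: git describe prints the nearest annotated tag plus any commit distance (output of the shape 2.22.0-42-gabc1234, to give a hypothetical example), so the job log records which version the rolling-upgrade test actually started from.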

View File

@ -0,0 +1,23 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
tasks:
# TODO: remove this task when s3api is in the pipeline by default
- name: Add s3api in proxy-server.conf
replace:
path: "/etc/swift/proxy-server.conf"
regexp: "container_sync tempauth"
replace: "container_sync s3api tempauth"
become: true
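A note on the replace task: Ansible's replace module rewrites every match of the regexp in the file, and it is naturally idempotent here, since once s3api sits between container_sync and tempauth the pattern no longer matches and reruns become no-ops.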

40
tox.ini
View File

@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir}
NOSE_WITH_COVERAGE=1
NOSE_COVER_BRANCHES=1
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands = find . ( -type f -o -type l ) -name "*.py[co]" -delete
@ -46,48 +46,29 @@ commands = ./.functests {posargs}
[testenv:func-py3]
basepython = python3
commands =
nosetests {posargs: \
test/functional/s3api/test_acl.py \
test/functional/s3api/test_multi_delete.py \
test/functional/s3api/test_multi_upload.py \
test/functional/s3api/test_object.py \
test/functional/s3api/test_presigned.py \
test/functional/s3api/test_service.py \
test/functional/test_access_control.py \
test/functional/test_account.py \
test/functional/test_container.py \
test/functional/test_dlo.py \
test/functional/test_domain_remap.py \
test/functional/test_object.py \
test/functional/test_slo.py \
test/functional/test_staticweb.py \
test/functional/test_symlink.py \
test/functional/test_tempurl.py \
test/functional/test_versioned_writes.py \
test/functional/tests.py}
commands = ./.functests {posargs}
[testenv:func-ec-py3]
basepython = python3
commands = {[testenv:func-py3]commands}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec
[testenv:func-s3api-py3]
basepython = python3
commands = {[testenv:func-py3]commands}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=s3api
[testenv:func-encryption-py3]
basepython = python3
commands = {[testenv:func-py3]commands}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption
[testenv:func-domain-remap-staticweb-py3]
basepython = python3
commands = {[testenv:func-py3]commands}
commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=domain_remap_staticweb
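With every func env now delegating to ./.functests, test selection flows through tox's positional arguments: anything after -- on the command line, e.g. tox -e func-py3 -- test/functional/tests.py, is substituted for {posargs} and handed to the .functests script.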
@ -174,3 +155,12 @@ deps =
-c{toxinidir}/lower-constraints.txt
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
[testenv:pdf-docs]
basepython = python3
deps = {[testenv:docs]deps}
whitelist_externals =
make
commands =
sphinx-build -W -b latex doc/source doc/build/pdf
make -C doc/build/pdf