Merge branch 'master' into feature/deep

Change-Id: I2db1ff7120b582d4276ece02b232ffc06507753e
This commit is contained in:
Tim Burke 2017-10-13 17:58:58 +00:00
commit ded0343892
23 changed files with 698 additions and 285 deletions

View File

@ -44,9 +44,9 @@ You can also feed a list of urls to the script through stdin.
Examples!
%(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076
%(cmd)s SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container/object
%(cmd)s -e errors.txt SOSO_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container
%(cmd)s AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076
%(cmd)s AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container/object
%(cmd)s -e errors.txt AUTH_88ad0b83-b2c5-4fa1-b2d6-60c597202076/container
%(cmd)s < errors.txt
%(cmd)s -c 25 -d < errors.txt
""" % {'cmd': sys.argv[0]}
@ -108,7 +108,7 @@ class Auditor(object):
consistent = False
print(' MD5 does not match etag for "%s" on %s/%s'
% (path, node['ip'], node['device']))
etags.append(resp.getheader('ETag'))
etags.append((resp.getheader('ETag'), node))
else:
conn = http_connect(node['ip'], node['port'],
node['device'], part, 'HEAD',
@ -120,7 +120,7 @@ class Auditor(object):
print(' Bad status HEADing object "%s" on %s/%s'
% (path, node['ip'], node['device']))
continue
etags.append(resp.getheader('ETag'))
etags.append((resp.getheader('ETag'), node))
except Exception:
self.object_exceptions += 1
consistent = False
@ -131,8 +131,8 @@ class Auditor(object):
consistent = False
print(" Failed fo fetch object %s at all!" % path)
elif hash:
for etag in etags:
if resp.getheader('ETag').strip('"') != hash:
for etag, node in etags:
if etag.strip('"') != hash:
consistent = False
self.object_checksum_mismatch += 1
print(' ETag mismatch for "%s" on %s/%s'

View File

@ -0,0 +1,138 @@
.\"
.\" Author: HCLTech-SSW <hcl_ss_oss@hcl.com>
.\" Copyright (c) 2010-2017 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\" http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH container-sync-realms.conf 5 "10/09/2017" "Linux" "OpenStack Swift"
.SH NAME
.LP
.B container-sync-realms.conf
\- configuration file for the OpenStack Swift container sync realms
.SH SYNOPSIS
.LP
.B container-sync-realms.conf
.SH DESCRIPTION
.PP
This is the configuration file used by OpenStack Swift object storage to perform container to container
synchronization. This configuration file is used to configure clusters to allow/accept sync
requests to/from other clusters. Using this configuration file, the user specifies where
to sync their container to along with a secret synchronization key.
You can find more information about container to container synchronization at
\fIhttps://docs.openstack.org/swift/latest/overview_container_sync.html\fR
The configuration file follows the python-pastedeploy syntax. The file is divided
into sections, which are enclosed by square brackets. Each section will contain a
certain number of key/value parameters which are described later.
Any line that begins with a '#' symbol is ignored.
You can find more information about python-pastedeploy configuration format at
\fIhttp://pythonpaste.org/deploy/#config-format\fR
.SH GLOBAL SECTION
.PD 1
.RS 0
This is indicated by section named [DEFAULT]. Below are the parameters that
are acceptable within this section.
.IP "\fBmtime_check_interval\fR"
The number of seconds between checking the modified time of this config file for changes
and therefore reloading it. The default value is 300.
.RE
.PD
.SH REALM SECTIONS
.PD 1
.RS 0
Each section name is the name of a sync realm, for example [realm1].
A sync realm is a set of clusters that have agreed to allow container syncing with each other.
Realm names will be considered case insensitive. Below are the parameters that are acceptable
within this section.
.IP "\fBcluster_clustername1\fR"
Any value in the realm section whose name begins with cluster_ will indicate the name and
endpoint of a cluster and will be used by external users in their container's
X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name".
The Realm and cluster names are considered to be case insensitive.
.IP "\fBcluster_clustername2\fR"
Any value in the realm section whose name begins with cluster_ will indicate the name and
endpoint of a cluster and will be used by external users in their container's
X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name".
The Realm and cluster names are considered to be case insensitive.
The endpoint is what the container sync daemon will use when sending out
requests to that cluster. Keep in mind this endpoint must be reachable by all
container servers, since that is where the container sync daemon runs. Note
that the endpoint ends with /v1/ and that the container sync daemon will then
add the account/container/obj name after that.
.IP "\fBkey\fR"
The key is the overall cluster-to-cluster key used in combination with the external
users' key that they set on their containers' X-Container-Sync-Key metadata header
values. These keys will be used to sign each request the container sync daemon makes
and used to validate each incoming container sync request.
.IP "\fBkey2\fR"
The key2 is optional and is an additional key incoming requests will be checked
against. This is so you can rotate keys if you wish; you move the existing
key to key2 and make a new key value.
.RE
.PD
.SH EXAMPLE
.nf
.RS 0
[DEFAULT]
mtime_check_interval = 300
[realm1]
key = realm1key
key2 = realm1key2
cluster_clustername1 = https://host1/v1/
cluster_clustername2 = https://host2/v1/
[realm2]
key = realm2key
key2 = realm2key2
cluster_clustername3 = https://host3/v1/
cluster_clustername4 = https://host4/v1/
.RE
.fi
.SH DOCUMENTATION
.LP
More in-depth documentation regarding
.BI swift-container-sync
and also about OpenStack Swift as a whole can be found at
.BI https://docs.openstack.org/swift/latest/overview_container_sync.html
and
.BI https://docs.openstack.org/swift/latest/
.SH "SEE ALSO"
.BR swift-container-sync(1)

View File

@ -46,9 +46,9 @@ Also download files and verify md5
.SH EXAMPLES
.nf
/usr/bin/swift\-account\-audit\/ SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076
/usr/bin/swift\-account\-audit\/ SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container/object
/usr/bin/swift\-account\-audit\/ \fB\-e\fR errors.txt SOSO_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container
/usr/bin/swift\-account\-audit\/ AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076
/usr/bin/swift\-account\-audit\/ AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container/object
/usr/bin/swift\-account\-audit\/ \fB\-e\fR errors.txt AUTH_88ad0b83\-b2c5\-4fa1\-b2d6\-60c597202076/container
/usr/bin/swift\-account\-audit\/ < errors.txt
/usr/bin/swift\-account\-audit\/ \fB\-c\fR 25 \fB\-d\fR < errors.txt
.fi

View File

@ -28,6 +28,7 @@ pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = true
# See proxy-server.conf-sample for options
[filter:cache]

View File

@ -544,6 +544,8 @@ use = egg:swift#domain_remap
# can be specified separated by a comma
# storage_domain = example.com
# Specify a root path part that will be added to the start of paths if not
# already present.
# path_root = v1
# Browsers can convert a host header to lowercase, so check that reseller
@ -556,6 +558,14 @@ use = egg:swift#domain_remap
# reseller_prefixes = AUTH
# default_reseller_prefix =
# Enable legacy remapping behavior for versioned path requests:
# c.a.example.com/v1/o -> /v1/AUTH_a/c/o
# instead of
# c.a.example.com/v1/o -> /v1/AUTH_a/c/v1/o
# ... by default all path parts after a remapped domain are considered part of
# the object name with no special case for the path "v1"
# mangle_client_paths = False
[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:

View File

@ -0,0 +1,67 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Swift Release Notes 2.15.2\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2017-10-10 22:05+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-10-05 03:59+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en-GB\n"
"X-Generator: Zanata 3.9.6\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "2.10.0"
msgstr "2.10.0"
msgid "2.10.1"
msgstr "2.10.1"
msgid "2.10.2"
msgstr "2.10.2"
msgid "2.11.0"
msgstr "2.11.0"
msgid "2.12.0"
msgstr "2.12.0"
msgid "2.13.0"
msgstr "2.13.0"
msgid "2.13.1"
msgstr "2.13.1"
msgid "2.14.0"
msgstr "2.14.0"
msgid "2.15.0"
msgstr "2.15.0"
msgid "2.15.1"
msgstr "2.15.1"
msgid ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
msgstr ""
"A PUT or POST to a container will now update the container's Last-Modified "
"time, and that value will be included in a GET/HEAD response."
msgid "Current (Unreleased) Release Notes"
msgstr "Current (Unreleased) Release Notes"
msgid "Swift Release Notes"
msgstr "Swift Release Notes"
msgid "domain_remap now accepts a list of domains in \"storage_domain\"."
msgstr "domain_remap now accepts a list of domains in \"storage_domain\"."
msgid "name_check and cname_lookup keys have been added to `/info`."
msgstr "name_check and cname_lookup keys have been added to `/info`."
msgid "swift-recon now respects storage policy aliases."
msgstr "swift-recon now respects storage policy aliases."

View File

@ -218,7 +218,6 @@ def _parse_set_weight_values(argvish):
# --options format,
# but not both. If both are specified, raise an error.
try:
devs = []
if not new_cmd_format:
if len(args) % 2 != 0:
print(Commands.set_weight.__doc__.strip())
@ -227,7 +226,7 @@ def _parse_set_weight_values(argvish):
devs_and_weights = izip(islice(argvish, 0, len(argvish), 2),
islice(argvish, 1, len(argvish), 2))
for devstr, weightstr in devs_and_weights:
devs.extend(builder.search_devs(
devs = (builder.search_devs(
parse_search_value(devstr)) or [])
weight = float(weightstr)
_set_weight_values(devs, weight, opts)
@ -236,7 +235,7 @@ def _parse_set_weight_values(argvish):
print(Commands.set_weight.__doc__.strip())
exit(EXIT_ERROR)
devs.extend(builder.search_devs(
devs = (builder.search_devs(
parse_search_values_from_opts(opts)) or [])
weight = float(args[0])
_set_weight_values(devs, weight, opts)

View File

@ -17,42 +17,91 @@
"""
Domain Remap Middleware
Middleware that translates container and account parts of a domain to
path parameters that the proxy server understands.
Middleware that translates container and account parts of a domain to path
parameters that the proxy server understands.
container.account.storageurl/object gets translated to
container.account.storageurl/path_root/account/container/object
Translation is only performed when the request URL's host domain matches one of
a list of domains. This list may be configured by the option
``storage_domain``, and defaults to the single domain ``example.com``.
account.storageurl/path_root/container/object gets translated to
account.storageurl/path_root/account/container/object
If not already present, a configurable ``path_root``, which defaults to ``v1``,
will be added to the start of the translated path.
Browsers can convert a host header to lowercase, so check that reseller
prefix on the account is the correct case. This is done by comparing the
items in the reseller_prefixes config option to the found prefix. If they
match except for case, the item from reseller_prefixes will be used
instead of the found reseller prefix. When none match, the default reseller
prefix is used. When no default reseller prefix is configured, any request with
an account prefix not in that list will be ignored by this middleware.
reseller_prefixes defaults to 'AUTH'.
For example, with the default configuration::
container.AUTH-account.example.com/object
container.AUTH-account.example.com/v1/object
would both be translated to::
container.AUTH-account.example.com/v1/AUTH_account/container/object
and::
AUTH-account.example.com/container/object
AUTH-account.example.com/v1/container/object
would both be translated to::
AUTH-account.example.com/v1/AUTH_account/container/object
Additionally, translation is only performed when the account name in the
translated path starts with a reseller prefix matching one of a list configured
by the option ``reseller_prefixes``, or when no match is found but a
``default_reseller_prefix`` has been configured.
The ``reseller_prefixes`` list defaults to the single prefix ``AUTH``. The
``default_reseller_prefix`` is not configured by default.
Browsers can convert a host header to lowercase, so the middleware checks that
the reseller prefix on the account name is the correct case. This is done by
comparing the items in the ``reseller_prefixes`` config option to the found
prefix. If they match except for case, the item from ``reseller_prefixes`` will
be used instead of the found reseller prefix. The middleware will also replace
any hyphen ('-') in the account name with an underscore ('_').
For example, with the default configuration::
auth-account.example.com/container/object
AUTH-account.example.com/container/object
auth_account.example.com/container/object
AUTH_account.example.com/container/object
would all be translated to::
<unchanged>.example.com/v1/AUTH_account/container/object
When no match is found in ``reseller_prefixes``, the
``default_reseller_prefix`` config option is used. When no
``default_reseller_prefix`` is configured, any request with an account prefix
not in the ``reseller_prefixes`` list will be ignored by this middleware.
For example, with ``default_reseller_prefix = AUTH``::
account.example.com/container/object
would be translated to::
account.example.com/v1/AUTH_account/container/object
Note that this middleware requires that container names and account names
(except as described above) must be DNS-compatible. This means that the
account name created in the system and the containers created by users
cannot exceed 63 characters or have UTF-8 characters. These are
restrictions over and above what swift requires and are not explicitly
checked. Simply put, the this middleware will do a best-effort attempt to
derive account and container names from elements in the domain name and
put those derived values into the URL path (leaving the Host header
unchanged).
(except as described above) must be DNS-compatible. This means that the account
name created in the system and the containers created by users cannot exceed 63
characters or have UTF-8 characters. These are restrictions over and above what
Swift requires and are not explicitly checked. Simply put, this middleware
will do a best-effort attempt to derive account and container names from
elements in the domain name and put those derived values into the URL path
(leaving the ``Host`` header unchanged).
Also note that using container sync with remapped domain names is not
advised. With container sync, you should use the true storage end points as
sync destinations.
Also note that using :doc:`overview_container_sync` with remapped domain names
is not advised. With :doc:`overview_container_sync`, you should use the true
storage end points as sync destinations.
"""
from swift.common.middleware import RewriteContext
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import list_from_csv, register_swift_info
from swift.common.utils import config_true_value, list_from_csv, \
register_swift_info
class _DomainRemapContext(RewriteContext):
@ -78,12 +127,14 @@ class DomainRemapMiddleware(object):
if not s.startswith('.')]
self.storage_domain += [s for s in list_from_csv(storage_domain)
if s.startswith('.')]
self.path_root = '/' + conf.get('path_root', 'v1').strip('/')
self.path_root = conf.get('path_root', 'v1').strip('/') + '/'
prefixes = conf.get('reseller_prefixes', 'AUTH')
self.reseller_prefixes = list_from_csv(prefixes)
self.reseller_prefixes_lower = [x.lower()
for x in self.reseller_prefixes]
self.default_reseller_prefix = conf.get('default_reseller_prefix')
self.mangle_client_paths = config_true_value(
conf.get('mangle_client_paths'))
def __call__(self, env, start_response):
if not self.storage_domain:
@ -129,14 +180,14 @@ class DomainRemapMiddleware(object):
# account prefix is not in config list. bail.
return self.app(env, start_response)
requested_path = path = env['PATH_INFO']
new_path_parts = [self.path_root, account]
requested_path = env['PATH_INFO']
path = requested_path[1:]
new_path_parts = ['', self.path_root[:-1], account]
if container:
new_path_parts.append(container)
if path.startswith(self.path_root):
if self.mangle_client_paths and (path + '/').startswith(
self.path_root):
path = path[len(self.path_root):]
if path.startswith('/'):
path = path[1:]
new_path_parts.append(path)
new_path = '/'.join(new_path_parts)
env['PATH_INFO'] = new_path

View File

@ -27,7 +27,7 @@ Uploading the Manifest
----------------------
After the user has uploaded the objects to be concatenated, a manifest is
uploaded. The request must be a PUT with the query parameter::
uploaded. The request must be a ``PUT`` with the query parameter::
?multipart-manifest=put
@ -47,52 +47,49 @@ range (optional) the (inclusive) range within the object to
use as a segment. If omitted, the entire object is used.
=========== ========================================================
The format of the list will be:
.. code::
The format of the list will be::
[{"path": "/cont/object",
"etag": "etagoftheobjectsegment",
"size_bytes": 10485760,
"range": "1048576-2097151"}, ...]
"range": "1048576-2097151"},
...]
The number of object segments is limited to a configurable amount, default
1000. Each segment must be at least 1 byte. On upload, the middleware will
head every segment passed in to verify:
1. the segment exists (i.e. the HEAD was successful);
2. the segment meets minimum size requirements;
3. if the user provided a non-null etag, the etag matches;
4. if the user provided a non-null size_bytes, the size_bytes matches; and
5. if the user provided a range, it is a singular, syntactically correct range
that is satisfiable given the size of the object.
1. the segment exists (i.e. the ``HEAD`` was successful);
2. the segment meets minimum size requirements;
3. if the user provided a non-null ``etag``, the etag matches;
4. if the user provided a non-null ``size_bytes``, the size_bytes matches; and
5. if the user provided a ``range``, it is a singular, syntactically correct
range that is satisfiable given the size of the object.
Note that the etag and size_bytes keys are optional; if omitted, the
Note that the ``etag`` and ``size_bytes`` keys are optional; if omitted, the
verification is not performed. If any of the objects fail to verify (not
found, size/etag mismatch, below minimum size, invalid range) then the user
will receive a 4xx error response. If everything does match, the user will
receive a 2xx response and the SLO object is ready for downloading.
Behind the scenes, on success, a json manifest generated from the user input is
sent to object servers with an extra "X-Static-Large-Object: True" header
and a modified Content-Type. The items in this manifest will include the etag
and size_bytes for each segment, regardless of whether the client specified
them for verification. The parameter: swift_bytes=$total_size will be
appended to the existing Content-Type, where total_size is the sum of all
the included segments' size_bytes. This extra parameter will be hidden from
the user.
Behind the scenes, on success, a JSON manifest generated from the user input is
sent to object servers with an extra ``X-Static-Large-Object: True`` header
and a modified ``Content-Type``. The items in this manifest will include the
``etag`` and ``size_bytes`` for each segment, regardless of whether the client
specified them for verification. The parameter ``swift_bytes=$total_size`` will
be appended to the existing ``Content-Type``, where ``$total_size`` is the sum
of all the included segments' ``size_bytes``. This extra parameter will be
hidden from the user.
Manifest files can reference objects in separate containers, which will improve
concurrent upload speed. Objects can be referenced by multiple manifests. The
segments of a SLO manifest can even be other SLO manifests. Treat them as any
other object i.e., use the Etag and Content-Length given on the PUT of the
sub-SLO in the manifest to the parent SLO.
other object i.e., use the ``Etag`` and ``Content-Length`` given on the ``PUT``
of the sub-SLO in the manifest to the parent SLO.
While uploading a manifest, a user can send Etag for verification. It needs to
be md5 of the segments' etags, if there is no range specified. For example, if
the manifest to be uploaded looks like this:
.. code::
While uploading a manifest, a user can send ``Etag`` for verification. It needs
to be md5 of the segments' etags, if there is no range specified. For example,
if the manifest to be uploaded looks like this::
[{"path": "/cont/object1",
"etag": "etagoftheobjectsegment1",
@ -101,16 +98,12 @@ the manifest to be uploaded looks like this:
"etag": "etagoftheobjectsegment2",
"size_bytes": 10485760}]
The Etag of the above manifest would be md5 of etagoftheobjectsegment1 and
etagoftheobjectsegment2. This could be computed in the following way:
.. code::
The Etag of the above manifest would be md5 of ``etagoftheobjectsegment1`` and
``etagoftheobjectsegment2``. This could be computed in the following way::
echo -n 'etagoftheobjectsegment1etagoftheobjectsegment2' | md5sum
If a manifest to be uploaded with a segment range looks like this:
.. code::
If a manifest to be uploaded with a segment range looks like this::
[{"path": "/cont/object1",
"etag": "etagoftheobjectsegmentone",
@ -122,10 +115,8 @@ If a manifest to be uploaded with a segment range looks like this:
"range": "3-4"}]
While computing the Etag of the above manifest, internally each segment's etag
will be taken in the form of 'etagvalue:rangevalue;'. Hence the Etag of the
above manifest would be:
.. code::
will be taken in the form of ``etagvalue:rangevalue;``. Hence the Etag of the
above manifest would be::
echo -n 'etagoftheobjectsegmentone:1-2;etagoftheobjectsegmenttwo:3-4;' \
| md5sum
@ -136,65 +127,65 @@ Range Specification
-------------------
Users now have the ability to specify ranges for SLO segments.
Users can now include an optional 'range' field in segment descriptions
Users can now include an optional ``range`` field in segment descriptions
to specify which bytes from the underlying object should be used for the
segment data. Only one range may be specified per segment.
.. note::
.. note::
The 'etag' and 'size_bytes' fields still describe the backing object as a
whole.
The ``etag`` and ``size_bytes`` fields still describe the backing object
as a whole.
If a user uploads this manifest:
If a user uploads this manifest::
.. code::
[{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "0-1048576"},
{"path": "/con/obj_seg_2", "size_bytes": 2097152,
"range": "512-1550000"},
{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "-2048"}]
[{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "0-1048576"},
{"path": "/con/obj_seg_2", "size_bytes": 2097152,
"range": "512-1550000"},
{"path": "/con/obj_seg_1", "size_bytes": 2097152, "range": "-2048"}]
The segment will consist of the first 1048576 bytes of /con/obj_seg_1,
followed by bytes 513 through 1550000 (inclusive) of /con/obj_seg_2, and
finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of
/con/obj_seg_1.
.. note::
.. note::
The minimum sized range is 1 byte. This is the same as the minimum
segment size.
The minimum sized range is 1 byte. This is the same as the minimum
segment size.
-------------------------
Retrieving a Large Object
-------------------------
A GET request to the manifest object will return the concatenation of the
A ``GET`` request to the manifest object will return the concatenation of the
objects from the manifest much like DLO. If any of the segments from the
manifest are not found or their Etag/Content Length have changed since upload,
the connection will drop. In this case a 409 Conflict will be logged in the
proxy logs and the user will receive incomplete results. Note that this will be
enforced regardless of whether the user performed per-segment validation during
upload.
manifest are not found or their ``Etag``/``Content-Length`` have changed since
upload, the connection will drop. In this case a ``409 Conflict`` will be
logged in the proxy logs and the user will receive incomplete results. Note
that this will be enforced regardless of whether the user performed per-segment
validation during upload.
The headers from this GET or HEAD request will return the metadata attached
to the manifest object itself with some exceptions::
The headers from this ``GET`` or ``HEAD`` request will return the metadata
attached to the manifest object itself with some exceptions:
Content-Length: the total size of the SLO (the sum of the sizes of
the segments in the manifest)
X-Static-Large-Object: True
Etag: the etag of the SLO (generated the same way as DLO)
===================== ==================================================
Header Value
===================== ==================================================
Content-Length the total size of the SLO (the sum of the sizes of
the segments in the manifest)
X-Static-Large-Object the string "True"
Etag the etag of the SLO (generated the same way as DLO)
===================== ==================================================
A GET request with the query parameter::
A ``GET`` request with the query parameter::
?multipart-manifest=get
will return a transformed version of the original manifest, containing
additional fields and different key names. For example, the first manifest in
the example above would look like this:
.. code::
the example above would look like this::
[{"name": "/cont/object",
"hash": "etagoftheobjectsegment",
@ -222,9 +213,10 @@ left to the user to use caution in handling the segments.
Deleting a Large Object
-----------------------
A DELETE request will just delete the manifest object itself.
A ``DELETE`` request will just delete the manifest object itself. The segment
data referenced by the manifest will remain unchanged.
A DELETE with a query parameter::
A ``DELETE`` with a query parameter::
?multipart-manifest=delete
@ -235,22 +227,22 @@ itself. The failure response will be similar to the bulk delete middleware.
Modifying a Large Object
------------------------
PUTs / POSTs will work as expected, PUTs will just overwrite the manifest
object for example.
``PUT`` and ``POST`` requests will work as expected; ``PUT``\s will just
overwrite the manifest object for example.
------------------
Container Listings
------------------
In a container listing the size listed for SLO manifest objects will be the
total_size of the concatenated segments in the manifest. The overall
X-Container-Bytes-Used for the container (and subsequently for the account)
will not reflect total_size of the manifest but the actual size of the json
``total_size`` of the concatenated segments in the manifest. The overall
``X-Container-Bytes-Used`` for the container (and subsequently for the account)
will not reflect ``total_size`` of the manifest but the actual size of the JSON
data stored. The reason for this somewhat confusing discrepancy is we want the
container listing to reflect the size of the manifest object when it is
downloaded. We do not, however, want to count the bytes-used twice (for both
the manifest and the segments it's referring to) in the container and account
metadata which can be used for stats purposes.
metadata which can be used for stats and billing purposes.
"""
from collections import defaultdict
@ -296,20 +288,20 @@ def parse_and_validate_input(req_body, req_path):
Given a request body, parses it and returns a list of dictionaries.
The output structure is nearly the same as the input structure, but it
is not an exact copy. Given a valid input dictionary `d_in`, its
corresponding output dictionary `d_out` will be as follows:
is not an exact copy. Given a valid input dictionary ``d_in``, its
corresponding output dictionary ``d_out`` will be as follows:
* d_out['etag'] == d_in['etag']
* d_out['etag'] == d_in['etag']
* d_out['path'] == d_in['path']
* d_out['path'] == d_in['path']
* d_in['size_bytes'] can be a string ("12") or an integer (12), but
d_out['size_bytes'] is an integer.
* d_in['size_bytes'] can be a string ("12") or an integer (12), but
d_out['size_bytes'] is an integer.
* (optional) d_in['range'] is a string of the form "M-N", "M-", or
"-N", where M and N are non-negative integers. d_out['range'] is the
corresponding swob.Range object. If d_in does not have a key
'range', neither will d_out.
* (optional) d_in['range'] is a string of the form "M-N", "M-", or
"-N", where M and N are non-negative integers. d_out['range'] is the
corresponding swob.Range object. If d_in does not have a key
'range', neither will d_out.
:raises HTTPException: on parse errors or semantic errors (e.g. bogus
JSON structure, syntactically invalid ranges)
@ -435,7 +427,7 @@ class SloGetContext(WSGIContext):
agent='%(orig)s SLO MultipartGET', swift_source='SLO')
sub_resp = sub_req.get_response(self.slo.app)
if not is_success(sub_resp.status_int):
if not sub_resp.is_success:
close_if_possible(sub_resp.app_iter)
raise ListingIterError(
'ERROR: while fetching %s, GET of submanifest %s '
@ -615,8 +607,9 @@ class SloGetContext(WSGIContext):
thing with them. Returns an iterator suitable for sending up the WSGI
chain.
:param req: swob.Request object; is a GET or HEAD request aimed at
what may be a static large object manifest (or may not).
:param req: :class:`~swift.common.swob.Request` object; is a ``GET`` or
``HEAD`` request aimed at what may (or may not) be a static
large object manifest.
:param start_response: WSGI start_response callable
"""
if req.params.get('multipart-manifest') != 'get':
@ -898,7 +891,9 @@ class StaticLargeObject(object):
The response body (only on GET, of course) will consist of the
concatenation of the segments.
:params req: a swob.Request with a path referencing an object
:param req: a :class:`~swift.common.swob.Request` with a path
referencing an object
:param start_response: WSGI start_response callable
:raises HttpException: on errors
"""
return SloGetContext(self).handle_slo_get_or_head(req, start_response)
@ -910,13 +905,11 @@ class StaticLargeObject(object):
save a manifest generated from the user input. Uses WSGIContext to
call self and start_response and returns a WSGI iterator.
:params req: a swob.Request with an obj in path
:param req: a :class:`~swift.common.swob.Request` with an obj in path
:param start_response: WSGI start_response callable
:raises HttpException: on errors
"""
try:
vrs, account, container, obj = req.split_path(1, 4, True)
except ValueError:
return self.app(req.environ, start_response)
vrs, account, container, obj = req.split_path(4, rest_with_last=True)
if req.content_length > self.max_manifest_size:
raise HTTPRequestEntityTooLarge(
"Manifest File > %d bytes" % self.max_manifest_size)
@ -1073,7 +1066,8 @@ class StaticLargeObject(object):
A generator function to be used to delete all the segments and
sub-segments referenced in a manifest.
:params req: a swob.Request with an SLO manifest in path
:param req: a :class:`~swift.common.swob.Request` with an SLO manifest
in path
:raises HTTPPreconditionFailed: on invalid UTF8 in request path
:raises HTTPBadRequest: on too many buffered sub segments and
on invalid SLO manifest path
@ -1109,8 +1103,12 @@ class StaticLargeObject(object):
def get_slo_segments(self, obj_name, req):
"""
Performs a swob.Request and returns the SLO manifest's segments.
Performs a :class:`~swift.common.swob.Request` and returns the SLO
manifest's segments.
:param obj_name: the name of the object being deleted,
as ``/container/object``
:param req: the base :class:`~swift.common.swob.Request`
:raises HTTPServerError: on unable to load obj_name or
on unable to load the SLO manifest data.
:raises HTTPBadRequest: on not an SLO manifest
@ -1151,7 +1149,7 @@ class StaticLargeObject(object):
Will delete all the segments in the SLO manifest and then, if
successful, will delete the manifest file.
:params req: a swob.Request with an obj in path
:param req: a :class:`~swift.common.swob.Request` with an obj in path
:returns: swob.Response whose app_iter set to Bulk.handle_delete_iter
"""
req.headers['Content-Type'] = None # Ignore content-type from client

View File

@ -177,8 +177,6 @@ from __future__ import print_function
from time import time
from traceback import format_exc
from uuid import uuid4
from hashlib import sha1
import hmac
import base64
from eventlet import Timeout
@ -437,20 +435,21 @@ class TempAuth(object):
s3_auth_details = env.get('swift3.auth_details')
if s3_auth_details:
if 'check_signature' not in s3_auth_details:
self.logger.warning(
'Swift3 did not provide a check_signature function; '
'upgrade Swift3 if you want to use it with tempauth')
return None
account_user = s3_auth_details['access_key']
signature_from_user = s3_auth_details['signature']
if account_user not in self.users:
return None
account, user = account_user.split(':', 1)
account_id = self.users[account_user]['url'].rsplit('/', 1)[-1]
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace(account_user, account_id, 1)
valid_signature = base64.encodestring(hmac.new(
self.users[account_user]['key'],
s3_auth_details['string_to_sign'],
sha1).digest()).strip()
if signature_from_user != valid_signature:
user = self.users[account_user]
account = account_user.split(':', 1)[0]
account_id = user['url'].rsplit('/', 1)[-1]
if not s3_auth_details['check_signature'](user['key']):
return None
env['PATH_INFO'] = env['PATH_INFO'].replace(
account_user, account_id, 1)
groups = self._get_user_groups(account, account_user, account_id)
return groups

View File

@ -77,6 +77,7 @@ pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = true
# See proxy-server.conf-sample for options
[filter:cache]

View File

@ -86,7 +86,8 @@ METADATA_KEY = 'user.swift.metadata'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
DATAFILE_SYSTEM_META = set('content-length deleted etag'.split())
RESERVED_DATAFILE_META = {'content-length', 'deleted', 'etag'}
DATAFILE_SYSTEM_META = {'x-static-large-object'}
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
@ -2415,7 +2416,8 @@ class BaseDiskFile(object):
self._merge_content_type_metadata(ctype_file)
sys_metadata = dict(
[(key, val) for key, val in self._datafile_metadata.items()
if key.lower() in DATAFILE_SYSTEM_META
if key.lower() in (RESERVED_DATAFILE_META |
DATAFILE_SYSTEM_META)
or is_sys_meta('object', key)])
self._metadata.update(self._metafile_metadata)
self._metadata.update(sys_metadata)

View File

@ -27,7 +27,7 @@ from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
from swift.common.request_helpers import is_sys_meta
from swift.common.swob import multi_range_iterator
from swift.obj.diskfile import DATAFILE_SYSTEM_META
from swift.obj.diskfile import DATAFILE_SYSTEM_META, RESERVED_DATAFILE_META
class InMemoryFileSystem(object):
@ -433,7 +433,8 @@ class DiskFile(object):
# with the object data.
immutable_metadata = dict(
[(key, val) for key, val in cur_mdata.items()
if key.lower() in DATAFILE_SYSTEM_META
if key.lower() in (RESERVED_DATAFILE_META |
DATAFILE_SYSTEM_META)
or is_sys_meta('object', key)])
metadata.update(immutable_metadata)
metadata['name'] = self._name

View File

@ -1097,8 +1097,9 @@ class ObjectReconstructor(Daemon):
self.part_count += len(partitions)
for partition in partitions:
part_path = join(obj_path, partition)
if partition in ('auditor_status_ALL.json',
'auditor_status_ZBF.json'):
if (partition.startswith('auditor_status_') and
partition.endswith('.json')):
# ignore auditor status files
continue
if not partition.isdigit():
self.logger.warning(

View File

@ -55,7 +55,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
from swift.obj.diskfile import RESERVED_DATAFILE_META, DiskFileRouter
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
@ -148,7 +148,7 @@ class ObjectController(BaseStorageServer):
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
if header not in RESERVED_DATAFILE_META:
self.allowed_headers.add(header)
self.auto_create_account_prefix = \
conf.get('auto_create_account_prefix') or '.'
@ -555,11 +555,6 @@ class ObjectController(BaseStorageServer):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
@public
@timing_stats()
def POST(self, request):
@ -602,7 +597,6 @@ class ObjectController(BaseStorageServer):
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
self._preserve_slo_manifest(metadata, orig_metadata)
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))

View File

@ -89,7 +89,6 @@ class AccountController(Controller):
# creates the account if necessary. If we feed it a perfect
# lie, it'll just try to create the container without
# creating the account, and that'll fail.
req.params = {} # clear our format override
resp = account_listing_response(
self.account_name, req,
listing_formats.get_listing_content_type(req))

View File

@ -804,6 +804,21 @@ class TestCommands(unittest.TestCase, RunSwiftRingBuilderMixin):
ring.rebalance()
self.assertTrue(ring.validate())
def test_set_weight_old_format_two_devices(self):
# Would block without the 'yes' argument
self.create_sample_ring()
argv = ["", self.tmpfile, "set_weight",
"d2", "3.14", "d1", "6.28", "--yes"]
self.assertSystemExit(EXIT_SUCCESS, ringbuilder.main, argv)
ring = RingBuilder.load(self.tmpfile)
# Check that weight was changed
self.assertEqual(ring.devs[2]['weight'], 3.14)
self.assertEqual(ring.devs[1]['weight'], 6.28)
# Check that other devices in ring are not affected
self.assertEqual(ring.devs[0]['weight'], 100)
self.assertEqual(ring.devs[3]['weight'], 100)
def test_set_weight_ipv4_old_format(self):
self.create_sample_ring()
# Test ipv4(old format)

View File

@ -85,17 +85,17 @@ class TestDomainRemap(unittest.TestCase):
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['Bad domain in host header'])
def test_domain_remap_account_with_path_root(self):
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/'])
self.assertEqual(resp, ['/v1/AUTH_a/v1'])
def test_domain_remap_account_container_with_path_root(self):
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/'])
self.assertEqual(resp, ['/v1/AUTH_a/c/v1'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
# Include http://localhost because urlparse used in Request.__init__
@ -111,7 +111,7 @@ class TestDomainRemap(unittest.TestCase):
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c//v1'])
self.assertEqual(resp, ['/v1/AUTH_a/c/v1//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
@ -129,7 +129,13 @@ class TestDomainRemap(unittest.TestCase):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/obj'])
self.assertEqual(resp, ['/v1/AUTH_a/c/v1/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/v1obj'])
def test_domain_remap_account_matching_ending_not_domain(self):
req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
@ -249,6 +255,58 @@ class TestDomainRemap(unittest.TestCase):
'http://cont.auth-uuid.example.com/test/')
class TestDomainRemapClientMangling(unittest.TestCase):
def setUp(self):
self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {
'mangle_client_paths': True})
def test_domain_remap_account_with_path_root_container(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/'])
def test_domain_remap_account_container_with_path_root_obj(self):
req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/'])
def test_domain_remap_account_container_with_path_obj_slash_v1(self):
# Include http://localhost because urlparse used in Request.__init__
# parse //v1 as http://v1
req = Request.blank('http://localhost//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_root_path_obj_slash_v1(self):
req = Request.blank('/v1//v1',
environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c//v1'])
def test_domain_remap_account_container_with_path_trailing_slash(self):
req = Request.blank('/obj/', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/obj/'])
def test_domain_remap_account_container_with_path_root_and_path(self):
req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/obj'])
def test_domain_remap_with_path_root_and_path_no_slash(self):
req = Request.blank('/v1obj', environ={'REQUEST_METHOD': 'GET'},
headers={'Host': 'c.AUTH_a.example.com'})
resp = self.app(req.environ, start_response)
self.assertEqual(resp, ['/v1/AUTH_a/c/v1obj'])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
@ -257,17 +315,17 @@ class TestSwiftInfo(unittest.TestCase):
def test_registered_defaults(self):
domain_remap.filter_factory({})
swift_info = utils.get_swift_info()
self.assertTrue('domain_remap' in swift_info)
self.assertTrue(
swift_info['domain_remap'].get('default_reseller_prefix') is None)
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': None})
def test_registered_nondefaults(self):
domain_remap.filter_factory({'default_reseller_prefix': 'cupcake'})
domain_remap.filter_factory({'default_reseller_prefix': 'cupcake',
'mangle_client_paths': 'yes'})
swift_info = utils.get_swift_info()
self.assertTrue('domain_remap' in swift_info)
self.assertEqual(
swift_info['domain_remap'].get('default_reseller_prefix'),
'cupcake')
self.assertIn('domain_remap', swift_info)
self.assertEqual(swift_info['domain_remap'], {
'default_reseller_prefix': 'cupcake'})
if __name__ == '__main__':

View File

@ -389,37 +389,35 @@ class TestSloPutManifest(SloTestCase):
'PUT', '/v1/AUTH_test/checktest/man_3', swob.HTTPCreated, {}, None)
def test_put_manifest_too_quick_fail(self):
req = Request.blank('/v1/a/c/o')
req = Request.blank('/v1/a/c/o?multipart-manifest=put', method='PUT')
req.content_length = self.slo.max_manifest_size + 1
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 413)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '413 Request Entity Too Large')
with patch.object(self.slo, 'max_manifest_segments', 0):
req = Request.blank('/v1/a/c/o', body=test_json_data)
e = None
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 413)
req = Request.blank('/v1/a/c/o?multipart-manifest=put',
method='PUT', body=test_json_data)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '413 Request Entity Too Large')
req = Request.blank('/v1/a/c/o', headers={'X-Copy-From': 'lala'})
try:
self.slo.handle_multipart_put(req, fake_start_response)
except HTTPException as e:
pass
self.assertEqual(e.status_int, 405)
req = Request.blank('/v1/a/c/o?multipart-manifest=put', method='PUT',
headers={'X-Copy-From': 'lala'})
status, headers, body = self.call_slo(req)
self.assertEqual(status, '405 Method Not Allowed')
# ignores requests to /
req = Request.blank(
'/?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data)
self.assertEqual(
list(self.slo.handle_multipart_put(req, fake_start_response)),
['passed'])
# we already validated that there are enough path segments in __call__
for path in ('/', '/v1/', '/v1/a/', '/v1/a/c/'):
req = Request.blank(
path + '?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data)
with self.assertRaises(ValueError):
list(self.slo.handle_multipart_put(req, fake_start_response))
req = Request.blank(
path.rstrip('/') + '?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=test_json_data)
with self.assertRaises(ValueError):
list(self.slo.handle_multipart_put(req, fake_start_response))
def test_handle_multipart_put_success(self):
req = Request.blank(
@ -430,11 +428,9 @@ class TestSloPutManifest(SloTestCase):
'X-Object-Sysmeta-Slo-Size'):
self.assertNotIn(h, req.headers)
def my_fake_start_response(*args, **kwargs):
gen_etag = '"' + md5hex('etagoftheobjectsegment') + '"'
self.assertIn(('Etag', gen_etag), args[1])
self.slo(req.environ, my_fake_start_response)
status, headers, body = self.call_slo(req)
gen_etag = '"' + md5hex('etagoftheobjectsegment') + '"'
self.assertIn(('Etag', gen_etag), headers)
self.assertIn('X-Static-Large-Object', req.headers)
self.assertEqual(req.headers['X-Static-Large-Object'], 'True')
self.assertIn('X-Object-Sysmeta-Slo-Etag', req.headers)
@ -486,10 +482,10 @@ class TestSloPutManifest(SloTestCase):
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank('/v1/a/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(catcher.exception.status_int, 400)
req = Request.blank('/v1/a/c/o?multipart-manifest=put',
method='PUT', body=test_json_data)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
def test_handle_multipart_put_disallow_empty_last_segment(self):
test_json_data = json.dumps([{'path': '/cont/object',
@ -498,10 +494,10 @@ class TestSloPutManifest(SloTestCase):
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 0}])
req = Request.blank('/v1/a/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(catcher.exception.status_int, 400)
req = Request.blank('/v1/a/c/o?multipart-manifest=put',
method='PUT', body=test_json_data)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
def test_handle_multipart_put_success_unicode(self):
test_json_data = json.dumps([{'path': u'/cont/object\u2661',
@ -512,7 +508,7 @@ class TestSloPutManifest(SloTestCase):
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_json_data)
self.assertNotIn('X-Static-Large-Object', req.headers)
self.slo(req.environ, fake_start_response)
self.call_slo(req)
self.assertIn('X-Static-Large-Object', req.headers)
self.assertEqual(req.environ['PATH_INFO'], '/v1/AUTH_test/c/man')
self.assertIn(('HEAD', '/v1/AUTH_test/cont/object\xe2\x99\xa1'),
@ -523,7 +519,7 @@ class TestSloPutManifest(SloTestCase):
'/test_good/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, headers={'Accept': 'test'},
body=test_xml_data)
no_xml = self.slo(req.environ, fake_start_response)
no_xml = list(self.slo(req.environ, fake_start_response))
self.assertEqual(no_xml, ['Manifest must be valid JSON.\n'])
def test_handle_multipart_put_bad_data(self):
@ -533,14 +529,15 @@ class TestSloPutManifest(SloTestCase):
req = Request.blank(
'/test_good/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
self.assertRaises(HTTPException, self.slo.handle_multipart_put, req,
fake_start_response)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
self.assertIn('invalid size_bytes', body)
for bad_data in [
json.dumps([{'path': '/cont', 'etag': 'etagoftheobj',
'size_bytes': 100}]),
json.dumps('asdf'), json.dumps(None), json.dumps(5),
'not json', '1234', None, '', json.dumps({'path': None}),
'not json', '1234', '', json.dumps({'path': None}),
json.dumps([{'path': '/cont/object', 'etag': None,
'size_bytes': 12}]),
json.dumps([{'path': '/cont/object', 'etag': 'asdf',
@ -557,8 +554,14 @@ class TestSloPutManifest(SloTestCase):
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
self.assertRaises(HTTPException, self.slo.handle_multipart_put,
req, fake_start_response)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
req = Request.blank(
'/v1/AUTH_test/c/man?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=None)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '411 Length Required')
def test_handle_multipart_put_check_data(self):
good_data = json.dumps(
@ -642,10 +645,11 @@ class TestSloPutManifest(SloTestCase):
{'path': '/cont/small_object',
'etag': 'etagoftheobjectsegment',
'size_bytes': 100}])
req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as cm:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(cm.exception.status_int, 400)
req = Request.blank('/v1/AUTH_test/c/o?multipart-manifest=put',
method='PUT', body=test_json_data)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
self.assertIn('Too small; each segment must be at least 1 byte', body)
def test_handle_multipart_put_skip_size_check_no_early_bailout(self):
# The first is too small (it's 0 bytes), and
@ -657,12 +661,12 @@ class TestSloPutManifest(SloTestCase):
{'path': '/cont/object2',
'etag': 'wrong wrong wrong',
'size_bytes': 100}])
req = Request.blank('/v1/AUTH_test/c/o', body=test_json_data)
with self.assertRaises(HTTPException) as cm:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(cm.exception.status_int, 400)
self.assertIn('at least 1 byte', cm.exception.body)
self.assertIn('Etag Mismatch', cm.exception.body)
req = Request.blank('/v1/AUTH_test/c/o?multipart-manifest=put',
method='PUT', body=test_json_data)
status, headers, body = self.call_slo(req)
self.assertEqual(status, '400 Bad Request')
self.assertIn('at least 1 byte', body)
self.assertIn('Etag Mismatch', body)
def test_handle_multipart_put_skip_etag_check(self):
good_data = json.dumps([
@ -694,10 +698,9 @@ class TestSloPutManifest(SloTestCase):
req = Request.blank(
'/v1/AUTH_test/checktest/man_3?multipart-manifest=put',
environ={'REQUEST_METHOD': 'PUT'}, body=bad_data)
with self.assertRaises(HTTPException) as catcher:
self.slo.handle_multipart_put(req, fake_start_response)
self.assertEqual(400, catcher.exception.status_int)
self.assertIn("Unsatisfiable Range", catcher.exception.body)
status, headers, body = self.call_slo(req)
self.assertEqual('400 Bad Request', status)
self.assertIn("Unsatisfiable Range", body)
def test_handle_multipart_put_success_conditional(self):
test_json_data = json.dumps([{'path': u'/cont/object',
@ -2771,29 +2774,25 @@ class TestSloGetManifest(SloTestCase):
self.assertTrue(error_lines[0].startswith(
'ERROR: An error occurred while retrieving segments'))
def test_download_takes_too_long(self):
the_time = [time.time()]
def mock_time():
return the_time[0]
# this is just a convenient place to hang a time jump; there's nothing
# special about the choice of is_success().
def mock_is_success(status_int):
the_time[0] += 7 * 3600
return status_int // 100 == 2
@patch('swift.common.request_helpers.time')
def test_download_takes_too_long(self, mock_time):
mock_time.time.side_effect = [
0, # start time
1, # just building the first segment request; purely local
2, # build the second segment request object, too, so we know we
# can't coalesce and should instead go fetch the first segment
7 * 3600, # that takes a while, but gets serviced; we build the
# third request and service the second
21 * 3600, # which takes *even longer* (ostensibly something to
# do with submanifests), but we build the fourth...
28 * 3600, # and before we go to service it we time out
]
req = Request.blank(
'/v1/AUTH_test/gettest/manifest-abcd',
environ={'REQUEST_METHOD': 'GET'})
with patch.object(slo, 'is_success', mock_is_success), \
patch('swift.common.request_helpers.time.time',
mock_time), \
patch('swift.common.request_helpers.is_success',
mock_is_success):
status, headers, body, exc = self.call_slo(
req, expect_exception=True)
status, headers, body, exc = self.call_slo(
req, expect_exception=True)
self.assertIsInstance(exc, SegmentError)
self.assertEqual(status, '200 OK')

View File

@ -19,7 +19,6 @@ import unittest
from contextlib import contextmanager
from base64 import b64encode
from time import time
import mock
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
@ -265,27 +264,58 @@ class TestAuth(unittest.TestCase):
self.assertEqual(req.environ['swift.authorize'],
local_auth.denied_response)
def test_auth_with_s3_authorization(self):
def test_auth_with_s3_authorization_good(self):
local_app = FakeApp()
local_auth = auth.filter_factory(
{'user_s3_s3': 'secret .admin'})(local_app)
req = self._make_request('/v1/AUTH_s3', environ={
req = self._make_request('/v1/s3:s3', environ={
'swift3.auth_details': {
'access_key': 's3:s3',
'signature': b64encode('sig'),
'string_to_sign': 't',
'check_signature': lambda secret: True}})
resp = req.get_response(local_auth)
self.assertEqual(resp.status_int, 404)
self.assertEqual(local_app.calls, 1)
self.assertEqual(req.environ['PATH_INFO'], '/v1/AUTH_s3')
self.assertEqual(req.environ['swift.authorize'],
local_auth.authorize)
def test_auth_with_s3_authorization_invalid(self):
local_app = FakeApp()
local_auth = auth.filter_factory(
{'user_s3_s3': 'secret .admin'})(local_app)
req = self._make_request('/v1/s3:s3', environ={
'swift3.auth_details': {
'access_key': 's3:s3',
'signature': b64encode('sig'),
'string_to_sign': 't',
'check_signature': lambda secret: False}})
resp = req.get_response(local_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(local_app.calls, 1)
self.assertEqual(req.environ['PATH_INFO'], '/v1/s3:s3')
self.assertEqual(req.environ['swift.authorize'],
local_auth.denied_response)
def test_auth_with_old_s3_details(self):
local_app = FakeApp()
local_auth = auth.filter_factory(
{'user_s3_s3': 'secret .admin'})(local_app)
req = self._make_request('/v1/s3:s3', environ={
'swift3.auth_details': {
'access_key': 's3:s3',
'signature': b64encode('sig'),
'string_to_sign': 't'}})
resp = req.get_response(local_auth)
with mock.patch('hmac.new') as hmac:
hmac.return_value.digest.return_value = 'sig'
resp = req.get_response(local_auth)
self.assertEqual(hmac.mock_calls, [
mock.call('secret', 't', mock.ANY),
mock.call().digest()])
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.status_int, 401)
self.assertEqual(local_app.calls, 1)
self.assertEqual(req.environ['PATH_INFO'], '/v1/s3:s3')
self.assertEqual(req.environ['swift.authorize'],
local_auth.authorize)
local_auth.denied_response)
def test_auth_no_reseller_prefix_no_token(self):
# Check that normally we set up a call back to our authorize.

View File

@ -3138,6 +3138,32 @@ class DiskFileMixin(BaseDiskFileTestMixin):
# original sysmeta keys are preserved
self.assertEqual('Value1', df._metadata['X-Object-Sysmeta-Key1'])
def test_disk_file_preserves_slo(self):
# build an object with some meta (at t0)
orig_metadata = {'X-Static-Large-Object': 'True',
'Content-Type': 'text/garbage'}
df = self._get_open_disk_file(ts=self.ts().internal,
extra_metadata=orig_metadata)
# sanity test
with df.open():
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
if df.policy.policy_type == EC_POLICY:
expected = df.policy.pyeclib_driver.get_segment_info(
1024, df.policy.ec_segment_size)['fragment_size']
else:
expected = 1024
self.assertEqual(str(expected), df._metadata['Content-Length'])
# write some new metadata (fast POST, don't send orig meta, at t0+1s)
df = self._simple_get_diskfile()
df.write_metadata({'X-Timestamp': self.ts().internal})
df = self._simple_get_diskfile()
with df.open():
# non-fast-post updateable keys are preserved
self.assertEqual('text/garbage', df._metadata['Content-Type'])
self.assertEqual('True', df._metadata['X-Static-Large-Object'])
def test_disk_file_reader_iter(self):
df, df_data = self._create_test_file('1234567890')
quarantine_msgs = []

View File

@ -346,6 +346,7 @@ class TestObjectController(unittest.TestCase):
req = Request.blank('/sda1/p/a/c/o')
resp = req.get_response(self.object_controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(dict(resp.headers), {
'Content-Type': 'application/x-test',
'Content-Length': '6',

View File

@ -7740,10 +7740,9 @@ class TestContainerController(unittest.TestCase):
# return 404 (as account is not found) and don't cache container
test_status_map((404, 404, 404), 404, None, 404)
# cache a 204 for the account because it's sort of like it
# exists
# cache a 200 for the account because it appears to be created
self.app.account_autocreate = True
test_status_map((404, 404, 404), 404, None, 204)
test_status_map((404, 404, 404), 404, None, 200)
def test_PUT_policy_headers(self):
backend_requests = []
@ -8913,15 +8912,39 @@ class TestAccountController(unittest.TestCase):
# ALL nodes are asked to create the account
# If successful, the GET request is repeated.
controller.app.account_autocreate = True
self.assert_status_map(controller.GET,
(404, 404, 404), 204)
self.assert_status_map(controller.GET,
(404, 503, 404), 204)
expected = 200
self.assert_status_map(controller.GET, (404, 404, 404), expected)
self.assert_status_map(controller.GET, (404, 503, 404), expected)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.GET,
(500, 500, 400), 503)
def _check_autocreate_listing_with_query_string(self, query_string):
controller = proxy_server.AccountController(self.app, 'a')
controller.app.account_autocreate = True
statuses = (404, 404, 404)
expected = 200
# get the response to check it has json content
with save_globals():
set_http_connect(*statuses)
req = Request.blank('/v1/a' + query_string)
self.app.update_request(req)
res = controller.GET(req)
headers = res.headers
self.assertEqual(
'yes', headers.get('X-Backend-Fake-Account-Listing'))
self.assertEqual(
'application/json; charset=utf-8',
headers.get('Content-Type'))
self.assertEqual([], json.loads(res.body))
self.assertEqual(res.status_int, expected)
def test_auto_create_account_listing_response_is_json(self):
self._check_autocreate_listing_with_query_string('')
self._check_autocreate_listing_with_query_string('?format=plain')
self._check_autocreate_listing_with_query_string('?format=json')
self._check_autocreate_listing_with_query_string('?format=xml')
def test_HEAD(self):
# Same behaviour as GET
with save_globals():
@ -8950,9 +8973,9 @@ class TestAccountController(unittest.TestCase):
(404, 404, 404), 404)
controller.app.account_autocreate = True
self.assert_status_map(controller.HEAD,
(404, 404, 404), 204)
(404, 404, 404), 200)
self.assert_status_map(controller.HEAD,
(500, 404, 404), 204)
(500, 404, 404), 200)
# We always return 503 if no majority between 4xx, 3xx or 2xx found
self.assert_status_map(controller.HEAD,
(500, 500, 400), 503)